From e8ca5fa5123bfc8d6b9e9c20c03ffec216931dfd Mon Sep 17 00:00:00 2001
From: jgray
Date: Wed, 1 Dec 2010 16:11:57 -0800
Subject: [PATCH] Initial commit of hadoop-20-warehouse

---
 .eclipse.templates/.classpath | 50 +
 .../Hadoop_Ant_Builder.launch | 22 +
 .eclipse.templates/.project | 27 +
 .eclipse.templates/README.txt | 6 +
 APACHE-README.txt | 31 +
 CHANGES.txt | 8552 +++
 FB-CHANGES.txt | 127 +
 LICENSE.txt | 244 +
 NOTICE.txt | 7 +
 README.txt | 24 +
 YAHOO-CHANGES.txt | 506 +
 bin/hadoop | 312 +
 bin/hadoop-config.sh | 68 +
 bin/hadoop-daemon.sh | 151 +
 bin/hadoop-daemons.sh | 34 +
 bin/rcc | 99 +
 bin/slaves.sh | 68 +
 bin/start-all.sh | 30 +
 bin/start-avatar.sh | 136 +
 bin/start-balancer.sh | 25 +
 bin/start-dfs.sh | 52 +
 bin/start-hmon-remote.sh | 44 +
 bin/start-hmon.sh | 41 +
 bin/start-mapred.sh | 31 +
 bin/start-raidnode-remote.sh | 43 +
 bin/start-raidnode.sh | 40 +
 bin/stop-all.sh | 27 +
 bin/stop-avatar.sh | 119 +
 bin/stop-balancer.sh | 26 +
 bin/stop-dfs.sh | 29 +
 bin/stop-hmon-remote.sh | 40 +
 bin/stop-hmon.sh | 37 +
 bin/stop-mapred.sh | 30 +
 bin/stop-raidnode-remote.sh | 40 +
 bin/stop-raidnode.sh | 37 +
 build.xml | 1885 +
 conf/capacity-scheduler.xml | 117 +
 conf/capacity-scheduler.xml.template | 117 +
 conf/configuration.xsl | 24 +
 conf/core-site.xml | 8 +
 conf/core-site.xml.template | 8 +
 conf/hadoop-env.sh | 67 +
 conf/hadoop-env.sh.template | 67 +
 conf/hadoop-metrics.properties | 40 +
 conf/hadoop-policy.xml | 97 +
 conf/hadoop-policy.xml.template | 97 +
 conf/hdfs-site.xml | 8 +
 conf/hdfs-site.xml.template | 8 +
 conf/log4j.properties | 115 +
 conf/mapred-queue-acls.xml | 31 +
 conf/mapred-queue-acls.xml.template | 31 +
 conf/mapred-site.xml | 8 +
 conf/mapred-site.xml.template | 8 +
 conf/masters | 1 +
 conf/masters.template | 1 +
 conf/slaves | 1 +
 conf/slaves.template | 1 +
 conf/ssl-client.xml.example | 57 +
 conf/ssl-server.xml.example | 55 +
 conf/taskcontroller.cfg | 4 +
 ivy.xml | 274 +
 ivy/hadoop-core.pom | 265 +
 ivy/ivy-2.0.0-rc2.jar | Bin 0 -> 893199 bytes
 ivy/ivysettings.xml | 81 +
 ivy/libraries.properties | 74 +
 lib/commons-cli-2.0-SNAPSHOT.jar | Bin 0 -> 258337 bytes
 lib/hsqldb-1.8.0.10.LICENSE.txt | 66 +
 lib/hsqldb-1.8.0.10.jar | Bin 0 -> 706710 bytes
 lib/jdiff/hadoop_0.17.0.xml | 43272 +++++++++++++++
 lib/jdiff/hadoop_0.18.1.xml | 44778 ++++++++++++++++
 lib/jdiff/hadoop_0.18.2.xml | 38788 +++++++++++++
 lib/jdiff/hadoop_0.18.3.xml | 38826 ++++++++++++++
 lib/jdiff/hadoop_0.19.0.xml | 43972 +++++++++++++++
 lib/jdiff/hadoop_0.19.1.xml | 44195 +++++++++++++++
 lib/jdiff/hadoop_0.19.2.xml | 44204 +++++++++++++++
 lib/jsp-2.1/jsp-2.1.jar | Bin 0 -> 1024681 bytes
 lib/jsp-2.1/jsp-api-2.1.jar | Bin 0 -> 134910 bytes
 lib/kfs-0.2.2.jar | Bin 0 -> 11428 bytes
 lib/kfs-0.2.LICENSE.txt | 202 +
 lib/zookeeper-3.3.1.jar | Bin 0 -> 1011828 bytes
 nativelib/lzma/liblzma.so | Bin 0 -> 547460 bytes
 nativelib/lzma/lzma/lzma.h | 223 +
 nativelib/lzma/lzma/lzma/alignment.h | 60 +
 nativelib/lzma/lzma/lzma/base.h | 543 +
 nativelib/lzma/lzma/lzma/block.h | 282 +
 nativelib/lzma/lzma/lzma/check.h | 149 +
 nativelib/lzma/lzma/lzma/container.h | 266 +
 nativelib/lzma/lzma/lzma/delta.h | 79 +
 nativelib/lzma/lzma/lzma/filter.h | 286 +
 nativelib/lzma/lzma/lzma/index.h | 242 +
 nativelib/lzma/lzma/lzma/index_hash.h | 98 +
 nativelib/lzma/lzma/lzma/init.h | 85 +
 nativelib/lzma/lzma/lzma/lzma.h | 360 +
 nativelib/lzma/lzma/lzma/memlimit.h | 207 +
 nativelib/lzma/lzma/lzma/simple.h | 94 +
 nativelib/lzma/lzma/lzma/stream_flags.h | 139 +
 nativelib/lzma/lzma/lzma/subblock.h | 204 +
 nativelib/lzma/lzma/lzma/version.h | 57 +
nativelib/lzma/lzma/lzma/vli.h | 174 + src/ant/org/apache/hadoop/ant/DfsTask.java | 203 + src/ant/org/apache/hadoop/ant/antlib.xml | 12 + .../ant/condition/DfsBaseConditional.java | 68 + .../hadoop/ant/condition/DfsExists.java | 24 + .../apache/hadoop/ant/condition/DfsIsDir.java | 24 + .../hadoop/ant/condition/DfsZeroLen.java | 24 + src/benchmarks/gridmix/README | 168 + src/benchmarks/gridmix/generateData.sh | 79 + src/benchmarks/gridmix/gridmix-env | 75 + .../gridmix/javasort/text-sort.large | 14 + .../gridmix/javasort/text-sort.medium | 14 + .../gridmix/javasort/text-sort.small | 14 + src/benchmarks/gridmix/maxent/maxent.large | 26 + .../gridmix/monsterQuery/monster_query.large | 27 + .../gridmix/monsterQuery/monster_query.medium | 27 + .../gridmix/monsterQuery/monster_query.small | 27 + .../gridmix/pipesort/text-sort.large | 16 + .../gridmix/pipesort/text-sort.medium | 16 + .../gridmix/pipesort/text-sort.small | 16 + .../gridmix/streamsort/text-sort.large | 16 + .../gridmix/streamsort/text-sort.medium | 16 + .../gridmix/streamsort/text-sort.small | 16 + .../gridmix/submissionScripts/allThroughHod | 13 + .../submissionScripts/allToSameCluster | 27 + .../gridmix/submissionScripts/maxentHod | 15 + .../submissionScripts/maxentToSameCluster | 19 + .../submissionScripts/monsterQueriesHod | 33 + .../monsterQueriesToSameCluster | 34 + .../submissionScripts/sleep_if_too_busy | 10 + .../gridmix/submissionScripts/textSortHod | 64 + .../submissionScripts/textSortToSameCluster | 53 + .../gridmix/submissionScripts/webdataScanHod | 34 + .../webdataScanToSameCluster | 34 + .../gridmix/submissionScripts/webdataSortHod | 17 + .../webdataSortToSameCluster | 18 + .../gridmix/webdatascan/webdata_scan.large | 14 + .../gridmix/webdatascan/webdata_scan.medium | 14 + .../gridmix/webdatascan/webdata_scan.small | 14 + .../gridmix/webdatasort/webdata_sort.large | 16 + .../gridmix/webdatasort/webdata_sort.medium | 16 + .../gridmix/webdatasort/webdata_sort.small | 16 + src/benchmarks/gridmix2/README.gridmix2 | 136 + src/benchmarks/gridmix2/build.xml | 67 + .../gridmix2/generateGridmix2data.sh | 94 + src/benchmarks/gridmix2/gridmix-env-2 | 35 + src/benchmarks/gridmix2/gridmix_config.xml | 550 + src/benchmarks/gridmix2/rungridmix_2 | 37 + .../hadoop/mapred/CombinerJobCreator.java | 106 + .../mapred/GenericMRLoadJobCreator.java | 98 + .../apache/hadoop/mapred/GridMixRunner.java | 659 + src/c++/libhdfs/Makefile.am | 41 + src/c++/libhdfs/Makefile.in | 515 + src/c++/libhdfs/aclocal.m4 | 1028 + src/c++/libhdfs/config.guess | 1447 + src/c++/libhdfs/config.sub | 1555 + src/c++/libhdfs/configure | 16051 ++++++ src/c++/libhdfs/configure.ac | 120 + src/c++/libhdfs/depcomp | 522 + src/c++/libhdfs/docs/Doxyfile | 1228 + src/c++/libhdfs/docs/libhdfs_footer.html | 9 + src/c++/libhdfs/hdfs.c | 2326 + src/c++/libhdfs/hdfs.h | 482 + src/c++/libhdfs/hdfsJniHelper.c | 480 + src/c++/libhdfs/hdfsJniHelper.h | 111 + src/c++/libhdfs/hdfs_read.c | 67 + src/c++/libhdfs/hdfs_test.c | 446 + src/c++/libhdfs/hdfs_write.c | 71 + src/c++/libhdfs/install-sh | 322 + src/c++/libhdfs/ltmain.sh | 7750 +++ src/c++/libhdfs/m4/apfunctions.m4 | 41 + src/c++/libhdfs/m4/apjava.m4 | 142 + src/c++/libhdfs/m4/apsupport.m4 | 164 + src/c++/libhdfs/m4/libtool.m4 | 7252 +++ src/c++/libhdfs/m4/ltoptions.m4 | 367 + src/c++/libhdfs/m4/ltsugar.m4 | 123 + src/c++/libhdfs/m4/ltversion.m4 | 23 + src/c++/libhdfs/m4/lt~obsolete.m4 | 92 + src/c++/libhdfs/missing | 353 + src/c++/libhdfs/tests/conf/core-site.xml | 24 + src/c++/libhdfs/tests/conf/hadoop-site.xml | 13 + 
src/c++/libhdfs/tests/conf/hdfs-site.xml | 60 + src/c++/libhdfs/tests/conf/mapred-site.xml | 8 + src/c++/libhdfs/tests/conf/slaves | 1 + src/c++/libhdfs/tests/test-libhdfs.sh | 129 + src/c++/librecordio/Makefile | 72 + src/c++/librecordio/archive.hh | 122 + src/c++/librecordio/binarchive.cc | 330 + src/c++/librecordio/binarchive.hh | 81 + src/c++/librecordio/csvarchive.cc | 368 + src/c++/librecordio/csvarchive.hh | 128 + src/c++/librecordio/exception.cc | 152 + src/c++/librecordio/exception.hh | 129 + src/c++/librecordio/fieldTypeInfo.cc | 64 + src/c++/librecordio/fieldTypeInfo.hh | 59 + src/c++/librecordio/filestream.cc | 98 + src/c++/librecordio/filestream.hh | 55 + src/c++/librecordio/recordTypeInfo.cc | 143 + src/c++/librecordio/recordTypeInfo.hh | 68 + src/c++/librecordio/recordio.cc | 75 + src/c++/librecordio/recordio.hh | 82 + src/c++/librecordio/test/Makefile | 50 + src/c++/librecordio/test/test.cc | 309 + src/c++/librecordio/test/test.hh | 26 + src/c++/librecordio/test/test.jr | 63 + src/c++/librecordio/test/testFromJava.cc | 71 + src/c++/librecordio/test/testFromJava.hh | 26 + src/c++/librecordio/typeIDs.cc | 274 + src/c++/librecordio/typeIDs.hh | 169 + src/c++/librecordio/typeInfo.cc | 69 + src/c++/librecordio/typeInfo.hh | 56 + src/c++/librecordio/utils.cc | 109 + src/c++/librecordio/utils.hh | 50 + src/c++/librecordio/xmlarchive.cc | 431 + src/c++/librecordio/xmlarchive.hh | 265 + src/c++/pipes/.autom4te.cfg | 42 + src/c++/pipes/Makefile.am | 31 + src/c++/pipes/Makefile.in | 524 + src/c++/pipes/aclocal.m4 | 7011 +++ src/c++/pipes/api/hadoop/Pipes.hh | 260 + src/c++/pipes/api/hadoop/TemplateFactory.hh | 96 + src/c++/pipes/compile | 136 + src/c++/pipes/config.guess | 1449 + src/c++/pipes/config.sub | 1552 + src/c++/pipes/configure | 22647 ++++++++ src/c++/pipes/configure.ac | 54 + .../debug/pipes-default-gdb-commands.txt | 3 + src/c++/pipes/debug/pipes-default-script | 3 + src/c++/pipes/depcomp | 522 + src/c++/pipes/impl/HadoopPipes.cc | 1078 + src/c++/pipes/impl/config.h.in | 103 + src/c++/pipes/install-sh | 322 + src/c++/pipes/ltmain.sh | 6530 +++ src/c++/pipes/missing | 360 + src/c++/task-controller/Makefile.in | 46 + src/c++/task-controller/configuration.c | 229 + src/c++/task-controller/configuration.h.in | 62 + src/c++/task-controller/configure | 5237 ++ src/c++/task-controller/configure.ac | 61 + src/c++/task-controller/main.c | 126 + src/c++/task-controller/task-controller.c | 586 + src/c++/task-controller/task-controller.h | 90 + src/c++/utils/.autom4te.cfg | 42 + src/c++/utils/Makefile.am | 33 + src/c++/utils/Makefile.in | 529 + src/c++/utils/aclocal.m4 | 7011 +++ src/c++/utils/api/hadoop/SerialUtils.hh | 170 + src/c++/utils/api/hadoop/StringUtils.hh | 81 + src/c++/utils/config.guess | 1449 + src/c++/utils/config.sub | 1552 + src/c++/utils/configure | 22398 ++++++++ src/c++/utils/configure.ac | 53 + src/c++/utils/depcomp | 522 + src/c++/utils/impl/SerialUtils.cc | 295 + src/c++/utils/impl/StringUtils.cc | 180 + src/c++/utils/impl/config.h.in | 97 + src/c++/utils/install-sh | 322 + src/c++/utils/ltmain.sh | 6530 +++ src/c++/utils/m4/hadoop_utils.m4 | 66 + src/c++/utils/missing | 360 + src/contrib/bash-tab-completion/README | 11 + src/contrib/bash-tab-completion/hadoop.sh | 121 + src/contrib/build-contrib.xml | 308 + src/contrib/build.xml | 68 + src/contrib/capacity-scheduler/README | 16 + src/contrib/capacity-scheduler/build.xml | 28 + src/contrib/capacity-scheduler/ivy.xml | 56 + .../ivy/libraries.properties | 5 + .../hadoop/mapred/CapacitySchedulerConf.java | 414 + 
.../hadoop/mapred/CapacityTaskScheduler.java | 1507 + .../mapred/JobInitializationPoller.java | 591 + .../hadoop/mapred/JobQueuesManager.java | 287 + .../apache/hadoop/mapred/MemoryMatcher.java | 138 + .../mapred/ClusterWithCapacityScheduler.java | 233 + .../hadoop/mapred/TestCapacityScheduler.java | 3218 ++ .../mapred/TestCapacitySchedulerConf.java | 431 + .../TestCapacitySchedulerWithJobTracker.java | 129 + .../mapred/TestJobTrackerRestartWithCS.java | 82 + src/contrib/data_join/build.xml | 45 + src/contrib/data_join/ivy.xml | 36 + .../data_join/ivy/libraries.properties | 5 + .../hadoop/contrib/utils/join/README.txt | 50 + .../utils/join/SampleDataJoinMapper.java | 54 + .../utils/join/SampleDataJoinReducer.java | 58 + .../utils/join/SampleTaggedMapOutput.java | 60 + .../utils/join/ArrayListBackedIterator.java | 70 + .../contrib/utils/join/DataJoinJob.java | 174 + .../utils/join/DataJoinMapperBase.java | 121 + .../utils/join/DataJoinReducerBase.java | 237 + .../hadoop/contrib/utils/join/JobBase.java | 173 + .../contrib/utils/join/ResetableIterator.java | 35 + .../contrib/utils/join/TaggedMapOutput.java | 56 + .../contrib/utils/join/TestDataJoin.java | 154 + .../dynamicclouds/bin/clusterbalancer.sh | 31 + src/contrib/dynamicclouds/build.xml | 74 + src/contrib/dynamicclouds/ivy.xml | 63 + .../dynamicclouds/ivy/libraries.properties | 5 + .../org/apache/hadoop/mapred/Cluster.java | 319 + .../mapred/ClusterBalancerAdminProtocol.java | 19 + .../hadoop/mapred/ClusterBalancerTool.java | 106 + .../mapred/ClusterStatusJSONParser.java | 62 + .../hadoop/mapred/ClustersBalancer.java | 367 + .../hadoop/mapred/DynamicCloudsDaemon.java | 343 + .../apache/hadoop/mapred/TTLaunchTask.java | 32 + .../org/apache/hadoop/mapred/TTLauncher.java | 93 + .../org/apache/hadoop/mapred/TTMover.java | 317 + .../hadoop/mapred/TaskTrackerLoadInfo.java | 265 + .../mapred/TaskTrackerLoadInfoIterator.java | 17 + .../hadoop/mapred/WastedTimeTTLIIterator.java | 99 + .../apache/hadoop/mapred/TTMoverTestStub.java | 44 + .../hadoop/mapred/TestClustersBalancer.java | 45 + .../org/apache/hadoop/mapred/TestTTMover.java | 150 + .../mapred/TestTaskTrackerLoadInfo.java | 118 + .../dynamicclouds/src/webapps/cb/index.html | 20 + .../dynamicclouds/src/webapps/cb/status.jsp | 88 + src/contrib/ec2/README.txt | 13 + src/contrib/ec2/bin/cmd-hadoop-cluster | 69 + src/contrib/ec2/bin/create-hadoop-image | 78 + src/contrib/ec2/bin/delete-hadoop-cluster | 58 + src/contrib/ec2/bin/hadoop-ec2 | 61 + src/contrib/ec2/bin/hadoop-ec2-env.sh | 101 + .../ec2/bin/hadoop-ec2-env.sh.template | 101 + src/contrib/ec2/bin/hadoop-ec2-init-remote.sh | 150 + .../ec2/bin/image/create-hadoop-image-remote | 80 + src/contrib/ec2/bin/image/ec2-run-user-data | 51 + src/contrib/ec2/bin/launch-hadoop-cluster | 40 + src/contrib/ec2/bin/launch-hadoop-master | 120 + src/contrib/ec2/bin/launch-hadoop-slaves | 55 + src/contrib/ec2/bin/list-hadoop-clusters | 31 + src/contrib/ec2/bin/terminate-hadoop-cluster | 46 + src/contrib/eclipse-plugin/.classpath | 9 + src/contrib/eclipse-plugin/.project | 28 + .../.settings/org.eclipse.jdt.core.prefs | 262 + .../.settings/org.eclipse.jdt.ui.prefs | 6 + .../org.eclipse.wst.validation.prefs | 6 + .../eclipse-plugin/META-INF/MANIFEST.MF | 28 + src/contrib/eclipse-plugin/build.properties | 7 + src/contrib/eclipse-plugin/build.xml | 79 + src/contrib/eclipse-plugin/ivy.xml | 36 + .../eclipse-plugin/ivy/libraries.properties | 5 + src/contrib/eclipse-plugin/plugin.xml | 287 + .../resources/Components/Conf.png | Bin 0 -> 1006 bytes 
.../resources/Components/Export.png | Bin 0 -> 2500 bytes .../resources/Components/Import.png | Bin 0 -> 2288 bytes .../resources/Components/New.png | Bin 0 -> 359 bytes .../resources/Components/Reload.png | Bin 0 -> 1455 bytes .../resources/Components/Tool.png | Bin 0 -> 853 bytes .../resources/Components/Tools.png | Bin 0 -> 1202 bytes .../eclipse-plugin/resources/ConnectDFS.xml | 32 + .../eclipse-plugin/resources/CreateProj.xml | 62 + .../resources/Elephant-16x16.png | Bin 0 -> 866 bytes .../resources/Elephant-24x24.png | Bin 0 -> 1671 bytes .../resources/Elephant-32x32.png | Bin 0 -> 2513 bytes .../resources/Elephant-64x64.png | Bin 0 -> 31779 bytes .../resources/Elephant-small-16x16.png | Bin 0 -> 809 bytes .../eclipse-plugin/resources/Elephant.jpg | Bin 0 -> 4105 bytes .../resources/Elephant100x100.gif | Bin 0 -> 7553 bytes .../resources/Elephant16x16.gif | Bin 0 -> 1080 bytes .../resources/Elephant2-136x136.png | Bin 0 -> 23414 bytes .../resources/Elephant2-16x16.png | Bin 0 -> 790 bytes .../resources/Elephant2-24x24.png | Bin 0 -> 1416 bytes .../resources/Elephant2-32x32.png | Bin 0 -> 2165 bytes .../resources/Elephant2-64x64.png | Bin 0 -> 6662 bytes .../eclipse-plugin/resources/Elephant2.jpg | Bin 0 -> 3562 bytes .../resources/Elephant3-122x122.png | Bin 0 -> 16171 bytes .../resources/Elephant3-16x16.png | Bin 0 -> 777 bytes .../resources/Elephant3-24x24.png | Bin 0 -> 1324 bytes .../eclipse-plugin/resources/HelloWorld.xml | 121 + .../eclipse-plugin/resources/MAP100x100.gif | Bin 0 -> 6978 bytes .../eclipse-plugin/resources/MAP16x15.gif | Bin 0 -> 1065 bytes .../eclipse-plugin/resources/RunProj.xml | 24 + .../resources/SetHadoopPath.xml | 25 + .../eclipse-plugin/resources/Setup.xml | 18 + .../eclipse-plugin/resources/download.png | Bin 0 -> 661 bytes .../eclipse-plugin/resources/drive100x100.gif | Bin 0 -> 5903 bytes .../eclipse-plugin/resources/drive16x16.gif | Bin 0 -> 1071 bytes .../eclipse-plugin/resources/driver.png | Bin 0 -> 820 bytes .../eclipse-plugin/resources/driverwiz.png | Bin 0 -> 4431 bytes .../resources/elephantblue16x16.gif | Bin 0 -> 1053 bytes .../eclipse-plugin/resources/files.gif | Bin 0 -> 339 bytes .../resources/hadoop-logo-16x16.png | Bin 0 -> 808 bytes .../resources/hadoop-logo-24x24.png | Bin 0 -> 1422 bytes .../resources/hadoop-logo-85x85.png | Bin 0 -> 10725 bytes .../eclipse-plugin/resources/hadoop-logo.jpg | Bin 0 -> 9443 bytes .../eclipse-plugin/resources/hadoop.gif | Bin 0 -> 1465 bytes .../eclipse-plugin/resources/hadoop_small.gif | Bin 0 -> 1072 bytes src/contrib/eclipse-plugin/resources/job.gif | Bin 0 -> 59 bytes .../resources/location-edit-16x16.png | Bin 0 -> 930 bytes .../resources/location-new-16x16.png | Bin 0 -> 888 bytes .../eclipse-plugin/resources/map16x16.gif | Bin 0 -> 1032 bytes .../eclipse-plugin/resources/mapper16.png | Bin 0 -> 851 bytes .../eclipse-plugin/resources/mapwiz.png | Bin 0 -> 4378 bytes .../eclipse-plugin/resources/new-folder.png | Bin 0 -> 456 bytes .../eclipse-plugin/resources/projwiz.png | Bin 0 -> 6867 bytes .../resources/reduce100x100.gif | Bin 0 -> 7166 bytes .../eclipse-plugin/resources/reduce16x16.gif | Bin 0 -> 1077 bytes .../resources/reducer-16x16.gif | Bin 0 -> 988 bytes .../eclipse-plugin/resources/reducer16.png | Bin 0 -> 853 bytes .../eclipse-plugin/resources/reducewiz.png | Bin 0 -> 4470 bytes .../eclipse-plugin/resources/refresh.png | Bin 0 -> 986 bytes .../resources/spite_overcloud.png | Bin 0 -> 838 bytes .../eclipse-plugin/resources/spitesmall.gif | Bin 0 -> 162 bytes 
.../eclipse-plugin/resources/spitesmall.png | Bin 0 -> 395 bytes .../eclipse-plugin/resources/upload.png | Bin 0 -> 745 bytes .../org/apache/hadoop/eclipse/Activator.java | 77 + .../hadoop/eclipse/ErrorMessageDialog.java | 45 + .../eclipse/HadoopPerspectiveFactory.java | 95 + .../apache/hadoop/eclipse/ImageLibrary.java | 252 + .../hadoop/eclipse/MapReduceNature.java | 146 + .../hadoop/eclipse/NewDriverWizard.java | 99 + .../hadoop/eclipse/NewDriverWizardPage.java | 263 + .../eclipse/NewMapReduceProjectWizard.java | 411 + .../hadoop/eclipse/NewMapperWizard.java | 181 + .../hadoop/eclipse/NewReducerWizard.java | 184 + .../apache/hadoop/eclipse/PropertyTester.java | 43 + .../hadoop/eclipse/actions/DFSActionImpl.java | 478 + .../eclipse/actions/EditLocationAction.java | 73 + .../eclipse/actions/NewLocationAction.java | 64 + .../actions/OpenNewMRClassWizardAction.java | 76 + .../actions/OpenNewMRProjectAction.java | 48 + .../hadoop/eclipse/dfs/ActionProvider.java | 193 + .../apache/hadoop/eclipse/dfs/DFSActions.java | 44 + .../apache/hadoop/eclipse/dfs/DFSContent.java | 32 + .../eclipse/dfs/DFSContentProvider.java | 244 + .../apache/hadoop/eclipse/dfs/DFSFile.java | 350 + .../apache/hadoop/eclipse/dfs/DFSFolder.java | 213 + .../hadoop/eclipse/dfs/DFSLocation.java | 108 + .../hadoop/eclipse/dfs/DFSLocationsRoot.java | 150 + .../apache/hadoop/eclipse/dfs/DFSMessage.java | 57 + .../apache/hadoop/eclipse/dfs/DFSPath.java | 160 + .../HadoopApplicationLaunchShortcut.java | 142 + .../launch/LocalMapReduceLaunchTabGroup.java | 182 + .../hadoop/eclipse/launch/MutexRule.java | 37 + .../launch/StartHadoopLaunchTabGroup.java | 47 + .../preferences/MapReducePreferencePage.java | 64 + .../preferences/PreferenceConstants.java | 34 + .../preferences/PreferenceInitializer.java | 33 + .../hadoop/eclipse/server/ConfProp.java | 147 + .../hadoop/eclipse/server/HadoopJob.java | 346 + .../hadoop/eclipse/server/HadoopPathPage.java | 124 + .../hadoop/eclipse/server/HadoopServer.java | 510 + .../hadoop/eclipse/server/IJobListener.java | 36 + .../hadoop/eclipse/server/JarModule.java | 146 + .../eclipse/servers/HadoopLocationWizard.java | 972 + ...oopServerSelectionListContentProvider.java | 76 + .../servers/IHadoopServerListener.java | 28 + .../eclipse/servers/RunOnHadoopWizard.java | 355 + .../eclipse/servers/ServerRegistry.java | 203 + .../eclipse/view/servers/ServerView.java | 460 + src/contrib/failmon/README | 97 + src/contrib/failmon/bin/failmon.sh | 54 + src/contrib/failmon/bin/scheduler.py | 235 + src/contrib/failmon/build.xml | 120 + .../failmon/conf/commons-logging.properties | 25 + src/contrib/failmon/conf/failmon.properties | 80 + src/contrib/failmon/conf/global.config | 39 + src/contrib/failmon/conf/hosts.list | 10 + src/contrib/failmon/conf/log4j.properties | 40 + src/contrib/failmon/ivy.xml | 36 + src/contrib/failmon/ivy/libraries.properties | 5 + .../hadoop/contrib/failmon/Anonymizer.java | 154 + .../hadoop/contrib/failmon/CPUParser.java | 101 + .../hadoop/contrib/failmon/Continuous.java | 41 + .../hadoop/contrib/failmon/Environment.java | 458 + .../hadoop/contrib/failmon/EventRecord.java | 151 + .../hadoop/contrib/failmon/Executor.java | 120 + .../hadoop/contrib/failmon/HDFSMerger.java | 154 + .../contrib/failmon/HadoopLogParser.java | 136 + .../hadoop/contrib/failmon/LocalStore.java | 268 + .../hadoop/contrib/failmon/LogParser.java | 214 + .../hadoop/contrib/failmon/MonitorJob.java | 43 + .../hadoop/contrib/failmon/Monitored.java | 53 + .../hadoop/contrib/failmon/NICParser.java | 140 + 
.../contrib/failmon/OfflineAnonymizer.java | 132 + .../contrib/failmon/PersistentState.java | 163 + .../hadoop/contrib/failmon/RunOnce.java | 120 + .../hadoop/contrib/failmon/SMARTParser.java | 206 + .../hadoop/contrib/failmon/SensorsParser.java | 112 + .../contrib/failmon/SerializedRecord.java | 163 + .../hadoop/contrib/failmon/ShellParser.java | 102 + .../contrib/failmon/SystemLogParser.java | 126 + src/contrib/fairscheduler/README | 22 + src/contrib/fairscheduler/build.xml | 28 + src/contrib/fairscheduler/ivy.xml | 58 + .../fairscheduler/ivy/libraries.properties | 5 + .../AllocationConfigurationException.java | 30 + .../hadoop/mapred/CapBasedLoadManager.java | 79 + .../hadoop/mapred/DefaultTaskSelector.java | 76 + .../apache/hadoop/mapred/FairScheduler.java | 1731 + .../mapred/FairSchedulerMetricsInst.java | 163 + .../hadoop/mapred/FairSchedulerServlet.java | 644 + .../hadoop/mapred/FifoJobComparator.java | 42 + .../org/apache/hadoop/mapred/LoadManager.java | 91 + .../hadoop/mapred/MemBasedLoadManager.java | 122 + .../hadoop/mapred/NewJobWeightBooster.java | 57 + .../java/org/apache/hadoop/mapred/Pool.java | 60 + .../org/apache/hadoop/mapred/PoolManager.java | 550 + .../apache/hadoop/mapred/TaskSelector.java | 101 + .../org/apache/hadoop/mapred/TaskType.java | 26 + .../apache/hadoop/mapred/WeightAdjuster.java | 33 + .../hadoop/mapred/TestFairScheduler.java | 2286 + src/contrib/fuse-dfs/Makefile.am | 27 + src/contrib/fuse-dfs/README | 125 + src/contrib/fuse-dfs/acinclude.m4 | 268 + src/contrib/fuse-dfs/bootstrap.sh | 21 + src/contrib/fuse-dfs/build.xml | 109 + src/contrib/fuse-dfs/configure.ac | 74 + src/contrib/fuse-dfs/global_footer.mk | 17 + src/contrib/fuse-dfs/global_header.mk | 50 + src/contrib/fuse-dfs/ivy.xml | 37 + src/contrib/fuse-dfs/ivy/libraries.properties | 5 + src/contrib/fuse-dfs/src/Makefile.am | 20 + src/contrib/fuse-dfs/src/fuse_connect.c | 53 + src/contrib/fuse-dfs/src/fuse_connect.h | 32 + .../fuse-dfs/src/fuse_context_handle.h | 48 + src/contrib/fuse-dfs/src/fuse_dfs.c | 140 + src/contrib/fuse-dfs/src/fuse_dfs.h | 67 + src/contrib/fuse-dfs/src/fuse_dfs_wrapper.sh | 39 + src/contrib/fuse-dfs/src/fuse_file_handle.h | 44 + src/contrib/fuse-dfs/src/fuse_impls.h | 63 + src/contrib/fuse-dfs/src/fuse_impls_access.c | 43 + src/contrib/fuse-dfs/src/fuse_impls_chmod.c | 50 + src/contrib/fuse-dfs/src/fuse_impls_chown.c | 81 + src/contrib/fuse-dfs/src/fuse_impls_create.c | 27 + src/contrib/fuse-dfs/src/fuse_impls_flush.c | 55 + src/contrib/fuse-dfs/src/fuse_impls_getattr.c | 68 + src/contrib/fuse-dfs/src/fuse_impls_mkdir.c | 68 + src/contrib/fuse-dfs/src/fuse_impls_mknod.c | 26 + src/contrib/fuse-dfs/src/fuse_impls_open.c | 83 + src/contrib/fuse-dfs/src/fuse_impls_read.c | 162 + src/contrib/fuse-dfs/src/fuse_impls_readdir.c | 117 + src/contrib/fuse-dfs/src/fuse_impls_release.c | 82 + src/contrib/fuse-dfs/src/fuse_impls_rename.c | 63 + src/contrib/fuse-dfs/src/fuse_impls_rmdir.c | 71 + src/contrib/fuse-dfs/src/fuse_impls_statfs.c | 85 + src/contrib/fuse-dfs/src/fuse_impls_symlink.c | 30 + .../fuse-dfs/src/fuse_impls_truncate.c | 67 + src/contrib/fuse-dfs/src/fuse_impls_unlink.c | 62 + src/contrib/fuse-dfs/src/fuse_impls_utimens.c | 52 + src/contrib/fuse-dfs/src/fuse_impls_write.c | 75 + src/contrib/fuse-dfs/src/fuse_init.c | 139 + src/contrib/fuse-dfs/src/fuse_init.h | 31 + src/contrib/fuse-dfs/src/fuse_options.c | 165 + src/contrib/fuse-dfs/src/fuse_options.h | 44 + src/contrib/fuse-dfs/src/fuse_stat_struct.c | 123 + src/contrib/fuse-dfs/src/fuse_stat_struct.h | 36 + 
src/contrib/fuse-dfs/src/fuse_trash.c | 125 + src/contrib/fuse-dfs/src/fuse_trash.h | 26 + src/contrib/fuse-dfs/src/fuse_users.c | 221 + src/contrib/fuse-dfs/src/fuse_users.h | 70 + .../fuse-dfs/src/test/TestFuseDFS.java | 626 + src/contrib/gridmix/README | 22 + src/contrib/gridmix/build.xml | 23 + src/contrib/gridmix/ivy.xml | 97 + src/contrib/gridmix/ivy/libraries.properties | 22 + .../mapred/gridmix/AvgRecordFactory.java | 91 + .../mapred/gridmix/CombineFileSplit.java | 196 + .../hadoop/mapred/gridmix/FilePool.java | 369 + .../hadoop/mapred/gridmix/FileQueue.java | 104 + .../hadoop/mapred/gridmix/GenerateData.java | 293 + .../apache/hadoop/mapred/gridmix/Gridmix.java | 351 + .../hadoop/mapred/gridmix/GridmixJob.java | 523 + .../hadoop/mapred/gridmix/GridmixKey.java | 258 + .../hadoop/mapred/gridmix/GridmixRecord.java | 215 + .../hadoop/mapred/gridmix/GridmixSplit.java | 147 + .../hadoop/mapred/gridmix/InputStriper.java | 126 + .../gridmix/IntermediateRecordFactory.java | 110 + .../hadoop/mapred/gridmix/JobFactory.java | 272 + .../hadoop/mapred/gridmix/JobMonitor.java | 243 + .../hadoop/mapred/gridmix/JobSubmitter.java | 177 + .../mapred/gridmix/ReadRecordFactory.java | 85 + .../hadoop/mapred/gridmix/RecordFactory.java | 40 + .../mapred/gridmix/DebugJobFactory.java | 277 + .../hadoop/mapred/gridmix/TestFilePool.java | 188 + .../hadoop/mapred/gridmix/TestFileQueue.java | 142 + .../mapred/gridmix/TestGridmixRecord.java | 277 + .../mapred/gridmix/TestGridmixSubmission.java | 323 + .../mapred/gridmix/TestRecordFactory.java | 79 + src/contrib/hdfsproxy/README | 30 + src/contrib/hdfsproxy/bin/hdfsproxy | 170 + src/contrib/hdfsproxy/bin/hdfsproxy-config.sh | 67 + src/contrib/hdfsproxy/bin/hdfsproxy-daemon.sh | 141 + .../hdfsproxy/bin/hdfsproxy-daemons.sh | 34 + src/contrib/hdfsproxy/bin/hdfsproxy-slaves.sh | 68 + src/contrib/hdfsproxy/bin/start-hdfsproxy.sh | 37 + src/contrib/hdfsproxy/bin/stop-hdfsproxy.sh | 28 + src/contrib/hdfsproxy/build.xml | 183 + src/contrib/hdfsproxy/conf/configuration.xsl | 24 + .../hdfsproxy/conf/hdfsproxy-default.xml | 59 + src/contrib/hdfsproxy/conf/hdfsproxy-env.sh | 44 + .../hdfsproxy/conf/hdfsproxy-env.sh.template | 44 + src/contrib/hdfsproxy/conf/hdfsproxy-hosts | 1 + src/contrib/hdfsproxy/conf/log4j.properties | 61 + src/contrib/hdfsproxy/conf/user-certs.xml | 26 + .../hdfsproxy/conf/user-permissions.xml | 28 + src/contrib/hdfsproxy/ivy.xml | 78 + .../hdfsproxy/ivy/libraries.properties | 5 + .../apache/hadoop/hdfsproxy/HdfsProxy.java | 285 + .../hdfsproxy/ProxyFileDataServlet.java | 51 + .../apache/hadoop/hdfsproxy/ProxyFilter.java | 330 + .../hadoop/hdfsproxy/ProxyHttpServer.java | 76 + .../hdfsproxy/ProxyListPathsServlet.java | 35 + .../hadoop/hdfsproxy/ProxyStreamFile.java | 55 + .../hadoop/hdfsproxy/ProxyUgiManager.java | 152 + .../hadoop/hdfsproxy/TestHdfsProxy.java | 262 + .../hadoop/hdfsproxy/TestProxyUgiManager.java | 107 + src/contrib/highavailability/README | 66 + src/contrib/highavailability/build.xml | 59 + src/contrib/highavailability/conf/raid.xml | 58 + src/contrib/highavailability/ivy.xml | 78 + .../highavailability/ivy/libraries.properties | 5 + .../org/apache/hadoop/hdfs/AvatarShell.java | 556 + .../hadoop/hdfs/AvatarZooKeeperClient.java | 266 + .../hdfs/DistributedAvatarFileSystem.java | 915 + .../hadoop/hdfs/protocol/AvatarConstants.java | 108 + .../hadoop/hdfs/protocol/AvatarProtocol.java | 69 + .../hdfs/server/datanode/AvatarDataNode.java | 746 + .../server/datanode/DatanodeProtocols.java | 228 + .../hdfs/server/datanode/OfferService.java 
| 441 + .../hdfs/server/namenode/AvatarNode.java | 1177 + .../hadoop/hdfs/server/namenode/Ingest.java | 770 + .../hadoop/hdfs/server/namenode/Standby.java | 424 + .../apache/hadoop/hdfs/MiniAvatarCluster.java | 768 + .../hadoop/hdfs/TestAvatarFailover.java | 185 + src/contrib/hive-streaming/build.xml | 41 + src/contrib/hive-streaming/ivy.xml | 76 + .../hive-streaming/ivy/libraries.properties | 5 + .../streaming/BufferingOutputCollector.java | 20 + .../apache/hadoop/streaming/Environment.java | 105 + .../hadoop/streaming/HadoopStreaming.java | 39 + .../InmemBufferingOutputCollector.java | 35 + .../apache/hadoop/streaming/JarBuilder.java | 205 + .../apache/hadoop/streaming/PathFinder.java | 147 + .../apache/hadoop/streaming/PipeMapRed.java | 628 + .../apache/hadoop/streaming/PipeMapper.java | 146 + .../apache/hadoop/streaming/PipeReducer.java | 169 + .../streaming/StreamBaseRecordReader.java | 155 + .../hadoop/streaming/StreamInputFormat.java | 80 + .../apache/hadoop/streaming/StreamJob.java | 1117 + .../streaming/StreamLineRecordReader.java | 38 + .../hadoop/streaming/StreamOutputFormat.java | 28 + .../streaming/StreamSequenceRecordReader.java | 38 + .../apache/hadoop/streaming/StreamUtil.java | 493 + .../streaming/StreamXmlRecordReader.java | 309 + .../hadoop/streaming/UTF8ByteArrayUtils.java | 165 + .../org/apache/hadoop/streaming/package.html | 9 + .../hadoop/streaming/StreamAggregate.java | 57 + .../hadoop/streaming/TestGzipInput.java | 62 + .../hadoop/streaming/TestStreamAggregate.java | 116 + .../streaming/TestStreamDataProtocol.java | 125 + .../streaming/TestStreamReduceNone.java | 117 + .../hadoop/streaming/TestStreamedMerge.java | 323 + .../hadoop/streaming/TestStreaming.java | 121 + .../streaming/TestStreamingFailure.java | 86 + .../org/apache/hadoop/streaming/TrApp.java | 116 + .../org/apache/hadoop/streaming/UniqApp.java | 57 + .../org/apache/hadoop/streaming/UtilTest.java | 58 + .../hadoop/streaming/ValueCountReduce.java | 66 + src/contrib/hmon/README | 80 + src/contrib/hmon/bin/start-hmon.sh | 41 + src/contrib/hmon/bin/stop-hmon.sh | 37 + src/contrib/hmon/build.xml | 58 + src/contrib/hmon/ivy.xml | 58 + src/contrib/hmon/ivy/libraries.properties | 5 + .../hadoop/mapred/ClusterUtilization.java | 258 + .../hadoop/mapred/HmonResourceReporter.java | 163 + .../apache/hadoop/mapred/JobUtilization.java | 363 + .../hadoop/mapred/LinuxUtilizationGauger.java | 308 + .../hadoop/mapred/LocalJobUtilization.java | 109 + .../hadoop/mapred/TaskTrackerUtilization.java | 235 + .../hadoop/mapred/UtilizationCollector.java | 484 + .../mapred/UtilizationCollectorCached.java | 224 + .../mapred/UtilizationCollectorProtocol.java | 86 + .../hadoop/mapred/UtilizationGauger.java | 71 + .../hadoop/mapred/UtilizationReporter.java | 124 + .../hadoop/mapred/UtilizationShell.java | 172 + .../hadoop/mapred/resourceutilization.xml | 64 + .../mapred/TestUtilizationCollector.java | 633 + src/contrib/hod/CHANGES.txt | 248 + src/contrib/hod/README | 104 + src/contrib/hod/bin/VERSION | 1 + src/contrib/hod/bin/checknodes | 31 + src/contrib/hod/bin/hod | 580 + src/contrib/hod/bin/hodcleanup | 183 + src/contrib/hod/bin/hodring | 290 + src/contrib/hod/bin/ringmaster | 352 + src/contrib/hod/bin/verify-account | 11 + src/contrib/hod/build.xml | 81 + src/contrib/hod/conf/hodrc | 47 + src/contrib/hod/config.txt | 172 + src/contrib/hod/getting_started.txt | 233 + .../hod/hodlib/AllocationManagers/__init__.py | 16 + .../goldAllocationManager.py | 104 + src/contrib/hod/hodlib/Common/__init__.py | 15 + 
.../hodlib/Common/allocationManagerUtil.py | 27 + src/contrib/hod/hodlib/Common/desc.py | 298 + .../hod/hodlib/Common/descGenerator.py | 72 + src/contrib/hod/hodlib/Common/hodsvc.py | 228 + src/contrib/hod/hodlib/Common/logger.py | 788 + .../hod/hodlib/Common/miniHTMLParser.py | 45 + src/contrib/hod/hodlib/Common/nodepoolutil.py | 26 + src/contrib/hod/hodlib/Common/setup.py | 1058 + .../hod/hodlib/Common/socketServers.py | 621 + src/contrib/hod/hodlib/Common/tcp.py | 176 + src/contrib/hod/hodlib/Common/threads.py | 389 + src/contrib/hod/hodlib/Common/types.py | 1266 + src/contrib/hod/hodlib/Common/util.py | 309 + src/contrib/hod/hodlib/Common/xmlrpc.py | 57 + .../hod/hodlib/GridServices/__init__.py | 18 + src/contrib/hod/hodlib/GridServices/hdfs.py | 310 + src/contrib/hod/hodlib/GridServices/mapred.py | 272 + .../hod/hodlib/GridServices/service.py | 266 + src/contrib/hod/hodlib/Hod/__init__.py | 15 + src/contrib/hod/hodlib/Hod/hadoop.py | 747 + src/contrib/hod/hodlib/Hod/hod.py | 754 + src/contrib/hod/hodlib/Hod/nodePool.py | 128 + src/contrib/hod/hodlib/HodRing/__init__.py | 15 + src/contrib/hod/hodlib/HodRing/hodRing.py | 930 + src/contrib/hod/hodlib/NodePools/__init__.py | 15 + src/contrib/hod/hodlib/NodePools/torque.py | 334 + src/contrib/hod/hodlib/RingMaster/__init__.py | 15 + .../hod/hodlib/RingMaster/idleJobTracker.py | 218 + .../hod/hodlib/RingMaster/ringMaster.py | 1019 + src/contrib/hod/hodlib/Schedulers/__init__.py | 15 + src/contrib/hod/hodlib/Schedulers/torque.py | 175 + .../hod/hodlib/ServiceProxy/__init__.py | 15 + .../hod/hodlib/ServiceProxy/serviceProxy.py | 49 + .../hod/hodlib/ServiceRegistry/__init__.py | 15 + .../hodlib/ServiceRegistry/serviceRegistry.py | 127 + src/contrib/hod/hodlib/__init__.py | 16 + src/contrib/hod/ivy.xml | 22 + src/contrib/hod/ivy/libraries.properties | 5 + src/contrib/hod/support/checklimits.sh | 57 + src/contrib/hod/support/logcondense.py | 212 + src/contrib/hod/testing/__init__.py | 15 + src/contrib/hod/testing/helper.py | 33 + src/contrib/hod/testing/lib.py | 113 + src/contrib/hod/testing/main.py | 83 + src/contrib/hod/testing/testHadoop.py | 123 + src/contrib/hod/testing/testHod.py | 310 + src/contrib/hod/testing/testHodCleanup.py | 113 + src/contrib/hod/testing/testHodRing.py | 117 + src/contrib/hod/testing/testModule.py | 88 + src/contrib/hod/testing/testRingmasterRPCs.py | 171 + src/contrib/hod/testing/testThreads.py | 99 + src/contrib/hod/testing/testTypes.py | 180 + src/contrib/hod/testing/testUtil.py | 62 + src/contrib/hod/testing/testXmlrpc.py | 109 + src/contrib/index/README | 43 + src/contrib/index/build.xml | 80 + src/contrib/index/conf/index-config.xml | 48 + .../index/conf/index-config.xml.template | 48 + src/contrib/index/ivy.xml | 40 + src/contrib/index/ivy/libraries.properties | 4 + src/contrib/index/sample/data.txt | 10 + src/contrib/index/sample/data2.txt | 10 + .../example/HashingDistributionPolicy.java | 56 + .../index/example/IdentityLocalAnalysis.java | 57 + .../index/example/LineDocInputFormat.java | 46 + .../index/example/LineDocLocalAnalysis.java | 80 + .../index/example/LineDocRecordReader.java | 231 + .../index/example/LineDocTextAndOp.java | 92 + .../example/RoundRobinDistributionPolicy.java | 58 + .../index/lucene/FileSystemDirectory.java | 349 + .../lucene/LuceneIndexFileNameFilter.java | 55 + .../contrib/index/lucene/LuceneUtil.java | 112 + .../index/lucene/MixedDeletionPolicy.java | 49 + .../contrib/index/lucene/MixedDirectory.java | 185 + .../index/lucene/RAMDirectoryUtil.java | 119 + 
.../contrib/index/lucene/ShardWriter.java | 233 + .../contrib/index/main/UpdateIndex.java | 275 + .../contrib/index/mapred/DocumentAndOp.java | 208 + .../contrib/index/mapred/DocumentID.java | 89 + .../index/mapred/IDistributionPolicy.java | 50 + .../contrib/index/mapred/IIndexUpdater.java | 46 + .../contrib/index/mapred/ILocalAnalysis.java | 32 + .../index/mapred/IndexUpdateCombiner.java | 77 + .../mapred/IndexUpdateConfiguration.java | 238 + .../index/mapred/IndexUpdateMapper.java | 199 + .../index/mapred/IndexUpdateOutputFormat.java | 65 + .../index/mapred/IndexUpdatePartitioner.java | 60 + .../index/mapred/IndexUpdateReducer.java | 143 + .../contrib/index/mapred/IndexUpdater.java | 150 + .../index/mapred/IntermediateForm.java | 239 + .../hadoop/contrib/index/mapred/Shard.java | 240 + .../index/lucene/TestMixedDirectory.java | 105 + .../index/mapred/TestDistributionPolicy.java | 234 + .../index/mapred/TestIndexUpdater.java | 255 + src/contrib/mumak/bin/mumak.sh | 175 + src/contrib/mumak/build.xml | 28 + src/contrib/mumak/conf/log4j.properties | 89 + src/contrib/mumak/conf/mumak.xml | 37 + src/contrib/mumak/ivy.xml | 110 + src/contrib/mumak/ivy/libraries.properties | 23 + .../mapred/AllMapsCompletedTaskAction.java | 81 + .../EagerTaskInitializationListenerAspects.aj | 49 + .../apache/hadoop/mapred/HeartbeatEvent.java | 37 + .../hadoop/mapred/JobCompleteEvent.java | 49 + .../hadoop/mapred/JobSubmissionEvent.java | 41 + .../apache/hadoop/mapred/SimulatorClock.java | 39 + .../apache/hadoop/mapred/SimulatorEngine.java | 239 + .../apache/hadoop/mapred/SimulatorEvent.java | 86 + .../hadoop/mapred/SimulatorEventListener.java | 40 + .../hadoop/mapred/SimulatorEventQueue.java | 137 + .../hadoop/mapred/SimulatorJobCache.java | 59 + .../hadoop/mapred/SimulatorJobClient.java | 123 + .../hadoop/mapred/SimulatorJobInProgress.java | 331 + .../hadoop/mapred/SimulatorJobStory.java | 105 + .../mapred/SimulatorJobStoryProducer.java | 90 + .../hadoop/mapred/SimulatorJobTracker.java | 669 + .../mapred/SimulatorLaunchTaskAction.java | 53 + .../hadoop/mapred/SimulatorTaskTracker.java | 735 + .../mapred/SimulatorTaskTrackerStatus.java | 56 + .../mapred/SortedZombieJobProducer.java | 84 + .../mapred/TaskAttemptCompletionEvent.java | 56 + .../org/apache/hadoop/net/StaticMapping.java | 62 + .../hadoop/mapred/CheckedEventQueue.java | 232 + .../org/apache/hadoop/mapred/FakeJobs.java | 170 + .../apache/hadoop/mapred/HeartbeatHelper.java | 94 + .../hadoop/mapred/MockSimulatorEngine.java | 130 + .../mapred/MockSimulatorJobTracker.java | 436 + .../hadoop/mapred/TestSimulatorEndToEnd.java | 81 + .../hadoop/mapred/TestSimulatorEngine.java | 181 + .../mapred/TestSimulatorEventQueue.java | 149 + .../hadoop/mapred/TestSimulatorJobClient.java | 220 + .../mapred/TestSimulatorJobTracker.java | 243 + .../mapred/TestSimulatorTaskTracker.java | 269 + src/contrib/raid/README | 217 + src/contrib/raid/build.xml | 58 + src/contrib/raid/conf/raid.xml | 58 + src/contrib/raid/ivy.xml | 78 + src/contrib/raid/ivy/libraries.properties | 5 + .../hdfs/DistributedRaidFileSystem.java | 441 + .../org/apache/hadoop/hdfs/RaidDFSUtil.java | 63 + .../namenode/BlockPlacementPolicyRaid.java | 605 + .../org/apache/hadoop/raid/BlockFixer.java | 856 + .../org/apache/hadoop/raid/ConfigManager.java | 406 + .../java/org/apache/hadoop/raid/Decoder.java | 213 + .../hadoop/raid/DirectoryTraversal.java | 314 + .../apache/hadoop/raid/DistBlockFixer.java | 706 + .../java/org/apache/hadoop/raid/DistRaid.java | 405 + .../org/apache/hadoop/raid/DistRaidNode.java | 
108 + .../java/org/apache/hadoop/raid/Encoder.java | 351 + .../org/apache/hadoop/raid/ErasureCode.java | 60 + .../org/apache/hadoop/raid/GaloisField.java | 282 + .../java/org/apache/hadoop/raid/HarIndex.java | 143 + .../org/apache/hadoop/raid/JobMonitor.java | 198 + .../apache/hadoop/raid/LocalBlockFixer.java | 170 + .../org/apache/hadoop/raid/LocalRaidNode.java | 62 + .../apache/hadoop/raid/ParityInputStream.java | 151 + .../raid/RaidConfigurationException.java | 30 + .../org/apache/hadoop/raid/RaidFilter.java | 294 + .../java/org/apache/hadoop/raid/RaidNode.java | 1793 + .../apache/hadoop/raid/RaidNodeMetrics.java | 96 + .../org/apache/hadoop/raid/RaidShell.java | 681 + .../org/apache/hadoop/raid/RaidUtils.java | 171 + .../apache/hadoop/raid/ReedSolomonCode.java | 106 + .../hadoop/raid/ReedSolomonDecoder.java | 227 + .../hadoop/raid/ReedSolomonEncoder.java | 94 + .../org/apache/hadoop/raid/XORDecoder.java | 92 + .../org/apache/hadoop/raid/XOREncoder.java | 63 + .../hadoop/raid/protocol/PolicyInfo.java | 256 + .../hadoop/raid/protocol/PolicyList.java | 106 + .../hadoop/raid/protocol/RaidProtocol.java | 58 + .../org/apache/hadoop/hdfs/TestRaidDfs.java | 513 + .../TestBlockPlacementPolicyRaid.java | 482 + .../apache/hadoop/raid/TestBlockFixer.java | 946 + .../hadoop/raid/TestDirectoryTraversal.java | 222 + .../apache/hadoop/raid/TestErasureCodes.java | 186 + .../apache/hadoop/raid/TestGaloisField.java | 168 + .../hadoop/raid/TestHarIndexParser.java | 72 + .../apache/hadoop/raid/TestRaidFilter.java | 119 + .../org/apache/hadoop/raid/TestRaidHar.java | 284 + .../org/apache/hadoop/raid/TestRaidNode.java | 712 + .../hadoop/raid/TestRaidNodeMetrics.java | 37 + .../org/apache/hadoop/raid/TestRaidPurge.java | 513 + .../org/apache/hadoop/raid/TestRaidShell.java | 278 + .../apache/hadoop/raid/TestRaidShellFsck.java | 769 + .../hadoop/raid/TestReedSolomonDecoder.java | 146 + .../hadoop/raid/TestReedSolomonEncoder.java | 104 + src/contrib/streaming/build.xml | 85 + src/contrib/streaming/ivy.xml | 80 + .../streaming/ivy/libraries.properties | 5 + .../apache/hadoop/streaming/Environment.java | 116 + .../hadoop/streaming/HadoopStreaming.java | 38 + .../apache/hadoop/streaming/JarBuilder.java | 205 + .../apache/hadoop/streaming/PathFinder.java | 145 + .../apache/hadoop/streaming/PipeCombiner.java | 42 + .../apache/hadoop/streaming/PipeMapRed.java | 674 + .../hadoop/streaming/PipeMapRunner.java | 38 + .../apache/hadoop/streaming/PipeMapper.java | 149 + .../apache/hadoop/streaming/PipeReducer.java | 154 + .../streaming/StreamBaseRecordReader.java | 148 + .../hadoop/streaming/StreamInputFormat.java | 82 + .../apache/hadoop/streaming/StreamJob.java | 980 + .../hadoop/streaming/StreamKeyValUtil.java | 141 + .../apache/hadoop/streaming/StreamUtil.java | 496 + .../streaming/StreamXmlRecordReader.java | 301 + .../hadoop/streaming/UTF8ByteArrayUtils.java | 215 + .../org/apache/hadoop/streaming/package.html | 27 + .../org/apache/hadoop/streaming/FailApp.java | 58 + .../apache/hadoop/streaming/StderrApp.java | 69 + .../hadoop/streaming/StreamAggregate.java | 57 + .../hadoop/streaming/TestGzipInput.java | 61 + .../streaming/TestMultipleArchiveFiles.java | 159 + .../streaming/TestMultipleCachefiles.java | 159 + .../hadoop/streaming/TestStreamAggregate.java | 119 + .../streaming/TestStreamDataProtocol.java | 128 + .../streaming/TestStreamReduceNone.java | 119 + .../streaming/TestStreamXmlRecordReader.java | 92 + .../hadoop/streaming/TestStreamedMerge.java | 321 + .../hadoop/streaming/TestStreaming.java | 106 + 
.../TestStreamingAsDifferentUser.java | 71 + .../streaming/TestStreamingBadRecords.java | 291 + .../streaming/TestStreamingCombiner.java | 60 + .../streaming/TestStreamingCounters.java | 72 + .../TestStreamingEmptyInpNonemptyOut.java | 120 + .../streaming/TestStreamingExitStatus.java | 107 + .../streaming/TestStreamingFailure.java | 90 + .../streaming/TestStreamingKeyValue.java | 123 + .../streaming/TestStreamingSeparator.java | 134 + .../hadoop/streaming/TestStreamingStderr.java | 113 + .../apache/hadoop/streaming/TestSymLink.java | 148 + .../apache/hadoop/streaming/TestUlimit.java | 127 + .../org/apache/hadoop/streaming/TrApp.java | 113 + .../apache/hadoop/streaming/TrAppReduce.java | 111 + .../apache/hadoop/streaming/UlimitApp.java | 45 + .../org/apache/hadoop/streaming/UniqApp.java | 57 + .../org/apache/hadoop/streaming/UtilTest.java | 82 + .../hadoop/streaming/ValueCountReduce.java | 64 + src/contrib/test/core-site.xml | 16 + src/contrib/test/hadoop-site.xml | 14 + src/contrib/test/hdfs-site.xml | 9 + src/contrib/test/mapred-site.xml | 13 + src/contrib/thriftfs/README | 39 + src/contrib/thriftfs/build.xml | 109 + src/contrib/thriftfs/gen-cocoa/hadoopfs.h | 214 + src/contrib/thriftfs/gen-cocoa/hadoopfs.m | 4507 ++ .../gen-cpp/ThriftHadoopFileSystem.cpp | 5344 ++ .../thriftfs/gen-cpp/ThriftHadoopFileSystem.h | 2359 + ...ThriftHadoopFileSystem_server.skeleton.cpp | 132 + .../thriftfs/gen-cpp/hadoopfs_constants.cpp | 16 + .../thriftfs/gen-cpp/hadoopfs_constants.h | 23 + .../thriftfs/gen-cpp/hadoopfs_types.cpp | 495 + src/contrib/thriftfs/gen-cpp/hadoopfs_types.h | 274 + .../hadoop/thriftfs/api/BlockLocation.java | 506 + .../hadoop/thriftfs/api/FileStatus.java | 794 + .../thriftfs/api/MalformedInputException.java | 219 + .../apache/hadoop/thriftfs/api/Pathname.java | 219 + .../thriftfs/api/ThriftHadoopFileSystem.java | 10976 ++++ .../hadoop/thriftfs/api/ThriftHandle.java | 214 + .../thriftfs/api/ThriftIOException.java | 219 + src/contrib/thriftfs/gen-perl/Constants.pm | 13 + .../gen-perl/ThriftHadoopFileSystem.pm | 4538 ++ src/contrib/thriftfs/gen-perl/Types.pm | 610 + .../gen-php/ThriftHadoopFileSystem.php | 4852 ++ .../thriftfs/gen-php/hadoopfs_types.php | 714 + src/contrib/thriftfs/gen-py/__init__.py | 0 .../hadoopfs/ThriftHadoopFileSystem-remote | 205 + .../gen-py/hadoopfs/ThriftHadoopFileSystem.py | 3601 ++ .../thriftfs/gen-py/hadoopfs/__init__.py | 1 + .../thriftfs/gen-py/hadoopfs/constants.py | 9 + .../thriftfs/gen-py/hadoopfs/ttypes.py | 506 + .../thriftfs/gen-rb/ThriftHadoopFileSystem.rb | 855 + .../thriftfs/gen-rb/hadoopfs_constants.rb | 9 + src/contrib/thriftfs/gen-rb/hadoopfs_types.rb | 77 + src/contrib/thriftfs/gen-st/hadoopfs.st | 1486 + src/contrib/thriftfs/if/hadoopfs.thrift | 127 + src/contrib/thriftfs/ivy.xml | 36 + src/contrib/thriftfs/ivy/libraries.properties | 5 + src/contrib/thriftfs/lib/README | 1 + src/contrib/thriftfs/lib/Thrift.LICENSE | 24 + src/contrib/thriftfs/lib/hadoopthriftapi.jar | Bin 0 -> 256686 bytes src/contrib/thriftfs/lib/libthrift.jar | Bin 0 -> 169666 bytes src/contrib/thriftfs/scripts/hdfs.py | 555 + .../thriftfs/scripts/start_thrift_server.sh | 31 + .../hadoop/thriftfs/HadoopThriftServer.java | 621 + .../apache/hadoop/thriftfs/TestThriftfs.java | 51 + src/contrib/vaidya/build.xml | 69 + src/contrib/vaidya/ivy.xml | 36 + src/contrib/vaidya/ivy/libraries.properties | 5 + .../apache/hadoop/vaidya/DiagnosticTest.java | 370 + .../apache/hadoop/vaidya/JobDiagnoser.java | 85 + .../PostExPerformanceDiagnoser.java | 269 + 
.../tests/BalancedReducePartitioning.java | 119 + .../tests/MapSideDiskSpill.java | 117 + .../tests/MapsReExecutionImpact.java | 87 + .../tests/ReadingHDFSFilesAsSideEffect.java | 112 + .../tests/ReducesReExecutionImpact.java | 94 + .../tests/postex_diagnosis_tests.xml | 105 + .../vaidya/statistics/job/JobStatistics.java | 656 + .../job/JobStatisticsInterface.java | 125 + .../statistics/job/MapTaskStatistics.java | 25 + .../statistics/job/ReduceTaskStatistics.java | 25 + .../vaidya/statistics/job/TaskStatistics.java | 103 + .../apache/hadoop/vaidya/util/XMLUtils.java | 237 + .../java/org/apache/hadoop/vaidya/vaidya.sh | 47 + src/core/core-default.xml | 459 + .../hadoop/HadoopVersionAnnotation.java | 56 + .../org/apache/hadoop/conf/Configurable.java | 29 + .../org/apache/hadoop/conf/Configuration.java | 1391 + .../org/apache/hadoop/conf/Configured.java | 46 + .../apache/hadoop/conf/Reconfigurable.java | 57 + .../hadoop/conf/ReconfigurableBase.java | 114 + .../hadoop/conf/ReconfigurationException.java | 104 + .../hadoop/conf/ReconfigurationServlet.java | 248 + .../hadoop/conf/ReconfigurationUtil.java | 66 + src/core/org/apache/hadoop/conf/package.html | 23 + .../hadoop/filecache/DistributedCache.java | 1244 + .../org/apache/hadoop/fs/BlockLocation.java | 279 + .../hadoop/fs/BlockMissingException.java | 55 + .../hadoop/fs/BufferedFSInputStream.java | 96 + .../apache/hadoop/fs/ChecksumException.java | 34 + .../apache/hadoop/fs/ChecksumFileSystem.java | 552 + .../org/apache/hadoop/fs/ContentSummary.java | 164 + .../apache/hadoop/fs/CorruptFileBlocks.java | 108 + src/core/org/apache/hadoop/fs/DF.java | 203 + src/core/org/apache/hadoop/fs/DU.java | 198 + .../apache/hadoop/fs/FSDataInputStream.java | 66 + .../apache/hadoop/fs/FSDataOutputStream.java | 100 + src/core/org/apache/hadoop/fs/FSError.java | 27 + .../org/apache/hadoop/fs/FSInputChecker.java | 432 + .../org/apache/hadoop/fs/FSInputStream.java | 78 + .../org/apache/hadoop/fs/FSOutputSummer.java | 176 + .../org/apache/hadoop/fs/FileChecksum.java | 53 + src/core/org/apache/hadoop/fs/FileStatus.java | 261 + src/core/org/apache/hadoop/fs/FileSystem.java | 1771 + src/core/org/apache/hadoop/fs/FileUtil.java | 776 + .../apache/hadoop/fs/FilterFileSystem.java | 355 + src/core/org/apache/hadoop/fs/FsShell.java | 1918 + .../apache/hadoop/fs/FsShellPermissions.java | 179 + .../org/apache/hadoop/fs/FsUrlConnection.java | 61 + .../apache/hadoop/fs/FsUrlStreamHandler.java | 47 + .../hadoop/fs/FsUrlStreamHandlerFactory.java | 78 + .../org/apache/hadoop/fs/GlobExpander.java | 166 + .../org/apache/hadoop/fs/HarFileSystem.java | 1049 + .../apache/hadoop/fs/InMemoryFileSystem.java | 446 + .../apache/hadoop/fs/LocalDirAllocator.java | 418 + .../org/apache/hadoop/fs/LocalFileSystem.java | 111 + .../apache/hadoop/fs/LocatedFileStatus.java | 109 + .../hadoop/fs/MD5MD5CRC32FileChecksum.java | 113 + src/core/org/apache/hadoop/fs/Path.java | 298 + src/core/org/apache/hadoop/fs/PathFilter.java | 32 + .../apache/hadoop/fs/PositionedReadable.java | 47 + .../apache/hadoop/fs/RawLocalFileSystem.java | 533 + .../org/apache/hadoop/fs/RemoteIterator.java | 42 + src/core/org/apache/hadoop/fs/Seekable.java | 41 + src/core/org/apache/hadoop/fs/Syncable.java | 30 + src/core/org/apache/hadoop/fs/Trash.java | 307 + .../apache/hadoop/fs/ftp/FTPException.java | 38 + .../apache/hadoop/fs/ftp/FTPFileSystem.java | 583 + .../apache/hadoop/fs/ftp/FTPInputStream.java | 126 + .../org/apache/hadoop/fs/kfs/IFSImpl.java | 58 + .../org/apache/hadoop/fs/kfs/KFSImpl.java | 144 + 
.../apache/hadoop/fs/kfs/KFSInputStream.java | 130 + .../apache/hadoop/fs/kfs/KFSOutputStream.java | 87 + .../hadoop/fs/kfs/KosmosFileSystem.java | 335 + .../org/apache/hadoop/fs/kfs/package.html | 98 + src/core/org/apache/hadoop/fs/package.html | 23 + .../fs/permission/AccessControlException.java | 61 + .../hadoop/fs/permission/ChmodParser.java | 51 + .../apache/hadoop/fs/permission/FsAction.java | 67 + .../hadoop/fs/permission/FsPermission.java | 225 + .../fs/permission/PermissionParser.java | 178 + .../fs/permission/PermissionStatus.java | 118 + .../hadoop/fs/permission/UmaskParser.java | 43 + src/core/org/apache/hadoop/fs/s3/Block.java | 47 + .../apache/hadoop/fs/s3/FileSystemStore.java | 63 + src/core/org/apache/hadoop/fs/s3/INode.java | 117 + .../hadoop/fs/s3/Jets3tFileSystemStore.java | 382 + .../apache/hadoop/fs/s3/MigrationTool.java | 280 + .../apache/hadoop/fs/s3/S3Credentials.java | 99 + .../org/apache/hadoop/fs/s3/S3Exception.java | 30 + .../org/apache/hadoop/fs/s3/S3FileSystem.java | 372 + .../hadoop/fs/s3/S3FileSystemException.java | 29 + .../apache/hadoop/fs/s3/S3InputStream.java | 202 + .../apache/hadoop/fs/s3/S3OutputStream.java | 219 + .../fs/s3/VersionMismatchException.java | 30 + src/core/org/apache/hadoop/fs/s3/package.html | 55 + .../hadoop/fs/s3native/FileMetadata.java | 54 + .../s3native/Jets3tNativeFileSystemStore.java | 255 + .../fs/s3native/NativeFileSystemStore.java | 65 + .../fs/s3native/NativeS3FileSystem.java | 578 + .../hadoop/fs/s3native/PartialListing.java | 59 + .../apache/hadoop/fs/s3native/package.html | 32 + .../org/apache/hadoop/fs/shell/Command.java | 95 + .../apache/hadoop/fs/shell/CommandFormat.java | 75 + .../apache/hadoop/fs/shell/CommandUtils.java | 28 + .../org/apache/hadoop/fs/shell/Count.java | 77 + .../apache/hadoop/http/FilterContainer.java | 40 + .../apache/hadoop/http/FilterInitializer.java | 29 + .../org/apache/hadoop/http/HtmlQuoting.java | 207 + .../org/apache/hadoop/http/HttpServer.java | 675 + .../apache/hadoop/io/AbstractMapWritable.java | 209 + src/core/org/apache/hadoop/io/ArrayFile.java | 94 + .../org/apache/hadoop/io/ArrayWritable.java | 103 + .../apache/hadoop/io/BinaryComparable.java | 76 + .../org/apache/hadoop/io/BloomMapFile.java | 259 + .../org/apache/hadoop/io/BooleanWritable.java | 113 + .../org/apache/hadoop/io/ByteWritable.java | 87 + .../org/apache/hadoop/io/BytesWritable.java | 216 + src/core/org/apache/hadoop/io/Closeable.java | 24 + .../apache/hadoop/io/CompressedWritable.java | 86 + .../org/apache/hadoop/io/DataInputBuffer.java | 91 + .../apache/hadoop/io/DataOutputBuffer.java | 108 + .../apache/hadoop/io/DefaultStringifier.java | 199 + .../org/apache/hadoop/io/DoubleWritable.java | 95 + .../org/apache/hadoop/io/FloatWritable.java | 87 + .../org/apache/hadoop/io/GenericWritable.java | 152 + src/core/org/apache/hadoop/io/IOUtils.java | 177 + .../org/apache/hadoop/io/InputBuffer.java | 89 + .../org/apache/hadoop/io/IntWritable.java | 86 + .../org/apache/hadoop/io/LongWritable.java | 97 + src/core/org/apache/hadoop/io/MD5Hash.java | 239 + src/core/org/apache/hadoop/io/MapFile.java | 694 + .../org/apache/hadoop/io/MapWritable.java | 171 + .../apache/hadoop/io/MultipleIOException.java | 49 + .../org/apache/hadoop/io/NullWritable.java | 70 + .../org/apache/hadoop/io/ObjectWritable.java | 263 + .../org/apache/hadoop/io/OutputBuffer.java | 92 + .../org/apache/hadoop/io/RawComparator.java | 37 + .../org/apache/hadoop/io/SequenceFile.java | 3198 ++ src/core/org/apache/hadoop/io/SetFile.java | 105 + 
.../apache/hadoop/io/SortedMapWritable.java | 206 + .../org/apache/hadoop/io/Stringifier.java | 54 + src/core/org/apache/hadoop/io/Text.java | 594 + .../apache/hadoop/io/TwoDArrayWritable.java | 91 + src/core/org/apache/hadoop/io/UTF8.java | 286 + .../org/apache/hadoop/io/VIntWritable.java | 73 + .../org/apache/hadoop/io/VLongWritable.java | 73 + .../hadoop/io/VersionMismatchException.java | 41 + .../apache/hadoop/io/VersionedWritable.java | 50 + src/core/org/apache/hadoop/io/Writable.java | 80 + .../apache/hadoop/io/WritableComparable.java | 55 + .../apache/hadoop/io/WritableComparator.java | 211 + .../apache/hadoop/io/WritableFactories.java | 63 + .../org/apache/hadoop/io/WritableFactory.java | 28 + .../org/apache/hadoop/io/WritableName.java | 79 + .../org/apache/hadoop/io/WritableUtils.java | 418 + .../apache/hadoop/io/compress/BZip2Codec.java | 301 + .../io/compress/BlockCompressorStream.java | 156 + .../io/compress/BlockDecompressorStream.java | 136 + .../apache/hadoop/io/compress/CodecPool.java | 154 + .../compress/CodecPrematureEOFException.java | 22 + .../hadoop/io/compress/CompressionCodec.java | 110 + .../io/compress/CompressionCodecFactory.java | 249 + .../io/compress/CompressionInputStream.java | 63 + .../io/compress/CompressionOutputStream.java | 69 + .../apache/hadoop/io/compress/Compressor.java | 106 + .../hadoop/io/compress/CompressorStream.java | 109 + .../hadoop/io/compress/Decompressor.java | 97 + .../io/compress/DecompressorStream.java | 158 + .../hadoop/io/compress/DefaultCodec.java | 87 + .../apache/hadoop/io/compress/GzipCodec.java | 216 + .../apache/hadoop/io/compress/LzmaCodec.java | 169 + .../io/compress/bzip2/BZip2Constants.java | 97 + .../compress/bzip2/BZip2DummyCompressor.java | 62 + .../bzip2/BZip2DummyDecompressor.java | 52 + .../io/compress/bzip2/CBZip2InputStream.java | 969 + .../io/compress/bzip2/CBZip2OutputStream.java | 2081 + .../apache/hadoop/io/compress/bzip2/CRC.java | 125 + .../io/compress/lzma/LzmaCompressor.java | 263 + .../io/compress/lzma/LzmaDecompressor.java | 251 + .../io/compress/zlib/BuiltInZlibDeflater.java | 49 + .../io/compress/zlib/BuiltInZlibInflater.java | 50 + .../io/compress/zlib/ZlibCompressor.java | 378 + .../io/compress/zlib/ZlibDecompressor.java | 287 + .../hadoop/io/compress/zlib/ZlibFactory.java | 110 + .../apache/hadoop/io/file/tfile/BCFile.java | 979 + .../tfile/BoundedByteArrayOutputStream.java | 96 + .../tfile/BoundedRangeFileInputStream.java | 141 + .../hadoop/io/file/tfile/ByteArray.java | 92 + .../apache/hadoop/io/file/tfile/Chunk.java | 429 + .../hadoop/io/file/tfile/CompareUtils.java | 97 + .../hadoop/io/file/tfile/Compression.java | 361 + .../io/file/tfile/MetaBlockAlreadyExists.java | 36 + .../io/file/tfile/MetaBlockDoesNotExist.java | 36 + .../hadoop/io/file/tfile/RawComparable.java | 57 + .../tfile/SimpleBufferedOutputStream.java | 77 + .../apache/hadoop/io/file/tfile/TFile.java | 2352 + .../hadoop/io/file/tfile/TFileDumper.java | 295 + .../apache/hadoop/io/file/tfile/Utils.java | 516 + src/core/org/apache/hadoop/io/package.html | 24 + .../io/retry/RetryInvocationHandler.java | 88 + .../apache/hadoop/io/retry/RetryPolicies.java | 258 + .../apache/hadoop/io/retry/RetryPolicy.java | 43 + .../apache/hadoop/io/retry/RetryProxy.java | 68 + .../org/apache/hadoop/io/retry/package.html | 48 + .../hadoop/io/serializer/Deserializer.java | 59 + .../io/serializer/DeserializerComparator.java | 70 + .../io/serializer/JavaSerialization.java | 101 + .../JavaSerializationComparator.java | 46 + 
.../hadoop/io/serializer/Serialization.java | 44 + .../io/serializer/SerializationFactory.java | 89 + .../hadoop/io/serializer/Serializer.java | 52 + .../io/serializer/WritableSerialization.java | 111 + .../apache/hadoop/io/serializer/package.html | 37 + .../CorruptedDataException.java | 20 + .../DataSegmentReader.java | 73 + .../DataSegmentWriter.java | 98 + .../InterleavedInputStream.java | 162 + .../InterleavedOutputStream.java | 88 + .../SimpleSeekableFormat.java | 87 + .../SimpleSeekableFormatCodec.java | 79 + .../SimpleSeekableFormatInputStream.java | 117 + .../SimpleSeekableFormatOutputStream.java | 176 + src/core/org/apache/hadoop/ipc/Client.java | 934 + .../apache/hadoop/ipc/ConnectionHeader.java | 93 + src/core/org/apache/hadoop/ipc/RPC.java | 618 + .../apache/hadoop/ipc/RemoteException.java | 120 + src/core/org/apache/hadoop/ipc/Server.java | 1379 + src/core/org/apache/hadoop/ipc/Status.java | 32 + .../apache/hadoop/ipc/VersionedProtocol.java | 41 + .../hadoop/ipc/metrics/RpcActivityMBean.java | 80 + .../apache/hadoop/ipc/metrics/RpcMetrics.java | 104 + .../org/apache/hadoop/ipc/metrics/RpcMgt.java | 119 + .../hadoop/ipc/metrics/RpcMgtMBean.java | 105 + src/core/org/apache/hadoop/ipc/package.html | 23 + src/core/org/apache/hadoop/log/LogLevel.java | 151 + .../apache/hadoop/metrics/ContextFactory.java | 197 + .../apache/hadoop/metrics/MetricsContext.java | 107 + .../hadoop/metrics/MetricsException.java | 42 + .../apache/hadoop/metrics/MetricsRecord.java | 246 + .../apache/hadoop/metrics/MetricsUtil.java | 100 + .../org/apache/hadoop/metrics/Updater.java | 33 + .../hadoop/metrics/file/FileContext.java | 151 + .../apache/hadoop/metrics/file/package.html | 43 + .../metrics/ganglia/GangliaContext.java | 243 + .../hadoop/metrics/ganglia/package.html | 74 + .../apache/hadoop/metrics/jmx/JMXContext.java | 125 + .../hadoop/metrics/jmx/JMXContextMBean.java | 124 + .../hadoop/metrics/jvm/EventCounter.java | 94 + .../apache/hadoop/metrics/jvm/JvmMetrics.java | 191 + .../org/apache/hadoop/metrics/package.html | 159 + .../metrics/spi/AbstractMetricsContext.java | 427 + .../hadoop/metrics/spi/CompositeContext.java | 186 + .../hadoop/metrics/spi/MetricValue.java | 52 + .../hadoop/metrics/spi/MetricsRecordImpl.java | 275 + .../hadoop/metrics/spi/NullContext.java | 58 + .../spi/NullContextWithUpdateThread.java | 83 + .../hadoop/metrics/spi/OutputRecord.java | 72 + .../org/apache/hadoop/metrics/spi/Util.java | 67 + .../apache/hadoop/metrics/spi/package.html | 36 + .../apache/hadoop/metrics/util/MBeanUtil.java | 87 + .../hadoop/metrics/util/MetricsBase.java | 47 + .../metrics/util/MetricsDynamicMBeanBase.java | 226 + .../hadoop/metrics/util/MetricsIntValue.java | 104 + .../hadoop/metrics/util/MetricsLongValue.java | 88 + .../hadoop/metrics/util/MetricsRegistry.java | 85 + .../metrics/util/MetricsTimeVaryingInt.java | 128 + .../metrics/util/MetricsTimeVaryingLong.java | 124 + .../metrics/util/MetricsTimeVaryingRate.java | 196 + .../hadoop/net/CachedDNSToSwitchMapping.java | 80 + src/core/org/apache/hadoop/net/DNS.java | 210 + .../apache/hadoop/net/DNSToSwitchMapping.java | 42 + .../net/IPv4AddressTruncationMapping.java | 77 + .../hadoop/net/InetSocketAddressFactory.java | 77 + src/core/org/apache/hadoop/net/NetUtils.java | 445 + .../apache/hadoop/net/NetworkTopology.java | 656 + src/core/org/apache/hadoop/net/Node.java | 47 + src/core/org/apache/hadoop/net/NodeBase.java | 134 + .../apache/hadoop/net/ScriptBasedMapping.java | 159 + .../hadoop/net/SocketIOWithTimeout.java | 455 + 
.../apache/hadoop/net/SocketInputStream.java | 170 + .../apache/hadoop/net/SocketOutputStream.java | 219 + .../apache/hadoop/net/SocksSocketFactory.java | 161 + .../hadoop/net/StandardSocketFactory.java | 122 + src/core/org/apache/hadoop/net/package.html | 23 + .../hadoop/record/BinaryRecordInput.java | 136 + .../hadoop/record/BinaryRecordOutput.java | 120 + src/core/org/apache/hadoop/record/Buffer.java | 246 + .../apache/hadoop/record/CsvRecordInput.java | 200 + .../apache/hadoop/record/CsvRecordOutput.java | 140 + src/core/org/apache/hadoop/record/Index.java | 37 + src/core/org/apache/hadoop/record/Record.java | 91 + .../hadoop/record/RecordComparator.java | 47 + .../org/apache/hadoop/record/RecordInput.java | 120 + .../apache/hadoop/record/RecordOutput.java | 141 + src/core/org/apache/hadoop/record/Utils.java | 490 + .../apache/hadoop/record/XmlRecordInput.java | 243 + .../apache/hadoop/record/XmlRecordOutput.java | 248 + .../hadoop/record/compiler/CGenerator.java | 71 + .../hadoop/record/compiler/CodeBuffer.java | 96 + .../hadoop/record/compiler/CodeGenerator.java | 53 + .../apache/hadoop/record/compiler/Consts.java | 44 + .../hadoop/record/compiler/CppGenerator.java | 74 + .../hadoop/record/compiler/JBoolean.java | 92 + .../hadoop/record/compiler/JBuffer.java | 103 + .../apache/hadoop/record/compiler/JByte.java | 80 + .../hadoop/record/compiler/JCompType.java | 72 + .../hadoop/record/compiler/JDouble.java | 89 + .../apache/hadoop/record/compiler/JField.java | 44 + .../apache/hadoop/record/compiler/JFile.java | 70 + .../apache/hadoop/record/compiler/JFloat.java | 86 + .../apache/hadoop/record/compiler/JInt.java | 80 + .../apache/hadoop/record/compiler/JLong.java | 84 + .../apache/hadoop/record/compiler/JMap.java | 229 + .../hadoop/record/compiler/JRecord.java | 806 + .../hadoop/record/compiler/JString.java | 83 + .../apache/hadoop/record/compiler/JType.java | 222 + .../hadoop/record/compiler/JVector.java | 197 + .../hadoop/record/compiler/JavaGenerator.java | 50 + .../hadoop/record/compiler/ant/RccTask.java | 136 + .../compiler/generated/ParseException.java | 210 + .../hadoop/record/compiler/generated/Rcc.java | 535 + .../compiler/generated/RccConstants.java | 88 + .../compiler/generated/RccTokenManager.java | 833 + .../compiler/generated/SimpleCharStream.java | 439 + .../record/compiler/generated/Token.java | 99 + .../compiler/generated/TokenMgrError.java | 151 + .../record/compiler/generated/package.html | 29 + .../hadoop/record/compiler/generated/rcc.jj | 384 + .../hadoop/record/compiler/package.html | 31 + .../hadoop/record/meta/FieldTypeInfo.java | 98 + .../apache/hadoop/record/meta/MapTypeID.java | 82 + .../hadoop/record/meta/RecordTypeInfo.java | 151 + .../hadoop/record/meta/StructTypeID.java | 151 + .../org/apache/hadoop/record/meta/TypeID.java | 107 + .../org/apache/hadoop/record/meta/Utils.java | 96 + .../hadoop/record/meta/VectorTypeID.java | 65 + .../org/apache/hadoop/record/package.html | 800 + .../security/AccessControlException.java | 56 + .../org/apache/hadoop/security/Group.java | 70 + .../hadoop/security/PermissionChecker.java | 80 + .../apache/hadoop/security/SecurityUtil.java | 159 + .../security/UnixUserGroupInformation.java | 425 + src/core/org/apache/hadoop/security/User.java | 70 + .../hadoop/security/UserGroupInformation.java | 129 + .../authorize/AuthorizationException.java | 76 + .../security/authorize/ConfiguredPolicy.java | 156 + .../authorize/ConnectionPermission.java | 74 + .../security/authorize/PolicyProvider.java | 50 + 
.../RefreshAuthorizationPolicyProtocol.java | 39 + .../hadoop/security/authorize/Service.java | 53 + .../ServiceAuthorizationManager.java | 105 + .../apache/hadoop/util/AsyncDiskService.java | 177 + .../apache/hadoop/util/CyclicIteration.java | 108 + src/core/org/apache/hadoop/util/Daemon.java | 51 + .../org/apache/hadoop/util/DataChecksum.java | 247 + .../org/apache/hadoop/util/DiskChecker.java | 89 + .../hadoop/util/GenericOptionsParser.java | 409 + .../org/apache/hadoop/util/GenericsUtil.java | 70 + src/core/org/apache/hadoop/util/HeapSort.java | 71 + .../apache/hadoop/util/HostsFileReader.java | 119 + .../apache/hadoop/util/IndexedSortable.java | 36 + .../org/apache/hadoop/util/IndexedSorter.java | 46 + .../org/apache/hadoop/util/LineReader.java | 190 + .../util/LinuxResourceCalculatorPlugin.java | 407 + .../hadoop/util/MRAsyncDiskService.java | 392 + .../org/apache/hadoop/util/MergeSort.java | 85 + .../apache/hadoop/util/NativeCodeLoader.java | 89 + .../org/apache/hadoop/util/PlatformName.java | 45 + .../apache/hadoop/util/PrintJarMainClass.java | 51 + .../org/apache/hadoop/util/PriorityQueue.java | 150 + .../org/apache/hadoop/util/ProcessTree.java | 313 + .../hadoop/util/ProcfsBasedProcessTree.java | 730 + .../org/apache/hadoop/util/ProgramDriver.java | 142 + src/core/org/apache/hadoop/util/Progress.java | 129 + .../org/apache/hadoop/util/Progressable.java | 35 + .../org/apache/hadoop/util/QuickSort.java | 131 + .../apache/hadoop/util/ReflectionUtils.java | 291 + .../hadoop/util/ResourceCalculatorPlugin.java | 159 + src/core/org/apache/hadoop/util/RunJar.java | 162 + .../org/apache/hadoop/util/ServletUtil.java | 105 + src/core/org/apache/hadoop/util/Shell.java | 538 + .../org/apache/hadoop/util/StringUtils.java | 716 + src/core/org/apache/hadoop/util/Tool.java | 79 + .../org/apache/hadoop/util/ToolRunner.java | 91 + .../hadoop/util/UTF8ByteArrayUtils.java | 98 + .../org/apache/hadoop/util/VersionInfo.java | 99 + src/core/org/apache/hadoop/util/XMLUtils.java | 56 + .../apache/hadoop/util/bloom/BloomFilter.java | 234 + .../util/bloom/CountingBloomFilter.java | 305 + .../hadoop/util/bloom/DynamicBloomFilter.java | 293 + .../org/apache/hadoop/util/bloom/Filter.java | 213 + .../hadoop/util/bloom/HashFunction.java | 119 + .../org/apache/hadoop/util/bloom/Key.java | 178 + .../hadoop/util/bloom/RemoveScheme.java | 91 + .../util/bloom/RetouchedBloomFilter.java | 450 + .../org/apache/hadoop/util/hash/Hash.java | 119 + .../apache/hadoop/util/hash/JenkinsHash.java | 260 + .../apache/hadoop/util/hash/MurmurHash.java | 83 + src/core/org/apache/hadoop/util/package.html | 23 + src/core/overview.html | 276 + src/docs/changes/ChangesFancyStyle.css | 170 + src/docs/changes/ChangesSimpleStyle.css | 49 + src/docs/changes/changes2html.pl | 282 + src/docs/forrest.properties | 106 + src/docs/releasenotes.html | 348 + src/docs/src/documentation/README.txt | 7 + .../classes/CatalogManager.properties | 37 + src/docs/src/documentation/conf/cli.xconf | 327 + .../content/xdocs/SLG_user_guide.xml | 192 + .../content/xdocs/capacity_scheduler.xml | 379 + .../content/xdocs/cluster_setup.xml | 929 + .../content/xdocs/commands_manual.xml | 685 + .../documentation/content/xdocs/distcp.xml | 351 + .../content/xdocs/fair_scheduler.xml | 371 + .../documentation/content/xdocs/gridmix.xml | 164 + .../content/xdocs/hadoop_archives.xml | 116 + .../content/xdocs/hdfs_design.xml | 367 + .../content/xdocs/hdfs_permissions_guide.xml | 191 + .../content/xdocs/hdfs_quota_admin_guide.xml | 105 + .../content/xdocs/hdfs_shell.xml | 470 
+ .../content/xdocs/hdfs_user_guide.xml | 489 + .../content/xdocs/hod_admin_guide.xml | 386 + .../content/xdocs/hod_config_guide.xml | 328 + .../content/xdocs/hod_user_guide.xml | 545 + .../src/documentation/content/xdocs/index.xml | 36 + .../documentation/content/xdocs/libhdfs.xml | 96 + .../content/xdocs/mapred_tutorial.xml | 3126 ++ .../content/xdocs/native_libraries.xml | 211 + .../content/xdocs/quickstart.xml | 295 + .../content/xdocs/service_level_auth.xml | 233 + .../src/documentation/content/xdocs/site.xml | 281 + .../documentation/content/xdocs/streaming.xml | 668 + .../src/documentation/content/xdocs/tabs.xml | 36 + .../documentation/content/xdocs/vaidya.xml | 171 + .../resources/images/architecture.gif | Bin 0 -> 15461 bytes .../resources/images/core-logo.gif | Bin 0 -> 6665 bytes .../resources/images/favicon.ico | Bin 0 -> 766 bytes .../resources/images/hadoop-logo-big.jpg | Bin 0 -> 127869 bytes .../resources/images/hadoop-logo.jpg | Bin 0 -> 9443 bytes .../resources/images/hdfsarchitecture.gif | Bin 0 -> 17653 bytes .../resources/images/hdfsarchitecture.odg | Bin 0 -> 41298 bytes .../resources/images/hdfsarchitecture.png | Bin 0 -> 40571 bytes .../resources/images/hdfsdatanodes.gif | Bin 0 -> 16060 bytes .../resources/images/hdfsdatanodes.odg | Bin 0 -> 37296 bytes .../resources/images/hdfsdatanodes.png | Bin 0 -> 30012 bytes src/docs/src/documentation/skinconf.xml | 354 + src/docs/status.xml | 74 + .../hadoop/examples/AggregateWordCount.java | 77 + .../examples/AggregateWordHistogram.java | 81 + .../hadoop/examples/DBCountPageView.java | 428 + .../apache/hadoop/examples/ExampleDriver.java | 76 + .../org/apache/hadoop/examples/Grep.java | 97 + .../org/apache/hadoop/examples/Join.java | 167 + .../hadoop/examples/MultiFileWordCount.java | 268 + .../apache/hadoop/examples/PiEstimator.java | 353 + .../hadoop/examples/RandomTextWriter.java | 758 + .../apache/hadoop/examples/RandomWriter.java | 287 + .../apache/hadoop/examples/SecondarySort.java | 239 + .../org/apache/hadoop/examples/SleepJob.java | 241 + .../org/apache/hadoop/examples/Sort.java | 198 + .../org/apache/hadoop/examples/WordCount.java | 69 + .../hadoop/examples/dancing/DancingLinks.java | 438 + .../dancing/DistributedPentomino.java | 207 + .../examples/dancing/OneSidedPentomino.java | 70 + .../hadoop/examples/dancing/Pentomino.java | 450 + .../hadoop/examples/dancing/Sudoku.java | 318 + .../hadoop/examples/dancing/package.html | 75 + .../hadoop/examples/dancing/puzzle1.dta | 9 + .../org/apache/hadoop/examples/package.html | 23 + .../hadoop/examples/terasort/TeraGen.java | 361 + .../examples/terasort/TeraInputFormat.java | 212 + .../examples/terasort/TeraOutputFormat.java | 88 + .../hadoop/examples/terasort/TeraSort.java | 261 + .../examples/terasort/TeraValidate.java | 157 + .../examples/terasort/job_history_summary.py | 100 + .../hadoop/examples/terasort/package.html | 113 + src/examples/pipes/.autom4te.cfg | 42 + src/examples/pipes/Makefile.am | 36 + src/examples/pipes/Makefile.in | 535 + src/examples/pipes/README.txt | 16 + src/examples/pipes/aclocal.m4 | 7011 +++ src/examples/pipes/conf/word-part.xml | 24 + src/examples/pipes/conf/word.xml | 28 + src/examples/pipes/config.guess | 1449 + src/examples/pipes/config.sub | 1552 + src/examples/pipes/configure | 22882 ++++++++ src/examples/pipes/configure.ac | 55 + src/examples/pipes/depcomp | 522 + src/examples/pipes/impl/config.h.in | 109 + src/examples/pipes/impl/sort.cc | 96 + src/examples/pipes/impl/wordcount-nopipe.cc | 148 + 
src/examples/pipes/impl/wordcount-part.cc | 76 + src/examples/pipes/impl/wordcount-simple.cc | 67 + src/examples/pipes/install-sh | 322 + src/examples/pipes/ltmain.sh | 6530 +++ src/examples/pipes/missing | 360 + src/examples/python/WordCount.py | 70 + src/examples/python/compile | 21 + .../python/pyAbacus/JyAbacusWCPlugIN.py | 36 + src/examples/python/pyAbacus/JythonAbacus.py | 82 + src/examples/python/pyAbacus/compile | 25 + .../python/pyAbacus/wordcountaggregator.spec | 15 + src/hdfs/hdfs-default.xml | 399 + .../hdfs/ChecksumDistributedFileSystem.java | 129 + .../org/apache/hadoop/hdfs/DFSClient.java | 3736 ++ src/hdfs/org/apache/hadoop/hdfs/DFSUtil.java | 228 + .../hadoop/hdfs/DistributedFileSystem.java | 542 + .../hadoop/hdfs/HDFSPolicyProvider.java | 50 + .../apache/hadoop/hdfs/HftpFileSystem.java | 437 + .../org/apache/hadoop/hdfs/HighTideShell.java | 270 + .../apache/hadoop/hdfs/HsftpFileSystem.java | 101 + src/hdfs/org/apache/hadoop/hdfs/package.html | 34 + .../AlreadyBeingCreatedException.java | 31 + .../apache/hadoop/hdfs/protocol/Block.java | 187 + .../hdfs/protocol/BlockListAsLongs.java | 127 + .../hdfs/protocol/ClientDatanodeProtocol.java | 69 + .../hadoop/hdfs/protocol/ClientProtocol.java | 571 + .../protocol/DSQuotaExceededException.java | 43 + .../hdfs/protocol/DataTransferProtocol.java | 61 + .../hadoop/hdfs/protocol/DatanodeID.java | 190 + .../hadoop/hdfs/protocol/DatanodeInfo.java | 345 + .../hdfs/protocol/DirectoryListing.java | 123 + .../hadoop/hdfs/protocol/FSConstants.java | 90 + .../hadoop/hdfs/protocol/HdfsFileStatus.java | 253 + .../hdfs/protocol/HighTideProtocol.java | 47 + .../hadoop/hdfs/protocol/LocatedBlock.java | 136 + .../hadoop/hdfs/protocol/LocatedBlocks.java | 187 + .../protocol/LocatedDirectoryListing.java | 96 + .../protocol/NSQuotaExceededException.java | 42 + .../hadoop/hdfs/protocol/PolicyInfo.java | 283 + .../hdfs/protocol/ProtocolCompatible.java | 56 + .../hdfs/protocol/QuotaExceededException.java | 57 + .../UnregisteredDatanodeException.java | 42 + .../hadoop/hdfs/server/balancer/Balancer.java | 1709 + .../hdfs/server/common/GenerationStamp.java | 114 + .../hdfs/server/common/HdfsConstants.java | 70 + .../common/InconsistentFSStateException.java | 46 + .../common/IncorrectVersionException.java | 43 + .../hadoop/hdfs/server/common/Storage.java | 767 + .../hdfs/server/common/StorageInfo.java | 54 + .../server/common/ThreadLocalDateFormat.java | 87 + .../hdfs/server/common/UpgradeManager.java | 89 + .../hdfs/server/common/UpgradeObject.java | 66 + .../common/UpgradeObjectCollection.java | 130 + .../server/common/UpgradeStatusReport.java | 124 + .../hdfs/server/common/Upgradeable.java | 98 + .../hadoop/hdfs/server/common/Util.java | 28 + .../datanode/BlockAlreadyExistsException.java | 38 + .../server/datanode/BlockMetadataHeader.java | 130 + .../hdfs/server/datanode/BlockReceiver.java | 1031 + .../hdfs/server/datanode/BlockSender.java | 473 + .../datanode/BlockTransferThrottler.java | 0 .../server/datanode/DataBlockScanner.java | 968 + .../hadoop/hdfs/server/datanode/DataNode.java | 1750 + .../hdfs/server/datanode/DataStorage.java | 439 + .../hdfs/server/datanode/DataXceiver.java | 669 + .../server/datanode/DataXceiverServer.java | 176 + .../server/datanode/DatanodeBlockInfo.java | 134 + .../hdfs/server/datanode/FSDataset.java | 1620 + .../datanode/FSDatasetAsyncDiskService.java | 199 + .../server/datanode/FSDatasetInterface.java | 273 + .../datanode/UpgradeManagerDatanode.java | 151 + .../datanode/UpgradeObjectDatanode.java | 134 + 
.../metrics/DataNodeActivityMBean.java | 76 + .../datanode/metrics/DataNodeMetrics.java | 138 + .../datanode/metrics/FSDatasetMBean.java | 65 + .../server/hightidenode/ConfigManager.java | 396 + .../hightidenode/DirectoryTraversal.java | 200 + .../hdfs/server/hightidenode/FileFixer.java | 510 + .../HighTideConfigurationException.java | 30 + .../server/hightidenode/HighTideNode.java | 494 + .../hightidenode/PendingReplication.java | 195 + .../hadoop/hdfs/server/hightidenode/README | 14 + .../hdfs/server/hightidenode/hightide.xml | 35 + .../metrics/HighTideNodeActivityMBean.java | 38 + .../metrics/HighTideNodeMetrics.java | 121 + .../server/namenode/BlockPlacementPolicy.java | 173 + .../namenode/BlockPlacementPolicyDefault.java | 508 + .../hdfs/server/namenode/BlocksMap.java | 487 + .../server/namenode/CheckpointSignature.java | 135 + .../hdfs/server/namenode/ConfigManager.java | 145 + .../server/namenode/CorruptReplicasMap.java | 129 + .../server/namenode/DatanodeDescriptor.java | 528 + .../server/namenode/DecommissionManager.java | 311 + .../hdfs/server/namenode/DfsServlet.java | 101 + .../server/namenode/EditLogInputStream.java | 54 + .../server/namenode/EditLogOutputStream.java | 113 + .../hdfs/server/namenode/FSClusterStats.java | 36 + .../hdfs/server/namenode/FSDirectory.java | 1801 + .../hdfs/server/namenode/FSEditLog.java | 1383 + .../hadoop/hdfs/server/namenode/FSImage.java | 1827 + .../hdfs/server/namenode/FSInodeInfo.java | 35 + .../hdfs/server/namenode/FSNamesystem.java | 6129 +++ .../server/namenode/FSPermissionChecker.java | 160 + .../server/namenode/FileChecksumServlets.java | 103 + .../hdfs/server/namenode/FileDataServlet.java | 128 + .../hdfs/server/namenode/FsckServlet.java | 45 + .../hdfs/server/namenode/GetImageServlet.java | 76 + .../hdfs/server/namenode/Host2NodesMap.java | 188 + .../hadoop/hdfs/server/namenode/INode.java | 408 + .../hdfs/server/namenode/INodeDirectory.java | 456 + .../namenode/INodeDirectoryWithQuota.java | 163 + .../hdfs/server/namenode/INodeFile.java | 234 + .../namenode/INodeFileUnderConstruction.java | 181 + .../hdfs/server/namenode/JspHelper.java | 482 + .../namenode/LeaseExpiredException.java | 30 + .../hdfs/server/namenode/LeaseManager.java | 421 + .../server/namenode/ListPathsServlet.java | 193 + .../hdfs/server/namenode/NameCache.java | 155 + .../hadoop/hdfs/server/namenode/NameNode.java | 1238 + .../server/namenode/NameNodeConfServlet.java | 117 + .../hdfs/server/namenode/NamenodeFsck.java | 868 + .../namenode/NotReplicatedYetException.java | 30 + .../namenode/PendingReplicationBlocks.java | 251 + .../server/namenode/PermissionChecker.java | 179 + .../server/namenode/SafeModeException.java | 34 + .../server/namenode/SecondaryNameNode.java | 614 + .../server/namenode/SerialNumberManager.java | 72 + .../hdfs/server/namenode/StreamFile.java | 93 + .../hdfs/server/namenode/TransferFsImage.java | 230 + .../namenode/UnderReplicatedBlocks.java | 250 + .../namenode/UnsupportedActionException.java | 34 + .../namenode/UpgradeManagerNamenode.java | 148 + .../namenode/UpgradeObjectNamenode.java | 67 + .../namenode/metrics/FSNamesystemMBean.java | 112 + .../namenode/metrics/FSNamesystemMetrics.java | 124 + .../metrics/NameNodeActivtyMBean.java | 67 + .../namenode/metrics/NameNodeMetrics.java | 169 + .../hdfs/server/protocol/BlockCommand.java | 122 + .../server/protocol/BlockMetaDataInfo.java | 58 + .../server/protocol/BlocksWithLocations.java | 117 + .../hdfs/server/protocol/DatanodeCommand.java | 75 + .../server/protocol/DatanodeProtocol.java | 157 + 
.../server/protocol/DatanodeRegistration.java | 128 + .../protocol/DisallowedDatanodeException.java | 37 + .../protocol/InterDatanodeProtocol.java | 47 + .../server/protocol/NamenodeProtocol.java | 74 + .../hdfs/server/protocol/NamespaceInfo.java | 87 + .../hdfs/server/protocol/UpgradeCommand.java | 92 + .../apache/hadoop/hdfs/tools/DFSAdmin.java | 864 + .../org/apache/hadoop/hdfs/tools/DFSck.java | 248 + .../apache/hadoop/hdfs/tools/HDFSConcat.java | 54 + .../org/apache/hadoop/hdfs/tools/JMXGet.java | 336 + .../apache/hadoop/hdfs/util/ByteArray.java | 52 + .../hdfs/util/DataTransferThrottler.java | 111 + .../org/apache/hadoop/hdfs/util/GSet.java | 81 + .../hadoop/hdfs/util/GSetByHashMap.java | 65 + .../hadoop/hdfs/util/LightWeightGSet.java | 283 + src/mapred/mapred-default.xml | 967 + .../mapred/AdminOperationsProtocol.java | 46 + .../hadoop/mapred/BasicTypeSorterBase.java | 242 + .../apache/hadoop/mapred/BufferSorter.java | 74 + .../org/apache/hadoop/mapred/Child.java | 206 + .../apache/hadoop/mapred/CleanupQueue.java | 141 + .../org/apache/hadoop/mapred/Clock.java | 28 + .../apache/hadoop/mapred/ClusterStatus.java | 488 + .../hadoop/mapred/CommitTaskAction.java | 54 + .../mapred/CompletedJobStatusStore.java | 327 + .../CompositeTaskTrackerInstrumentation.java | 85 + .../org/apache/hadoop/mapred/Counters.java | 708 + .../mapred/DefaultJobHistoryParser.java | 176 + .../hadoop/mapred/DefaultTaskController.java | 154 + .../DisallowedTaskTrackerException.java | 35 + .../EagerTaskInitializationListener.java | 180 + .../mapred/FileAlreadyExistsException.java | 37 + .../apache/hadoop/mapred/FileInputFormat.java | 730 + .../hadoop/mapred/FileOutputCommitter.java | 262 + .../hadoop/mapred/FileOutputFormat.java | 293 + .../org/apache/hadoop/mapred/FileSplit.java | 112 + .../hadoop/mapred/HeartbeatResponse.java | 136 + .../apache/hadoop/mapred/HistoryViewer.java | 617 + src/mapred/org/apache/hadoop/mapred/ID.java | 41 + .../org/apache/hadoop/mapred/IFile.java | 567 + .../hadoop/mapred/IFileInputStream.java | 186 + .../hadoop/mapred/IFileOutputStream.java | 93 + .../org/apache/hadoop/mapred/IndexCache.java | 166 + .../org/apache/hadoop/mapred/InputFormat.java | 100 + .../org/apache/hadoop/mapred/InputSplit.java | 55 + .../hadoop/mapred/InterTrackerProtocol.java | 145 + .../mapred/InvalidFileTypeException.java | 38 + .../hadoop/mapred/InvalidInputException.java | 63 + .../mapred/InvalidJobConfException.java | 38 + .../apache/hadoop/mapred/IsolationRunner.java | 221 + .../org/apache/hadoop/mapred/JSPUtil.java | 730 + .../org/apache/hadoop/mapred/JVMId.java | 148 + .../apache/hadoop/mapred/JobChangeEvent.java | 37 + .../org/apache/hadoop/mapred/JobClient.java | 2374 + .../org/apache/hadoop/mapred/JobConf.java | 2009 + .../apache/hadoop/mapred/JobConfigurable.java | 29 + .../org/apache/hadoop/mapred/JobContext.java | 58 + .../apache/hadoop/mapred/JobEndNotifier.java | 242 + .../org/apache/hadoop/mapred/JobHistory.java | 2187 + .../org/apache/hadoop/mapred/JobID.java | 117 + .../apache/hadoop/mapred/JobInProgress.java | 3734 ++ .../hadoop/mapred/JobInProgressListener.java | 47 + .../mapred/JobInProgress_Counter.properties | 14 + .../org/apache/hadoop/mapred/JobPriority.java | 32 + .../org/apache/hadoop/mapred/JobProfile.java | 183 + .../apache/hadoop/mapred/JobQueueClient.java | 194 + .../apache/hadoop/mapred/JobQueueInfo.java | 118 + .../mapred/JobQueueJobInProgressListener.java | 145 + .../hadoop/mapred/JobQueueTaskScheduler.java | 311 + .../org/apache/hadoop/mapred/JobStatus.java | 358 + 
.../hadoop/mapred/JobStatusChangeEvent.java | 74 + .../hadoop/mapred/JobSubmissionProtocol.java | 226 + .../org/apache/hadoop/mapred/JobTracker.java | 4939 ++ .../mapred/JobTrackerInstrumentation.java | 193 + .../hadoop/mapred/JobTrackerMetricsInst.java | 818 + .../hadoop/mapred/JobTrackerStatistics.java | 87 + .../org/apache/hadoop/mapred/JvmContext.java | 57 + .../org/apache/hadoop/mapred/JvmManager.java | 507 + .../org/apache/hadoop/mapred/JvmTask.java | 63 + .../mapred/KeyValueLineRecordReader.java | 115 + .../mapred/KeyValueTextInputFormat.java | 56 + .../apache/hadoop/mapred/KillJobAction.java | 59 + .../apache/hadoop/mapred/KillTaskAction.java | 57 + .../hadoop/mapred/LaunchTaskAction.java | 61 + .../mapred/LimitTasksPerJobTaskScheduler.java | 200 + .../hadoop/mapred/LineRecordReader.java | 174 + .../hadoop/mapred/LinuxTaskController.java | 607 + .../apache/hadoop/mapred/LocalJobRunner.java | 483 + .../org/apache/hadoop/mapred/MRConstants.java | 59 + .../hadoop/mapred/MapFileOutputFormat.java | 109 + .../apache/hadoop/mapred/MapOutputFile.java | 204 + .../apache/hadoop/mapred/MapReduceBase.java | 43 + .../mapred/MapReducePolicyProvider.java | 45 + .../org/apache/hadoop/mapred/MapRunnable.java | 50 + .../org/apache/hadoop/mapred/MapRunner.java | 64 + .../org/apache/hadoop/mapred/MapTask.java | 1572 + .../mapred/MapTaskCompletionEventsUpdate.java | 67 + .../apache/hadoop/mapred/MapTaskRunner.java | 65 + .../apache/hadoop/mapred/MapTaskStatus.java | 57 + .../org/apache/hadoop/mapred/Mapper.java | 159 + .../org/apache/hadoop/mapred/MergeSorter.java | 80 + .../org/apache/hadoop/mapred/Merger.java | 559 + .../hadoop/mapred/MultiFileInputFormat.java | 105 + .../apache/hadoop/mapred/MultiFileSplit.java | 86 + .../mapred/NodeHealthCheckerService.java | 367 + .../apache/hadoop/mapred/OutputCollector.java | 41 + .../apache/hadoop/mapred/OutputCommitter.java | 237 + .../apache/hadoop/mapred/OutputFormat.java | 78 + .../apache/hadoop/mapred/OutputLogFilter.java | 41 + .../org/apache/hadoop/mapred/Partitioner.java | 49 + .../apache/hadoop/mapred/QueueAclsInfo.java | 80 + .../apache/hadoop/mapred/QueueManager.java | 383 + .../org/apache/hadoop/mapred/RamManager.java | 44 + .../hadoop/mapred/RawKeyValueIterator.java | 66 + .../apache/hadoop/mapred/RecordReader.java | 84 + .../apache/hadoop/mapred/RecordWriter.java | 51 + .../org/apache/hadoop/mapred/ReduceTask.java | 2826 + .../hadoop/mapred/ReduceTaskRunner.java | 71 + .../hadoop/mapred/ReduceTaskStatus.java | 151 + .../org/apache/hadoop/mapred/Reducer.java | 201 + .../hadoop/mapred/ReinitTrackerAction.java | 40 + .../org/apache/hadoop/mapred/Reporter.java | 119 + .../hadoop/mapred/ResourceEstimator.java | 110 + .../hadoop/mapred/ResourceReporter.java | 114 + .../org/apache/hadoop/mapred/RunningJob.java | 192 + .../SequenceFileAsBinaryInputFormat.java | 140 + .../SequenceFileAsBinaryOutputFormat.java | 187 + .../mapred/SequenceFileAsTextInputFormat.java | 45 + .../SequenceFileAsTextRecordReader.java | 82 + .../mapred/SequenceFileInputFilter.java | 305 + .../mapred/SequenceFileInputFormat.java | 68 + .../mapred/SequenceFileOutputFormat.java | 123 + .../mapred/SequenceFileRecordReader.java | 128 + .../apache/hadoop/mapred/SkipBadRecords.java | 308 + .../apache/hadoop/mapred/SortedRanges.java | 383 + .../org/apache/hadoop/mapred/SpillRecord.java | 153 + .../hadoop/mapred/StatisticsCollector.java | 294 + .../org/apache/hadoop/mapred/TIPStatus.java | 24 + src/mapred/org/apache/hadoop/mapred/Task.java | 1328 + .../hadoop/mapred/TaskAttemptContext.java | 
62 + .../apache/hadoop/mapred/TaskAttemptID.java | 149 + .../hadoop/mapred/TaskCompletionEvent.java | 234 + .../apache/hadoop/mapred/TaskController.java | 235 + .../hadoop/mapred/TaskGraphServlet.java | 235 + .../org/apache/hadoop/mapred/TaskID.java | 139 + .../apache/hadoop/mapred/TaskInProgress.java | 1430 + .../org/apache/hadoop/mapred/TaskLog.java | 694 + .../apache/hadoop/mapred/TaskLogAppender.java | 101 + .../apache/hadoop/mapred/TaskLogServlet.java | 241 + .../apache/hadoop/mapred/TaskLogsMonitor.java | 449 + .../mapred/TaskMemoryManagerThread.java | 550 + .../org/apache/hadoop/mapred/TaskReport.java | 239 + .../org/apache/hadoop/mapred/TaskRunner.java | 823 + .../apache/hadoop/mapred/TaskScheduler.java | 95 + .../org/apache/hadoop/mapred/TaskStatus.java | 470 + .../org/apache/hadoop/mapred/TaskTracker.java | 3675 ++ .../hadoop/mapred/TaskTrackerAction.java | 117 + .../mapred/TaskTrackerInstrumentation.java | 71 + .../hadoop/mapred/TaskTrackerManager.java | 115 + .../hadoop/mapred/TaskTrackerMetricsInst.java | 86 + .../hadoop/mapred/TaskTrackerStatus.java | 688 + .../hadoop/mapred/TaskUmbilicalProtocol.java | 157 + .../hadoop/mapred/Task_Counter.properties | 19 + .../apache/hadoop/mapred/TextInputFormat.java | 99 + .../hadoop/mapred/TextOutputFormat.java | 140 + .../org/apache/hadoop/mapred/Utils.java | 60 + .../apache/hadoop/mapred/jobcontrol/Job.java | 387 + .../hadoop/mapred/jobcontrol/JobControl.java | 298 + .../hadoop/mapred/jobcontrol/package.html | 25 + .../mapred/join/ArrayListBackedIterator.java | 89 + .../mapred/join/ComposableInputFormat.java | 40 + .../mapred/join/ComposableRecordReader.java | 65 + .../mapred/join/CompositeInputFormat.java | 181 + .../mapred/join/CompositeInputSplit.java | 149 + .../mapred/join/CompositeRecordReader.java | 459 + .../mapred/join/InnerJoinRecordReader.java | 50 + .../hadoop/mapred/join/JoinRecordReader.java | 114 + .../mapred/join/MultiFilterRecordReader.java | 154 + .../mapred/join/OuterJoinRecordReader.java | 45 + .../mapred/join/OverrideRecordReader.java | 93 + .../org/apache/hadoop/mapred/join/Parser.java | 487 + .../hadoop/mapred/join/ResetableIterator.java | 93 + .../mapred/join/StreamBackedIterator.java | 99 + .../hadoop/mapred/join/TupleWritable.java | 227 + .../mapred/join/WrappedRecordReader.java | 206 + .../apache/hadoop/mapred/join/package.html | 105 + .../org/apache/hadoop/mapred/lib/Chain.java | 543 + .../apache/hadoop/mapred/lib/ChainMapper.java | 178 + .../hadoop/mapred/lib/ChainReducer.java | 222 + .../mapred/lib/CombineFileInputFormat.java | 693 + .../mapred/lib/CombineFileRecordReader.java | 162 + .../hadoop/mapred/lib/CombineFileSplit.java | 205 + .../mapred/lib/DelegatingInputFormat.java | 127 + .../hadoop/mapred/lib/DelegatingMapper.java | 65 + .../mapred/lib/FieldSelectionMapReduce.java | 337 + .../hadoop/mapred/lib/HashPartitioner.java | 39 + .../hadoop/mapred/lib/IdentityMapper.java | 42 + .../hadoop/mapred/lib/IdentityReducer.java | 46 + .../hadoop/mapred/lib/InputSampler.java | 418 + .../hadoop/mapred/lib/InverseMapper.java | 43 + .../mapred/lib/KeyFieldBasedComparator.java | 328 + .../mapred/lib/KeyFieldBasedPartitioner.java | 112 + .../hadoop/mapred/lib/KeyFieldHelper.java | 296 + .../hadoop/mapred/lib/LongSumReducer.java | 54 + .../hadoop/mapred/lib/MultipleInputs.java | 131 + .../mapred/lib/MultipleOutputFormat.java | 227 + .../hadoop/mapred/lib/MultipleOutputs.java | 563 + .../lib/MultipleSequenceFileOutputFormat.java | 49 + .../mapred/lib/MultipleTextOutputFormat.java | 46 + 
.../mapred/lib/MultithreadedMapRunner.java | 256 + .../hadoop/mapred/lib/NLineInputFormat.java | 122 + .../hadoop/mapred/lib/NullOutputFormat.java | 45 + .../apache/hadoop/mapred/lib/RegexMapper.java | 57 + .../hadoop/mapred/lib/TaggedInputSplit.java | 140 + .../hadoop/mapred/lib/TokenCountMapper.java | 56 + .../mapred/lib/TotalOrderPartitioner.java | 264 + .../mapred/lib/aggregate/DoubleValueSum.java | 95 + .../mapred/lib/aggregate/LongValueMax.java | 98 + .../mapred/lib/aggregate/LongValueMin.java | 98 + .../mapred/lib/aggregate/LongValueSum.java | 95 + .../mapred/lib/aggregate/StringValueMax.java | 86 + .../mapred/lib/aggregate/StringValueMin.java | 86 + .../mapred/lib/aggregate/UniqValueCount.java | 125 + .../UserDefinedValueAggregatorDescriptor.java | 115 + .../mapred/lib/aggregate/ValueAggregator.java | 53 + .../ValueAggregatorBaseDescriptor.java | 160 + .../aggregate/ValueAggregatorCombiner.java | 89 + .../aggregate/ValueAggregatorDescriptor.java | 68 + .../lib/aggregate/ValueAggregatorJob.java | 210 + .../lib/aggregate/ValueAggregatorJobBase.java | 88 + .../lib/aggregate/ValueAggregatorMapper.java | 65 + .../lib/aggregate/ValueAggregatorReducer.java | 74 + .../mapred/lib/aggregate/ValueHistogram.java | 179 + .../hadoop/mapred/lib/aggregate/package.html | 186 + .../hadoop/mapred/lib/db/DBConfiguration.java | 216 + .../hadoop/mapred/lib/db/DBInputFormat.java | 394 + .../hadoop/mapred/lib/db/DBOutputFormat.java | 186 + .../hadoop/mapred/lib/db/DBWritable.java | 75 + .../apache/hadoop/mapred/lib/db/package.html | 44 + .../org/apache/hadoop/mapred/lib/package.html | 25 + .../org/apache/hadoop/mapred/package.html | 230 + .../hadoop/mapred/pipes/Application.java | 188 + .../hadoop/mapred/pipes/BinaryProtocol.java | 348 + .../hadoop/mapred/pipes/DownwardProtocol.java | 117 + .../hadoop/mapred/pipes/OutputHandler.java | 159 + .../hadoop/mapred/pipes/PipesMapRunner.java | 107 + .../mapred/pipes/PipesNonJavaInputFormat.java | 101 + .../hadoop/mapred/pipes/PipesPartitioner.java | 69 + .../hadoop/mapred/pipes/PipesReducer.java | 125 + .../apache/hadoop/mapred/pipes/Submitter.java | 498 + .../hadoop/mapred/pipes/UpwardProtocol.java | 91 + .../apache/hadoop/mapred/pipes/package.html | 127 + .../apache/hadoop/mapred/tools/MRAdmin.java | 262 + .../hadoop/mapreduce/ClusterMetrics.java | 230 + .../org/apache/hadoop/mapreduce/Counter.java | 139 + .../apache/hadoop/mapreduce/CounterGroup.java | 184 + .../org/apache/hadoop/mapreduce/Counters.java | 185 + .../org/apache/hadoop/mapreduce/ID.java | 90 + .../apache/hadoop/mapreduce/InputFormat.java | 103 + .../apache/hadoop/mapreduce/InputSplit.java | 56 + .../org/apache/hadoop/mapreduce/Job.java | 479 + .../apache/hadoop/mapreduce/JobContext.java | 236 + .../org/apache/hadoop/mapreduce/JobID.java | 153 + .../apache/hadoop/mapreduce/JobStatus.java | 45 + .../apache/hadoop/mapreduce/MapContext.java | 71 + .../org/apache/hadoop/mapreduce/Mapper.java | 148 + .../hadoop/mapreduce/OutputCommitter.java | 140 + .../apache/hadoop/mapreduce/OutputFormat.java | 84 + .../apache/hadoop/mapreduce/Partitioner.java | 48 + .../apache/hadoop/mapreduce/RecordReader.java | 82 + .../apache/hadoop/mapreduce/RecordWriter.java | 53 + .../hadoop/mapreduce/ReduceContext.java | 198 + .../org/apache/hadoop/mapreduce/Reducer.java | 180 + .../hadoop/mapreduce/StatusReporter.java | 25 + .../hadoop/mapreduce/TaskAttemptContext.java | 66 + .../hadoop/mapreduce/TaskAttemptID.java | 174 + .../org/apache/hadoop/mapreduce/TaskID.java | 187 + .../mapreduce/TaskInputOutputContext.java | 104 + 
.../org/apache/hadoop/mapreduce/TaskType.java | 26 + .../mapreduce/lib/input/FileInputFormat.java | 477 + .../hadoop/mapreduce/lib/input/FileSplit.java | 97 + .../lib/input/InvalidInputException.java | 64 + .../mapreduce/lib/input/LineRecordReader.java | 147 + .../lib/input/SequenceFileInputFormat.java | 71 + .../lib/input/SequenceFileRecordReader.java | 104 + .../mapreduce/lib/input/TextInputFormat.java | 51 + .../mapreduce/lib/map/InverseMapper.java | 35 + .../lib/map/MultithreadedMapper.java | 271 + .../mapreduce/lib/map/TokenCounterMapper.java | 45 + .../lib/output/FileOutputCommitter.java | 277 + .../lib/output/FileOutputFormat.java | 269 + .../lib/output/NullOutputFormat.java | 58 + .../lib/output/SequenceFileOutputFormat.java | 109 + .../lib/output/TextOutputFormat.java | 138 + .../lib/partition/HashPartitioner.java | 32 + .../mapreduce/lib/reduce/IntSumReducer.java | 40 + .../mapreduce/lib/reduce/LongSumReducer.java | 40 + .../jobtracker/JobTrackerJspHelper.java | 106 + .../server/jobtracker/TaskTracker.java | 201 + src/native/.autom4te.cfg | 42 + src/native/AUTHORS | 3 + src/native/COPYING | 54 + src/native/ChangeLog | 3 + src/native/INSTALL | 236 + src/native/Makefile.am | 46 + src/native/Makefile.in | 659 + src/native/NEWS | 5 + src/native/README | 10 + src/native/acinclude.m4 | 26 + src/native/aclocal.m4 | 7250 +++ src/native/config.h.in | 91 + src/native/config/config.guess | 1477 + src/native/config/config.sub | 1566 + src/native/config/depcomp | 530 + src/native/config/install-sh | 323 + src/native/config/ltmain.sh | 6971 +++ src/native/config/missing | 360 + src/native/configure | 21284 ++++++++ src/native/configure.ac | 107 + src/native/lib/Makefile.am | 44 + src/native/lib/Makefile.in | 423 + src/native/packageNativeHadoop.sh | 67 + .../hadoop/io/compress/lzma/LzmaCompressor.c | 238 + .../io/compress/lzma/LzmaDecompressor.c | 244 + .../hadoop/io/compress/lzma/Makefile.am | 50 + .../hadoop/io/compress/lzma/Makefile.in | 470 + .../lzma/org_apache_hadoop_io_compress_lzma.h | 78 + .../hadoop/io/compress/zlib/Makefile.am | 50 + .../hadoop/io/compress/zlib/Makefile.in | 470 + .../hadoop/io/compress/zlib/ZlibCompressor.c | 304 + .../io/compress/zlib/ZlibDecompressor.c | 317 + .../zlib/org_apache_hadoop_io_compress_zlib.h | 64 + src/native/src/org_apache_hadoop.h | 98 + src/saveVersion.sh | 50 + src/test/bin/test-patch.sh | 694 + src/test/checkstyle-noframes-sorted.xsl | 178 + src/test/checkstyle.xml | 170 + src/test/core-site.xml | 50 + src/test/ddl/buffer.jr | 6 + src/test/ddl/int.jr | 6 + src/test/ddl/string.jr | 6 + src/test/ddl/test.jr | 46 + src/test/findbugsExcludeFile.xml | 83 + src/test/hadoop-policy.xml | 97 + src/test/hadoop-site.xml | 14 + src/test/hdfs-site.xml | 9 + src/test/lib/ftplet-api-1.0.0-SNAPSHOT.jar | Bin 0 -> 19002 bytes .../lib/ftpserver-core-1.0.0-SNAPSHOT.jar | Bin 0 -> 236242 bytes .../lib/ftpserver-server-1.0.0-SNAPSHOT.jar | Bin 0 -> 17084 bytes .../mina-core-2.0.0-M2-20080407.124109-12.jar | Bin 0 -> 540100 bytes src/test/log4j.properties | 7 + src/test/mapred-site.xml | 21 + src/test/org/apache/hadoop/cli/TestCLI.java | 474 + .../hadoop/cli/clitest_data/data120bytes | 8 + .../hadoop/cli/clitest_data/data15bytes | 1 + .../hadoop/cli/clitest_data/data30bytes | 2 + .../hadoop/cli/clitest_data/data60bytes | 4 + src/test/org/apache/hadoop/cli/testConf.xml | 3339 ++ src/test/org/apache/hadoop/cli/testConf.xsl | 28 + .../apache/hadoop/cli/util/CLITestData.java | 135 + .../hadoop/cli/util/CommandExecutor.java | 186 + .../hadoop/cli/util/ComparatorBase.java 
| 39 + .../hadoop/cli/util/ComparatorData.java | 108 + .../hadoop/cli/util/ExactComparator.java | 34 + .../hadoop/cli/util/RegexpComparator.java | 50 + .../hadoop/cli/util/SubstringComparator.java | 33 + .../hadoop/cli/util/TokenComparator.java | 49 + .../apache/hadoop/conf/TestConfiguration.java | 633 + .../org/apache/hadoop/conf/TestJobConf.java | 179 + .../hadoop/conf/TestNoDefaultsJobConf.java | 103 + .../hadoop/conf/TestReconfiguration.java | 320 + .../filecache/TestDistributedCache.java | 130 + .../apache/hadoop/fs/AccumulatingReducer.java | 101 + src/test/org/apache/hadoop/fs/DFSCIOTest.java | 560 + .../apache/hadoop/fs/DistributedFSCheck.java | 367 + .../hadoop/fs/FileSystemContractBaseTest.java | 462 + .../org/apache/hadoop/fs/IOMapperBase.java | 125 + .../hadoop/fs/TestChecksumFileSystem.java | 108 + .../org/apache/hadoop/fs/TestCopyFiles.java | 1042 + .../hadoop/fs/TestCorruptFileBlocks.java | 79 + src/test/org/apache/hadoop/fs/TestDFSIO.java | 484 + src/test/org/apache/hadoop/fs/TestDU.java | 95 + .../org/apache/hadoop/fs/TestFileSystem.java | 703 + .../hadoop/fs/TestFilterFileSystem.java | 126 + .../hadoop/fs/TestGetFileBlockLocations.java | 139 + .../apache/hadoop/fs/TestGlobExpander.java | 62 + .../org/apache/hadoop/fs/TestGlobPaths.java | 431 + .../apache/hadoop/fs/TestHarFileSystem.java | 462 + .../hadoop/fs/TestLocalDirAllocator.java | 211 + .../apache/hadoop/fs/TestLocalFileSystem.java | 156 + .../fs/TestLocalFileSystemPermission.java | 157 + .../apache/hadoop/fs/TestLocatedStatus.java | 150 + src/test/org/apache/hadoop/fs/TestPath.java | 152 + src/test/org/apache/hadoop/fs/TestTrash.java | 436 + .../hadoop/fs/TestTruncatedInputBug.java | 111 + .../hadoop/fs/TestUrlStreamHandler.java | 155 + .../hadoop/fs/ftp/TestFTPFileSystem.java | 156 + .../hadoop/fs/kfs/KFSEmulationImpl.java | 146 + .../hadoop/fs/kfs/TestKosmosFileSystem.java | 179 + .../fs/loadGenerator/DataGenerator.java | 160 + .../fs/loadGenerator/LoadGenerator.java | 466 + .../fs/loadGenerator/StructureGenerator.java | 307 + .../fs/loadGenerator/TestLoadGenerator.java | 232 + .../fs/permission/TestFsPermission.java | 130 + .../hadoop/fs/s3/InMemoryFileSystemStore.java | 185 + .../fs/s3/Jets3tS3FileSystemContractTest.java | 31 + .../fs/s3/S3FileSystemContractBaseTest.java | 48 + .../org/apache/hadoop/fs/s3/TestINode.java | 60 + .../s3/TestInMemoryS3FileSystemContract.java | 31 + .../hadoop/fs/s3/TestS3Credentials.java | 36 + .../apache/hadoop/fs/s3/TestS3FileSystem.java | 50 + .../InMemoryNativeFileSystemStore.java | 198 + .../Jets3tNativeS3FileSystemContractTest.java | 30 + .../NativeS3FileSystemContractBaseTest.java | 59 + ...estInMemoryNativeS3FileSystemContract.java | 30 + .../apache/hadoop/hdfs/AppendTestUtil.java | 119 + .../hadoop/hdfs/BenchmarkThroughput.java | 234 + .../apache/hadoop/hdfs/DFSClientAdapter.java | 19 + .../org/apache/hadoop/hdfs/DFSTestUtil.java | 280 + .../apache/hadoop/hdfs/DataNodeCluster.java | 240 + .../apache/hadoop/hdfs/MiniDFSCluster.java | 957 + src/test/org/apache/hadoop/hdfs/NNBench.java | 996 + .../apache/hadoop/hdfs/NNBenchWithoutMR.java | 372 + .../apache/hadoop/hdfs/TestAbandonBlock.java | 71 + .../hdfs/TestBlockMissingException.java | 164 + .../apache/hadoop/hdfs/TestBlockReport.java | 133 + .../hdfs/TestBlockReportProcessingTime.java | 141 + .../hdfs/TestBlocksScheduledCounter.java | 64 + .../apache/hadoop/hdfs/TestCrcCorruption.java | 225 + .../hadoop/hdfs/TestDFSClientRetries.java | 273 + .../apache/hadoop/hdfs/TestDFSFinalize.java | 127 + 
.../org/apache/hadoop/hdfs/TestDFSMkdirs.java | 76 + .../apache/hadoop/hdfs/TestDFSPermission.java | 992 + .../org/apache/hadoop/hdfs/TestDFSRemove.java | 91 + .../org/apache/hadoop/hdfs/TestDFSRename.java | 205 + .../apache/hadoop/hdfs/TestDFSRollback.java | 244 + .../org/apache/hadoop/hdfs/TestDFSShell.java | 1287 + .../hdfs/TestDFSShellGenericOptions.java | 122 + .../hadoop/hdfs/TestDFSStartupVersions.java | 213 + .../hdfs/TestDFSStorageStateRecovery.java | 249 + .../apache/hadoop/hdfs/TestDFSUpgrade.java | 252 + .../hadoop/hdfs/TestDFSUpgradeFromImage.java | 203 + .../org/apache/hadoop/hdfs/TestDFSUtil.java | 69 + .../hadoop/hdfs/TestDataTransferProtocol.java | 337 + .../hadoop/hdfs/TestDatanodeBlockScanner.java | 440 + .../apache/hadoop/hdfs/TestDatanodeDeath.java | 417 + .../hadoop/hdfs/TestDatanodeReport.java | 87 + .../apache/hadoop/hdfs/TestDecommission.java | 297 + .../hadoop/hdfs/TestDefaultNameNodePort.java | 60 + .../hdfs/TestDistributedFileSystem.java | 192 + .../hadoop/hdfs/TestFSInputChecker.java | 346 + .../hadoop/hdfs/TestFSOutputSummer.java | 133 + .../apache/hadoop/hdfs/TestFileAppend.java | 310 + .../apache/hadoop/hdfs/TestFileAppend2.java | 427 + .../apache/hadoop/hdfs/TestFileAppend3.java | 270 + .../hadoop/hdfs/TestFileCorruption.java | 340 + .../apache/hadoop/hdfs/TestFileCreation.java | 797 + .../hadoop/hdfs/TestFileCreationClient.java | 145 + .../hadoop/hdfs/TestFileCreationDelete.java | 99 + .../hadoop/hdfs/TestFileCreationEmpty.java | 80 + .../hdfs/TestFileCreationNamenodeRestart.java | 24 + .../apache/hadoop/hdfs/TestFileStatus.java | 212 + .../org/apache/hadoop/hdfs/TestGetBlocks.java | 177 + .../apache/hadoop/hdfs/TestHDFSConcat.java | 362 + .../hdfs/TestHDFSFileSystemContract.java | 50 + .../hadoop/hdfs/TestHDFSServerPorts.java | 243 + .../org/apache/hadoop/hdfs/TestHDFSTrash.java | 65 + .../hadoop/hdfs/TestHftpFileSystem.java | 286 + .../TestInjectionForSimulatedStorage.java | 199 + .../apache/hadoop/hdfs/TestLargeBlock.java | 222 + .../org/apache/hadoop/hdfs/TestLease.java | 66 + .../apache/hadoop/hdfs/TestLeaseRecovery.java | 197 + .../hadoop/hdfs/TestLeaseRecovery2.java | 151 + .../hadoop/hdfs/TestListPathServlet.java | 136 + .../org/apache/hadoop/hdfs/TestLocalDFS.java | 95 + .../hadoop/hdfs/TestLocatedStatusInDFS.java | 57 + .../hadoop/hdfs/TestMissingBlocksAlert.java | 119 + .../org/apache/hadoop/hdfs/TestModTime.java | 187 + .../org/apache/hadoop/hdfs/TestPread.java | 217 + .../org/apache/hadoop/hdfs/TestQuota.java | 623 + .../hadoop/hdfs/TestRenameWhileOpen.java | 321 + .../apache/hadoop/hdfs/TestReplication.java | 453 + .../apache/hadoop/hdfs/TestRestartDFS.java | 81 + .../org/apache/hadoop/hdfs/TestSafeMode.java | 95 + .../org/apache/hadoop/hdfs/TestSeekBug.java | 154 + .../org/apache/hadoop/hdfs/TestSetTimes.java | 247 + .../hadoop/hdfs/TestSetrepDecreasing.java | 28 + .../hadoop/hdfs/TestSetrepIncreasing.java | 77 + .../apache/hadoop/hdfs/TestSmallBlock.java | 115 + .../apache/hadoop/hdfs/UpgradeUtilities.java | 392 + .../apache/hadoop/hdfs/hadoop-14-dfs-dir.tgz | Bin 0 -> 153081 bytes .../org/apache/hadoop/hdfs/hadoop-dfs-dir.txt | 67 + .../hdfs/server/balancer/TestBalancer.java | 286 + .../server/common/TestDistributedUpgrade.java | 239 + .../common/TestThreadLocalDateFormat.java | 107 + .../server/datanode/SimulatedFSDataset.java | 662 + .../server/datanode/TestBlockReplacement.java | 252 + .../server/datanode/TestDataNodeMetrics.java | 50 + .../hdfs/server/datanode/TestDiskError.java | 151 + .../datanode/TestInterDatanodeProtocol.java | 114 
+ .../datanode/TestParallelBlockScan.java | 69 + .../datanode/TestSimulatedFSDataset.java | 294 + .../hdfs/server/namenode/CreateEditsLog.java | 210 + .../server/namenode/FSNamesystemAdapter.java | 69 + .../server/namenode/FileNameGenerator.java | 93 + .../namenode/HttpServletResponseStub.java | 150 + .../namenode/NNThroughputBenchmark.java | 1183 + .../hdfs/server/namenode/TestAllowFormat.java | 109 + .../hdfs/server/namenode/TestCheckpoint.java | 764 + .../namenode/TestComputeInvalidateWork.java | 57 + .../server/namenode/TestCorruptFilesJsp.java | 126 + .../namenode/TestDatanodeDescriptor.java | 51 + .../server/namenode/TestDeadDatanode.java | 127 + .../namenode/TestDecommissioningStatus.java | 228 + .../namenode/TestDualRPCServerStartup.java | 88 + .../hdfs/server/namenode/TestEditLog.java | 159 + .../namenode/TestFileDeleteWhitelist.java | 120 + .../hdfs/server/namenode/TestFileLimit.java | 186 + .../hadoop/hdfs/server/namenode/TestFsck.java | 524 + .../namenode/TestHeartbeatHandling.java | 88 + .../server/namenode/TestHost2NodesMap.java | 97 + .../hdfs/server/namenode/TestINodeFile.java | 123 + .../namenode/TestLargeDirectoryDelete.java | 219 + .../namenode/TestListCorruptFileBlocks.java | 491 + .../namenode/TestNNThroughputBenchmark.java | 40 + .../hdfs/server/namenode/TestNameCache.java | 75 + .../server/namenode/TestNameEditsConfigs.java | 391 + .../server/namenode/TestNameNodePorts.java | 81 + .../namenode/TestNamenodeCapacityReport.java | 137 + .../hdfs/server/namenode/TestNodeCount.java | 107 + .../namenode/TestOverReplicatedBlocks.java | 132 + .../namenode/TestParallelImageWrite.java | 111 + .../server/namenode/TestPathComponents.java | 59 + .../namenode/TestPendingReplication.java | 114 + .../namenode/TestReplicationPolicy.java | 424 + .../hdfs/server/namenode/TestStartup.java | 443 + .../namenode/TestUnderReplicatedBlocks.java | 44 + .../namenode/metrics/TestNameNodeMetrics.java | 168 + .../apache/hadoop/http/TestGlobalFilter.java | 139 + .../apache/hadoop/http/TestHtmlQuoting.java | 65 + .../apache/hadoop/http/TestHttpServer.java | 183 + .../apache/hadoop/http/TestServletFilter.java | 138 + src/test/org/apache/hadoop/io/FileBench.java | 603 + .../org/apache/hadoop/io/RandomDatum.java | 108 + .../org/apache/hadoop/io/TestArrayFile.java | 155 + .../apache/hadoop/io/TestArrayWritable.java | 64 + .../apache/hadoop/io/TestBloomMapFile.java | 70 + .../apache/hadoop/io/TestBytesWritable.java | 95 + .../hadoop/io/TestDefaultStringifier.java | 113 + .../apache/hadoop/io/TestGenericWritable.java | 178 + .../org/apache/hadoop/io/TestMD5Hash.java | 115 + .../org/apache/hadoop/io/TestMapFile.java | 92 + .../org/apache/hadoop/io/TestMapWritable.java | 134 + .../apache/hadoop/io/TestSequenceFile.java | 549 + .../io/TestSequenceFileMergeProgress.java | 98 + .../io/TestSequenceFileSerialization.java | 69 + .../org/apache/hadoop/io/TestSetFile.java | 157 + .../hadoop/io/TestSortedMapWritable.java | 104 + src/test/org/apache/hadoop/io/TestText.java | 266 + .../org/apache/hadoop/io/TestTextNonUTF8.java | 56 + src/test/org/apache/hadoop/io/TestUTF8.java | 96 + .../hadoop/io/TestVersionedWritable.java | 179 + .../org/apache/hadoop/io/TestWritable.java | 99 + .../apache/hadoop/io/TestWritableName.java | 107 + .../apache/hadoop/io/TestWritableUtils.java | 65 + .../apache/hadoop/io/compress/TestCodec.java | 249 + .../hadoop/io/compress/TestCodecFactory.java | 151 + .../hadoop/io/compress/TestGzipCodec.java | 104 + .../hadoop/io/file/tfile/KVGenerator.java | 105 + 
.../hadoop/io/file/tfile/KeySampler.java | 56 + .../hadoop/io/file/tfile/NanoTimer.java | 193 + .../io/file/tfile/RandomDistribution.java | 266 + .../hadoop/io/file/tfile/TestTFile.java | 431 + .../io/file/tfile/TestTFileByteArrays.java | 790 + .../io/file/tfile/TestTFileComparators.java | 122 + .../TestTFileJClassComparatorByteArrays.java | 58 + .../tfile/TestTFileLzoCodecsByteArrays.java | 42 + .../file/tfile/TestTFileLzoCodecsStreams.java | 39 + .../tfile/TestTFileNoneCodecsByteArrays.java | 32 + ...eNoneCodecsJClassComparatorByteArrays.java | 43 + .../tfile/TestTFileNoneCodecsStreams.java | 32 + .../hadoop/io/file/tfile/TestTFileSeek.java | 504 + .../tfile/TestTFileSeqFileComparison.java | 782 + .../hadoop/io/file/tfile/TestTFileSplit.java | 192 + .../io/file/tfile/TestTFileStreams.java | 423 + .../tfile/TestTFileUnsortedByteArrays.java | 238 + .../hadoop/io/file/tfile/TestVLong.java | 161 + .../apache/hadoop/io/file/tfile/Timer.java | 63 + .../hadoop/io/retry/TestRetryProxy.java | 170 + .../io/retry/UnreliableImplementation.java | 60 + .../hadoop/io/retry/UnreliableInterface.java | 42 + .../serializer/TestWritableSerialization.java | 95 + .../simpleseekableformat/TestDataSegment.java | 87 + .../TestInterleavedStreams.java | 157 + .../TestSimpleSeekableFormatCodec.java | 141 + .../TestSimpleSeekableFormatStreams.java | 108 + .../io/simpleseekableformat/TestUtils.java | 67 + .../org/apache/hadoop/ipc/ClientAdapter.java | 39 + src/test/org/apache/hadoop/ipc/TestIPC.java | 279 + .../hadoop/ipc/TestIPCServerResponder.java | 157 + src/test/org/apache/hadoop/ipc/TestRPC.java | 391 + .../hadoop/ipc/TestRPCCompatibility.java | 214 + .../apache/hadoop/ipc/TestSocketFactory.java | 197 + .../org/apache/hadoop/log/TestLogLevel.java | 78 + .../apache/hadoop/mapred/BigMapOutput.java | 162 + .../mapred/ClusterMapReduceTestCase.java | 198 + .../ClusterWithLinuxTaskController.java | 241 + .../hadoop/mapred/ControlledMapReduceJob.java | 578 + .../mapred/DummyResourceCalculatorPlugin.java | 116 + .../DummyTaskTrackerInstrumentation.java | 69 + .../hadoop/mapred/EmptyInputFormat.java | 45 + .../hadoop/mapred/FakeObjectUtilities.java | 275 + .../hadoop/mapred/GenericMRLoadGenerator.java | 685 + .../apache/hadoop/mapred/HadoopTestCase.java | 206 + .../org/apache/hadoop/mapred/MRBench.java | 318 + .../org/apache/hadoop/mapred/MRCaching.java | 300 + .../apache/hadoop/mapred/MRSharedCaching.java | 445 + .../apache/hadoop/mapred/MiniMRCluster.java | 727 + .../hadoop/mapred/NotificationTestCase.java | 243 + .../apache/hadoop/mapred/ReliabilityTest.java | 506 + .../apache/hadoop/mapred/SortValidator.java | 579 + .../apache/hadoop/mapred/TestBadRecords.java | 271 + .../hadoop/mapred/TestChildTaskDirs.java | 208 + .../mapred/TestClusterMRNotification.java | 32 + .../mapred/TestClusterMapReduceTestCase.java | 121 + .../hadoop/mapred/TestClusterStatus.java | 310 + .../org/apache/hadoop/mapred/TestCollect.java | 150 + .../mapred/TestCommandLineJobSubmission.java | 97 + .../apache/hadoop/mapred/TestComparators.java | 467 + ...stCompositeTaskTrackerInstrumentation.java | 98 + .../mapred/TestCompressedEmptyMapOutputs.java | 123 + .../mapred/TestControlledMapReduceJob.java | 81 + .../apache/hadoop/mapred/TestCounters.java | 96 + .../mapred/TestCustomOutputCommitter.java | 65 + .../apache/hadoop/mapred/TestEmptyJob.java | 245 + .../hadoop/mapred/TestFieldSelection.java | 129 + .../hadoop/mapred/TestFileInputFormat.java | 186 + .../mapred/TestFileInputFormatPathFilter.java | 147 + .../mapred/TestFileOutputCommitter.java | 
93 + .../hadoop/mapred/TestFileOutputFormat.java | 163 + .../hadoop/mapred/TestGetSplitHosts.java | 107 + .../hadoop/mapred/TestIFileStreams.java | 100 + .../apache/hadoop/mapred/TestIndexCache.java | 161 + .../apache/hadoop/mapred/TestInputPath.java | 109 + .../hadoop/mapred/TestJavaSerialization.java | 151 + .../apache/hadoop/mapred/TestJobCleanup.java | 78 + .../apache/hadoop/mapred/TestJobClient.java | 130 + .../apache/hadoop/mapred/TestJobCounters.java | 376 + .../hadoop/mapred/TestJobDirCleanup.java | 86 + .../TestJobExecutionAsDifferentUser.java | 107 + .../apache/hadoop/mapred/TestJobHistory.java | 1198 + .../hadoop/mapred/TestJobHistoryParsing.java | 118 + .../hadoop/mapred/TestJobHistoryVersion.java | 152 + .../hadoop/mapred/TestJobInProgress.java | 259 + .../mapred/TestJobInProgressListener.java | 405 + .../hadoop/mapred/TestJobKillAndFail.java | 129 + .../org/apache/hadoop/mapred/TestJobName.java | 109 + .../mapred/TestJobQueueInformation.java | 146 + .../mapred/TestJobQueueTaskScheduler.java | 329 + .../apache/hadoop/mapred/TestJobRetire.java | 360 + .../mapred/TestJobStatusPersistency.java | 136 + .../hadoop/mapred/TestJobSysDirWithDFS.java | 137 + .../hadoop/mapred/TestJobTrackerRestart.java | 571 + .../TestJobTrackerRestartWithLostTracker.java | 177 + .../hadoop/mapred/TestJobTrackerSafeMode.java | 280 + .../hadoop/mapred/TestJobTrackerStart.java | 43 + .../hadoop/mapred/TestJobTrackerXmlJsp.java | 55 + .../mapred/TestKeyValueTextInputFormat.java | 233 + .../hadoop/mapred/TestKillCompletedJob.java | 124 + .../hadoop/mapred/TestKillSubProcesses.java | 548 + .../TestLimitTasksPerJobTaskScheduler.java | 124 + .../mapred/TestLocalMRNotification.java | 33 + .../apache/hadoop/mapred/TestLostTracker.java | 180 + .../hadoop/mapred/TestMRServerPorts.java | 212 + .../hadoop/mapred/TestMapCollection.java | 304 + .../hadoop/mapred/TestMapOutputType.java | 159 + .../org/apache/hadoop/mapred/TestMapRed.java | 842 + .../hadoop/mapred/TestMapredSystemDir.java | 102 + .../hadoop/mapred/TestMiniMRBringup.java | 38 + .../hadoop/mapred/TestMiniMRChildTask.java | 437 + .../hadoop/mapred/TestMiniMRClasspath.java | 217 + .../hadoop/mapred/TestMiniMRDFSCaching.java | 78 + .../mapred/TestMiniMRDFSSharedCaching.java | 92 + .../hadoop/mapred/TestMiniMRDFSSort.java | 179 + .../hadoop/mapred/TestMiniMRLocalFS.java | 342 + .../mapred/TestMiniMRMapRedDebugScript.java | 240 + .../hadoop/mapred/TestMiniMRTaskTempDir.java | 188 + .../hadoop/mapred/TestMiniMRWithDFS.java | 295 + .../TestMiniMRWithDFSWithDistinctUsers.java | 85 + .../mapred/TestMultiFileInputFormat.java | 145 + .../hadoop/mapred/TestMultiFileSplit.java | 61 + .../mapred/TestMultipleLevelCaching.java | 124 + .../mapred/TestMultipleTextOutputFormat.java | 152 + .../hadoop/mapred/TestNodeBlacklisting.java | 144 + .../hadoop/mapred/TestNodeHealthService.java | 160 + .../apache/hadoop/mapred/TestNodeRefresh.java | 457 + .../mapred/TestParallelInitialization.java | 236 + .../mapred/TestQueueAclsForCurrentUser.java | 174 + .../hadoop/mapred/TestQueueManager.java | 604 + .../mapred/TestRackAwareTaskPlacement.java | 180 + .../hadoop/mapred/TestRecoveryManager.java | 453 + .../apache/hadoop/mapred/TestReduceFetch.java | 148 + .../apache/hadoop/mapred/TestReduceTask.java | 142 + .../hadoop/mapred/TestResourceEstimation.java | 109 + .../TestSequenceFileAsBinaryInputFormat.java | 101 + .../TestSequenceFileAsBinaryOutputFormat.java | 212 + .../TestSequenceFileAsTextInputFormat.java | 119 + .../mapred/TestSequenceFileInputFilter.java | 176 + 
.../mapred/TestSequenceFileInputFormat.java | 117 + .../mapred/TestSetupAndCleanupFailure.java | 294 + .../hadoop/mapred/TestSetupWorkDir.java | 84 + .../hadoop/mapred/TestSortedRanges.java | 99 + .../TestSpecialCharactersInOutputPath.java | 133 + .../mapred/TestSpeculativeExecution.java | 341 + .../mapred/TestStatisticsCollector.java | 83 + .../apache/hadoop/mapred/TestSubmitJob.java | 111 + .../hadoop/mapred/TestTTCpuToTaskSlots.java | 113 + .../mapred/TestTTResourceReporting.java | 356 + .../apache/hadoop/mapred/TestTaskCommit.java | 62 + .../apache/hadoop/mapred/TestTaskFail.java | 227 + .../apache/hadoop/mapred/TestTaskLimits.java | 118 + .../hadoop/mapred/TestTaskLogsMonitor.java | 500 + .../TestTaskTrackerInstrumentation.java | 120 + .../mapred/TestTaskTrackerMemoryManager.java | 565 + .../hadoop/mapred/TestTextInputFormat.java | 347 + .../hadoop/mapred/TestTextOutputFormat.java | 154 + .../TestTrackerBlacklistAcrossJobs.java | 108 + .../mapred/TestUserDefinedCounters.java | 105 + .../hadoop/mapred/TestWritableJobConf.java | 101 + .../hadoop/mapred/ThreadedMapBenchmark.java | 292 + .../apache/hadoop/mapred/UtilsForTests.java | 761 + .../org/apache/hadoop/mapred/WordCount.java | 159 + .../jobcontrol/JobControlTestUtils.java | 154 + .../mapred/jobcontrol/TestJobControl.java | 204 + .../jobcontrol/TestLocalJobControl.java | 135 + .../hadoop/mapred/join/IncomparableKey.java | 31 + .../hadoop/mapred/join/TestDatamerge.java | 422 + .../hadoop/mapred/join/TestTupleWritable.java | 174 + .../hadoop/mapred/lib/TestChainMapReduce.java | 284 + .../lib/TestCombineFileInputFormat.java | 966 + .../mapred/lib/TestDelegatingInputFormat.java | 133 + .../TestHarWithCombineFileInputFormat.java | 112 + .../lib/TestKeyFieldBasedComparator.java | 156 + .../lib/TestKeyFieldBasedPartitioner.java | 126 + .../hadoop/mapred/lib/TestKeyFieldHelper.java | 425 + .../mapred/lib/TestLineInputFormat.java | 118 + .../hadoop/mapred/lib/TestMultipleInputs.java | 85 + .../mapred/lib/TestMultipleOutputs.java | 260 + .../lib/TestMultithreadedMapRunner.java | 170 + .../mapred/lib/TestTotalOrderPartitioner.java | 191 + .../mapred/lib/aggregate/AggregatorTests.java | 88 + .../mapred/lib/aggregate/TestAggregates.java | 126 + .../mapred/lib/db/TestConstructQuery.java | 19 + .../hadoop/mapred/lib/db/TestDBJob.java | 42 + .../apache/hadoop/mapred/pipes/TestPipes.java | 277 + .../pipes/TestPipesAsDifferentUser.java | 74 + .../mapred/pipes/WordCountInputFormat.java | 85 + .../hadoop/mapred/sharedTest1/sharedTest.txt | 1 + .../hadoop/mapred/sharedTest1/sharedTest.zip | Bin 0 -> 225 bytes .../hadoop/mapred/sharedTest2/sharedTest.txt | 2 + src/test/org/apache/hadoop/mapred/test.jar | Bin 0 -> 518 bytes src/test/org/apache/hadoop/mapred/test.tar | Bin 0 -> 10240 bytes src/test/org/apache/hadoop/mapred/test.tar.gz | Bin 0 -> 189 bytes src/test/org/apache/hadoop/mapred/test.tgz | Bin 0 -> 180 bytes src/test/org/apache/hadoop/mapred/test.txt | 1 + src/test/org/apache/hadoop/mapred/test.zip | Bin 0 -> 213 bytes .../org/apache/hadoop/mapred/testscript.txt | 2 + .../hadoop/mapreduce/MapReduceTestUtil.java | 311 + .../apache/hadoop/mapreduce/TestChild.java | 142 + .../hadoop/mapreduce/TestMapReduceLocal.java | 168 + .../mapreduce/TestNoJobSetupCleanup.java | 108 + .../lib/map/TestMultithreadedMapper.java | 123 + .../util/TestMRAsyncDiskService.java | 199 + .../hadoop/metrics/TestContextFactory.java | 80 + .../org/apache/hadoop/net/StaticMapping.java | 62 + .../net/TestIPv4AddressTruncationMapping.java | 50 + 
.../net/TestInetSocketAddressFactory.java | 79 + .../hadoop/net/TestNetworkTopology.java | 171 + .../hadoop/net/TestScriptBasedMapping.java | 46 + .../hadoop/net/TestSocketIOWithTimeout.java | 155 + .../org/apache/hadoop/record/FromCpp.java | 120 + .../org/apache/hadoop/record/RecordBench.java | 313 + .../org/apache/hadoop/record/TestBuffer.java | 124 + .../apache/hadoop/record/TestRecordIO.java | 199 + .../apache/hadoop/record/TestRecordMR.java | 467 + .../hadoop/record/TestRecordVersioning.java | 239 + .../hadoop/record/TestRecordWritable.java | 114 + src/test/org/apache/hadoop/record/ToCpp.java | 113 + .../security/TestAccessControlList.java | 104 + .../hadoop/security/TestPermission.java | 262 + .../TestUnixUserGroupInformation.java | 112 + .../authorize/HadoopPolicyProvider.java | 39 + .../authorize/TestConfiguredPolicy.java | 82 + .../TestServiceLevelAuthorization.java | 152 + .../org/apache/hadoop/test/AllTestDriver.java | 87 + .../org/apache/hadoop/tools/TestDistCh.java | 221 + .../org/apache/hadoop/tools/TestJMXGet.java | 129 + .../tools/rumen/HistogramRawTestData.java | 54 + .../hadoop/tools/rumen/TestHistograms.java | 155 + .../TestPiecewiseLinearInterpolation.java | 123 + .../tools/rumen/TestRumenJobTraces.java | 125 + .../hadoop/tools/rumen/TestZombieJob.java | 338 + .../hadoop/util/TestAsyncDiskService.java | 82 + .../hadoop/util/TestCyclicIteration.java | 61 + .../apache/hadoop/util/TestGenericsUtil.java | 127 + .../hadoop/util/TestHostsFileReader.java | 230 + .../apache/hadoop/util/TestIndexedSort.java | 361 + .../util/TestProcfsBasedProcessTree.java | 678 + .../hadoop/util/TestReflectionUtils.java | 136 + .../org/apache/hadoop/util/TestShell.java | 127 + .../apache/hadoop/util/TestStringUtils.java | 121 + src/test/testjar/ClassWordCount.java | 60 + src/test/testjar/CustomOutputCommitter.java | 25 + src/test/testjar/ExternalMapperReducer.java | 73 + src/test/testjar/ExternalWritable.java | 86 + src/test/testshell/ExternalMapReduce.java | 132 + .../rumen/histogram-tests/gold-minimal.json | 15 + .../gold-one-value-many-repeats.json | 15 + .../histogram-tests/gold-only-one-value.json | 15 + .../histogram-tests/gold-three-values.json | 15 + .../rumen/histogram-tests/input-minimal.json | 17 + .../input-one-value-many-repeats.json | 76 + .../histogram-tests/input-only-one-value.json | 13 + .../histogram-tests/input-three-values.json | 15 + .../job-tracker-logs-topology-output | 1693 + .../job-tracker-logs-trace-output.gz | Bin 0 -> 25116 bytes .../sample-job-tracker-logs.gz | Bin 0 -> 29907 bytes .../truncated-job-tracker-log | 110 + .../truncated-topology-output | 343 + .../small-trace-test/truncated-trace-output | 1407 + .../data/rumen/zombie/input-topology.json | 1693 + .../tools/data/rumen/zombie/input-trace.json | 11364 ++++ src/tools/org/apache/hadoop/tools/DistCh.java | 511 + src/tools/org/apache/hadoop/tools/DistCp.java | 1450 + .../hadoop/tools/DistCp_Counter.properties | 9 + .../org/apache/hadoop/tools/DistTool.java | 118 + .../apache/hadoop/tools/HadoopArchives.java | 799 + .../org/apache/hadoop/tools/Logalyzer.java | 312 + .../tools/rumen/AbstractClusterStory.java | 185 + .../CDFPiecewiseLinearRandomGenerator.java | 68 + .../tools/rumen/CDFRandomGenerator.java | 89 + .../hadoop/tools/rumen/ClusterStory.java | 81 + .../tools/rumen/ClusterTopologyReader.java | 81 + .../hadoop/tools/rumen/DeepCompare.java | 45 + .../tools/rumen/DeepInequalityException.java | 61 + .../tools/rumen/HadoopLogsAnalyzer.java | 1827 + .../apache/hadoop/tools/rumen/Histogram.java | 164 + 
.../apache/hadoop/tools/rumen/JobStory.java | 118 + .../hadoop/tools/rumen/JobStoryProducer.java | 33 + .../hadoop/tools/rumen/JobTraceReader.java | 51 + .../tools/rumen/JsonObjectMapperParser.java | 116 + .../hadoop/tools/rumen/LogRecordType.java | 73 + .../hadoop/tools/rumen/LoggedDiscreteCDF.java | 144 + .../apache/hadoop/tools/rumen/LoggedJob.java | 586 + .../hadoop/tools/rumen/LoggedLocation.java | 98 + .../tools/rumen/LoggedNetworkTopology.java | 168 + .../rumen/LoggedSingleRelativeRanking.java | 101 + .../apache/hadoop/tools/rumen/LoggedTask.java | 266 + .../hadoop/tools/rumen/LoggedTaskAttempt.java | 344 + .../hadoop/tools/rumen/MachineNode.java | 205 + .../tools/rumen/MapTaskAttemptInfo.java | 48 + .../org/apache/hadoop/tools/rumen/Node.java | 148 + .../org/apache/hadoop/tools/rumen/Pair.java | 38 + .../hadoop/tools/rumen/ParsedConfigFile.java | 204 + .../apache/hadoop/tools/rumen/ParsedHost.java | 141 + .../apache/hadoop/tools/rumen/ParsedLine.java | 117 + .../tools/rumen/Pre21JobHistoryConstants.java | 49 + .../apache/hadoop/tools/rumen/RackNode.java | 48 + .../tools/rumen/ReduceTaskAttemptInfo.java | 71 + .../hadoop/tools/rumen/TaskAttemptInfo.java | 64 + .../apache/hadoop/tools/rumen/TaskInfo.java | 73 + .../apache/hadoop/tools/rumen/TreePath.java | 57 + .../hadoop/tools/rumen/ZombieCluster.java | 149 + .../apache/hadoop/tools/rumen/ZombieJob.java | 880 + .../hadoop/tools/rumen/ZombieJobProducer.java | 81 + src/webapps/datanode/browseBlock.jsp | 393 + src/webapps/datanode/browseDirectory.jsp | 181 + src/webapps/datanode/tail.jsp | 133 + src/webapps/hdfs/corrupt_files.jsp | 76 + src/webapps/hdfs/dfshealth.jsp | 313 + src/webapps/hdfs/dfsnodelist.jsp | 349 + src/webapps/hdfs/dfsnodelist_txt.jsp | 56 + src/webapps/hdfs/index.html | 20 + src/webapps/hdfs/nn_browsedfscontent.jsp | 58 + src/webapps/job/analysejobhistory.jsp | 249 + src/webapps/job/gc.jsp | 89 + src/webapps/job/index.html | 20 + src/webapps/job/jobblacklistedtrackers.jsp | 61 + src/webapps/job/jobcompletionevents.jsp | 105 + src/webapps/job/jobconf.jsp | 52 + src/webapps/job/jobconf_history.jsp | 56 + src/webapps/job/jobdetails.jsp | 446 + src/webapps/job/jobdetailshistory.jsp | 328 + src/webapps/job/jobdetailsjson.jsp | 100 + src/webapps/job/jobfailures.jsp | 169 + src/webapps/job/jobhistory.jsp | 314 + src/webapps/job/joblogs.jsp | 71 + src/webapps/job/jobqueue_details.jsp | 70 + src/webapps/job/jobtasks.jsp | 135 + src/webapps/job/jobtaskshistory.jsp | 67 + src/webapps/job/jobtracker.jsp | 207 + src/webapps/job/jobtracker.jspx | 87 + src/webapps/job/jobtracker_hmon.jsp | 205 + src/webapps/job/jobtracker_txt.jsp | 24 + src/webapps/job/jobtrackersdetailsjson.jsp | 126 + src/webapps/job/loadhistory.jsp | 49 + src/webapps/job/locality.jsp | 66 + src/webapps/job/machines.jsp | 165 + src/webapps/job/machines_txt.jsp | 53 + src/webapps/job/taskdetails.jsp | 299 + src/webapps/job/taskdetailshistory.jsp | 122 + src/webapps/job/taskstats.jsp | 87 + src/webapps/static/hadoop-logo.jpg | Bin 0 -> 9443 bytes src/webapps/static/hadoop.css | 134 + src/webapps/static/jobconf.xsl | 18 + src/webapps/static/jobtracker.js | 151 + src/webapps/task/index.html | 1 + src/webapps/task/taskcompletionevents.jsp | 67 + src/webapps/task/tasktracker.jsp | 89 + 2534 files changed, 1051247 insertions(+) create mode 100644 .eclipse.templates/.classpath create mode 100644 .eclipse.templates/.externalToolBuilders/Hadoop_Ant_Builder.launch create mode 100644 .eclipse.templates/.project create mode 100644 .eclipse.templates/README.txt create mode 
100644 APACHE-README.txt create mode 100644 CHANGES.txt create mode 100644 FB-CHANGES.txt create mode 100644 LICENSE.txt create mode 100644 NOTICE.txt create mode 100644 README.txt create mode 100644 YAHOO-CHANGES.txt create mode 100755 bin/hadoop create mode 100644 bin/hadoop-config.sh create mode 100755 bin/hadoop-daemon.sh create mode 100755 bin/hadoop-daemons.sh create mode 100755 bin/rcc create mode 100755 bin/slaves.sh create mode 100755 bin/start-all.sh create mode 100755 bin/start-avatar.sh create mode 100755 bin/start-balancer.sh create mode 100755 bin/start-dfs.sh create mode 100755 bin/start-hmon-remote.sh create mode 100755 bin/start-hmon.sh create mode 100755 bin/start-mapred.sh create mode 100755 bin/start-raidnode-remote.sh create mode 100755 bin/start-raidnode.sh create mode 100755 bin/stop-all.sh create mode 100755 bin/stop-avatar.sh create mode 100755 bin/stop-balancer.sh create mode 100755 bin/stop-dfs.sh create mode 100755 bin/stop-hmon-remote.sh create mode 100755 bin/stop-hmon.sh create mode 100755 bin/stop-mapred.sh create mode 100755 bin/stop-raidnode-remote.sh create mode 100755 bin/stop-raidnode.sh create mode 100644 build.xml create mode 100644 conf/capacity-scheduler.xml create mode 100644 conf/capacity-scheduler.xml.template create mode 100644 conf/configuration.xsl create mode 100644 conf/core-site.xml create mode 100644 conf/core-site.xml.template create mode 100644 conf/hadoop-env.sh create mode 100644 conf/hadoop-env.sh.template create mode 100644 conf/hadoop-metrics.properties create mode 100644 conf/hadoop-policy.xml create mode 100644 conf/hadoop-policy.xml.template create mode 100644 conf/hdfs-site.xml create mode 100644 conf/hdfs-site.xml.template create mode 100644 conf/log4j.properties create mode 100644 conf/mapred-queue-acls.xml create mode 100644 conf/mapred-queue-acls.xml.template create mode 100644 conf/mapred-site.xml create mode 100644 conf/mapred-site.xml.template create mode 100644 conf/masters create mode 100644 conf/masters.template create mode 100644 conf/slaves create mode 100644 conf/slaves.template create mode 100644 conf/ssl-client.xml.example create mode 100644 conf/ssl-server.xml.example create mode 100644 conf/taskcontroller.cfg create mode 100644 ivy.xml create mode 100644 ivy/hadoop-core.pom create mode 100644 ivy/ivy-2.0.0-rc2.jar create mode 100644 ivy/ivysettings.xml create mode 100644 ivy/libraries.properties create mode 100644 lib/commons-cli-2.0-SNAPSHOT.jar create mode 100644 lib/hsqldb-1.8.0.10.LICENSE.txt create mode 100644 lib/hsqldb-1.8.0.10.jar create mode 100644 lib/jdiff/hadoop_0.17.0.xml create mode 100644 lib/jdiff/hadoop_0.18.1.xml create mode 100644 lib/jdiff/hadoop_0.18.2.xml create mode 100644 lib/jdiff/hadoop_0.18.3.xml create mode 100644 lib/jdiff/hadoop_0.19.0.xml create mode 100644 lib/jdiff/hadoop_0.19.1.xml create mode 100644 lib/jdiff/hadoop_0.19.2.xml create mode 100644 lib/jsp-2.1/jsp-2.1.jar create mode 100644 lib/jsp-2.1/jsp-api-2.1.jar create mode 100644 lib/kfs-0.2.2.jar create mode 100644 lib/kfs-0.2.LICENSE.txt create mode 100644 lib/zookeeper-3.3.1.jar create mode 100755 nativelib/lzma/liblzma.so create mode 100644 nativelib/lzma/lzma/lzma.h create mode 100644 nativelib/lzma/lzma/lzma/alignment.h create mode 100644 nativelib/lzma/lzma/lzma/base.h create mode 100644 nativelib/lzma/lzma/lzma/block.h create mode 100644 nativelib/lzma/lzma/lzma/check.h create mode 100644 nativelib/lzma/lzma/lzma/container.h create mode 100644 nativelib/lzma/lzma/lzma/delta.h create mode 100644 
nativelib/lzma/lzma/lzma/filter.h create mode 100644 nativelib/lzma/lzma/lzma/index.h create mode 100644 nativelib/lzma/lzma/lzma/index_hash.h create mode 100644 nativelib/lzma/lzma/lzma/init.h create mode 100644 nativelib/lzma/lzma/lzma/lzma.h create mode 100644 nativelib/lzma/lzma/lzma/memlimit.h create mode 100644 nativelib/lzma/lzma/lzma/simple.h create mode 100644 nativelib/lzma/lzma/lzma/stream_flags.h create mode 100644 nativelib/lzma/lzma/lzma/subblock.h create mode 100644 nativelib/lzma/lzma/lzma/version.h create mode 100644 nativelib/lzma/lzma/lzma/vli.h create mode 100644 src/ant/org/apache/hadoop/ant/DfsTask.java create mode 100644 src/ant/org/apache/hadoop/ant/antlib.xml create mode 100644 src/ant/org/apache/hadoop/ant/condition/DfsBaseConditional.java create mode 100644 src/ant/org/apache/hadoop/ant/condition/DfsExists.java create mode 100644 src/ant/org/apache/hadoop/ant/condition/DfsIsDir.java create mode 100644 src/ant/org/apache/hadoop/ant/condition/DfsZeroLen.java create mode 100644 src/benchmarks/gridmix/README create mode 100644 src/benchmarks/gridmix/generateData.sh create mode 100644 src/benchmarks/gridmix/gridmix-env create mode 100644 src/benchmarks/gridmix/javasort/text-sort.large create mode 100644 src/benchmarks/gridmix/javasort/text-sort.medium create mode 100644 src/benchmarks/gridmix/javasort/text-sort.small create mode 100644 src/benchmarks/gridmix/maxent/maxent.large create mode 100644 src/benchmarks/gridmix/monsterQuery/monster_query.large create mode 100644 src/benchmarks/gridmix/monsterQuery/monster_query.medium create mode 100644 src/benchmarks/gridmix/monsterQuery/monster_query.small create mode 100644 src/benchmarks/gridmix/pipesort/text-sort.large create mode 100644 src/benchmarks/gridmix/pipesort/text-sort.medium create mode 100644 src/benchmarks/gridmix/pipesort/text-sort.small create mode 100644 src/benchmarks/gridmix/streamsort/text-sort.large create mode 100644 src/benchmarks/gridmix/streamsort/text-sort.medium create mode 100644 src/benchmarks/gridmix/streamsort/text-sort.small create mode 100644 src/benchmarks/gridmix/submissionScripts/allThroughHod create mode 100644 src/benchmarks/gridmix/submissionScripts/allToSameCluster create mode 100644 src/benchmarks/gridmix/submissionScripts/maxentHod create mode 100644 src/benchmarks/gridmix/submissionScripts/maxentToSameCluster create mode 100644 src/benchmarks/gridmix/submissionScripts/monsterQueriesHod create mode 100644 src/benchmarks/gridmix/submissionScripts/monsterQueriesToSameCluster create mode 100644 src/benchmarks/gridmix/submissionScripts/sleep_if_too_busy create mode 100644 src/benchmarks/gridmix/submissionScripts/textSortHod create mode 100644 src/benchmarks/gridmix/submissionScripts/textSortToSameCluster create mode 100644 src/benchmarks/gridmix/submissionScripts/webdataScanHod create mode 100644 src/benchmarks/gridmix/submissionScripts/webdataScanToSameCluster create mode 100644 src/benchmarks/gridmix/submissionScripts/webdataSortHod create mode 100644 src/benchmarks/gridmix/submissionScripts/webdataSortToSameCluster create mode 100644 src/benchmarks/gridmix/webdatascan/webdata_scan.large create mode 100644 src/benchmarks/gridmix/webdatascan/webdata_scan.medium create mode 100644 src/benchmarks/gridmix/webdatascan/webdata_scan.small create mode 100644 src/benchmarks/gridmix/webdatasort/webdata_sort.large create mode 100644 src/benchmarks/gridmix/webdatasort/webdata_sort.medium create mode 100644 src/benchmarks/gridmix/webdatasort/webdata_sort.small create mode 100644 
src/benchmarks/gridmix2/README.gridmix2 create mode 100644 src/benchmarks/gridmix2/build.xml create mode 100644 src/benchmarks/gridmix2/generateGridmix2data.sh create mode 100644 src/benchmarks/gridmix2/gridmix-env-2 create mode 100644 src/benchmarks/gridmix2/gridmix_config.xml create mode 100644 src/benchmarks/gridmix2/rungridmix_2 create mode 100644 src/benchmarks/gridmix2/src/java/org/apache/hadoop/mapred/CombinerJobCreator.java create mode 100644 src/benchmarks/gridmix2/src/java/org/apache/hadoop/mapred/GenericMRLoadJobCreator.java create mode 100644 src/benchmarks/gridmix2/src/java/org/apache/hadoop/mapred/GridMixRunner.java create mode 100644 src/c++/libhdfs/Makefile.am create mode 100644 src/c++/libhdfs/Makefile.in create mode 100644 src/c++/libhdfs/aclocal.m4 create mode 100644 src/c++/libhdfs/config.guess create mode 100644 src/c++/libhdfs/config.sub create mode 100644 src/c++/libhdfs/configure create mode 100644 src/c++/libhdfs/configure.ac create mode 100644 src/c++/libhdfs/depcomp create mode 100644 src/c++/libhdfs/docs/Doxyfile create mode 100644 src/c++/libhdfs/docs/libhdfs_footer.html create mode 100644 src/c++/libhdfs/hdfs.c create mode 100644 src/c++/libhdfs/hdfs.h create mode 100644 src/c++/libhdfs/hdfsJniHelper.c create mode 100644 src/c++/libhdfs/hdfsJniHelper.h create mode 100644 src/c++/libhdfs/hdfs_read.c create mode 100644 src/c++/libhdfs/hdfs_test.c create mode 100644 src/c++/libhdfs/hdfs_write.c create mode 100644 src/c++/libhdfs/install-sh create mode 100755 src/c++/libhdfs/ltmain.sh create mode 100644 src/c++/libhdfs/m4/apfunctions.m4 create mode 100644 src/c++/libhdfs/m4/apjava.m4 create mode 100644 src/c++/libhdfs/m4/apsupport.m4 create mode 100644 src/c++/libhdfs/m4/libtool.m4 create mode 100644 src/c++/libhdfs/m4/ltoptions.m4 create mode 100644 src/c++/libhdfs/m4/ltsugar.m4 create mode 100644 src/c++/libhdfs/m4/ltversion.m4 create mode 100644 src/c++/libhdfs/m4/lt~obsolete.m4 create mode 100644 src/c++/libhdfs/missing create mode 100644 src/c++/libhdfs/tests/conf/core-site.xml create mode 100644 src/c++/libhdfs/tests/conf/hadoop-site.xml create mode 100644 src/c++/libhdfs/tests/conf/hdfs-site.xml create mode 100644 src/c++/libhdfs/tests/conf/mapred-site.xml create mode 100644 src/c++/libhdfs/tests/conf/slaves create mode 100755 src/c++/libhdfs/tests/test-libhdfs.sh create mode 100644 src/c++/librecordio/Makefile create mode 100644 src/c++/librecordio/archive.hh create mode 100644 src/c++/librecordio/binarchive.cc create mode 100644 src/c++/librecordio/binarchive.hh create mode 100644 src/c++/librecordio/csvarchive.cc create mode 100644 src/c++/librecordio/csvarchive.hh create mode 100644 src/c++/librecordio/exception.cc create mode 100644 src/c++/librecordio/exception.hh create mode 100644 src/c++/librecordio/fieldTypeInfo.cc create mode 100644 src/c++/librecordio/fieldTypeInfo.hh create mode 100644 src/c++/librecordio/filestream.cc create mode 100644 src/c++/librecordio/filestream.hh create mode 100644 src/c++/librecordio/recordTypeInfo.cc create mode 100644 src/c++/librecordio/recordTypeInfo.hh create mode 100644 src/c++/librecordio/recordio.cc create mode 100644 src/c++/librecordio/recordio.hh create mode 100644 src/c++/librecordio/test/Makefile create mode 100644 src/c++/librecordio/test/test.cc create mode 100644 src/c++/librecordio/test/test.hh create mode 100644 src/c++/librecordio/test/test.jr create mode 100644 src/c++/librecordio/test/testFromJava.cc create mode 100644 src/c++/librecordio/test/testFromJava.hh create mode 100644 
src/c++/librecordio/typeIDs.cc create mode 100644 src/c++/librecordio/typeIDs.hh create mode 100644 src/c++/librecordio/typeInfo.cc create mode 100644 src/c++/librecordio/typeInfo.hh create mode 100644 src/c++/librecordio/utils.cc create mode 100644 src/c++/librecordio/utils.hh create mode 100644 src/c++/librecordio/xmlarchive.cc create mode 100644 src/c++/librecordio/xmlarchive.hh create mode 100644 src/c++/pipes/.autom4te.cfg create mode 100644 src/c++/pipes/Makefile.am create mode 100644 src/c++/pipes/Makefile.in create mode 100644 src/c++/pipes/aclocal.m4 create mode 100644 src/c++/pipes/api/hadoop/Pipes.hh create mode 100644 src/c++/pipes/api/hadoop/TemplateFactory.hh create mode 100644 src/c++/pipes/compile create mode 100644 src/c++/pipes/config.guess create mode 100644 src/c++/pipes/config.sub create mode 100755 src/c++/pipes/configure create mode 100644 src/c++/pipes/configure.ac create mode 100644 src/c++/pipes/debug/pipes-default-gdb-commands.txt create mode 100644 src/c++/pipes/debug/pipes-default-script create mode 100644 src/c++/pipes/depcomp create mode 100644 src/c++/pipes/impl/HadoopPipes.cc create mode 100644 src/c++/pipes/impl/config.h.in create mode 100644 src/c++/pipes/install-sh create mode 100644 src/c++/pipes/ltmain.sh create mode 100644 src/c++/pipes/missing create mode 100644 src/c++/task-controller/Makefile.in create mode 100644 src/c++/task-controller/configuration.c create mode 100644 src/c++/task-controller/configuration.h.in create mode 100644 src/c++/task-controller/configure create mode 100644 src/c++/task-controller/configure.ac create mode 100644 src/c++/task-controller/main.c create mode 100644 src/c++/task-controller/task-controller.c create mode 100644 src/c++/task-controller/task-controller.h create mode 100644 src/c++/utils/.autom4te.cfg create mode 100644 src/c++/utils/Makefile.am create mode 100644 src/c++/utils/Makefile.in create mode 100644 src/c++/utils/aclocal.m4 create mode 100644 src/c++/utils/api/hadoop/SerialUtils.hh create mode 100644 src/c++/utils/api/hadoop/StringUtils.hh create mode 100644 src/c++/utils/config.guess create mode 100644 src/c++/utils/config.sub create mode 100755 src/c++/utils/configure create mode 100644 src/c++/utils/configure.ac create mode 100644 src/c++/utils/depcomp create mode 100644 src/c++/utils/impl/SerialUtils.cc create mode 100644 src/c++/utils/impl/StringUtils.cc create mode 100644 src/c++/utils/impl/config.h.in create mode 100644 src/c++/utils/install-sh create mode 100644 src/c++/utils/ltmain.sh create mode 100644 src/c++/utils/m4/hadoop_utils.m4 create mode 100644 src/c++/utils/missing create mode 100644 src/contrib/bash-tab-completion/README create mode 100644 src/contrib/bash-tab-completion/hadoop.sh create mode 100644 src/contrib/build-contrib.xml create mode 100644 src/contrib/build.xml create mode 100644 src/contrib/capacity-scheduler/README create mode 100644 src/contrib/capacity-scheduler/build.xml create mode 100644 src/contrib/capacity-scheduler/ivy.xml create mode 100644 src/contrib/capacity-scheduler/ivy/libraries.properties create mode 100644 src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacitySchedulerConf.java create mode 100644 src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacityTaskScheduler.java create mode 100644 src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/JobInitializationPoller.java create mode 100644 src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/JobQueuesManager.java create mode 100644 
src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/MemoryMatcher.java create mode 100644 src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/ClusterWithCapacityScheduler.java create mode 100644 src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestCapacityScheduler.java create mode 100644 src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestCapacitySchedulerConf.java create mode 100644 src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestCapacitySchedulerWithJobTracker.java create mode 100644 src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestJobTrackerRestartWithCS.java create mode 100644 src/contrib/data_join/build.xml create mode 100644 src/contrib/data_join/ivy.xml create mode 100644 src/contrib/data_join/ivy/libraries.properties create mode 100644 src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/README.txt create mode 100644 src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/SampleDataJoinMapper.java create mode 100644 src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/SampleDataJoinReducer.java create mode 100644 src/contrib/data_join/src/examples/org/apache/hadoop/contrib/utils/join/SampleTaggedMapOutput.java create mode 100644 src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/ArrayListBackedIterator.java create mode 100644 src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinJob.java create mode 100644 src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinMapperBase.java create mode 100644 src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinReducerBase.java create mode 100644 src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/JobBase.java create mode 100644 src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/ResetableIterator.java create mode 100644 src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/TaggedMapOutput.java create mode 100644 src/contrib/data_join/src/test/org/apache/hadoop/contrib/utils/join/TestDataJoin.java create mode 100755 src/contrib/dynamicclouds/bin/clusterbalancer.sh create mode 100644 src/contrib/dynamicclouds/build.xml create mode 100644 src/contrib/dynamicclouds/ivy.xml create mode 100644 src/contrib/dynamicclouds/ivy/libraries.properties create mode 100644 src/contrib/dynamicclouds/src/java/org/apache/hadoop/mapred/Cluster.java create mode 100644 src/contrib/dynamicclouds/src/java/org/apache/hadoop/mapred/ClusterBalancerAdminProtocol.java create mode 100644 src/contrib/dynamicclouds/src/java/org/apache/hadoop/mapred/ClusterBalancerTool.java create mode 100644 src/contrib/dynamicclouds/src/java/org/apache/hadoop/mapred/ClusterStatusJSONParser.java create mode 100644 src/contrib/dynamicclouds/src/java/org/apache/hadoop/mapred/ClustersBalancer.java create mode 100644 src/contrib/dynamicclouds/src/java/org/apache/hadoop/mapred/DynamicCloudsDaemon.java create mode 100644 src/contrib/dynamicclouds/src/java/org/apache/hadoop/mapred/TTLaunchTask.java create mode 100644 src/contrib/dynamicclouds/src/java/org/apache/hadoop/mapred/TTLauncher.java create mode 100644 src/contrib/dynamicclouds/src/java/org/apache/hadoop/mapred/TTMover.java create mode 100644 src/contrib/dynamicclouds/src/java/org/apache/hadoop/mapred/TaskTrackerLoadInfo.java create mode 100644 src/contrib/dynamicclouds/src/java/org/apache/hadoop/mapred/TaskTrackerLoadInfoIterator.java create mode 100644 
src/contrib/dynamicclouds/src/java/org/apache/hadoop/mapred/WastedTimeTTLIIterator.java create mode 100644 src/contrib/dynamicclouds/src/test/org/apache/hadoop/mapred/TTMoverTestStub.java create mode 100644 src/contrib/dynamicclouds/src/test/org/apache/hadoop/mapred/TestClustersBalancer.java create mode 100644 src/contrib/dynamicclouds/src/test/org/apache/hadoop/mapred/TestTTMover.java create mode 100644 src/contrib/dynamicclouds/src/test/org/apache/hadoop/mapred/TestTaskTrackerLoadInfo.java create mode 100644 src/contrib/dynamicclouds/src/webapps/cb/index.html create mode 100644 src/contrib/dynamicclouds/src/webapps/cb/status.jsp create mode 100644 src/contrib/ec2/README.txt create mode 100644 src/contrib/ec2/bin/cmd-hadoop-cluster create mode 100755 src/contrib/ec2/bin/create-hadoop-image create mode 100644 src/contrib/ec2/bin/delete-hadoop-cluster create mode 100644 src/contrib/ec2/bin/hadoop-ec2 create mode 100644 src/contrib/ec2/bin/hadoop-ec2-env.sh create mode 100644 src/contrib/ec2/bin/hadoop-ec2-env.sh.template create mode 100644 src/contrib/ec2/bin/hadoop-ec2-init-remote.sh create mode 100755 src/contrib/ec2/bin/image/create-hadoop-image-remote create mode 100644 src/contrib/ec2/bin/image/ec2-run-user-data create mode 100644 src/contrib/ec2/bin/launch-hadoop-cluster create mode 100644 src/contrib/ec2/bin/launch-hadoop-master create mode 100644 src/contrib/ec2/bin/launch-hadoop-slaves create mode 100644 src/contrib/ec2/bin/list-hadoop-clusters create mode 100755 src/contrib/ec2/bin/terminate-hadoop-cluster create mode 100644 src/contrib/eclipse-plugin/.classpath create mode 100644 src/contrib/eclipse-plugin/.project create mode 100644 src/contrib/eclipse-plugin/.settings/org.eclipse.jdt.core.prefs create mode 100644 src/contrib/eclipse-plugin/.settings/org.eclipse.jdt.ui.prefs create mode 100644 src/contrib/eclipse-plugin/.settings/org.eclipse.wst.validation.prefs create mode 100644 src/contrib/eclipse-plugin/META-INF/MANIFEST.MF create mode 100644 src/contrib/eclipse-plugin/build.properties create mode 100644 src/contrib/eclipse-plugin/build.xml create mode 100644 src/contrib/eclipse-plugin/ivy.xml create mode 100644 src/contrib/eclipse-plugin/ivy/libraries.properties create mode 100644 src/contrib/eclipse-plugin/plugin.xml create mode 100644 src/contrib/eclipse-plugin/resources/Components/Conf.png create mode 100644 src/contrib/eclipse-plugin/resources/Components/Export.png create mode 100644 src/contrib/eclipse-plugin/resources/Components/Import.png create mode 100644 src/contrib/eclipse-plugin/resources/Components/New.png create mode 100644 src/contrib/eclipse-plugin/resources/Components/Reload.png create mode 100644 src/contrib/eclipse-plugin/resources/Components/Tool.png create mode 100644 src/contrib/eclipse-plugin/resources/Components/Tools.png create mode 100644 src/contrib/eclipse-plugin/resources/ConnectDFS.xml create mode 100644 src/contrib/eclipse-plugin/resources/CreateProj.xml create mode 100644 src/contrib/eclipse-plugin/resources/Elephant-16x16.png create mode 100644 src/contrib/eclipse-plugin/resources/Elephant-24x24.png create mode 100644 src/contrib/eclipse-plugin/resources/Elephant-32x32.png create mode 100644 src/contrib/eclipse-plugin/resources/Elephant-64x64.png create mode 100644 src/contrib/eclipse-plugin/resources/Elephant-small-16x16.png create mode 100644 src/contrib/eclipse-plugin/resources/Elephant.jpg create mode 100644 src/contrib/eclipse-plugin/resources/Elephant100x100.gif create mode 100644 src/contrib/eclipse-plugin/resources/Elephant16x16.gif 
create mode 100644 src/contrib/eclipse-plugin/resources/Elephant2-136x136.png create mode 100644 src/contrib/eclipse-plugin/resources/Elephant2-16x16.png create mode 100644 src/contrib/eclipse-plugin/resources/Elephant2-24x24.png create mode 100644 src/contrib/eclipse-plugin/resources/Elephant2-32x32.png create mode 100644 src/contrib/eclipse-plugin/resources/Elephant2-64x64.png create mode 100644 src/contrib/eclipse-plugin/resources/Elephant2.jpg create mode 100644 src/contrib/eclipse-plugin/resources/Elephant3-122x122.png create mode 100644 src/contrib/eclipse-plugin/resources/Elephant3-16x16.png create mode 100644 src/contrib/eclipse-plugin/resources/Elephant3-24x24.png create mode 100644 src/contrib/eclipse-plugin/resources/HelloWorld.xml create mode 100644 src/contrib/eclipse-plugin/resources/MAP100x100.gif create mode 100644 src/contrib/eclipse-plugin/resources/MAP16x15.gif create mode 100644 src/contrib/eclipse-plugin/resources/RunProj.xml create mode 100644 src/contrib/eclipse-plugin/resources/SetHadoopPath.xml create mode 100644 src/contrib/eclipse-plugin/resources/Setup.xml create mode 100644 src/contrib/eclipse-plugin/resources/download.png create mode 100644 src/contrib/eclipse-plugin/resources/drive100x100.gif create mode 100644 src/contrib/eclipse-plugin/resources/drive16x16.gif create mode 100644 src/contrib/eclipse-plugin/resources/driver.png create mode 100644 src/contrib/eclipse-plugin/resources/driverwiz.png create mode 100644 src/contrib/eclipse-plugin/resources/elephantblue16x16.gif create mode 100644 src/contrib/eclipse-plugin/resources/files.gif create mode 100644 src/contrib/eclipse-plugin/resources/hadoop-logo-16x16.png create mode 100644 src/contrib/eclipse-plugin/resources/hadoop-logo-24x24.png create mode 100644 src/contrib/eclipse-plugin/resources/hadoop-logo-85x85.png create mode 100644 src/contrib/eclipse-plugin/resources/hadoop-logo.jpg create mode 100644 src/contrib/eclipse-plugin/resources/hadoop.gif create mode 100644 src/contrib/eclipse-plugin/resources/hadoop_small.gif create mode 100644 src/contrib/eclipse-plugin/resources/job.gif create mode 100644 src/contrib/eclipse-plugin/resources/location-edit-16x16.png create mode 100644 src/contrib/eclipse-plugin/resources/location-new-16x16.png create mode 100644 src/contrib/eclipse-plugin/resources/map16x16.gif create mode 100644 src/contrib/eclipse-plugin/resources/mapper16.png create mode 100644 src/contrib/eclipse-plugin/resources/mapwiz.png create mode 100644 src/contrib/eclipse-plugin/resources/new-folder.png create mode 100644 src/contrib/eclipse-plugin/resources/projwiz.png create mode 100644 src/contrib/eclipse-plugin/resources/reduce100x100.gif create mode 100644 src/contrib/eclipse-plugin/resources/reduce16x16.gif create mode 100644 src/contrib/eclipse-plugin/resources/reducer-16x16.gif create mode 100644 src/contrib/eclipse-plugin/resources/reducer16.png create mode 100644 src/contrib/eclipse-plugin/resources/reducewiz.png create mode 100644 src/contrib/eclipse-plugin/resources/refresh.png create mode 100644 src/contrib/eclipse-plugin/resources/spite_overcloud.png create mode 100644 src/contrib/eclipse-plugin/resources/spitesmall.gif create mode 100644 src/contrib/eclipse-plugin/resources/spitesmall.png create mode 100644 src/contrib/eclipse-plugin/resources/upload.png create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/Activator.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/ErrorMessageDialog.java create mode 100644 
src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/HadoopPerspectiveFactory.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/ImageLibrary.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/MapReduceNature.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/NewDriverWizard.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/NewDriverWizardPage.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/NewMapReduceProjectWizard.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/NewMapperWizard.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/NewReducerWizard.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/PropertyTester.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/DFSActionImpl.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/EditLocationAction.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/NewLocationAction.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/OpenNewMRClassWizardAction.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/actions/OpenNewMRProjectAction.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/ActionProvider.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSActions.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSContent.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSContentProvider.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSFile.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSFolder.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSLocation.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSLocationsRoot.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSMessage.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/dfs/DFSPath.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/HadoopApplicationLaunchShortcut.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/LocalMapReduceLaunchTabGroup.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/MutexRule.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/launch/StartHadoopLaunchTabGroup.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/preferences/MapReducePreferencePage.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/preferences/PreferenceConstants.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/preferences/PreferenceInitializer.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/ConfProp.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopJob.java create 
mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopPathPage.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopServer.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/IJobListener.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/JarModule.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopLocationWizard.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopServerSelectionListContentProvider.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/IHadoopServerListener.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/RunOnHadoopWizard.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/ServerRegistry.java create mode 100644 src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/view/servers/ServerView.java create mode 100644 src/contrib/failmon/README create mode 100644 src/contrib/failmon/bin/failmon.sh create mode 100644 src/contrib/failmon/bin/scheduler.py create mode 100644 src/contrib/failmon/build.xml create mode 100644 src/contrib/failmon/conf/commons-logging.properties create mode 100644 src/contrib/failmon/conf/failmon.properties create mode 100644 src/contrib/failmon/conf/global.config create mode 100644 src/contrib/failmon/conf/hosts.list create mode 100644 src/contrib/failmon/conf/log4j.properties create mode 100644 src/contrib/failmon/ivy.xml create mode 100644 src/contrib/failmon/ivy/libraries.properties create mode 100644 src/contrib/failmon/src/java/org/apache/hadoop/contrib/failmon/Anonymizer.java create mode 100644 src/contrib/failmon/src/java/org/apache/hadoop/contrib/failmon/CPUParser.java create mode 100644 src/contrib/failmon/src/java/org/apache/hadoop/contrib/failmon/Continuous.java create mode 100644 src/contrib/failmon/src/java/org/apache/hadoop/contrib/failmon/Environment.java create mode 100644 src/contrib/failmon/src/java/org/apache/hadoop/contrib/failmon/EventRecord.java create mode 100644 src/contrib/failmon/src/java/org/apache/hadoop/contrib/failmon/Executor.java create mode 100644 src/contrib/failmon/src/java/org/apache/hadoop/contrib/failmon/HDFSMerger.java create mode 100644 src/contrib/failmon/src/java/org/apache/hadoop/contrib/failmon/HadoopLogParser.java create mode 100644 src/contrib/failmon/src/java/org/apache/hadoop/contrib/failmon/LocalStore.java create mode 100644 src/contrib/failmon/src/java/org/apache/hadoop/contrib/failmon/LogParser.java create mode 100644 src/contrib/failmon/src/java/org/apache/hadoop/contrib/failmon/MonitorJob.java create mode 100644 src/contrib/failmon/src/java/org/apache/hadoop/contrib/failmon/Monitored.java create mode 100644 src/contrib/failmon/src/java/org/apache/hadoop/contrib/failmon/NICParser.java create mode 100644 src/contrib/failmon/src/java/org/apache/hadoop/contrib/failmon/OfflineAnonymizer.java create mode 100644 src/contrib/failmon/src/java/org/apache/hadoop/contrib/failmon/PersistentState.java create mode 100644 src/contrib/failmon/src/java/org/apache/hadoop/contrib/failmon/RunOnce.java create mode 100644 src/contrib/failmon/src/java/org/apache/hadoop/contrib/failmon/SMARTParser.java create mode 100644 src/contrib/failmon/src/java/org/apache/hadoop/contrib/failmon/SensorsParser.java create mode 100644 
src/contrib/failmon/src/java/org/apache/hadoop/contrib/failmon/SerializedRecord.java create mode 100644 src/contrib/failmon/src/java/org/apache/hadoop/contrib/failmon/ShellParser.java create mode 100644 src/contrib/failmon/src/java/org/apache/hadoop/contrib/failmon/SystemLogParser.java create mode 100644 src/contrib/fairscheduler/README create mode 100644 src/contrib/fairscheduler/build.xml create mode 100644 src/contrib/fairscheduler/ivy.xml create mode 100644 src/contrib/fairscheduler/ivy/libraries.properties create mode 100644 src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/AllocationConfigurationException.java create mode 100644 src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/CapBasedLoadManager.java create mode 100644 src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/DefaultTaskSelector.java create mode 100644 src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairScheduler.java create mode 100644 src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairSchedulerMetricsInst.java create mode 100644 src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairSchedulerServlet.java create mode 100644 src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FifoJobComparator.java create mode 100644 src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/LoadManager.java create mode 100644 src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/MemBasedLoadManager.java create mode 100644 src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/NewJobWeightBooster.java create mode 100644 src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/Pool.java create mode 100644 src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/PoolManager.java create mode 100644 src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/TaskSelector.java create mode 100644 src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/TaskType.java create mode 100644 src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/WeightAdjuster.java create mode 100644 src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestFairScheduler.java create mode 100644 src/contrib/fuse-dfs/Makefile.am create mode 100644 src/contrib/fuse-dfs/README create mode 100644 src/contrib/fuse-dfs/acinclude.m4 create mode 100755 src/contrib/fuse-dfs/bootstrap.sh create mode 100644 src/contrib/fuse-dfs/build.xml create mode 100644 src/contrib/fuse-dfs/configure.ac create mode 100644 src/contrib/fuse-dfs/global_footer.mk create mode 100644 src/contrib/fuse-dfs/global_header.mk create mode 100644 src/contrib/fuse-dfs/ivy.xml create mode 100644 src/contrib/fuse-dfs/ivy/libraries.properties create mode 100644 src/contrib/fuse-dfs/src/Makefile.am create mode 100644 src/contrib/fuse-dfs/src/fuse_connect.c create mode 100644 src/contrib/fuse-dfs/src/fuse_connect.h create mode 100644 src/contrib/fuse-dfs/src/fuse_context_handle.h create mode 100644 src/contrib/fuse-dfs/src/fuse_dfs.c create mode 100644 src/contrib/fuse-dfs/src/fuse_dfs.h create mode 100755 src/contrib/fuse-dfs/src/fuse_dfs_wrapper.sh create mode 100644 src/contrib/fuse-dfs/src/fuse_file_handle.h create mode 100644 src/contrib/fuse-dfs/src/fuse_impls.h create mode 100644 src/contrib/fuse-dfs/src/fuse_impls_access.c create mode 100644 src/contrib/fuse-dfs/src/fuse_impls_chmod.c create mode 100644 src/contrib/fuse-dfs/src/fuse_impls_chown.c create mode 100644 src/contrib/fuse-dfs/src/fuse_impls_create.c create mode 100644 src/contrib/fuse-dfs/src/fuse_impls_flush.c create mode 100644 
src/contrib/fuse-dfs/src/fuse_impls_getattr.c create mode 100644 src/contrib/fuse-dfs/src/fuse_impls_mkdir.c create mode 100644 src/contrib/fuse-dfs/src/fuse_impls_mknod.c create mode 100644 src/contrib/fuse-dfs/src/fuse_impls_open.c create mode 100644 src/contrib/fuse-dfs/src/fuse_impls_read.c create mode 100644 src/contrib/fuse-dfs/src/fuse_impls_readdir.c create mode 100644 src/contrib/fuse-dfs/src/fuse_impls_release.c create mode 100644 src/contrib/fuse-dfs/src/fuse_impls_rename.c create mode 100644 src/contrib/fuse-dfs/src/fuse_impls_rmdir.c create mode 100644 src/contrib/fuse-dfs/src/fuse_impls_statfs.c create mode 100644 src/contrib/fuse-dfs/src/fuse_impls_symlink.c create mode 100644 src/contrib/fuse-dfs/src/fuse_impls_truncate.c create mode 100644 src/contrib/fuse-dfs/src/fuse_impls_unlink.c create mode 100644 src/contrib/fuse-dfs/src/fuse_impls_utimens.c create mode 100644 src/contrib/fuse-dfs/src/fuse_impls_write.c create mode 100644 src/contrib/fuse-dfs/src/fuse_init.c create mode 100644 src/contrib/fuse-dfs/src/fuse_init.h create mode 100644 src/contrib/fuse-dfs/src/fuse_options.c create mode 100644 src/contrib/fuse-dfs/src/fuse_options.h create mode 100644 src/contrib/fuse-dfs/src/fuse_stat_struct.c create mode 100644 src/contrib/fuse-dfs/src/fuse_stat_struct.h create mode 100644 src/contrib/fuse-dfs/src/fuse_trash.c create mode 100644 src/contrib/fuse-dfs/src/fuse_trash.h create mode 100644 src/contrib/fuse-dfs/src/fuse_users.c create mode 100644 src/contrib/fuse-dfs/src/fuse_users.h create mode 100644 src/contrib/fuse-dfs/src/test/TestFuseDFS.java create mode 100644 src/contrib/gridmix/README create mode 100644 src/contrib/gridmix/build.xml create mode 100644 src/contrib/gridmix/ivy.xml create mode 100644 src/contrib/gridmix/ivy/libraries.properties create mode 100644 src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/AvgRecordFactory.java create mode 100644 src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/CombineFileSplit.java create mode 100644 src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/FilePool.java create mode 100644 src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/FileQueue.java create mode 100644 src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateData.java create mode 100644 src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Gridmix.java create mode 100644 src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixJob.java create mode 100644 src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixKey.java create mode 100644 src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixRecord.java create mode 100644 src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixSplit.java create mode 100644 src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/InputStriper.java create mode 100644 src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/IntermediateRecordFactory.java create mode 100644 src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobFactory.java create mode 100644 src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobMonitor.java create mode 100644 src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobSubmitter.java create mode 100644 src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ReadRecordFactory.java create mode 100644 src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RecordFactory.java create mode 100644 
src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/DebugJobFactory.java create mode 100644 src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestFilePool.java create mode 100644 src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestFileQueue.java create mode 100644 src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixRecord.java create mode 100644 src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java create mode 100644 src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestRecordFactory.java create mode 100644 src/contrib/hdfsproxy/README create mode 100755 src/contrib/hdfsproxy/bin/hdfsproxy create mode 100755 src/contrib/hdfsproxy/bin/hdfsproxy-config.sh create mode 100755 src/contrib/hdfsproxy/bin/hdfsproxy-daemon.sh create mode 100755 src/contrib/hdfsproxy/bin/hdfsproxy-daemons.sh create mode 100755 src/contrib/hdfsproxy/bin/hdfsproxy-slaves.sh create mode 100755 src/contrib/hdfsproxy/bin/start-hdfsproxy.sh create mode 100755 src/contrib/hdfsproxy/bin/stop-hdfsproxy.sh create mode 100644 src/contrib/hdfsproxy/build.xml create mode 100644 src/contrib/hdfsproxy/conf/configuration.xsl create mode 100644 src/contrib/hdfsproxy/conf/hdfsproxy-default.xml create mode 100644 src/contrib/hdfsproxy/conf/hdfsproxy-env.sh create mode 100644 src/contrib/hdfsproxy/conf/hdfsproxy-env.sh.template create mode 100644 src/contrib/hdfsproxy/conf/hdfsproxy-hosts create mode 100644 src/contrib/hdfsproxy/conf/log4j.properties create mode 100644 src/contrib/hdfsproxy/conf/user-certs.xml create mode 100644 src/contrib/hdfsproxy/conf/user-permissions.xml create mode 100644 src/contrib/hdfsproxy/ivy.xml create mode 100644 src/contrib/hdfsproxy/ivy/libraries.properties create mode 100644 src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/HdfsProxy.java create mode 100644 src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java create mode 100644 src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFilter.java create mode 100644 src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyHttpServer.java create mode 100644 src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyListPathsServlet.java create mode 100644 src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyStreamFile.java create mode 100644 src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUgiManager.java create mode 100644 src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestHdfsProxy.java create mode 100644 src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestProxyUgiManager.java create mode 100644 src/contrib/highavailability/README create mode 100644 src/contrib/highavailability/build.xml create mode 100644 src/contrib/highavailability/conf/raid.xml create mode 100644 src/contrib/highavailability/ivy.xml create mode 100644 src/contrib/highavailability/ivy/libraries.properties create mode 100644 src/contrib/highavailability/src/java/org/apache/hadoop/hdfs/AvatarShell.java create mode 100644 src/contrib/highavailability/src/java/org/apache/hadoop/hdfs/AvatarZooKeeperClient.java create mode 100644 src/contrib/highavailability/src/java/org/apache/hadoop/hdfs/DistributedAvatarFileSystem.java create mode 100644 src/contrib/highavailability/src/java/org/apache/hadoop/hdfs/protocol/AvatarConstants.java create mode 100644 src/contrib/highavailability/src/java/org/apache/hadoop/hdfs/protocol/AvatarProtocol.java create mode 100644 
src/contrib/highavailability/src/java/org/apache/hadoop/hdfs/server/datanode/AvatarDataNode.java create mode 100644 src/contrib/highavailability/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeProtocols.java create mode 100644 src/contrib/highavailability/src/java/org/apache/hadoop/hdfs/server/datanode/OfferService.java create mode 100644 src/contrib/highavailability/src/java/org/apache/hadoop/hdfs/server/namenode/AvatarNode.java create mode 100644 src/contrib/highavailability/src/java/org/apache/hadoop/hdfs/server/namenode/Ingest.java create mode 100644 src/contrib/highavailability/src/java/org/apache/hadoop/hdfs/server/namenode/Standby.java create mode 100644 src/contrib/highavailability/src/test/org/apache/hadoop/hdfs/MiniAvatarCluster.java create mode 100644 src/contrib/highavailability/src/test/org/apache/hadoop/hdfs/TestAvatarFailover.java create mode 100644 src/contrib/hive-streaming/build.xml create mode 100644 src/contrib/hive-streaming/ivy.xml create mode 100644 src/contrib/hive-streaming/ivy/libraries.properties create mode 100644 src/contrib/hive-streaming/src/java/org/apache/hadoop/streaming/BufferingOutputCollector.java create mode 100644 src/contrib/hive-streaming/src/java/org/apache/hadoop/streaming/Environment.java create mode 100644 src/contrib/hive-streaming/src/java/org/apache/hadoop/streaming/HadoopStreaming.java create mode 100644 src/contrib/hive-streaming/src/java/org/apache/hadoop/streaming/InmemBufferingOutputCollector.java create mode 100644 src/contrib/hive-streaming/src/java/org/apache/hadoop/streaming/JarBuilder.java create mode 100644 src/contrib/hive-streaming/src/java/org/apache/hadoop/streaming/PathFinder.java create mode 100644 src/contrib/hive-streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java create mode 100644 src/contrib/hive-streaming/src/java/org/apache/hadoop/streaming/PipeMapper.java create mode 100644 src/contrib/hive-streaming/src/java/org/apache/hadoop/streaming/PipeReducer.java create mode 100644 src/contrib/hive-streaming/src/java/org/apache/hadoop/streaming/StreamBaseRecordReader.java create mode 100644 src/contrib/hive-streaming/src/java/org/apache/hadoop/streaming/StreamInputFormat.java create mode 100644 src/contrib/hive-streaming/src/java/org/apache/hadoop/streaming/StreamJob.java create mode 100644 src/contrib/hive-streaming/src/java/org/apache/hadoop/streaming/StreamLineRecordReader.java create mode 100644 src/contrib/hive-streaming/src/java/org/apache/hadoop/streaming/StreamOutputFormat.java create mode 100644 src/contrib/hive-streaming/src/java/org/apache/hadoop/streaming/StreamSequenceRecordReader.java create mode 100644 src/contrib/hive-streaming/src/java/org/apache/hadoop/streaming/StreamUtil.java create mode 100644 src/contrib/hive-streaming/src/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java create mode 100644 src/contrib/hive-streaming/src/java/org/apache/hadoop/streaming/UTF8ByteArrayUtils.java create mode 100644 src/contrib/hive-streaming/src/java/org/apache/hadoop/streaming/package.html create mode 100644 src/contrib/hive-streaming/src/test/org/apache/hadoop/streaming/StreamAggregate.java create mode 100644 src/contrib/hive-streaming/src/test/org/apache/hadoop/streaming/TestGzipInput.java create mode 100644 src/contrib/hive-streaming/src/test/org/apache/hadoop/streaming/TestStreamAggregate.java create mode 100644 src/contrib/hive-streaming/src/test/org/apache/hadoop/streaming/TestStreamDataProtocol.java create mode 100644 
src/contrib/hive-streaming/src/test/org/apache/hadoop/streaming/TestStreamReduceNone.java create mode 100644 src/contrib/hive-streaming/src/test/org/apache/hadoop/streaming/TestStreamedMerge.java create mode 100644 src/contrib/hive-streaming/src/test/org/apache/hadoop/streaming/TestStreaming.java create mode 100644 src/contrib/hive-streaming/src/test/org/apache/hadoop/streaming/TestStreamingFailure.java create mode 100644 src/contrib/hive-streaming/src/test/org/apache/hadoop/streaming/TrApp.java create mode 100644 src/contrib/hive-streaming/src/test/org/apache/hadoop/streaming/UniqApp.java create mode 100644 src/contrib/hive-streaming/src/test/org/apache/hadoop/streaming/UtilTest.java create mode 100644 src/contrib/hive-streaming/src/test/org/apache/hadoop/streaming/ValueCountReduce.java create mode 100644 src/contrib/hmon/README create mode 100644 src/contrib/hmon/bin/start-hmon.sh create mode 100644 src/contrib/hmon/bin/stop-hmon.sh create mode 100644 src/contrib/hmon/build.xml create mode 100644 src/contrib/hmon/ivy.xml create mode 100644 src/contrib/hmon/ivy/libraries.properties create mode 100644 src/contrib/hmon/src/java/org/apache/hadoop/mapred/ClusterUtilization.java create mode 100644 src/contrib/hmon/src/java/org/apache/hadoop/mapred/HmonResourceReporter.java create mode 100644 src/contrib/hmon/src/java/org/apache/hadoop/mapred/JobUtilization.java create mode 100644 src/contrib/hmon/src/java/org/apache/hadoop/mapred/LinuxUtilizationGauger.java create mode 100644 src/contrib/hmon/src/java/org/apache/hadoop/mapred/LocalJobUtilization.java create mode 100644 src/contrib/hmon/src/java/org/apache/hadoop/mapred/TaskTrackerUtilization.java create mode 100644 src/contrib/hmon/src/java/org/apache/hadoop/mapred/UtilizationCollector.java create mode 100644 src/contrib/hmon/src/java/org/apache/hadoop/mapred/UtilizationCollectorCached.java create mode 100644 src/contrib/hmon/src/java/org/apache/hadoop/mapred/UtilizationCollectorProtocol.java create mode 100644 src/contrib/hmon/src/java/org/apache/hadoop/mapred/UtilizationGauger.java create mode 100644 src/contrib/hmon/src/java/org/apache/hadoop/mapred/UtilizationReporter.java create mode 100644 src/contrib/hmon/src/java/org/apache/hadoop/mapred/UtilizationShell.java create mode 100644 src/contrib/hmon/src/java/org/apache/hadoop/mapred/resourceutilization.xml create mode 100644 src/contrib/hmon/src/test/org/apache/hadoop/mapred/TestUtilizationCollector.java create mode 100644 src/contrib/hod/CHANGES.txt create mode 100644 src/contrib/hod/README create mode 100644 src/contrib/hod/bin/VERSION create mode 100644 src/contrib/hod/bin/checknodes create mode 100644 src/contrib/hod/bin/hod create mode 100644 src/contrib/hod/bin/hodcleanup create mode 100644 src/contrib/hod/bin/hodring create mode 100644 src/contrib/hod/bin/ringmaster create mode 100644 src/contrib/hod/bin/verify-account create mode 100644 src/contrib/hod/build.xml create mode 100644 src/contrib/hod/conf/hodrc create mode 100644 src/contrib/hod/config.txt create mode 100644 src/contrib/hod/getting_started.txt create mode 100755 src/contrib/hod/hodlib/AllocationManagers/__init__.py create mode 100755 src/contrib/hod/hodlib/AllocationManagers/goldAllocationManager.py create mode 100755 src/contrib/hod/hodlib/Common/__init__.py create mode 100755 src/contrib/hod/hodlib/Common/allocationManagerUtil.py create mode 100755 src/contrib/hod/hodlib/Common/desc.py create mode 100755 src/contrib/hod/hodlib/Common/descGenerator.py create mode 100755 src/contrib/hod/hodlib/Common/hodsvc.py create 
mode 100755 src/contrib/hod/hodlib/Common/logger.py create mode 100755 src/contrib/hod/hodlib/Common/miniHTMLParser.py create mode 100755 src/contrib/hod/hodlib/Common/nodepoolutil.py create mode 100755 src/contrib/hod/hodlib/Common/setup.py create mode 100755 src/contrib/hod/hodlib/Common/socketServers.py create mode 100755 src/contrib/hod/hodlib/Common/tcp.py create mode 100755 src/contrib/hod/hodlib/Common/threads.py create mode 100755 src/contrib/hod/hodlib/Common/types.py create mode 100755 src/contrib/hod/hodlib/Common/util.py create mode 100755 src/contrib/hod/hodlib/Common/xmlrpc.py create mode 100755 src/contrib/hod/hodlib/GridServices/__init__.py create mode 100755 src/contrib/hod/hodlib/GridServices/hdfs.py create mode 100755 src/contrib/hod/hodlib/GridServices/mapred.py create mode 100755 src/contrib/hod/hodlib/GridServices/service.py create mode 100755 src/contrib/hod/hodlib/Hod/__init__.py create mode 100755 src/contrib/hod/hodlib/Hod/hadoop.py create mode 100755 src/contrib/hod/hodlib/Hod/hod.py create mode 100755 src/contrib/hod/hodlib/Hod/nodePool.py create mode 100755 src/contrib/hod/hodlib/HodRing/__init__.py create mode 100755 src/contrib/hod/hodlib/HodRing/hodRing.py create mode 100755 src/contrib/hod/hodlib/NodePools/__init__.py create mode 100755 src/contrib/hod/hodlib/NodePools/torque.py create mode 100755 src/contrib/hod/hodlib/RingMaster/__init__.py create mode 100755 src/contrib/hod/hodlib/RingMaster/idleJobTracker.py create mode 100755 src/contrib/hod/hodlib/RingMaster/ringMaster.py create mode 100755 src/contrib/hod/hodlib/Schedulers/__init__.py create mode 100755 src/contrib/hod/hodlib/Schedulers/torque.py create mode 100755 src/contrib/hod/hodlib/ServiceProxy/__init__.py create mode 100755 src/contrib/hod/hodlib/ServiceProxy/serviceProxy.py create mode 100755 src/contrib/hod/hodlib/ServiceRegistry/__init__.py create mode 100755 src/contrib/hod/hodlib/ServiceRegistry/serviceRegistry.py create mode 100755 src/contrib/hod/hodlib/__init__.py create mode 100644 src/contrib/hod/ivy.xml create mode 100644 src/contrib/hod/ivy/libraries.properties create mode 100644 src/contrib/hod/support/checklimits.sh create mode 100755 src/contrib/hod/support/logcondense.py create mode 100644 src/contrib/hod/testing/__init__.py create mode 100644 src/contrib/hod/testing/helper.py create mode 100644 src/contrib/hod/testing/lib.py create mode 100644 src/contrib/hod/testing/main.py create mode 100644 src/contrib/hod/testing/testHadoop.py create mode 100644 src/contrib/hod/testing/testHod.py create mode 100644 src/contrib/hod/testing/testHodCleanup.py create mode 100644 src/contrib/hod/testing/testHodRing.py create mode 100644 src/contrib/hod/testing/testModule.py create mode 100644 src/contrib/hod/testing/testRingmasterRPCs.py create mode 100644 src/contrib/hod/testing/testThreads.py create mode 100644 src/contrib/hod/testing/testTypes.py create mode 100644 src/contrib/hod/testing/testUtil.py create mode 100644 src/contrib/hod/testing/testXmlrpc.py create mode 100755 src/contrib/index/README create mode 100755 src/contrib/index/build.xml create mode 100644 src/contrib/index/conf/index-config.xml create mode 100755 src/contrib/index/conf/index-config.xml.template create mode 100644 src/contrib/index/ivy.xml create mode 100644 src/contrib/index/ivy/libraries.properties create mode 100755 src/contrib/index/sample/data.txt create mode 100755 src/contrib/index/sample/data2.txt create mode 100755 
src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/HashingDistributionPolicy.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/IdentityLocalAnalysis.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocInputFormat.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocLocalAnalysis.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocRecordReader.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/LineDocTextAndOp.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/example/RoundRobinDistributionPolicy.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/FileSystemDirectory.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/LuceneIndexFileNameFilter.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/LuceneUtil.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/MixedDeletionPolicy.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/MixedDirectory.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/RAMDirectoryUtil.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/lucene/ShardWriter.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/main/UpdateIndex.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/DocumentAndOp.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/DocumentID.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IDistributionPolicy.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IIndexUpdater.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/ILocalAnalysis.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateCombiner.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateConfiguration.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateMapper.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateOutputFormat.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdatePartitioner.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdateReducer.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IndexUpdater.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/IntermediateForm.java create mode 100755 src/contrib/index/src/java/org/apache/hadoop/contrib/index/mapred/Shard.java create mode 100755 src/contrib/index/src/test/org/apache/hadoop/contrib/index/lucene/TestMixedDirectory.java create mode 100755 src/contrib/index/src/test/org/apache/hadoop/contrib/index/mapred/TestDistributionPolicy.java create mode 100755 src/contrib/index/src/test/org/apache/hadoop/contrib/index/mapred/TestIndexUpdater.java create mode 100644 src/contrib/mumak/bin/mumak.sh create mode 100644 src/contrib/mumak/build.xml create 
mode 100644 src/contrib/mumak/conf/log4j.properties create mode 100644 src/contrib/mumak/conf/mumak.xml create mode 100644 src/contrib/mumak/ivy.xml create mode 100644 src/contrib/mumak/ivy/libraries.properties create mode 100644 src/contrib/mumak/src/java/org/apache/hadoop/mapred/AllMapsCompletedTaskAction.java create mode 100644 src/contrib/mumak/src/java/org/apache/hadoop/mapred/EagerTaskInitializationListenerAspects.aj create mode 100644 src/contrib/mumak/src/java/org/apache/hadoop/mapred/HeartbeatEvent.java create mode 100644 src/contrib/mumak/src/java/org/apache/hadoop/mapred/JobCompleteEvent.java create mode 100644 src/contrib/mumak/src/java/org/apache/hadoop/mapred/JobSubmissionEvent.java create mode 100644 src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorClock.java create mode 100644 src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorEngine.java create mode 100644 src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorEvent.java create mode 100644 src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorEventListener.java create mode 100644 src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorEventQueue.java create mode 100644 src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobCache.java create mode 100644 src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobClient.java create mode 100644 src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobInProgress.java create mode 100644 src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobStory.java create mode 100644 src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobStoryProducer.java create mode 100644 src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorJobTracker.java create mode 100644 src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorLaunchTaskAction.java create mode 100644 src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorTaskTracker.java create mode 100644 src/contrib/mumak/src/java/org/apache/hadoop/mapred/SimulatorTaskTrackerStatus.java create mode 100644 src/contrib/mumak/src/java/org/apache/hadoop/mapred/SortedZombieJobProducer.java create mode 100644 src/contrib/mumak/src/java/org/apache/hadoop/mapred/TaskAttemptCompletionEvent.java create mode 100644 src/contrib/mumak/src/java/org/apache/hadoop/net/StaticMapping.java create mode 100644 src/contrib/mumak/src/test/org/apache/hadoop/mapred/CheckedEventQueue.java create mode 100644 src/contrib/mumak/src/test/org/apache/hadoop/mapred/FakeJobs.java create mode 100644 src/contrib/mumak/src/test/org/apache/hadoop/mapred/HeartbeatHelper.java create mode 100644 src/contrib/mumak/src/test/org/apache/hadoop/mapred/MockSimulatorEngine.java create mode 100644 src/contrib/mumak/src/test/org/apache/hadoop/mapred/MockSimulatorJobTracker.java create mode 100644 src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorEndToEnd.java create mode 100644 src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorEngine.java create mode 100644 src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorEventQueue.java create mode 100644 src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorJobClient.java create mode 100644 src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorJobTracker.java create mode 100644 src/contrib/mumak/src/test/org/apache/hadoop/mapred/TestSimulatorTaskTracker.java create mode 100644 src/contrib/raid/README create mode 100644 src/contrib/raid/build.xml create mode 100644 src/contrib/raid/conf/raid.xml create 
mode 100644 src/contrib/raid/ivy.xml create mode 100644 src/contrib/raid/ivy/libraries.properties create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/hdfs/DistributedRaidFileSystem.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/hdfs/RaidDFSUtil.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyRaid.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/BlockFixer.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/ConfigManager.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/Decoder.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/DirectoryTraversal.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/DistBlockFixer.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/DistRaid.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/DistRaidNode.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/Encoder.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/ErasureCode.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/GaloisField.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/HarIndex.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/JobMonitor.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/LocalBlockFixer.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/LocalRaidNode.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/ParityInputStream.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/RaidConfigurationException.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/RaidFilter.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/RaidNode.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/RaidNodeMetrics.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/RaidShell.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/RaidUtils.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/ReedSolomonCode.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/ReedSolomonDecoder.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/ReedSolomonEncoder.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/XORDecoder.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/XOREncoder.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/protocol/PolicyInfo.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/protocol/PolicyList.java create mode 100644 src/contrib/raid/src/java/org/apache/hadoop/raid/protocol/RaidProtocol.java create mode 100644 src/contrib/raid/src/test/org/apache/hadoop/hdfs/TestRaidDfs.java create mode 100644 src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRaid.java create mode 100644 src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixer.java create mode 100644 src/contrib/raid/src/test/org/apache/hadoop/raid/TestDirectoryTraversal.java create mode 100644 src/contrib/raid/src/test/org/apache/hadoop/raid/TestErasureCodes.java create mode 100644 src/contrib/raid/src/test/org/apache/hadoop/raid/TestGaloisField.java create mode 100644 
src/contrib/raid/src/test/org/apache/hadoop/raid/TestHarIndexParser.java create mode 100644 src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidFilter.java create mode 100644 src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidHar.java create mode 100644 src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidNode.java create mode 100644 src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidNodeMetrics.java create mode 100644 src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidPurge.java create mode 100644 src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidShell.java create mode 100644 src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidShellFsck.java create mode 100644 src/contrib/raid/src/test/org/apache/hadoop/raid/TestReedSolomonDecoder.java create mode 100644 src/contrib/raid/src/test/org/apache/hadoop/raid/TestReedSolomonEncoder.java create mode 100644 src/contrib/streaming/build.xml create mode 100644 src/contrib/streaming/ivy.xml create mode 100644 src/contrib/streaming/ivy/libraries.properties create mode 100644 src/contrib/streaming/src/java/org/apache/hadoop/streaming/Environment.java create mode 100644 src/contrib/streaming/src/java/org/apache/hadoop/streaming/HadoopStreaming.java create mode 100644 src/contrib/streaming/src/java/org/apache/hadoop/streaming/JarBuilder.java create mode 100644 src/contrib/streaming/src/java/org/apache/hadoop/streaming/PathFinder.java create mode 100644 src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeCombiner.java create mode 100644 src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java create mode 100644 src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRunner.java create mode 100644 src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapper.java create mode 100644 src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeReducer.java create mode 100644 src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamBaseRecordReader.java create mode 100644 src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamInputFormat.java create mode 100644 src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java create mode 100644 src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamKeyValUtil.java create mode 100644 src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamUtil.java create mode 100644 src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java create mode 100644 src/contrib/streaming/src/java/org/apache/hadoop/streaming/UTF8ByteArrayUtils.java create mode 100644 src/contrib/streaming/src/java/org/apache/hadoop/streaming/package.html create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/FailApp.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/StderrApp.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/StreamAggregate.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestGzipInput.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestMultipleArchiveFiles.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestMultipleCachefiles.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamAggregate.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamDataProtocol.java create mode 100644 
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamReduceNone.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamXmlRecordReader.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamedMerge.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreaming.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingAsDifferentUser.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingBadRecords.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingCombiner.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingCounters.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingEmptyInpNonemptyOut.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingExitStatus.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingFailure.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingKeyValue.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingSeparator.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingStderr.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestSymLink.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestUlimit.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/TrApp.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/TrAppReduce.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/UlimitApp.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/UniqApp.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/UtilTest.java create mode 100644 src/contrib/streaming/src/test/org/apache/hadoop/streaming/ValueCountReduce.java create mode 100644 src/contrib/test/core-site.xml create mode 100644 src/contrib/test/hadoop-site.xml create mode 100644 src/contrib/test/hdfs-site.xml create mode 100644 src/contrib/test/mapred-site.xml create mode 100644 src/contrib/thriftfs/README create mode 100644 src/contrib/thriftfs/build.xml create mode 100644 src/contrib/thriftfs/gen-cocoa/hadoopfs.h create mode 100644 src/contrib/thriftfs/gen-cocoa/hadoopfs.m create mode 100644 src/contrib/thriftfs/gen-cpp/ThriftHadoopFileSystem.cpp create mode 100644 src/contrib/thriftfs/gen-cpp/ThriftHadoopFileSystem.h create mode 100644 src/contrib/thriftfs/gen-cpp/ThriftHadoopFileSystem_server.skeleton.cpp create mode 100644 src/contrib/thriftfs/gen-cpp/hadoopfs_constants.cpp create mode 100644 src/contrib/thriftfs/gen-cpp/hadoopfs_constants.h create mode 100644 src/contrib/thriftfs/gen-cpp/hadoopfs_types.cpp create mode 100644 src/contrib/thriftfs/gen-cpp/hadoopfs_types.h create mode 100644 src/contrib/thriftfs/gen-java/org/apache/hadoop/thriftfs/api/BlockLocation.java create mode 100644 src/contrib/thriftfs/gen-java/org/apache/hadoop/thriftfs/api/FileStatus.java create mode 100644 src/contrib/thriftfs/gen-java/org/apache/hadoop/thriftfs/api/MalformedInputException.java create mode 100644 src/contrib/thriftfs/gen-java/org/apache/hadoop/thriftfs/api/Pathname.java create mode 
100644 src/contrib/thriftfs/gen-java/org/apache/hadoop/thriftfs/api/ThriftHadoopFileSystem.java create mode 100644 src/contrib/thriftfs/gen-java/org/apache/hadoop/thriftfs/api/ThriftHandle.java create mode 100644 src/contrib/thriftfs/gen-java/org/apache/hadoop/thriftfs/api/ThriftIOException.java create mode 100644 src/contrib/thriftfs/gen-perl/Constants.pm create mode 100644 src/contrib/thriftfs/gen-perl/ThriftHadoopFileSystem.pm create mode 100644 src/contrib/thriftfs/gen-perl/Types.pm create mode 100644 src/contrib/thriftfs/gen-php/ThriftHadoopFileSystem.php create mode 100644 src/contrib/thriftfs/gen-php/hadoopfs_types.php create mode 100644 src/contrib/thriftfs/gen-py/__init__.py create mode 100755 src/contrib/thriftfs/gen-py/hadoopfs/ThriftHadoopFileSystem-remote create mode 100644 src/contrib/thriftfs/gen-py/hadoopfs/ThriftHadoopFileSystem.py create mode 100644 src/contrib/thriftfs/gen-py/hadoopfs/__init__.py create mode 100644 src/contrib/thriftfs/gen-py/hadoopfs/constants.py create mode 100644 src/contrib/thriftfs/gen-py/hadoopfs/ttypes.py create mode 100644 src/contrib/thriftfs/gen-rb/ThriftHadoopFileSystem.rb create mode 100644 src/contrib/thriftfs/gen-rb/hadoopfs_constants.rb create mode 100644 src/contrib/thriftfs/gen-rb/hadoopfs_types.rb create mode 100644 src/contrib/thriftfs/gen-st/hadoopfs.st create mode 100644 src/contrib/thriftfs/if/hadoopfs.thrift create mode 100644 src/contrib/thriftfs/ivy.xml create mode 100644 src/contrib/thriftfs/ivy/libraries.properties create mode 100644 src/contrib/thriftfs/lib/README create mode 100644 src/contrib/thriftfs/lib/Thrift.LICENSE create mode 100644 src/contrib/thriftfs/lib/hadoopthriftapi.jar create mode 100644 src/contrib/thriftfs/lib/libthrift.jar create mode 100755 src/contrib/thriftfs/scripts/hdfs.py create mode 100755 src/contrib/thriftfs/scripts/start_thrift_server.sh create mode 100644 src/contrib/thriftfs/src/java/org/apache/hadoop/thriftfs/HadoopThriftServer.java create mode 100644 src/contrib/thriftfs/test/org/apache/hadoop/thriftfs/TestThriftfs.java create mode 100644 src/contrib/vaidya/build.xml create mode 100644 src/contrib/vaidya/ivy.xml create mode 100644 src/contrib/vaidya/ivy/libraries.properties create mode 100644 src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/DiagnosticTest.java create mode 100644 src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/JobDiagnoser.java create mode 100644 src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/PostExPerformanceDiagnoser.java create mode 100644 src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/BalancedReducePartitioning.java create mode 100644 src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/MapSideDiskSpill.java create mode 100644 src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/MapsReExecutionImpact.java create mode 100644 src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/ReadingHDFSFilesAsSideEffect.java create mode 100644 src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/ReducesReExecutionImpact.java create mode 100644 src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/postex_diagnosis_tests.xml create mode 100644 src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatistics.java create mode 100644 src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatisticsInterface.java create mode 100644 
src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/MapTaskStatistics.java create mode 100644 src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/ReduceTaskStatistics.java create mode 100644 src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/TaskStatistics.java create mode 100644 src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/util/XMLUtils.java create mode 100644 src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/vaidya.sh create mode 100644 src/core/core-default.xml create mode 100644 src/core/org/apache/hadoop/HadoopVersionAnnotation.java create mode 100644 src/core/org/apache/hadoop/conf/Configurable.java create mode 100644 src/core/org/apache/hadoop/conf/Configuration.java create mode 100644 src/core/org/apache/hadoop/conf/Configured.java create mode 100644 src/core/org/apache/hadoop/conf/Reconfigurable.java create mode 100644 src/core/org/apache/hadoop/conf/ReconfigurableBase.java create mode 100644 src/core/org/apache/hadoop/conf/ReconfigurationException.java create mode 100644 src/core/org/apache/hadoop/conf/ReconfigurationServlet.java create mode 100644 src/core/org/apache/hadoop/conf/ReconfigurationUtil.java create mode 100644 src/core/org/apache/hadoop/conf/package.html create mode 100644 src/core/org/apache/hadoop/filecache/DistributedCache.java create mode 100644 src/core/org/apache/hadoop/fs/BlockLocation.java create mode 100644 src/core/org/apache/hadoop/fs/BlockMissingException.java create mode 100644 src/core/org/apache/hadoop/fs/BufferedFSInputStream.java create mode 100644 src/core/org/apache/hadoop/fs/ChecksumException.java create mode 100644 src/core/org/apache/hadoop/fs/ChecksumFileSystem.java create mode 100644 src/core/org/apache/hadoop/fs/ContentSummary.java create mode 100644 src/core/org/apache/hadoop/fs/CorruptFileBlocks.java create mode 100644 src/core/org/apache/hadoop/fs/DF.java create mode 100644 src/core/org/apache/hadoop/fs/DU.java create mode 100644 src/core/org/apache/hadoop/fs/FSDataInputStream.java create mode 100644 src/core/org/apache/hadoop/fs/FSDataOutputStream.java create mode 100644 src/core/org/apache/hadoop/fs/FSError.java create mode 100644 src/core/org/apache/hadoop/fs/FSInputChecker.java create mode 100644 src/core/org/apache/hadoop/fs/FSInputStream.java create mode 100644 src/core/org/apache/hadoop/fs/FSOutputSummer.java create mode 100644 src/core/org/apache/hadoop/fs/FileChecksum.java create mode 100644 src/core/org/apache/hadoop/fs/FileStatus.java create mode 100644 src/core/org/apache/hadoop/fs/FileSystem.java create mode 100644 src/core/org/apache/hadoop/fs/FileUtil.java create mode 100644 src/core/org/apache/hadoop/fs/FilterFileSystem.java create mode 100644 src/core/org/apache/hadoop/fs/FsShell.java create mode 100644 src/core/org/apache/hadoop/fs/FsShellPermissions.java create mode 100644 src/core/org/apache/hadoop/fs/FsUrlConnection.java create mode 100644 src/core/org/apache/hadoop/fs/FsUrlStreamHandler.java create mode 100644 src/core/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java create mode 100644 src/core/org/apache/hadoop/fs/GlobExpander.java create mode 100644 src/core/org/apache/hadoop/fs/HarFileSystem.java create mode 100644 src/core/org/apache/hadoop/fs/InMemoryFileSystem.java create mode 100644 src/core/org/apache/hadoop/fs/LocalDirAllocator.java create mode 100644 src/core/org/apache/hadoop/fs/LocalFileSystem.java create mode 100644 src/core/org/apache/hadoop/fs/LocatedFileStatus.java create mode 100644 
src/core/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java create mode 100644 src/core/org/apache/hadoop/fs/Path.java create mode 100644 src/core/org/apache/hadoop/fs/PathFilter.java create mode 100644 src/core/org/apache/hadoop/fs/PositionedReadable.java create mode 100644 src/core/org/apache/hadoop/fs/RawLocalFileSystem.java create mode 100644 src/core/org/apache/hadoop/fs/RemoteIterator.java create mode 100644 src/core/org/apache/hadoop/fs/Seekable.java create mode 100644 src/core/org/apache/hadoop/fs/Syncable.java create mode 100644 src/core/org/apache/hadoop/fs/Trash.java create mode 100644 src/core/org/apache/hadoop/fs/ftp/FTPException.java create mode 100644 src/core/org/apache/hadoop/fs/ftp/FTPFileSystem.java create mode 100644 src/core/org/apache/hadoop/fs/ftp/FTPInputStream.java create mode 100644 src/core/org/apache/hadoop/fs/kfs/IFSImpl.java create mode 100644 src/core/org/apache/hadoop/fs/kfs/KFSImpl.java create mode 100644 src/core/org/apache/hadoop/fs/kfs/KFSInputStream.java create mode 100644 src/core/org/apache/hadoop/fs/kfs/KFSOutputStream.java create mode 100644 src/core/org/apache/hadoop/fs/kfs/KosmosFileSystem.java create mode 100644 src/core/org/apache/hadoop/fs/kfs/package.html create mode 100644 src/core/org/apache/hadoop/fs/package.html create mode 100644 src/core/org/apache/hadoop/fs/permission/AccessControlException.java create mode 100644 src/core/org/apache/hadoop/fs/permission/ChmodParser.java create mode 100644 src/core/org/apache/hadoop/fs/permission/FsAction.java create mode 100644 src/core/org/apache/hadoop/fs/permission/FsPermission.java create mode 100644 src/core/org/apache/hadoop/fs/permission/PermissionParser.java create mode 100644 src/core/org/apache/hadoop/fs/permission/PermissionStatus.java create mode 100644 src/core/org/apache/hadoop/fs/permission/UmaskParser.java create mode 100644 src/core/org/apache/hadoop/fs/s3/Block.java create mode 100644 src/core/org/apache/hadoop/fs/s3/FileSystemStore.java create mode 100644 src/core/org/apache/hadoop/fs/s3/INode.java create mode 100644 src/core/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java create mode 100644 src/core/org/apache/hadoop/fs/s3/MigrationTool.java create mode 100644 src/core/org/apache/hadoop/fs/s3/S3Credentials.java create mode 100644 src/core/org/apache/hadoop/fs/s3/S3Exception.java create mode 100644 src/core/org/apache/hadoop/fs/s3/S3FileSystem.java create mode 100644 src/core/org/apache/hadoop/fs/s3/S3FileSystemException.java create mode 100644 src/core/org/apache/hadoop/fs/s3/S3InputStream.java create mode 100644 src/core/org/apache/hadoop/fs/s3/S3OutputStream.java create mode 100644 src/core/org/apache/hadoop/fs/s3/VersionMismatchException.java create mode 100644 src/core/org/apache/hadoop/fs/s3/package.html create mode 100644 src/core/org/apache/hadoop/fs/s3native/FileMetadata.java create mode 100644 src/core/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java create mode 100644 src/core/org/apache/hadoop/fs/s3native/NativeFileSystemStore.java create mode 100644 src/core/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java create mode 100644 src/core/org/apache/hadoop/fs/s3native/PartialListing.java create mode 100644 src/core/org/apache/hadoop/fs/s3native/package.html create mode 100644 src/core/org/apache/hadoop/fs/shell/Command.java create mode 100644 src/core/org/apache/hadoop/fs/shell/CommandFormat.java create mode 100644 src/core/org/apache/hadoop/fs/shell/CommandUtils.java create mode 100644 src/core/org/apache/hadoop/fs/shell/Count.java create mode 100644 
src/core/org/apache/hadoop/http/FilterContainer.java create mode 100644 src/core/org/apache/hadoop/http/FilterInitializer.java create mode 100644 src/core/org/apache/hadoop/http/HtmlQuoting.java create mode 100644 src/core/org/apache/hadoop/http/HttpServer.java create mode 100644 src/core/org/apache/hadoop/io/AbstractMapWritable.java create mode 100644 src/core/org/apache/hadoop/io/ArrayFile.java create mode 100644 src/core/org/apache/hadoop/io/ArrayWritable.java create mode 100644 src/core/org/apache/hadoop/io/BinaryComparable.java create mode 100644 src/core/org/apache/hadoop/io/BloomMapFile.java create mode 100644 src/core/org/apache/hadoop/io/BooleanWritable.java create mode 100644 src/core/org/apache/hadoop/io/ByteWritable.java create mode 100644 src/core/org/apache/hadoop/io/BytesWritable.java create mode 100644 src/core/org/apache/hadoop/io/Closeable.java create mode 100644 src/core/org/apache/hadoop/io/CompressedWritable.java create mode 100644 src/core/org/apache/hadoop/io/DataInputBuffer.java create mode 100644 src/core/org/apache/hadoop/io/DataOutputBuffer.java create mode 100644 src/core/org/apache/hadoop/io/DefaultStringifier.java create mode 100644 src/core/org/apache/hadoop/io/DoubleWritable.java create mode 100644 src/core/org/apache/hadoop/io/FloatWritable.java create mode 100644 src/core/org/apache/hadoop/io/GenericWritable.java create mode 100644 src/core/org/apache/hadoop/io/IOUtils.java create mode 100644 src/core/org/apache/hadoop/io/InputBuffer.java create mode 100644 src/core/org/apache/hadoop/io/IntWritable.java create mode 100644 src/core/org/apache/hadoop/io/LongWritable.java create mode 100644 src/core/org/apache/hadoop/io/MD5Hash.java create mode 100644 src/core/org/apache/hadoop/io/MapFile.java create mode 100644 src/core/org/apache/hadoop/io/MapWritable.java create mode 100644 src/core/org/apache/hadoop/io/MultipleIOException.java create mode 100644 src/core/org/apache/hadoop/io/NullWritable.java create mode 100644 src/core/org/apache/hadoop/io/ObjectWritable.java create mode 100644 src/core/org/apache/hadoop/io/OutputBuffer.java create mode 100644 src/core/org/apache/hadoop/io/RawComparator.java create mode 100644 src/core/org/apache/hadoop/io/SequenceFile.java create mode 100644 src/core/org/apache/hadoop/io/SetFile.java create mode 100644 src/core/org/apache/hadoop/io/SortedMapWritable.java create mode 100644 src/core/org/apache/hadoop/io/Stringifier.java create mode 100644 src/core/org/apache/hadoop/io/Text.java create mode 100644 src/core/org/apache/hadoop/io/TwoDArrayWritable.java create mode 100644 src/core/org/apache/hadoop/io/UTF8.java create mode 100644 src/core/org/apache/hadoop/io/VIntWritable.java create mode 100644 src/core/org/apache/hadoop/io/VLongWritable.java create mode 100644 src/core/org/apache/hadoop/io/VersionMismatchException.java create mode 100644 src/core/org/apache/hadoop/io/VersionedWritable.java create mode 100644 src/core/org/apache/hadoop/io/Writable.java create mode 100644 src/core/org/apache/hadoop/io/WritableComparable.java create mode 100644 src/core/org/apache/hadoop/io/WritableComparator.java create mode 100644 src/core/org/apache/hadoop/io/WritableFactories.java create mode 100644 src/core/org/apache/hadoop/io/WritableFactory.java create mode 100644 src/core/org/apache/hadoop/io/WritableName.java create mode 100644 src/core/org/apache/hadoop/io/WritableUtils.java create mode 100644 src/core/org/apache/hadoop/io/compress/BZip2Codec.java create mode 100644 src/core/org/apache/hadoop/io/compress/BlockCompressorStream.java 
create mode 100644 src/core/org/apache/hadoop/io/compress/BlockDecompressorStream.java create mode 100644 src/core/org/apache/hadoop/io/compress/CodecPool.java create mode 100644 src/core/org/apache/hadoop/io/compress/CodecPrematureEOFException.java create mode 100644 src/core/org/apache/hadoop/io/compress/CompressionCodec.java create mode 100644 src/core/org/apache/hadoop/io/compress/CompressionCodecFactory.java create mode 100644 src/core/org/apache/hadoop/io/compress/CompressionInputStream.java create mode 100644 src/core/org/apache/hadoop/io/compress/CompressionOutputStream.java create mode 100644 src/core/org/apache/hadoop/io/compress/Compressor.java create mode 100644 src/core/org/apache/hadoop/io/compress/CompressorStream.java create mode 100644 src/core/org/apache/hadoop/io/compress/Decompressor.java create mode 100644 src/core/org/apache/hadoop/io/compress/DecompressorStream.java create mode 100644 src/core/org/apache/hadoop/io/compress/DefaultCodec.java create mode 100644 src/core/org/apache/hadoop/io/compress/GzipCodec.java create mode 100644 src/core/org/apache/hadoop/io/compress/LzmaCodec.java create mode 100644 src/core/org/apache/hadoop/io/compress/bzip2/BZip2Constants.java create mode 100644 src/core/org/apache/hadoop/io/compress/bzip2/BZip2DummyCompressor.java create mode 100644 src/core/org/apache/hadoop/io/compress/bzip2/BZip2DummyDecompressor.java create mode 100644 src/core/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java create mode 100644 src/core/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java create mode 100644 src/core/org/apache/hadoop/io/compress/bzip2/CRC.java create mode 100644 src/core/org/apache/hadoop/io/compress/lzma/LzmaCompressor.java create mode 100644 src/core/org/apache/hadoop/io/compress/lzma/LzmaDecompressor.java create mode 100644 src/core/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java create mode 100644 src/core/org/apache/hadoop/io/compress/zlib/BuiltInZlibInflater.java create mode 100644 src/core/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java create mode 100644 src/core/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java create mode 100644 src/core/org/apache/hadoop/io/compress/zlib/ZlibFactory.java create mode 100644 src/core/org/apache/hadoop/io/file/tfile/BCFile.java create mode 100644 src/core/org/apache/hadoop/io/file/tfile/BoundedByteArrayOutputStream.java create mode 100644 src/core/org/apache/hadoop/io/file/tfile/BoundedRangeFileInputStream.java create mode 100644 src/core/org/apache/hadoop/io/file/tfile/ByteArray.java create mode 100644 src/core/org/apache/hadoop/io/file/tfile/Chunk.java create mode 100644 src/core/org/apache/hadoop/io/file/tfile/CompareUtils.java create mode 100644 src/core/org/apache/hadoop/io/file/tfile/Compression.java create mode 100644 src/core/org/apache/hadoop/io/file/tfile/MetaBlockAlreadyExists.java create mode 100644 src/core/org/apache/hadoop/io/file/tfile/MetaBlockDoesNotExist.java create mode 100644 src/core/org/apache/hadoop/io/file/tfile/RawComparable.java create mode 100644 src/core/org/apache/hadoop/io/file/tfile/SimpleBufferedOutputStream.java create mode 100644 src/core/org/apache/hadoop/io/file/tfile/TFile.java create mode 100644 src/core/org/apache/hadoop/io/file/tfile/TFileDumper.java create mode 100644 src/core/org/apache/hadoop/io/file/tfile/Utils.java create mode 100644 src/core/org/apache/hadoop/io/package.html create mode 100644 src/core/org/apache/hadoop/io/retry/RetryInvocationHandler.java create mode 100644 
src/core/org/apache/hadoop/io/retry/RetryPolicies.java create mode 100644 src/core/org/apache/hadoop/io/retry/RetryPolicy.java create mode 100644 src/core/org/apache/hadoop/io/retry/RetryProxy.java create mode 100644 src/core/org/apache/hadoop/io/retry/package.html create mode 100644 src/core/org/apache/hadoop/io/serializer/Deserializer.java create mode 100644 src/core/org/apache/hadoop/io/serializer/DeserializerComparator.java create mode 100644 src/core/org/apache/hadoop/io/serializer/JavaSerialization.java create mode 100644 src/core/org/apache/hadoop/io/serializer/JavaSerializationComparator.java create mode 100644 src/core/org/apache/hadoop/io/serializer/Serialization.java create mode 100644 src/core/org/apache/hadoop/io/serializer/SerializationFactory.java create mode 100644 src/core/org/apache/hadoop/io/serializer/Serializer.java create mode 100644 src/core/org/apache/hadoop/io/serializer/WritableSerialization.java create mode 100644 src/core/org/apache/hadoop/io/serializer/package.html create mode 100644 src/core/org/apache/hadoop/io/simpleseekableformat/CorruptedDataException.java create mode 100644 src/core/org/apache/hadoop/io/simpleseekableformat/DataSegmentReader.java create mode 100644 src/core/org/apache/hadoop/io/simpleseekableformat/DataSegmentWriter.java create mode 100644 src/core/org/apache/hadoop/io/simpleseekableformat/InterleavedInputStream.java create mode 100644 src/core/org/apache/hadoop/io/simpleseekableformat/InterleavedOutputStream.java create mode 100644 src/core/org/apache/hadoop/io/simpleseekableformat/SimpleSeekableFormat.java create mode 100644 src/core/org/apache/hadoop/io/simpleseekableformat/SimpleSeekableFormatCodec.java create mode 100644 src/core/org/apache/hadoop/io/simpleseekableformat/SimpleSeekableFormatInputStream.java create mode 100644 src/core/org/apache/hadoop/io/simpleseekableformat/SimpleSeekableFormatOutputStream.java create mode 100644 src/core/org/apache/hadoop/ipc/Client.java create mode 100644 src/core/org/apache/hadoop/ipc/ConnectionHeader.java create mode 100644 src/core/org/apache/hadoop/ipc/RPC.java create mode 100644 src/core/org/apache/hadoop/ipc/RemoteException.java create mode 100644 src/core/org/apache/hadoop/ipc/Server.java create mode 100644 src/core/org/apache/hadoop/ipc/Status.java create mode 100644 src/core/org/apache/hadoop/ipc/VersionedProtocol.java create mode 100644 src/core/org/apache/hadoop/ipc/metrics/RpcActivityMBean.java create mode 100644 src/core/org/apache/hadoop/ipc/metrics/RpcMetrics.java create mode 100644 src/core/org/apache/hadoop/ipc/metrics/RpcMgt.java create mode 100644 src/core/org/apache/hadoop/ipc/metrics/RpcMgtMBean.java create mode 100644 src/core/org/apache/hadoop/ipc/package.html create mode 100644 src/core/org/apache/hadoop/log/LogLevel.java create mode 100644 src/core/org/apache/hadoop/metrics/ContextFactory.java create mode 100644 src/core/org/apache/hadoop/metrics/MetricsContext.java create mode 100644 src/core/org/apache/hadoop/metrics/MetricsException.java create mode 100644 src/core/org/apache/hadoop/metrics/MetricsRecord.java create mode 100644 src/core/org/apache/hadoop/metrics/MetricsUtil.java create mode 100644 src/core/org/apache/hadoop/metrics/Updater.java create mode 100644 src/core/org/apache/hadoop/metrics/file/FileContext.java create mode 100644 src/core/org/apache/hadoop/metrics/file/package.html create mode 100644 src/core/org/apache/hadoop/metrics/ganglia/GangliaContext.java create mode 100644 src/core/org/apache/hadoop/metrics/ganglia/package.html create mode 100644 
src/core/org/apache/hadoop/metrics/jmx/JMXContext.java create mode 100644 src/core/org/apache/hadoop/metrics/jmx/JMXContextMBean.java create mode 100644 src/core/org/apache/hadoop/metrics/jvm/EventCounter.java create mode 100644 src/core/org/apache/hadoop/metrics/jvm/JvmMetrics.java create mode 100644 src/core/org/apache/hadoop/metrics/package.html create mode 100644 src/core/org/apache/hadoop/metrics/spi/AbstractMetricsContext.java create mode 100644 src/core/org/apache/hadoop/metrics/spi/CompositeContext.java create mode 100644 src/core/org/apache/hadoop/metrics/spi/MetricValue.java create mode 100644 src/core/org/apache/hadoop/metrics/spi/MetricsRecordImpl.java create mode 100644 src/core/org/apache/hadoop/metrics/spi/NullContext.java create mode 100644 src/core/org/apache/hadoop/metrics/spi/NullContextWithUpdateThread.java create mode 100644 src/core/org/apache/hadoop/metrics/spi/OutputRecord.java create mode 100644 src/core/org/apache/hadoop/metrics/spi/Util.java create mode 100644 src/core/org/apache/hadoop/metrics/spi/package.html create mode 100644 src/core/org/apache/hadoop/metrics/util/MBeanUtil.java create mode 100644 src/core/org/apache/hadoop/metrics/util/MetricsBase.java create mode 100644 src/core/org/apache/hadoop/metrics/util/MetricsDynamicMBeanBase.java create mode 100644 src/core/org/apache/hadoop/metrics/util/MetricsIntValue.java create mode 100644 src/core/org/apache/hadoop/metrics/util/MetricsLongValue.java create mode 100644 src/core/org/apache/hadoop/metrics/util/MetricsRegistry.java create mode 100644 src/core/org/apache/hadoop/metrics/util/MetricsTimeVaryingInt.java create mode 100644 src/core/org/apache/hadoop/metrics/util/MetricsTimeVaryingLong.java create mode 100644 src/core/org/apache/hadoop/metrics/util/MetricsTimeVaryingRate.java create mode 100644 src/core/org/apache/hadoop/net/CachedDNSToSwitchMapping.java create mode 100644 src/core/org/apache/hadoop/net/DNS.java create mode 100644 src/core/org/apache/hadoop/net/DNSToSwitchMapping.java create mode 100644 src/core/org/apache/hadoop/net/IPv4AddressTruncationMapping.java create mode 100644 src/core/org/apache/hadoop/net/InetSocketAddressFactory.java create mode 100644 src/core/org/apache/hadoop/net/NetUtils.java create mode 100644 src/core/org/apache/hadoop/net/NetworkTopology.java create mode 100644 src/core/org/apache/hadoop/net/Node.java create mode 100644 src/core/org/apache/hadoop/net/NodeBase.java create mode 100644 src/core/org/apache/hadoop/net/ScriptBasedMapping.java create mode 100644 src/core/org/apache/hadoop/net/SocketIOWithTimeout.java create mode 100644 src/core/org/apache/hadoop/net/SocketInputStream.java create mode 100644 src/core/org/apache/hadoop/net/SocketOutputStream.java create mode 100644 src/core/org/apache/hadoop/net/SocksSocketFactory.java create mode 100644 src/core/org/apache/hadoop/net/StandardSocketFactory.java create mode 100644 src/core/org/apache/hadoop/net/package.html create mode 100644 src/core/org/apache/hadoop/record/BinaryRecordInput.java create mode 100644 src/core/org/apache/hadoop/record/BinaryRecordOutput.java create mode 100644 src/core/org/apache/hadoop/record/Buffer.java create mode 100644 src/core/org/apache/hadoop/record/CsvRecordInput.java create mode 100644 src/core/org/apache/hadoop/record/CsvRecordOutput.java create mode 100644 src/core/org/apache/hadoop/record/Index.java create mode 100644 src/core/org/apache/hadoop/record/Record.java create mode 100644 src/core/org/apache/hadoop/record/RecordComparator.java create mode 100644 
src/core/org/apache/hadoop/record/RecordInput.java create mode 100644 src/core/org/apache/hadoop/record/RecordOutput.java create mode 100644 src/core/org/apache/hadoop/record/Utils.java create mode 100644 src/core/org/apache/hadoop/record/XmlRecordInput.java create mode 100644 src/core/org/apache/hadoop/record/XmlRecordOutput.java create mode 100644 src/core/org/apache/hadoop/record/compiler/CGenerator.java create mode 100644 src/core/org/apache/hadoop/record/compiler/CodeBuffer.java create mode 100644 src/core/org/apache/hadoop/record/compiler/CodeGenerator.java create mode 100644 src/core/org/apache/hadoop/record/compiler/Consts.java create mode 100644 src/core/org/apache/hadoop/record/compiler/CppGenerator.java create mode 100644 src/core/org/apache/hadoop/record/compiler/JBoolean.java create mode 100644 src/core/org/apache/hadoop/record/compiler/JBuffer.java create mode 100644 src/core/org/apache/hadoop/record/compiler/JByte.java create mode 100644 src/core/org/apache/hadoop/record/compiler/JCompType.java create mode 100644 src/core/org/apache/hadoop/record/compiler/JDouble.java create mode 100644 src/core/org/apache/hadoop/record/compiler/JField.java create mode 100644 src/core/org/apache/hadoop/record/compiler/JFile.java create mode 100644 src/core/org/apache/hadoop/record/compiler/JFloat.java create mode 100644 src/core/org/apache/hadoop/record/compiler/JInt.java create mode 100644 src/core/org/apache/hadoop/record/compiler/JLong.java create mode 100644 src/core/org/apache/hadoop/record/compiler/JMap.java create mode 100644 src/core/org/apache/hadoop/record/compiler/JRecord.java create mode 100644 src/core/org/apache/hadoop/record/compiler/JString.java create mode 100644 src/core/org/apache/hadoop/record/compiler/JType.java create mode 100644 src/core/org/apache/hadoop/record/compiler/JVector.java create mode 100644 src/core/org/apache/hadoop/record/compiler/JavaGenerator.java create mode 100644 src/core/org/apache/hadoop/record/compiler/ant/RccTask.java create mode 100644 src/core/org/apache/hadoop/record/compiler/generated/ParseException.java create mode 100644 src/core/org/apache/hadoop/record/compiler/generated/Rcc.java create mode 100644 src/core/org/apache/hadoop/record/compiler/generated/RccConstants.java create mode 100644 src/core/org/apache/hadoop/record/compiler/generated/RccTokenManager.java create mode 100644 src/core/org/apache/hadoop/record/compiler/generated/SimpleCharStream.java create mode 100644 src/core/org/apache/hadoop/record/compiler/generated/Token.java create mode 100644 src/core/org/apache/hadoop/record/compiler/generated/TokenMgrError.java create mode 100644 src/core/org/apache/hadoop/record/compiler/generated/package.html create mode 100644 src/core/org/apache/hadoop/record/compiler/generated/rcc.jj create mode 100644 src/core/org/apache/hadoop/record/compiler/package.html create mode 100644 src/core/org/apache/hadoop/record/meta/FieldTypeInfo.java create mode 100644 src/core/org/apache/hadoop/record/meta/MapTypeID.java create mode 100644 src/core/org/apache/hadoop/record/meta/RecordTypeInfo.java create mode 100644 src/core/org/apache/hadoop/record/meta/StructTypeID.java create mode 100644 src/core/org/apache/hadoop/record/meta/TypeID.java create mode 100644 src/core/org/apache/hadoop/record/meta/Utils.java create mode 100644 src/core/org/apache/hadoop/record/meta/VectorTypeID.java create mode 100644 src/core/org/apache/hadoop/record/package.html create mode 100644 src/core/org/apache/hadoop/security/AccessControlException.java create mode 100644 
src/core/org/apache/hadoop/security/Group.java create mode 100644 src/core/org/apache/hadoop/security/PermissionChecker.java create mode 100644 src/core/org/apache/hadoop/security/SecurityUtil.java create mode 100644 src/core/org/apache/hadoop/security/UnixUserGroupInformation.java create mode 100644 src/core/org/apache/hadoop/security/User.java create mode 100644 src/core/org/apache/hadoop/security/UserGroupInformation.java create mode 100644 src/core/org/apache/hadoop/security/authorize/AuthorizationException.java create mode 100644 src/core/org/apache/hadoop/security/authorize/ConfiguredPolicy.java create mode 100644 src/core/org/apache/hadoop/security/authorize/ConnectionPermission.java create mode 100644 src/core/org/apache/hadoop/security/authorize/PolicyProvider.java create mode 100644 src/core/org/apache/hadoop/security/authorize/RefreshAuthorizationPolicyProtocol.java create mode 100644 src/core/org/apache/hadoop/security/authorize/Service.java create mode 100644 src/core/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java create mode 100644 src/core/org/apache/hadoop/util/AsyncDiskService.java create mode 100644 src/core/org/apache/hadoop/util/CyclicIteration.java create mode 100644 src/core/org/apache/hadoop/util/Daemon.java create mode 100644 src/core/org/apache/hadoop/util/DataChecksum.java create mode 100644 src/core/org/apache/hadoop/util/DiskChecker.java create mode 100644 src/core/org/apache/hadoop/util/GenericOptionsParser.java create mode 100644 src/core/org/apache/hadoop/util/GenericsUtil.java create mode 100644 src/core/org/apache/hadoop/util/HeapSort.java create mode 100644 src/core/org/apache/hadoop/util/HostsFileReader.java create mode 100644 src/core/org/apache/hadoop/util/IndexedSortable.java create mode 100644 src/core/org/apache/hadoop/util/IndexedSorter.java create mode 100644 src/core/org/apache/hadoop/util/LineReader.java create mode 100644 src/core/org/apache/hadoop/util/LinuxResourceCalculatorPlugin.java create mode 100644 src/core/org/apache/hadoop/util/MRAsyncDiskService.java create mode 100644 src/core/org/apache/hadoop/util/MergeSort.java create mode 100644 src/core/org/apache/hadoop/util/NativeCodeLoader.java create mode 100644 src/core/org/apache/hadoop/util/PlatformName.java create mode 100644 src/core/org/apache/hadoop/util/PrintJarMainClass.java create mode 100644 src/core/org/apache/hadoop/util/PriorityQueue.java create mode 100644 src/core/org/apache/hadoop/util/ProcessTree.java create mode 100644 src/core/org/apache/hadoop/util/ProcfsBasedProcessTree.java create mode 100644 src/core/org/apache/hadoop/util/ProgramDriver.java create mode 100644 src/core/org/apache/hadoop/util/Progress.java create mode 100644 src/core/org/apache/hadoop/util/Progressable.java create mode 100644 src/core/org/apache/hadoop/util/QuickSort.java create mode 100644 src/core/org/apache/hadoop/util/ReflectionUtils.java create mode 100644 src/core/org/apache/hadoop/util/ResourceCalculatorPlugin.java create mode 100644 src/core/org/apache/hadoop/util/RunJar.java create mode 100644 src/core/org/apache/hadoop/util/ServletUtil.java create mode 100644 src/core/org/apache/hadoop/util/Shell.java create mode 100644 src/core/org/apache/hadoop/util/StringUtils.java create mode 100644 src/core/org/apache/hadoop/util/Tool.java create mode 100644 src/core/org/apache/hadoop/util/ToolRunner.java create mode 100644 src/core/org/apache/hadoop/util/UTF8ByteArrayUtils.java create mode 100644 src/core/org/apache/hadoop/util/VersionInfo.java create mode 100644 
src/core/org/apache/hadoop/util/XMLUtils.java create mode 100644 src/core/org/apache/hadoop/util/bloom/BloomFilter.java create mode 100644 src/core/org/apache/hadoop/util/bloom/CountingBloomFilter.java create mode 100644 src/core/org/apache/hadoop/util/bloom/DynamicBloomFilter.java create mode 100644 src/core/org/apache/hadoop/util/bloom/Filter.java create mode 100644 src/core/org/apache/hadoop/util/bloom/HashFunction.java create mode 100644 src/core/org/apache/hadoop/util/bloom/Key.java create mode 100644 src/core/org/apache/hadoop/util/bloom/RemoveScheme.java create mode 100644 src/core/org/apache/hadoop/util/bloom/RetouchedBloomFilter.java create mode 100644 src/core/org/apache/hadoop/util/hash/Hash.java create mode 100644 src/core/org/apache/hadoop/util/hash/JenkinsHash.java create mode 100644 src/core/org/apache/hadoop/util/hash/MurmurHash.java create mode 100644 src/core/org/apache/hadoop/util/package.html create mode 100644 src/core/overview.html create mode 100644 src/docs/changes/ChangesFancyStyle.css create mode 100644 src/docs/changes/ChangesSimpleStyle.css create mode 100755 src/docs/changes/changes2html.pl create mode 100644 src/docs/forrest.properties create mode 100644 src/docs/releasenotes.html create mode 100644 src/docs/src/documentation/README.txt create mode 100644 src/docs/src/documentation/classes/CatalogManager.properties create mode 100644 src/docs/src/documentation/conf/cli.xconf create mode 100644 src/docs/src/documentation/content/xdocs/SLG_user_guide.xml create mode 100644 src/docs/src/documentation/content/xdocs/capacity_scheduler.xml create mode 100644 src/docs/src/documentation/content/xdocs/cluster_setup.xml create mode 100644 src/docs/src/documentation/content/xdocs/commands_manual.xml create mode 100644 src/docs/src/documentation/content/xdocs/distcp.xml create mode 100644 src/docs/src/documentation/content/xdocs/fair_scheduler.xml create mode 100644 src/docs/src/documentation/content/xdocs/gridmix.xml create mode 100644 src/docs/src/documentation/content/xdocs/hadoop_archives.xml create mode 100644 src/docs/src/documentation/content/xdocs/hdfs_design.xml create mode 100644 src/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml create mode 100644 src/docs/src/documentation/content/xdocs/hdfs_quota_admin_guide.xml create mode 100644 src/docs/src/documentation/content/xdocs/hdfs_shell.xml create mode 100644 src/docs/src/documentation/content/xdocs/hdfs_user_guide.xml create mode 100644 src/docs/src/documentation/content/xdocs/hod_admin_guide.xml create mode 100644 src/docs/src/documentation/content/xdocs/hod_config_guide.xml create mode 100644 src/docs/src/documentation/content/xdocs/hod_user_guide.xml create mode 100644 src/docs/src/documentation/content/xdocs/index.xml create mode 100644 src/docs/src/documentation/content/xdocs/libhdfs.xml create mode 100644 src/docs/src/documentation/content/xdocs/mapred_tutorial.xml create mode 100644 src/docs/src/documentation/content/xdocs/native_libraries.xml create mode 100644 src/docs/src/documentation/content/xdocs/quickstart.xml create mode 100644 src/docs/src/documentation/content/xdocs/service_level_auth.xml create mode 100644 src/docs/src/documentation/content/xdocs/site.xml create mode 100644 src/docs/src/documentation/content/xdocs/streaming.xml create mode 100644 src/docs/src/documentation/content/xdocs/tabs.xml create mode 100644 src/docs/src/documentation/content/xdocs/vaidya.xml create mode 100644 src/docs/src/documentation/resources/images/architecture.gif create mode 100644 
src/docs/src/documentation/resources/images/core-logo.gif create mode 100644 src/docs/src/documentation/resources/images/favicon.ico create mode 100644 src/docs/src/documentation/resources/images/hadoop-logo-big.jpg create mode 100644 src/docs/src/documentation/resources/images/hadoop-logo.jpg create mode 100644 src/docs/src/documentation/resources/images/hdfsarchitecture.gif create mode 100644 src/docs/src/documentation/resources/images/hdfsarchitecture.odg create mode 100644 src/docs/src/documentation/resources/images/hdfsarchitecture.png create mode 100644 src/docs/src/documentation/resources/images/hdfsdatanodes.gif create mode 100644 src/docs/src/documentation/resources/images/hdfsdatanodes.odg create mode 100644 src/docs/src/documentation/resources/images/hdfsdatanodes.png create mode 100644 src/docs/src/documentation/skinconf.xml create mode 100644 src/docs/status.xml create mode 100644 src/examples/org/apache/hadoop/examples/AggregateWordCount.java create mode 100644 src/examples/org/apache/hadoop/examples/AggregateWordHistogram.java create mode 100644 src/examples/org/apache/hadoop/examples/DBCountPageView.java create mode 100644 src/examples/org/apache/hadoop/examples/ExampleDriver.java create mode 100644 src/examples/org/apache/hadoop/examples/Grep.java create mode 100644 src/examples/org/apache/hadoop/examples/Join.java create mode 100644 src/examples/org/apache/hadoop/examples/MultiFileWordCount.java create mode 100644 src/examples/org/apache/hadoop/examples/PiEstimator.java create mode 100644 src/examples/org/apache/hadoop/examples/RandomTextWriter.java create mode 100644 src/examples/org/apache/hadoop/examples/RandomWriter.java create mode 100644 src/examples/org/apache/hadoop/examples/SecondarySort.java create mode 100644 src/examples/org/apache/hadoop/examples/SleepJob.java create mode 100644 src/examples/org/apache/hadoop/examples/Sort.java create mode 100644 src/examples/org/apache/hadoop/examples/WordCount.java create mode 100644 src/examples/org/apache/hadoop/examples/dancing/DancingLinks.java create mode 100644 src/examples/org/apache/hadoop/examples/dancing/DistributedPentomino.java create mode 100644 src/examples/org/apache/hadoop/examples/dancing/OneSidedPentomino.java create mode 100644 src/examples/org/apache/hadoop/examples/dancing/Pentomino.java create mode 100644 src/examples/org/apache/hadoop/examples/dancing/Sudoku.java create mode 100644 src/examples/org/apache/hadoop/examples/dancing/package.html create mode 100644 src/examples/org/apache/hadoop/examples/dancing/puzzle1.dta create mode 100644 src/examples/org/apache/hadoop/examples/package.html create mode 100644 src/examples/org/apache/hadoop/examples/terasort/TeraGen.java create mode 100644 src/examples/org/apache/hadoop/examples/terasort/TeraInputFormat.java create mode 100644 src/examples/org/apache/hadoop/examples/terasort/TeraOutputFormat.java create mode 100644 src/examples/org/apache/hadoop/examples/terasort/TeraSort.java create mode 100644 src/examples/org/apache/hadoop/examples/terasort/TeraValidate.java create mode 100644 src/examples/org/apache/hadoop/examples/terasort/job_history_summary.py create mode 100644 src/examples/org/apache/hadoop/examples/terasort/package.html create mode 100644 src/examples/pipes/.autom4te.cfg create mode 100644 src/examples/pipes/Makefile.am create mode 100644 src/examples/pipes/Makefile.in create mode 100644 src/examples/pipes/README.txt create mode 100644 src/examples/pipes/aclocal.m4 create mode 100644 src/examples/pipes/conf/word-part.xml create mode 100644 
src/examples/pipes/conf/word.xml create mode 100644 src/examples/pipes/config.guess create mode 100644 src/examples/pipes/config.sub create mode 100755 src/examples/pipes/configure create mode 100644 src/examples/pipes/configure.ac create mode 100644 src/examples/pipes/depcomp create mode 100644 src/examples/pipes/impl/config.h.in create mode 100644 src/examples/pipes/impl/sort.cc create mode 100644 src/examples/pipes/impl/wordcount-nopipe.cc create mode 100644 src/examples/pipes/impl/wordcount-part.cc create mode 100644 src/examples/pipes/impl/wordcount-simple.cc create mode 100644 src/examples/pipes/install-sh create mode 100644 src/examples/pipes/ltmain.sh create mode 100644 src/examples/pipes/missing create mode 100644 src/examples/python/WordCount.py create mode 100644 src/examples/python/compile create mode 100644 src/examples/python/pyAbacus/JyAbacusWCPlugIN.py create mode 100644 src/examples/python/pyAbacus/JythonAbacus.py create mode 100644 src/examples/python/pyAbacus/compile create mode 100644 src/examples/python/pyAbacus/wordcountaggregator.spec create mode 100644 src/hdfs/hdfs-default.xml create mode 100644 src/hdfs/org/apache/hadoop/hdfs/ChecksumDistributedFileSystem.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/DFSClient.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/DFSUtil.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/HDFSPolicyProvider.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/HftpFileSystem.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/HighTideShell.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/HsftpFileSystem.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/package.html create mode 100644 src/hdfs/org/apache/hadoop/hdfs/protocol/AlreadyBeingCreatedException.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/protocol/Block.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeID.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/protocol/DirectoryListing.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/protocol/FSConstants.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/protocol/HighTideProtocol.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/protocol/LocatedBlock.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/protocol/LocatedDirectoryListing.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/protocol/PolicyInfo.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/protocol/ProtocolCompatible.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/protocol/UnregisteredDatanodeException.java create mode 100644 
src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/common/GenerationStamp.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/common/HdfsConstants.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/common/InconsistentFSStateException.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/common/Storage.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/common/StorageInfo.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/common/ThreadLocalDateFormat.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeManager.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeObject.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeStatusReport.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/common/Upgradeable.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/common/Util.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockAlreadyExistsException.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockSender.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockTransferThrottler.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataStorage.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/datanode/DatanodeBlockInfo.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDatasetAsyncDiskService.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeActivityMBean.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/ConfigManager.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/DirectoryTraversal.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/FileFixer.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/HighTideConfigurationException.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/HighTideNode.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/PendingReplication.java create mode 100644 
src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/README create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/hightide.xml create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/metrics/HighTideNodeActivityMBean.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/metrics/HighTideNodeMetrics.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicy.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/ConfigManager.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/DecommissionManager.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSClusterStats.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSInodeInfo.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/Host2NodesMap.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/INode.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFile.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/LeaseExpiredException.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameCache.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeConfServlet.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java create mode 100644 
src/hdfs/org/apache/hadoop/hdfs/server/namenode/NotReplicatedYetException.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/PendingReplicationBlocks.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/PermissionChecker.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/SafeModeException.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/StreamFile.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/UnsupportedActionException.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeActivtyMBean.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/protocol/BlockMetaDataInfo.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/protocol/DisallowedDatanodeException.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/server/protocol/UpgradeCommand.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/tools/DFSck.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/tools/HDFSConcat.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/tools/JMXGet.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/util/ByteArray.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/util/DataTransferThrottler.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/util/GSet.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/util/GSetByHashMap.java create mode 100644 src/hdfs/org/apache/hadoop/hdfs/util/LightWeightGSet.java create mode 100644 src/mapred/mapred-default.xml create mode 100644 src/mapred/org/apache/hadoop/mapred/AdminOperationsProtocol.java create mode 100644 src/mapred/org/apache/hadoop/mapred/BasicTypeSorterBase.java create mode 100644 src/mapred/org/apache/hadoop/mapred/BufferSorter.java create mode 100644 src/mapred/org/apache/hadoop/mapred/Child.java create mode 100644 
src/mapred/org/apache/hadoop/mapred/CleanupQueue.java create mode 100644 src/mapred/org/apache/hadoop/mapred/Clock.java create mode 100644 src/mapred/org/apache/hadoop/mapred/ClusterStatus.java create mode 100644 src/mapred/org/apache/hadoop/mapred/CommitTaskAction.java create mode 100644 src/mapred/org/apache/hadoop/mapred/CompletedJobStatusStore.java create mode 100644 src/mapred/org/apache/hadoop/mapred/CompositeTaskTrackerInstrumentation.java create mode 100644 src/mapred/org/apache/hadoop/mapred/Counters.java create mode 100644 src/mapred/org/apache/hadoop/mapred/DefaultJobHistoryParser.java create mode 100644 src/mapred/org/apache/hadoop/mapred/DefaultTaskController.java create mode 100644 src/mapred/org/apache/hadoop/mapred/DisallowedTaskTrackerException.java create mode 100644 src/mapred/org/apache/hadoop/mapred/EagerTaskInitializationListener.java create mode 100644 src/mapred/org/apache/hadoop/mapred/FileAlreadyExistsException.java create mode 100644 src/mapred/org/apache/hadoop/mapred/FileInputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapred/FileOutputCommitter.java create mode 100644 src/mapred/org/apache/hadoop/mapred/FileOutputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapred/FileSplit.java create mode 100644 src/mapred/org/apache/hadoop/mapred/HeartbeatResponse.java create mode 100644 src/mapred/org/apache/hadoop/mapred/HistoryViewer.java create mode 100644 src/mapred/org/apache/hadoop/mapred/ID.java create mode 100644 src/mapred/org/apache/hadoop/mapred/IFile.java create mode 100644 src/mapred/org/apache/hadoop/mapred/IFileInputStream.java create mode 100644 src/mapred/org/apache/hadoop/mapred/IFileOutputStream.java create mode 100644 src/mapred/org/apache/hadoop/mapred/IndexCache.java create mode 100644 src/mapred/org/apache/hadoop/mapred/InputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapred/InputSplit.java create mode 100644 src/mapred/org/apache/hadoop/mapred/InterTrackerProtocol.java create mode 100644 src/mapred/org/apache/hadoop/mapred/InvalidFileTypeException.java create mode 100644 src/mapred/org/apache/hadoop/mapred/InvalidInputException.java create mode 100644 src/mapred/org/apache/hadoop/mapred/InvalidJobConfException.java create mode 100644 src/mapred/org/apache/hadoop/mapred/IsolationRunner.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JSPUtil.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JVMId.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JobChangeEvent.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JobClient.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JobConf.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JobConfigurable.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JobContext.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JobEndNotifier.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JobHistory.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JobID.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JobInProgress.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JobInProgressListener.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JobInProgress_Counter.properties create mode 100644 src/mapred/org/apache/hadoop/mapred/JobPriority.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JobProfile.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JobQueueClient.java create mode 100644 
src/mapred/org/apache/hadoop/mapred/JobQueueInfo.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JobQueueJobInProgressListener.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JobQueueTaskScheduler.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JobStatus.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JobStatusChangeEvent.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JobSubmissionProtocol.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JobTracker.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JobTrackerInstrumentation.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JobTrackerMetricsInst.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JobTrackerStatistics.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JvmContext.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JvmManager.java create mode 100644 src/mapred/org/apache/hadoop/mapred/JvmTask.java create mode 100644 src/mapred/org/apache/hadoop/mapred/KeyValueLineRecordReader.java create mode 100644 src/mapred/org/apache/hadoop/mapred/KeyValueTextInputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapred/KillJobAction.java create mode 100644 src/mapred/org/apache/hadoop/mapred/KillTaskAction.java create mode 100644 src/mapred/org/apache/hadoop/mapred/LaunchTaskAction.java create mode 100644 src/mapred/org/apache/hadoop/mapred/LimitTasksPerJobTaskScheduler.java create mode 100644 src/mapred/org/apache/hadoop/mapred/LineRecordReader.java create mode 100644 src/mapred/org/apache/hadoop/mapred/LinuxTaskController.java create mode 100644 src/mapred/org/apache/hadoop/mapred/LocalJobRunner.java create mode 100644 src/mapred/org/apache/hadoop/mapred/MRConstants.java create mode 100644 src/mapred/org/apache/hadoop/mapred/MapFileOutputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapred/MapOutputFile.java create mode 100644 src/mapred/org/apache/hadoop/mapred/MapReduceBase.java create mode 100644 src/mapred/org/apache/hadoop/mapred/MapReducePolicyProvider.java create mode 100644 src/mapred/org/apache/hadoop/mapred/MapRunnable.java create mode 100644 src/mapred/org/apache/hadoop/mapred/MapRunner.java create mode 100644 src/mapred/org/apache/hadoop/mapred/MapTask.java create mode 100644 src/mapred/org/apache/hadoop/mapred/MapTaskCompletionEventsUpdate.java create mode 100644 src/mapred/org/apache/hadoop/mapred/MapTaskRunner.java create mode 100644 src/mapred/org/apache/hadoop/mapred/MapTaskStatus.java create mode 100644 src/mapred/org/apache/hadoop/mapred/Mapper.java create mode 100644 src/mapred/org/apache/hadoop/mapred/MergeSorter.java create mode 100644 src/mapred/org/apache/hadoop/mapred/Merger.java create mode 100644 src/mapred/org/apache/hadoop/mapred/MultiFileInputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapred/MultiFileSplit.java create mode 100644 src/mapred/org/apache/hadoop/mapred/NodeHealthCheckerService.java create mode 100644 src/mapred/org/apache/hadoop/mapred/OutputCollector.java create mode 100644 src/mapred/org/apache/hadoop/mapred/OutputCommitter.java create mode 100644 src/mapred/org/apache/hadoop/mapred/OutputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapred/OutputLogFilter.java create mode 100644 src/mapred/org/apache/hadoop/mapred/Partitioner.java create mode 100644 src/mapred/org/apache/hadoop/mapred/QueueAclsInfo.java create mode 100644 src/mapred/org/apache/hadoop/mapred/QueueManager.java create mode 100644 
src/mapred/org/apache/hadoop/mapred/RamManager.java create mode 100644 src/mapred/org/apache/hadoop/mapred/RawKeyValueIterator.java create mode 100644 src/mapred/org/apache/hadoop/mapred/RecordReader.java create mode 100644 src/mapred/org/apache/hadoop/mapred/RecordWriter.java create mode 100644 src/mapred/org/apache/hadoop/mapred/ReduceTask.java create mode 100644 src/mapred/org/apache/hadoop/mapred/ReduceTaskRunner.java create mode 100644 src/mapred/org/apache/hadoop/mapred/ReduceTaskStatus.java create mode 100644 src/mapred/org/apache/hadoop/mapred/Reducer.java create mode 100644 src/mapred/org/apache/hadoop/mapred/ReinitTrackerAction.java create mode 100644 src/mapred/org/apache/hadoop/mapred/Reporter.java create mode 100644 src/mapred/org/apache/hadoop/mapred/ResourceEstimator.java create mode 100644 src/mapred/org/apache/hadoop/mapred/ResourceReporter.java create mode 100644 src/mapred/org/apache/hadoop/mapred/RunningJob.java create mode 100644 src/mapred/org/apache/hadoop/mapred/SequenceFileAsBinaryInputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapred/SequenceFileAsBinaryOutputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapred/SequenceFileAsTextInputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapred/SequenceFileAsTextRecordReader.java create mode 100644 src/mapred/org/apache/hadoop/mapred/SequenceFileInputFilter.java create mode 100644 src/mapred/org/apache/hadoop/mapred/SequenceFileInputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapred/SequenceFileOutputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapred/SequenceFileRecordReader.java create mode 100644 src/mapred/org/apache/hadoop/mapred/SkipBadRecords.java create mode 100644 src/mapred/org/apache/hadoop/mapred/SortedRanges.java create mode 100644 src/mapred/org/apache/hadoop/mapred/SpillRecord.java create mode 100644 src/mapred/org/apache/hadoop/mapred/StatisticsCollector.java create mode 100644 src/mapred/org/apache/hadoop/mapred/TIPStatus.java create mode 100644 src/mapred/org/apache/hadoop/mapred/Task.java create mode 100644 src/mapred/org/apache/hadoop/mapred/TaskAttemptContext.java create mode 100644 src/mapred/org/apache/hadoop/mapred/TaskAttemptID.java create mode 100644 src/mapred/org/apache/hadoop/mapred/TaskCompletionEvent.java create mode 100644 src/mapred/org/apache/hadoop/mapred/TaskController.java create mode 100644 src/mapred/org/apache/hadoop/mapred/TaskGraphServlet.java create mode 100644 src/mapred/org/apache/hadoop/mapred/TaskID.java create mode 100644 src/mapred/org/apache/hadoop/mapred/TaskInProgress.java create mode 100644 src/mapred/org/apache/hadoop/mapred/TaskLog.java create mode 100644 src/mapred/org/apache/hadoop/mapred/TaskLogAppender.java create mode 100644 src/mapred/org/apache/hadoop/mapred/TaskLogServlet.java create mode 100644 src/mapred/org/apache/hadoop/mapred/TaskLogsMonitor.java create mode 100644 src/mapred/org/apache/hadoop/mapred/TaskMemoryManagerThread.java create mode 100644 src/mapred/org/apache/hadoop/mapred/TaskReport.java create mode 100644 src/mapred/org/apache/hadoop/mapred/TaskRunner.java create mode 100644 src/mapred/org/apache/hadoop/mapred/TaskScheduler.java create mode 100644 src/mapred/org/apache/hadoop/mapred/TaskStatus.java create mode 100644 src/mapred/org/apache/hadoop/mapred/TaskTracker.java create mode 100644 src/mapred/org/apache/hadoop/mapred/TaskTrackerAction.java create mode 100644 src/mapred/org/apache/hadoop/mapred/TaskTrackerInstrumentation.java create mode 100644 
src/mapred/org/apache/hadoop/mapred/TaskTrackerManager.java create mode 100644 src/mapred/org/apache/hadoop/mapred/TaskTrackerMetricsInst.java create mode 100644 src/mapred/org/apache/hadoop/mapred/TaskTrackerStatus.java create mode 100644 src/mapred/org/apache/hadoop/mapred/TaskUmbilicalProtocol.java create mode 100644 src/mapred/org/apache/hadoop/mapred/Task_Counter.properties create mode 100644 src/mapred/org/apache/hadoop/mapred/TextInputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapred/TextOutputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapred/Utils.java create mode 100644 src/mapred/org/apache/hadoop/mapred/jobcontrol/Job.java create mode 100644 src/mapred/org/apache/hadoop/mapred/jobcontrol/JobControl.java create mode 100644 src/mapred/org/apache/hadoop/mapred/jobcontrol/package.html create mode 100644 src/mapred/org/apache/hadoop/mapred/join/ArrayListBackedIterator.java create mode 100644 src/mapred/org/apache/hadoop/mapred/join/ComposableInputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapred/join/ComposableRecordReader.java create mode 100644 src/mapred/org/apache/hadoop/mapred/join/CompositeInputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapred/join/CompositeInputSplit.java create mode 100644 src/mapred/org/apache/hadoop/mapred/join/CompositeRecordReader.java create mode 100644 src/mapred/org/apache/hadoop/mapred/join/InnerJoinRecordReader.java create mode 100644 src/mapred/org/apache/hadoop/mapred/join/JoinRecordReader.java create mode 100644 src/mapred/org/apache/hadoop/mapred/join/MultiFilterRecordReader.java create mode 100644 src/mapred/org/apache/hadoop/mapred/join/OuterJoinRecordReader.java create mode 100644 src/mapred/org/apache/hadoop/mapred/join/OverrideRecordReader.java create mode 100644 src/mapred/org/apache/hadoop/mapred/join/Parser.java create mode 100644 src/mapred/org/apache/hadoop/mapred/join/ResetableIterator.java create mode 100644 src/mapred/org/apache/hadoop/mapred/join/StreamBackedIterator.java create mode 100644 src/mapred/org/apache/hadoop/mapred/join/TupleWritable.java create mode 100644 src/mapred/org/apache/hadoop/mapred/join/WrappedRecordReader.java create mode 100644 src/mapred/org/apache/hadoop/mapred/join/package.html create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/Chain.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/ChainMapper.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/ChainReducer.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/CombineFileInputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/CombineFileRecordReader.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/CombineFileSplit.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/DelegatingInputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/DelegatingMapper.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/FieldSelectionMapReduce.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/HashPartitioner.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/IdentityMapper.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/IdentityReducer.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/InputSampler.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/InverseMapper.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/KeyFieldBasedComparator.java create mode 100644 
src/mapred/org/apache/hadoop/mapred/lib/KeyFieldBasedPartitioner.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/KeyFieldHelper.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/LongSumReducer.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/MultipleInputs.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/MultipleOutputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/MultipleOutputs.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/MultipleSequenceFileOutputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/MultipleTextOutputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/MultithreadedMapRunner.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/NLineInputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/NullOutputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/RegexMapper.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/TaggedInputSplit.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/TokenCountMapper.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/TotalOrderPartitioner.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/aggregate/DoubleValueSum.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/aggregate/LongValueMax.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/aggregate/LongValueMin.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/aggregate/LongValueSum.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/aggregate/StringValueMax.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/aggregate/StringValueMin.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/aggregate/UniqValueCount.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/aggregate/UserDefinedValueAggregatorDescriptor.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregator.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorBaseDescriptor.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorCombiner.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorDescriptor.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorJob.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorJobBase.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorMapper.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorReducer.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueHistogram.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/aggregate/package.html create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/db/DBConfiguration.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/db/DBInputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/db/DBOutputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/db/DBWritable.java create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/db/package.html create mode 100644 src/mapred/org/apache/hadoop/mapred/lib/package.html create mode 100644 src/mapred/org/apache/hadoop/mapred/package.html create mode 100644 src/mapred/org/apache/hadoop/mapred/pipes/Application.java create mode 100644 
src/mapred/org/apache/hadoop/mapred/pipes/BinaryProtocol.java create mode 100644 src/mapred/org/apache/hadoop/mapred/pipes/DownwardProtocol.java create mode 100644 src/mapred/org/apache/hadoop/mapred/pipes/OutputHandler.java create mode 100644 src/mapred/org/apache/hadoop/mapred/pipes/PipesMapRunner.java create mode 100644 src/mapred/org/apache/hadoop/mapred/pipes/PipesNonJavaInputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapred/pipes/PipesPartitioner.java create mode 100644 src/mapred/org/apache/hadoop/mapred/pipes/PipesReducer.java create mode 100644 src/mapred/org/apache/hadoop/mapred/pipes/Submitter.java create mode 100644 src/mapred/org/apache/hadoop/mapred/pipes/UpwardProtocol.java create mode 100644 src/mapred/org/apache/hadoop/mapred/pipes/package.html create mode 100644 src/mapred/org/apache/hadoop/mapred/tools/MRAdmin.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/ClusterMetrics.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/Counter.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/CounterGroup.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/Counters.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/ID.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/InputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/InputSplit.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/Job.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/JobContext.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/JobID.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/JobStatus.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/MapContext.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/Mapper.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/OutputCommitter.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/OutputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/Partitioner.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/RecordReader.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/RecordWriter.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/ReduceContext.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/Reducer.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/StatusReporter.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/TaskAttemptContext.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/TaskAttemptID.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/TaskID.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/TaskInputOutputContext.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/TaskType.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/lib/input/FileSplit.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/lib/input/InvalidInputException.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/lib/input/LineRecordReader.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/lib/input/SequenceFileInputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/lib/input/SequenceFileRecordReader.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/lib/input/TextInputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/lib/map/InverseMapper.java create mode 100644 
src/mapred/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/lib/map/TokenCounterMapper.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/lib/output/NullOutputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/lib/output/SequenceFileOutputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/lib/output/TextOutputFormat.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/lib/partition/HashPartitioner.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/lib/reduce/IntSumReducer.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/lib/reduce/LongSumReducer.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/server/jobtracker/JobTrackerJspHelper.java create mode 100644 src/mapred/org/apache/hadoop/mapreduce/server/jobtracker/TaskTracker.java create mode 100644 src/native/.autom4te.cfg create mode 100644 src/native/AUTHORS create mode 100644 src/native/COPYING create mode 100644 src/native/ChangeLog create mode 100644 src/native/INSTALL create mode 100644 src/native/Makefile.am create mode 100644 src/native/Makefile.in create mode 100644 src/native/NEWS create mode 100644 src/native/README create mode 100644 src/native/acinclude.m4 create mode 100644 src/native/aclocal.m4 create mode 100644 src/native/config.h.in create mode 100755 src/native/config/config.guess create mode 100755 src/native/config/config.sub create mode 100755 src/native/config/depcomp create mode 100755 src/native/config/install-sh create mode 100644 src/native/config/ltmain.sh create mode 100755 src/native/config/missing create mode 100755 src/native/configure create mode 100644 src/native/configure.ac create mode 100644 src/native/lib/Makefile.am create mode 100644 src/native/lib/Makefile.in create mode 100755 src/native/packageNativeHadoop.sh create mode 100644 src/native/src/org/apache/hadoop/io/compress/lzma/LzmaCompressor.c create mode 100644 src/native/src/org/apache/hadoop/io/compress/lzma/LzmaDecompressor.c create mode 100644 src/native/src/org/apache/hadoop/io/compress/lzma/Makefile.am create mode 100644 src/native/src/org/apache/hadoop/io/compress/lzma/Makefile.in create mode 100644 src/native/src/org/apache/hadoop/io/compress/lzma/org_apache_hadoop_io_compress_lzma.h create mode 100644 src/native/src/org/apache/hadoop/io/compress/zlib/Makefile.am create mode 100644 src/native/src/org/apache/hadoop/io/compress/zlib/Makefile.in create mode 100644 src/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c create mode 100644 src/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c create mode 100644 src/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h create mode 100644 src/native/src/org_apache_hadoop.h create mode 100755 src/saveVersion.sh create mode 100755 src/test/bin/test-patch.sh create mode 100644 src/test/checkstyle-noframes-sorted.xsl create mode 100644 src/test/checkstyle.xml create mode 100644 src/test/core-site.xml create mode 100644 src/test/ddl/buffer.jr create mode 100644 src/test/ddl/int.jr create mode 100644 src/test/ddl/string.jr create mode 100644 src/test/ddl/test.jr create mode 100644 src/test/findbugsExcludeFile.xml create mode 100644 src/test/hadoop-policy.xml create mode 100644 src/test/hadoop-site.xml 
create mode 100644 src/test/hdfs-site.xml create mode 100644 src/test/lib/ftplet-api-1.0.0-SNAPSHOT.jar create mode 100644 src/test/lib/ftpserver-core-1.0.0-SNAPSHOT.jar create mode 100644 src/test/lib/ftpserver-server-1.0.0-SNAPSHOT.jar create mode 100644 src/test/lib/mina-core-2.0.0-M2-20080407.124109-12.jar create mode 100644 src/test/log4j.properties create mode 100644 src/test/mapred-site.xml create mode 100644 src/test/org/apache/hadoop/cli/TestCLI.java create mode 100644 src/test/org/apache/hadoop/cli/clitest_data/data120bytes create mode 100644 src/test/org/apache/hadoop/cli/clitest_data/data15bytes create mode 100644 src/test/org/apache/hadoop/cli/clitest_data/data30bytes create mode 100644 src/test/org/apache/hadoop/cli/clitest_data/data60bytes create mode 100644 src/test/org/apache/hadoop/cli/testConf.xml create mode 100644 src/test/org/apache/hadoop/cli/testConf.xsl create mode 100644 src/test/org/apache/hadoop/cli/util/CLITestData.java create mode 100644 src/test/org/apache/hadoop/cli/util/CommandExecutor.java create mode 100644 src/test/org/apache/hadoop/cli/util/ComparatorBase.java create mode 100644 src/test/org/apache/hadoop/cli/util/ComparatorData.java create mode 100644 src/test/org/apache/hadoop/cli/util/ExactComparator.java create mode 100644 src/test/org/apache/hadoop/cli/util/RegexpComparator.java create mode 100644 src/test/org/apache/hadoop/cli/util/SubstringComparator.java create mode 100644 src/test/org/apache/hadoop/cli/util/TokenComparator.java create mode 100644 src/test/org/apache/hadoop/conf/TestConfiguration.java create mode 100644 src/test/org/apache/hadoop/conf/TestJobConf.java create mode 100644 src/test/org/apache/hadoop/conf/TestNoDefaultsJobConf.java create mode 100644 src/test/org/apache/hadoop/conf/TestReconfiguration.java create mode 100644 src/test/org/apache/hadoop/filecache/TestDistributedCache.java create mode 100644 src/test/org/apache/hadoop/fs/AccumulatingReducer.java create mode 100644 src/test/org/apache/hadoop/fs/DFSCIOTest.java create mode 100644 src/test/org/apache/hadoop/fs/DistributedFSCheck.java create mode 100644 src/test/org/apache/hadoop/fs/FileSystemContractBaseTest.java create mode 100644 src/test/org/apache/hadoop/fs/IOMapperBase.java create mode 100644 src/test/org/apache/hadoop/fs/TestChecksumFileSystem.java create mode 100644 src/test/org/apache/hadoop/fs/TestCopyFiles.java create mode 100644 src/test/org/apache/hadoop/fs/TestCorruptFileBlocks.java create mode 100644 src/test/org/apache/hadoop/fs/TestDFSIO.java create mode 100644 src/test/org/apache/hadoop/fs/TestDU.java create mode 100644 src/test/org/apache/hadoop/fs/TestFileSystem.java create mode 100644 src/test/org/apache/hadoop/fs/TestFilterFileSystem.java create mode 100644 src/test/org/apache/hadoop/fs/TestGetFileBlockLocations.java create mode 100644 src/test/org/apache/hadoop/fs/TestGlobExpander.java create mode 100644 src/test/org/apache/hadoop/fs/TestGlobPaths.java create mode 100644 src/test/org/apache/hadoop/fs/TestHarFileSystem.java create mode 100644 src/test/org/apache/hadoop/fs/TestLocalDirAllocator.java create mode 100644 src/test/org/apache/hadoop/fs/TestLocalFileSystem.java create mode 100644 src/test/org/apache/hadoop/fs/TestLocalFileSystemPermission.java create mode 100644 src/test/org/apache/hadoop/fs/TestLocatedStatus.java create mode 100644 src/test/org/apache/hadoop/fs/TestPath.java create mode 100644 src/test/org/apache/hadoop/fs/TestTrash.java create mode 100644 src/test/org/apache/hadoop/fs/TestTruncatedInputBug.java create mode 100644 
src/test/org/apache/hadoop/fs/TestUrlStreamHandler.java create mode 100644 src/test/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java create mode 100644 src/test/org/apache/hadoop/fs/kfs/KFSEmulationImpl.java create mode 100644 src/test/org/apache/hadoop/fs/kfs/TestKosmosFileSystem.java create mode 100644 src/test/org/apache/hadoop/fs/loadGenerator/DataGenerator.java create mode 100644 src/test/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java create mode 100644 src/test/org/apache/hadoop/fs/loadGenerator/StructureGenerator.java create mode 100644 src/test/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java create mode 100644 src/test/org/apache/hadoop/fs/permission/TestFsPermission.java create mode 100644 src/test/org/apache/hadoop/fs/s3/InMemoryFileSystemStore.java create mode 100644 src/test/org/apache/hadoop/fs/s3/Jets3tS3FileSystemContractTest.java create mode 100644 src/test/org/apache/hadoop/fs/s3/S3FileSystemContractBaseTest.java create mode 100644 src/test/org/apache/hadoop/fs/s3/TestINode.java create mode 100644 src/test/org/apache/hadoop/fs/s3/TestInMemoryS3FileSystemContract.java create mode 100644 src/test/org/apache/hadoop/fs/s3/TestS3Credentials.java create mode 100644 src/test/org/apache/hadoop/fs/s3/TestS3FileSystem.java create mode 100644 src/test/org/apache/hadoop/fs/s3native/InMemoryNativeFileSystemStore.java create mode 100644 src/test/org/apache/hadoop/fs/s3native/Jets3tNativeS3FileSystemContractTest.java create mode 100644 src/test/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java create mode 100644 src/test/org/apache/hadoop/fs/s3native/TestInMemoryNativeS3FileSystemContract.java create mode 100644 src/test/org/apache/hadoop/hdfs/AppendTestUtil.java create mode 100644 src/test/org/apache/hadoop/hdfs/BenchmarkThroughput.java create mode 100644 src/test/org/apache/hadoop/hdfs/DFSClientAdapter.java create mode 100644 src/test/org/apache/hadoop/hdfs/DFSTestUtil.java create mode 100644 src/test/org/apache/hadoop/hdfs/DataNodeCluster.java create mode 100644 src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java create mode 100644 src/test/org/apache/hadoop/hdfs/NNBench.java create mode 100644 src/test/org/apache/hadoop/hdfs/NNBenchWithoutMR.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestAbandonBlock.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestBlockMissingException.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestBlockReport.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestBlockReportProcessingTime.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestCrcCorruption.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestDFSClientRetries.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestDFSFinalize.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestDFSMkdirs.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestDFSPermission.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestDFSRemove.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestDFSRename.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestDFSRollback.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestDFSShell.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestDFSStartupVersions.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java create mode 100644 
src/test/org/apache/hadoop/hdfs/TestDFSUpgrade.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestDFSUtil.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestDataTransferProtocol.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestDatanodeDeath.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestDatanodeReport.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestDecommission.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestDistributedFileSystem.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestFSInputChecker.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestFSOutputSummer.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestFileAppend.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestFileAppend2.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestFileAppend3.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestFileCorruption.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestFileCreation.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestFileCreationClient.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestFileCreationEmpty.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestFileCreationNamenodeRestart.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestFileStatus.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestGetBlocks.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestHDFSConcat.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestHDFSServerPorts.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestHDFSTrash.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestHftpFileSystem.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestLargeBlock.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestLease.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestLeaseRecovery2.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestListPathServlet.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestLocalDFS.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestLocatedStatusInDFS.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestModTime.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestPread.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestQuota.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestReplication.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestRestartDFS.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestSafeMode.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestSeekBug.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestSetTimes.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestSetrepDecreasing.java create mode 100644 
src/test/org/apache/hadoop/hdfs/TestSetrepIncreasing.java create mode 100644 src/test/org/apache/hadoop/hdfs/TestSmallBlock.java create mode 100644 src/test/org/apache/hadoop/hdfs/UpgradeUtilities.java create mode 100644 src/test/org/apache/hadoop/hdfs/hadoop-14-dfs-dir.tgz create mode 100644 src/test/org/apache/hadoop/hdfs/hadoop-dfs-dir.txt create mode 100644 src/test/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/common/TestThreadLocalDateFormat.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/datanode/TestParallelBlockScan.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/FSNamesystemAdapter.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/FileNameGenerator.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/HttpServletResponseStub.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestDualRPCServerStartup.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestFileDeleteWhitelist.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestFsck.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestHost2NodesMap.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestNameCache.java create mode 100644 
src/test/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestNameNodePorts.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestPendingReplication.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestStartup.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java create mode 100644 src/test/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java create mode 100644 src/test/org/apache/hadoop/http/TestGlobalFilter.java create mode 100644 src/test/org/apache/hadoop/http/TestHtmlQuoting.java create mode 100644 src/test/org/apache/hadoop/http/TestHttpServer.java create mode 100644 src/test/org/apache/hadoop/http/TestServletFilter.java create mode 100644 src/test/org/apache/hadoop/io/FileBench.java create mode 100644 src/test/org/apache/hadoop/io/RandomDatum.java create mode 100644 src/test/org/apache/hadoop/io/TestArrayFile.java create mode 100644 src/test/org/apache/hadoop/io/TestArrayWritable.java create mode 100644 src/test/org/apache/hadoop/io/TestBloomMapFile.java create mode 100644 src/test/org/apache/hadoop/io/TestBytesWritable.java create mode 100644 src/test/org/apache/hadoop/io/TestDefaultStringifier.java create mode 100644 src/test/org/apache/hadoop/io/TestGenericWritable.java create mode 100644 src/test/org/apache/hadoop/io/TestMD5Hash.java create mode 100644 src/test/org/apache/hadoop/io/TestMapFile.java create mode 100644 src/test/org/apache/hadoop/io/TestMapWritable.java create mode 100644 src/test/org/apache/hadoop/io/TestSequenceFile.java create mode 100644 src/test/org/apache/hadoop/io/TestSequenceFileMergeProgress.java create mode 100644 src/test/org/apache/hadoop/io/TestSequenceFileSerialization.java create mode 100644 src/test/org/apache/hadoop/io/TestSetFile.java create mode 100644 src/test/org/apache/hadoop/io/TestSortedMapWritable.java create mode 100644 src/test/org/apache/hadoop/io/TestText.java create mode 100644 src/test/org/apache/hadoop/io/TestTextNonUTF8.java create mode 100644 src/test/org/apache/hadoop/io/TestUTF8.java create mode 100644 src/test/org/apache/hadoop/io/TestVersionedWritable.java create mode 100644 src/test/org/apache/hadoop/io/TestWritable.java create mode 100644 src/test/org/apache/hadoop/io/TestWritableName.java create mode 100644 src/test/org/apache/hadoop/io/TestWritableUtils.java create mode 100644 src/test/org/apache/hadoop/io/compress/TestCodec.java create mode 100644 src/test/org/apache/hadoop/io/compress/TestCodecFactory.java create mode 100644 src/test/org/apache/hadoop/io/compress/TestGzipCodec.java create mode 100644 src/test/org/apache/hadoop/io/file/tfile/KVGenerator.java create mode 100644 src/test/org/apache/hadoop/io/file/tfile/KeySampler.java create mode 100644 src/test/org/apache/hadoop/io/file/tfile/NanoTimer.java create mode 100644 
src/test/org/apache/hadoop/io/file/tfile/RandomDistribution.java create mode 100644 src/test/org/apache/hadoop/io/file/tfile/TestTFile.java create mode 100644 src/test/org/apache/hadoop/io/file/tfile/TestTFileByteArrays.java create mode 100644 src/test/org/apache/hadoop/io/file/tfile/TestTFileComparators.java create mode 100644 src/test/org/apache/hadoop/io/file/tfile/TestTFileJClassComparatorByteArrays.java create mode 100644 src/test/org/apache/hadoop/io/file/tfile/TestTFileLzoCodecsByteArrays.java create mode 100644 src/test/org/apache/hadoop/io/file/tfile/TestTFileLzoCodecsStreams.java create mode 100644 src/test/org/apache/hadoop/io/file/tfile/TestTFileNoneCodecsByteArrays.java create mode 100644 src/test/org/apache/hadoop/io/file/tfile/TestTFileNoneCodecsJClassComparatorByteArrays.java create mode 100644 src/test/org/apache/hadoop/io/file/tfile/TestTFileNoneCodecsStreams.java create mode 100644 src/test/org/apache/hadoop/io/file/tfile/TestTFileSeek.java create mode 100644 src/test/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java create mode 100644 src/test/org/apache/hadoop/io/file/tfile/TestTFileSplit.java create mode 100644 src/test/org/apache/hadoop/io/file/tfile/TestTFileStreams.java create mode 100644 src/test/org/apache/hadoop/io/file/tfile/TestTFileUnsortedByteArrays.java create mode 100644 src/test/org/apache/hadoop/io/file/tfile/TestVLong.java create mode 100644 src/test/org/apache/hadoop/io/file/tfile/Timer.java create mode 100644 src/test/org/apache/hadoop/io/retry/TestRetryProxy.java create mode 100644 src/test/org/apache/hadoop/io/retry/UnreliableImplementation.java create mode 100644 src/test/org/apache/hadoop/io/retry/UnreliableInterface.java create mode 100644 src/test/org/apache/hadoop/io/serializer/TestWritableSerialization.java create mode 100644 src/test/org/apache/hadoop/io/simpleseekableformat/TestDataSegment.java create mode 100644 src/test/org/apache/hadoop/io/simpleseekableformat/TestInterleavedStreams.java create mode 100644 src/test/org/apache/hadoop/io/simpleseekableformat/TestSimpleSeekableFormatCodec.java create mode 100644 src/test/org/apache/hadoop/io/simpleseekableformat/TestSimpleSeekableFormatStreams.java create mode 100644 src/test/org/apache/hadoop/io/simpleseekableformat/TestUtils.java create mode 100644 src/test/org/apache/hadoop/ipc/ClientAdapter.java create mode 100644 src/test/org/apache/hadoop/ipc/TestIPC.java create mode 100644 src/test/org/apache/hadoop/ipc/TestIPCServerResponder.java create mode 100644 src/test/org/apache/hadoop/ipc/TestRPC.java create mode 100644 src/test/org/apache/hadoop/ipc/TestRPCCompatibility.java create mode 100644 src/test/org/apache/hadoop/ipc/TestSocketFactory.java create mode 100644 src/test/org/apache/hadoop/log/TestLogLevel.java create mode 100644 src/test/org/apache/hadoop/mapred/BigMapOutput.java create mode 100644 src/test/org/apache/hadoop/mapred/ClusterMapReduceTestCase.java create mode 100644 src/test/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java create mode 100644 src/test/org/apache/hadoop/mapred/ControlledMapReduceJob.java create mode 100644 src/test/org/apache/hadoop/mapred/DummyResourceCalculatorPlugin.java create mode 100644 src/test/org/apache/hadoop/mapred/DummyTaskTrackerInstrumentation.java create mode 100644 src/test/org/apache/hadoop/mapred/EmptyInputFormat.java create mode 100644 src/test/org/apache/hadoop/mapred/FakeObjectUtilities.java create mode 100644 src/test/org/apache/hadoop/mapred/GenericMRLoadGenerator.java create mode 100644 
src/test/org/apache/hadoop/mapred/HadoopTestCase.java create mode 100644 src/test/org/apache/hadoop/mapred/MRBench.java create mode 100644 src/test/org/apache/hadoop/mapred/MRCaching.java create mode 100644 src/test/org/apache/hadoop/mapred/MRSharedCaching.java create mode 100644 src/test/org/apache/hadoop/mapred/MiniMRCluster.java create mode 100644 src/test/org/apache/hadoop/mapred/NotificationTestCase.java create mode 100644 src/test/org/apache/hadoop/mapred/ReliabilityTest.java create mode 100644 src/test/org/apache/hadoop/mapred/SortValidator.java create mode 100644 src/test/org/apache/hadoop/mapred/TestBadRecords.java create mode 100644 src/test/org/apache/hadoop/mapred/TestChildTaskDirs.java create mode 100644 src/test/org/apache/hadoop/mapred/TestClusterMRNotification.java create mode 100644 src/test/org/apache/hadoop/mapred/TestClusterMapReduceTestCase.java create mode 100644 src/test/org/apache/hadoop/mapred/TestClusterStatus.java create mode 100644 src/test/org/apache/hadoop/mapred/TestCollect.java create mode 100644 src/test/org/apache/hadoop/mapred/TestCommandLineJobSubmission.java create mode 100644 src/test/org/apache/hadoop/mapred/TestComparators.java create mode 100644 src/test/org/apache/hadoop/mapred/TestCompositeTaskTrackerInstrumentation.java create mode 100644 src/test/org/apache/hadoop/mapred/TestCompressedEmptyMapOutputs.java create mode 100644 src/test/org/apache/hadoop/mapred/TestControlledMapReduceJob.java create mode 100644 src/test/org/apache/hadoop/mapred/TestCounters.java create mode 100644 src/test/org/apache/hadoop/mapred/TestCustomOutputCommitter.java create mode 100644 src/test/org/apache/hadoop/mapred/TestEmptyJob.java create mode 100644 src/test/org/apache/hadoop/mapred/TestFieldSelection.java create mode 100644 src/test/org/apache/hadoop/mapred/TestFileInputFormat.java create mode 100644 src/test/org/apache/hadoop/mapred/TestFileInputFormatPathFilter.java create mode 100644 src/test/org/apache/hadoop/mapred/TestFileOutputCommitter.java create mode 100644 src/test/org/apache/hadoop/mapred/TestFileOutputFormat.java create mode 100644 src/test/org/apache/hadoop/mapred/TestGetSplitHosts.java create mode 100644 src/test/org/apache/hadoop/mapred/TestIFileStreams.java create mode 100644 src/test/org/apache/hadoop/mapred/TestIndexCache.java create mode 100644 src/test/org/apache/hadoop/mapred/TestInputPath.java create mode 100644 src/test/org/apache/hadoop/mapred/TestJavaSerialization.java create mode 100644 src/test/org/apache/hadoop/mapred/TestJobCleanup.java create mode 100644 src/test/org/apache/hadoop/mapred/TestJobClient.java create mode 100644 src/test/org/apache/hadoop/mapred/TestJobCounters.java create mode 100644 src/test/org/apache/hadoop/mapred/TestJobDirCleanup.java create mode 100644 src/test/org/apache/hadoop/mapred/TestJobExecutionAsDifferentUser.java create mode 100644 src/test/org/apache/hadoop/mapred/TestJobHistory.java create mode 100644 src/test/org/apache/hadoop/mapred/TestJobHistoryParsing.java create mode 100644 src/test/org/apache/hadoop/mapred/TestJobHistoryVersion.java create mode 100644 src/test/org/apache/hadoop/mapred/TestJobInProgress.java create mode 100644 src/test/org/apache/hadoop/mapred/TestJobInProgressListener.java create mode 100644 src/test/org/apache/hadoop/mapred/TestJobKillAndFail.java create mode 100644 src/test/org/apache/hadoop/mapred/TestJobName.java create mode 100644 src/test/org/apache/hadoop/mapred/TestJobQueueInformation.java create mode 100644 src/test/org/apache/hadoop/mapred/TestJobQueueTaskScheduler.java 
create mode 100644 src/test/org/apache/hadoop/mapred/TestJobRetire.java create mode 100644 src/test/org/apache/hadoop/mapred/TestJobStatusPersistency.java create mode 100644 src/test/org/apache/hadoop/mapred/TestJobSysDirWithDFS.java create mode 100644 src/test/org/apache/hadoop/mapred/TestJobTrackerRestart.java create mode 100644 src/test/org/apache/hadoop/mapred/TestJobTrackerRestartWithLostTracker.java create mode 100644 src/test/org/apache/hadoop/mapred/TestJobTrackerSafeMode.java create mode 100644 src/test/org/apache/hadoop/mapred/TestJobTrackerStart.java create mode 100644 src/test/org/apache/hadoop/mapred/TestJobTrackerXmlJsp.java create mode 100644 src/test/org/apache/hadoop/mapred/TestKeyValueTextInputFormat.java create mode 100644 src/test/org/apache/hadoop/mapred/TestKillCompletedJob.java create mode 100644 src/test/org/apache/hadoop/mapred/TestKillSubProcesses.java create mode 100644 src/test/org/apache/hadoop/mapred/TestLimitTasksPerJobTaskScheduler.java create mode 100644 src/test/org/apache/hadoop/mapred/TestLocalMRNotification.java create mode 100644 src/test/org/apache/hadoop/mapred/TestLostTracker.java create mode 100644 src/test/org/apache/hadoop/mapred/TestMRServerPorts.java create mode 100644 src/test/org/apache/hadoop/mapred/TestMapCollection.java create mode 100644 src/test/org/apache/hadoop/mapred/TestMapOutputType.java create mode 100644 src/test/org/apache/hadoop/mapred/TestMapRed.java create mode 100644 src/test/org/apache/hadoop/mapred/TestMapredSystemDir.java create mode 100644 src/test/org/apache/hadoop/mapred/TestMiniMRBringup.java create mode 100644 src/test/org/apache/hadoop/mapred/TestMiniMRChildTask.java create mode 100644 src/test/org/apache/hadoop/mapred/TestMiniMRClasspath.java create mode 100644 src/test/org/apache/hadoop/mapred/TestMiniMRDFSCaching.java create mode 100644 src/test/org/apache/hadoop/mapred/TestMiniMRDFSSharedCaching.java create mode 100644 src/test/org/apache/hadoop/mapred/TestMiniMRDFSSort.java create mode 100644 src/test/org/apache/hadoop/mapred/TestMiniMRLocalFS.java create mode 100644 src/test/org/apache/hadoop/mapred/TestMiniMRMapRedDebugScript.java create mode 100644 src/test/org/apache/hadoop/mapred/TestMiniMRTaskTempDir.java create mode 100644 src/test/org/apache/hadoop/mapred/TestMiniMRWithDFS.java create mode 100644 src/test/org/apache/hadoop/mapred/TestMiniMRWithDFSWithDistinctUsers.java create mode 100644 src/test/org/apache/hadoop/mapred/TestMultiFileInputFormat.java create mode 100644 src/test/org/apache/hadoop/mapred/TestMultiFileSplit.java create mode 100644 src/test/org/apache/hadoop/mapred/TestMultipleLevelCaching.java create mode 100644 src/test/org/apache/hadoop/mapred/TestMultipleTextOutputFormat.java create mode 100644 src/test/org/apache/hadoop/mapred/TestNodeBlacklisting.java create mode 100644 src/test/org/apache/hadoop/mapred/TestNodeHealthService.java create mode 100644 src/test/org/apache/hadoop/mapred/TestNodeRefresh.java create mode 100644 src/test/org/apache/hadoop/mapred/TestParallelInitialization.java create mode 100644 src/test/org/apache/hadoop/mapred/TestQueueAclsForCurrentUser.java create mode 100644 src/test/org/apache/hadoop/mapred/TestQueueManager.java create mode 100644 src/test/org/apache/hadoop/mapred/TestRackAwareTaskPlacement.java create mode 100644 src/test/org/apache/hadoop/mapred/TestRecoveryManager.java create mode 100644 src/test/org/apache/hadoop/mapred/TestReduceFetch.java create mode 100644 src/test/org/apache/hadoop/mapred/TestReduceTask.java create mode 100644 
src/test/org/apache/hadoop/mapred/TestResourceEstimation.java create mode 100644 src/test/org/apache/hadoop/mapred/TestSequenceFileAsBinaryInputFormat.java create mode 100644 src/test/org/apache/hadoop/mapred/TestSequenceFileAsBinaryOutputFormat.java create mode 100644 src/test/org/apache/hadoop/mapred/TestSequenceFileAsTextInputFormat.java create mode 100644 src/test/org/apache/hadoop/mapred/TestSequenceFileInputFilter.java create mode 100644 src/test/org/apache/hadoop/mapred/TestSequenceFileInputFormat.java create mode 100644 src/test/org/apache/hadoop/mapred/TestSetupAndCleanupFailure.java create mode 100644 src/test/org/apache/hadoop/mapred/TestSetupWorkDir.java create mode 100644 src/test/org/apache/hadoop/mapred/TestSortedRanges.java create mode 100644 src/test/org/apache/hadoop/mapred/TestSpecialCharactersInOutputPath.java create mode 100644 src/test/org/apache/hadoop/mapred/TestSpeculativeExecution.java create mode 100644 src/test/org/apache/hadoop/mapred/TestStatisticsCollector.java create mode 100644 src/test/org/apache/hadoop/mapred/TestSubmitJob.java create mode 100644 src/test/org/apache/hadoop/mapred/TestTTCpuToTaskSlots.java create mode 100644 src/test/org/apache/hadoop/mapred/TestTTResourceReporting.java create mode 100644 src/test/org/apache/hadoop/mapred/TestTaskCommit.java create mode 100644 src/test/org/apache/hadoop/mapred/TestTaskFail.java create mode 100644 src/test/org/apache/hadoop/mapred/TestTaskLimits.java create mode 100644 src/test/org/apache/hadoop/mapred/TestTaskLogsMonitor.java create mode 100644 src/test/org/apache/hadoop/mapred/TestTaskTrackerInstrumentation.java create mode 100644 src/test/org/apache/hadoop/mapred/TestTaskTrackerMemoryManager.java create mode 100644 src/test/org/apache/hadoop/mapred/TestTextInputFormat.java create mode 100644 src/test/org/apache/hadoop/mapred/TestTextOutputFormat.java create mode 100644 src/test/org/apache/hadoop/mapred/TestTrackerBlacklistAcrossJobs.java create mode 100644 src/test/org/apache/hadoop/mapred/TestUserDefinedCounters.java create mode 100644 src/test/org/apache/hadoop/mapred/TestWritableJobConf.java create mode 100644 src/test/org/apache/hadoop/mapred/ThreadedMapBenchmark.java create mode 100644 src/test/org/apache/hadoop/mapred/UtilsForTests.java create mode 100644 src/test/org/apache/hadoop/mapred/WordCount.java create mode 100644 src/test/org/apache/hadoop/mapred/jobcontrol/JobControlTestUtils.java create mode 100644 src/test/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java create mode 100644 src/test/org/apache/hadoop/mapred/jobcontrol/TestLocalJobControl.java create mode 100644 src/test/org/apache/hadoop/mapred/join/IncomparableKey.java create mode 100644 src/test/org/apache/hadoop/mapred/join/TestDatamerge.java create mode 100644 src/test/org/apache/hadoop/mapred/join/TestTupleWritable.java create mode 100644 src/test/org/apache/hadoop/mapred/lib/TestChainMapReduce.java create mode 100644 src/test/org/apache/hadoop/mapred/lib/TestCombineFileInputFormat.java create mode 100644 src/test/org/apache/hadoop/mapred/lib/TestDelegatingInputFormat.java create mode 100644 src/test/org/apache/hadoop/mapred/lib/TestHarWithCombineFileInputFormat.java create mode 100644 src/test/org/apache/hadoop/mapred/lib/TestKeyFieldBasedComparator.java create mode 100644 src/test/org/apache/hadoop/mapred/lib/TestKeyFieldBasedPartitioner.java create mode 100644 src/test/org/apache/hadoop/mapred/lib/TestKeyFieldHelper.java create mode 100644 src/test/org/apache/hadoop/mapred/lib/TestLineInputFormat.java create mode 100644 
src/test/org/apache/hadoop/mapred/lib/TestMultipleInputs.java create mode 100644 src/test/org/apache/hadoop/mapred/lib/TestMultipleOutputs.java create mode 100644 src/test/org/apache/hadoop/mapred/lib/TestMultithreadedMapRunner.java create mode 100644 src/test/org/apache/hadoop/mapred/lib/TestTotalOrderPartitioner.java create mode 100644 src/test/org/apache/hadoop/mapred/lib/aggregate/AggregatorTests.java create mode 100644 src/test/org/apache/hadoop/mapred/lib/aggregate/TestAggregates.java create mode 100644 src/test/org/apache/hadoop/mapred/lib/db/TestConstructQuery.java create mode 100644 src/test/org/apache/hadoop/mapred/lib/db/TestDBJob.java create mode 100644 src/test/org/apache/hadoop/mapred/pipes/TestPipes.java create mode 100644 src/test/org/apache/hadoop/mapred/pipes/TestPipesAsDifferentUser.java create mode 100644 src/test/org/apache/hadoop/mapred/pipes/WordCountInputFormat.java create mode 100644 src/test/org/apache/hadoop/mapred/sharedTest1/sharedTest.txt create mode 100644 src/test/org/apache/hadoop/mapred/sharedTest1/sharedTest.zip create mode 100644 src/test/org/apache/hadoop/mapred/sharedTest2/sharedTest.txt create mode 100644 src/test/org/apache/hadoop/mapred/test.jar create mode 100644 src/test/org/apache/hadoop/mapred/test.tar create mode 100644 src/test/org/apache/hadoop/mapred/test.tar.gz create mode 100644 src/test/org/apache/hadoop/mapred/test.tgz create mode 100644 src/test/org/apache/hadoop/mapred/test.txt create mode 100644 src/test/org/apache/hadoop/mapred/test.zip create mode 100644 src/test/org/apache/hadoop/mapred/testscript.txt create mode 100644 src/test/org/apache/hadoop/mapreduce/MapReduceTestUtil.java create mode 100644 src/test/org/apache/hadoop/mapreduce/TestChild.java create mode 100644 src/test/org/apache/hadoop/mapreduce/TestMapReduceLocal.java create mode 100644 src/test/org/apache/hadoop/mapreduce/TestNoJobSetupCleanup.java create mode 100644 src/test/org/apache/hadoop/mapreduce/lib/map/TestMultithreadedMapper.java create mode 100644 src/test/org/apache/hadoop/mapreduce/util/TestMRAsyncDiskService.java create mode 100644 src/test/org/apache/hadoop/metrics/TestContextFactory.java create mode 100644 src/test/org/apache/hadoop/net/StaticMapping.java create mode 100644 src/test/org/apache/hadoop/net/TestIPv4AddressTruncationMapping.java create mode 100644 src/test/org/apache/hadoop/net/TestInetSocketAddressFactory.java create mode 100644 src/test/org/apache/hadoop/net/TestNetworkTopology.java create mode 100644 src/test/org/apache/hadoop/net/TestScriptBasedMapping.java create mode 100644 src/test/org/apache/hadoop/net/TestSocketIOWithTimeout.java create mode 100644 src/test/org/apache/hadoop/record/FromCpp.java create mode 100644 src/test/org/apache/hadoop/record/RecordBench.java create mode 100644 src/test/org/apache/hadoop/record/TestBuffer.java create mode 100644 src/test/org/apache/hadoop/record/TestRecordIO.java create mode 100644 src/test/org/apache/hadoop/record/TestRecordMR.java create mode 100644 src/test/org/apache/hadoop/record/TestRecordVersioning.java create mode 100644 src/test/org/apache/hadoop/record/TestRecordWritable.java create mode 100644 src/test/org/apache/hadoop/record/ToCpp.java create mode 100644 src/test/org/apache/hadoop/security/TestAccessControlList.java create mode 100644 src/test/org/apache/hadoop/security/TestPermission.java create mode 100644 src/test/org/apache/hadoop/security/TestUnixUserGroupInformation.java create mode 100644 src/test/org/apache/hadoop/security/authorize/HadoopPolicyProvider.java create mode 100644 
src/test/org/apache/hadoop/security/authorize/TestConfiguredPolicy.java create mode 100644 src/test/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java create mode 100644 src/test/org/apache/hadoop/test/AllTestDriver.java create mode 100644 src/test/org/apache/hadoop/tools/TestDistCh.java create mode 100644 src/test/org/apache/hadoop/tools/TestJMXGet.java create mode 100644 src/test/org/apache/hadoop/tools/rumen/HistogramRawTestData.java create mode 100644 src/test/org/apache/hadoop/tools/rumen/TestHistograms.java create mode 100644 src/test/org/apache/hadoop/tools/rumen/TestPiecewiseLinearInterpolation.java create mode 100644 src/test/org/apache/hadoop/tools/rumen/TestRumenJobTraces.java create mode 100644 src/test/org/apache/hadoop/tools/rumen/TestZombieJob.java create mode 100644 src/test/org/apache/hadoop/util/TestAsyncDiskService.java create mode 100644 src/test/org/apache/hadoop/util/TestCyclicIteration.java create mode 100644 src/test/org/apache/hadoop/util/TestGenericsUtil.java create mode 100644 src/test/org/apache/hadoop/util/TestHostsFileReader.java create mode 100644 src/test/org/apache/hadoop/util/TestIndexedSort.java create mode 100644 src/test/org/apache/hadoop/util/TestProcfsBasedProcessTree.java create mode 100644 src/test/org/apache/hadoop/util/TestReflectionUtils.java create mode 100644 src/test/org/apache/hadoop/util/TestShell.java create mode 100644 src/test/org/apache/hadoop/util/TestStringUtils.java create mode 100644 src/test/testjar/ClassWordCount.java create mode 100644 src/test/testjar/CustomOutputCommitter.java create mode 100644 src/test/testjar/ExternalMapperReducer.java create mode 100644 src/test/testjar/ExternalWritable.java create mode 100644 src/test/testshell/ExternalMapReduce.java create mode 100644 src/test/tools/data/rumen/histogram-tests/gold-minimal.json create mode 100644 src/test/tools/data/rumen/histogram-tests/gold-one-value-many-repeats.json create mode 100644 src/test/tools/data/rumen/histogram-tests/gold-only-one-value.json create mode 100644 src/test/tools/data/rumen/histogram-tests/gold-three-values.json create mode 100644 src/test/tools/data/rumen/histogram-tests/input-minimal.json create mode 100644 src/test/tools/data/rumen/histogram-tests/input-one-value-many-repeats.json create mode 100644 src/test/tools/data/rumen/histogram-tests/input-only-one-value.json create mode 100644 src/test/tools/data/rumen/histogram-tests/input-three-values.json create mode 100644 src/test/tools/data/rumen/small-trace-test/job-tracker-logs-topology-output create mode 100644 src/test/tools/data/rumen/small-trace-test/job-tracker-logs-trace-output.gz create mode 100644 src/test/tools/data/rumen/small-trace-test/sample-job-tracker-logs.gz create mode 100644 src/test/tools/data/rumen/small-trace-test/truncated-job-tracker-log create mode 100644 src/test/tools/data/rumen/small-trace-test/truncated-topology-output create mode 100644 src/test/tools/data/rumen/small-trace-test/truncated-trace-output create mode 100644 src/test/tools/data/rumen/zombie/input-topology.json create mode 100644 src/test/tools/data/rumen/zombie/input-trace.json create mode 100644 src/tools/org/apache/hadoop/tools/DistCh.java create mode 100644 src/tools/org/apache/hadoop/tools/DistCp.java create mode 100644 src/tools/org/apache/hadoop/tools/DistCp_Counter.properties create mode 100644 src/tools/org/apache/hadoop/tools/DistTool.java create mode 100644 src/tools/org/apache/hadoop/tools/HadoopArchives.java create mode 100644 src/tools/org/apache/hadoop/tools/Logalyzer.java 
create mode 100644 src/tools/org/apache/hadoop/tools/rumen/AbstractClusterStory.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/CDFPiecewiseLinearRandomGenerator.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/CDFRandomGenerator.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/ClusterStory.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/ClusterTopologyReader.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/DeepCompare.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/DeepInequalityException.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/HadoopLogsAnalyzer.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/Histogram.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/JobStory.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/JobStoryProducer.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/JobTraceReader.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/JsonObjectMapperParser.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/LogRecordType.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/LoggedDiscreteCDF.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/LoggedJob.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/LoggedLocation.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/LoggedNetworkTopology.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/LoggedSingleRelativeRanking.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/LoggedTask.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/LoggedTaskAttempt.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/MachineNode.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/MapTaskAttemptInfo.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/Node.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/Pair.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/ParsedConfigFile.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/ParsedHost.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/ParsedLine.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/Pre21JobHistoryConstants.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/RackNode.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/ReduceTaskAttemptInfo.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/TaskAttemptInfo.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/TaskInfo.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/TreePath.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/ZombieCluster.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/ZombieJob.java create mode 100644 src/tools/org/apache/hadoop/tools/rumen/ZombieJobProducer.java create mode 100644 src/webapps/datanode/browseBlock.jsp create mode 100644 src/webapps/datanode/browseDirectory.jsp create mode 100644 src/webapps/datanode/tail.jsp create mode 100644 src/webapps/hdfs/corrupt_files.jsp create mode 100644 src/webapps/hdfs/dfshealth.jsp create mode 100644 src/webapps/hdfs/dfsnodelist.jsp create mode 100644 src/webapps/hdfs/dfsnodelist_txt.jsp create mode 100644 src/webapps/hdfs/index.html create mode 100644 src/webapps/hdfs/nn_browsedfscontent.jsp create mode 100644 src/webapps/job/analysejobhistory.jsp create mode 100644 src/webapps/job/gc.jsp create mode 
100644 src/webapps/job/index.html create mode 100644 src/webapps/job/jobblacklistedtrackers.jsp create mode 100644 src/webapps/job/jobcompletionevents.jsp create mode 100644 src/webapps/job/jobconf.jsp create mode 100644 src/webapps/job/jobconf_history.jsp create mode 100644 src/webapps/job/jobdetails.jsp create mode 100644 src/webapps/job/jobdetailshistory.jsp create mode 100644 src/webapps/job/jobdetailsjson.jsp create mode 100644 src/webapps/job/jobfailures.jsp create mode 100644 src/webapps/job/jobhistory.jsp create mode 100644 src/webapps/job/joblogs.jsp create mode 100644 src/webapps/job/jobqueue_details.jsp create mode 100644 src/webapps/job/jobtasks.jsp create mode 100644 src/webapps/job/jobtaskshistory.jsp create mode 100644 src/webapps/job/jobtracker.jsp create mode 100644 src/webapps/job/jobtracker.jspx create mode 100644 src/webapps/job/jobtracker_hmon.jsp create mode 100644 src/webapps/job/jobtracker_txt.jsp create mode 100644 src/webapps/job/jobtrackersdetailsjson.jsp create mode 100644 src/webapps/job/loadhistory.jsp create mode 100644 src/webapps/job/locality.jsp create mode 100644 src/webapps/job/machines.jsp create mode 100644 src/webapps/job/machines_txt.jsp create mode 100644 src/webapps/job/taskdetails.jsp create mode 100644 src/webapps/job/taskdetailshistory.jsp create mode 100644 src/webapps/job/taskstats.jsp create mode 100644 src/webapps/static/hadoop-logo.jpg create mode 100644 src/webapps/static/hadoop.css create mode 100644 src/webapps/static/jobconf.xsl create mode 100644 src/webapps/static/jobtracker.js create mode 100644 src/webapps/task/index.html create mode 100644 src/webapps/task/taskcompletionevents.jsp create mode 100644 src/webapps/task/tasktracker.jsp diff --git a/.eclipse.templates/.classpath b/.eclipse.templates/.classpath new file mode 100644 index 0000000..34e5f3a --- /dev/null +++ b/.eclipse.templates/.classpath @@ -0,0 +1,50 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.eclipse.templates/.externalToolBuilders/Hadoop_Ant_Builder.launch b/.eclipse.templates/.externalToolBuilders/Hadoop_Ant_Builder.launch new file mode 100644 index 0000000..1b944aa --- /dev/null +++ b/.eclipse.templates/.externalToolBuilders/Hadoop_Ant_Builder.launch @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + diff --git a/.eclipse.templates/.project b/.eclipse.templates/.project new file mode 100644 index 0000000..8356099 --- /dev/null +++ b/.eclipse.templates/.project @@ -0,0 +1,27 @@ + + + @PROJECT@ + + + + + + org.eclipse.jdt.core.javabuilder + + + + + org.eclipse.ui.externaltools.ExternalToolBuilder + full,incremental, + + + LaunchConfigHandle + <project>/.externalToolBuilders/Hadoop_Ant_Builder.launch + + + + + + org.eclipse.jdt.core.javanature + + diff --git a/.eclipse.templates/README.txt b/.eclipse.templates/README.txt new file mode 100644 index 0000000..1905042 --- /dev/null +++ b/.eclipse.templates/README.txt @@ -0,0 +1,6 @@ +This directory contains templates for generating Eclipse files to configure +Eclipse for Hadoop development. + +For further information please consult + +http://wiki.apache.org/hadoop/EclipseEnvironment diff --git a/APACHE-README.txt b/APACHE-README.txt new file mode 100644 index 0000000..148cd31 --- /dev/null +++ b/APACHE-README.txt @@ -0,0 +1,31 @@ +For the latest information about Hadoop, please visit our website at: + + http://hadoop.apache.org/core/ + +and our wiki, at: + + http://wiki.apache.org/hadoop/ + +This distribution includes cryptographic software. 
The country in +which you currently reside may have restrictions on the import, +possession, use, and/or re-export to another country, of +encryption software. BEFORE using any encryption software, please +check your country's laws, regulations and policies concerning the +import, possession, or use, and re-export of encryption software, to +see if this is permitted. See for more +information. + +The U.S. Government Department of Commerce, Bureau of Industry and +Security (BIS), has classified this software as Export Commodity +Control Number (ECCN) 5D002.C.1, which includes information security +software using or performing cryptographic functions with asymmetric +algorithms. The form and manner of this Apache Software Foundation +distribution makes it eligible for export under the License Exception +ENC Technology Software Unrestricted (TSU) exception (see the BIS +Export Administration Regulations, Section 740.13) for both object +code and source code. + +The following provides more details on the included cryptographic +software: + Hadoop Core uses the SSL libraries from the Jetty project written +by mortbay.org. diff --git a/CHANGES.txt b/CHANGES.txt new file mode 100644 index 0000000..19535c2 --- /dev/null +++ b/CHANGES.txt @@ -0,0 +1,8552 @@ +Hadoop Change Log + +Release 0.20.1 - Unreleased + + INCOMPATIBLE CHANGES + + HADOOP-5726. Remove pre-emption from capacity scheduler code base. + (Rahul Kumar Singh via yhemanth) + + HADOOP-5881. Simplify memory monitoring and scheduling related + configuration. (Vinod Kumar Vavilapalli via yhemanth) + + NEW FEATURES + + HADOOP-6080. Introduce -skipTrash option to rm and rmr. + (Jakob Homan via shv) + + HADOOP-3315. Add a new, binary file foramt, TFile. (Hong Tang via cdouglas) + + IMPROVEMENTS + + HADOOP-5711. Change Namenode file close log to info. (szetszwo) + + HADOOP-5736. Update the capacity scheduler documentation for features + like memory based scheduling, job initialization and removal of pre-emption. + (Sreekanth Ramakrishnan via yhemanth) + + HADOOP-4674. Fix fs help messages for -test, -text, -tail, -stat + and -touchz options. (Ravi Phulari via szetszwo) + + HADOOP-4372. Improves the way history filenames are obtained and manipulated. + (Amar Kamat via ddas) + + HADOOP-5897. Add name-node metrics to capture java heap usage. + (Suresh Srinivas via shv) + + HDFS-438. Improve help message for space quota command. (Raghu Angadi) + + MAPREDUCE-767. Remove the dependence on the CLI 2.0 snapshot. + (Amar Kamat via ddas) + + HDFS-1111. Introduce getCorruptFileBlocks() for fsck. (Sriram Rao via shv) + + OPTIMIZATIONS + + BUG FIXES + + HADOOP-5691. Makes org.apache.hadoop.mapreduce.Reducer concrete class + instead of abstract. (Amareshwari Sriramadasu via sharad) + + HADOOP-5646. Fixes a problem in TestQueueCapacities. + (Vinod Kumar Vavilapalli via ddas) + + HADOOP-5655. TestMRServerPorts fails on java.net.BindException. (Devaraj + Das via hairong) + + HADOOP-5654. TestReplicationPolicy. fails on java.net.BindException. + (hairong) + + HADOOP-5688. Fix HftpFileSystem checksum path construction. (Tsz Wo + (Nicholas) Sze via cdouglas) + + HADOOP-5213. Fix Null pointer exception caused when bzip2compression + was used and user closed a output stream without writing any data. + (Zheng Shao via dhruba) + + HADOOP-5718. Remove the check for the default queue in capacity scheduler. + (Sreekanth Ramakrishnan via yhemanth) + + HADOOP-5719. Remove jobs that failed initialization from the waiting queue + in the capacity scheduler. 
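A minimal sketch of the -skipTrash option that HADOOP-6080 above adds to rm and rmr, driven here through FsShell from Java rather than the shell; the /tmp/scratch path is hypothetical and the argument order assumes the -rmr [-skipTrash] <src> usage the entry describes.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsShell;
    import org.apache.hadoop.util.ToolRunner;

    public class SkipTrashDelete {
      public static void main(String[] args) throws Exception {
        // Programmatic equivalent of: hadoop fs -rmr -skipTrash /tmp/scratch
        // With -skipTrash the path is deleted immediately rather than being
        // moved into the user's .Trash directory. /tmp/scratch is a
        // hypothetical path used only for illustration.
        int exitCode = ToolRunner.run(new FsShell(new Configuration()),
            new String[] { "-rmr", "-skipTrash", "/tmp/scratch" });
        System.exit(exitCode);
      }
    }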
(Sreekanth Ramakrishnan via yhemanth) + + HADOOP-4744. Attaching another fix to the jetty port issue. The TaskTracker + kills itself if it ever discovers that the port to which jetty is actually + bound is invalid (-1). (ddas) + + HADOOP-5349. Fixes a problem in LocalDirAllocator to check for the return + path value that is returned for the case where the file we want to write + is of an unknown size. (Vinod Kumar Vavilapalli via ddas) + + HADOOP-5636. Prevents a job from going to RUNNING state after it has been + KILLED (this used to happen when the SetupTask would come back with a + success after the job has been killed). (Amar Kamat via ddas) + + HADOOP-5641. Fix a NullPointerException in capacity scheduler's memory + based scheduling code when jobs get retired. (yhemanth) + + HADOOP-5828. Use absolute path for mapred.local.dir of JobTracker in + MiniMRCluster. (yhemanth) + + HADOOP-4981. Fix capacity scheduler to schedule speculative tasks + correctly in the presence of High RAM jobs. + (Sreekanth Ramakrishnan via yhemanth) + + HADOOP-5210. Solves a problem in the progress report of the reduce task. + (Ravi Gummadi via ddas) + + HADOOP-5850. Fixes a problem to do with not being able to jobs with + 0 maps/reduces. (Vinod K V via ddas) + + HADOOP-5728. Fixed FSEditLog.printStatistics IndexOutOfBoundsException. + (Wang Xu via johan) + + HADOOP-4626. Correct the API links in hdfs forrest doc so that they + point to the same version of hadoop. (szetszwo) + + HADOOP-5883. Fixed tasktracker memory monitoring to account for + momentary spurts in memory usage due to java's fork() model. + (yhemanth) + + HADOOP-5539. Fixes a problem to do with not preserving intermediate + output compression for merged data. + (Jothi Padmanabhan and Billy Pearson via ddas) + + HADOOP-5932. Fixes a problem in capacity scheduler in computing + available memory on a tasktracker. + (Vinod Kumar Vavilapalli via yhemanth) + + HADOOP-5648. Fixes a build issue in not being able to generate gridmix.jar + in hadoop binary tarball. (Giridharan Kesavan via gkesavan) + + HADOOP-5908. Fixes a problem to do with ArithmeticException in the + JobTracker when there are jobs with 0 maps. (Amar Kamat via ddas) + + HADOOP-5924. Fixes a corner case problem to do with job recovery with + empty history files. Also, after a JT restart, sends KillTaskAction to + tasks that report back but the corresponding job hasn't been initialized + yet. (Amar Kamat via ddas) + + HADOOP-5882. Fixes a reducer progress update problem for new mapreduce + api. (Amareshwari Sriramadasu via sharad) + + HADOOP-5746. Fixes a corner case problem in Streaming, where if an exception + happens in MROutputThread after the last call to the map/reduce method, the + exception goes undetected. (Amar Kamat via ddas) + + HADOOP-5884. Fixes accounting in capacity scheduler so that high RAM jobs + take more slots. (Vinod Kumar Vavilapalli via yhemanth) + + HADOOP-5937. Correct a safemode message in FSNamesystem. (Ravi Phulari + via szetszwo) + + HADOOP-5869. Fix bug in assignment of setup / cleanup task that was + causing TestQueueCapacities to fail. + (Sreekanth Ramakrishnan via yhemanth) + + HADOOP-5921. Fixes a problem in the JobTracker where it sometimes never used + to come up due to a system file creation on JobTracker's system-dir failing. + This problem would sometimes show up only when the FS for the system-dir + (usually HDFS) is started at nearly the same time as the JobTracker. + (Amar Kamat via ddas) + + HADOOP-5920. 
Fixes a testcase failure for TestJobHistory. + (Amar Kamat via ddas) + + HDFS-26. Better error message to users when commands fail because of + lack of quota. Allow quota to be set even if the limit is lower than + current consumption. (Boris Shkolnik via rangadi) + + MAPREDUCE-2. Fixes a bug in KeyFieldBasedPartitioner in handling empty + keys. (Amar Kamat via sharad) + + MAPREDUCE-130. Delete the jobconf copy from the log directory of the + JobTracker when the job is retired. (Amar Kamat via sharad) + + MAPREDUCE-657. Fix hardcoded filesystem problem in CompletedJobStatusStore. + (Amar Kamat via sharad) + + MAPREDUCE-179. Update progress in new RecordReaders. (cdouglas) + + MAPREDUCE-124. Fix a bug in failure handling of abort task of + OutputCommiter. (Amareshwari Sriramadasu via sharad) + + HADOOP-6139. Fix the FsShell help messages for rm and rmr. (Jakob Homan + via szetszwo) + + HADOOP-6141. Fix a few bugs in 0.20 test-patch.sh. (Hong Tang via + szetszwo) + + HADOOP-6145. Fix FsShell rm/rmr error messages when there is a FNFE. + (Jakob Homan via szetszwo) + + MAPREDUCE-565. Fix partitioner to work with new API. (Owen O'Malley via + cdouglas) + + MAPREDUCE-465. Fix a bug in MultithreadedMapRunner. (Amareshwari + Sriramadasu via sharad) + + MAPREDUCE-18. Puts some checks to detect cases where jetty serves up + incorrect output during shuffle. (Ravi Gummadi via ddas) + + MAPREDUCE-735. Fixes a problem in the KeyFieldHelper to do with + the end index for some inputs (Amar Kamat via ddas) + + HADOOP-6150. Users should be able to instantiate comparator using TFile + API. (Hong Tang via rangadi) + + MAPREDUCE-383. Fix a bug in Pipes combiner due to bytes count not + getting reset after the spill. (Christian Kunz via sharad) + + MAPREDUCE-40. Keep memory management backwards compatible for job + configuration parameters and limits. (Rahul Kumar Singh via yhemanth) + + MAPREDUCE-796. Fixes a ClassCastException in an exception log in + MultiThreadedMapRunner. (Amar Kamat via ddas) + + HDFS-525. The SimpleDateFormat object in ListPathsServlet is not thread + safe. (Suresh Srinivas via szetszwo) + + MAPREDUCE-838. Fixes a problem in the way commit of task outputs + happens. The bug was that even if commit failed, the task would + be declared as successful. (Amareshwari Sriramadasu via ddas) + + MAPREDUCE-805. Fixes some deadlocks in the JobTracker due to the fact + the JobTracker lock hierarchy wasn't maintained in some JobInProgress + method calls. (Amar Kamat via ddas) + + HDFS-167. Fix a bug in DFSClient that caused infinite retries on write. + (Bill Zeller via szetszwo) + + HDFS-527. Remove unnecessary DFSClient constructors. (szetszwo) + + MAPREDUCE-832. Reduce number of warning messages printed when + deprecated memory variables are used. (Rahul Kumar Singh via yhemanth) + + MAPREDUCE-745. Fixes a testcase problem to do with generation of JobTracker + IDs. (Amar Kamat via ddas) + + MAPREDUCE-834. Enables memory management on tasktrackers when old + memory management parameters are used in configuration. + (Sreekanth Ramakrishnan via yhemanth) + + MAPREDUCE-818. Fixes Counters#getGroup API. (Amareshwari Sriramadasu + via sharad) + + MAPREDUCE-807. Handles the AccessControlException during the deletion of + mapred.system.dir in the JobTracker. The JobTracker will bail out if it + encounters such an exception. (Amar Kamat via ddas) + + HADOOP-6213. Remove commons dependency on commons-cli2. (Amar Kamat via + sharad) + + MAPREDUCE-430. 
Fix a bug related to task getting stuck in case of + OOM error. (Amar Kamat via ddas) + +Release 0.20.0 - 2009-04-15 + + INCOMPATIBLE CHANGES + + HADOOP-4210. Fix findbugs warnings for equals implementations of mapred ID + classes. Removed public, static ID::read and ID::forName; made ID an + abstract class. (Suresh Srinivas via cdouglas) + + HADOOP-4253. Fix various warnings generated by findbugs. + Following deprecated methods in RawLocalFileSystem are removed: + public String getName() + public void lock(Path p, boolean shared) + public void release(Path p) + (Suresh Srinivas via johan) + + HADOOP-4618. Move http server from FSNamesystem into NameNode. + FSNamesystem.getNameNodeInfoPort() is removed. + FSNamesystem.getDFSNameNodeMachine() and FSNamesystem.getDFSNameNodePort() + replaced by FSNamesystem.getDFSNameNodeAddress(). + NameNode(bindAddress, conf) is removed. + (shv) + + HADOOP-4567. GetFileBlockLocations returns the NetworkTopology + information of the machines where the blocks reside. (dhruba) + + HADOOP-4435. The JobTracker WebUI displays the amount of heap memory + in use. (dhruba) + + HADOOP-4628. Move Hive into a standalone subproject. (omalley) + + HADOOP-4188. Removes task's dependency on concrete filesystems. + (Sharad Agarwal via ddas) + + HADOOP-1650. Upgrade to Jetty 6. (cdouglas) + + HADOOP-3986. Remove static Configuration from JobClient. (Amareshwari + Sriramadasu via cdouglas) + JobClient::setCommandLineConfig is removed + JobClient::getCommandLineConfig is removed + JobShell, TestJobShell classes are removed + + HADOOP-4422. S3 file systems should not create bucket. + (David Phillips via tomwhite) + + HADOOP-4035. Support memory based scheduling in capacity scheduler. + (Vinod Kumar Vavilapalli via yhemanth) + + HADOOP-3497. Fix bug in overly restrictive file globbing with a + PathFilter. (tomwhite) + + HADOOP-4445. Replace running task counts with running task + percentage in capacity scheduler UI. (Sreekanth Ramakrishnan via + yhemanth) + + HADOOP-4631. Splits the configuration into three parts - one for core, + one for mapred and the last one for HDFS. (Sharad Agarwal via cdouglas) + + HADOOP-3344. Fix libhdfs build to use autoconf and build the same + architecture (32 vs 64 bit) of the JVM running Ant. The libraries for + pipes, utils, and libhdfs are now all in c++//lib. + (Giridharan Kesavan via nigel) + + HADOOP-4874. Remove LZO codec because of licensing issues. (omalley) + + HADOOP-4970. The full path name of a file is preserved inside Trash. + (Prasad Chakka via dhruba) + + HADOOP-4103. NameNode keeps a count of missing blocks. It warns on + WebUI if there are such blocks. '-report' and '-metaSave' have extra + info to track such blocks. (Raghu Angadi) + + HADOOP-4783. Change permissions on history files on the jobtracker + to be only group readable instead of world readable. + (Amareshwari Sriramadasu via yhemanth) + + HADOOP-5531. Removed Chukwa from Hadoop 0.20.0. (nigel) + + NEW FEATURES + + HADOOP-4575. Add a proxy service for relaying HsftpFileSystem requests. + Includes client authentication via user certificates and config-based + access control. (Kan Zhang via cdouglas) + + HADOOP-4661. Add DistCh, a new tool for distributed ch{mod,own,grp}. + (szetszwo) + + HADOOP-4709. Add several new features and bug fixes to Chukwa. 
+ Added Hadoop Infrastructure Care Center (UI for visualize data collected + by Chukwa) + Added FileAdaptor for streaming small file in one chunk + Added compression to archive and demux output + Added unit tests and validation for agent, collector, and demux map + reduce job + Added database loader for loading demux output (sequence file) to jdbc + connected database + Added algorithm to distribute collector load more evenly + (Jerome Boulon, Eric Yang, Andy Konwinski, Ariel Rabkin via cdouglas) + + HADOOP-4179. Add Vaidya tool to analyze map/reduce job logs for performanc + problems. (Suhas Gogate via omalley) + + HADOOP-4029. Add NameNode storage information to the dfshealth page and + move DataNode information to a separated page. (Boris Shkolnik via + szetszwo) + + HADOOP-4348. Add service-level authorization for Hadoop. (acmurthy) + + HADOOP-4826. Introduce admin command saveNamespace. (shv) + + HADOOP-3063 BloomMapFile - fail-fast version of MapFile for sparsely + populated key space (Andrzej Bialecki via stack) + + HADOOP-1230. Add new map/reduce API and deprecate the old one. Generally, + the old code should work without problem. The new api is in + org.apache.hadoop.mapreduce and the old classes in org.apache.hadoop.mapred + are deprecated. Differences in the new API: + 1. All of the methods take Context objects that allow us to add new + methods without breaking compatability. + 2. Mapper and Reducer now have a "run" method that is called once and + contains the control loop for the task, which lets applications + replace it. + 3. Mapper and Reducer by default are Identity Mapper and Reducer. + 4. The FileOutputFormats use part-r-00000 for the output of reduce 0 and + part-m-00000 for the output of map 0. + 5. The reduce grouping comparator now uses the raw compare instead of + object compare. + 6. The number of maps in FileInputFormat is controlled by min and max + split size rather than min size and the desired number of maps. + (omalley) + + HADOOP-3305. Use Ivy to manage dependencies. (Giridharan Kesavan + and Steve Loughran via cutting) + + IMPROVEMENTS + + HADOOP-4565. Added CombineFileInputFormat to use data locality information + to create splits. (dhruba via zshao) + + HADOOP-4749. Added a new counter REDUCE_INPUT_BYTES. (Yongqiang He via + zshao) + + HADOOP-4234. Fix KFS "glue" layer to allow applications to interface + with multiple KFS metaservers. (Sriram Rao via lohit) + + HADOOP-4245. Update to latest version of KFS "glue" library jar. + (Sriram Rao via lohit) + + HADOOP-4244. Change test-patch.sh to check Eclipse classpath no matter + it is run by Hudson or not. (szetszwo) + + HADOOP-3180. Add name of missing class to WritableName.getClass + IOException. (Pete Wyckoff via omalley) + + HADOOP-4178. Make the capacity scheduler's default values configurable. + (Sreekanth Ramakrishnan via omalley) + + HADOOP-4262. Generate better error message when client exception has null + message. (stevel via omalley) + + HADOOP-4226. Refactor and document LineReader to make it more readily + understandable. (Yuri Pradkin via cdouglas) + + HADOOP-4238. When listing jobs, if scheduling information isn't available + print NA instead of empty output. (Sreekanth Ramakrishnan via johan) + + HADOOP-4284. Support filters that apply to all requests, or global filters, + to HttpServer. (Kan Zhang via cdouglas) + + HADOOP-4276. Improve the hashing functions and deserialization of the + mapred ID classes. (omalley) + + HADOOP-4485. Add a compile-native ant task, as a shorthand. 
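To make the new-API description in the HADOOP-1230 entry above concrete, here is a minimal sketch of a mapper written against org.apache.hadoop.mapreduce; the class and field names are illustrative, while the Context-based signature is the one the entry describes.

    import java.io.IOException;
    import java.util.StringTokenizer;

    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;

    // Tokenizes each input line and emits (word, 1) pairs. All interaction with
    // the framework goes through the Context argument, which is what allows new
    // methods to be added later without breaking this signature.
    public class TokenCountMapper
        extends Mapper<LongWritable, Text, Text, IntWritable> {

      private static final IntWritable ONE = new IntWritable(1);
      private final Text word = new Text();

      @Override
      protected void map(LongWritable offset, Text line, Context context)
          throws IOException, InterruptedException {
        StringTokenizer tokens = new StringTokenizer(line.toString());
        while (tokens.hasMoreTokens()) {
          word.set(tokens.nextToken());
          context.write(word, ONE);
        }
      }
      // Mapper also provides a run(Context) method holding the per-task control
      // loop (setup, repeated map() calls, cleanup), which a job may override.
    }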
(enis) + + HADOOP-4454. Allow # comments in slaves file. (Rama Ramasamy via omalley) + + HADOOP-3461. Remove hdfs.StringBytesWritable. (szetszwo) + + HADOOP-4437. Use Halton sequence instead of java.util.Random in + PiEstimator. (szetszwo) + + HADOOP-4572. Change INode and its sub-classes to package private. + (szetszwo) + + HADOOP-4187. Does a runtime lookup for JobConf/JobConfigurable, and if + found, invokes the appropriate configure method. (Sharad Agarwal via ddas) + + HADOOP-4453. Improve ssl configuration and handling in HsftpFileSystem, + particularly when used with DistCp. (Kan Zhang via cdouglas) + + HADOOP-4583. Several code optimizations in HDFS. (Suresh Srinivas via + szetszwo) + + HADOOP-3923. Remove org.apache.hadoop.mapred.StatusHttpServer. (szetszwo) + + HADOOP-4622. Explicitly specify interpretor for non-native + pipes binaries. (Fredrik Hedberg via johan) + + HADOOP-4505. Add a unit test to test faulty setup task and cleanup + task killing the job. (Amareshwari Sriramadasu via johan) + + HADOOP-4608. Don't print a stack trace when the example driver gets an + unknown program to run. (Edward Yoon via omalley) + + HADOOP-4645. Package HdfsProxy contrib project without the extra level + of directories. (Kan Zhang via omalley) + + HADOOP-4126. Allow access to HDFS web UI on EC2 (tomwhite via omalley) + + HADOOP-4612. Removes RunJar's dependency on JobClient. + (Sharad Agarwal via ddas) + + HADOOP-4185. Adds setVerifyChecksum() method to FileSystem. + (Sharad Agarwal via ddas) + + HADOOP-4523. Prevent too many tasks scheduled on a node from bringing + it down by monitoring for cumulative memory usage across tasks. + (Vinod Kumar Vavilapalli via yhemanth) + + HADOOP-4640. Adds an input format that can split lzo compressed + text files. (johan) + + HADOOP-4666. Launch reduces only after a few maps have run in the + Fair Scheduler. (Matei Zaharia via johan) + + HADOOP-4339. Remove redundant calls from FileSystem/FsShell when + generating/processing ContentSummary. (David Phillips via cdouglas) + + HADOOP-2774. Add counters tracking records spilled to disk in MapTask and + ReduceTask. (Ravi Gummadi via cdouglas) + + HADOOP-4513. Initialize jobs asynchronously in the capacity scheduler. + (Sreekanth Ramakrishnan via yhemanth) + + HADOOP-4649. Improve abstraction for spill indices. (cdouglas) + + HADOOP-3770. Add gridmix2, an iteration on the gridmix benchmark. (Runping + Qi via cdouglas) + + HADOOP-4708. Add support for dfsadmin commands in TestCLI. (Boris Shkolnik + via cdouglas) + + HADOOP-4758. Add a splitter for metrics contexts to support more than one + type of collector. (cdouglas) + + HADOOP-4722. Add tests for dfsadmin quota error messages. (Boris Shkolnik + via cdouglas) + + HADOOP-4690. fuse-dfs - create source file/function + utils + config + + main source files. (pete wyckoff via mahadev) + + HADOOP-3750. Fix and enforce module dependencies. (Sharad Agarwal via + tomwhite) + + HADOOP-4747. Speed up FsShell::ls by removing redundant calls to the + filesystem. (David Phillips via cdouglas) + + HADOOP-4305. 
Improves the blacklisting strategy, whereby, tasktrackers + that are blacklisted are not given tasks to run from other jobs, subject + to the following conditions (all must be met): + 1) The TaskTracker has been blacklisted by at least 4 jobs (configurable) + 2) The TaskTracker has been blacklisted 50% more number of times than + the average (configurable) + 3) The cluster has less than 50% trackers blacklisted + Once in 24 hours, a TaskTracker blacklisted for all jobs is given a chance. + Restarting the TaskTracker moves it out of the blacklist. + (Amareshwari Sriramadasu via ddas) + + HADOOP-4688. Modify the MiniMRDFSSort unit test to spill multiple times, + exercising the map-side merge code. (cdouglas) + + HADOOP-4737. Adds the KILLED notification when jobs get killed. + (Amareshwari Sriramadasu via ddas) + + HADOOP-4728. Add a test exercising different namenode configurations. + (Boris Shkolnik via cdouglas) + + HADOOP-4807. Adds JobClient commands to get the active/blacklisted tracker + names. Also adds commands to display running/completed task attempt IDs. + (ddas) + + HADOOP-4699. Remove checksum validation from map output servlet. (cdouglas) + + HADOOP-4838. Added a registry to automate metrics and mbeans management. + (Sanjay Radia via acmurthy) + + HADOOP-3136. Fixed the default scheduler to assign multiple tasks to each + tasktracker per heartbeat, when feasible. To ensure locality isn't hurt + too badly, the scheudler will not assign more than one off-switch task per + heartbeat. The heartbeat interval is also halved since the task-tracker is + fixed to no longer send out heartbeats on each task completion. A + slow-start for scheduling reduces is introduced to ensure that reduces + aren't started till sufficient number of maps are done, else reduces of + jobs whose maps aren't scheduled might swamp the cluster. + Configuration changes to mapred-default.xml: + add mapred.reduce.slowstart.completed.maps + (acmurthy) + + HADOOP-4545. Add example and test case of secondary sort for the reduce. + (omalley) + + HADOOP-4753. Refactor gridmix2 to reduce code duplication. (cdouglas) + + HADOOP-4909. Fix Javadoc and make some of the API more consistent in their + use of the JobContext instead of Configuration. (omalley) + + HADOOP-4830. Add end-to-end test cases for testing queue capacities. + (Vinod Kumar Vavilapalli via yhemanth) + + HADOOP-4980. Improve code layout of capacity scheduler to make it + easier to fix some blocker bugs. (Vivek Ratan via yhemanth) + + HADOOP-4916. Make user/location of Chukwa installation configurable by an + external properties file. (Eric Yang via cdouglas) + + HADOOP-4950. Make the CompressorStream, DecompressorStream, + BlockCompressorStream, and BlockDecompressorStream public to facilitate + non-Hadoop codecs. (omalley) + + HADOOP-4843. Collect job history and configuration in Chukwa. (Eric Yang + via cdouglas) + + HADOOP-5030. Build Chukwa RPM to install into configured directory. (Eric + Yang via cdouglas) + + HADOOP-4828. Updates documents to do with configuration (HADOOP-4631). + (Sharad Agarwal via ddas) + + HADOOP-4939. Adds a test that would inject random failures for tasks in + large jobs and would also inject TaskTracker failures. (ddas) + + HADOOP-4920. Stop storing Forrest output in Subversion. (cutting) + + HADOOP-4944. A configuration file can include other configuration + files. (Rama Ramasamy via dhruba) + + HADOOP-4804. Provide Forrest documentation for the Fair Scheduler. + (Sreekanth Ramakrishnan via yhemanth) + + HADOOP-5248. 
A testcase that checks for the existence of job directory + after the job completes. Fails if it exists. (ddas) + + HADOOP-4664. Introduces multiple job initialization threads, where the + number of threads are configurable via mapred.jobinit.threads. + (Matei Zaharia and Jothi Padmanabhan via ddas) + + HADOOP-4191. Adds a testcase for JobHistory. (Ravi Gummadi via ddas) + + HADOOP-5466. Change documenation CSS style for headers and code. (Corinne + Chandel via szetszwo) + + HADOOP-5275. Add ivy directory and files to built tar. + (Giridharan Kesavan via nigel) + + HADOOP-5468. Add sub-menus to forrest documentation and make some minor + edits. (Corinne Chandel via szetszwo) + + HADOOP-5437. Fix TestMiniMRDFSSort to properly test jvm-reuse. (omalley) + + HADOOP-5521. Removes dependency of TestJobInProgress on RESTART_COUNT + JobHistory tag. (Ravi Gummadi via ddas) + + HADOOP-5714. Add a metric for NameNode getFileInfo operation. (Jakob Homan + via szetszwo) + + OPTIMIZATIONS + + HADOOP-3293. Fixes FileInputFormat to do provide locations for splits + based on the rack/host that has the most number of bytes. + (Jothi Padmanabhan via ddas) + + HADOOP-4683. Fixes Reduce shuffle scheduler to invoke + getMapCompletionEvents in a separate thread. (Jothi Padmanabhan + via ddas) + + BUG FIXES + + HADOOP-5379. CBZip2InputStream to throw IOException on data crc error. + (Rodrigo Schmidt via zshao) + + HADOOP-5326. Fixes CBZip2OutputStream data corruption problem. + (Rodrigo Schmidt via zshao) + + HADOOP-4204. Fix findbugs warnings related to unused variables, naive + Number subclass instantiation, Map iteration, and badly scoped inner + classes. (Suresh Srinivas via cdouglas) + + HADOOP-4207. Update derby jar file to release 10.4.2 release. + (Prasad Chakka via dhruba) + + HADOOP-4325. SocketInputStream.read() should return -1 in case EOF. + (Raghu Angadi) + + HADOOP-4408. FsAction functions need not create new objects. (cdouglas) + + HADOOP-4440. TestJobInProgressListener tests for jobs killed in queued + state (Amar Kamat via ddas) + + HADOOP-4346. Implement blocking connect so that Hadoop is not affected + by selector problem with JDK default implementation. (Raghu Angadi) + + HADOOP-4388. If there are invalid blocks in the transfer list, Datanode + should handle them and keep transferring the remaining blocks. (Suresh + Srinivas via szetszwo) + + HADOOP-4587. Fix a typo in Mapper javadoc. (Koji Noguchi via szetszwo) + + HADOOP-4530. In fsck, HttpServletResponse sendError fails with + IllegalStateException. (hairong) + + HADOOP-4377. Fix a race condition in directory creation in + NativeS3FileSystem. (David Phillips via cdouglas) + + HADOOP-4621. Fix javadoc warnings caused by duplicate jars. (Kan Zhang via + cdouglas) + + HADOOP-4566. Deploy new hive code to support more types. + (Zheng Shao via dhruba) + + HADOOP-4571. Add chukwa conf files to svn:ignore list. (Eric Yang via + szetszwo) + + HADOOP-4589. Correct PiEstimator output messages and improve the code + readability. (szetszwo) + + HADOOP-4650. Correct a mismatch between the default value of + local.cache.size in the config and the source. (Jeff Hammerbacher via + cdouglas) + + HADOOP-4606. Fix cygpath error if the log directory does not exist. + (szetszwo via omalley) + + HADOOP-4141. Fix bug in ScriptBasedMapping causing potential infinite + loop on misconfigured hadoop-site. (Aaron Kimball via tomwhite) + + HADOOP-4691. Correct a link in the javadoc of IndexedSortable. (szetszwo) + + HADOOP-4598. 
'-setrep' command skips under-replicated blocks. (hairong) + + HADOOP-4429. Set defaults for user, group in UnixUserGroupInformation so + login fails more predictably when misconfigured. (Alex Loddengaard via + cdouglas) + + HADOOP-4676. Fix broken URL in blacklisted tasktrackers page. (Amareshwari + Sriramadasu via cdouglas) + + HADOOP-3422 Ganglia counter metrics are all reported with the metric + name "value", so the counter values can not be seen. (Jason Attributor + and Brian Bockelman via stack) + + HADOOP-4704. Fix javadoc typos "the the". (szetszwo) + + HADOOP-4677. Fix semantics of FileSystem::getBlockLocations to return + meaningful values. (Hong Tang via cdouglas) + + HADOOP-4669. Use correct operator when evaluating whether access time is + enabled (Dhruba Borthakur via cdouglas) + + HADOOP-4732. Pass connection and read timeouts in the correct order when + setting up fetch in reduce. (Amareshwari Sriramadasu via cdouglas) + + HADOOP-4558. Fix capacity reclamation in capacity scheduler. + (Amar Kamat via yhemanth) + + HADOOP-4770. Fix rungridmix_2 script to work with RunJar. (cdouglas) + + HADOOP-4738. When using git, the saveVersion script will use only the + commit hash for the version and not the message, which requires escaping. + (cdouglas) + + HADOOP-4576. Show pending job count instead of task count in the UI per + queue in capacity scheduler. (Sreekanth Ramakrishnan via yhemanth) + + HADOOP-4623. Maintain running tasks even if speculative execution is off. + (Amar Kamat via yhemanth) + + HADOOP-4786. Fix broken compilation error in + TestTrackerBlacklistAcrossJobs. (yhemanth) + + HADOOP-4785. Fixes theJobTracker heartbeat to not make two calls to + System.currentTimeMillis(). (Amareshwari Sriramadasu via ddas) + + HADOOP-4792. Add generated Chukwa configuration files to version control + ignore lists. (cdouglas) + + HADOOP-4796. Fix Chukwa test configuration, remove unused components. (Eric + Yang via cdouglas) + + HADOOP-4708. Add binaries missed in the initial checkin for Chukwa. (Eric + Yang via cdouglas) + + HADOOP-4805. Remove black list collector from Chukwa Agent HTTP Sender. + (Eric Yang via cdouglas) + + HADOOP-4837. Move HADOOP_CONF_DIR configuration to chukwa-env.sh (Jerome + Boulon via cdouglas) + + HADOOP-4825. Use ps instead of jps for querying process status in Chukwa. + (Eric Yang via cdouglas) + + HADOOP-4844. Fixed javadoc for + org.apache.hadoop.fs.permission.AccessControlException to document that + it's deprecated in favour of + org.apache.hadoop.security.AccessControlException. (acmurthy) + + HADOOP-4706. Close the underlying output stream in + IFileOutputStream::close. (Jothi Padmanabhan via cdouglas) + + HADOOP-4855. Fixed command-specific help messages for refreshServiceAcl in + DFSAdmin and MRAdmin. (acmurthy) + + HADOOP-4820. Remove unused method FSNamesystem::deleteInSafeMode. (Suresh + Srinivas via cdouglas) + + HADOOP-4698. Lower io.sort.mb to 10 in the tests and raise the junit memory + limit to 512m from 256m. (Nigel Daley via cdouglas) + + HADOOP-4860. Split TestFileTailingAdapters into three separate tests to + avoid contention. (Eric Yang via cdouglas) + + HADOOP-3921. Fixed clover (code coverage) target to work with JDK 6. + (tomwhite via nigel) + + HADOOP-4845. Modify the reduce input byte counter to record only the + compressed size and add a human-readable label. (Yongqiang He via cdouglas) + + HADOOP-4458. Add a test creating symlinks in the working directory. + (Amareshwari Sriramadasu via cdouglas) + + HADOOP-4879. 
Fix org.apache.hadoop.mapred.Counters to correctly define + Object.equals rather than depend on contentEquals api. (omalley via + acmurthy) + + HADOOP-4791. Fix rpm build process for Chukwa. (Eric Yang via cdouglas) + + HADOOP-4771. Correct initialization of the file count for directories + with quotas. (Ruyue Ma via shv) + + HADOOP-4878. Fix eclipse plugin classpath file to point to ivy's resolved + lib directory and added the same to test-patch.sh. (Giridharan Kesavan via + acmurthy) + + HADOOP-4774. Fix default values of some capacity scheduler configuration + items which would otherwise not work on a fresh checkout. + (Sreekanth Ramakrishnan via yhemanth) + + HADOOP-4876. Fix capacity scheduler reclamation by updating count of + pending tasks correctly. (Sreekanth Ramakrishnan via yhemanth) + + HADOOP-4849. Documentation for Service Level Authorization implemented in + HADOOP-4348. (acmurthy) + + HADOOP-4827. Replace Consolidator with Aggregator macros in Chukwa (Eric + Yang via cdouglas) + + HADOOP-4894. Correctly parse ps output in Chukwa jettyCollector.sh. (Ari + Rabkin via cdouglas) + + HADOOP-4892. Close fds out of Chukwa ExecPlugin. (Ari Rabkin via cdouglas) + + HADOOP-4889. Fix permissions in RPM packaging. (Eric Yang via cdouglas) + + HADOOP-4869. Fixes the TT-JT heartbeat to have an explicit flag for + restart apart from the initialContact flag that there was earlier. + (Amareshwari Sriramadasu via ddas) + + HADOOP-4716. Fixes ReduceTask.java to clear out the mapping between + hosts and MapOutputLocation upon a JT restart (Amar Kamat via ddas) + + HADOOP-4880. Removes an unnecessary testcase from TestJobTrackerRestart. + (Amar Kamat via ddas) + + HADOOP-4924. Fixes a race condition in TaskTracker re-init. (ddas) + + HADOOP-4854. Read reclaim capacity interval from capacity scheduler + configuration. (Sreekanth Ramakrishnan via yhemanth) + + HADOOP-4896. HDFS Fsck does not load HDFS configuration. (Raghu Angadi) + + HADOOP-4956. Creates TaskStatus for failed tasks with an empty Counters + object instead of null. (ddas) + + HADOOP-4979. Fix capacity scheduler to block cluster for failed high + RAM requirements across task types. (Vivek Ratan via yhemanth) + + HADOOP-4949. Fix native compilation. (Chris Douglas via acmurthy) + + HADOOP-4787. Fixes the testcase TestTrackerBlacklistAcrossJobs which was + earlier failing randomly. (Amareshwari Sriramadasu via ddas) + + HADOOP-4914. Add description fields to Chukwa init.d scripts (Eric Yang via + cdouglas) + + HADOOP-4884. Make tool tip date format match standard HICC format. (Eric + Yang via cdouglas) + + HADOOP-4925. Make Chukwa sender properties configurable. (Ari Rabkin via + cdouglas) + + HADOOP-4947. Make Chukwa command parsing more forgiving of whitespace. (Ari + Rabkin via cdouglas) + + HADOOP-5026. Make chukwa/bin scripts executable in repository. (Andy + Konwinski via cdouglas) + + HADOOP-4977. Fix a deadlock between the reclaimCapacity and assignTasks + in capacity scheduler. (Vivek Ratan via yhemanth) + + HADOOP-4988. Fix reclaim capacity to work even when there are queues with + no capacity. (Vivek Ratan via yhemanth) + + HADOOP-5065. Remove generic parameters from argument to + setIn/OutputFormatClass so that it works with SequenceIn/OutputFormat. + (cdouglas via omalley) + + HADOOP-4818. Pass user config to instrumentation API. (Eric Yang via + cdouglas) + + HADOOP-4993. Fix Chukwa agent configuration and startup to make it both + more modular and testable. (Ari Rabkin via cdouglas) + + HADOOP-5048. 
Fix capacity scheduler to correctly cleanup jobs that are + killed after initialization, but before running. + (Sreekanth Ramakrishnan via yhemanth) + + HADOOP-4671. Mark loop control variables shared between threads as + volatile. (cdouglas) + + HADOOP-5079. HashFunction inadvertently destroys some randomness + (Jonathan Ellis via stack) + + HADOOP-4999. A failure to write to FsEditsLog results in + IndexOutOfBounds exception. (Boris Shkolnik via rangadi) + + HADOOP-5139. Catch IllegalArgumentException during metrics registration + in RPC. (Hairong Kuang via szetszwo) + + HADOOP-5085. Copying a file to local with Crc throws an exception. + (hairong) + + HADOOP-4759. Removes temporary output directory for failed and + killed tasks by launching special CLEANUP tasks for the same. + (Amareshwari Sriramadasu via ddas) + + HADOOP-5211. Fix check for job completion in TestSetupAndCleanupFailure. + (enis) + + HADOOP-5254. The Configuration class should be able to work with XML + parsers that do not support xmlinclude. (Steve Loughran via dhruba) + + HADOOP-4692. Namenode in infinite loop for replicating/deleting corrupt + blocks. (hairong) + + HADOOP-5255. Fix use of Math.abs to avoid overflow. (Jonathan Ellis via + cdouglas) + + HADOOP-5269. Fixes a problem to do with tasktracker holding on to + FAILED_UNCLEAN or KILLED_UNCLEAN tasks forever. (Amareshwari Sriramadasu + via ddas) + + HADOOP-5214. Fixes a ConcurrentModificationException while the Fairshare + Scheduler accesses the tasktrackers stored by the JobTracker. + (Rahul Kumar Singh via yhemanth) + + HADOOP-5233. Addresses the three issues - Race condition in updating + status, NPE in TaskTracker task localization when the conf file is missing + (HADOOP-5234) and NPE in handling KillTaskAction of a cleanup task + (HADOOP-5235). (Amareshwari Sriramadasu via ddas) + + HADOOP-5247. Introduces a broadcast of KillJobAction to all trackers when + a job finishes. This fixes a bunch of problems to do with NPE when a + completed job is not in memory and a tasktracker comes to the jobtracker + with a status report of a task belonging to that job. (Amar Kamat via ddas) + + HADOOP-5282. Fixed job history logs for task attempts that are + failed by the JobTracker, say due to lost task trackers. (Amar + Kamat via yhemanth) + + HADOOP-4963. Fixes a logging to do with getting the location of + map output file. (Amareshwari Sriramadasu via ddas) + + HADOOP-5292. Fix NPE in KFS::getBlockLocations. (Sriram Rao via lohit) + + HADOOP-5241. Fixes a bug in disk-space resource estimation. Makes + the estimation formula linear where blowUp = + Total-Output/Total-Input. (Sharad Agarwal via ddas) + + HADOOP-5142. Fix MapWritable#putAll to store key/value classes. + (Do??acan G??ney via enis) + + HADOOP-4744. Workaround for jetty6 returning -1 when getLocalPort + is invoked on the connector. The workaround patch retries a few + times before failing. (Jothi Padmanabhan via yhemanth) + + HADOOP-5280. Adds a check to prevent a task state transition from + FAILED to any of UNASSIGNED, RUNNING, COMMIT_PENDING or + SUCCEEDED. (ddas) + + HADOOP-5272. Fixes a problem to do with detecting whether an + attempt is the first attempt of a Task. This affects JobTracker + restart. (Amar Kamat via ddas) + + HADOOP-5306. Fixes a problem to do with logging/parsing the http port of a + lost tracker. Affects JobTracker restart. (Amar Kamat via ddas) + + HADOOP-5111. Fix Job::set* methods to work with generics. (cdouglas) + + HADOOP-5274. Fix gridmix2 dependency on wordcount example. 
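The HADOOP-5255 entry above points at a general Java pitfall: Math.abs(Integer.MIN_VALUE) is still negative, so an abs-then-modulo index can come out negative. The sketch below only illustrates that pitfall and the usual sign-bit mask; it is not the code that patch touches.

    public class SafeModulo {
      // Math.abs(Integer.MIN_VALUE) is still Integer.MIN_VALUE in two's
      // complement, so taking abs before the modulo can yield a negative index:
      static int unsafeBucket(int hash, int buckets) {
        return Math.abs(hash) % buckets;   // -2 when hash = Integer.MIN_VALUE, buckets = 3
      }

      // Masking off the sign bit first keeps the result in [0, buckets):
      static int safeBucket(int hash, int buckets) {
        return (hash & Integer.MAX_VALUE) % buckets;
      }

      public static void main(String[] args) {
        System.out.println(unsafeBucket(Integer.MIN_VALUE, 3)); // prints -2
        System.out.println(safeBucket(Integer.MIN_VALUE, 3));   // prints 0
      }
    }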
(cdouglas) + + HADOOP-5145. Balancer sometimes runs out of memory after running + days or weeks. (hairong) + + HADOOP-5338. Fix jobtracker restart to clear task completion + events cached by tasktrackers forcing them to fetch all events + afresh, thus avoiding missed task completion events on the + tasktrackers. (Amar Kamat via yhemanth) + + HADOOP-4695. Change TestGlobalFilter so that it allows a web page to be + filtered more than once for a single access. (Kan Zhang via szetszwo) + + HADOOP-5298. Change TestServletFilter so that it allows a web page to be + filtered more than once for a single access. (szetszwo) + + HADOOP-5432. Disable ssl during unit tests in hdfsproxy, as it is unused + and causes failures. (cdouglas) + + HADOOP-5416. Correct the shell command "fs -test" forrest doc description. + (Ravi Phulari via szetszwo) + + HADOOP-5327. Fixed job tracker to remove files from system directory on + ACL check failures and also check ACLs on restart. + (Amar Kamat via yhemanth) + + HADOOP-5395. Change the exception message when a job is submitted to an + invalid queue. (Rahul Kumar Singh via yhemanth) + + HADOOP-5276. Fixes a problem to do with updating the start time of + a task when the tracker that ran the task is lost. (Amar Kamat via + ddas) + + HADOOP-5278. Fixes a problem to do with logging the finish time of + a task during recovery (after a JobTracker restart). (Amar Kamat + via ddas) + + HADOOP-5490. Fixes a synchronization problem in the + EagerTaskInitializationListener class. (Jothi Padmanabhan via + ddas) + + HADOOP-5493. The shuffle copier threads return the codecs back to + the pool when the shuffle completes. (Jothi Padmanabhan via ddas) + + HADOOP-5505. Fix JspHelper initialization in the context of + MiniDFSCluster. (Raghu Angadi) + + HADOOP-5414. Fixes IO exception while executing hadoop fs -touchz + fileName by making sure that lease renewal thread exits before dfs + client exits. (hairong) + + HADOOP-5103. FileInputFormat now reuses the clusterMap network + topology object and that brings down the log messages in the + JobClient to do with NetworkTopology.add significantly. (Jothi + Padmanabhan via ddas) + + HADOOP-5483. Fixes a problem in the Directory Cleanup Thread due to which + TestMiniMRWithDFS sometimes used to fail. (ddas) + + HADOOP-5281. Prevent sharing incompatible ZlibCompressor instances between + GzipCodec and DefaultCodec. (cdouglas) + + HADOOP-5463. Balancer throws "Not a host:port pair" unless port is + specified in fs.default.name. (Stuart White via hairong) + + HADOOP-5514. Fix JobTracker metrics and add metrics for waiting, failed + tasks. (cdouglas) + + HADOOP-5516. Fix NullPointerException in TaskMemoryManagerThread + that comes when monitored processes disappear when the thread is + running. (Vinod Kumar Vavilapalli via yhemanth) + + HADOOP-5382. Support combiners in the new context object API. (omalley) + + HADOOP-5471. Fixes a problem to do with updating the log.index file in the + case where a cleanup task is run. (Amareshwari Sriramadasu via ddas) + + HADOOP-5534. Fixed a deadlock in Fair scheduler's servlet. + (Rahul Kumar Singh via yhemanth) + + HADOOP-5328. Fixes a problem in the renaming of job history files during + job recovery. (Amar Kamat via ddas) + + HADOOP-5417. Don't ignore InterruptedExceptions that happen when calling + into rpc. (omalley) + + HADOOP-5320. Add a close() in TestMapReduceLocal. (Jothi Padmanabhan + via szetszwo) + + HADOOP-5520. Fix a typo in disk quota help message.
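As an illustration of the combiner support in the new context-object API noted in the HADOOP-5382 entry above, the sketch below is a sum reducer that can double as a combiner; the class name is hypothetical.

    import java.io.IOException;

    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Reducer;

    // Sums the counts seen for each key. Because the operation is associative
    // and commutative, the same class can be registered as both the combiner
    // and the reducer of a job.
    public class IntSumCombiner
        extends Reducer<Text, IntWritable, Text, IntWritable> {

      private final IntWritable total = new IntWritable();

      @Override
      protected void reduce(Text key, Iterable<IntWritable> counts, Context context)
          throws IOException, InterruptedException {
        int sum = 0;
        for (IntWritable count : counts) {
          sum += count.get();
        }
        total.set(sum);
        context.write(key, total);
      }
    }

On the new Job object it would be registered with job.setCombinerClass(IntSumCombiner.class) alongside job.setReducerClass(IntSumCombiner.class).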
(Ravi Phulari + via szetszwo) + + HADOOP-5519. Remove claims from mapred-default.xml that prime numbers + of tasks are helpful. (Owen O'Malley via szetszwo) + + HADOOP-5484. TestRecoveryManager fails wtih FileAlreadyExistsException. + (Amar Kamat via hairong) + + HADOOP-5564. Limit the JVM heap size in the java command for initializing + JAVA_PLATFORM. (Suresh Srinivas via szetszwo) + + HADOOP-5565. Add API for failing/finalized jobs to the JT metrics + instrumentation. (Jerome Boulon via cdouglas) + + HADOOP-5390. Remove duplicate jars from tarball, src from binary tarball + added by hdfsproxy. (Zhiyong Zhang via cdouglas) + + HADOOP-5066. Building binary tarball should not build docs/javadocs, copy + src, or run jdiff. (Giridharan Kesavan via cdouglas) + + HADOOP-5459. Fix undetected CRC errors where intermediate output is closed + before it has been completely consumed. (cdouglas) + + HADOOP-5571. Remove widening primitive conversion in TupleWritable mask + manipulation. (Jingkei Ly via cdouglas) + + HADOOP-5588. Remove an unnecessary call to listStatus(..) in + FileSystem.globStatusInternal(..). (Hairong Kuang via szetszwo) + + HADOOP-5473. Solves a race condition in killing a task - the state is KILLED + if there is a user request pending to kill the task and the TT reported + the state as SUCCESS. (Amareshwari Sriramadasu via ddas) + + HADOOP-5576. Fix LocalRunner to work with the new context object API in + mapreduce. (Tom White via omalley) + + HADOOP-4374. Installs a shutdown hook in the Task JVM so that log.index is + updated before the JVM exits. Also makes the update to log.index atomic. + (Ravi Gummadi via ddas) + + HADOOP-5577. Add a verbose flag to mapreduce.Job.waitForCompletion to get + the running job's information printed to the user's stdout as it runs. + (omalley) + + HADOOP-5607. Fix NPE in TestCapacityScheduler. (cdouglas) + + HADOOP-5605. All the replicas incorrectly got marked as corrupt. (hairong) + + HADOOP-5337. JobTracker, upon restart, now waits for the TaskTrackers to + join back before scheduling new tasks. This fixes race conditions associated + with greedy scheduling as was the case earlier. (Amar Kamat via ddas) + + HADOOP-5227. Fix distcp so -update and -delete can be meaningfully + combined. (Tsz Wo (Nicholas), SZE via cdouglas) + + HADOOP-5305. Increase number of files and print debug messages in + TestCopyFiles. (szetszwo) + + HADOOP-5548. Add synchronization for JobTracker methods in RecoveryManager. + (Amareshwari Sriramadasu via sharad) + + HADOOP-3810. NameNode seems unstable on a cluster with little space left. + (hairong) + + HADOOP-5068. Fix NPE in TestCapacityScheduler. (Vinod Kumar Vavilapalli + via szetszwo) + + HADOOP-5585. Clear FileSystem statistics between tasks when jvm-reuse + is enabled. (omalley) + + HADOOP-5394. JobTracker might schedule 2 attempts of the same task + with the same attempt id across restarts. (Amar Kamat via sharad) + + HADOOP-5645. After HADOOP-4920 we need a place to checkin + releasenotes.html. (nigel) + +Release 0.19.2 - Unreleased + + BUG FIXES + + HADOOP-5154. Fixes a deadlock in the fairshare scheduler. + (Matei Zaharia via yhemanth) + + HADOOP-5146. Fixes a race condition that causes LocalDirAllocator to miss + files. (Devaraj Das via yhemanth) + + HADOOP-4638. Fixes job recovery to not crash the job tracker for problems + with a single job file. (Amar Kamat via yhemanth) + + HADOOP-5384. Fix a problem that DataNodeCluster creates blocks with + generationStamp == 1. (szetszwo) + + HADOOP-5376. 
Fixes the code handling lost tasktrackers to set the task state + to KILLED_UNCLEAN only for relevant type of tasks. + (Amareshwari Sriramadasu via yhemanth) + + HADOOP-5285. Fixes the issues - (1) obtainTaskCleanupTask checks whether job is + inited before trying to lock the JobInProgress (2) Moves the CleanupQueue class + outside the TaskTracker and makes it a generic class that is used by the + JobTracker also for deleting the paths on the job's output fs. (3) Moves the + references to completedJobStore outside the block where the JobTracker is locked. + (ddas) + + HADOOP-5392. Fixes a problem to do with JT crashing during recovery when + the job files are garbled. (Amar Kamat vi ddas) + + HADOOP-5332. Appending to files is not allowed (by default) unless + dfs.support.append is set to true. (dhruba) + + HADOOP-5333. libhdfs supports appending to files. (dhruba) + + HADOOP-3998. Fix dfsclient exception when JVM is shutdown. (dhruba) + + HADOOP-5440. Fixes a problem to do with removing a taskId from the list + of taskIds that the TaskTracker's TaskMemoryManager manages. + (Amareshwari Sriramadasu via ddas) + + HADOOP-5446. Restore TaskTracker metrics. (cdouglas) + + HADOOP-5449. Fixes the history cleaner thread. + (Amareshwari Sriramadasu via ddas) + + HADOOP-5479. NameNode should not send empty block replication request to + DataNode. (hairong) + + HADOOP-5259. Job with output hdfs:/user//outputpath (no + authority) fails with Wrong FS. (Doug Cutting via hairong) + + HADOOP-5522. Documents the setup/cleanup tasks in the mapred tutorial. + (Amareshwari Sriramadasu via ddas) + + HADOOP-5549. ReplicationMonitor should schedule both replication and + deletion work in one iteration. (hairong) + + HADOOP-5554. DataNodeCluster and CreateEditsLog should create blocks with + the same generation stamp value. (hairong via szetszwo) + + HADOOP-5231. Clones the TaskStatus before passing it to the JobInProgress. + (Amareshwari Sriramadasu via ddas) + + HADOOP-4719. Fix documentation of 'ls' format for FsShell. (Ravi Phulari + via cdouglas) + + HADOOP-5374. Fixes a NPE problem in getTasksToSave method. + (Amareshwari Sriramadasu via ddas) + + HADOOP-4780. Cache the size of directories in DistributedCache, avoiding + long delays in recalculating it. (He Yongqiang via cdouglas) + + HADOOP-5551. Prevent directory destruction on file create. + (Brian Bockelman via shv) + + HADOOP-5671. Fix FNF exceptions when copying from old versions of + HftpFileSystem. (Tsz Wo (Nicholas), SZE via cdouglas) + + HADOOP-5579. Set errno correctly in libhdfs for permission, quota, and FNF + conditions. (Brian Bockelman via cdouglas) + + HADOOP-5816. Fixes a problem in the KeyFieldBasedComparator to do with + ArrayIndexOutOfBounds exception. (He Yongqiang via ddas) + + HADOOP-5951. Add Apache license header to StorageInfo.java. (Suresh + Srinivas via szetszwo) + +Release 0.19.1 - 2009-02-23 + + IMPROVEMENTS + + HADOOP-4739. Fix spelling and grammar, improve phrasing of some sections in + mapred tutorial. (Vivek Ratan via cdouglas) + + HADOOP-3894. DFSClient logging improvements. (Steve Loughran via shv) + + HADOOP-5126. Remove empty file BlocksWithLocations.java (shv) + + HADOOP-5127. Remove public methods in FSDirectory. (Jakob Homan via shv) + + BUG FIXES + + HADOOP-4697. Fix getBlockLocations in KosmosFileSystem to handle multiple + blocks correctly. (Sriram Rao via cdouglas) + + HADOOP-4420. Add null checks for job, caused by invalid job IDs. + (Aaron Kimball via tomwhite) + + HADOOP-4632. 
Fix TestJobHistoryVersion to use test.build.dir instead of the + current working directory for scratch space. (Amar Kamat via cdouglas) + + HADOOP-4508. Fix FSDataOutputStream.getPos() for append. (dhruba via + szetszwo) + + HADOOP-4727. Fix a group checking bug in fill_stat_structure(...) in + fuse-dfs. (Brian Bockelman via szetszwo) + + HADOOP-4836. Correct typos in mapred related documentation. (Jordà Polo + via szetszwo) + + HADOOP-4821. Usage description in the Quotas guide documentation is + incorrect. (Boris Shkolnik via hairong) + + HADOOP-4847. Moves the loading of OutputCommitter to the Task. + (Amareshwari Sriramadasu via ddas) + + HADOOP-4966. Marks completed setup tasks for removal. + (Amareshwari Sriramadasu via ddas) + + HADOOP-4982. TestFsck should run in Eclipse. (shv) + + HADOOP-5008. TestReplication#testPendingReplicationRetry leaves an opened + fd unclosed. (hairong) + + HADOOP-4906. Fix TaskTracker OOM by keeping a shallow copy of JobConf in + TaskTracker.TaskInProgress. (Sharad Agarwal via acmurthy) + + HADOOP-4918. Fix bzip2 compression to work with Sequence Files. + (Zheng Shao via dhruba). + + HADOOP-4965. TestFileAppend3 should close FileSystem. (shv) + + HADOOP-4967. Fixes a race condition in the JvmManager to do with killing + tasks. (ddas) + + HADOOP-5009. DataNode#shutdown sometimes leaves data block scanner + verification log unclosed. (hairong) + + HADOOP-5086. Use the appropriate FileSystem for trash URIs. (cdouglas) + + HADOOP-4955. Make DBOutputFormat use column names from setOutput(). + (Kevin Peterson via enis) + + HADOOP-4862. Minor : HADOOP-3678 did not remove all the cases of + spurious IOExceptions logged by DataNode. (Raghu Angadi) + + HADOOP-5034. NameNode should send both replication and deletion requests + to DataNode in one reply to a heartbeat. (hairong) + + HADOOP-5156. TestHeartbeatHandling uses MiniDFSCluster.getNamesystem() + which does not exist in branch 0.19 and 0.20. (hairong) + + HADOOP-5161. Accepted sockets do not get placed in + DataXceiverServer#childSockets. (hairong) + + HADOOP-5193. Correct calculation of edits modification time. (shv) + + HADOOP-4494. Allow libhdfs to append to files. + (Pete Wyckoff via dhruba) + + HADOOP-5166. Fix JobTracker restart to work when ACLs are configured + for the JobTracker. (Amar Kamat via yhemanth). + + HADOOP-5067. Fixes TaskInProgress.java to keep track of count of failed and + killed tasks correctly. (Amareshwari Sriramadasu via ddas) + + HADOOP-4760. HDFS streams should not throw exceptions when closed twice. + (enis) + +Release 0.19.0 - 2008-11-18 + + INCOMPATIBLE CHANGES + + HADOOP-3595. Remove deprecated methods for mapred.combine.once + functionality, which was necessary to provide backwards + compatible combiner semantics for 0.18. (cdouglas via omalley) + + HADOOP-3667. Remove the following deprecated methods from JobConf: + addInputPath(Path) + getInputPaths() + getMapOutputCompressionType() + getOutputPath() + getSystemDir() + setInputPath(Path) + setMapOutputCompressionType(CompressionType style) + setOutputPath(Path) + (Amareshwari Sriramadasu via omalley) + + HADOOP-3652. Remove deprecated class OutputFormatBase. + (Amareshwari Sriramadasu via cdouglas) + + HADOOP-2885. Break the hadoop.dfs package into separate packages under + hadoop.hdfs that reflect whether they are client, server, protocol, + etc. DistributedFileSystem and DFSClient have moved and are now + considered package private. (Sanjay Radia via omalley) + + HADOOP-2325. Require Java 6. (cutting) + + HADOOP-372. 
Add support for multiple input paths with a different + InputFormat and Mapper for each path. (Chris Smith via tomwhite) + + HADOOP-1700. Support appending to file in HDFS. (dhruba) + + HADOOP-3792. Make FsShell -test consistent with unix semantics, returning + zero for true and non-zero for false. (Ben Slusky via cdouglas) + + HADOOP-3664. Remove the deprecated method InputFormat.validateInput, + which is no longer needed. (tomwhite via omalley) + + HADOOP-3549. Give more meaningful errno's in libhdfs. In particular, + EACCES is returned for permission problems. (Ben Slusky via omalley) + + HADOOP-4036. ResourceStatus was added to TaskTrackerStatus by HADOOP-3759, + so increment the InterTrackerProtocol version. (Hemanth Yamijala via + omalley) + + HADOOP-3150. Moves task promotion to tasks. Defines a new interface for + committing output files. Moves job setup to jobclient, and moves jobcleanup + to a separate task. (Amareshwari Sriramadasu via ddas) + + HADOOP-3446. Keep map outputs in memory during the reduce. Remove + fs.inmemory.size.mb and replace with properties defining in memory map + output retention during the shuffle and reduce relative to maximum heap + usage. (cdouglas) + + HADOOP-3245. Adds the feature for supporting JobTracker restart. Running + jobs can be recovered from the history file. The history file format has + been modified to support recovery. The task attempt ID now has the + JobTracker start time to disinguish attempts of the same TIP across + restarts. (Amar Ramesh Kamat via ddas) + + HADOOP-4007. REMOVE DFSFileInfo - FileStatus is sufficient. + (Sanjay Radia via hairong) + + HADOOP-3722. Fixed Hadoop Streaming and Hadoop Pipes to use the Tool + interface and GenericOptionsParser. (Enis Soztutar via acmurthy) + + HADOOP-2816. Cluster summary at name node web reports the space + utilization as: + Configured Capacity: capacity of all the data directories - Reserved space + Present Capacity: Space available for dfs,i.e. remaining+used space + DFS Used%: DFS used space/Present Capacity + (Suresh Srinivas via hairong) + + HADOOP-3938. Disk space quotas for HDFS. This is similar to namespace + quotas in 0.18. (rangadi) + + HADOOP-4293. Make Configuration Writable and remove unreleased + WritableJobConf. Configuration.write is renamed to writeXml. (omalley) + + HADOOP-4281. Change dfsadmin to report available disk space in a format + consistent with the web interface as defined in HADOOP-2816. (Suresh + Srinivas via cdouglas) + + HADOOP-4430. Further change the cluster summary at name node web that was + changed in HADOOP-2816: + Non DFS Used - This indicates the disk space taken by non DFS file from + the Configured capacity + DFS Used % - DFS Used % of Configured Capacity + DFS Remaining % - Remaing % Configured Capacity available for DFS use + DFS command line report reflects the same change. Config parameter + dfs.datanode.du.pct is no longer used and is removed from the + hadoop-default.xml. (Suresh Srinivas via hairong) + + HADOOP-4116. Balancer should provide better resource management. (hairong) + + HADOOP-4599. BlocksMap and BlockInfo made package private. (shv) + + NEW FEATURES + + HADOOP-3341. Allow streaming jobs to specify the field separator for map + and reduce input and output. The new configuration values are: + stream.map.input.field.separator + stream.map.output.field.separator + stream.reduce.input.field.separator + stream.reduce.output.field.separator + All of them default to "\t". (Zheng Shao via omalley) + + HADOOP-3479. 
Defines the configuration file for the resource manager in + Hadoop. You can configure various parameters related to scheduling, such + as queues and queue properties here. The properties for a queue follow a + naming convention,such as, hadoop.rm.queue.queue-name.property-name. + (Hemanth Yamijala via ddas) + + HADOOP-3149. Adds a way in which map/reducetasks can create multiple + outputs. (Alejandro Abdelnur via ddas) + + HADOOP-3714. Add a new contrib, bash-tab-completion, which enables + bash tab completion for the bin/hadoop script. See the README file + in the contrib directory for the installation. (Chris Smith via enis) + + HADOOP-3730. Adds a new JobConf constructor that disables loading + default configurations. (Alejandro Abdelnur via ddas) + + HADOOP-3772. Add a new Hadoop Instrumentation api for the JobTracker and + the TaskTracker, refactor Hadoop Metrics as an implementation of the api. + (Ari Rabkin via acmurthy) + + HADOOP-2302. Provides a comparator for numerical sorting of key fields. + (ddas) + + HADOOP-153. Provides a way to skip bad records. (Sharad Agarwal via ddas) + + HADOOP-657. Free disk space should be modelled and used by the scheduler + to make scheduling decisions. (Ari Rabkin via omalley) + + HADOOP-3719. Initial checkin of Chukwa, which is a data collection and + analysis framework. (Jerome Boulon, Andy Konwinski, Ari Rabkin, + and Eric Yang) + + HADOOP-3873. Add -filelimit and -sizelimit options to distcp to cap the + number of files/bytes copied in a particular run to support incremental + updates and mirroring. (TszWo (Nicholas), SZE via cdouglas) + + HADOOP-3585. FailMon package for hardware failure monitoring and + analysis of anomalies. (Ioannis Koltsidas via dhruba) + + HADOOP-1480. Add counters to the C++ Pipes API. (acmurthy via omalley) + + HADOOP-3854. Add support for pluggable servlet filters in the HttpServers. + (Tsz Wo (Nicholas) Sze via omalley) + + HADOOP-3759. Provides ability to run memory intensive jobs without + affecting other running tasks on the nodes. (Hemanth Yamijala via ddas) + + HADOOP-3746. Add a fair share scheduler. (Matei Zaharia via omalley) + + HADOOP-3754. Add a thrift interface to access HDFS. (dhruba via omalley) + + HADOOP-3828. Provides a way to write skipped records to DFS. + (Sharad Agarwal via ddas) + + HADOOP-3948. Separate name-node edits and fsimage directories. + (Lohit Vijayarenu via shv) + + HADOOP-3939. Add an option to DistCp to delete files at the destination + not present at the source. (Tsz Wo (Nicholas) Sze via cdouglas) + + HADOOP-3601. Add a new contrib module for Hive, which is a sql-like + query processing tool that uses map/reduce. (Ashish Thusoo via omalley) + + HADOOP-3866. Added sort and multi-job updates in the JobTracker web ui. + (Craig Weisenfluh via omalley) + + HADOOP-3698. Add access control to control who is allowed to submit or + modify jobs in the JobTracker. (Hemanth Yamijala via omalley) + + HADOOP-1869. Support access times for HDFS files. (dhruba) + + HADOOP-3941. Extend FileSystem API to return file-checksums. + (szetszwo) + + HADOOP-3581. Prevents memory intensive user tasks from taking down + nodes. (Vinod K V via ddas) + + HADOOP-3970. Provides a way to recover counters written to JobHistory. + (Amar Kamat via ddas) + + HADOOP-3702. Adds ChainMapper and ChainReducer classes allow composing + chains of Maps and Reduces in a single Map/Reduce job, something like + MAP+ / REDUCE MAP*. (Alejandro Abdelnur via ddas) + + HADOOP-3445. 
Add capacity scheduler that provides guaranteed capacities to + queues as a percentage of the cluster. (Vivek Ratan via omalley) + + HADOOP-3992. Add a synthetic load generation facility to the test + directory. (hairong via szetszwo) + + HADOOP-3981. Implement a distributed file checksum algorithm in HDFS + and change DistCp to use file checksum for comparing src and dst files + (szetszwo) + + HADOOP-3829. Narrow down skipped records based on user acceptable value. + (Sharad Agarwal via ddas) + + HADOOP-3930. Add common interfaces for the pluggable schedulers and the + cli & gui clients. (Sreekanth Ramakrishnan via omalley) + + HADOOP-4176. Implement getFileChecksum(Path) in HftpFileSystem. (szetszwo) + + HADOOP-249. Reuse JVMs across Map-Reduce Tasks. + Configuration changes to hadoop-default.xml: + add mapred.job.reuse.jvm.num.tasks + (Devaraj Das via acmurthy) + + HADOOP-4070. Provide a mechanism in Hive for registering UDFs from the + query language. (tomwhite) + + HADOOP-2536. Implement a JDBC based database input and output formats to + allow Map-Reduce applications to work with databases. (Fredrik Hedberg and + Enis Soztutar via acmurthy) + + HADOOP-3019. A new library to support total order partitions. + (cdouglas via omalley) + + HADOOP-3924. Added a 'KILLED' job status. (Subramaniam Krishnan via + acmurthy) + + IMPROVEMENTS + + HADOOP-4205. hive: metastore and ql to use the refactored SerDe library. + (zshao) + + HADOOP-4106. libhdfs: add time, permission and user attribute support + (part 2). (Pete Wyckoff through zshao) + + HADOOP-4104. libhdfs: add time, permission and user attribute support. + (Pete Wyckoff through zshao) + + HADOOP-3908. libhdfs: better error message if libhdfs.so doesn't exist. + (Pete Wyckoff through zshao) + + HADOOP-3732. Delay initialization of datanode block verification till + the verification thread is started. (rangadi) + + HADOOP-1627. Various small improvements to 'dfsadmin -report' output. + (rangadi) + + HADOOP-3577. Tools to inject blocks into name node and simulated + data nodes for testing. (Sanjay Radia via hairong) + + HADOOP-2664. Add a lzop compatible codec, so that files compressed by lzop + may be processed by map/reduce. (cdouglas via omalley) + + HADOOP-3655. Add additional ant properties to control junit. (Steve + Loughran via omalley) + + HADOOP-3543. Update the copyright year to 2008. (cdouglas via omalley) + + HADOOP-3587. Add a unit test for the contrib/data_join framework. + (cdouglas) + + HADOOP-3402. Add terasort example program (omalley) + + HADOOP-3660. Add replication factor for injecting blocks in simulated + datanodes. (Sanjay Radia via cdouglas) + + HADOOP-3684. Add a cloning function to the contrib/data_join framework + permitting users to define a more efficient method for cloning values from + the reduce than serialization/deserialization. (Runping Qi via cdouglas) + + HADOOP-3478. Improves the handling of map output fetching. Now the + randomization is by the hosts (and not the map outputs themselves). + (Jothi Padmanabhan via ddas) + + HADOOP-3617. Removed redundant checks of accounting space in MapTask and + makes the spill thread persistent so as to avoid creating a new one for + each spill. (Chris Douglas via acmurthy) + + HADOOP-3412. Factor the scheduler out of the JobTracker and make + it pluggable. (Tom White and Brice Arnould via omalley) + + HADOOP-3756. Minor. Remove unused dfs.client.buffer.dir from + hadoop-default.xml. (rangadi) + + HADOOP-3747. Adds counter support for MultipleOutputs. 
+ (Alejandro Abdelnur via ddas) + + HADOOP-3169. LeaseChecker daemon should not be started in DFSClient + constructor. (TszWo (Nicholas), SZE via hairong) + + HADOOP-3824. Move base functionality of StatusHttpServer to a core + package. (TszWo (Nicholas), SZE via cdouglas) + + HADOOP-3646. Add a bzip2 compatible codec, so bzip compressed data + may be processed by map/reduce. (Abdul Qadeer via cdouglas) + + HADOOP-3861. MapFile.Reader and Writer should implement Closeable. + (tomwhite via omalley) + + HADOOP-3791. Introduce generics into ReflectionUtils. (Chris Smith via + cdouglas) + + HADOOP-3694. Improve unit test performance by changing + MiniDFSCluster to listen only on 127.0.0.1. (cutting) + + HADOOP-3620. Namenode should synchronously resolve a datanode's network + location when the datanode registers. (hairong) + + HADOOP-3860. NNThroughputBenchmark is extended with rename and delete + benchmarks. (shv) + + HADOOP-3892. Include unix group name in JobConf. (Matei Zaharia via johan) + + HADOOP-3875. Change the time period between heartbeats to be relative to + the end of the heartbeat rpc, rather than the start. This causes better + behavior if the JobTracker is overloaded. (acmurthy via omalley) + + HADOOP-3853. Move multiple input format (HADOOP-372) extension to + library package. (tomwhite via johan) + + HADOOP-9. Use roulette scheduling for temporary space when the size + is not known. (Ari Rabkin via omalley) + + HADOOP-3202. Use recursive delete rather than FileUtil.fullyDelete. + (Amareshwari Sriramadasu via omalley) + + HADOOP-3368. Remove common-logging.properties from conf. (Steve Loughran + via omalley) + + HADOOP-3851. Fix spelling mistake in FSNamesystemMetrics. (Steve Loughran + via omalley) + + HADOOP-3780. Remove asynchronous resolution of network topology in the + JobTracker (Amar Kamat via omalley) + + HADOOP-3852. Add ShellCommandExecutor.toString method to make nicer + error messages. (Steve Loughran via omalley) + + HADOOP-3844. Include message of local exception in RPC client failures. + (Steve Loughran via omalley) + + HADOOP-3935. Split out inner classes from DataNode.java. (johan) + + HADOOP-3905. Create generic interfaces for edit log streams. (shv) + + HADOOP-3062. Add metrics to DataNode and TaskTracker to record network + traffic for HDFS reads/writes and MR shuffling. (cdouglas) + + HADOOP-3742. Remove HDFS from public java doc and add javadoc-dev for + generative javadoc for developers. (Sanjay Radia via omalley) + + HADOOP-3944. Improve documentation for public TupleWritable class in + join package. (Chris Douglas via enis) + + HADOOP-2330. Preallocate HDFS transaction log to improve performance. + (dhruba and hairong) + + HADOOP-3965. Convert DataBlockScanner into a package private class. (shv) + + HADOOP-3488. Prevent hadoop-daemon from rsync'ing log files (Stefan + Groshupf and Craig Macdonald via omalley) + + HADOOP-3342. Change the kill task actions to require http post instead of + get to prevent accidental crawls from triggering it. (enis via omalley) + + HADOOP-3937. Limit the job name in the job history filename to 50 + characters. (Matei Zaharia via omalley) + + HADOOP-3943. Remove unnecessary synchronization in + NetworkTopology.pseudoSortByDistance. (hairong via omalley) + + HADOOP-3498. File globbing alternation should be able to span path + components. (tomwhite) + + HADOOP-3361. Implement renames for NativeS3FileSystem. + (Albert Chern via tomwhite) + + HADOOP-3605. Make EC2 scripts show an error message if AWS_ACCOUNT_ID is + unset. 
(Al Hoang via tomwhite) + + HADOOP-4147. Remove unused class JobWithTaskContext from class + JobInProgress. (Amareshwari Sriramadasu via johan) + + HADOOP-4151. Add a byte-comparable interface that both Text and + BytesWritable implement. (cdouglas via omalley) + + HADOOP-4174. Move fs image/edit log methods from ClientProtocol to + NamenodeProtocol. (shv via szetszwo) + + HADOOP-4181. Include a .gitignore and saveVersion.sh change to support + developing under git. (omalley) + + HADOOP-4186. Factor LineReader out of LineRecordReader. (tomwhite via + omalley) + + HADOOP-4184. Break the module dependencies between core, hdfs, and + mapred. (tomwhite via omalley) + + HADOOP-4075. test-patch.sh now spits out ant commands that it runs. + (Ramya R via nigel) + + HADOOP-4117. Improve configurability of Hadoop EC2 instances. + (tomwhite) + + HADOOP-2411. Add support for larger CPU EC2 instance types. + (Chris K Wensel via tomwhite) + + HADOOP-4083. Changed the configuration attribute queue.name to + mapred.job.queue.name. (Hemanth Yamijala via acmurthy) + + HADOOP-4194. Added the JobConf and JobID to job-related methods in + JobTrackerInstrumentation for better metrics. (Mac Yang via acmurthy) + + HADOOP-3975. Change test-patch script to report the working dir + modifications preventing the suite from being run. (Ramya R via cdouglas) + + HADOOP-4124. Added a command-line switch to allow users to set job + priorities, also allow it to be manipulated via the web-ui. (Hemanth + Yamijala via acmurthy) + + HADOOP-2165. Augmented JobHistory to include the URIs to the tasks' + userlogs. (Vinod Kumar Vavilapalli via acmurthy) + + HADOOP-4062. Remove the synchronization on the output stream when a + connection is closed and also remove an undesirable exception when + a client is stopped while there is no pending RPC request. (hairong) + + HADOOP-4227. Remove the deprecated class org.apache.hadoop.fs.ShellCommand. + (szetszwo) + + HADOOP-4006. Clean up FSConstants and move some of the constants to + better places. (Sanjay Radia via rangadi) + + HADOOP-4279. Trace the seeds of random sequences in append unit tests to + make intermittent failures reproducible. (szetszwo via cdouglas) + + HADOOP-4209. Remove the change to the format of task attempt id by + incrementing the task attempt numbers by 1000 when the job restarts. + (Amar Kamat via omalley) + + HADOOP-4301. Adds forrest doc for the skip bad records feature. + (Sharad Agarwal via ddas) + + HADOOP-4354. Separate TestDatanodeDeath.testDatanodeDeath() into 4 tests. + (szetszwo) + + HADOOP-3790. Add more unit tests for testing HDFS file append. (szetszwo) + + HADOOP-4321. Include documentation for the capacity scheduler. (Hemanth + Yamijala via omalley) + + HADOOP-4424. Change menu layout for Hadoop documentation (Boris Shkolnik + via cdouglas). + + HADOOP-4438. Update forrest documentation to include missing FsShell + commands. (Suresh Srinivas via cdouglas) + + HADOOP-4105. Add forrest documentation for libhdfs. + (Pete Wyckoff via cutting) + + HADOOP-4510. Make getTaskOutputPath public. (Chris Wensel via omalley) + + OPTIMIZATIONS + + HADOOP-3556. Removed lock contention in MD5Hash by changing the + singleton MessageDigester by an instance per Thread using + ThreadLocal. (Iván de Prado via omalley) + + HADOOP-3328. When client is writing data to DFS, only the last + datanode in the pipeline needs to verify the checksum. Saves around + 30% CPU on intermediate datanodes. (rangadi) + + HADOOP-3863. 
Use a thread-local string encoder rather than a static one + that is protected by a lock. (acmurthy via omalley) + + HADOOP-3864. Prevent the JobTracker from locking up when a job is being + initialized. (acmurthy via omalley) + + HADOOP-3816. Faster directory listing in KFS. (Sriram Rao via omalley) + + HADOOP-2130. Pipes submit job should have both blocking and non-blocking + versions. (acmurthy via omalley) + + HADOOP-3769. Make the SampleMapper and SampleReducer from + GenericMRLoadGenerator public, so they can be used in other contexts. + (Lingyun Yang via omalley) + + HADOOP-3514. Inline the CRCs in intermediate files as opposed to reading + it from a different .crc file. (Jothi Padmanabhan via ddas) + + HADOOP-3638. Caches the iFile index files in memory to reduce seeks + (Jothi Padmanabhan via ddas) + + HADOOP-4225. FSEditLog.logOpenFile() should persist accessTime + rather than modificationTime. (shv) + + HADOOP-4380. Made several new classes (Child, JVMId, + JobTrackerInstrumentation, QueueManager, ResourceEstimator, + TaskTrackerInstrumentation, and TaskTrackerMetricsInst) in + org.apache.hadoop.mapred package private instead of public. (omalley) + + BUG FIXES + + HADOOP-3563. Refactor the distributed upgrade code so that it is + easier to identify datanode and namenode related code. (dhruba) + + HADOOP-3640. Fix the read method in the NativeS3InputStream. (tomwhite via + omalley) + + HADOOP-3711. Fixes the Streaming input parsing to properly find the + separator. (Amareshwari Sriramadasu via ddas) + + HADOOP-3725. Prevent TestMiniMRMapDebugScript from swallowing exceptions. + (Steve Loughran via cdouglas) + + HADOOP-3726. Throw exceptions from TestCLI setup and teardown instead of + swallowing them. (Steve Loughran via cdouglas) + + HADOOP-3721. Refactor CompositeRecordReader and related mapred.join classes + to make them clearer. (cdouglas) + + HADOOP-3720. Re-read the config file when dfsadmin -refreshNodes is invoked + so dfs.hosts and dfs.hosts.exclude are observed. (lohit vijayarenu via + cdouglas) + + HADOOP-3485. Allow writing to files over fuse. + (Pete Wyckoff via dhruba) + + HADOOP-3723. The flags to the libhdfs.create call can be treated as + a bitmask. (Pete Wyckoff via dhruba) + + HADOOP-3643. Filter out completed tasks when asking for running tasks in + the JobTracker web/ui. (Amar Kamat via omalley) + + HADOOP-3777. Ensure that Lzo compressors/decompressors correctly handle the + case where native libraries aren't available. (Chris Douglas via acmurthy) + + HADOOP-3728. Fix SleepJob so that it doesn't depend on temporary files, + this ensures we can now run more than one instance of SleepJob + simultaneously. (Chris Douglas via acmurthy) + + HADOOP-3795. Fix saving image files on Namenode with different checkpoint + stamps. (Lohit Vijayarenu via mahadev) + + HADOOP-3624. Improving createeditslog to create tree directory structure. + (Lohit Vijayarenu via mahadev) + + HADOOP-3778. DFSInputStream.seek() did not retry in case of some errors. + (LN via rangadi) + + HADOOP-3661. The handling of moving files deleted through fuse-dfs to + Trash made similar to the behaviour from dfs shell. + (Pete Wyckoff via dhruba) + + HADOOP-3819. Unset LANG and LC_CTYPE in saveVersion.sh to make it + compatible with non-English locales. (Rong-En Fan via cdouglas) + + HADOOP-3848. Cache calls to getSystemDir in the TaskTracker instead of + calling it for each task start. (acmurthy via omalley) + + HADOOP-3131. Fix reduce progress reporting for compressed intermediate + data. 
(Matei Zaharia via acmurthy) + + HADOOP-3796. fuse-dfs configuration is implemented as file system + mount options. (Pete Wyckoff via dhruba) + + HADOOP-3836. Fix TestMultipleOutputs to correctly clean up. (Alejandro + Abdelnur via acmurthy) + + HADOOP-3805. Improve fuse-dfs write performance. + (Pete Wyckoff via zshao) + + HADOOP-3846. Fix unit test CreateEditsLog to generate paths correctly. + (Lohit Vjayarenu via cdouglas) + + HADOOP-3904. Fix unit tests using the old dfs package name. + (TszWo (Nicholas), SZE via johan) + + HADOOP-3319. Fix some HOD error messages to go stderr instead of + stdout. (Vinod Kumar Vavilapalli via omalley) + + HADOOP-3907. Move INodeDirectoryWithQuota to its own .java file. + (Tsz Wo (Nicholas), SZE via hairong) + + HADOOP-3919. Fix attribute name in hadoop-default for + mapred.jobtracker.instrumentation. (Ari Rabkin via omalley) + + HADOOP-3903. Change the package name for the servlets to be hdfs instead of + dfs. (Tsz Wo (Nicholas) Sze via omalley) + + HADOOP-3773. Change Pipes to set the default map output key and value + types correctly. (Koji Noguchi via omalley) + + HADOOP-3952. Fix compilation error in TestDataJoin referencing dfs package. + (omalley) + + HADOOP-3951. Fix package name for FSNamesystem logs and modify other + hard-coded Logs to use the class name. (cdouglas) + + HADOOP-3889. Improve error reporting from HftpFileSystem, handling in + DistCp. (Tsz Wo (Nicholas), SZE via cdouglas) + + HADOOP-3946. Fix TestMapRed after hadoop-3664. (tomwhite via omalley) + + HADOOP-3949. Remove duplicate jars from Chukwa. (Jerome Boulon via omalley) + + HADOOP-3933. DataNode sometimes sends up to io.byte.per.checksum bytes + more than required to client. (Ning Li via rangadi) + + HADOOP-3962. Shell command "fs -count" should support paths with different + file systems. (Tsz Wo (Nicholas), SZE via mahadev) + + HADOOP-3957. Fix javac warnings in DistCp and TestCopyFiles. (Tsz Wo + (Nicholas), SZE via cdouglas) + + HADOOP-3958. Fix TestMapRed to check the success of test-job. (omalley via + acmurthy) + + HADOOP-3985. Fix TestHDFSServerPorts to use random ports. (Hairong Kuang + via omalley) + + HADOOP-3964. Fix javadoc warnings introduced by FailMon. (dhruba) + + HADOOP-3785. Fix FileSystem cache to be case-insensitive for scheme and + authority. (Bill de hOra via cdouglas) + + HADOOP-3506. Fix a rare NPE caused by error handling in S3. (Tom White via + cdouglas) + + HADOOP-3705. Fix mapred.join parser to accept InputFormats named with + underscore and static, inner classes. (cdouglas) + + HADOOP-4023. Fix javadoc warnings introduced when the HDFS javadoc was + made private. (omalley) + + HADOOP-4030. Remove lzop from the default list of codecs. (Arun Murthy via + cdouglas) + + HADOOP-3961. Fix task disk space requirement estimates for virtual + input jobs. Delays limiting task placement until after 10% of the maps + have finished. (Ari Rabkin via omalley) + + HADOOP-2168. Fix problem with C++ record reader's progress not being + reported to framework. (acmurthy via omalley) + + HADOOP-3966. Copy findbugs generated output files to PATCH_DIR while + running test-patch. (Ramya R via lohit) + + HADOOP-4037. Fix the eclipse plugin for versions of kfs and log4j. (nigel + via omalley) + + HADOOP-3950. Cause the Mini MR cluster to wait for task trackers to + register before continuing. (enis via omalley) + + HADOOP-3910. Remove unused ClusterTestDFSNamespaceLogging and + ClusterTestDFS. (Tsz Wo (Nicholas), SZE via cdouglas) + + HADOOP-3954. 
Disable record skipping by default. (Sharad Agarwal via + cdouglas) + + HADOOP-4050. Fix TestFairScheduler to use absolute paths for the work + directory. (Matei Zaharia via omalley) + + HADOOP-4069. Keep temporary test files from TestKosmosFileSystem under + test.build.data instead of /tmp. (lohit via omalley) + + HADOOP-4078. Create test files for TestKosmosFileSystem in separate + directory under test.build.data. (lohit) + + HADOOP-3968. Fix getFileBlockLocations calls to use FileStatus instead + of Path reflecting the new API. (Pete Wyckoff via lohit) + + HADOOP-3963. libhdfs does not exit on its own, instead it returns error + to the caller and behaves as a true library. (Pete Wyckoff via dhruba) + + HADOOP-4100. Removes the cleanupTask scheduling from the Scheduler + implementations and moves it to the JobTracker. + (Amareshwari Sriramadasu via ddas) + + HADOOP-4097. Make hive work well with speculative execution turned on. + (Joydeep Sen Sarma via dhruba) + + HADOOP-4113. Changes to libhdfs to not exit on its own, rather return + an error code to the caller. (Pete Wyckoff via dhruba) + + HADOOP-4054. Remove duplicate lease removal during edit log loading. + (hairong) + + HADOOP-4071. FSNameSystem.isReplicationInProgress should add an + underReplicated block to the neededReplication queue using method + "add" not "update". (hairong) + + HADOOP-4154. Fix type warnings in WritableUtils. (szetszwo via omalley) + + HADOOP-4133. Log files generated by Hive should reside in the + build directory. (Prasad Chakka via dhruba) + + HADOOP-4094. Hive now has hive-default.xml and hive-site.xml similar + to core hadoop. (Prasad Chakka via dhruba) + + HADOOP-4112. Handles cleanupTask in JobHistory + (Amareshwari Sriramadasu via ddas) + + HADOOP-3831. Very slow reading clients sometimes failed while reading. + (rangadi) + + HADOOP-4155. Use JobTracker's start time while initializing JobHistory's + JobTracker Unique String. (lohit) + + HADOOP-4099. Fix null pointer when using HFTP from an 0.18 server. + (dhruba via omalley) + + HADOOP-3570. Includes user specified libjar files in the client side + classpath path. (Sharad Agarwal via ddas) + + HADOOP-4129. Changed memory limits of TaskTracker and Tasks to be in + KiloBytes rather than bytes. (Vinod Kumar Vavilapalli via acmurthy) + + HADOOP-4139. Optimize Hive multi group-by. + (Namin Jain via dhruba) + + HADOOP-3911. Add a check to fsck options to make sure -files is not + the first option to resolve conflicts with GenericOptionsParser + (lohit) + + HADOOP-3623. Refactor LeaseManager. (szetszwo) + + HADOOP-4125. Handles Reduce cleanup tip on the web ui. + (Amareshwari Sriramadasu via ddas) + + HADOOP-4087. Hive Metastore API for php and python clients. + (Prasad Chakka via dhruba) + + HADOOP-4197. Update DATA_TRANSFER_VERSION for HADOOP-3981. (szetszwo) + + HADOOP-4138. Refactor the Hive SerDe library to better structure + the interfaces to the serializer and de-serializer. + (Zheng Shao via dhruba) + + HADOOP-4195. Close compressor before returning to codec pool. + (acmurthy via omalley) + + HADOOP-2403. Escapes some special characters before logging to + history files. (Amareshwari Sriramadasu via ddas) + + HADOOP-4200. Fix a bug in the test-patch.sh script. + (Ramya R via nigel) + + HADOOP-4084. Add explain plan capabilities to Hive Query Language. + (Ashish Thusoo via dhruba) + + HADOOP-4121. Preserve cause for exception if the initialization of + HistoryViewer for JobHistory fails. (Amareshwari Sri Ramadasu via + acmurthy) + + HADOOP-4213. 
Fixes NPE in TestLimitTasksPerJobTaskScheduler. + (Sreekanth Ramakrishnan via ddas) + + HADOOP-4077. Setting access and modification time for a file + requires write permissions on the file. (dhruba) + + HADOOP-3592. Fix a couple of possible file leaks in FileUtil + (Bill de hOra via rangadi) + + HADOOP-4120. Hive interactive shell records the time taken by a + query. (Raghotham Murthy via dhruba) + + HADOOP-4090. The hive scripts pick up hadoop from HADOOP_HOME + and then the path. (Raghotham Murthy via dhruba) + + HADOOP-4242. Remove extra ";" in FSDirectory that blocks compilation + in some IDE's. (szetszwo via omalley) + + HADOOP-4249. Fix eclipse path to include the hsqldb.jar. (szetszwo via + omalley) + + HADOOP-4247. Move InputSampler into org.apache.hadoop.mapred.lib, so that + examples.jar doesn't depend on tools.jar. (omalley) + + HADOOP-4269. Fix the deprecation of LineReader by extending the new class + into the old name and deprecating it. Also update the tests to test the + new class. (cdouglas via omalley) + + HADOOP-4280. Fix conversions between seconds in C and milliseconds in + Java for access times for files. (Pete Wyckoff via rangadi) + + HADOOP-4254. -setSpaceQuota command does not convert "TB" extension to + terabytes properly. Implementation now uses StringUtils for parsing this. + (Raghu Angadi) + + HADOOP-4259. Findbugs should run over tools.jar also. (cdouglas via + omalley) + + HADOOP-4275. Move public method isJobValidName from JobID to a private + method in JobTracker. (omalley) + + HADOOP-4173. Fix failures in TestProcfsBasedProcessTree and + TestTaskTrackerMemoryManager tests. ProcfsBasedProcessTree and + memory management in TaskTracker are disabled on Windows. + (Vinod K V via rangadi) + + HADOOP-4189. Fixes the history blocksize & intertracker protocol version + issues introduced as part of HADOOP-3245. (Amar Kamat via ddas) + + HADOOP-4190. Fixes the backward compatibility issue with Job History + introduced by HADOOP-3245 and HADOOP-2403. (Amar Kamat via ddas) + + HADOOP-4237. Fixes the TestStreamingBadRecords.testNarrowDown testcase. + (Sharad Agarwal via ddas) + + HADOOP-4274. Capacity scheduler accidentally modifies the underlying + data structures when browsing the job lists. (Hemanth Yamijala via omalley) + + HADOOP-4309. Fix eclipse-plugin compilation. (cdouglas) + + HADOOP-4232. Fix race condition in JVM reuse when multiple slots become + free. (ddas via acmurthy) + + HADOOP-4302. Fix a race condition in TestReduceFetch that can yield false + negatives. (cdouglas) + + HADOOP-3942. Update distcp documentation to include features introduced in + HADOOP-3873, HADOOP-3939. (Tsz Wo (Nicholas), SZE via cdouglas) + + HADOOP-4319. fuse-dfs dfs_read function returns as many bytes as it is + told to read unless end-of-file is reached. (Pete Wyckoff via dhruba) + + HADOOP-4246. Ensure we have the correct lower bound on the number of + retries for fetching map-outputs; also fixed the case where the reducer + automatically kills itself when too many unique map-outputs could not be fetched + for small jobs. (Amareshwari Sri Ramadasu via acmurthy) + + HADOOP-4163. Report FSErrors from map output fetch threads instead of + merely logging them. (Sharad Agarwal via cdouglas) + + HADOOP-4261. Adds a setup task for jobs. This is required so that we + don't setup jobs that haven't been inited yet (since init could lead + to job failure). Only after the init has successfully happened do we + launch the setupJob task. (Amareshwari Sriramadasu via ddas) + + HADOOP-4256. 
Removes Completed and Failed Job tables from + jobqueue_details.jsp. (Sreekanth Ramakrishnan via ddas) + + HADOOP-4267. Occasional exceptions during shutting down HSQLDB is logged + but not rethrown. (enis) + + HADOOP-4018. The number of tasks for a single job cannot exceed a + pre-configured maximum value. (dhruba) + + HADOOP-4288. Fixes a NPE problem in CapacityScheduler. + (Amar Kamat via ddas) + + HADOOP-4014. Create hard links with 'fsutil hardlink' on Windows. (shv) + + HADOOP-4393. Merged org.apache.hadoop.fs.permission.AccessControlException + and org.apache.hadoop.security.AccessControlIOException into a single + class hadoop.security.AccessControlException. (omalley via acmurthy) + + HADOOP-4287. Fixes an issue to do with maintaining counts of running/pending + maps/reduces. (Sreekanth Ramakrishnan via ddas) + + HADOOP-4361. Makes sure that jobs killed from command line are killed + fast (i.e., there is a slot to run the cleanup task soon). + (Amareshwari Sriramadasu via ddas) + + HADOOP-4400. Add "hdfs://" to fs.default.name on quickstart.html. + (Jeff Hammerbacher via omalley) + + HADOOP-4378. Fix TestJobQueueInformation to use SleepJob rather than + WordCount via TestMiniMRWithDFS. (Sreekanth Ramakrishnan via acmurthy) + + HADOOP-4376. Fix formatting in hadoop-default.xml for + hadoop.http.filter.initializers. (Enis Soztutar via acmurthy) + + HADOOP-4410. Adds an extra arg to the API FileUtil.makeShellPath to + determine whether to canonicalize file paths or not. + (Amareshwari Sriramadasu via ddas) + + HADOOP-4236. Ensure un-initialized jobs are killed correctly on + user-demand. (Sharad Agarwal via acmurthy) + + HADOOP-4373. Fix calculation of Guaranteed Capacity for the + capacity-scheduler. (Hemanth Yamijala via acmurthy) + + HADOOP-4053. Schedulers must be notified when jobs complete. (Amar Kamat via omalley) + + HADOOP-4335. Fix FsShell -ls for filesystems without owners/groups. (David + Phillips via cdouglas) + + HADOOP-4426. TestCapacityScheduler broke due to the two commits HADOOP-4053 + and HADOOP-4373. This patch fixes that. (Hemanth Yamijala via ddas) + + HADOOP-4418. Updates documentation in forrest for Mapred, streaming and pipes. + (Amareshwari Sriramadasu via ddas) + + HADOOP-3155. Ensure that there is only one thread fetching + TaskCompletionEvents on TaskTracker re-init. (Dhruba Borthakur via + acmurthy) + + HADOOP-4425. Fix EditLogInputStream to overload the bulk read method. + (cdouglas) + + HADOOP-4427. Adds the new queue/job commands to the manual. + (Sreekanth Ramakrishnan via ddas) + + HADOOP-4278. Increase debug logging for unit test TestDatanodeDeath. + Fix the case when primary is dead. (dhruba via szetszwo) + + HADOOP-4423. Keep block length when the block recovery is triggered by + append. (szetszwo) + + HADOOP-4449. Fix dfsadmin usage. (Raghu Angadi via cdouglas) + + HADOOP-4455. Added TestSerDe so that unit tests can run successfully. + (Ashish Thusoo via dhruba) + + HADOOP-4457. Fixes an input split logging problem introduced by + HADOOP-3245. (Amareshwari Sriramadasu via ddas) + + HADOOP-4464. Separate out TestFileCreationClient from TestFileCreation. + (Tsz Wo (Nicholas), SZE via cdouglas) + + HADOOP-4404. saveFSImage() removes files from a storage directory that do + not correspond to its type. (shv) + + HADOOP-4149. Fix handling of updates to the job priority, by changing the + list of jobs to be keyed by the priority, submit time, and job tracker id. + (Amar Kamat via omalley) + + HADOOP-4296. 
Fix job client failures by not retiring a job as soon as it + is finished. (dhruba) + + HADOOP-4439. Remove configuration variables that aren't usable yet, in + particular mapred.tasktracker.tasks.maxmemory and mapred.task.max.memory. + (Hemanth Yamijala via omalley) + + HADOOP-4230. Fix for serde2 interface, limit operator, select * operator, + UDF trim functions and sampling. (Ashish Thusoo via dhruba) + + HADOOP-4358. No need to truncate access time in INode. Also fixes NPE + in CreateEditsLog. (Raghu Angadi) + + HADOOP-4387. TestHDFSFileSystemContract fails on windows nightly builds. + (Raghu Angadi) + + HADOOP-4466. Ensure that SequenceFileOutputFormat isn't tied to Writables + and can be used with other Serialization frameworks. (Chris Wensel via + acmurthy) + + HADOOP-4525. Fix ipc.server.ipcnodelay originally missed in in HADOOP-2232. + (cdouglas via Clint Morgan) + + HADOOP-4498. Ensure that JobHistory correctly escapes the job name so that + regex patterns work. (Chris Wensel via acmurthy) + + HADOOP-4446. Modify guaranteed capacity labels in capacity scheduler's UI + to reflect the information being displayed. (Sreekanth Ramakrishnan via + yhemanth) + + HADOOP-4282. Some user facing URLs are not filtered by user filters. + (szetszwo) + + HADOOP-4595. Fixes two race conditions - one to do with updating free slot count, + and another to do with starting the MapEventsFetcher thread. (ddas) + + HADOOP-4552. Fix a deadlock in RPC server. (Raghu Angadi) + + HADOOP-4471. Sort running jobs by priority in the capacity scheduler. + (Amar Kamat via yhemanth) + + HADOOP-4500. Fix MultiFileSplit to get the FileSystem from the relevant + path rather than the JobClient. (Joydeep Sen Sarma via cdouglas) + +Release 0.18.4 - Unreleased + + BUG FIXES + + HADOOP-5114. Remove timeout for accept() in DataNode. This makes accept() + fail in JDK on Windows and causes many tests to fail. (Raghu Angadi) + + HADOOP-5192. Block receiver should not remove a block that's created or + being written by other threads. (hairong) + + HADOOP-5134. FSNamesystem#commitBlockSynchronization adds under-construction + block locations to blocksMap. (Dhruba Borthakur via hairong) + + HADOOP-5412. Simulated DataNode should not write to a block that's being + written by another thread. (hairong) + + HADOOP-5465. Fix the problem of blocks remaining under-replicated by + providing synchronized modification to the counter xmitsInProgress in + DataNode. (hairong) + + HADOOP-5557. Fixes some minor problems in TestOverReplicatedBlocks. + (szetszwo) + + HADOOP-5644. Namenode is stuck in safe mode. (suresh Srinivas via hairong) + + HADOOP-6017. Lease Manager in NameNode does not handle certain characters + in filenames. This results in fatal errors in Secondary NameNode and while + restrating NameNode. (Tsz Wo (Nicholas), SZE via rangadi) + +Release 0.18.3 - 2009-01-27 + + IMPROVEMENTS + + HADOOP-4150. Include librecordio in hadoop releases. (Giridharan Kesavan + via acmurthy) + + HADOOP-4668. Improve documentation for setCombinerClass to clarify the + restrictions on combiners. (omalley) + + BUG FIXES + + HADOOP-4499. DFSClient should invoke checksumOk only once. (Raghu Angadi) + + HADOOP-4597. Calculate mis-replicated blocks when safe-mode is turned + off manually. (shv) + + HADOOP-3121. lsr should keep listing the remaining items but not + terminate if there is any IOException. (szetszwo) + + HADOOP-4610. Always calculate mis-replicated blocks when safe-mode is + turned off. (shv) + + HADOOP-3883. 
Limit namenode to assign at most one generation stamp for + a particular block within a short period. (szetszwo) + + HADOOP-4556. Block went missing. (hairong) + + HADOOP-4643. NameNode should exclude excessive replicas when counting + live replicas for a block. (hairong) + + HADOOP-4703. Should not wait for proxy forever in lease recovering. + (szetszwo) + + HADOOP-4647. NamenodeFsck should close the DFSClient it has created. + (szetszwo) + + HADOOP-4616. Fuse-dfs can handle bad values from FileSystem.read call. + (Pete Wyckoff via dhruba) + + HADOOP-4061. Throttle Datanode decommission monitoring in Namenode. + (szetszwo) + + HADOOP-4659. Root cause of connection failure is being lost to code that + uses it for delaying startup. (Steve Loughran and Hairong via hairong) + + HADOOP-4614. Lazily open segments when merging map spills to avoid using + too many file descriptors. (Yuri Pradkin via cdouglas) + + HADOOP-4257. The DFS client should pick only one datanode as the candidate + to initiate lease recovery. (Tsz Wo (Nicholas), SZE via dhruba) + + HADOOP-4713. Fix librecordio to handle records larger than 64k. (Christian + Kunz via cdouglas) + + HADOOP-4635. Fix a memory leak in fuse dfs. (pete wyckoff via mahadev) + + HADOOP-4714. Report status between merges and make the number of records + between progress reports configurable. (Jothi Padmanabhan via cdouglas) + + HADOOP-4726. Fix documentation typos "the the". (Edward J. Yoon via + szetszwo) + + HADOOP-4679. Datanode prints tons of log messages: waiting for threadgroup + to exit, active threads is XX. (hairong) + + HADOOP-4746. Job output directory should be normalized. (hairong) + + HADOOP-4717. Removal of default port# in NameNode.getUri() causes a + map/reduce job failed to prompt temporary output. (hairong) + + HADOOP-4778. Check for zero size block meta file when updating a block. + (szetszwo) + + HADOOP-4742. Replica gets deleted by mistake. (Wang Xu via hairong) + + HADOOP-4702. Failed block replication leaves an incomplete block in + receiver's tmp data directory. (hairong) + + HADOOP-4613. Fix block browsing on Web UI. (Johan Oskarsson via shv) + + HADOOP-4806. HDFS rename should not use src path as a regular expression. + (szetszwo) + + HADOOP-4795. Prevent lease monitor getting into an infinite loop when + leases and the namespace tree does not match. (szetszwo) + + HADOOP-4620. Fixes Streaming to handle well the cases of map/reduce with empty + input/output. (Ravi Gummadi via ddas) + + HADOOP-4857. Fixes TestUlimit to have exactly 1 map in the jobs spawned. + (Ravi Gummadi via ddas) + + HADOOP-4810. Data lost at cluster startup time. (hairong) + + HADOOP-4797. Improve how RPC server reads and writes large buffers. Avoids + soft-leak of direct buffers and excess copies in NIO layer. (Raghu Angadi) + + HADOOP-4840. TestNodeCount sometimes fails with NullPointerException. + (hairong) + + HADOOP-4904. Fix deadlock while leaving safe mode. (shv) + + HADOOP-1980. 'dfsadmin -safemode enter' should prevent the namenode from + leaving safemode automatically. (shv) + + HADOOP-4951. Lease monitor should acquire the LeaseManager lock but not the + Monitor lock. (szetszwo) + + HADOOP-4935. processMisReplicatedBlocks() should not clear + excessReplicateMap. (shv) + + HADOOP-4961. Fix ConcurrentModificationException in lease recovery + of empty files. (shv) + + HADOOP-4971. A long (unexpected) delay at datanodes could make subsequent + block reports from many datanode at the same time. (Raghu Angadi) + + HADOOP-4910. 
NameNode should exclude replicas when choosing excessive + replicas to delete to avoid data lose. (hairong) + + HADOOP-4983. Fixes a problem in updating Counters in the status reporting. + (Amareshwari Sriramadasu via ddas) + +Release 0.18.2 - 2008-11-03 + + BUG FIXES + + HADOOP-3614. Fix a bug that Datanode may use an old GenerationStamp to get + meta file. (szetszwo) + + HADOOP-4314. Simulated datanodes should not include blocks that are still + being written in their block report. (Raghu Angadi) + + HADOOP-4228. dfs datanode metrics, bytes_read and bytes_written, overflow + due to incorrect type used. (hairong) + + HADOOP-4395. The FSEditLog loading is incorrect for the case OP_SET_OWNER. + (szetszwo) + + HADOOP-4351. FSNamesystem.getBlockLocationsInternal throws + ArrayIndexOutOfBoundsException. (hairong) + + HADOOP-4403. Make TestLeaseRecovery and TestFileCreation more robust. + (szetszwo) + + HADOOP-4292. Do not support append() for LocalFileSystem. (hairong) + + HADOOP-4399. Make fuse-dfs multi-thread access safe. + (Pete Wyckoff via dhruba) + + HADOOP-4369. Use setMetric(...) instead of incrMetric(...) for metrics + averages. (Brian Bockelman via szetszwo) + + HADOOP-4469. Rename and add the ant task jar file to the tar file. (nigel) + + HADOOP-3914. DFSClient sends Checksum Ok only once for a block. + (Christian Kunz via hairong) + + HADOOP-4467. SerializationFactory now uses the current context ClassLoader + allowing for user supplied Serialization instances. (Chris Wensel via + acmurthy) + + HADOOP-4517. Release FSDataset lock before joining ongoing create threads. + (szetszwo) + + HADOOP-4526. fsck failing with NullPointerException. (hairong) + + HADOOP-4483 Honor the max parameter in DatanodeDescriptor.getBlockArray(..) + (Ahad Rana and Hairong Kuang via szetszwo) + + HADOOP-4340. Correctly set the exit code from JobShell.main so that the + 'hadoop jar' command returns the right code to the user. (acmurthy) + + NEW FEATURES + + HADOOP-2421. Add jdiff output to documentation, listing all API + changes from the prior release. (cutting) + +Release 0.18.1 - 2008-09-17 + + IMPROVEMENTS + + HADOOP-3934. Upgrade log4j to 1.2.15. (omalley) + + BUG FIXES + + HADOOP-3995. In case of quota failure on HDFS, rename does not restore + source filename. (rangadi) + + HADOOP-3821. Prevent SequenceFile and IFile from duplicating codecs in + CodecPool when closed more than once. (Arun Murthy via cdouglas) + + HADOOP-4040. Remove coded default of the IPC idle connection timeout + from the TaskTracker, which was causing HDFS client connections to not be + collected. (ddas via omalley) + + HADOOP-4046. Made WritableComparable's constructor protected instead of + private to re-enable class derivation. (cdouglas via omalley) + + HADOOP-3940. Fix in-memory merge condition to wait when there are no map + outputs or when the final map outputs are being fetched without contention. + (cdouglas) + +Release 0.18.0 - 2008-08-19 + + INCOMPATIBLE CHANGES + + HADOOP-2703. The default options to fsck skips checking files + that are being written to. The output of fsck is incompatible + with previous release. (lohit vijayarenu via dhruba) + + HADOOP-2865. FsShell.ls() printout format changed to print file names + in the end of the line. (Edward J. Yoon via shv) + + HADOOP-3283. The Datanode has a RPC server. It currently supports + two RPCs: the first RPC retrives the metadata about a block and the + second RPC sets the generation stamp of an existing block. + (Tsz Wo (Nicholas), SZE via dhruba) + + HADOOP-2797. 
Code related to upgrading to 0.14 (Block CRCs) is + removed. As a result, upgrade to 0.18 or later from 0.13 or earlier + is not supported. If upgrading from 0.13 or earlier is required, + please upgrade to an intermediate version (0.14-0.17) and then + to this version. (rangadi) + + HADOOP-544. This issue introduces new classes JobID, TaskID and + TaskAttemptID, which should be used instead of their string counterparts. + Functions in JobClient, TaskReport, RunningJob, jobcontrol.Job and + TaskCompletionEvent that use string arguments are deprecated in favor + of the corresponding ones that use ID objects. Applications can use + xxxID.toString() and xxxID.forName() methods to convert/restore objects + to/from strings. (Enis Soztutar via ddas) + + HADOOP-2188. RPC client sends a ping rather than throw timeouts. + RPC server does not throw away old RPCs. If clients and the server are on + different versions, they are not able to function well. In addition, + the property ipc.client.timeout is removed from the default hadoop + configuration. It also removes metrics RpcOpsDiscardedOPsNum. (hairong) + + HADOOP-2181. This issue adds logging for input splits in Jobtracker log + and jobHistory log. Also adds web UI for viewing input splits in job UI + and history UI. (Amareshwari Sriramadasu via ddas) + + HADOOP-3226. Run combiners multiple times over map outputs as they + are merged in both the map and the reduce tasks. (cdouglas via omalley) + + HADOOP-3329. DatanodeDescriptor objects should not be stored in the + fsimage. (dhruba) + + HADOOP-2656. The Block object has a generation stamp inside it. + Existing blocks get a generation stamp of 0. This is needed to support + appends. (dhruba) + + HADOOP-3390. Removed deprecated ClientProtocol.abandonFileInProgress(). + (Tsz Wo (Nicholas), SZE via rangadi) + + HADOOP-3405. Made some map/reduce internal classes non-public: + MapTaskStatus, ReduceTaskStatus, JobSubmissionProtocol, + CompletedJobStatusStore. (enis via omalley) + + HADOOP-3265. Removed deprecated API getFileCacheHints(). + (Lohit Vijayarenu via rangadi) + + HADOOP-3310. The namenode instructs the primary datanode to do lease + recovery. The block gets a new generation stamp. + (Tsz Wo (Nicholas), SZE via dhruba) + + HADOOP-2909. Improve IPC idle connection management. Property + ipc.client.maxidletime is removed from the default configuration, + instead it is defined as twice of the ipc.client.connection.maxidletime. + A connection with outstanding requests won't be treated as idle. + (hairong) + + HADOOP-3459. Change in the output format of dfs -ls to more closely match + /bin/ls. New format is: perm repl owner group size date name + (Mukund Madhugiri via omalley) + + HADOOP-3113. An fsync invoked on a HDFS file really really + persists data! The datanode moves blocks in the tmp directory to + the real block directory on a datanode-restart. (dhruba) + + HADOOP-3452. Change fsck to return non-zero status for a corrupt + FileSystem. (lohit vijayarenu via cdouglas) + + HADOOP-3193. Include the address of the client that found the corrupted + block in the log. Also include a CorruptedBlocks metric to track the size + of the corrupted block map. (cdouglas) + + HADOOP-3512. Separate out the tools into a tools jar. (omalley) + + HADOOP-3598. Ensure that temporary task-output directories are not created + if they are not necessary e.g. for Maps with no side-effect files. + (acmurthy) + + HADOOP-3665. 
Modify WritableComparator so that it only creates instances + of the key type if the type does not define a WritableComparator. Calling + the superclass compare will throw a NullPointerException. Also define + a RawComparator for NullWritable and permit it to be written as a key + to SequenceFiles. (cdouglas) + + HADOOP-3673. Avoid deadlock caused by DataNode RPC recoverBlock(). + (Tsz Wo (Nicholas), SZE via rangadi) + + NEW FEATURES + + HADOOP-3074. Provides a UrlStreamHandler for DFS and other FS, + relying on FileSystem. (taton) + + HADOOP-2585. Name-node imports namespace data from a recent checkpoint + accessible via an NFS mount. (shv) + + HADOOP-3061. Writable types for doubles and bytes. (Andrzej + Bialecki via omalley) + + HADOOP-2857. Allow libhdfs to set jvm options. (Craig Macdonald + via omalley) + + HADOOP-3317. Add default port for HDFS namenode. The port in + "hdfs:" URIs now defaults to 8020, so that one may simply use URIs + of the form "hdfs://example.com/dir/file". (cutting) + + HADOOP-2019. Adds support for .tar, .tgz and .tar.gz files in + DistributedCache. (Amareshwari Sriramadasu via ddas) + + HADOOP-3058. Add FSNamesystem status metrics. + (Lohit Vijayarenu via rangadi) + + HADOOP-1915. Allow users to specify counters via strings instead + of enumerations. (tomwhite via omalley) + + HADOOP-2065. Delay invalidating corrupt replicas of a block until it + is removed from the under-replicated state. If all replicas are found to + be corrupt, retain all copies and mark the block as corrupt. + (Lohit Vijayarenu via rangadi) + + HADOOP-3221. Adds org.apache.hadoop.mapred.lib.NLineInputFormat, which + splits files into splits of N lines each. N can be specified by the + configuration property "mapred.line.input.format.linespermap", which + defaults to 1. (Amareshwari Sriramadasu via ddas) + + HADOOP-3336. Direct a subset of annotated FSNamesystem calls for audit + logging. (cdouglas) + + HADOOP-3400. A new API FileSystem.deleteOnExit() that facilitates + handling of temporary files in HDFS. (dhruba) + + HADOOP-4. Add fuse-dfs to contrib, permitting one to mount an + HDFS filesystem on systems that support FUSE, e.g., Linux. + (Pete Wyckoff via cutting) + + HADOOP-3246. Add FTPFileSystem. (Ankur Goel via cutting) + + HADOOP-3250. Extend FileSystem API to allow appending to files. + (Tsz Wo (Nicholas), SZE via cdouglas) + + HADOOP-3177. Implement Syncable interface for FileSystem. + (Tsz Wo (Nicholas), SZE via dhruba) + + HADOOP-1328. Implement user counters in streaming. (tomwhite via + omalley) + + HADOOP-3187. Quotas for namespace management. (Hairong Kuang via ddas) + + HADOOP-3307. Support for Archives in Hadoop. (Mahadev Konar via ddas) + + HADOOP-3460. Add SequenceFileAsBinaryOutputFormat to permit direct + writes of serialized data. (Koji Noguchi via cdouglas) + + HADOOP-3230. Add ability to get counter values from the command + line. (tomwhite via omalley) + + HADOOP-930. Add support for native S3 files. (tomwhite via cutting) + + HADOOP-3502. Quota API needs documentation in Forrest. (hairong) + + HADOOP-3413. Allow SequenceFile.Reader to use the serialization + framework. (tomwhite via omalley) + + HADOOP-3541. Import of the namespace from a checkpoint documented + in the hadoop user guide. (shv) + + IMPROVEMENTS + + HADOOP-3677. Simplify generation stamp upgrade by making it a + local upgrade on datanodes. Deleted distributed upgrade. + (rangadi) + + HADOOP-2928. Remove deprecated FileSystem.getContentLength(). + (Lohit Vijayarenu via rangadi) + + HADOOP-3130. 
Make the connect timeout smaller for getFile. + (Amar Ramesh Kamat via ddas) + + HADOOP-3160. Remove deprecated exists() from ClientProtocol and + FSNamesystem. (Lohit Vijayarenu via rangadi) + + HADOOP-2910. Throttle IPC Clients during bursts of requests or + server slowdown. Clients retry connection for up to 15 minutes + when socket connection times out. (hairong) + + HADOOP-3295. Allow TextOutputFormat to use configurable separators. + (Zheng Shao via cdouglas) + + HADOOP-3308. Improve QuickSort by excluding values equal to the pivot from the + partition. (cdouglas) + + HADOOP-2461. Trim property names in configuration. + (Tsz Wo (Nicholas), SZE via shv) + + HADOOP-2799. Deprecate o.a.h.io.Closeable in favor of java.io.Closeable. + (Tsz Wo (Nicholas), SZE via cdouglas) + + HADOOP-3345. Enhance the hudson-test-patch target to clean up messages, + fix minor defects, and add eclipse plugin and python unit tests. (nigel) + + HADOOP-3144. Improve robustness of LineRecordReader by defining a maximum + line length (mapred.linerecordreader.maxlength), thereby avoiding reading + too far into the following split. (Zheng Shao via cdouglas) + + HADOOP-3334. Move lease handling from FSNamesystem into a separate class. + (Tsz Wo (Nicholas), SZE via rangadi) + + HADOOP-3332. Reduces the amount of logging in Reducer's shuffle phase. + (Devaraj Das) + + HADOOP-3355. Enhances Configuration class to accept hex numbers for getInt + and getLong. (Amareshwari Sriramadasu via ddas) + + HADOOP-3350. Add an argument to distcp to permit the user to limit the + number of maps. (cdouglas) + + HADOOP-3013. Add corrupt block reporting to fsck. + (lohit vijayarenu via cdouglas) + + HADOOP-3377. Remove TaskRunner::replaceAll and replace with equivalent + String::replace. (Brice Arnould via cdouglas) + + HADOOP-3398. Minor improvement to a utility function that participates + in backoff calculation. (cdouglas) + + HADOOP-3381. Clear references when directories are deleted so that the + effects of memory leaks are not multiplied. (rangadi) + + HADOOP-2867. Adds the task's CWD to its LD_LIBRARY_PATH. + (Amareshwari Sriramadasu via ddas) + + HADOOP-3232. DU class runs the 'du' command in a separate thread so + that it does not block the user. DataNode misses heartbeats in large + nodes otherwise. (Johan Oskarsson via rangadi) + + HADOOP-3035. During block transfers between datanodes, the receiving + datanode can now report corrupt replicas received from the src node to + the namenode. (Lohit Vijayarenu via rangadi) + + HADOOP-3434. Retain the cause of the bind failure in Server::bind. + (Steve Loughran via cdouglas) + + HADOOP-3429. Increases the size of the buffers used for the communication + for Streaming jobs. (Amareshwari Sriramadasu via ddas) + + HADOOP-3486. Change default for initial block report to 0 seconds + and document it. (Sanjay Radia via omalley) + + HADOOP-3448. Improve the text in the assertion making sure the + layout versions are consistent in the data node. (Steve Loughran + via omalley) + + HADOOP-2095. Improve the Map-Reduce shuffle/merge by cutting down + buffer-copies; changed intermediate sort/merge to use the new IFile format + rather than SequenceFiles, and compression of map-outputs is now + implemented by compressing the entire file rather than SequenceFile + compression. Shuffle also has been changed to use a simple byte-buffer + manager rather than the InMemoryFileSystem. + Configuration changes to hadoop-default.xml: + deprecated mapred.map.output.compression.type + (acmurthy) + + HADOOP-236. 
JobTracker now refuses connection from a task tracker with a + different version number. (Sharad Agarwal via ddas) + + HADOOP-3427. Improves the shuffle scheduler. It now waits for notifications + from shuffle threads when it has scheduled enough, before scheduling more. + (ddas) + + HADOOP-2393. Moves the handling of dir deletions in the tasktracker to + a separate thread. (Amareshwari Sriramadasu via ddas) + + HADOOP-3501. Deprecate InMemoryFileSystem. (cutting via omalley) + + HADOOP-3366. Stall the shuffle while in-memory merge is in progress. + (acmurthy) + + HADOOP-2916. Refactor src structure, but leave package structure alone. + (Raghu Angadi via mukund) + + HADOOP-3492. Add forrest documentation for user archives. + (Mahadev Konar via hairong) + + HADOOP-3467. Improve documentation for FileSystem::deleteOnExit. + (Tsz Wo (Nicholas), SZE via cdouglas) + + HADOOP-3379. Documents stream.non.zero.exit.status.is.failure for Streaming. + (Amareshwari Sriramadasu via ddas) + + HADOOP-3096. Improves documentation about the Task Execution Environment in + the Map-Reduce tutorial. (Amareshwari Sriramadasu via ddas) + + HADOOP-2984. Add forrest documentation for DistCp. (cdouglas) + + HADOOP-3406. Add forrest documentation for Profiling. + (Amareshwari Sriramadasu via ddas) + + HADOOP-2762. Add forrest documentation for controls of memory limits on + hadoop daemons and Map-Reduce tasks. (Amareshwari Sriramadasu via ddas) + + HADOOP-3535. Fix documentation and name of IOUtils.close to + reflect that it should only be used in cleanup contexts. (omalley) + + HADOOP-3593. Updates the mapred tutorial. (ddas) + + HADOOP-3547. Documents the way in which native libraries can be distributed + via the DistributedCache. (Amareshwari Sriramadasu via ddas) + + HADOOP-3606. Updates the Streaming doc. (Amareshwari Sriramadasu via ddas) + + HADOOP-3532. Add jdiff reports to the build scripts. (omalley) + + HADOOP-3100. Develop tests to test the DFS command line interface. (mukund) + + HADOOP-3688. Fix up HDFS docs. (Robert Chansler via hairong) + + OPTIMIZATIONS + + HADOOP-3274. The default constructor of BytesWritable creates an empty + byte array. (Tsz Wo (Nicholas), SZE via shv) + + HADOOP-3272. Remove redundant copy of Block object in BlocksMap. + (Lohit Vijayarenu via shv) + + HADOOP-3164. Reduce DataNode CPU usage by using FileChannel.transferTo(). + On Linux DataNode takes 5 times less CPU while serving data. Results may + vary on other platforms. (rangadi) + + HADOOP-3248. Optimization of saveFSImage. (Dhruba via shv) + + HADOOP-3297. Fetch more task completion events from the job + tracker and task tracker. (ddas via omalley) + + HADOOP-3364. Faster image and log edits loading. (shv) + + HADOOP-3369. Fast block processing during name-node startup. (shv) + + HADOOP-1702. Reduce buffer copies when data is written to DFS. + DataNodes take 30% less CPU while writing data. (rangadi) + + HADOOP-3095. Speed up split generation in the FileInputSplit, + especially for non-HDFS file systems. Deprecates + InputFormat.validateInput. (tomwhite via omalley) + + HADOOP-3552. Add forrest documentation for Hadoop commands. + (Sharad Agarwal via cdouglas) + + BUG FIXES + + HADOOP-2905. 'fsck -move' triggers NPE in NameNode. + (Lohit Vijayarenu via rangadi) + + Increment ClientProtocol.versionID missed by HADOOP-2585. (shv) + + HADOOP-3254. Restructure internal namenode methods that process + heartbeats to use well-defined BlockCommand object(s) instead of + using the base java Object. 
(Tsz Wo (Nicholas), SZE via dhruba) + + HADOOP-3176. Change lease record when a open-for-write-file + gets renamed. (dhruba) + + HADOOP-3269. Fix a case when namenode fails to restart + while processing a lease record. ((Tsz Wo (Nicholas), SZE via dhruba) + + HADOOP-3282. Port issues in TestCheckpoint resolved. (shv) + + HADOOP-3268. file:// URLs issue in TestUrlStreamHandler under Windows. + (taton) + + HADOOP-3127. Deleting files in trash should really remove them. + (Brice Arnould via omalley) + + HADOOP-3300. Fix locking of explicit locks in NetworkTopology. + (tomwhite via omalley) + + HADOOP-3270. Constant DatanodeCommands are stored in static final + immutable variables for better code clarity. + (Tsz Wo (Nicholas), SZE via dhruba) + + HADOOP-2793. Fix broken links for worst performing shuffle tasks in + the job history page. (Amareshwari Sriramadasu via ddas) + + HADOOP-3313. Avoid unnecessary calls to System.currentTimeMillis + in RPC::Invoker. (cdouglas) + + HADOOP-3318. Recognize "Darwin" as an alias for "Mac OS X" to + support Soylatte. (Sam Pullara via omalley) + + HADOOP-3301. Fix misleading error message when S3 URI hostname + contains an underscore. (tomwhite via omalley) + + HADOOP-3338. Fix Eclipse plugin to compile after HADOOP-544 was + committed. Updated all references to use the new JobID representation. + (taton via nigel) + + HADOOP-3337. Loading FSEditLog was broken by HADOOP-3283 since it + changed Writable serialization of DatanodeInfo. This patch handles it. + (Tsz Wo (Nicholas), SZE via rangadi) + + HADOOP-3101. Prevent JobClient from throwing an exception when printing + usage. (Edward J. Yoon via cdouglas) + + HADOOP-3119. Update javadoc for Text::getBytes to better describe its + behavior. (Tim Nelson via cdouglas) + + HADOOP-2294. Fix documentation in libhdfs to refer to the correct free + function. (Craig Macdonald via cdouglas) + + HADOOP-3335. Prevent the libhdfs build from deleting the wrong + files on make clean. (cutting via omalley) + + HADOOP-2930. Make {start,stop}-balancer.sh work even if hadoop-daemon.sh + is not in the PATH. (Spiros Papadimitriou via hairong) + + HADOOP-3085. Catch Exception in metrics util classes to ensure that + misconfigured metrics don't prevent others from updating. (cdouglas) + + HADOOP-3299. CompositeInputFormat should configure the sub-input + formats. (cdouglas via omalley) + + HADOOP-3309. Lower io.sort.mb and fs.inmemory.size.mb for MiniMRDFSSort + unit test so it passes on Windows. (lohit vijayarenu via cdouglas) + + HADOOP-3348. TestUrlStreamHandler should set URLStreamFactory after + DataNodes are initialized. (Lohit Vijayarenu via rangadi) + + HADOOP-3371. Ignore InstanceAlreadyExistsException from + MBeanUtil::registerMBean. (lohit vijayarenu via cdouglas) + + HADOOP-3349. A file rename was incorrectly changing the name inside a + lease record. (Tsz Wo (Nicholas), SZE via dhruba) + + HADOOP-3365. Removes an unnecessary copy of the key from SegmentDescriptor + to MergeQueue. (Devaraj Das) + + HADOOP-3388. Fix for TestDatanodeBlockScanner to handle blocks with + generation stamps in them. (dhruba) + + HADOOP-3203. Fixes TaskTracker::localizeJob to pass correct file sizes + for the jarfile and the jobfile. (Amareshwari Sriramadasu via ddas) + + HADOOP-3391. Fix a findbugs warning introduced by HADOOP-3248 (rangadi) + + HADOOP-3393. Fix datanode shutdown to call DataBlockScanner::shutdown and + close its log, even if the scanner thread is not running. (lohit vijayarenu + via cdouglas) + + HADOOP-3399. 
A debug message was logged at info level. (rangadi) + + HADOOP-3396. TestDatanodeBlockScanner occasionally fails. + (Lohit Vijayarenu via rangadi) + + HADOOP-3339. Some of the failures on the 3rd datanode in the DFS write pipeline + are not detected properly. This could lead to hard failure of the client's + write operation. (rangadi) + + HADOOP-3409. Namenode should save the root inode into fsimage. (hairong) + + HADOOP-3296. Fix task cache to work for more than two levels in the cache + hierarchy. This also adds a new counter to track cache hits at levels + greater than two. (Amar Kamat via cdouglas) + + HADOOP-3375. Lease paths were sometimes not removed from + LeaseManager.sortedLeasesByPath. (Tsz Wo (Nicholas), SZE via dhruba) + + HADOOP-3424. Values returned by getPartition should be checked to + make sure they are in the range 0 to #reduces - 1. (cdouglas via + omalley) + + HADOOP-3408. Change FSNamesystem to send its metrics as integers to + accommodate collectors that don't support long values. (lohit vijayarenu + via cdouglas) + + HADOOP-3403. Fixes a problem in the JobTracker to do with handling of lost + tasktrackers. (Arun Murthy via ddas) + + HADOOP-1318. Completed maps are not failed if the number of reducers is + zero. (Amareshwari Sriramadasu via ddas) + + HADOOP-3351. Fixes the history viewer tool to not do huge StringBuffer + allocations. (Amareshwari Sriramadasu via ddas) + + HADOOP-3419. Fixes TestFsck to wait for updates to happen before + checking results to make the test more reliable. (Lohit Vijaya + Renu via omalley) + + HADOOP-3259. Makes failure to read system properties due to a + security manager non-fatal. (Edward Yoon via omalley) + + HADOOP-3451. Update libhdfs to use FileSystem::getFileBlockLocations + instead of removed getFileCacheHints. (lohit vijayarenu via cdouglas) + + HADOOP-3401. Update FileBench to set the new + "mapred.work.output.dir" property to work post-3041. (cdouglas via omalley) + + HADOOP-2669. DFSClient locks pendingCreates appropriately. (dhruba) + + HADOOP-3410. Fix KFS implementation to return correct file + modification time. (Sriram Rao via cutting) + + HADOOP-3340. Fix DFS metrics for BlocksReplicated, HeartbeatsNum, and + BlockReportsAverageTime. (lohit vijayarenu via cdouglas) + + HADOOP-3435. Remove the assumption in the scripts that bash is at + /bin/bash and fix the test patch to require bash instead of sh. + (Brice Arnould via omalley) + + HADOOP-3471. Fix spurious errors from TestIndexedSort and add additional + logging to let failures be reproducible. (cdouglas) + + HADOOP-3443. Avoid copying map output across partitions when renaming a + single spill. (omalley via cdouglas) + + HADOOP-3454. Fix Text::find to search only valid byte ranges. (Chad Whipkey + via cdouglas) + + HADOOP-3417. Removes the static configuration variable, + commandLineConfig from JobClient. Moves the cli parsing from + JobShell to GenericOptionsParser. Thus removes the class + org.apache.hadoop.mapred.JobShell. (Amareshwari Sriramadasu via + ddas) + + HADOOP-2132. Only RUNNING/PREP jobs can be killed. (Jothi Padmanabhan + via ddas) + + HADOOP-3476. Code cleanup in fuse-dfs. + (Peter Wyckoff via dhruba) + + HADOOP-2427. Ensure that the cwd of completed tasks is cleaned-up + correctly on task-completion. (Amareshwari Sri Ramadasu via acmurthy) + + HADOOP-2565. Remove DFSPath cache of FileStatus. + (Tsz Wo (Nicholas), SZE via hairong) + + HADOOP-3326. 
Cleanup the local-fs and in-memory merge in the ReduceTask by + spawning only one thread each for the on-disk and in-memory merge. + (Sharad Agarwal via acmurthy) + + HADOOP-3493. Fix TestStreamingFailure to use FileUtil.fullyDelete to + ensure correct cleanup. (Lohit Vijayarenu via acmurthy) + + HADOOP-3455. Fix NPE in ipc.Client in case of connection failure and + improve its synchronization. (hairong) + + HADOOP-3240. Fix a testcase to not create files in the current directory. + Instead the file is created in the test directory. (Mahadev Konar via ddas) + + HADOOP-3496. Fix failure in TestHarFileSystem.testArchives due to change + in HADOOP-3095. (tomwhite) + + HADOOP-3135. Get the system directory from the JobTracker instead of from + the conf. (Subramaniam Krishnan via ddas) + + HADOOP-3503. Fix a race condition when client and namenode start + simultaneous recovery of the same block. (dhruba & Tsz Wo + (Nicholas), SZE) + + HADOOP-3440. Fixes DistributedCache to not create symlinks for paths which + don't have fragments even when createSymLink is true. + (Abhijit Bagri via ddas) + + HADOOP-3463. Hadoop-daemons script should cd to $HADOOP_HOME. (omalley) + + HADOOP-3489. Fix NPE in SafeModeMonitor. (Lohit Vijayarenu via shv) + + HADOOP-3509. Fix NPE in FSNamesystem.close. (Tsz Wo (Nicholas), SZE via + shv) + + HADOOP-3491. Name-node shutdown causes InterruptedException in + ResolutionMonitor. (Lohit Vijayarenu via shv) + + HADOOP-3511. Fixes namenode image to not set the root's quota to an + invalid value when the quota was not saved in the image. (hairong) + + HADOOP-3516. Ensure the JobClient in HadoopArchives is initialized + with a configuration. (Subramaniam Krishnan via omalley) + + HADOOP-3513. Improve NNThroughputBenchmark log messages. (shv) + + HADOOP-3519. Fix NPE in DFS FileSystem rename. (hairong via tomwhite) + + HADOOP-3528. The FilesCreated and files_deleted metrics + do not match. (Lohit via Mahadev) + + HADOOP-3418. When a directory is deleted, any leases that point to files + in the subdirectory are removed. (Tsz Wo (Nicholas), SZE via dhruba) + + HADOOP-3542. Disables the creation of the _logs directory for the archives + directory. (Mahadev Konar via ddas) + + HADOOP-3544. Fixes a documentation issue for hadoop archives. + (Mahadev Konar via ddas) + + HADOOP-3517. Fixes a problem in the reducer due to which the last InMemory + merge may be missed. (Arun Murthy via ddas) + + HADOOP-3548. Fixes build.xml to copy all *.jar files to the dist. + (Owen O'Malley via ddas) + + HADOOP-3363. Fix unformatted storage detection in FSImage. (shv) + + HADOOP-3560. Fixes a problem to do with split creation in archives. + (Mahadev Konar via ddas) + + HADOOP-3545. Fixes an overflow problem in archives. + (Mahadev Konar via ddas) + + HADOOP-3561. Prevent the trash from deleting its parent directories. + (cdouglas) + + HADOOP-3575. Fix the clover ant target after package refactoring. + (Nigel Daley via cdouglas) + + HADOOP-3539. Fix the tool path in the bin/hadoop script under + cygwin. (Tsz Wo (Nicholas), Sze via omalley) + + HADOOP-3520. TestDFSUpgradeFromImage triggers a race condition in the + Upgrade Manager. Fixed. (dhruba) + + HADOOP-3586. Provide deprecated, backwards compatible semantics for the + combiner to be run once and only once on each record. (cdouglas) + + HADOOP-3533. Add deprecated methods to provide API compatibility + between 0.18 and 0.17. Remove the deprecated methods in trunk. (omalley) + + HADOOP-3580. 
Fixes a problem to do with specifying a har as an input to + a job. (Mahadev Konar via ddas) + + HADOOP-3333. Don't assign a task to a tasktracker that it failed to + execute earlier (used to happen in the case of lost tasktrackers where + the tasktracker would reinitialize and bind to a different port). + (Jothi Padmanabhan and Arun Murthy via ddas) + + HADOOP-3534. Log IOExceptions that happen in closing the name + system when the NameNode shuts down. (Tsz Wo (Nicholas) Sze via omalley) + + HADOOP-3546. TaskTracker re-initialization gets stuck in cleaning up. + (Amareshwari Sriramadasu via ddas) + + HADOOP-3576. Fix NullPointerException when renaming a directory + to its subdirectory. (Tse Wo (Nicholas), SZE via hairong) + + HADOOP-3320. Fix NullPointerException in NetworkTopology.getDistance(). + (hairong) + + HADOOP-3569. KFS input stream read() now correctly reads 1 byte + instead of 4. (Sriram Rao via omalley) + + HADOOP-3599. Fix JobConf::setCombineOnceOnly to modify the instance rather + than a parameter. (Owen O'Malley via cdouglas) + + HADOOP-3590. Null pointer exception in JobTracker when the task tracker is + not yet resolved. (Amar Ramesh Kamat via ddas) + + HADOOP-3603. Fix MapOutputCollector to spill when io.sort.spill.percent is + 1.0 and to detect spills when emitted records write no data. (cdouglas) + + HADOOP-3615. Set DatanodeProtocol.versionID to the correct value. + (Tsz Wo (Nicholas), SZE via cdouglas) + + HADOOP-3559. Fix the libhdfs test script and config to work with the + current semantics. (lohit vijayarenu via cdouglas) + + HADOOP-3480. Need to update Eclipse template to reflect current trunk. + (Brice Arnould via tomwhite) + + HADOOP-3588. Fixed usability issues with archives. (mahadev) + + HADOOP-3635. Uncaught exception in DataBlockScanner. + (Tsz Wo (Nicholas), SZE via hairong) + + HADOOP-3639. Exception when closing DFSClient while multiple files are + open. (Benjamin Gufler via hairong) + + HADOOP-3572. SetQuotas usage interface has some minor bugs. (hairong) + + HADOOP-3649. Fix bug in removing blocks from the corrupted block map. + (Lohit Vijayarenu via shv) + + HADOOP-3604. Work around a JVM synchronization problem observed while + retrieving the address of direct buffers from compression code by obtaining + a lock during this call. (Arun C Murthy via cdouglas) + + HADOOP-3683. Fix dfs metrics to count file listings rather than files + listed. (lohit vijayarenu via cdouglas) + + HADOOP-3597. Fix SortValidator to use filesystems other than the default as + input. Validation job still runs on default fs. + (Jothi Padmanabhan via cdouglas) + + HADOOP-3693. Fix archives, distcp and native library documentation to + conform to style guidelines. (Amareshwari Sriramadasu via cdouglas) + + HADOOP-3653. Fix test-patch target to properly account for Eclipse + classpath jars. (Brice Arnould via nigel) + + HADOOP-3692. Fix documentation for Cluster setup and Quick start guides. + (Amareshwari Sriramadasu via ddas) + + HADOOP-3691. Fix streaming and tutorial docs. (Jothi Padmanabhan via ddas) + + HADOOP-3630. Fix NullPointerException in CompositeRecordReader from empty + sources (cdouglas) + + HADOOP-3706. Fix a ClassLoader issue in the mapred.join Parser that + prevents it from loading user-specified InputFormats. + (Jingkei Ly via cdouglas) + + HADOOP-3718. Fix KFSOutputStream::write(int) to output a byte instead of + an int, per the OutputStream contract. (Sriram Rao via cdouglas) + + HADOOP-3647. 
Add debug logs to help track down a very occassional, + hard-to-reproduce, bug in shuffle/merge on the reducer. (acmurthy) + + HADOOP-3716. Prevent listStatus in KosmosFileSystem from returning + null for valid, empty directories. (Sriram Rao via cdouglas) + + HADOOP-3752. Fix audit logging to record rename events. (cdouglas) + + HADOOP-3737. Fix CompressedWritable to call Deflater::end to release + compressor memory. (Grant Glouser via cdouglas) + + HADOOP-3670. Fixes JobTracker to clear out split bytes when no longer + required. (Amareshwari Sriramadasu via ddas) + + HADOOP-3755. Update gridmix to work with HOD 0.4 (Runping Qi via cdouglas) + + HADOOP-3743. Fix -libjars, -files, -archives options to work even if + user code does not implement tools. (Amareshwari Sriramadasu via mahadev) + + HADOOP-3774. Fix typos in shell output. (Tsz Wo (Nicholas), SZE via + cdouglas) + + HADOOP-3762. Fixed FileSystem cache to work with the default port. (cutting + via omalley) + + HADOOP-3798. Fix tests compilation. (Mukund Madhugiri via omalley) + + HADOOP-3794. Return modification time instead of zero for KosmosFileSystem. + (Sriram Rao via cdouglas) + + HADOOP-3806. Remove debug statement to stdout from QuickSort. (cdouglas) + + HADOOP-3776. Fix NPE at NameNode when datanode reports a block after it is + deleted at NameNode. (rangadi) + + HADOOP-3537. Disallow adding a datanode to a network topology when its + network location is not resolved. (hairong) + + HADOOP-3571. Fix bug in block removal used in lease recovery. (shv) + + HADOOP-3645. MetricsTimeVaryingRate returns wrong value for + metric_avg_time. (Lohit Vijayarenu via hairong) + + HADOOP-3521. Reverted the missing cast to float for sending Counters' values + to Hadoop metrics which was removed by HADOOP-544. (acmurthy) + + HADOOP-3820. Fixes two problems in the gridmix-env - a syntax error, and a + wrong definition of USE_REAL_DATASET by default. (Arun Murthy via ddas) + + HADOOP-3724. Fixes two problems related to storing and recovering lease + in the fsimage. (dhruba) + + HADOOP-3827. Fixed compression of empty map-outputs. (acmurthy) + + HADOOP-3865. Remove reference to FSNamesystem from metrics preventing + garbage collection. (Lohit Vijayarenu via cdouglas) + + HADOOP-3884. Fix so that Eclipse plugin builds against recent + Eclipse releases. (cutting) + + HADOOP-3837. Streaming jobs report progress status. (dhruba) + + HADOOP-3897. Fix a NPE in secondary namenode. (Lohit Vijayarenu via + cdouglas) + + HADOOP-3901. Fix bin/hadoop to correctly set classpath under cygwin. + (Tsz Wo (Nicholas) Sze via omalley) + + HADOOP-3947. Fix a problem in tasktracker reinitialization. + (Amareshwari Sriramadasu via ddas) + +Release 0.17.3 - Unreleased + + IMPROVEMENTS + + HADOOP-4164. Chinese translation of the documentation. (Xuebing Yan via + omalley) + + BUG FIXES + + HADOOP-4277. Checksum verification was mistakenly disabled for + LocalFileSystem. (Raghu Angadi) + + HADOOP-4271. Checksum input stream can sometimes return invalid + data to the user. (Ning Li via rangadi) + + HADOOP-4318. DistCp should use absolute paths for cleanup. (szetszwo) + + HADOOP-4326. ChecksumFileSystem does not override create(...) correctly. + (szetszwo) + +Release 0.17.2 - 2008-08-11 + + BUG FIXES + + HADOOP-3678. Avoid spurious exceptions logged at DataNode when clients + read from DFS. (rangadi) + + HADOOP-3707. NameNode keeps a count of number of blocks scheduled + to be written to a datanode and uses it to avoid allocating more + blocks than a datanode can hold. 
(rangadi) + + HADOOP-3760. Fix a bug with HDFS file close() mistakenly introduced + by HADOOP-3681. (Lohit Vijayarenu via rangadi) + + HADOOP-3681. DFSClient can get into an infinite loop while closing + a file if there are some errors. (Lohit Vijayarenu via rangadi) + + HADOOP-3002. Hold off block removal while in safe mode. (shv) + + HADOOP-3685. Unbalanced replication target. (hairong) + + HADOOP-3758. Shutdown datanode on version mismatch instead of retrying + continuously, preventing excessive logging at the namenode. + (lohit vijayarenu via cdouglas) + + HADOOP-3633. Correct exception handling in DataXceiveServer, and throttle + the number of xceiver threads in a data-node. (shv) + + HADOOP-3370. Ensure that the TaskTracker.runningJobs data-structure is + correctly cleaned-up on task completion. (Zheng Shao via acmurthy) + + HADOOP-3813. Fix task-output clean-up on HDFS to use the recursive + FileSystem.delete rather than the FileUtil.fullyDelete. (Amareshwari + Sri Ramadasu via acmurthy) + + HADOOP-3859. Allow the maximum number of xceivers in the data node to + be configurable. (Johan Oskarsson via omalley) + + HADOOP-3931. Fix corner case in the map-side sort that causes some values + to be counted as too large and cause pre-mature spills to disk. Some values + will also bypass the combiner incorrectly. (cdouglas via omalley) + +Release 0.17.1 - 2008-06-23 + + INCOMPATIBLE CHANGES + + HADOOP-3565. Fix the Java serialization, which is not enabled by + default, to clear the state of the serializer between objects. + (tomwhite via omalley) + + IMPROVEMENTS + + HADOOP-3522. Improve documentation on reduce pointing out that + input keys and values will be reused. (omalley) + + HADOOP-3487. Balancer uses thread pools for managing its threads; + therefore provides better resource management. (hairong) + + BUG FIXES + + HADOOP-2159 Namenode stuck in safemode. The counter blockSafe should + not be decremented for invalid blocks. (hairong) + + HADOOP-3472 MapFile.Reader getClosest() function returns incorrect results + when before is true (Todd Lipcon via Stack) + + HADOOP-3442. Limit recursion depth on the stack for QuickSort to prevent + StackOverflowErrors. To avoid O(n*n) cases, when partitioning depth exceeds + a multiple of log(n), change to HeapSort. (cdouglas) + + HADOOP-3477. Fix build to not package contrib/*/bin twice in + distributions. (Adam Heath via cutting) + + HADOOP-3475. Fix MapTask to correctly size the accounting allocation of + io.sort.mb. (cdouglas) + + HADOOP-3550. Fix the serialization data structures in MapTask where the + value lengths are incorrectly calculated. (cdouglas) + + HADOOP-3526. Fix contrib/data_join framework by cloning values retained + in the reduce. (Spyros Blanas via cdouglas) + + HADOOP-1979. Speed up fsck by adding a buffered stream. (Lohit + Vijaya Renu via omalley) + +Release 0.17.0 - 2008-05-18 + + INCOMPATIBLE CHANGES + + HADOOP-2786. Move hbase out of hadoop core + + HADOOP-2345. New HDFS transactions to support appending + to files. Disk layout version changed from -11 to -12. (dhruba) + + HADOOP-2192. Error messages from "dfs mv" command improved. + (Mahadev Konar via dhruba) + + HADOOP-1902. "dfs du" command without any arguments operates on the + current working directory. (Mahadev Konar via dhruba) + + HADOOP-2873. Fixed bad disk format introduced by HADOOP-2345. + Disk layout version changed from -12 to -13. See changelist 630992 + (dhruba) + + HADOOP-1985. This addresses rack-awareness for Map tasks and for + HDFS in a uniform way. 
(ddas) + + HADOOP-1986. Add support for a general serialization mechanism for + Map Reduce. (tomwhite) + + HADOOP-771. FileSystem.delete() takes an explicit parameter that + specifies whether a recursive delete is intended. + (Mahadev Konar via dhruba) + + HADOOP-2470. Remove getContentLength(String), open(String, long, long) + and isDir(String) from ClientProtocol. ClientProtocol version changed + from 26 to 27. (Tsz Wo (Nicholas), SZE via cdouglas) + + HADOOP-2822. Remove deprecated code for classes InputFormatBase and + PhasedFileSystem. (Amareshwari Sriramadasu via enis) + + HADOOP-2116. Changes the layout of the task execution directory. + (Amareshwari Sriramadasu via ddas) + + HADOOP-2828. The following deprecated methods in Configuration.java + have been removed + getObject(String name) + setObject(String name, Object value) + get(String name, Object defaultValue) + set(String name, Object value) + Iterator entries() + (Amareshwari Sriramadasu via ddas) + + HADOOP-2824. Removes one deprecated constructor from MiniMRCluster. + (Amareshwari Sriramadasu via ddas) + + HADOOP-2823. Removes deprecated methods getColumn(), getLine() from + org.apache.hadoop.record.compiler.generated.SimpleCharStream. + (Amareshwari Sriramadasu via ddas) + + HADOOP-3060. Removes one unused constructor argument from MiniMRCluster. + (Amareshwari Sriramadasu via ddas) + + HADOOP-2854. Remove deprecated o.a.h.ipc.Server::getUserInfo(). + (lohit vijayarenu via cdouglas) + + HADOOP-2563. Remove deprecated FileSystem::listPaths. + (lohit vijayarenu via cdouglas) + + HADOOP-2818. Remove deprecated methods in Counters. + (Amareshwari Sriramadasu via tomwhite) + + HADOOP-2831. Remove deprecated o.a.h.dfs.INode::getAbsoluteName() + (lohit vijayarenu via cdouglas) + + HADOOP-2839. Remove deprecated FileSystem::globPaths. + (lohit vijayarenu via cdouglas) + + HADOOP-2634. Deprecate ClientProtocol::exists. + (lohit vijayarenu via cdouglas) + + HADOOP-2410. Make EC2 cluster nodes more independent of each other. + Multiple concurrent EC2 clusters are now supported, and nodes may be + added to a cluster on the fly with new nodes starting in the same EC2 + availability zone as the cluster. Ganglia monitoring and large + instance sizes have also been added. (Chris K Wensel via tomwhite) + + HADOOP-2826. Deprecated FileSplit.getFile(), LineRecordReader.readLine(). + (Amareshwari Sriramadasu via ddas) + + HADOOP-3239. getFileInfo() returns null for non-existing files instead + of throwing FileNotFoundException. (Lohit Vijayarenu via shv) + + HADOOP-3266. Removed HOD changes from CHANGES.txt, as they are now inside + src/contrib/hod (Hemanth Yamijala via ddas) + + HADOOP-3280. Separate the configuration of the virtual memory size + (mapred.child.ulimit) from the jvm heap size, so that 64 bit + streaming applications are supported even when running with 32 bit + jvms. (acmurthy via omalley) + + NEW FEATURES + + HADOOP-1398. Add HBase in-memory block cache. (tomwhite) + + HADOOP-2178. Job History on DFS. (Amareshwari Sri Ramadasu via ddas) + + HADOOP-2063. A new parameter to dfs -get command to fetch a file + even if it is corrupted. (Tsz Wo (Nicholas), SZE via dhruba) + + HADOOP-2219. A new command "df -count" that counts the number of + files and directories. (Tsz Wo (Nicholas), SZE via dhruba) + + HADOOP-2906. Add an OutputFormat capable of using keys, values, and + config params to map records to different output files. + (Runping Qi via cdouglas) + + HADOOP-2346. Utilities to support timeout while writing to sockets. 
+ DFSClient and DataNode sockets have 10min write timeout. (rangadi) + + HADOOP-2951. Add a contrib module that provides a utility to + build or update Lucene indexes using Map/Reduce. (Ning Li via cutting) + + HADOOP-1622. Allow multiple jar files for map reduce. + (Mahadev Konar via dhruba) + + HADOOP-2055. Allows users to set PathFilter on the FileInputFormat. + (Alejandro Abdelnur via ddas) + + HADOOP-2551. More environment variables like HADOOP_NAMENODE_OPTS + for better control of HADOOP_OPTS for each component. (rangadi) + + HADOOP-3001. Add job counters that measure the number of bytes + read and written to HDFS, S3, KFS, and local file systems. (omalley) + + HADOOP-3048. A new Interface and a default implementation to convert + and restore serializations of objects to/from strings. (enis) + + IMPROVEMENTS + + HADOOP-2655. Copy on write for data and metadata files in the + presence of snapshots. Needed for supporting appends to HDFS + files. (dhruba) + + HADOOP-1967. When a Path specifies the same scheme as the default + FileSystem but no authority, the default FileSystem's authority is + used. Also add warnings for old-format FileSystem names, accessor + methods for fs.default.name, and check for null authority in HDFS. + (cutting) + + HADOOP-2895. Let the profiling string be configurable. + (Martin Traverso via cdouglas) + + HADOOP-910. Enables Reduces to do merges for the on-disk map output files + in parallel with their copying. (Amar Kamat via ddas) + + HADOOP-730. Use rename rather than copy for local renames. (cdouglas) + + HADOOP-2810. Updated the Hadoop Core logo. (nigel) + + HADOOP-2057. Streaming should optionally treat a non-zero exit status + of a child process as a failed task. (Rick Cox via tomwhite) + + HADOOP-2765. Enables specifying ulimits for streaming/pipes tasks (ddas) + + HADOOP-2888. Make gridmix scripts more readily configurable and amenable + to automated execution. (Mukund Madhugiri via cdouglas) + + HADOOP-2908. A document that describes the DFS Shell command. + (Mahadev Konar via dhruba) + + HADOOP-2981. Update README.txt to reflect the upcoming use of + cryptography. (omalley) + + HADOOP-2804. Add support to publish CHANGES.txt as HTML when running + the Ant 'docs' target. (nigel) + + HADOOP-2559. Change DFS block placement to allocate the first replica + locally, the second off-rack, and the third intra-rack from the + second. (lohit vijayarenu via cdouglas) + + HADOOP-2939. Make the automated patch testing process an executable + Ant target, test-patch. (nigel) + + HADOOP-2239. Add HsftpFileSystem to permit transferring files over ssl. + (cdouglas) + + HADOOP-2886. Track individual RPC metrics. + (girish vaitheeswaran via dhruba) + + HADOOP-2373. Improvement in safe-mode reporting. (shv) + + HADOOP-3091. Modify FsShell command -put to accept multiple sources. + (Lohit Vijaya Renu via cdouglas) + + HADOOP-3092. Show counter values from job -status command. + (Tom White via ddas) + + HADOOP-1228. Ant task to generate Eclipse project files. (tomwhite) + + HADOOP-3093. Adds Configuration.getStrings(name, default-value) and + the corresponding setStrings. (Amareshwari Sriramadasu via ddas) + + HADOOP-3106. Adds documentation in forrest for debugging. + (Amareshwari Sriramadasu via ddas) + + HADOOP-3099. Add an option to distcp to preserve user, group, and + permission information. (Tsz Wo (Nicholas), SZE via cdouglas) + + HADOOP-2841. Unwrap AccessControlException and FileNotFoundException + from RemoteException for DFSClient. (shv) + + HADOOP-3152. 
Make index interval configuable when using + MapFileOutputFormat for map-reduce job. (Rong-En Fan via cutting) + + HADOOP-3143. Decrease number of slaves from 4 to 3 in TestMiniMRDFSSort, + as Hudson generates false negatives under the current load. + (Nigel Daley via cdouglas) + + HADOOP-3174. Illustrative example for MultipleFileInputFormat. (Enis + Soztutar via acmurthy) + + HADOOP-2993. Clarify the usage of JAVA_HOME in the Quick Start guide. + (acmurthy via nigel) + + HADOOP-3124. Make DataNode socket write timeout configurable. (rangadi) + + OPTIMIZATIONS + + HADOOP-2790. Fixed inefficient method hasSpeculativeTask by removing + repetitive calls to get the current time and late checking to see if + we want speculation on at all. (omalley) + + HADOOP-2758. Reduce buffer copies in DataNode when data is read from + HDFS, without negatively affecting read throughput. (rangadi) + + HADOOP-2399. Input key and value to combiner and reducer is reused. + (Owen O'Malley via ddas). + + HADOOP-2423. Code optimization in FSNamesystem.mkdirs. + (Tsz Wo (Nicholas), SZE via dhruba) + + HADOOP-2606. ReplicationMonitor selects data-nodes to replicate directly + from needed replication blocks instead of looking up for the blocks for + each live data-node. (shv) + + HADOOP-2148. Eliminate redundant data-node blockMap lookups. (shv) + + HADOOP-2027. Return the number of bytes in each block in a file + via a single rpc to the namenode to speed up job planning. + (Lohit Vijaya Renu via omalley) + + HADOOP-2902. Replace uses of "fs.default.name" with calls to the + accessor methods added in HADOOP-1967. (cutting) + + HADOOP-2119. Optimize scheduling of jobs with large numbers of + tasks by replacing static arrays with lists of runnable tasks. + (Amar Kamat via omalley) + + HADOOP-2919. Reduce the number of memory copies done during the + map output sorting. Also adds two config variables: + io.sort.spill.percent - the percentages of io.sort.mb that should + cause a spill (default 80%) + io.sort.record.percent - the percent of io.sort.mb that should + hold key/value indexes (default 5%) + (cdouglas via omalley) + + HADOOP-3140. Doesn't add a task in the commit queue if the task hadn't + generated any output. (Amar Kamat via ddas) + + HADOOP-3168. Reduce the amount of logging in streaming to an + exponentially increasing number of records (up to 10,000 + records/log). (Zheng Shao via omalley) + + BUG FIXES + + HADOOP-2195. '-mkdir' behaviour is now closer to Linux shell in case of + errors. (Mahadev Konar via rangadi) + + HADOOP-2190. bring behaviour '-ls' and '-du' closer to Linux shell + commands in case of errors. (Mahadev Konar via rangadi) + + HADOOP-2193. 'fs -rm' and 'fs -rmr' show error message when the target + file does not exist. (Mahadev Konar via rangadi) + + HADOOP-2738 Text is not subclassable because set(Text) and compareTo(Object) + access the other instance's private members directly. (jimk) + + HADOOP-2779. Remove the references to HBase in the build.xml. (omalley) + + HADOOP-2194. dfs cat on a non-existent file throws FileNotFoundException. + (Mahadev Konar via dhruba) + + HADOOP-2767. Fix for NetworkTopology erroneously skipping the last leaf + node on a rack. (Hairong Kuang and Mark Butler via dhruba) + + HADOOP-1593. FsShell works with paths in non-default FileSystem. + (Mahadev Konar via dhruba) + + HADOOP-2191. du and dus command on non-existent directory gives + appropriate error message. (Mahadev Konar via dhruba) + + HADOOP-2832. 
Remove tabs from code of DFSClient for better + indentation. (dhruba) + + HADOOP-2844. distcp closes file handles for sequence files. + (Tsz Wo (Nicholas), SZE via dhruba) + + HADOOP-2727. Fix links in Web UI of the hadoop daemons and some docs + (Amareshwari Sri Ramadasu via ddas) + + HADOOP-2871. Fixes a problem to do with file: URI in the JobHistory init. + (Amareshwari Sri Ramadasu via ddas) + + HADOOP-2800. Deprecate SetFile.Writer constructor not the whole class. + (Johan Oskarsson via tomwhite) + + HADOOP-2891. DFSClient.close() closes all open files. (dhruba) + + HADOOP-2845. Fix dfsadmin disk utilization report on Solaris. + (Martin Traverso via tomwhite) + + HADOOP-2912. MiniDFSCluster restart should wait for namenode to exit + safemode. This was causing TestFsck to fail. (Mahadev Konar via dhruba) + + HADOOP-2820. The following classes in streaming are removed : + StreamLineRecordReader StreamOutputFormat StreamSequenceRecordReader. + (Amareshwari Sri Ramadasu via ddas) + + HADOOP-2819. The following methods in JobConf are removed: + getInputKeyClass() setInputKeyClass getInputValueClass() + setInputValueClass(Class theClass) setSpeculativeExecution + getSpeculativeExecution() (Amareshwari Sri Ramadasu via ddas) + + HADOOP-2817. Removes deprecated mapred.tasktracker.tasks.maximum and + ClusterStatus.getMaxTasks(). (Amareshwari Sri Ramadasu via ddas) + + HADOOP-2821. Removes deprecated ShellUtil and ToolBase classes from + the util package. (Amareshwari Sri Ramadasu via ddas) + + HADOOP-2934. The namenode was encountreing a NPE while loading + leases from the fsimage. Fixed. (dhruba) + + HADOOP-2938. Some fs commands did not glob paths. + (Tsz Wo (Nicholas), SZE via rangadi) + + HADOOP-2943. Compression of intermediate map output causes failures + in the merge. (cdouglas) + + HADOOP-2870. DataNode and NameNode closes all connections while + shutting down. (Hairong Kuang via dhruba) + + HADOOP-2973. Fix TestLocalDFS for Windows platform. + (Tsz Wo (Nicholas), SZE via dhruba) + + HADOOP-2971. select multiple times if it returns early in + SocketIOWithTimeout. (rangadi) + + HADOOP-2955. Fix TestCrcCorruption test failures caused by HADOOP-2758 + (rangadi) + + HADOOP-2657. A flush call on the DFSOutputStream flushes the last + partial CRC chunk too. (dhruba) + + HADOOP-2974. IPC unit tests used "0.0.0.0" to connect to server, which + is not always supported. (rangadi) + + HADOOP-2996. Fixes uses of StringBuffer in StreamUtils class. + (Dave Brosius via ddas) + + HADOOP-2995. Fixes StreamBaseRecordReader's getProgress to return a + floating point number. (Dave Brosius via ddas) + + HADOOP-2972. Fix for a NPE in FSDataset.invalidate. + (Mahadev Konar via dhruba) + + HADOOP-2994. Code cleanup for DFSClient: remove redundant + conversions from string to string. (Dave Brosius via dhruba) + + HADOOP-3009. TestFileCreation sometimes fails because restarting + minidfscluster sometimes creates datanodes with ports that are + different from their original instance. (dhruba) + + HADOOP-2992. Distributed Upgrade framework works correctly with + more than one upgrade object. (Konstantin Shvachko via dhruba) + + HADOOP-2679. Fix a typo in libhdfs. (Jason via dhruba) + + HADOOP-2976. When a lease expires, the Namenode ensures that + blocks of the file are adequately replicated. (dhruba) + + HADOOP-2901. Fixes the creation of info servers in the JobClient + and JobTracker. Removes the creation from JobClient and removes + additional info server from the JobTracker. 
Also adds the command + line utility to view the history files (HADOOP-2896), and fixes + bugs in JSPs to do with analysis - HADOOP-2742, HADOOP-2792. + (Amareshwari Sri Ramadasu via ddas) + + HADOOP-2890. If different datanodes report the same block but + with different sizes to the namenode, the namenode picks the + replica(s) with the largest size as the only valid replica(s). (dhruba) + + HADOOP-2825. Deprecated MapOutputLocation.getFile() is removed. + (Amareshwari Sri Ramadasu via ddas) + + HADOOP-2806. Fixes a streaming document. + (Amareshwari Sriramadasu via ddas) + + HADOOP-3008. SocketIOWithTimeout throws InterruptedIOException if the + thread is interrupted while it is waiting. (rangadi) + + HADOOP-3006. Fix wrong packet size reported by DataNode when a block + is being replicated. (rangadi) + + HADOOP-3029. Datanode prints log message "firstbadlink" only if + it detects a bad connection to another datanode in the pipeline. (dhruba) + + HADOOP-3030. Release reserved space for file in InMemoryFileSystem if + checksum reservation fails. (Devaraj Das via cdouglas) + + HADOOP-3036. Fix findbugs warnings in UpgradeUtilities. (Konstantin + Shvachko via cdouglas) + + HADOOP-3025. ChecksumFileSystem supports the delete method with + the recursive flag. (Mahadev Konar via dhruba) + + HADOOP-3012. dfs -mv file to user home directory throws exception if + the user home directory does not exist. (Mahadev Konar via dhruba) + + HADOOP-3066. Should not require superuser privilege to query if hdfs is in + safe mode (jimk) + + HADOOP-3040. If the input line starts with the separator char, the key + is set as empty. (Amareshwari Sriramadasu via ddas) + + HADOOP-3080. Removes flush calls from JobHistory. + (Amareshwari Sriramadasu via ddas) + + HADOOP-3086. Adds the testcase missed during commit of hadoop-3040. + (Amareshwari Sriramadasu via ddas) + + HADOOP-3046. Fix the raw comparators for Text and BytesWritables + to use the provided length rather than recompute it. (omalley) + + HADOOP-3094. Fix BytesWritable.toString to avoid extending the sign bit + (Owen O'Malley via cdouglas) + + HADOOP-3067. DFSInputStream's position read does not close the sockets. + (rangadi) + + HADOOP-3073. close() on SocketInputStream or SocketOutputStream should + close the underlying channel. (rangadi) + + HADOOP-3087. Fixes a problem to do with refreshing of loadHistory.jsp. + (Amareshwari Sriramadasu via ddas) + + HADOOP-3065. Better logging message if the rack location of a datanode + cannot be determined. (Devaraj Das via dhruba) + + HADOOP-3064. Commas in a file path should not be treated as delimiters. + (Hairong Kuang via shv) + + HADOOP-2997. Adds test for non-writable serialier. Also fixes a problem + introduced by HADOOP-2399. (Tom White via ddas) + + HADOOP-3114. Fix TestDFSShell on Windows. (Lohit Vijaya Renu via cdouglas) + + HADOOP-3118. Fix Namenode NPE while loading fsimage after a cluster + upgrade from older disk format. (dhruba) + + HADOOP-3161. Fix FIleUtil.HardLink.getLinkCount on Mac OS. (nigel + via omalley) + + HADOOP-2927. Fix TestDU to acurately calculate the expected file size. + (shv via nigel) + + HADOOP-3123. Fix the native library build scripts to work on Solaris. + (tomwhite via omalley) + + HADOOP-3089. Streaming should accept stderr from task before + first key arrives. (Rick Cox via tomwhite) + + HADOOP-3146. A DFSOutputStream.flush method is renamed as + DFSOutputStream.fsync. (dhruba) + + HADOOP-3165. -put/-copyFromLocal did not treat input file "-" as stdin. 
+ (Lohit Vijayarenu via rangadi) + + HADOOP-3041. Deprecate JobConf.setOutputPath and JobConf.getOutputPath. + Deprecate OutputFormatBase. Add FileOutputFormat. Existing output formats + extending OutputFormatBase, now extend FileOutputFormat. Add the following + APIs in FileOutputFormat: setOutputPath, getOutputPath, getWorkOutputPath. + (Amareshwari Sriramadasu via nigel) + + HADOOP-3083. The fsimage does not store leases. This would have to be + reworked in the next release to support appends. (dhruba) + + HADOOP-3166. Fix an ArrayIndexOutOfBoundsException in the spill thread + and make exception handling more promiscuous to catch this condition. + (cdouglas) + + HADOOP-3050. DataNode sends one and only one block report after + it registers with the namenode. (Hairong Kuang) + + HADOOP-3044. NNBench sets the right configuration for the mapper. + (Hairong Kuang) + + HADOOP-3178. Fix GridMix scripts for small and medium jobs + to handle input paths differently. (Mukund Madhugiri via nigel) + + HADOOP-1911. Fix an infinite loop in DFSClient when all replicas of a + block are bad (cdouglas) + + HADOOP-3157. Fix path handling in DistributedCache and TestMiniMRLocalFS. + (Doug Cutting via rangadi) + + HADOOP-3018. Fix the eclipse plug-in contrib wrt removed deprecated + methods (taton) + + HADOOP-3183. Fix TestJobShell to use 'ls' instead of java.io.File::exists + since cygwin symlinks are unsupported. + (Mahadev konar via cdouglas) + + HADOOP-3175. Fix FsShell.CommandFormat to handle "-" in arguments. + (Edward J. Yoon via rangadi) + + HADOOP-3220. Safemode message corrected. (shv) + + HADOOP-3208. Fix WritableDeserializer to set the Configuration on + deserialized Writables. (Enis Soztutar via cdouglas) + + HADOOP-3224. 'dfs -du /dir' does not return correct size. + (Lohit Vjayarenu via rangadi) + + HADOOP-3223. Fix typo in help message for -chmod. (rangadi) + + HADOOP-1373. checkPath() should ignore case when it compares authoriy. + (Edward J. Yoon via rangadi) + + HADOOP-3204. Fixes a problem to do with ReduceTask's LocalFSMerger not + catching Throwable. (Amar Ramesh Kamat via ddas) + + HADOOP-3229. Report progress when collecting records from the mapper and + the combiner. (Doug Cutting via cdouglas) + + HADOOP-3225. Unwrapping methods of RemoteException should initialize + detailedMassage field. (Mahadev Konar, shv, cdouglas) + + HADOOP-3247. Fix gridmix scripts to use the correct globbing syntax and + change maxentToSameCluster to run the correct number of jobs. + (Runping Qi via cdouglas) + + HADOOP-3242. Fix the RecordReader of SequenceFileAsBinaryInputFormat to + correctly read from the start of the split and not the beginning of the + file. (cdouglas via acmurthy) + + HADOOP-3256. Encodes the job name used in the filename for history files. + (Arun Murthy via ddas) + + HADOOP-3162. Ensure that comma-separated input paths are treated correctly + as multiple input paths. (Amareshwari Sri Ramadasu via acmurthy) + + HADOOP-3263. Ensure that the job-history log file always follows the + pattern of hostname_timestamp_jobid_username_jobname even if username + and/or jobname are not specfied. This helps to avoid wrong assumptions + made about the job-history log filename in jobhistory.jsp. (acmurthy) + + HADOOP-3251. Fixes getFilesystemName in JobTracker and LocalJobRunner to + use FileSystem.getUri instead of FileSystem.getName. (Arun Murthy via ddas) + + HADOOP-3237. Fixes TestDFSShell.testErrOutPut on Windows platform. + (Mahadev Konar via ddas) + + HADOOP-3279. 
TaskTracker checks for SUCCEEDED task status in addition to + COMMIT_PENDING status when it fails maps due to lost map. + (Devaraj Das) + + HADOOP-3286. Prevent collisions in gridmix output dirs by increasing the + granularity of the timestamp. (Runping Qi via cdouglas) + + HADOOP-3285. Fix input split locality when the splits align to + fs blocks. (omalley) + + HADOOP-3372. Fix heap management in streaming tests. (Arun Murthy via + cdouglas) + + HADOOP-3031. Fix javac warnings in test classes. (cdouglas) + + HADOOP-3382. Fix memory leak when files are not cleanly closed (rangadi) + + HADOOP-3322. Fix to push MetricsRecord for rpc metrics. (Eric Yang via + mukund) + +Release 0.16.4 - 2008-05-05 + + BUG FIXES + + HADOOP-3138. DFS mkdirs() should not throw an exception if the directory + already exists. (rangadi via mukund) + + HADOOP-3294. Fix distcp to check the destination length and retry the copy + if it doesn't match the src length. (Tsz Wo (Nicholas), SZE via mukund) + + HADOOP-3186. Fix incorrect permission checkding for mv and renameTo + in HDFS. (Tsz Wo (Nicholas), SZE via mukund) + +Release 0.16.3 - 2008-04-16 + + BUG FIXES + + HADOOP-3010. Fix ConcurrentModificationException in ipc.Server.Responder. + (rangadi) + + HADOOP-3154. Catch all Throwables from the SpillThread in MapTask, rather + than IOExceptions only. (ddas via cdouglas) + + HADOOP-3159. Avoid file system cache being overwritten whenever + configuration is modified. (Tsz Wo (Nicholas), SZE via hairong) + + HADOOP-3139. Remove the consistency check for the FileSystem cache in + closeAll() that causes spurious warnings and a deadlock. + (Tsz Wo (Nicholas), SZE via cdouglas) + + HADOOP-3195. Fix TestFileSystem to be deterministic. + (Tsz Wo (Nicholas), SZE via cdouglas) + + HADOOP-3069. Primary name-node should not truncate image when transferring + it from the secondary. (shv) + + HADOOP-3182. Change permissions of the job-submission directory to 777 + from 733 to ensure sharing of HOD clusters works correctly. (Tsz Wo + (Nicholas), Sze and Amareshwari Sri Ramadasu via acmurthy) + +Release 0.16.2 - 2008-04-02 + + BUG FIXES + + HADOOP-3011. Prohibit distcp from overwriting directories on the + destination filesystem with files. (cdouglas) + + HADOOP-3033. The BlockReceiver thread in the datanode writes data to + the block file, changes file position (if needed) and flushes all by + itself. The PacketResponder thread does not flush block file. (dhruba) + + HADOOP-2978. Fixes the JobHistory log format for counters. + (Runping Qi via ddas) + + HADOOP-2985. Fixes LocalJobRunner to tolerate null job output path. + Also makes the _temporary a constant in MRConstants.java. + (Amareshwari Sriramadasu via ddas) + + HADOOP-3003. FileSystem cache key is updated after a + FileSystem object is created. (Tsz Wo (Nicholas), SZE via dhruba) + + HADOOP-3042. Updates the Javadoc in JobConf.getOutputPath to reflect + the actual temporary path. (Amareshwari Sriramadasu via ddas) + + HADOOP-3007. Tolerate mirror failures while DataNode is replicating + blocks as it used to before. (rangadi) + + HADOOP-2944. Fixes a "Run on Hadoop" wizard NPE when creating a + Location from the wizard. (taton) + + HADOOP-3049. Fixes a problem in MultiThreadedMapRunner to do with + catching RuntimeExceptions. (Alejandro Abdelnur via ddas) + + HADOOP-3039. Fixes a problem to do with exceptions in tasks not + killing jobs. (Amareshwari Sriramadasu via ddas) + + HADOOP-3027. Fixes a problem to do with adding a shutdown hook in + FileSystem. 
(Amareshwari Sriramadasu via ddas) + + HADOOP-3056. Fix distcp when the target is an empty directory by + making sure the directory is created first. (cdouglas and acmurthy + via omalley) + + HADOOP-3070. Protect the trash emptier thread from null pointer + exceptions. (Koji Noguchi via omalley) + + HADOOP-3084. Fix HftpFileSystem to work for zero-lenghth files. + (cdouglas) + + HADOOP-3107. Fix NPE when fsck invokes getListings. (dhruba) + + HADOOP-3104. Limit MultithreadedMapRunner to have a fixed length queue + between the RecordReader and the map threads. (Alejandro Abdelnur via + omalley) + + HADOOP-2833. Do not use "Dr. Who" as the default user in JobClient. + A valid user name is required. (Tsz Wo (Nicholas), SZE via rangadi) + + HADOOP-3128. Throw RemoteException in setPermissions and setOwner of + DistributedFileSystem. (shv via nigel) + +Release 0.16.1 - 2008-03-13 + + INCOMPATIBLE CHANGES + + HADOOP-2869. Deprecate SequenceFile.setCompressionType in favor of + SequenceFile.createWriter, SequenceFileOutputFormat.setCompressionType, + and JobConf.setMapOutputCompressionType. (Arun C Murthy via cdouglas) + Configuration changes to hadoop-default.xml: + deprecated io.seqfile.compression.type + + IMPROVEMENTS + + HADOOP-2371. User guide for file permissions in HDFS. + (Robert Chansler via rangadi) + + HADOOP-3098. Allow more characters in user and group names while + using -chown and -chgrp commands. (rangadi) + + BUG FIXES + + HADOOP-2789. Race condition in IPC Server Responder that could close + connections early. (Raghu Angadi) + + HADOOP-2785. minor. Fix a typo in Datanode block verification + (Raghu Angadi) + + HADOOP-2788. minor. Fix help message for chgrp shell command (Raghu Angadi). + + HADOOP-1188. fstime file is updated when a storage directory containing + namespace image becomes inaccessible. (shv) + + HADOOP-2787. An application can set a configuration variable named + dfs.umask to set the umask that is used by DFS. + (Tsz Wo (Nicholas), SZE via dhruba) + + HADOOP-2780. The default socket buffer size for DataNodes is 128K. + (dhruba) + + HADOOP-2716. Superuser privileges for the Balancer. + (Tsz Wo (Nicholas), SZE via shv) + + HADOOP-2754. Filter out .crc files from local file system listing. + (Hairong Kuang via shv) + + HADOOP-2733. Fix compiler warnings in test code. + (Tsz Wo (Nicholas), SZE via cdouglas) + + HADOOP-2725. Modify distcp to avoid leaving partially copied files at + the destination after encountering an error. (Tsz Wo (Nicholas), SZE + via cdouglas) + + HADOOP-2391. Cleanup job output directory before declaring a job as + SUCCESSFUL. (Amareshwari Sri Ramadasu via ddas) + + HADOOP-2808. Minor fix to FileUtil::copy to mind the overwrite + formal. (cdouglas) + + HADOOP-2683. Moving UGI out of the RPC Server. + (Tsz Wo (Nicholas), SZE via shv) + + HADOOP-2814. Fix for NPE in datanode in unit test TestDataTransferProtocol. + (Raghu Angadi via dhruba) + + HADOOP-2811. Dump of counters in job history does not add comma between + groups. (runping via omalley) + + HADOOP-2735. Enables setting TMPDIR for tasks. + (Amareshwari Sri Ramadasu via ddas) + + HADOOP-2843. Fix protections on map-side join classes to enable derivation. + (cdouglas via omalley) + + HADOOP-2840. Fix gridmix scripts to correctly invoke the java sort through + the proper jar. (Mukund Madhugiri via cdouglas) + + HADOOP-2769. TestNNThroughputBnechmark should not use a fixed port for + the namenode http port. (omalley) + + HADOOP-2852. Update gridmix benchmark to avoid an artifically long tail. 
+ (cdouglas) + + HADOOP-2894. Fix a problem to do with tasktrackers failing to connect to + JobTracker upon reinitialization. (Owen O'Malley via ddas). + + HADOOP-2903. Fix exception generated by Metrics while using pushMetric(). + (girish vaitheeswaran via dhruba) + + HADOOP-2904. Fix to RPC metrics to log the correct host name. + (girish vaitheeswaran via dhruba) + + HADOOP-2918. Improve error logging so that dfs writes failure with + "No lease on file" can be diagnosed. (dhruba) + + HADOOP-2923. Add SequenceFileAsBinaryInputFormat, which was + missed in the commit for HADOOP-2603. (cdouglas via omalley) + + HADOOP-2931. IOException thrown by DFSOutputStream had wrong stack + trace in some cases. (Michael Bieniosek via rangadi) + + HADOOP-2883. Write failures and data corruptions on HDFS files. + The write timeout is back to what it was on 0.15 release. Also, the + datanode flushes the block file buffered output stream before + sending a positive ack for the packet back to the client. (dhruba) + + HADOOP-2756. NPE in DFSClient while closing DFSOutputStreams + under load. (rangadi) + + HADOOP-2958. Fixed FileBench which broke due to HADOOP-2391 which performs + a check for existence of the output directory and a trivial bug in + GenericMRLoadGenerator where min/max word lengths were identical since + they were looking at the same config variables (Chris Douglas via + acmurthy) + + HADOOP-2915. Fixed FileSystem.CACHE so that a username is included + in the cache key. (Tsz Wo (Nicholas), SZE via nigel) + + HADOOP-2813. TestDU unit test uses its own directory to run its + sequence of tests. (Mahadev Konar via dhruba) + +Release 0.16.0 - 2008-02-07 + + INCOMPATIBLE CHANGES + + HADOOP-1245. Use the mapred.tasktracker.tasks.maximum value + configured on each tasktracker when allocating tasks, instead of + the value configured on the jobtracker. InterTrackerProtocol + version changed from 5 to 6. (Michael Bieniosek via omalley) + + HADOOP-1843. Removed code from Configuration and JobConf deprecated by + HADOOP-785 and a minor fix to Configuration.toString. Specifically the + important change is that mapred-default.xml is no longer supported and + Configuration no longer supports the notion of default/final resources. + (acmurthy) + + HADOOP-1302. Remove deprecated abacus code from the contrib directory. + This also fixes a configuration bug in AggregateWordCount, so that the + job now works. (enis) + + HADOOP-2288. Enhance FileSystem API to support access control. + (Tsz Wo (Nicholas), SZE via dhruba) + + HADOOP-2184. RPC Support for user permissions and authentication. + (Raghu Angadi via dhruba) + + HADOOP-2185. RPC Server uses any available port if the specified + port is zero. Otherwise it uses the specified port. Also combines + the configuration attributes for the servers' bind address and + port from "x.x.x.x" and "y" to "x.x.x.x:y". 
+ Deprecated configuration variables: + dfs.info.bindAddress + dfs.info.port + dfs.datanode.bindAddress + dfs.datanode.port + dfs.datanode.info.bindAdress + dfs.datanode.info.port + dfs.secondary.info.bindAddress + dfs.secondary.info.port + mapred.job.tracker.info.bindAddress + mapred.job.tracker.info.port + mapred.task.tracker.report.bindAddress + tasktracker.http.bindAddress + tasktracker.http.port + New configuration variables (post HADOOP-2404): + dfs.secondary.http.address + dfs.datanode.address + dfs.datanode.http.address + dfs.http.address + mapred.job.tracker.http.address + mapred.task.tracker.report.address + mapred.task.tracker.http.address + (Konstantin Shvachko via dhruba) + + HADOOP-2401. Only the current leaseholder can abandon a block for + a HDFS file. ClientProtocol version changed from 20 to 21. + (Tsz Wo (Nicholas), SZE via dhruba) + + HADOOP-2381. Support permission information in FileStatus. Client + Protocol version changed from 21 to 22. (Raghu Angadi via dhruba) + + HADOOP-2110. Block report processing creates fewer transient objects. + Datanode Protocol version changed from 10 to 11. + (Sanjay Radia via dhruba) + + HADOOP-2567. Add FileSystem#getHomeDirectory(), which returns the + user's home directory in a FileSystem as a fully-qualified path. + FileSystem#getWorkingDirectory() is also changed to return a + fully-qualified path, which can break applications that attempt + to, e.g., pass LocalFileSystem#getWorkingDir().toString() directly + to java.io methods that accept file names. (cutting) + + HADOOP-2514. Change trash feature to maintain a per-user trash + directory, named ".Trash" in the user's home directory. The + "fs.trash.root" parameter is no longer used. Full source paths + are also no longer reproduced within the trash. + + HADOOP-2012. Periodic data verification on Datanodes. + (Raghu Angadi via dhruba) + + HADOOP-1707. The DFSClient does not use a local disk file to cache + writes to a HDFS file. Changed Data Transfer Version from 7 to 8. + (dhruba) + + HADOOP-2652. Fix permission issues for HftpFileSystem. This is an + incompatible change since distcp may not be able to copy files + from cluster A (compiled with this patch) to cluster B (compiled + with previous versions). (Tsz Wo (Nicholas), SZE via dhruba) + + NEW FEATURES + + HADOOP-1857. Ability to run a script when a task fails to capture stack + traces. (Amareshwari Sri Ramadasu via ddas) + + HADOOP-2299. Defination of a login interface. A simple implementation for + Unix users and groups. (Hairong Kuang via dhruba) + + HADOOP-1652. A utility to balance data among datanodes in a HDFS cluster. + (Hairong Kuang via dhruba) + + HADOOP-2085. A library to support map-side joins of consistently + partitioned and sorted data sets. (Chris Douglas via omalley) + + HADOOP-2336. Shell commands to modify file permissions. (rangadi) + + HADOOP-1298. Implement file permissions for HDFS. + (Tsz Wo (Nicholas) & taton via cutting) + + HADOOP-2447. HDFS can be configured to limit the total number of + objects (inodes and blocks) in the file system. (dhruba) + + HADOOP-2487. Added an option to get statuses for all submitted/run jobs. + This information can be used to develop tools for analysing jobs. + (Amareshwari Sri Ramadasu via acmurthy) + + HADOOP-1873. Implement user permissions for Map/Reduce framework. + (Hairong Kuang via shv) + + HADOOP-2532. Add to MapFile a getClosest method that returns the key + that comes just before if the key is not present. (stack via tomwhite) + + HADOOP-1883. 
Add versioning to Record I/O. (Vivek Ratan via ddas) + + HADOOP-2603. Add SequenceFileAsBinaryInputFormat, which reads + sequence files as BytesWritable/BytesWritable regardless of the + key and value types used to write the file. (cdouglas via omalley) + + HADOOP-2367. Add ability to profile a subset of map/reduce tasks and fetch + the result to the local filesystem of the submitting application. Also + includes a general IntegerRanges extension to Configuration for setting + positive, ranged parameters. (Owen O'Malley via cdouglas) + + IMPROVEMENTS + + HADOOP-2045. Change committer list on website to a table, so that + folks can list their organization, timezone, etc. (cutting) + + HADOOP-2058. Facilitate creating new datanodes dynamically in + MiniDFSCluster. (Hairong Kuang via dhruba) + + HADOOP-1855. fsck verifies block placement policies and reports + violations. (Konstantin Shvachko via dhruba) + + HADOOP-1604. A system administrator can finalize namenode upgrades + without running the cluster. (Konstantin Shvachko via dhruba) + + HADOOP-1839. Link-ify the Pending/Running/Complete/Killed grid in + jobdetails.jsp to help quickly narrow down and see categorized TIPs' + details via jobtasks.jsp. (Amar Kamat via acmurthy) + + HADOOP-1210. Log counters in job history. (Owen O'Malley via ddas) + + HADOOP-1912. Datanode has two new commands COPY and REPLACE. These are + needed for supporting data rebalance. (Hairong Kuang via dhruba) + + HADOOP-2086. This patch adds the ability to add dependencies to a job + (run via JobControl) after construction. (Adrian Woodhead via ddas) + + HADOOP-1185. Support changing the logging level of a server without + restarting the server. (Tsz Wo (Nicholas), SZE via dhruba) + + HADOOP-2134. Remove developer-centric requirements from overview.html and + keep it end-user focussed, specifically sections related to subversion and + building Hadoop. (Jim Kellerman via acmurthy) + + HADOOP-1989. Support simulated DataNodes. This helps creating large virtual + clusters for testing purposes. (Sanjay Radia via dhruba) + + HADOOP-1274. Support different number of mappers and reducers per + TaskTracker to allow administrators to better configure and utilize + heterogeneous clusters. + Configuration changes to hadoop-default.xml: + add mapred.tasktracker.map.tasks.maximum (default value of 2) + add mapred.tasktracker.reduce.tasks.maximum (default value of 2) + remove mapred.tasktracker.tasks.maximum (deprecated for 0.16.0) + (Amareshwari Sri Ramadasu via acmurthy) + + HADOOP-2104. Adds a description to the ant targets. This makes the + output of "ant -projecthelp" sensible. (Chris Douglas via ddas) + + HADOOP-2127. Added a pipes sort example to benchmark trivial pipes + application versus trivial java application. (omalley via acmurthy) + + HADOOP-2113. A new shell command "dfs -text" to view the contents of + a gzipped or SequenceFile. (Chris Douglas via dhruba) + + HADOOP-2207. Add a "package" target for contrib modules that + permits each to determine what files are copied into release + builds. (stack via cutting) + + HADOOP-1984. Makes the backoff for failed fetches exponential. + Earlier, it was a random backoff from an interval. + (Amar Kamat via ddas) + + HADOOP-1327. Include website documentation for streaming. (Rob Weltman + via omalley) + + HADOOP-2000. Rewrite NNBench to measure namenode performance accurately. + It now uses the map-reduce framework for load generation. + (Mukund Madhugiri via dhruba) + + HADOOP-2248. Speeds up the framework w.r.t Counters. 
Also has API + updates to the Counters part. (Owen O'Malley via ddas) + + HADOOP-2326. The initial block report at Datanode startup time has + a random backoff period. (Sanjay Radia via dhruba) + + HADOOP-2432. HDFS includes the name of the file while throwing + "File does not exist" exception. (Jim Kellerman via dhruba) + + HADOOP-2457. Added a 'forrest.home' property to the 'docs' target in + build.xml. (acmurthy) + + HADOOP-2149. A new benchmark for three name-node operation: file create, + open, and block report, to evaluate the name-node performance + for optimizations or new features. (Konstantin Shvachko via shv) + + HADOOP-2466. Change FileInputFormat.computeSplitSize to a protected + non-static method to allow sub-classes to provide alternate + implementations. (Alejandro Abdelnur via acmurthy) + + HADOOP-2425. Change TextOutputFormat to handle Text specifically for better + performance. Make NullWritable implement Comparable. Make TextOutputFormat + treat NullWritable like null. (omalley) + + HADOOP-1719. Improves the utilization of shuffle copier threads. + (Amar Kamat via ddas) + + HADOOP-2390. Added documentation for user-controls for intermediate + map-outputs & final job-outputs and native-hadoop libraries. (acmurthy) + + HADOOP-1660. Add the cwd of the map/reduce task to the java.library.path + of the child-jvm to support loading of native libraries distributed via + the DistributedCache. (acmurthy) + + HADOOP-2285. Speeds up TextInputFormat. Also includes updates to the + Text API. (Owen O'Malley via cdouglas) + + HADOOP-2233. Adds a generic load generator for modeling MR jobs. (cdouglas) + + HADOOP-2369. Adds a set of scripts for simulating a mix of user map/reduce + workloads. (Runping Qi via cdouglas) + + HADOOP-2547. Removes use of a 'magic number' in build.xml. + (Hrishikesh via nigel) + + HADOOP-2268. Fix org.apache.hadoop.mapred.jobcontrol classes to use the + List/Map interfaces rather than concrete ArrayList/HashMap classes + internally. (Adrian Woodhead via acmurthy) + + HADOOP-2406. Add a benchmark for measuring read/write performance through + the InputFormat interface, particularly with compression. (cdouglas) + + HADOOP-2131. Allow finer-grained control over speculative-execution. Now + users can set it for maps and reduces independently. + Configuration changes to hadoop-default.xml: + deprecated mapred.speculative.execution + add mapred.map.tasks.speculative.execution + add mapred.reduce.tasks.speculative.execution + (Amareshwari Sri Ramadasu via acmurthy) + + HADOOP-1965. Interleave sort/spill in teh map-task along with calls to the + Mapper.map method. This is done by splitting the 'io.sort.mb' buffer into + two and using one half for collecting map-outputs and the other half for + sort/spill. (Amar Kamat via acmurthy) + + HADOOP-2464. Unit tests for chmod, chown, and chgrp using DFS. + (Raghu Angadi) + + HADOOP-1876. Persist statuses of completed jobs in HDFS so that the + JobClient can query and get information about decommissioned jobs and also + across JobTracker restarts. + Configuration changes to hadoop-default.xml: + add mapred.job.tracker.persist.jobstatus.active (default value of false) + add mapred.job.tracker.persist.jobstatus.hours (default value of 0) + add mapred.job.tracker.persist.jobstatus.dir (default value of + /jobtracker/jobsInfo) + (Alejandro Abdelnur via acmurthy) + + HADOOP-2077. Added version and build information to STARTUP_MSG for all + hadoop daemons to aid error-reporting, debugging etc. (acmurthy) + + HADOOP-2398. 
Additional instrumentation for NameNode and RPC server. + Add support for accessing instrumentation statistics via JMX. + (Sanjay radia via dhruba) + + HADOOP-2449. A return of the non-MR version of NNBench. + (Sanjay Radia via shv) + + HADOOP-1989. Remove 'datanodecluster' command from bin/hadoop. + (Sanjay Radia via shv) + + HADOOP-1742. Improve JavaDoc documentation for ClientProtocol, DFSClient, + and FSNamesystem. (Konstantin Shvachko) + + HADOOP-2298. Add Ant target for a binary-only distribution. + (Hrishikesh via nigel) + + HADOOP-2509. Add Ant target for Rat report (Apache license header + reports). (Hrishikesh via nigel) + + HADOOP-2469. WritableUtils.clone should take a Configuration + instead of a JobConf. (stack via omalley) + + HADOOP-2659. Introduce superuser permissions for admin operations. + (Tsz Wo (Nicholas), SZE via shv) + + HADOOP-2596. Added a SequenceFile.createWriter api which allows the user + to specify the blocksize, replication factor and the buffersize to be + used for the underlying HDFS file. (Alejandro Abdelnur via acmurthy) + + HADOOP-2431. Test HDFS File Permissions. (Hairong Kuang via shv) + + HADOOP-2232. Add an option to disable Nagle's algorithm in the IPC stack. + (Clint Morgan via cdouglas) + + HADOOP-2342. Created a micro-benchmark for measuring + local-file versus hdfs reads. (Owen O'Malley via nigel) + + HADOOP-2529. First version of HDFS User Guide. (Raghu Angadi) + + HADOOP-2690. Add jar-test target to build.xml, separating compilation + and packaging of the test classes. (Enis Soztutar via cdouglas) + + OPTIMIZATIONS + + HADOOP-1898. Release the lock protecting the last time of the last stack + dump while the dump is happening. (Amareshwari Sri Ramadasu via omalley) + + HADOOP-1900. Makes the heartbeat and task event queries interval + dependent on the cluster size. (Amareshwari Sri Ramadasu via ddas) + + HADOOP-2208. Counter update frequency (from TaskTracker to JobTracker) is + capped at 1 minute. (Amareshwari Sri Ramadasu via ddas) + + HADOOP-2284. Reduce the number of progress updates during the sorting in + the map task. (Amar Kamat via ddas) + + BUG FIXES + + HADOOP-2583. Fixes a bug in the Eclipse plug-in UI to edit locations. + Plug-in version is now synchronized with Hadoop version. + + HADOOP-2100. Remove faulty check for existence of $HADOOP_PID_DIR and let + 'mkdir -p' check & create it. (Michael Bieniosek via acmurthy) + + HADOOP-1642. Ensure jobids generated by LocalJobRunner are unique to + avoid collissions and hence job-failures. (Doug Cutting via acmurthy) + + HADOOP-2096. Close open file-descriptors held by streams while localizing + job.xml in the JobTracker and while displaying it on the webui in + jobconf.jsp. (Amar Kamat via acmurthy) + + HADOOP-2098. Log start & completion of empty jobs to JobHistory, which + also ensures that we close the file-descriptor of the job's history log + opened during job-submission. (Amar Kamat via acmurthy) + + HADOOP-2112. Adding back changes to build.xml lost while reverting + HADOOP-1622 i.e. http://svn.apache.org/viewvc?view=rev&revision=588771. + (acmurthy) + + HADOOP-2089. Fixes the command line argument handling to handle multiple + -cacheArchive in Hadoop streaming. (Lohit Vijayarenu via ddas) + + HADOOP-2071. Fix StreamXmlRecordReader to use a BufferedInputStream + wrapped over the DFSInputStream since mark/reset aren't supported by + DFSInputStream anymore. (Lohit Vijayarenu via acmurthy) + + HADOOP-1348. Allow XML comments inside configuration files. 
+ (Rajagopal Natarajan and Enis Soztutar via enis) + + HADOOP-1952. Improve handling of invalid, user-specified classes while + configuring streaming jobs such as combiner, input/output formats etc. + Now invalid options are caught, logged and jobs are failed early. (Lohit + Vijayarenu via acmurthy) + + HADOOP-2151. FileSystem.globPaths validates the list of Paths that + it returns. (Lohit Vijayarenu via dhruba) + + HADOOP-2121. Cleanup DFSOutputStream when the stream encountered errors + when Datanodes became full. (Raghu Angadi via dhruba) + + HADOOP-1130. The FileSystem.closeAll() method closes all existing + DFSClients. (Chris Douglas via dhruba) + + HADOOP-2204. DFSTestUtil.waitReplication was not waiting for all replicas + to get created, thus causing unit test failure. + (Raghu Angadi via dhruba) + + HADOOP-2078. A zero size file may have no blocks associated with it. + (Konstantin Shvachko via dhruba) + + HADOOP-2212. ChecksumFileSystem.getSumBufferSize might throw + java.lang.ArithmeticException. The fix is to initialize bytesPerChecksum + to 0. (Michael Bieniosek via ddas) + + HADOOP-2216. Fix jobtasks.jsp to ensure that it first collects the + taskids which satisfy the filtering criteria and then use that list to + print out only the required task-reports, previously it was oblivious to + the filtering and hence used the wrong index into the array of task-reports. + (Amar Kamat via acmurthy) + + HADOOP-2272. Fix findbugs target to reflect changes made to the location + of the streaming jar file by HADOOP-2207. (Adrian Woodhead via nigel) + + HADOOP-2244. Fixes the MapWritable.readFields to clear the instance + field variable every time readFields is called. (Michael Stack via ddas). + + HADOOP-2245. Fixes LocalJobRunner to include a jobId in the mapId. Also, + adds a testcase for JobControl. (Adrian Woodhead via ddas). + + HADOOP-2275. Fix erroneous detection of corrupted file when namenode + fails to allocate any datanodes for newly allocated block. + (Dhruba Borthakur via dhruba) + + HADOOP-2256. Fix a bug in the namenode that could cause it to encounter + an infinite loop while deleting excess replicas that were created by + block rebalancing. (Hairong Kuang via dhruba) + + HADOOP-2209. SecondaryNamenode process exits if it encounters exceptions + that it cannot handle. (Dhruba Borthakur via dhruba) + + HADOOP-2314. Prevent TestBlockReplacement from occasionally getting + into an infinite loop. (Hairong Kuang via dhruba) + + HADOOP-2300. This fixes a bug where mapred.tasktracker.tasks.maximum + would be ignored even if it was set in hadoop-site.xml. + (Amareshwari Sri Ramadasu via ddas) + + HADOOP-2349. Improve code layout in file system transaction logging code. + (Tsz Wo (Nicholas), SZE via dhruba) + + HADOOP-2368. Fix unit tests on Windows. + (Tsz Wo (Nicholas), SZE via dhruba) + + HADOOP-2363. This fix allows running multiple instances of the unit test + in parallel. The bug was introduced in HADOOP-2185 that changed + port-rolling behaviour. (Konstantin Shvachko via dhruba) + + HADOOP-2271. Fix chmod task to be non-parallel. (Adrian Woodhead via + omalley) + + HADOOP-2313. Fail the build if building libhdfs fails. (nigel via omalley) + + HADOOP-2359. Remove warning for interrupted exception when closing down + minidfs. (dhruba via omalley) + + HADOOP-1841. Prevent slow clients from consuming threads in the NameNode. + (dhruba) + + HADOOP-2323. JobTracker.close() should not print stack traces for + normal exit. (jimk via cutting) + + HADOOP-2376. 
Prevents sort example from overriding the number of maps. + (Owen O'Malley via ddas) + + HADOOP-2434. FSDatasetInterface read interface causes HDFS reads to occur + in 1 byte chunks, causing performance degradation. + (Raghu Angadi via dhruba) + + HADOOP-2459. Fix package target so that src/docs/build files are not + included in the release. (nigel) + + HADOOP-2215. Fix documentation in cluster_setup.html & + mapred_tutorial.html to reflect that mapred.tasktracker.tasks.maximum has + been superseded by mapred.tasktracker.{map|reduce}.tasks.maximum. + (Amareshwari Sri Ramadasu via acmurthy) + + HADOOP-2352. Remove AC_CHECK_LIB for libz and liblzo to ensure that + libhadoop.so doesn't have a dependency on them. (acmurthy) + + HADOOP-2453. Fix the configuration for wordcount-simple example in Hadoop + Pipes which currently produces an XML parsing error. (Amareshwari Sri + Ramadasu via acmurthy) + + HADOOP-2476. Unit test failure while reading permission bits of local + file system (on Windows) fixed. (Raghu Angadi via dhruba) + + HADOOP-2247. Fine-tune the strategies for killing mappers and reducers + due to failures while fetching map-outputs. Now the map-completion times + and number of currently running reduces are taken into account by the + JobTracker before killing the mappers, while the progress made by the + reducer and the number of fetch-failures vis-a-vis total number of + fetch-attempts are taken into account before the reducer kills itself. + (Amar Kamat via acmurthy) + + HADOOP-2452. Fix eclipse plug-in build.xml to refer to the right + location where hadoop-*-core.jar is generated. (taton) + + HADOOP-2492. Additional debugging in the rpc server to better + diagnose ConcurrentModificationException. (dhruba) + + HADOOP-2344. Enhance the utility for executing shell commands to read the + stdout/stderr streams while waiting for the command to finish (to free up + the buffers). Also, this patch throws away stderr of the DF utility. + @deprecated + org.apache.hadoop.fs.ShellCommand for org.apache.hadoop.util.Shell + org.apache.hadoop.util.ShellUtil for + org.apache.hadoop.util.Shell.ShellCommandExecutor + (Amar Kamat via acmurthy) + + HADOOP-2511. Fix a javadoc warning in org.apache.hadoop.util.Shell + introduced by HADOOP-2344. (acmurthy) + + HADOOP-2442. Fix TestLocalFileSystemPermission.testLocalFSsetOwner + to work on more platforms. (Raghu Angadi via nigel) + + HADOOP-2488. Fix a regression in random read performance. + (Michael Stack via rangadi) + + HADOOP-2523. Fix TestDFSShell.testFilePermissions on Windows. + (Raghu Angadi via nigel) + + HADOOP-2535. Removed support for deprecated mapred.child.heap.size and + fixed some indentation issues in TaskRunner. (acmurthy) + Configuration changes to hadoop-default.xml: + remove mapred.child.heap.size + + HADOOP-2512. Fix error stream handling in Shell. Use exit code to + detect shell command errors in RawLocalFileSystem. (Raghu Angadi) + + HADOOP-2446. Fixes TestHDFSServerPorts and TestMRServerPorts so they + do not rely on statically configured ports and cleanup better. (nigel) + + HADOOP-2537. Make build process compatible with Ant 1.7.0. + (Hrishikesh via nigel) + + HADOOP-1281. Ensure running tasks of completed map TIPs (e.g. speculative + tasks) are killed as soon as the TIP completed. (acmurthy) + + HADOOP-2571. Suppress a spurious warning in test code. (cdouglas) + + HADOOP-2481. NNBench reports its progress periodically. 
+ (Hairong Kuang via dhruba) + + HADOOP-2601. Start name-node on a free port for TestNNThroughputBenchmark. + (Konstantin Shvachko) + + HADOOP-2494. Set +x on contrib/*/bin/* in packaged tar bundle. + (stack via tomwhite) + + HADOOP-2605. Remove bogus leading slash in task-tracker report bindAddress. + (Konstantin Shvachko) + + HADOOP-2620. Trivial. 'bin/hadoop fs -help' did not list chmod, chown, and + chgrp. (Raghu Angadi) + + HADOOP-2614. The DFS WebUI accesses are configured to be from the user + specified by dfs.web.ugi. (Tsz Wo (Nicholas), SZE via dhruba) + + HADOOP-2543. Implement a "no-permission-checking" mode for smooth + upgrade from a pre-0.16 install of HDFS. + (Hairong Kuang via dhruba) + + HADOOP-290. A DataNode log message now prints the target of a replication + request correctly. (dhruba) + + HADOOP-2538. Redirect to a warning, if plaintext parameter is true but + the filter parameter is not given in TaskLogServlet. + (Michael Bieniosek via enis) + + HADOOP-2582. Prevent 'bin/hadoop fs -copyToLocal' from creating + zero-length files when the src does not exist. + (Lohit Vijayarenu via cdouglas) + + HADOOP-2189. Incrementing user counters should count as progress. (ddas) + + HADOOP-2649. The NameNode periodically computes replication work for + the datanodes. The periodicity of this computation is now configurable. + (dhruba) + + HADOOP-2549. Correct disk size computation so that data-nodes could switch + to other local drives if current is full. (Hairong Kuang via shv) + + HADOOP-2633. Fsck should call name-node methods directly rather than + through rpc. (Tsz Wo (Nicholas), SZE via shv) + + HADOOP-2687. Modify a few log message generated by dfs client to be + logged only at INFO level. (stack via dhruba) + + HADOOP-2402. Fix BlockCompressorStream to ensure it buffers data before + sending it down to the compressor so that each write call doesn't + compress. (Chris Douglas via acmurthy) + + HADOOP-2645. The Metrics initialization code does not throw + exceptions when servers are restarted by MiniDFSCluster. + (Sanjay Radia via dhruba) + + HADOOP-2691. Fix a race condition that was causing the DFSClient + to erroneously remove a good datanode from a pipeline that actually + had another datanode that was bad. (dhruba) + + HADOOP-1195. All code in FSNamesystem checks the return value + of getDataNode for null before using it. (dhruba) + + HADOOP-2640. Fix a bug in MultiFileSplitInputFormat that was always + returning 1 split in some circumstances. (Enis Soztutar via nigel) + + HADOOP-2626. Fix paths with special characters to work correctly + with the local filesystem. (Thomas Friol via cutting) + + HADOOP-2646. Fix SortValidator to work with fully-qualified + working directories. (Arun C Murthy via nigel) + + HADOOP-2092. Added a ping mechanism to the pipes' task to periodically + check if the parent Java task is running, and exit if the parent isn't + alive and responding. (Amareshwari Sri Ramadasu via acmurthy) + + HADOOP-2714. TestDecommission failed on windows because the replication + request was timing out. (dhruba) + + HADOOP-2576. Namenode performance degradation over time triggered by + large heartbeat interval. (Raghu Angadi) + + HADOOP-2713. TestDatanodeDeath failed on windows because the replication + request was timing out. (dhruba) + + HADOOP-2639. Fixes a problem to do with incorrect maintenance of values + for runningMapTasks/runningReduceTasks. (Amar Kamat and Arun Murthy + via ddas) + + HADOOP-2723. 
Fixed the check for checking whether to do user task + profiling. (Amareshwari Sri Ramadasu via omalley) + + HADOOP-2734. Link forrest docs to new http://hadoop.apache.org + (Doug Cutting via nigel) + + HADOOP-2641. Added Apache license headers to 95 files. (nigel) + + HADOOP-2732. Fix bug in path globbing. (Hairong Kuang via nigel) + + HADOOP-2404. Fix backwards compatability with hadoop-0.15 configuration + files that was broken by HADOOP-2185. (omalley) + + HADOOP-2755. Fix fsck performance degradation because of permissions + issue. (Tsz Wo (Nicholas), SZE via dhruba) + + HADOOP-2768. Fix performance regression caused by HADOOP-1707. + (dhruba borthakur via nigel) + + HADOOP-3108. Fix NPE in setPermission and setOwner. (shv) + +Release 0.15.3 - 2008-01-18 + + BUG FIXES + + HADOOP-2562. globPaths supports {ab,cd}. (Hairong Kuang via dhruba) + + HADOOP-2540. fsck reports missing blocks incorrectly. (dhruba) + + HADOOP-2570. "work" directory created unconditionally, and symlinks + created from the task cwds. + + HADOOP-2574. Fixed mapred_tutorial.xml to correct minor errors with the + WordCount examples. (acmurthy) + +Release 0.15.2 - 2008-01-02 + + BUG FIXES + + HADOOP-2246. Moved the changelog for HADOOP-1851 from the NEW FEATURES + section to the INCOMPATIBLE CHANGES section. (acmurthy) + + HADOOP-2238. Fix TaskGraphServlet so that it sets the content type of + the response appropriately. (Paul Saab via enis) + + HADOOP-2129. Fix so that distcp works correctly when source is + HDFS but not the default filesystem. HDFS paths returned by the + listStatus() method are now fully-qualified. (cutting) + + HADOOP-2378. Fixes a problem where the last task completion event would + get created after the job completes. (Alejandro Abdelnur via ddas) + + HADOOP-2228. Checks whether a job with a certain jobId is already running + and then tries to create the JobInProgress object. + (Johan Oskarsson via ddas) + + HADOOP-2422. dfs -cat multiple files fail with 'Unable to write to + output stream'. (Raghu Angadi via dhruba) + + HADOOP-2460. When the namenode encounters ioerrors on writing a + transaction log, it stops writing new transactions to that one. + (Raghu Angadi via dhruba) + + HADOOP-2227. Use the LocalDirAllocator uniformly for handling all of the + temporary storage required for a given task. It also implies that + mapred.local.dir.minspacestart is handled by checking if there is enough + free-space on any one of the available disks. (Amareshwari Sri Ramadasu + via acmurthy) + + HADOOP-2437. Fix the LocalDirAllocator to choose the seed for the + round-robin disk selections randomly. This helps in spreading data across + multiple partitions much better. (acmurhty) + + HADOOP-2486. When the list of files from the InMemoryFileSystem is obtained + for merging, this patch will ensure that only those files whose checksums + have also got created (renamed) are returned. (ddas) + + HADOOP-2456. Hardcode English locale to prevent NumberFormatException + from occurring when starting the NameNode with certain locales. + (Matthias Friedrich via nigel) + + IMPROVEMENTS + + HADOOP-2160. Remove project-level, non-user documentation from + releases, since it's now maintained in a separate tree. (cutting) + + HADOOP-1327. Add user documentation for streaming. (cutting) + + HADOOP-2382. Add hadoop-default.html to subversion. (cutting) + + HADOOP-2158. hdfsListDirectory calls FileSystem.listStatus instead + of FileSystem.listPaths. 
This reduces the number of RPC calls on the + namenode, thereby improving scalability. (Christian Kunz via dhruba) + +Release 0.15.1 - 2007-11-27 + + INCOMPATIBLE CHANGES + + HADOOP-713. Reduce CPU usage on namenode while listing directories. + FileSystem.listPaths does not return the size of the entire subtree. + Introduced a new API ClientProtocol.getContentLength that returns the + size of the subtree. (Dhruba Borthakur via dhruba) + + IMPROVEMENTS + + HADOOP-1917. Addition of guides/tutorial for better overall + documentation for Hadoop. Specifically: + * quickstart.html is targetted towards first-time users and helps them + setup a single-node cluster and play with Hadoop. + * cluster_setup.html helps admins to configure and setup non-trivial + hadoop clusters. + * mapred_tutorial.html is a comprehensive Map-Reduce tutorial. + (acmurthy) + + BUG FIXES + + HADOOP-2174. Removed the unnecessary Reporter.setStatus call from + FSCopyFilesMapper.close which led to a NPE since the reporter isn't valid + in the close method. (Chris Douglas via acmurthy) + + HADOOP-2172. Restore performance of random access to local files + by caching positions of local input streams, avoiding a system + call. (cutting) + + HADOOP-2205. Regenerate the Hadoop website since some of the changes made + by HADOOP-1917 weren't correctly copied over to the trunk/docs directory. + Also fixed a couple of minor typos and broken links. (acmurthy) + +Release 0.15.0 - 2007-11-2 + + INCOMPATIBLE CHANGES + + HADOOP-1708. Make files appear in namespace as soon as they are + created. (Dhruba Borthakur via dhruba) + + HADOOP-999. A HDFS Client immediately informs the NameNode of a new + file creation. ClientProtocol version changed from 14 to 15. + (Tsz Wo (Nicholas), SZE via dhruba) + + HADOOP-932. File locking interfaces and implementations (that were + earlier deprecated) are removed. Client Protocol version changed + from 15 to 16. (Raghu Angadi via dhruba) + + HADOOP-1621. FileStatus is now a concrete class and FileSystem.listPaths + is deprecated and replaced with listStatus. (Chris Douglas via omalley) + + HADOOP-1656. The blockSize of a file is stored persistently in the file + inode. (Dhruba Borthakur via dhruba) + + HADOOP-1838. The blocksize of files created with an earlier release is + set to the default block size. (Dhruba Borthakur via dhruba) + + HADOOP-785. Add support for 'final' Configuration parameters, + removing support for 'mapred-default.xml', and changing + 'hadoop-site.xml' to not override other files. Now folks should + generally use 'hadoop-site.xml' for all configurations. Values + with a 'final' tag may not be overridden by subsequently loaded + configuration files, e.g., by jobs. (Arun C. Murthy via cutting) + + HADOOP-1846. DatanodeReport in ClientProtocol can report live + datanodes, dead datanodes or all datanodes. Client Protocol version + changed from 17 to 18. (Hairong Kuang via dhruba) + + HADOOP-1851. Permit specification of map output compression type + and codec, independent of the final output's compression + parameters. (Arun C Murthy via cutting) + + HADOOP-1819. Jobtracker cleanups, including binding ports before + clearing state directories, so that inadvertently starting a + second jobtracker doesn't trash one that's already running. Removed + method JobTracker.getTracker() because the static variable, which + stored the value caused initialization problems. + (omalley via cutting) + + NEW FEATURES + + HADOOP-89. 
A client can access file data even before the creator + has closed the file. Introduce a new command "tail" from dfs shell. + (Dhruba Borthakur via dhruba) + + HADOOP-1636. Allow configuration of the number of jobs kept in + memory by the JobTracker. (Michael Bieniosek via omalley) + + HADOOP-1667. Reorganize CHANGES.txt into sections to make it + easier to read. Also remove numbering, to make merging easier. + (cutting) + + HADOOP-1610. Add metrics for failed tasks. + (Devaraj Das via tomwhite) + + HADOOP-1767. Add "bin/hadoop job -list" sub-command. (taton via cutting) + + HADOOP-1351. Add "bin/hadoop job [-fail-task|-kill-task]" sub-commands + to terminate a particular task-attempt. (Enis Soztutar via acmurthy) + + HADOOP-1880. SleepJob : An example job that sleeps at each map and + reduce task. (enis) + + HADOOP-1809. Add a link in web site to #hadoop IRC channel. (enis) + + HADOOP-1894. Add percentage graphs and mapred task completion graphs + to Web User Interface. Users not using Firefox may install a plugin to + their browsers to see svg graphics. (enis) + + HADOOP-1914. Introduce a new NamenodeProtocol to allow secondary + namenodes and rebalancing processes to communicate with a primary + namenode. (Hairong Kuang via dhruba) + + HADOOP-1963. Add a FileSystem implementation for the Kosmos + Filesystem (KFS). (Sriram Rao via cutting) + + HADOOP-1822. Allow the specialization and configuration of socket + factories. Provide a StandardSocketFactory, and a SocksSocketFactory to + allow the use of SOCKS proxies. (taton). + + HADOOP-1968. FileSystem supports wildcard input syntax "{ }". + (Hairong Kuang via dhruba) + + HADOOP-2566. Add globStatus method to the FileSystem interface + and deprecate globPath and listPath. (Hairong Kuang via hairong) + + OPTIMIZATIONS + + HADOOP-1910. Reduce the number of RPCs that DistributedFileSystem.create() + makes to the namenode. (Raghu Angadi via dhruba) + + HADOOP-1565. Reduce memory usage of NameNode by replacing + TreeMap in HDFS Namespace with ArrayList. + (Dhruba Borthakur via dhruba) + + HADOOP-1743. Change DFS INode from a nested class to standalone + class, with specialized subclasses for directories and files, to + save memory on the namenode. (Konstantin Shvachko via cutting) + + HADOOP-1759. Change file name in INode from String to byte[], + saving memory on the namenode. (Konstantin Shvachko via cutting) + + HADOOP-1766. Save memory in namenode by having BlockInfo extend + Block, and replace many uses of Block with BlockInfo. + (Konstantin Shvachko via cutting) + + HADOOP-1687. Save memory in namenode by optimizing BlockMap + representation. (Konstantin Shvachko via cutting) + + HADOOP-1774. Remove use of INode.parent in Block CRC upgrade. + (Raghu Angadi via dhruba) + + HADOOP-1788. Increase the buffer size on the Pipes command socket. + (Amareshwari Sri Ramadasu and Christian Kunz via omalley) + + BUG FIXES + + HADOOP-1946. The Datanode code does not need to invoke du on + every heartbeat. (Hairong Kuang via dhruba) + + HADOOP-1935. Fix a NullPointerException in internalReleaseCreate. + (Dhruba Borthakur) + + HADOOP-1933. The nodes listed in include and exclude files + are always listed in the datanode report. + (Raghu Angadi via dhruba) + + HADOOP-1953. The job tracker should wait beteween calls to try and delete + the system directory (Owen O'Malley via devaraj) + + HADOOP-1932. TestFileCreation fails with message saying filestatus.dat + is of incorrect size. (Dhruba Borthakur via dhruba) + + HADOOP-1573. 
Support for 0 reducers in PIPES. + (Owen O'Malley via devaraj) + + HADOOP-1500. Fix typographical errors in the DFS WebUI. + (Nigel Daley via dhruba) + + HADOOP-1076. Periodic checkpoint can continue even if an earlier + checkpoint encountered an error. (Dhruba Borthakur via dhruba) + + HADOOP-1887. The Namenode encounters an ArrayIndexOutOfBoundsException + while listing a directory that had a file that was + being actively written to. (Dhruba Borthakur via dhruba) + + HADOOP-1904. The Namenode encounters an exception because the + list of blocks per datanode-descriptor was corrupted. + (Konstantin Shvachko via dhruba) + + HADOOP-1762. The Namenode fsimage does not contain a list of + Datanodes. (Raghu Angadi via dhruba) + + HADOOP-1890. Removed debugging prints introduced by HADOOP-1774. + (Raghu Angadi via dhruba) + + HADOOP-1763. Too many lost task trackers on large clusters due to + insufficient number of RPC handler threads on the JobTracker. + (Devaraj Das) + + HADOOP-1463. HDFS report correct usage statistics for disk space + used by HDFS. (Hairong Kuang via dhruba) + + HADOOP-1692. In DFS ant task, don't cache the Configuration. + (Chris Douglas via cutting) + + HADOOP-1726. Remove lib/jetty-ext/ant.jar. (omalley) + + HADOOP-1772. Fix hadoop-daemon.sh script to get correct hostname + under Cygwin. (Tsz Wo (Nicholas), SZE via cutting) + + HADOOP-1749. Change TestDFSUpgrade to sort files, fixing sporadic + test failures. (Enis Soztutar via cutting) + + HADOOP-1748. Fix tasktracker to be able to launch tasks when log + directory is relative. (omalley via cutting) + + HADOOP-1775. Fix a NullPointerException and an + IllegalArgumentException in MapWritable. + (Jim Kellerman via cutting) + + HADOOP-1795. Fix so that jobs can generate output file names with + special characters. (Frédéric Bertin via cutting) + + HADOOP-1810. Fix incorrect value type in MRBench (SmallJobs) + (Devaraj Das via tomwhite) + + HADOOP-1806. Fix ant task to compile again, also fix default + builds to compile ant tasks. (Chris Douglas via cutting) + + HADOOP-1758. Fix escape processing in librecordio to not be + quadratic. (Vivek Ratan via cutting) + + HADOOP-1817. Fix MultiFileSplit to read and write the split + length, so that it is not always zero in map tasks. + (Thomas Friol via cutting) + + HADOOP-1853. Fix contrib/streaming to accept multiple -cacheFile + options. (Prachi Gupta via cutting) + + HADOOP-1818. Fix MultiFileInputFormat so that it does not return + empty splits when numPaths < numSplits. (Thomas Friol via enis) + + HADOOP-1840. Fix race condition which leads to task's diagnostic + messages getting lost. (acmurthy) + + HADOOP-1885. Fix race condition in MiniDFSCluster shutdown. + (Chris Douglas via nigel) + + HADOOP-1889. Fix path in EC2 scripts for building your own AMI. + (tomwhite) + + HADOOP-1892. Fix a NullPointerException in the JobTracker when + trying to fetch a task's diagnostic messages from the JobClient. + (Amar Kamat via acmurthy) + + HADOOP-1897. Completely remove about.html page from the web site. + (enis) + + HADOOP-1907. Fix null pointer exception when getting task diagnostics + in JobClient. (Christian Kunz via omalley) + + HADOOP-1882. Remove spurious asterisks from decimal number displays. + (Raghu Angadi via cutting) + + HADOOP-1783. Make S3 FileSystem return Paths fully-qualified with + scheme and host. (tomwhite) + + HADOOP-1925. Make pipes' autoconf script look for libsocket and libnsl, so + that it can compile under Solaris. (omalley) + + HADOOP-1940. 
TestDFSUpgradeFromImage must shut down its MiniDFSCluster. + (Chris Douglas via nigel) + + HADOOP-1930. Fix the blame for failed fetchs on the right host. (Arun C. + Murthy via omalley) + + HADOOP-1934. Fix the platform name on Mac to use underscores rather than + spaces. (omalley) + + HADOOP-1959. Use "/" instead of File.separator in the StatusHttpServer. + (jimk via omalley) + + HADOOP-1626. Improve dfsadmin help messages. + (Lohit Vijayarenu via dhruba) + + HADOOP-1695. The SecondaryNamenode waits for the Primary NameNode to + start up. (Dhruba Borthakur) + + HADOOP-1983. Have Pipes flush the command socket when progress is sent + to prevent timeouts during long computations. (omalley) + + HADOOP-1875. Non-existant directories or read-only directories are + filtered from dfs.client.buffer.dir. (Hairong Kuang via dhruba) + + HADOOP-1992. Fix the performance degradation in the sort validator. + (acmurthy via omalley) + + HADOOP-1874. Move task-outputs' promotion/discard to a separate thread + distinct from the main heartbeat-processing thread. The main upside being + that we do not lock-up the JobTracker during HDFS operations, which + otherwise may lead to lost tasktrackers if the NameNode is unresponsive. + (Devaraj Das via acmurthy) + + HADOOP-2026. Namenode prints out one log line for "Number of transactions" + at most once every minute. (Dhruba Borthakur) + + HADOOP-2022. Ensure that status information for successful tasks is correctly + recorded at the JobTracker, so that, for example, one may view correct + information via taskdetails.jsp. This bug was introduced by HADOOP-1874. + (Amar Kamat via acmurthy) + + HADOOP-2031. Correctly maintain the taskid which takes the TIP to + completion, failing which the case of lost tasktrackers isn't handled + properly i.e. the map TIP is incorrectly left marked as 'complete' and it + is never rescheduled elsewhere, leading to hung reduces. + (Devaraj Das via acmurthy) + + HADOOP-2018. The source datanode of a data transfer waits for + a response from the target datanode before closing the data stream. + (Hairong Kuang via dhruba) + + HADOOP-2023. Disable TestLocalDirAllocator on Windows. + (Hairong Kuang via nigel) + + HADOOP-2016. Ignore status-updates from FAILED/KILLED tasks at the + TaskTracker. This fixes a race-condition which caused the tasks to wrongly + remain in the RUNNING state even after being killed by the JobTracker and + thus handicap the cleanup of the task's output sub-directory. (acmurthy) + + HADOOP-1771. Fix a NullPointerException in streaming caused by an + IOException in MROutputThread. (lohit vijayarenu via nigel) + + HADOOP-2028. Fix distcp so that the log dir does not need to be + specified and the destination does not need to exist. + (Chris Douglas via nigel) + + HADOOP-2044. The namenode protects all lease manipulations using a + sortedLease lock. (Dhruba Borthakur) + + HADOOP-2051. The TaskCommit thread should not die for exceptions other + than the InterruptedException. This behavior is there for the other long + running threads in the JobTracker. (Arun C Murthy via ddas) + + HADOOP-1973. The FileSystem object would be accessed on the JobTracker + through a RPC in the InterTrackerProtocol. The check for the object being + null was missing and hence NPE would be thrown sometimes. This issue fixes + that problem. (Amareshwari Sri Ramadasu via ddas) + + HADOOP-2033. The SequenceFile.Writer.sync method was a no-op, which caused + very uneven splits for applications like distcp that count on them. 
+ (omalley) + + HADOOP-2070. Added a flush method to pipes' DownwardProtocol and call + that before waiting for the application to finish to ensure all buffered + data is flushed. (Owen O'Malley via acmurthy) + + HADOOP-2080. Fixed calculation of the checksum file size when the values + are large. (omalley) + + HADOOP-2048. Change error handling in distcp so that each map copies + as much as possible before reporting the error. Also report progress on + every copy. (Chris Douglas via omalley) + + HADOOP-2073. Change size of VERSION file after writing contents to it. + (Konstantin Shvachko via dhruba) + + HADOOP-2102. Fix the deprecated ToolBase to pass its Configuration object + to the superseding ToolRunner to ensure it picks up the appropriate + configuration resources. (Dennis Kubes and Enis Soztutar via acmurthy) + + HADOOP-2103. Fix minor javadoc bugs introduced by HADOOP-2046. (Nigel + Daley via acmurthy) + + IMPROVEMENTS + + HADOOP-1908. Restructure data node code so that block sending and + receiving are separated from data transfer header handling. + (Hairong Kuang via dhruba) + + HADOOP-1921. Save the configuration of completed/failed jobs and make them + available via the web-ui. (Amar Kamat via devaraj) + + HADOOP-1266. Remove dependency of package org.apache.hadoop.net on + org.apache.hadoop.dfs. (Hairong Kuang via dhruba) + + HADOOP-1779. Replace INodeDirectory.getINode() by a getExistingPathINodes() + to allow the retrieval of all existing INodes along a given path in a + single lookup. This facilitates removal of the 'parent' field in the + inode. (Christophe Taton via dhruba) + + HADOOP-1756. Add toString() to some Writable-s. (ab) + + HADOOP-1727. New classes: MapWritable and SortedMapWritable. + (Jim Kellerman via ab) + + HADOOP-1651. Improve progress reporting. + (Devaraj Das via tomwhite) + + HADOOP-1595. dfsshell can wait for a file to achieve its intended + replication target. (Tsz Wo (Nicholas), SZE via dhruba) + + HADOOP-1693. Remove un-needed log fields in DFS replication classes, + since the log may be accessed statically. (Konstantin Shvachko via cutting) + + HADOOP-1231. Add generics to Mapper and Reducer interfaces. + (tomwhite via cutting) + + HADOOP-1436. Improved command-line APIs, so that all tools need + not subclass ToolBase, and generic parameter parser is public. + (Enis Soztutar via cutting) + + HADOOP-1703. DFS-internal code cleanups, removing several uses of + the obsolete UTF8. (Christophe Taton via cutting) + + HADOOP-1731. Add Hadoop's version to contrib jar file names. + (cutting) + + HADOOP-1689. Make shell scripts more portable. All shell scripts + now explicitly depend on bash, but do not require that bash be + installed in a particular location, as long as it is on $PATH. + (cutting) + + HADOOP-1744. Remove many uses of the deprecated UTF8 class from + the HDFS namenode. (Christophe Taton via cutting) + + HADOOP-1654. Add IOUtils class, containing generic io-related + utility methods. (Enis Soztutar via cutting) + + HADOOP-1158. Change JobTracker to record map-output transmission + errors and use them to trigger speculative re-execution of tasks. + (Arun C Murthy via cutting) + + HADOOP-1601. Change GenericWritable to use ReflectionUtils for + instance creation, avoiding classloader issues, and to implement + Configurable. (Enis Soztutar via cutting) + + HADOOP-1750. Log standard output and standard error when forking + task processes. (omalley via cutting) + + HADOOP-1803. 
Generalize build.xml to make files in all + src/contrib/*/bin directories executable. (stack via cutting) + + HADOOP-1739. Let OS always choose the tasktracker's umbilical + port. Also switch default address for umbilical connections to + loopback. (cutting) + + HADOOP-1812. Let OS choose ports for IPC and RPC unit tests. (cutting) + + HADOOP-1825. Create $HADOOP_PID_DIR when it does not exist. + (Michael Bieniosek via cutting) + + HADOOP-1425. Replace uses of ToolBase with the Tool interface. + (Enis Soztutar via cutting) + + HADOOP-1569. Reimplement DistCP to use the standard FileSystem/URI + code in Hadoop so that you can copy from and to all of the supported file + systems.(Chris Douglas via omalley) + + HADOOP-1018. Improve documentation w.r.t handling of lost hearbeats between + TaskTrackers and JobTracker. (acmurthy) + + HADOOP-1718. Add ant targets for measuring code coverage with clover. + (simonwillnauer via nigel) + + HADOOP-1592. Log error messages to the client console when tasks + fail. (Amar Kamat via cutting) + + HADOOP-1879. Remove some unneeded casts. (Nilay Vaish via cutting) + + HADOOP-1878. Add space between priority links on job details + page. (Thomas Friol via cutting) + + HADOOP-120. In ArrayWritable, prevent creation with null value + class, and improve documentation. (Cameron Pope via cutting) + + HADOOP-1926. Add a random text writer example/benchmark so that we can + benchmark compression codecs on random data. (acmurthy via omalley) + + HADOOP-1906. Warn the user if they have an obsolete madred-default.xml + file in their configuration directory. (acmurthy via omalley) + + HADOOP-1971. Warn when job does not specify a jar. (enis via cutting) + + HADOOP-1942. Increase the concurrency of transaction logging to + edits log. Reduce the number of syncs by double-buffering the changes + to the transaction log. (Dhruba Borthakur) + + HADOOP-2046. Improve mapred javadoc. (Arun C. Murthy via cutting) + + HADOOP-2105. Improve overview.html to clarify supported platforms, + software pre-requisites for hadoop, how to install them on various + platforms and a better general description of hadoop and it's utility. + (Jim Kellerman via acmurthy) + + +Release 0.14.4 - 2007-11-26 + + BUG FIXES + + HADOOP-2140. Add missing Apache Licensing text at the front of several + C and C++ files. + + HADOOP-2169. Fix the DT_SONAME field of libhdfs.so to set it to the + correct value of 'libhdfs.so', currently it is set to the absolute path of + libhdfs.so. (acmurthy) + + HADOOP-2001. Make the job priority updates and job kills synchronized on + the JobTracker. Deadlock was seen in the JobTracker because of the lack of + this synchronization. (Arun C Murthy via ddas) + + +Release 0.14.3 - 2007-10-19 + + BUG FIXES + + HADOOP-2053. Fixed a dangling reference to a memory buffer in the map + output sorter. (acmurthy via omalley) + + HADOOP-2036. Fix a NullPointerException in JvmMetrics class. (nigel) + + HADOOP-2043. Release 0.14.2 was compiled with Java 1.6 rather than + Java 1.5. (cutting) + + +Release 0.14.2 - 2007-10-09 + + BUG FIXES + + HADOOP-1948. Removed spurious error message during block crc upgrade. + (Raghu Angadi via dhruba) + + HADOOP-1862. reduces are getting stuck trying to find map outputs. + (Arun C. Murthy via ddas) + + HADOOP-1977. Fixed handling of ToolBase cli options in JobClient. + (enis via omalley) + + HADOOP-1972. Fix LzoCompressor to ensure the user has actually asked + to finish compression. (arun via omalley) + + HADOOP-1970. 
Fix deadlock in progress reporting in the task. (Vivek + Ratan via omalley) + + HADOOP-1978. Name-node removes edits.new after a successful startup. + (Konstantin Shvachko via dhruba) + + HADOOP-1955. The Namenode tries to not pick the same source Datanode for + a replication request if the earlier replication request for the same + block and that source Datanode had failed. + (Raghu Angadi via dhruba) + + HADOOP-1961. The -get option to dfs-shell works when a single filename + is specified. (Raghu Angadi via dhruba) + + HADOOP-1997. TestCheckpoint closes the edits file after writing to it, + otherwise the rename of this file on Windows fails. + (Konstantin Shvachko via dhruba) + +Release 0.14.1 - 2007-09-04 + + BUG FIXES + + HADOOP-1740. Fix null pointer exception in sorting map outputs. (Devaraj + Das via omalley) + + HADOOP-1790. Fix tasktracker to work correctly on multi-homed + boxes. (Torsten Curdt via cutting) + + HADOOP-1798. Fix jobtracker to correctly account for failed + tasks. (omalley via cutting) + + +Release 0.14.0 - 2007-08-17 + + INCOMPATIBLE CHANGES + + 1. HADOOP-1134. + CONFIG/API - dfs.block.size must now be a multiple of + io.byte.per.checksum, otherwise new files can not be written. + LAYOUT - DFS layout version changed from -6 to -7, which will require an + upgrade from previous versions. + PROTOCOL - Datanode RPC protocol version changed from 7 to 8. + + 2. HADOOP-1283 + API - deprecated file locking API. + + 3. HADOOP-894 + PROTOCOL - changed ClientProtocol to fetch parts of block locations. + + 4. HADOOP-1336 + CONFIG - Enable speculative execution by default. + + 5. HADOOP-1197 + API - deprecated method for Configuration.getObject, because + Configurations should only contain strings. + + 6. HADOOP-1343 + API - deprecate Configuration.set(String,Object) so that only strings are + put in Configrations. + + 7. HADOOP-1207 + CLI - Fix FsShell 'rm' command to continue when a non-existent file is + encountered. + + 8. HADOOP-1473 + CLI/API - Job, TIP, and Task id formats have changed and are now unique + across job tracker restarts. + + 9. HADOOP-1400 + API - JobClient constructor now takes a JobConf object instead of a + Configuration object. + + NEW FEATURES and BUG FIXES + + 1. HADOOP-1197. In Configuration, deprecate getObject() and add + getRaw(), which skips variable expansion. (omalley via cutting) + + 2. HADOOP-1343. In Configuration, deprecate set(String,Object) and + implement Iterable. (omalley via cutting) + + 3. HADOOP-1344. Add RunningJob#getJobName(). (Michael Bieniosek via cutting) + + 4. HADOOP-1342. In aggregators, permit one to limit the number of + unique values per key. (Runping Qi via cutting) + + 5. HADOOP-1340. Set the replication factor of the MD5 file in the filecache + to be the same as the replication factor of the original file. + (Dhruba Borthakur via tomwhite.) + + 6. HADOOP-1355. Fix null pointer dereference in + TaskLogAppender.append(LoggingEvent). (Arun C Murthy via tomwhite.) + + 7. HADOOP-1357. Fix CopyFiles to correctly avoid removing "/". + (Arun C Murthy via cutting) + + 8. HADOOP-234. Add pipes facility, which permits writing MapReduce + programs in C++. + + 9. HADOOP-1359. Fix a potential NullPointerException in HDFS. + (Hairong Kuang via cutting) + + 10. HADOOP-1364. Fix inconsistent synchronization in SequenceFile. + (omalley via cutting) + + 11. HADOOP-1379. Add findbugs target to build.xml. + (Nigel Daley via cutting) + + 12. HADOOP-1364. Fix various inconsistent synchronization issues. + (Devaraj Das via cutting) + + 13. 
HADOOP-1393. Remove a potential unexpected negative number from + uses of random number generator. (omalley via cutting) + + 14. HADOOP-1387. A number of "performance" code-cleanups suggested + by findbugs. (Arun C Murthy via cutting) + + 15. HADOOP-1401. Add contrib/hbase javadoc to tree. (stack via cutting) + + 16. HADOOP-894. Change HDFS so that the client only retrieves a limited + number of block locations per request from the namenode. + (Konstantin Shvachko via cutting) + + 17. HADOOP-1406. Plug a leak in MapReduce's use of metrics. + (David Bowen via cutting) + + 18. HADOOP-1394. Implement "performance" code-cleanups in HDFS + suggested by findbugs. (Raghu Angadi via cutting) + + 19. HADOOP-1413. Add example program that uses Knuth's dancing links + algorithm to solve pentomino problems. (omalley via cutting) + + 20. HADOOP-1226. Change HDFS so that paths it returns are always + fully qualified. (Dhruba Borthakur via cutting) + + 21. HADOOP-800. Improvements to HDFS web-based file browser. + (Enis Soztutar via cutting) + + 22. HADOOP-1408. Fix a compiler warning by adding a class to replace + a generic. (omalley via cutting) + + 23. HADOOP-1376. Modify RandomWriter example so that it can generate + data for the Terasort benchmark. (Devaraj Das via cutting) + + 24. HADOOP-1429. Stop logging exceptions during normal IPC server + shutdown. (stack via cutting) + + 25. HADOOP-1461. Fix the synchronization of the task tracker to + avoid lockups in job cleanup. (Arun C Murthy via omalley) + + 26. HADOOP-1446. Update the TaskTracker metrics while the task is + running. (Devaraj via omalley) + + 27. HADOOP-1414. Fix a number of issues identified by FindBugs as + "Bad Practice". (Dhruba Borthakur via cutting) + + 28. HADOOP-1392. Fix "correctness" bugs identified by FindBugs in + fs and dfs packages. (Raghu Angadi via cutting) + + 29. HADOOP-1412. Fix "dodgy" bugs identified by FindBugs in fs and + io packages. (Hairong Kuang via cutting) + + 30. HADOOP-1261. Remove redundant events from HDFS namenode's edit + log when a datanode restarts. (Raghu Angadi via cutting) + + 31. HADOOP-1336. Re-enable speculative execution by + default. (omalley via cutting) + + 32. HADOOP-1311. Fix a bug in BytesWritable#set() where start offset + was ignored. (Dhruba Borthakur via cutting) + + 33. HADOOP-1450. Move checksumming closer to user code, so that + checksums are created before data is stored in large buffers and + verified after data is read from large buffers, to better catch + memory errors. (cutting) + + 34. HADOOP-1447. Add support in contrib/data_join for text inputs. + (Senthil Subramanian via cutting) + + 35. HADOOP-1456. Fix TestDecommission assertion failure by setting + the namenode to ignore the load on datanodes while allocating + replicas. (Dhruba Borthakur via tomwhite) + + 36. HADOOP-1396. Fix FileNotFoundException on DFS block. + (Dhruba Borthakur via tomwhite) + + 37. HADOOP-1467. Remove redundant counters from WordCount example. + (Owen O'Malley via tomwhite) + + 38. HADOOP-1139. Log HDFS block transitions at INFO level, to better + enable diagnosis of problems. (Dhruba Borthakur via cutting) + + 39. HADOOP-1269. Finer grained locking in HDFS namenode. + (Dhruba Borthakur via cutting) + + 40. HADOOP-1438. Improve HDFS documentation, correcting typos and + making images appear in PDF. Also update copyright date for all + docs. (Luke Nezda via cutting) + + 41. HADOOP-1457. Add counters for monitoring task assignments. + (Arun C Murthy via tomwhite) + + 42. HADOOP-1472. 
Fix so that timed-out tasks are counted as failures + rather than as killed. (Arun C Murthy via cutting) + + 43. HADOOP-1234. Fix a race condition in file cache that caused + tasktracker to not be able to find cached files. + (Arun C Murthy via cutting) + + 44. HADOOP-1482. Fix secondary namenode to roll info port. + (Dhruba Borthakur via cutting) + + 45. HADOOP-1300. Improve removal of excess block replicas to be + rack-aware. Attempts are now made to keep replicas on more + racks. (Hairong Kuang via cutting) + + 46. HADOOP-1417. Disable a few FindBugs checks that generate a lot + of spurious warnings. (Nigel Daley via cutting) + + 47. HADOOP-1320. Rewrite RandomWriter example to bypass reduce. + (Arun C Murthy via cutting) + + 48. HADOOP-1449. Add some examples to contrib/data_join. + (Senthil Subramanian via cutting) + + 49. HADOOP-1459. Fix so that, in HDFS, getFileCacheHints() returns + hostnames instead of IP addresses. (Dhruba Borthakur via cutting) + + 50. HADOOP-1493. Permit specification of "java.library.path" system + property in "mapred.child.java.opts" configuration property. + (Enis Soztutar via cutting) + + 51. HADOOP-1372. Use LocalDirAllocator for HDFS temporary block + files, so that disk space, writability, etc. is considered. + (Dhruba Borthakur via cutting) + + 52. HADOOP-1193. Pool allocation of compression codecs. This + eliminates a memory leak that could cause OutOfMemoryException, + and also substantially improves performance. + (Arun C Murthy via cutting) + + 53. HADOOP-1492. Fix a NullPointerException handling version + mismatch during datanode registration. + (Konstantin Shvachko via cutting) + + 54. HADOOP-1442. Fix handling of zero-length input splits. + (Senthil Subramanian via cutting) + + 55. HADOOP-1444. Fix HDFS block id generation to check pending + blocks for duplicates. (Dhruba Borthakur via cutting) + + 56. HADOOP-1207. Fix FsShell's 'rm' command to not stop when one of + the named files does not exist. (Tsz Wo Sze via cutting) + + 57. HADOOP-1475. Clear tasktracker's file cache before it + re-initializes, to avoid confusion. (omalley via cutting) + + 58. HADOOP-1505. Remove spurious stacktrace in ZlibFactory + introduced in HADOOP-1093. (Michael Stack via tomwhite) + + 59. HADOOP-1484. Permit one to kill jobs from the web ui. Note that + this is disabled by default. One must set + "webinterface.private.actions" to enable this. + (Enis Soztutar via cutting) + + 60. HADOOP-1003. Remove flushing of namenode edit log from primary + namenode lock, increasing namenode throughput. + (Dhruba Borthakur via cutting) + + 61. HADOOP-1023. Add links to searchable mail archives. + (tomwhite via cutting) + + 62. HADOOP-1504. Fix terminate-hadoop-cluster script in contrib/ec2 + to only terminate Hadoop instances, and not other instances + started by the same user. (tomwhite via cutting) + + 63. HADOOP-1462. Improve task progress reporting. Progress reports + are no longer blocking since i/o is performed in a separate + thread. Reporting during sorting and more is also more + consistent. (Vivek Ratan via cutting) + + 64. [ intentionally blank ] + + 65. HADOOP-1453. Remove some unneeded calls to FileSystem#exists() + when opening files, reducing the namenode load somewhat. + (Raghu Angadi via cutting) + + 66. HADOOP-1489. Fix text input truncation bug due to mark/reset. + Add a unittest. (Bwolen Yang via cutting) + + 67. HADOOP-1455. Permit specification of arbitrary job options on + pipes command line. (Devaraj Das via cutting) + + 68. HADOOP-1501. 
Better randomize sending of block reports to + namenode, so reduce load spikes. (Dhruba Borthakur via cutting) + + 69. HADOOP-1147. Remove @author tags from Java source files. + + 70. HADOOP-1283. Convert most uses of UTF8 in the namenode to be + String. (Konstantin Shvachko via cutting) + + 71. HADOOP-1511. Speedup hbase unit tests. (stack via cutting) + + 72. HADOOP-1517. Remove some synchronization in namenode to permit + finer grained locking previously added. (Konstantin Shvachko via cutting) + + 73. HADOOP-1512. Fix failing TestTextInputFormat on Windows. + (Senthil Subramanian via nigel) + + 74. HADOOP-1518. Add a session id to job metrics, for use by HOD. + (David Bowen via cutting) + + 75. HADOOP-1292. Change 'bin/hadoop fs -get' to first copy files to + a temporary name, then rename them to their final name, so that + failures don't leave partial files. (Tsz Wo Sze via cutting) + + 76. HADOOP-1377. Add support for modification time to FileSystem and + implement in HDFS and local implementations. Also, alter access + to file properties to be through a new FileStatus interface. + (Dhruba Borthakur via cutting) + + 77. HADOOP-1515. Add MultiFileInputFormat, which can pack multiple, + typically small, input files into each split. (Enis Soztutar via cutting) + + 78. HADOOP-1514. Make reducers report progress while waiting for map + outputs, so they're not killed. (Vivek Ratan via cutting) + + 79. HADOOP-1508. Add an Ant task for FsShell operations. Also add + new FsShell commands "touchz", "test" and "stat". + (Chris Douglas via cutting) + + 80. HADOOP-1028. Add log messages for server startup and shutdown. + (Tsz Wo Sze via cutting) + + 81. HADOOP-1485. Add metrics for monitoring shuffle. + (Devaraj Das via cutting) + + 82. HADOOP-1536. Remove file locks from libhdfs tests. + (Dhruba Borthakur via nigel) + + 83. HADOOP-1520. Add appropriate synchronization to FSEditsLog. + (Dhruba Borthakur via nigel) + + 84. HADOOP-1513. Fix a race condition in directory creation. + (Devaraj via omalley) + + 85. HADOOP-1546. Remove spurious column from HDFS web UI. + (Dhruba Borthakur via cutting) + + 86. HADOOP-1556. Make LocalJobRunner delete working files at end of + job run. (Devaraj Das via tomwhite) + + 87. HADOOP-1571. Add contrib lib directories to root build.xml + javadoc classpath. (Michael Stack via tomwhite) + + 88. HADOOP-1554. Log killed tasks to the job history and display them on the + web/ui. (Devaraj Das via omalley) + + 89. HADOOP-1533. Add persistent error logging for distcp. The logs are stored + into a specified hdfs directory. (Senthil Subramanian via omalley) + + 90. HADOOP-1286. Add support to HDFS for distributed upgrades, which + permits coordinated upgrade of datanode data. + (Konstantin Shvachko via cutting) + + 91. HADOOP-1580. Improve contrib/streaming so that subprocess exit + status is displayed for errors. (John Heidemann via cutting) + + 92. HADOOP-1448. In HDFS, randomize lists of non-local block + locations returned to client, so that load is better balanced. + (Hairong Kuang via cutting) + + 93. HADOOP-1578. Fix datanode to send its storage id to namenode + during registration. (Konstantin Shvachko via cutting) + + 94. HADOOP-1584. Fix a bug in GenericWritable which limited it to + 128 types instead of 256. (Espen Amble Kolstad via cutting) + + 95. HADOOP-1473. Make job ids unique across jobtracker restarts. + (omalley via cutting) + + 96. HADOOP-1582. Fix hdfslib to return 0 instead of -1 at + end-of-file, per C conventions. (Christian Kunz via cutting) + + 97. 
HADOOP-911. Fix a multithreading bug in libhdfs. + (Christian Kunz) + + 98. HADOOP-1486. Fix so that fatal exceptions in namenode cause it + to exit. (Dhruba Borthakur via cutting) + + 99. HADOOP-1470. Factor checksum generation and validation out of + ChecksumFileSystem so that it can be reused by FileSystem's with + built-in checksumming. (Hairong Kuang via cutting) + +100. HADOOP-1590. Use relative urls in jobtracker jsp pages, so that + webapp can be used in non-root contexts. (Thomas Friol via cutting) + +101. HADOOP-1596. Fix the parsing of taskids by streaming and improve the + error reporting. (omalley) + +102. HADOOP-1535. Fix the user-controlled grouping to the reduce function. + (Vivek Ratan via omalley) + +103. HADOOP-1585. Modify GenericWritable to declare the classes as subtypes + of Writable (Espen Amble Kolstad via omalley) + +104. HADOOP-1576. Fix errors in count of completed tasks when + speculative execution is enabled. (Arun C Murthy via cutting) + +105. HADOOP-1598. Fix license headers: adding missing; updating old. + (Enis Soztutar via cutting) + +106. HADOOP-1547. Provide examples for aggregate library. + (Runping Qi via tomwhite) + +107. HADOOP-1570. Permit jobs to enable and disable the use of + hadoop's native library. (Arun C Murthy via cutting) + +108. HADOOP-1433. Add job priority. (Johan Oskarsson via tomwhite) + +109. HADOOP-1597. Add status reports and post-upgrade options to HDFS + distributed upgrade. (Konstantin Shvachko via cutting) + +110. HADOOP-1524. Permit user task logs to appear as they're + created. (Michael Bieniosek via cutting) + +111. HADOOP-1599. Fix distcp bug on Windows. (Senthil Subramanian via cutting) + +112. HADOOP-1562. Add JVM metrics, including GC and logging stats. + (David Bowen via cutting) + +113. HADOOP-1613. Fix "DFS Health" page to display correct time of + last contact. (Dhruba Borthakur via cutting) + +114. HADOOP-1134. Add optimized checksum support to HDFS. Checksums + are now stored with each block, rather than as parallel files. + This reduces the namenode's memory requirements and increases + data integrity. (Raghu Angadi via cutting) + +115. HADOOP-1400. Make JobClient retry requests, so that clients can + survive jobtracker problems. (omalley via cutting) + +116. HADOOP-1564. Add unit tests for HDFS block-level checksums. + (Dhruba Borthakur via cutting) + +117. HADOOP-1620. Reduce the number of abstract FileSystem methods, + simplifying implementations. (cutting) + +118. HADOOP-1625. Fix a "could not move files" exception in datanode. + (Raghu Angadi via cutting) + +119. HADOOP-1624. Fix an infinite loop in datanode. (Raghu Angadi via cutting) + +120. HADOOP-1084. Switch mapred file cache to use file modification + time instead of checksum to detect file changes, as checksums are + no longer easily accessed. (Arun C Murthy via cutting) + +130. HADOOP-1623. Fix an infinite loop when copying directories. + (Dhruba Borthakur via cutting) + +131. HADOOP-1603. Fix a bug in namenode initialization where + default replication is sometimes reset to one on restart. + (Raghu Angadi via cutting) + +132. HADOOP-1635. Remove hardcoded keypair name and fix launch-hadoop-cluster + to support later versions of ec2-api-tools. (Stu Hood via tomwhite) + +133. HADOOP-1638. Fix contrib EC2 scripts to support NAT addressing. + (Stu Hood via tomwhite) + +134. HADOOP-1632. Fix an IllegalArgumentException in fsck. + (Hairong Kuang via cutting) + +135. HADOOP-1619. Fix FSInputChecker to not attempt to read past EOF. 
+ (Hairong Kuang via cutting) + +136. HADOOP-1640. Fix TestDecommission on Windows. + (Dhruba Borthakur via cutting) + +137. HADOOP-1587. Fix TestSymLink to get required system properties. + (Devaraj Das via omalley) + +138. HADOOP-1628. Add block CRC protocol unit tests. (Raghu Angadi via omalley) + +139. HADOOP-1653. FSDirectory code-cleanups. FSDirectory.INode + becomes a static class. (Christophe Taton via dhruba) + +140. HADOOP-1066. Restructure documentation to make more user + friendly. (Connie Kleinjans and Jeff Hammerbacher via cutting) + +141. HADOOP-1551. libhdfs supports setting replication factor and + retrieving modification time of files. (Sameer Paranjpye via dhruba) + +141. HADOOP-1647. FileSystem.getFileStatus returns valid values for "/". + (Dhruba Borthakur via dhruba) + +142. HADOOP-1657. Fix NNBench to ensure that the block size is a + multiple of bytes.per.checksum. (Raghu Angadi via dhruba) + +143. HADOOP-1553. Replace user task output and log capture code to use shell + redirection instead of copier threads in the TaskTracker. Capping the + size of the output is now done via tail in memory and thus should not be + large. The output of the tasklog servlet is not forced into UTF8 and is + not buffered entirely in memory. (omalley) + Configuration changes to hadoop-default.xml: + remove mapred.userlog.num.splits + remove mapred.userlog.purge.splits + change default mapred.userlog.limit.kb to 0 (no limit) + change default mapred.userlog.retain.hours to 24 + Configuration changes to log4j.properties: + remove log4j.appender.TLA.noKeepSplits + remove log4j.appender.TLA.purgeLogSplits + remove log4j.appender.TLA.logsRetainHours + URL changes: + http:///tasklog.jsp -> http://tasklog with + parameters limited to start and end, which may be positive (from + start) or negative (from end). + Environment: + require bash (v2 or later) and tail + +144. HADOOP-1659. Fix a job id/job name mixup. (Arun C. Murthy via omalley) + +145. HADOOP-1665. With HDFS Trash enabled, if the same file is created + and deleted more than once, the succeeding deletions create Trash item + names suffixed with an integer. (Dhruba Borthakur via dhruba) + +146. HADOOP-1666. FsShell object can be used for multiple fs commands. + (Dhruba Borthakur via dhruba) + +147. HADOOP-1654. Remove performance regression introduced by Block CRC. + (Raghu Angadi via dhruba) + +148. HADOOP-1680. Improvements to Block CRC upgrade messages. + (Raghu Angadi via dhruba) + +149. HADOOP-71. Allow Text and SequenceFile Map/Reduce inputs from non-default + filesystems. (omalley) + +150. HADOOP-1568. Expose HDFS as xml/http filesystem to provide cross-version + compatibility. (Chris Douglas via omalley) + +151. HADOOP-1668. Added an INCOMPATIBILITY section to CHANGES.txt. (nigel) + +152. HADOOP-1629. Added an upgrade test for HADOOP-1134. + (Raghu Angadi via nigel) + +153. HADOOP-1698. Fix performance problems on map output sorting for jobs + with large numbers of reduces. (Devaraj Das via omalley) + +154. HADOOP-1716. Fix a Pipes wordcount example to remove the 'file:' + scheme from its output path. (omalley via cutting) + +155. HADOOP-1714. Fix TestDFSUpgradeFromImage to work on Windows. + (Raghu Angadi via nigel) + +156. HADOOP-1663. Return a non-zero exit code if streaming fails. (Lohit Renu + via omalley) + +157. HADOOP-1712. Fix an unhandled exception on datanode during block + CRC upgrade. (Raghu Angadi via cutting) + +158. HADOOP-1717. Fix TestDFSUpgradeFromImage to work on Solaris. + (nigel via cutting) + +159.
HADOOP-1437. Add Eclipse plugin in contrib. + (Eugene Hung and Christophe Taton via cutting) + + +Release 0.13.0 - 2007-06-08 + + 1. HADOOP-1047. Fix TestReplication to succeed more reliably. + (Hairong Kuang via cutting) + + 2. HADOOP-1063. Fix a race condition in MiniDFSCluster test code. + (Hairong Kuang via cutting) + + 3. HADOOP-1101. In web ui, split shuffle statistics from reduce + statistics, and add some task averages. (Devaraj Das via cutting) + + 4. HADOOP-1071. Improve handling of protocol version mismatch in + JobTracker. (Tahir Hashmi via cutting) + + 5. HADOOP-1116. Increase heap size used for contrib unit tests. + (Philippe Gassmann via cutting) + + 6. HADOOP-1120. Add contrib/data_join, tools to simplify joining + data from multiple sources using MapReduce. (Runping Qi via cutting) + + 7. HADOOP-1064. Reduce log level of some DFSClient messages. + (Dhruba Borthakur via cutting) + + 8. HADOOP-1137. Fix StatusHttpServer to work correctly when + resources are in a jar file. (Benjamin Reed via cutting) + + 9. HADOOP-1094. Optimize generated Writable implementations for + records to not allocate a new BinaryOutputArchive or + BinaryInputArchive per call. (Milind Bhandarkar via cutting) + +10. HADOOP-1068. Improve error message for clusters with 0 datanodes. + (Dhruba Borthakur via tomwhite) + +11. HADOOP-1122. Fix divide-by-zero exception in FSNamesystem + chooseTarget method. (Dhruba Borthakur via tomwhite) + +12. HADOOP-1131. Add a closeAll() static method to FileSystem. + (Philippe Gassmann via tomwhite) + +13. HADOOP-1085. Improve port selection in HDFS and MapReduce test + code. Ports are now selected by the OS during testing rather than + by probing for free ports, improving test reliability. + (Arun C Murthy via cutting) + +14. HADOOP-1153. Fix HDFS daemons to correctly stop their threads. + (Konstantin Shvachko via cutting) + +15. HADOOP-1146. Add a counter for reduce input keys and rename the + "reduce input records" counter to be "reduce input groups". + (David Bowen via cutting) + +16. HADOOP-1165. In records, replace idential generated toString + methods with a method on the base class. (Milind Bhandarkar via cutting) + +17. HADOOP-1164. Fix TestReplicationPolicy to specify port zero, so + that a free port is automatically selected. (omalley via cutting) + +18. HADOOP-1166. Add a NullOutputFormat and use it in the + RandomWriter example. (omalley via cutting) + +19. HADOOP-1169. Fix a cut/paste error in CopyFiles utility so that + S3-based source files are correctly copied. (Michael Stack via cutting) + +20. HADOOP-1167. Remove extra synchronization in InMemoryFileSystem. + (omalley via cutting) + +21. HADOOP-1110. Fix an off-by-one error counting map inputs. + (David Bowen via cutting) + +22. HADOOP-1178. Fix a NullPointerException during namenode startup. + (Dhruba Borthakur via cutting) + +23. HADOOP-1011. Fix a ConcurrentModificationException when viewing + job history. (Tahir Hashmi via cutting) + +24. HADOOP-672. Improve help for fs shell commands. + (Dhruba Borthakur via cutting) + +25. HADOOP-1170. Improve datanode performance by removing device + checks from common operations. (Igor Bolotin via cutting) + +26. HADOOP-1090. Fix SortValidator's detection of whether the input + file belongs to the sort-input or sort-output directory. + (Arun C Murthy via tomwhite) + +27. HADOOP-1081. Fix bin/hadoop on Darwin. (Michael Bieniosek via cutting) + +28. HADOOP-1045. Add contrib/hbase, a BigTable-like online database. + (Jim Kellerman via cutting) + +29. HADOOP-1156. 
Fix a NullPointerException in MiniDFSCluster. + (Hairong Kuang via cutting) + +30. HADOOP-702. Add tools to help automate HDFS upgrades. + (Konstantin Shvachko via cutting) + +31. HADOOP-1163. Fix ganglia metrics to aggregate metrics from different + hosts properly. (Michael Bieniosek via tomwhite) + +32. HADOOP-1194. Make compression style record level for map output + compression. (Arun C Murthy via tomwhite) + +33. HADOOP-1187. Improve DFS Scalability: avoid scanning entire list of + datanodes in getAdditionalBlocks. (Dhruba Borthakur via tomwhite) + +34. HADOOP-1133. Add tool to analyze and debug namenode on a production + cluster. (Dhruba Borthakur via tomwhite) + +35. HADOOP-1151. Remove spurious printing to stderr in streaming + PipeMapRed. (Koji Noguchi via tomwhite) + +36. HADOOP-988. Change namenode to use a single map of blocks to metadata. + (Raghu Angadi via tomwhite) + +37. HADOOP-1203. Change UpgradeUtilities used by DFS tests to use + MiniDFSCluster to start and stop NameNode/DataNodes. + (Nigel Daley via tomwhite) + +38. HADOOP-1217. Add test.timeout property to build.xml, so that + long-running unit tests may be automatically terminated. + (Nigel Daley via cutting) + +39. HADOOP-1149. Improve DFS Scalability: make + processOverReplicatedBlock() a no-op if blocks are not + over-replicated. (Raghu Angadi via tomwhite) + +40. HADOOP-1149. Improve DFS Scalability: optimize getDistance(), + contains(), and isOnSameRack() in NetworkTopology. + (Hairong Kuang via tomwhite) + +41. HADOOP-1218. Make synchronization on TaskTracker's RunningJob + object consistent. (Devaraj Das via tomwhite) + +42. HADOOP-1219. Ignore progress report once a task has reported as + 'done'. (Devaraj Das via tomwhite) + +43. HADOOP-1114. Permit user to specify additional CLASSPATH elements + with a HADOOP_CLASSPATH environment variable. (cutting) + +44. HADOOP-1198. Remove ipc.client.timeout parameter override from + unit test configuration. Using the default is more robust and + has almost the same run time. (Arun C Murthy via tomwhite) + +45. HADOOP-1211. Remove deprecated constructor and unused static + members in DataNode class. (Konstantin Shvachko via tomwhite) + +46. HADOOP-1136. Fix ArrayIndexOutOfBoundsException in + FSNamesystem$UnderReplicatedBlocks add() method. + (Hairong Kuang via tomwhite) + +47. HADOOP-978. Add the client name and the address of the node that + previously started to create the file to the description of + AlreadyBeingCreatedException. (Konstantin Shvachko via tomwhite) + +48. HADOOP-1001. Check the type of keys and values generated by the + mapper against the types specified in JobConf. + (Tahir Hashmi via tomwhite) + +49. HADOOP-971. Improve DFS Scalability: Improve name node performance + by adding a hostname to datanodes map. (Hairong Kuang via tomwhite) + +50. HADOOP-1189. Fix 'No space left on device' exceptions on datanodes. + (Raghu Angadi via tomwhite) + +51. HADOOP-819. Change LineRecordWriter to not insert a tab between + key and value when either is null, and to print nothing when both + are null. (Runping Qi via cutting) + +52. HADOOP-1204. Rename InputFormatBase to be FileInputFormat, and + deprecate InputFormatBase. Also make LineRecordReader easier to + extend. (Runping Qi via cutting) + +53. HADOOP-1213. Improve logging of errors by IPC server, to + consistently include the service name and the call. (cutting) + +54. HADOOP-1238. Fix metrics reporting by TaskTracker to correctly + track maps_running and reduces_running. 
+ (Michael Bieniosek via cutting) + +55. HADOOP-1093. Fix a race condition in HDFS where blocks were + sometimes erased before they were reported written. + (Dhruba Borthakur via cutting) + +56. HADOOP-1239. Add a package name to some testjar test classes. + (Jim Kellerman via cutting) + +57. HADOOP-1241. Fix NullPointerException in processReport when + namenode is restarted. (Dhruba Borthakur via tomwhite) + +58. HADOOP-1244. Fix stop-dfs.sh to no longer incorrectly specify + slaves file for stopping datanode. + (Michael Bieniosek via tomwhite) + +59. HADOOP-1253. Fix ConcurrentModificationException and + NullPointerException in JobControl. + (Johan Oskarson via tomwhite) + +60. HADOOP-1256. Fix NameNode so that multiple DataNodeDescriptors + can no longer be created on startup. (Hairong Kuang via cutting) + +61. HADOOP-1214. Replace streaming classes with new counterparts + from Hadoop core. (Runping Qi via tomwhite) + +62. HADOOP-1250. Move a chmod utility from streaming to FileUtil. + (omalley via cutting) + +63. HADOOP-1258. Fix TestCheckpoint test case to wait for + MiniDFSCluster to be active. (Nigel Daley via tomwhite) + +64. HADOOP-1148. Re-indent all Java source code to consistently use + two spaces per indent level. (cutting) + +65. HADOOP-1251. Add a method to Reporter to get the map InputSplit. + (omalley via cutting) + +66. HADOOP-1224. Fix "Browse the filesystem" link to no longer point + to dead datanodes. (Enis Soztutar via tomwhite) + +67. HADOOP-1154. Fail a streaming task if the threads reading from or + writing to the streaming process fail. (Koji Noguchi via tomwhite) + +68. HADOOP-968. Move shuffle and sort to run in reduce's child JVM, + rather than in TaskTracker. (Devaraj Das via cutting) + +69. HADOOP-1111. Add support for client notification of job + completion. If the job configuration has a job.end.notification.url + property it will make a HTTP GET request to the specified URL. + The number of retries and the interval between retries is also + configurable. (Alejandro Abdelnur via tomwhite) + +70. HADOOP-1275. Fix misspelled job notification property in + hadoop-default.xml. (Alejandro Abdelnur via tomwhite) + +71. HADOOP-1152. Fix race condition in MapOutputCopier.copyOutput file + rename causing possible reduce task hang. + (Tahir Hashmi via tomwhite) + +72. HADOOP-1050. Distinguish between failed and killed tasks so as to + not count a lost tasktracker against the job. + (Arun C Murthy via tomwhite) + +73. HADOOP-1271. Fix StreamBaseRecordReader to be able to log record + data that's not UTF-8. (Arun C Murthy via tomwhite) + +74. HADOOP-1190. Fix unchecked warnings in main Hadoop code. + (tomwhite) + +75. HADOOP-1127. Fix AlreadyBeingCreatedException in namenode for + jobs run with speculative execution. + (Arun C Murthy via tomwhite) + +76. HADOOP-1282. Omnibus HBase patch. Improved tests & configuration. + (Jim Kellerman via cutting) + +77. HADOOP-1262. Make dfs client try to read from a different replica + of the checksum file when a checksum error is detected. + (Hairong Kuang via tomwhite) + +78. HADOOP-1279. Fix JobTracker to maintain list of recently + completed jobs by order of completion, not submission. + (Arun C Murthy via cutting) + +79. HADOOP-1284. In contrib/streaming, permit flexible specification + of field delimiter and fields for partitioning and sorting. + (Runping Qi via cutting) + +80. HADOOP-1176. Fix a bug where reduce would hang when a map had + more than 2GB of output for it. (Arun C Murthy via cutting) + +81. HADOOP-1293. 
Fix contrib/streaming to print more than the first + twenty lines of standard error. (Koji Noguchi via cutting) + +82. HADOOP-1297. Fix datanode so that requests to remove blocks that + do not exist no longer causes block reports to be re-sent every + second. (Dhruba Borthakur via cutting) + +83. HADOOP-1216. Change MapReduce so that, when numReduceTasks is + zero, map outputs are written directly as final output, skipping + shuffle, sort and reduce. Use this to implement reduce=NONE + option in contrib/streaming. (Runping Qi via cutting) + +84. HADOOP-1294. Fix unchecked warnings in main Hadoop code under + Java 6. (tomwhite) + +85. HADOOP-1299. Fix so that RPC will restart after RPC.stopClient() + has been called. (Michael Stack via cutting) + +86. HADOOP-1278. Improve blacklisting of TaskTrackers by JobTracker, + to reduce false positives. (Arun C Murthy via cutting) + +87. HADOOP-1290. Move contrib/abacus into mapred/lib/aggregate. + (Runping Qi via cutting) + +88. HADOOP-1272. Extract inner classes from FSNamesystem into separate + classes. (Dhruba Borthakur via tomwhite) + +89. HADOOP-1247. Add support to contrib/streaming for aggregate + package, formerly called Abacus. (Runping Qi via cutting) + +90. HADOOP-1061. Fix bug in listing files in the S3 filesystem. + NOTE: this change is not backwards compatible! You should use the + MigrationTool supplied to migrate existing S3 filesystem data to + the new format. Please backup your data first before upgrading + (using 'hadoop distcp' for example). (tomwhite) + +91. HADOOP-1304. Make configurable the maximum number of task + attempts before a job fails. (Devaraj Das via cutting) + +92. HADOOP-1308. Use generics to restrict types when classes are + passed as parameters to JobConf methods. (Michael Bieniosek via cutting) + +93. HADOOP-1312. Fix a ConcurrentModificationException in NameNode + that killed the heartbeat monitoring thread. + (Dhruba Borthakur via cutting) + +94. HADOOP-1315. Clean up contrib/streaming, switching it to use core + classes more and removing unused code. (Runping Qi via cutting) + +95. HADOOP-485. Allow a different comparator for grouping keys in + calls to reduce. (Tahir Hashmi via cutting) + +96. HADOOP-1322. Fix TaskTracker blacklisting to work correctly in + one- and two-node clusters. (Arun C Murthy via cutting) + +97. HADOOP-1144. Permit one to specify a maximum percentage of tasks + that can fail before a job is aborted. The default is zero. + (Arun C Murthy via cutting) + +98. HADOOP-1184. Fix HDFS decomissioning to complete when the only + copy of a block is on a decommissioned node. (Dhruba Borthakur via cutting) + +99. HADOOP-1263. Change DFSClient to retry certain namenode calls + with a random, exponentially increasing backoff time, to avoid + overloading the namenode on, e.g., job start. (Hairong Kuang via cutting) + +100. HADOOP-1325. First complete, functioning version of HBase. + (Jim Kellerman via cutting) + +101. HADOOP-1276. Make tasktracker expiry interval configurable. + (Arun C Murthy via cutting) + +102. HADOOP-1326. Change JobClient#RunJob() to return the job. + (omalley via cutting) + +103. HADOOP-1270. Randomize the fetch of map outputs, speeding the + shuffle. (Arun C Murthy via cutting) + +104. HADOOP-1200. Restore disk checking lost in HADOOP-1170. + (Hairong Kuang via cutting) + +105. HADOOP-1252. Changed MapReduce's allocation of local files to + use round-robin among available devices, rather than a hashcode. 
+ More care is also taken to not allocate files on full or offline + drives. (Devaraj Das via cutting) + +106. HADOOP-1324. Change so that an FSError kills only the task that + generates it rather than the entire task tracker. + (Arun C Murthy via cutting) + +107. HADOOP-1310. Fix unchecked warnings in aggregate code. (tomwhite) + +108. HADOOP-1255. Fix a bug where the namenode falls into an infinite + loop trying to remove a dead node. (Hairong Kuang via cutting) + +109. HADOOP-1160. Fix DistributedFileSystem.close() to close the + underlying FileSystem, correctly aborting files being written. + (Hairong Kuang via cutting) + +110. HADOOP-1341. Fix intermittent failures in HBase unit tests + caused by deadlock. (Jim Kellerman via cutting) + +111. HADOOP-1350. Fix shuffle performance problem caused by forcing + chunked encoding of map outputs. (Devaraj Das via cutting) + +112. HADOOP-1345. Fix HDFS to correctly retry another replica when a + checksum error is encountered. (Hairong Kuang via cutting) + +113. HADOOP-1205. Improve synchronization around HDFS block map. + (Hairong Kuang via cutting) + +114. HADOOP-1353. Fix a potential NullPointerException in namenode. + (Dhruba Borthakur via cutting) + +115. HADOOP-1354. Fix a potential NullPointerException in FsShell. + (Hairong Kuang via cutting) + +116. HADOOP-1358. Fix a potential bug when DFSClient calls skipBytes. + (Hairong Kuang via cutting) + +117. HADOOP-1356. Fix a bug in ValueHistogram. (Runping Qi via cutting) + +118. HADOOP-1363. Fix locking bug in JobClient#waitForCompletion(). + (omalley via cutting) + +119. HADOOP-1368. Fix inconsistent synchronization in JobInProgress. + (omalley via cutting) + +120. HADOOP-1369. Fix inconsistent synchronization in TaskTracker. + (omalley via cutting) + +121. HADOOP-1361. Fix various calls to skipBytes() to check return + value. (Hairong Kuang via cutting) + +122. HADOOP-1388. Fix a potential NullPointerException in web ui. + (Devaraj Das via cutting) + +123. HADOOP-1385. Fix MD5Hash#hashCode() to generally hash to more + than 256 values. (omalley via cutting) + +124. HADOOP-1386. Fix Path to not permit the empty string as a + path, as this has lead to accidental file deletion. Instead + force applications to use "." to name the default directory. + (Hairong Kuang via cutting) + +125. HADOOP-1407. Fix integer division bug in JobInProgress which + meant failed tasks didn't cause the job to fail. + (Arun C Murthy via tomwhite) + +126. HADOOP-1427. Fix a typo that caused GzipCodec to incorrectly use + a very small input buffer. (Espen Amble Kolstad via cutting) + +127. HADOOP-1435. Fix globbing code to no longer use the empty string + to indicate the default directory, per HADOOP-1386. + (Hairong Kuang via cutting) + +128. HADOOP-1411. Make task retry framework handle + AlreadyBeingCreatedException when wrapped as a RemoteException. + (Hairong Kuang via tomwhite) + +129. HADOOP-1242. Improve handling of DFS upgrades. + (Konstantin Shvachko via cutting) + +130. HADOOP-1332. Fix so that TaskTracker exits reliably during unit + tests on Windows. (omalley via cutting) + +131. HADOOP-1431. Fix so that sort progress reporting during map runs + only while sorting, so that stuck maps are correctly terminated. + (Devaraj Das and Arun C Murthy via cutting) + +132. HADOOP-1452. Change TaskTracker.MapOutputServlet.doGet.totalRead + to a long, permitting map outputs to exceed 2^31 bytes. + (omalley via cutting) + +133. HADOOP-1443. Fix a bug opening zero-length files in HDFS. 
+ (Konstantin Shvachko via cutting) + + +Release 0.12.3 - 2007-04-06 + + 1. HADOOP-1162. Fix bug in record CSV and XML serialization of + binary values. (Milind Bhandarkar via cutting) + + 2. HADOOP-1123. Fix NullPointerException in LocalFileSystem when + trying to recover from a checksum error. + (Hairong Kuang & Nigel Daley via tomwhite) + + 3. HADOOP-1177. Fix bug where IOException in MapOutputLocation.getFile + was not being logged. (Devaraj Das via tomwhite) + + 4. HADOOP-1175. Fix bugs in JSP for displaying a task's log messages. + (Arun C Murthy via cutting) + + 5. HADOOP-1191. Fix map tasks to wait until sort progress thread has + stopped before reporting the task done. (Devaraj Das via cutting) + + 6. HADOOP-1192. Fix an integer overflow bug in FSShell's 'dus' + command and a performance problem in HDFS's implementation of it. + (Hairong Kuang via cutting) + + 7. HADOOP-1105. Fix reducers to make "progress" while iterating + through values. (Devaraj Das & Owen O'Malley via tomwhite) + + 8. HADOOP-1179. Make Task Tracker close index file as soon as the read + is done when serving get-map-output requests. + (Devaraj Das via tomwhite) + + +Release 0.12.2 - 2007-23-17 + + 1. HADOOP-1135. Fix bug in block report processing which may cause + the namenode to delete blocks. (Dhruba Borthakur via tomwhite) + + 2. HADOOP-1145. Make XML serializer and deserializer classes public + in record package. (Milind Bhandarkar via cutting) + + 3. HADOOP-1140. Fix a deadlock in metrics. (David Bowen via cutting) + + 4. HADOOP-1150. Fix streaming -reducer and -mapper to give them + defaults. (Owen O'Malley via tomwhite) + + +Release 0.12.1 - 2007-03-17 + + 1. HADOOP-1035. Fix a StackOverflowError in FSDataSet. + (Raghu Angadi via cutting) + + 2. HADOOP-1053. Fix VInt representation of negative values. Also + remove references in generated record code to methods outside of + the record package and improve some record documentation. + (Milind Bhandarkar via cutting) + + 3. HADOOP-1067. Compile fails if Checkstyle jar is present in lib + directory. Also remove dependency on a particular Checkstyle + version number. (tomwhite) + + 4. HADOOP-1060. Fix an IndexOutOfBoundsException in the JobTracker + that could cause jobs to hang. (Arun C Murthy via cutting) + + 5. HADOOP-1077. Fix a race condition fetching map outputs that could + hang reduces. (Devaraj Das via cutting) + + 6. HADOOP-1083. Fix so that when a cluster restarts with a missing + datanode, its blocks are replicated. (Hairong Kuang via cutting) + + 7. HADOOP-1082. Fix a NullPointerException in ChecksumFileSystem. + (Hairong Kuang via cutting) + + 8. HADOOP-1088. Fix record serialization of negative values. + (Milind Bhandarkar via cutting) + + 9. HADOOP-1080. Fix bug in bin/hadoop on Windows when native + libraries are present. (ab via cutting) + +10. HADOOP-1091. Fix a NullPointerException in MetricsRecord. + (David Bowen via tomwhite) + +11. HADOOP-1092. Fix a NullPointerException in HeartbeatMonitor + thread. (Hairong Kuang via tomwhite) + +12. HADOOP-1112. Fix a race condition in Hadoop metrics. + (David Bowen via tomwhite) + +13. HADOOP-1108. Checksummed file system should retry reading if a + different replica is found when handling ChecksumException. + (Hairong Kuang via tomwhite) + +14. HADOOP-1070. Fix a problem with number of racks and datanodes + temporarily doubling. (Konstantin Shvachko via tomwhite) + +15. HADOOP-1099. Fix NullPointerException in JobInProgress. + (Gautam Kowshik via tomwhite) + +16. HADOOP-1115. 
Fix bug where FsShell copyToLocal doesn't + copy directories. (Hairong Kuang via tomwhite) + +17. HADOOP-1109. Fix NullPointerException in StreamInputFormat. + (Koji Noguchi via tomwhite) + +18. HADOOP-1117. Fix DFS scalability: when the namenode is + restarted it consumes 80% CPU. (Dhruba Borthakur via + tomwhite) + +19. HADOOP-1089. Make the C++ version of write and read v-int + agree with the Java versions. (Milind Bhandarkar via + tomwhite) + +20. HADOOP-1096. Rename InputArchive and OutputArchive and + make them public. (Milind Bhandarkar via tomwhite) + +21. HADOOP-1128. Fix missing progress information in map tasks. + (Espen Amble Kolstad, Andrzej Bialecki, and Owen O'Malley + via tomwhite) + +22. HADOOP-1129. Fix DFSClient to not hide IOExceptions in + flush method. (Hairong Kuang via tomwhite) + +23. HADOOP-1126. Optimize CPU usage for under replicated blocks + when cluster restarts. (Hairong Kuang via tomwhite) + + +Release 0.12.0 - 2007-03-02 + + 1. HADOOP-975. Separate stdout and stderr from tasks. + (Arun C Murthy via cutting) + + 2. HADOOP-982. Add some setters and a toString() method to + BytesWritable. (omalley via cutting) + + 3. HADOOP-858. Move contrib/smallJobsBenchmark to src/test, removing + obsolete bits. (Nigel Daley via cutting) + + 4. HADOOP-992. Fix MiniMR unit tests to use MiniDFS when specified, + rather than the local FS. (omalley via cutting) + + 5. HADOOP-954. Change use of metrics to use callback mechanism. + Also rename utility class Metrics to MetricsUtil. + (David Bowen & Nigel Daley via cutting) + + 6. HADOOP-893. Improve HDFS client's handling of dead datanodes. + The set is no longer reset with each block, but rather is now + maintained for the life of an open file. (Raghu Angadi via cutting) + + 7. HADOOP-882. Upgrade to jets3t version 0.5, used by the S3 + FileSystem. This version supports retries. (Michael Stack via cutting) + + 8. HADOOP-977. Send task's stdout and stderr to JobClient's stdout + and stderr respectively, with each line tagged by the task's name. + (Arun C Murthy via cutting) + + 9. HADOOP-761. Change unit tests to not use /tmp. (Nigel Daley via cutting) + +10. HADOOP-1007. Make names of metrics used in Hadoop unique. + (Nigel Daley via cutting) + +11. HADOOP-491. Change mapred.task.timeout to be per-job, and make a + value of zero mean no timeout. Also change contrib/streaming to + disable task timeouts. (Arun C Murthy via cutting) + +12. HADOOP-1010. Add Reporter.NULL, a Reporter implementation that + does nothing. (Runping Qi via cutting) + +13. HADOOP-923. In HDFS NameNode, move replication computation to a + separate thread, to improve heartbeat processing time. + (Dhruba Borthakur via cutting) + +14. HADOOP-476. Rewrite contrib/streaming command-line processing, + improving parameter validation. (Sanjay Dahiya via cutting) + +15. HADOOP-973. Improve error messages in Namenode. This should help + to track down a problem that was appearing as a + NullPointerException. (Dhruba Borthakur via cutting) + +16. HADOOP-649. Fix so that jobs with no tasks are not lost. + (Thomas Friol via cutting) + +17. HADOOP-803. Reduce memory use by HDFS namenode, phase I. + (Raghu Angadi via cutting) + +18. HADOOP-1021. Fix MRCaching-based unit tests on Windows. + (Nigel Daley via cutting) + +19. HADOOP-889. Remove duplicate code from HDFS unit tests. + (Milind Bhandarkar via cutting) + +20. HADOOP-943. Improve HDFS's fsck command to display the filename + for under-replicated blocks. (Dhruba Borthakur via cutting) + +21. HADOOP-333. 
Add validator for sort benchmark output. + (Arun C Murthy via cutting) + +22. HADOOP-947. Improve performance of datanode decomissioning. + (Dhruba Borthakur via cutting) + +23. HADOOP-442. Permit one to specify hosts allowed to connect to + namenode and jobtracker with include and exclude files. (Wendy + Chien via cutting) + +24. HADOOP-1017. Cache constructors, for improved performance. + (Ron Bodkin via cutting) + +25. HADOOP-867. Move split creation out of JobTracker to client. + Splits are now saved in a separate file, read by task processes + directly, so that user code is no longer required in the + JobTracker. (omalley via cutting) + +26. HADOOP-1006. Remove obsolete '-local' option from test code. + (Gautam Kowshik via cutting) + +27. HADOOP-952. Create a public (shared) Hadoop EC2 AMI. + The EC2 scripts now support launch of public AMIs. + (tomwhite) + +28. HADOOP-1025. Remove some obsolete code in ipc.Server. (cutting) + +29. HADOOP-997. Implement S3 retry mechanism for failed block + transfers. This includes a generic retry mechanism for use + elsewhere in Hadoop. (tomwhite) + +30. HADOOP-990. Improve HDFS support for full datanode volumes. + (Raghu Angadi via cutting) + +31. HADOOP-564. Replace uses of "dfs://" URIs with the more standard + "hdfs://". (Wendy Chien via cutting) + +32. HADOOP-1030. In unit tests, unify setting of ipc.client.timeout. + Also increase the value used from one to two seconds, in hopes of + making tests complete more reliably. (cutting) + +33. HADOOP-654. Stop assigning tasks to a tasktracker if it has + failed more than a specified number in the job. + (Arun C Murthy via cutting) + +34. HADOOP-985. Change HDFS to identify nodes by IP address rather + than by DNS hostname. (Raghu Angadi via cutting) + +35. HADOOP-248. Optimize location of map outputs to not use random + probes. (Devaraj Das via cutting) + +36. HADOOP-1029. Fix streaming's input format to correctly seek to + the start of splits. (Arun C Murthy via cutting) + +37. HADOOP-492. Add per-job and per-task counters. These are + incremented via the Reporter interface and available through the + web ui and the JobClient API. The mapreduce framework maintains a + few basic counters, and applications may add their own. Counters + are also passed to the metrics system. + (David Bowen via cutting) + +38. HADOOP-1034. Fix datanode to better log exceptions. + (Philippe Gassmann via cutting) + +39. HADOOP-878. In contrib/streaming, fix reducer=NONE to work with + multiple maps. (Arun C Murthy via cutting) + +40. HADOOP-1039. In HDFS's TestCheckpoint, avoid restarting + MiniDFSCluster so often, speeding this test. (Dhruba Borthakur via cutting) + +41. HADOOP-1040. Update RandomWriter example to use counters and + user-defined input and output formats. (omalley via cutting) + +42. HADOOP-1027. Fix problems with in-memory merging during shuffle + and re-enable this optimization. (Devaraj Das via cutting) + +43. HADOOP-1036. Fix exception handling in TaskTracker to keep tasks + from being lost. (Arun C Murthy via cutting) + +44. HADOOP-1042. Improve the handling of failed map output fetches. + (Devaraj Das via cutting) + +45. HADOOP-928. Make checksums optional per FileSystem. + (Hairong Kuang via cutting) + +46. HADOOP-1044. Fix HDFS's TestDecommission to not spuriously fail. + (Wendy Chien via cutting) + +47. HADOOP-972. Optimize HDFS's rack-aware block placement algorithm. + (Hairong Kuang via cutting) + +48. HADOOP-1043. Optimize shuffle, increasing parallelism. + (Devaraj Das via cutting) + +49. 
HADOOP-940. Improve HDFS's replication scheduling. + (Dhruba Borthakur via cutting) + +50. HADOOP-1020. Fix a bug in Path resolution, and a problem with unit tests + on Windows. (cutting) + +51. HADOOP-941. Enhance record facility. + (Milind Bhandarkar via cutting) + +52. HADOOP-1000. Fix so that log messages in task subprocesses are + not written to a task's standard error. (Arun C Murthy via cutting) + +53. HADOOP-1037. Fix bin/slaves.sh, which currently only works with + /bin/bash, to specify /bin/bash rather than /bin/sh. (cutting) + +54. HADOOP-1046. Clean up tmp from partially received stale block files. (ab) + +55. HADOOP-1041. Optimize mapred counter implementation. Also group + counters by their declaring Enum. (David Bowen via cutting) + +56. HADOOP-1032. Permit one to specify jars that will be cached + across multiple jobs. (Gautam Kowshik via cutting) + +57. HADOOP-1051. Add optional checkstyle task to build.xml. To use + this, developers must download the (LGPL'd) checkstyle jar + themselves. (tomwhite via cutting) + +58. HADOOP-1049. Fix a race condition in IPC client. + (Devaraj Das via cutting) + +60. HADOOP-1056. Check HDFS include/exclude node lists with both IP + address and hostname. (Wendy Chien via cutting) + +61. HADOOP-994. In HDFS, limit the number of blocks invalidated at + once. Large lists were causing datanodes to time out. + (Dhruba Borthakur via cutting) + +62. HADOOP-432. Add a trash feature, disabled by default. When + enabled, the FSShell 'rm' command will move things to a trash + directory in the filesystem. In HDFS, a thread periodically + checkpoints the trash and removes old checkpoints. (cutting) + + +Release 0.11.2 - 2007-02-16 + + 1. HADOOP-1009. Fix an infinite loop in the HDFS namenode. + (Dhruba Borthakur via cutting) + + 2. HADOOP-1014. Disable in-memory merging during shuffle, as this is + causing data corruption. (Devaraj Das via cutting) + + +Release 0.11.1 - 2007-02-09 + + 1. HADOOP-976. Make SequenceFile.Metadata public. (Runping Qi via cutting) + + 2. HADOOP-917. Fix a NullPointerException in SequenceFile's merger + with large map outputs. (omalley via cutting) + + 3. HADOOP-984. Fix a bug in shuffle error handling introduced by + HADOOP-331. If a map output is unavailable, the job tracker is + once more informed. (Arun C Murthy via cutting) + + 4. HADOOP-987. Fix a problem in HDFS where blocks were not removed + from neededReplications after a replication target was selected. + (Hairong Kuang via cutting) + +Release 0.11.0 - 2007-02-02 + + 1. HADOOP-781. Remove methods deprecated in 0.10 that are no longer + widely used. (cutting) + + 2. HADOOP-842. Change HDFS protocol so that the open() method is + passed the client hostname, to permit the namenode to order block + locations on the basis of network topology. + (Hairong Kuang via cutting) + + 3. HADOOP-852. Add an ant task to compile record definitions, and + use it to compile record unit tests. (Milind Bhandarkar via cutting) + + 4. HADOOP-757. Fix "Bad File Descriptor" exception in HDFS client + when an output file is closed twice. (Raghu Angadi via cutting) + + 5. [ intentionally blank ] + + 6. HADOOP-890. Replace dashes in metric names with underscores, + for better compatibility with some monitoring systems. + (Nigel Daley via cutting) + + 7. HADOOP-801. Add to jobtracker a log of task completion events. + (Sanjay Dahiya via cutting) + + 8. HADOOP-855. In HDFS, try to repair files with checksum errors.
+ An exception is still thrown, but corrupt blocks are now removed + when they have replicas. (Wendy Chien via cutting) + + 9. HADOOP-886. Reduce number of timer threads created by metrics API + by pooling contexts. (Nigel Daley via cutting) + +10. HADOOP-897. Add a "javac.args" property to build.xml that permits + one to pass arbitrary options to javac. (Milind Bhandarkar via cutting) + +11. HADOOP-899. Update libhdfs for changes in HADOOP-871. + (Sameer Paranjpye via cutting) + +12. HADOOP-905. Remove some dead code from JobClient. (cutting) + +13. HADOOP-902. Fix a NullPointerException in HDFS client when + closing output streams. (Raghu Angadi via cutting) + +14. HADOOP-735. Switch generated record code to use BytesWritable to + represent fields of type 'buffer'. (Milind Bhandarkar via cutting) + +15. HADOOP-830. Improve mapreduce merge performance by buffering and + merging multiple map outputs as they arrive at reduce nodes before + they're written to disk. (Devaraj Das via cutting) + +16. HADOOP-908. Add a new contrib package, Abacus, that simplifies + counting and aggregation, built on MapReduce. (Runping Qi via cutting) + +17. HADOOP-901. Add support for recursive renaming to the S3 filesystem. + (Tom White via cutting) + +18. HADOOP-912. Fix a bug in TaskTracker.isIdle() that was + sporadically causing unit test failures. (Arun C Murthy via cutting) + +19. HADOOP-909. Fix the 'du' command to correctly compute the size of + FileSystem directory trees. (Hairong Kuang via cutting) + +20. HADOOP-731. When a checksum error is encountered on a file stored + in HDFS, try another replica of the data, if any. + (Wendy Chien via cutting) + +21. HADOOP-732. Add support to SequenceFile for arbitrary metadata, + as a set of attribute value pairs. (Runping Qi via cutting) + +22. HADOOP-929. Fix PhasedFileSystem to pass configuration to + underlying FileSystem. (Sanjay Dahiya via cutting) + +23. HADOOP-935. Fix contrib/abacus to not delete pre-existing output + files, but rather to fail in this case. (Runping Qi via cutting) + +24. HADOOP-936. More metric renamings, as in HADOOP-890. + (Nigel Daley via cutting) + +25. HADOOP-856. Fix HDFS's fsck command to not report that + non-existent filesystems are healthy. (Milind Bhandarkar via cutting) + +26. HADOOP-602. Remove the dependency on Lucene's PriorityQueue + utility, by copying it into Hadoop. This facilitates using Hadoop + with different versions of Lucene without worrying about CLASSPATH + order. (Milind Bhandarkar via cutting) + +27. [ intentionally blank ] + +28. HADOOP-227. Add support for backup namenodes, which periodically + get snapshots of the namenode state. (Dhruba Borthakur via cutting) + +29. HADOOP-884. Add scripts in contrib/ec2 to facilitate running + Hadoop on an Amazon EC2 cluster. (Tom White via cutting) + +30. HADOOP-937. Change the namenode to request re-registration of + datanodes in more circumstances. (Hairong Kuang via cutting) + +31. HADOOP-922. Optimize small forward seeks in HDFS. If data is + likely already in flight, skip ahead rather than re-opening the + block. (Dhruba Borthakur via cutting) + +32. HADOOP-961. Add a 'job -events' sub-command that prints job + events, including task completions and failures. (omalley via cutting) + +33. HADOOP-959. Fix namenode snapshot code added in HADOOP-227 to + work on Windows. (Dhruba Borthakur via cutting) + +34. HADOOP-934. Fix TaskTracker to catch metrics exceptions that were + causing heartbeats to fail. (Arun Murthy via cutting) + +35. HADOOP-881.
Fix JobTracker web interface to display the correct + number of task failures. (Sanjay Dahiya via cutting) + +36. HADOOP-788. Change contrib/streaming to subclass TextInputFormat, + permitting it to take advantage of native compression facilities. + (Sanjay Dahiya via cutting) + +37. HADOOP-962. In contrib/ec2: make scripts executable in tar file; + add a README; make the environment file use a template. + (Tom White via cutting) + +38. HADOOP-549. Fix a NullPointerException in TaskReport's + serialization. (omalley via cutting) + +39. HADOOP-963. Fix remote exceptions to have the stack trace of the + caller thread, not the IPC listener thread. (omalley via cutting) + +40. HADOOP-967. Change RPC clients to start sending a version header. + (omalley via cutting) + +41. HADOOP-964. Fix a bug introduced by HADOOP-830 where jobs failed + whose comparators and/or i/o types were in the job's jar. + (Dennis Kubes via cutting) + +42. HADOOP-969. Fix a deadlock in JobTracker. (omalley via cutting) + +43. HADOOP-862. Add support for the S3 FileSystem to the CopyFiles + tool. (Michael Stack via cutting) + +44. HADOOP-965. Fix IsolationRunner so that job's jar can be found. + (Dennis Kubes via cutting) + +45. HADOOP-309. Fix two NullPointerExceptions in StatusHttpServer. + (navychen via cutting) + +46. HADOOP-692. Add rack awareness to HDFS's placement of blocks. + (Hairong Kuang via cutting) + + +Release 0.10.1 - 2007-01-10 + + 1. HADOOP-857. Fix S3 FileSystem implementation to permit its use + for MapReduce input and output. (Tom White via cutting) + + 2. HADOOP-863. Reduce logging verbosity introduced by HADOOP-813. + (Devaraj Das via cutting) + + 3. HADOOP-815. Fix memory leaks in JobTracker. (Arun C Murthy via cutting) + + 4. HADOOP-600. Fix a race condition in JobTracker. + (Arun C Murthy via cutting) + + 5. HADOOP-864. Fix 'bin/hadoop -jar' to operate correctly when + hadoop.tmp.dir does not yet exist. (omalley via cutting) + + 6. HADOOP-866. Fix 'dfs -get' command to remove existing crc files, + if any. (Milind Bhandarkar via cutting) + + 7. HADOOP-871. Fix a bug in bin/hadoop setting JAVA_LIBRARY_PATH. + (Arun C Murthy via cutting) + + 8. HADOOP-868. Decrease the number of open files during map, + respecting io.sort.factor. (Devaraj Das via cutting) + + 9. HADOOP-865. Fix S3 FileSystem so that partially created files can + be deleted. (Tom White via cutting) + +10. HADOOP-873. Pass java.library.path correctly to child processes. + (omalley via cutting) + +11. HADOOP-851. Add support for the LZO codec. This is much faster + than the default, zlib-based compression, but it is only available + when the native library is built. (Arun C Murthy via cutting) + +12. HADOOP-880. Fix S3 FileSystem to remove directories. + (Tom White via cutting) + +13. HADOOP-879. Fix InputFormatBase to handle output generated by + MapFileOutputFormat. (cutting) + +14. HADOOP-659. In HDFS, prioritize replication of blocks based on + current replication level. Blocks which are severely + under-replicated should be further replicated before blocks which + are less under-replicated. (Hairong Kuang via cutting) + +15. HADOOP-726. Deprecate FileSystem locking methods. They are not + currently usable. Locking should eventually be provided as an + independent service. (Raghu Angadi via cutting) + +16. HADOOP-758. Fix exception handling during reduce so that root + exceptions are not masked by exceptions in cleanups. + (Raghu Angadi via cutting) + + +Release 0.10.0 - 2007-01-05 + + 1. HADOOP-763.
Change DFS namenode benchmark to not use MapReduce. + (Nigel Daley via cutting) + + 2. HADOOP-777. Use fully-qualified hostnames for tasktrackers and + datanodes. (Mahadev Konar via cutting) + + 3. HADOOP-621. Change 'dfs -cat' to exit sooner when output has been + closed. (Dhruba Borthakur via cutting) + + 4. HADOOP-752. Rationalize some synchronization in DFS namenode. + (Dhruba Borthakur via cutting) + + 5. HADOOP-629. Fix RPC services to better check the protocol name and + version. (omalley via cutting) + + 6. HADOOP-774. Limit the number of invalid blocks returned with + heartbeats by the namenode to datanodes. Transmitting and + processing very large invalid block lists can tie up both the + namenode and datanode for too long. (Dhruba Borthakur via cutting) + + 7. HADOOP-738. Change 'dfs -get' command to not create CRC files by + default, adding a -crc option to force their creation. + (Milind Bhandarkar via cutting) + + 8. HADOOP-676. Improved exceptions and error messages for common job + input specification errors. (Sanjay Dahiya via cutting) + + 9. [Included in 0.9.2 release] + +10. HADOOP-756. Add new dfsadmin option to wait for filesystem to be + operational. (Dhruba Borthakur via cutting) + +11. HADOOP-770. Fix jobtracker web interface to display, on restart, + jobs that were running when it was last stopped. + (Sanjay Dahiya via cutting) + +12. HADOOP-331. Write all map outputs to a single file with an index, + rather than to a separate file per reduce task. This should both + speed the shuffle and make things more scalable. + (Devaraj Das via cutting) + +13. HADOOP-818. Fix contrib unit tests to not depend on core unit + tests. (omalley via cutting) + +14. HADOOP-786. Log common exception at debug level. + (Sanjay Dahiya via cutting) + +15. HADOOP-796. Provide more convenient access to failed task + information in the web interface. (Sanjay Dahiya via cutting) + +16. HADOOP-764. Reduce memory allocations in namenode some. + (Dhruba Borthakur via cutting) + +17. HADOOP-802. Update description of mapred.speculative.execution to + mention reduces. (Nigel Daley via cutting) + +18. HADOOP-806. Include link to datanodes on front page of namenode + web interface. (Raghu Angadi via cutting) + +19. HADOOP-618. Make JobSubmissionProtocol public. + (Arun C Murthy via cutting) + +20. HADOOP-782. Fully remove killed tasks. (Arun C Murthy via cutting) + +21. HADOOP-792. Fix 'dfs -mv' to return correct status. + (Dhruba Borthakur via cutting) + +22. HADOOP-673. Give each task its own working directory again. + (Mahadev Konar via cutting) + +23. HADOOP-571. Extend the syntax of Path to be a URI; to be + optionally qualified with a scheme and authority. The scheme + determines the FileSystem implementation, while the authority + determines the FileSystem instance. New FileSystem + implementations may be provided by defining an fs..impl + property, naming the FileSystem implementation class. This + permits easy integration of new FileSystem implementations. + (cutting) + +24. HADOOP-720. Add an HDFS white paper to website. + (Dhruba Borthakur via cutting) + +25. HADOOP-794. Fix a divide-by-zero exception when a job specifies + zero map tasks. (omalley via cutting) + +26. HADOOP-454. Add a 'dfs -dus' command that provides summary disk + usage. (Hairong Kuang via cutting) + +27. HADOOP-574. Add an Amazon S3 implementation of FileSystem. To + use this, one need only specify paths of the form + s3://id:secret@bucket/. 
Alternately, the AWS access key id and + secret can be specified in your config, with the properties + fs.s3.awsAccessKeyId and fs.s3.awsSecretAccessKey. + (Tom White via cutting) + +28. HADOOP-824. Rename DFSShell to be FsShell, since it applies + generically to all FileSystem implementations. (cutting) + +29. HADOOP-813. Fix map output sorting to report progress, so that + sorts which take longer than the task timeout do not fail. + (Devaraj Das via cutting) + +30. HADOOP-825. Fix HDFS daemons when configured with new URI syntax. + (omalley via cutting) + +31. HADOOP-596. Fix a bug in phase reporting during reduce. + (Sanjay Dahiya via cutting) + +32. HADOOP-811. Add a utility, MultithreadedMapRunner. + (Alejandro Abdelnur via cutting) + +33. HADOOP-829. Within HDFS, clearly separate three different + representations for datanodes: one for RPCs, one for + namenode-internal use, and one for namespace persistence. + (Dhruba Borthakur via cutting) + +34. HADOOP-823. Fix problem starting datanode when not all configured + data directories exist. (Bryan Pendleton via cutting) + +35. HADOOP-451. Add a Split interface. CAUTION: This incompatibly + changes the InputFormat and RecordReader interfaces. Not only is + FileSplit replaced with Split, but a FileSystem parameter is no + longer passed in several methods, input validation has changed, + etc. (omalley via cutting) + +36. HADOOP-814. Optimize locking in namenode. (Dhruba Borthakur via cutting) + +37. HADOOP-738. Change 'fs -put' and 'fs -get' commands to accept + standard input and output, respectively. Standard i/o is + specified by a file named '-'. (Wendy Chien via cutting) + +38. HADOOP-835. Fix a NullPointerException reading record-compressed + SequenceFiles. (Hairong Kuang via cutting) + +39. HADOOP-836. Fix a MapReduce bug on Windows, where the wrong + FileSystem was used. Also add a static FileSystem.getLocal() + method and better Path checking in HDFS, to help avoid such issues + in the future. (omalley via cutting) + +40. HADOOP-837. Improve RunJar utility to unpack jar file + hadoop.tmp.dir, rather than the system temporary directory. + (Hairong Kuang via cutting) + +41. HADOOP-841. Fix native library to build 32-bit version even when + on a 64-bit host, if a 32-bit JVM is used. (Arun C Murthy via cutting) + +42. HADOOP-838. Fix tasktracker to pass java.library.path to + sub-processes, so that libhadoop.a is found. + (Arun C Murthy via cutting) + +43. HADOOP-844. Send metrics messages on a fixed-delay schedule + instead of a fixed-rate schedule. (David Bowen via cutting) + +44. HADOOP-849. Fix OutOfMemory exceptions in TaskTracker due to a + file handle leak in SequenceFile. (Devaraj Das via cutting) + +45. HADOOP-745. Fix a synchronization bug in the HDFS namenode. + (Dhruba Borthakur via cutting) + +46. HADOOP-850. Add Writable implementations for variable-length + integers. (ab via cutting) + +47. HADOOP-525. Add raw comparators to record types. This greatly + improves record sort performance. (Milind Bhandarkar via cutting) + +48. HADOOP-628. Fix a problem with 'fs -cat' command, where some + characters were replaced with question marks. (Wendy Chien via cutting) + +49. HADOOP-804. Reduce verbosity of MapReduce logging. + (Sanjay Dahiya via cutting) + +50. HADOOP-853. Rename 'site' to 'docs', in preparation for inclusion + in releases. (cutting) + +51. HADOOP-371. Include contrib jars and site documentation in + distributions. Also add contrib and example documentation to + distributed javadoc, in separate sections. 
(Nigel Daley via cutting) + +52. HADOOP-846. Report progress during entire map, as sorting of + intermediate outputs may happen at any time, potentially causing + task timeouts. (Devaraj Das via cutting) + +53. HADOOP-840. In task tracker, queue task cleanups and perform them + in a separate thread. (omalley & Mahadev Konar via cutting) + +54. HADOOP-681. Add to HDFS the ability to decommission nodes. This + causes their blocks to be re-replicated on other nodes, so that + they may be removed from a cluster. (Dhruba Borthakur via cutting) + +55. HADOOP-470. In HDFS web ui, list the datanodes containing each + copy of a block. (Hairong Kuang via cutting) + +56. HADOOP-700. Change bin/hadoop to only include core jar file on + classpath, not example, test, etc. Also rename core jar to + hadoop-${version}-core.jar so that it can be more easily + identified. (Nigel Daley via cutting) + +57. HADOOP-619. Extend InputFormatBase to accept individual files and + glob patterns as MapReduce inputs, not just directories. Also + change contrib/streaming to use this. (Sanjay Dahia via cutting) + + +Release 0.9.2 - 2006-12-15 + + 1. HADOOP-639. Restructure InterTrackerProtocol to make task + accounting more reliable. (Arun C Murthy via cutting) + + 2. HADOOP-827. Turn off speculative execution by default, since it's + currently broken. (omalley via cutting) + + 3. HADOOP-791. Fix a deadlock in the task tracker. + (Mahadev Konar via cutting) + + +Release 0.9.1 - 2006-12-06 + + 1. HADOOP-780. Use ReflectionUtils to instantiate key and value + objects. (ab) + + 2. HADOOP-779. Fix contrib/streaming to work correctly with gzipped + input files. (Hairong Kuang via cutting) + + +Release 0.9.0 - 2006-12-01 + + 1. HADOOP-655. Remove most deprecated code. A few deprecated things + remain, notably UTF8 and some methods that are still required. + Also cleaned up constructors for SequenceFile, MapFile, SetFile, + and ArrayFile a bit. (cutting) + + 2. HADOOP-565. Upgrade to Jetty version 6. (Sanjay Dahiya via cutting) + + 3. HADOOP-682. Fix DFS format command to work correctly when + configured with a non-existent directory. (Sanjay Dahiya via cutting) + + 4. HADOOP-645. Fix a bug in contrib/streaming when -reducer is NONE. + (Dhruba Borthakur via cutting) + + 5. HADOOP-687. Fix a classpath bug in bin/hadoop that blocked the + servers from starting. (Sameer Paranjpye via omalley) + + 6. HADOOP-683. Remove a script dependency on bash, so it works with + dash, the new default for /bin/sh on Ubuntu. (James Todd via cutting) + + 7. HADOOP-382. Extend unit tests to run multiple datanodes. + (Milind Bhandarkar via cutting) + + 8. HADOOP-604. Fix some synchronization issues and a + NullPointerException in DFS datanode. (Raghu Angadi via cutting) + + 9. HADOOP-459. Fix memory leaks and a host of other issues with + libhdfs. (Sameer Paranjpye via cutting) + +10. HADOOP-694. Fix a NullPointerException in jobtracker. + (Mahadev Konar via cutting) + +11. HADOOP-637. Fix a memory leak in the IPC server. Direct buffers + are not collected like normal buffers, and provided little + advantage. (Raghu Angadi via cutting) + +12. HADOOP-696. Fix TestTextInputFormat unit test to not rely on the + order of directory listings. (Sameer Paranjpye via cutting) + +13. HADOOP-611. Add support for iterator-based merging to + SequenceFile. (Devaraj Das via cutting) + +14. HADOOP-688. Move DFS administrative commands to a separate + command named 'dfsadmin'. (Dhruba Borthakur via cutting) + +15. HADOOP-708. 
Fix test-libhdfs to return the correct status, so + that failures will break the build. (Nigel Daley via cutting) + +16. HADOOP-646. Fix namenode to handle edits files larger than 2GB. + (Milind Bhandarkar via cutting) + +17. HADOOP-705. Fix a bug in the JobTracker when failed jobs were + not completely cleaned up. (Mahadev Konar via cutting) + +18. HADOOP-613. Perform final merge while reducing. This removes one + sort pass over the data and should consequently significantly + decrease overall processing time. (Devaraj Das via cutting) + +19. HADOOP-661. Make each job's configuration visible through the web + ui. (Arun C Murthy via cutting) + +20. HADOOP-489. In MapReduce, separate user logs from system logs. + Each task's log output is now available through the web ui. (Arun + C Murthy via cutting) + +21. HADOOP-712. Fix record io's xml serialization to correctly handle + control-characters. (Milind Bhandarkar via cutting) + +22. HADOOP-668. Improvements to the web-based DFS browser. + (Hairong Kuang via cutting) + +23. HADOOP-715. Fix build.xml so that test logs are written in build + directory, rather than in CWD. (Arun C Murthy via cutting) + +24. HADOOP-538. Add support for building an optional native library, + libhadoop.so, that improves the performance of zlib-based + compression. To build this, specify -Dcompile.native to Ant. + (Arun C Murthy via cutting) + +25. HADOOP-610. Fix an problem when the DFS block size is configured + to be smaller than the buffer size, typically only when debugging. + (Milind Bhandarkar via cutting) + +26. HADOOP-695. Fix a NullPointerException in contrib/streaming. + (Hairong Kuang via cutting) + +27. HADOOP-652. In DFS, when a file is deleted, the block count is + now decremented. (Vladimir Krokhmalyov via cutting) + +28. HADOOP-725. In DFS, optimize block placement algorithm, + previously a performance bottleneck. (Milind Bhandarkar via cutting) + +29. HADOOP-723. In MapReduce, fix a race condition during the + shuffle, which resulted in FileNotFoundExceptions. (omalley via cutting) + +30. HADOOP-447. In DFS, fix getBlockSize(Path) to work with relative + paths. (Raghu Angadi via cutting) + +31. HADOOP-733. Make exit codes in DFShell consistent and add a unit + test. (Dhruba Borthakur via cutting) + +32. HADOOP-709. Fix contrib/streaming to work with commands that + contain control characters. (Dhruba Borthakur via cutting) + +33. HADOOP-677. In IPC, permit a version header to be transmitted + when connections are established. This will permit us to change + the format of IPC requests back-compatibly in subsequent releases. + (omalley via cutting) + +34. HADOOP-699. Fix DFS web interface so that filesystem browsing + works correctly, using the right port number. Also add support + for sorting datanode list by various columns. + (Raghu Angadi via cutting) + +35. HADOOP-76. Implement speculative reduce. Now when a job is + configured for speculative execution, both maps and reduces will + execute speculatively. Reduce outputs are written to temporary + location and moved to the final location when reduce is complete. + (Sanjay Dahiya via cutting) + +36. HADOOP-736. Roll back to Jetty 5.1.4, due to performance problems + with Jetty 6.0.1. + +37. HADOOP-739. Fix TestIPC to use different port number, making it + more reliable. (Nigel Daley via cutting) + +38. HADOOP-749. Fix a NullPointerException in jobfailures.jsp. + (omalley via cutting) + +39. HADOOP-747. Fix record serialization to work correctly when + records are embedded in Maps. 
(Milind Bhandarkar via cutting) + +40. HADOOP-698. Fix HDFS client not to retry the same datanode on + read failures. (Milind Bhandarkar via cutting) + +41. HADOOP-689. Add GenericWritable, to facilitate polymorphism in + MapReduce, SequenceFile, etc. (Feng Jiang via cutting) + +42. HADOOP-430. Stop datanode's HTTP server when registration with + namenode fails. (Wendy Chien via cutting) + +43. HADOOP-750. Fix a potential race condition during mapreduce + shuffle. (omalley via cutting) + +44. HADOOP-728. Fix contrib/streaming-related issues, including + '-reducer NONE'. (Sanjay Dahiya via cutting) + + +Release 0.8.0 - 2006-11-03 + + 1. HADOOP-477. Extend contrib/streaming to scan the PATH environment + variables when resolving executable program names. + (Dhruba Borthakur via cutting) + + 2. HADOOP-583. In DFSClient, reduce the log level of re-connect + attempts from 'info' to 'debug', so they are not normally shown. + (Konstantin Shvachko via cutting) + + 3. HADOOP-498. Re-implement DFS integrity checker to run server-side, + for much improved performance. (Milind Bhandarkar via cutting) + + 4. HADOOP-586. Use the jar name for otherwise un-named jobs. + (Sanjay Dahiya via cutting) + + 5. HADOOP-514. Make DFS heartbeat interval configurable. + (Milind Bhandarkar via cutting) + + 6. HADOOP-588. Fix logging and accounting of failed tasks. + (Sanjay Dahiya via cutting) + + 7. HADOOP-462. Improve command line parsing in DFSShell, so that + incorrect numbers of arguments result in informative errors rather + than ArrayOutOfBoundsException. (Dhruba Borthakur via cutting) + + 8. HADOOP-561. Fix DFS so that one replica of each block is written + locally, if possible. This was the intent, but there as a bug. + (Dhruba Borthakur via cutting) + + 9. HADOOP-610. Fix TaskTracker to survive more exceptions, keeping + tasks from becoming lost. (omalley via cutting) + +10. HADOOP-625. Add a servlet to all http daemons that displays a + stack dump, useful for debugging. (omalley via cutting) + +11. HADOOP-554. Fix DFSShell to return -1 for errors. + (Dhruba Borthakur via cutting) + +12. HADOOP-626. Correct the documentation in the NNBench example + code, and also remove a mistaken call there. + (Nigel Daley via cutting) + +13. HADOOP-634. Add missing license to many files. + (Nigel Daley via cutting) + +14. HADOOP-627. Fix some synchronization problems in MiniMRCluster + that sometimes caused unit tests to fail. (Nigel Daley via cutting) + +15. HADOOP-563. Improve the NameNode's lease policy so that leases + are held for one hour without renewal (instead of one minute). + However another attempt to create the same file will still succeed + if the lease has not been renewed within a minute. This prevents + communication or scheduling problems from causing a write to fail + for up to an hour, barring some other process trying to create the + same file. (Dhruba Borthakur via cutting) + +16. HADOOP-635. In DFSShell, permit specification of multiple files + as the source for file copy and move commands. + (Dhruba Borthakur via cutting) + +17. HADOOP-641. Change NameNode to request a fresh block report from + a re-discovered DataNode, so that no-longer-needed replications + are stopped promptly. (Konstantin Shvachko via cutting) + +18. HADOOP-642. Change IPC client to specify an explicit connect + timeout. (Konstantin Shvachko via cutting) + +19. HADOOP-638. Fix an unsynchronized access to TaskTracker's + internal state. (Nigel Daley via cutting) + +20. HADOOP-624. 
Fix servlet path to stop a Jetty warning on startup. + (omalley via cutting) + +21. HADOOP-578. Failed tasks are no longer placed at the end of the + task queue. This was originally done to work around other + problems that have now been fixed. Re-executing failed tasks + sooner causes buggy jobs to fail faster. (Sanjay Dahiya via cutting) + +22. HADOOP-658. Update source file headers per Apache policy. (cutting) + +23. HADOOP-636. Add MapFile & ArrayFile constructors which accept a + Progressable, and pass it down to SequenceFile. This permits + reduce tasks which use MapFile to still report progress while + writing blocks to the filesystem. (cutting) + +24. HADOOP-576. Enable contrib/streaming to use the file cache. Also + extend the cache to permit symbolic links to cached items, rather + than local file copies. (Mahadev Konar via cutting) + +25. HADOOP-482. Fix unit tests to work when a cluster is running on + the same machine, removing port conflicts. (Wendy Chien via cutting) + +26. HADOOP-90. Permit dfs.name.dir to list multiple directories, + where namenode data is to be replicated. (Milind Bhandarkar via cutting) + +27. HADOOP-651. Fix DFSCk to correctly pass parameters to the servlet + on the namenode. (Milind Bhandarkar via cutting) + +28. HADOOP-553. Change main() routines of DataNode and NameNode to + log exceptions rather than letting the JVM print them to standard + error. Also, change the hadoop-daemon.sh script to rotate + standard i/o log files. (Raghu Angadi via cutting) + +29. HADOOP-399. Fix javadoc warnings. (Nigel Daley via cutting) + +30. HADOOP-599. Fix web ui and command line to correctly report DFS + filesystem size statistics. Also improve web layout. + (Raghu Angadi via cutting) + +31. HADOOP-660. Permit specification of junit test output format. + (Nigel Daley via cutting) + +32. HADOOP-663. Fix a few unit test issues. (Mahadev Konar via cutting) + +33. HADOOP-664. Cause entire build to fail if libhdfs tests fail. + (Nigel Daley via cutting) + +34. HADOOP-633. Keep jobtracker from dying when job initialization + throws exceptions. Also improve exception handling in a few other + places and add more informative thread names. + (omalley via cutting) + +35. HADOOP-669. Fix a problem introduced by HADOOP-90 that can cause + DFS to lose files. (Milind Bhandarkar via cutting) + +36. HADOOP-373. Consistently check the value returned by + FileSystem.mkdirs(). (Wendy Chien via cutting) + +37. HADOOP-670. Code cleanups in some DFS internals: use generic + types, replace Vector with ArrayList, etc. + (Konstantin Shvachko via cutting) + +38. HADOOP-647. Permit map outputs to use a different compression + type than the job output. (omalley via cutting) + +39. HADOOP-671. Fix file cache to check for pre-existence before + creating . (Mahadev Konar via cutting) + +40. HADOOP-665. Extend many DFSShell commands to accept multiple + arguments. Now commands like "ls", "rm", etc. will operate on + multiple files. (Dhruba Borthakur via cutting) + + +Release 0.7.2 - 2006-10-18 + + 1. HADOOP-607. Fix a bug where classes included in job jars were not + found by tasks. (Mahadev Konar via cutting) + + 2. HADOOP-609. Add a unit test that checks that classes in job jars + can be found by tasks. Also modify unit tests to specify multiple + local directories. (Mahadev Konar via cutting) + + +Release 0.7.1 - 2006-10-11 + + 1. HADOOP-593. Fix a NullPointerException in the JobTracker. + (omalley via cutting) + + 2. HADOOP-592. Fix a NullPointerException in the IPC Server. 
Also + consistently log when stale calls are discarded. (omalley via cutting) + + 3. HADOOP-594. Increase the DFS safe-mode threshold from .95 to + .999, so that nearly all blocks must be reported before filesystem + modifications are permitted. (Konstantin Shvachko via cutting) + + 4. HADOOP-598. Fix tasks to retry when reporting completion, so that + a single RPC timeout won't fail a task. (omalley via cutting) + + 5. HADOOP-597. Fix TaskTracker to not discard map outputs for errors + in transmitting them to reduce nodes. (omalley via cutting) + + +Release 0.7.0 - 2006-10-06 + + 1. HADOOP-243. Fix rounding in the display of task and job progress + so that things are not shown to be 100% complete until they are in + fact finished. (omalley via cutting) + + 2. HADOOP-438. Limit the length of absolute paths in DFS, since the + file format used to store pathnames has some limitations. + (Wendy Chien via cutting) + + 3. HADOOP-530. Improve error messages in SequenceFile when keys or + values are of the wrong type. (Hairong Kuang via cutting) + + 4. HADOOP-288. Add a file caching system and use it in MapReduce to + cache job jar files on slave nodes. (Mahadev Konar via cutting) + + 5. HADOOP-533. Fix unit test to not modify conf directory. + (Hairong Kuang via cutting) + + 6. HADOOP-527. Permit specification of the local address that various + Hadoop daemons should bind to. (Philippe Gassmann via cutting) + + 7. HADOOP-542. Updates to contrib/streaming: reformatted source code, + on-the-fly merge sort, a fix for HADOOP-540, etc. + (Michel Tourn via cutting) + + 8. HADOOP-545. Remove an unused config file parameter. + (Philippe Gassmann via cutting) + + 9. HADOOP-548. Add an Ant property "test.output" to build.xml that + causes test output to be logged to the console. (omalley via cutting) + +10. HADOOP-261. Record an error message when map output is lost. + (omalley via cutting) + +11. HADOOP-293. Report the full list of task error messages in the + web ui, not just the most recent. (omalley via cutting) + +12. HADOOP-551. Restore JobClient's console printouts to only include + a maximum of one update per one percent of progress. + (omalley via cutting) + +13. HADOOP-306. Add a "safe" mode to DFS. The name node enters this + when less than a specified percentage of file data is complete. + Currently safe mode is only used on startup, but eventually it + will also be entered when datanodes disconnect and file data + becomes incomplete. While in safe mode no filesystem + modifications are permitted and block replication is inhibited. + (Konstantin Shvachko via cutting) + +14. HADOOP-431. Change 'dfs -rm' to not operate recursively and add a + new command, 'dfs -rmr' which operates recursively. + (Sameer Paranjpye via cutting) + +15. HADOOP-263. Include timestamps for job transitions. The web + interface now displays the start and end times of tasks and the + start times of sorting and reducing for reduce tasks. Also, + extend ObjectWritable to handle enums, so that they can be passed + as RPC parameters. (Sanjay Dahiya via cutting) + +16. HADOOP-556. Contrib/streaming: send keep-alive reports to task + tracker every 10 seconds rather than every 100 records, to avoid + task timeouts. (Michel Tourn via cutting) + +17. HADOOP-547. Fix reduce tasks to ping tasktracker while copying + data, rather than only between copies, avoiding task timeouts. + (Sanjay Dahiya via cutting) + +18. HADOOP-537. Fix src/c++/libhdfs build process to create files in + build/, no longer modifying the source tree. 
+ (Arun C Murthy via cutting) + +19. HADOOP-487. Throw a more informative exception for unknown RPC + hosts. (Sameer Paranjpye via cutting) + +20. HADOOP-559. Add file name globbing (pattern matching) support to + the FileSystem API, and use it in DFSShell ('bin/hadoop dfs') + commands. (Hairong Kuang via cutting) + +21. HADOOP-508. Fix a bug in FSDataInputStream. Incorrect data was + returned after seeking to a random location. + (Milind Bhandarkar via cutting) + +22. HADOOP-560. Add a "killed" task state. This can be used to + distinguish kills from other failures. Task state has also been + converted to use an enum type instead of an int, uncovering a bug + elsewhere. The web interface is also updated to display killed + tasks. (omalley via cutting) + +23. HADOOP-423. Normalize Paths containing directories named "." and + "..", using the standard, unix interpretation. Also add checks in + DFS, prohibiting the use of "." or ".." as directory or file + names. (Wendy Chien via cutting) + +24. HADOOP-513. Replace map output handling with a servlet, rather + than a JSP page. This fixes an issue where + IllegalStateException's were logged, sets content-length + correctly, and better handles some errors. (omalley via cutting) + +25. HADOOP-552. Improved error checking when copying map output files + to reduce nodes. (omalley via cutting) + +26. HADOOP-566. Fix scripts to work correctly when accessed through + relative symbolic links. (Lee Faris via cutting) + +27. HADOOP-519. Add positioned read methods to FSInputStream. These + permit one to read from a stream without moving its position, and + can hence be performed by multiple threads at once on a single + stream. Implement an optimized version for DFS and local FS. + (Milind Bhandarkar via cutting) + +28. HADOOP-522. Permit block compression with MapFile and SetFile. + Since these formats are always sorted, block compression can + provide a big advantage. (cutting) + +29. HADOOP-567. Record version and revision information in builds. A + package manifest is added to the generated jar file containing + version information, and a VersionInfo utility is added that + includes further information, including the build date and user, + and the subversion revision and repository. A 'bin/hadoop + version' comand is added to show this information, and it is also + added to various web interfaces. (omalley via cutting) + +30. HADOOP-568. Fix so that errors while initializing tasks on a + tasktracker correctly report the task as failed to the jobtracker, + so that it will be rescheduled. (omalley via cutting) + +31. HADOOP-550. Disable automatic UTF-8 validation in Text. This + permits, e.g., TextInputFormat to again operate on non-UTF-8 data. + (Hairong and Mahadev via cutting) + +32. HADOOP-343. Fix mapred copying so that a failed tasktracker + doesn't cause other copies to slow. (Sameer Paranjpye via cutting) + +33. HADOOP-239. Add a persistent job history mechanism, so that basic + job statistics are not lost after 24 hours and/or when the + jobtracker is restarted. (Sanjay Dahiya via cutting) + +34. HADOOP-506. Ignore heartbeats from stale task trackers. + (Sanjay Dahiya via cutting) + +35. HADOOP-255. Discard stale, queued IPC calls. Do not process + calls whose clients will likely time out before they receive a + response. When the queue is full, new calls are now received and + queued, and the oldest calls are discarded, so that, when servers + get bogged down, they no longer develop a backlog on the socket. 
+ This should improve some DFS namenode failure modes. + (omalley via cutting) + +36. HADOOP-581. Fix datanode to not reset itself on communications + errors with the namenode. If a request to the namenode fails, the + datanode should retry, not restart. This reduces the load on the + namenode, since restarts cause a resend of the block report. + (omalley via cutting) + + +Release 0.6.2 - 2006-09-18 + +1. HADOOP-532. Fix a bug reading value-compressed sequence files, + where an exception was thrown reporting that the full value had not + been read. (omalley via cutting) + +2. HADOOP-534. Change the default value class in JobConf to be Text + instead of the now-deprecated UTF8. This fixes the Grep example + program, which was updated to use Text, but relies on this + default. (Hairong Kuang via cutting) + + +Release 0.6.1 - 2006-09-13 + + 1. HADOOP-520. Fix a bug in libhdfs, where write failures were not + correctly returning error codes. (Arun C Murthy via cutting) + + 2. HADOOP-523. Fix a NullPointerException when TextInputFormat is + explicitly specified. Also add a test case for this. + (omalley via cutting) + + 3. HADOOP-521. Fix another NullPointerException finding the + ClassLoader when using libhdfs. (omalley via cutting) + + 4. HADOOP-526. Fix a NullPointerException when attempting to start + two datanodes in the same directory. (Milind Bhandarkar via cutting) + + 5. HADOOP-529. Fix a NullPointerException when opening + value-compressed sequence files generated by pre-0.6.0 Hadoop. + (omalley via cutting) + + +Release 0.6.0 - 2006-09-08 + + 1. HADOOP-427. Replace some uses of DatanodeDescriptor in the DFS + web UI code with DatanodeInfo, the preferred public class. + (Devaraj Das via cutting) + + 2. HADOOP-426. Fix streaming contrib module to work correctly on + Solaris. This was causing nightly builds to fail. + (Michel Tourn via cutting) + + 3. HADOOP-400. Improvements to task assignment. Tasks are no longer + re-run on nodes where they have failed (unless no other node is + available). Also, tasks are better load-balanced among nodes. + (omalley via cutting) + + 4. HADOOP-324. Fix datanode to not exit when a disk is full, but + rather simply to fail writes. (Wendy Chien via cutting) + + 5. HADOOP-434. Change smallJobsBenchmark to use standard Hadoop + scripts. (Sanjay Dahiya via cutting) + + 6. HADOOP-453. Fix a bug in Text.setCapacity(). (siren via cutting) + + + 7. HADOOP-450. Change so that input types are determined by the + RecordReader rather than specified directly in the JobConf. This + facilitates jobs with a variety of input types. + + WARNING: This contains incompatible API changes! The RecordReader + interface has two new methods that all user-defined InputFormats + must now define. Also, the values returned by TextInputFormat are + no longer of class UTF8, but now of class Text. + + 8. HADOOP-436. Fix an error-handling bug in the web ui. + (Devaraj Das via cutting) + + 9. HADOOP-455. Fix a bug in Text, where DEL was not permitted. + (Hairong Kuang via cutting) + +10. HADOOP-456. Change the DFS namenode to keep a persistent record + of the set of known datanodes. This will be used to implement a + "safe mode" where filesystem changes are prohibited when a + critical percentage of the datanodes are unavailable. + (Konstantin Shvachko via cutting) + +11. HADOOP-322. Add a job control utility. This permits one to + specify job interdependencies. Each job is submitted only after + the jobs it depends on have successfully completed. + (Runping Qi via cutting) + +12. 
HADOOP-176. Fix a bug in IntWritable.Comparator. + (Dick King via cutting) + +13. HADOOP-421. Replace uses of String in recordio package with Text + class, for improved handling of UTF-8 data. + (Milind Bhandarkar via cutting) + +14. HADOOP-464. Improved error message when job jar not found. + (Michel Tourn via cutting) + +15. HADOOP-469. Fix /bin/bash specifics that have crept into our + /bin/sh scripts since HADOOP-352. + (Jean-Baptiste Quenot via cutting) + +16. HADOOP-468. Add HADOOP_NICENESS environment variable to set + scheduling priority for daemons. (Vetle Roeim via cutting) + +17. HADOOP-473. Fix TextInputFormat to correctly handle more EOL + formats. Things now work correctly with CR, LF or CRLF. + (Dennis Kubes & James White via cutting) + +18. HADOOP-461. Make Java 1.5 an explicit requirement. (cutting) + +19. HADOOP-54. Add block compression to SequenceFile. One may now + specify that blocks of keys and values are compressed together, + improving compression for small keys and values. + SequenceFile.Writer's constructor is now deprecated and replaced + with a factory method. (Arun C Murthy via cutting) + +20. HADOOP-281. Prohibit DFS files that are also directories. + (Wendy Chien via cutting) + +21. HADOOP-486. Add the job username to JobStatus instances returned + by JobClient. (Mahadev Konar via cutting) + +22. HADOOP-437. contrib/streaming: Add support for gzipped inputs. + (Michel Tourn via cutting) + +23. HADOOP-463. Add variable expansion to config files. + Configuration property values may now contain variable + expressions. A variable is referenced with the syntax + '${variable}'. Variable values are found first in the + configuration, and then in Java system properties. The default + configuration is modified so that temporary directories are now + under ${hadoop.tmp.dir}, which is, by default, + /tmp/hadoop-${user.name}. (Michel Tourn via cutting) + +24. HADOOP-419. Fix a NullPointerException finding the ClassLoader + when using libhdfs. (omalley via cutting) + +25. HADOOP-460. Fix contrib/smallJobsBenchmark to use Text instead of + UTF8. (Sanjay Dahiya via cutting) + +26. HADOOP-196. Fix Configuration(Configuration) constructor to work + correctly. (Sami Siren via cutting) + +27. HADOOP-501. Fix Configuration.toString() to handle URL resources. + (Thomas Friol via cutting) + +28. HADOOP-499. Reduce the use of Strings in contrib/streaming, + replacing them with Text for better performance. + (Hairong Kuang via cutting) + +29. HADOOP-64. Manage multiple volumes with a single DataNode. + Previously DataNode would create a separate daemon per configured + volume, each with its own connection to the NameNode. Now all + volumes are handled by a single DataNode daemon, reducing the load + on the NameNode. (Milind Bhandarkar via cutting) + +30. HADOOP-424. Fix MapReduce so that jobs which generate zero splits + do not fail. (Frédéric Bertin via cutting) + +31. HADOOP-408. Adjust some timeouts and remove some others so that + unit tests run faster. (cutting) + +32. HADOOP-507. Fix an IllegalAccessException in DFS. + (omalley via cutting) + +33. HADOOP-320. Fix so that checksum files are correctly copied when + the destination of a file copy is a directory. + (Hairong Kuang via cutting) + +34. HADOOP-286. In DFSClient, avoid pinging the NameNode with + renewLease() calls when no files are being written. + (Konstantin Shvachko via cutting) + +35. HADOOP-312. Close idle IPC connections. All IPC connections were + cached forever. 
Now, after a connection has been idle for more + than a configurable amount of time (one second by default), the + connection is closed, conserving resources on both client and + server. (Devaraj Das via cutting) + +36. HADOOP-497. Permit the specification of the network interface and + nameserver to be used when determining the local hostname + advertised by datanodes and tasktrackers. + (Lorenzo Thione via cutting) + +37. HADOOP-441. Add a compression codec API and extend SequenceFile + to use it. This will permit the use of alternate compression + codecs in SequenceFile. (Arun C Murthy via cutting) + +38. HADOOP-483. Improvements to libhdfs build and documentation. + (Arun C Murthy via cutting) + +39. HADOOP-458. Fix a memory corruption bug in libhdfs. + (Arun C Murthy via cutting) + +40. HADOOP-517. Fix a contrib/streaming bug in end-of-line detection. + (Hairong Kuang via cutting) + +41. HADOOP-474. Add CompressionCodecFactory, and use it in + TextInputFormat and TextOutputFormat. Compressed input files are + automatically decompressed when they have the correct extension. + Output files will, when output compression is specified, be + generated with an appropriate extension. Also add a gzip codec and + fix problems with UTF8 text inputs. (omalley via cutting) + + +Release 0.5.0 - 2006-08-04 + + 1. HADOOP-352. Fix shell scripts to use /bin/sh instead of + /bin/bash, for better portability. + (Jean-Baptiste Quenot via cutting) + + 2. HADOOP-313. Permit task state to be saved so that single tasks + may be manually re-executed when debugging. (omalley via cutting) + + 3. HADOOP-339. Add method to JobClient API listing jobs that are + not yet complete, i.e., that are queued or running. + (Mahadev Konar via cutting) + + 4. HADOOP-355. Updates to the streaming contrib module, including + API fixes, making reduce optional, and adding an input type for + StreamSequenceRecordReader. (Michel Tourn via cutting) + + 5. HADOOP-358. Fix a NPE bug in Path.equals(). + (Frédéric Bertin via cutting) + + 6. HADOOP-327. Fix ToolBase to not call System.exit() when + exceptions are thrown. (Hairong Kuang via cutting) + + 7. HADOOP-359. Permit map output to be compressed. + (omalley via cutting) + + 8. HADOOP-341. Permit input URI to CopyFiles to use the HTTP + protocol. This lets one, e.g., more easily copy log files into + DFS. (Arun C Murthy via cutting) + + 9. HADOOP-361. Remove unix dependencies from streaming contrib + module tests, making them pure java. (Michel Tourn via cutting) + +10. HADOOP-354. Make public methods to stop DFS daemons. + (Barry Kaplan via cutting) + +11. HADOOP-252. Add versioning to RPC protocols. + (Milind Bhandarkar via cutting) + +12. HADOOP-356. Add contrib to "compile" and "test" build targets, so + that this code is better maintained. (Michel Tourn via cutting) + +13. HADOOP-307. Add smallJobsBenchmark contrib module. This runs + lots of small jobs, in order to determine per-task overheads. + (Sanjay Dahiya via cutting) + +14. HADOOP-342. Add a tool for log analysis: Logalyzer. + (Arun C Murthy via cutting) + +15. HADOOP-347. Add web-based browsing of DFS content. The namenode + redirects browsing requests to datanodes. Content requests are + redirected to datanodes where the data is local when possible. + (Devaraj Das via cutting) + +16. HADOOP-351. Make Hadoop IPC kernel independent of Jetty. + (Devaraj Das via cutting) + +17. HADOOP-237. Add metric reporting to DFS and MapReduce. 
With only + minor configuration changes, one can now monitor many Hadoop + system statistics using Ganglia or other monitoring systems. + (Milind Bhandarkar via cutting) + +18. HADOOP-376. Fix datanode's HTTP server to scan for a free port. + (omalley via cutting) + +19. HADOOP-260. Add --config option to shell scripts, specifying an + alternate configuration directory. (Milind Bhandarkar via cutting) + +20. HADOOP-381. Permit developers to save the temporary files for + tasks whose names match a regular expression, to facilitate + debugging. (omalley via cutting) + +21. HADOOP-344. Fix some Windows-related problems with DF. + (Konstantin Shvachko via cutting) + +22. HADOOP-380. Fix reduce tasks to poll less frequently for map + outputs. (Mahadev Konar via cutting) + +23. HADOOP-321. Refactor DatanodeInfo, in preparation for + HADOOP-306. (Konstantin Shvachko & omalley via cutting) + +24. HADOOP-385. Fix some bugs in record io code generation. + (Milind Bhandarkar via cutting) + +25. HADOOP-302. Add new Text class to replace UTF8, removing + limitations of that class. Also refactor utility methods for + writing zero-compressed integers (VInts and VLongs). + (Hairong Kuang via cutting) + +26. HADOOP-335. Refactor DFS namespace/transaction logging in + namenode. (Konstantin Shvachko via cutting) + +27. HADOOP-375. Fix handling of the datanode HTTP daemon's port so + that multiple datanodes can be run on a single host. + (Devaraj Das via cutting) + +28. HADOOP-386. When removing excess DFS block replicas, remove those + on nodes with the least free space first. + (Johan Oskarson via cutting) + +29. HADOOP-389. Fix intermittent failures of mapreduce unit tests. + Also fix some build dependencies. + (Mahadev & Konstantin via cutting) + +30. HADOOP-362. Fix a problem where jobs hang when status messages + are received out-of-order. (omalley via cutting) + +31. HADOOP-394. Change order of DFS shutdown in unit tests to + minimize errors logged. (Konstantin Shvachko via cutting) + +32. HADOOP-396. Make DatanodeID implement Writable. + (Konstantin Shvachko via cutting) + +33. HADOOP-377. Permit one to add URL resources to a Configuration. + (Jean-Baptiste Quenot via cutting) + +34. HADOOP-345. Permit iteration over Configuration key/value pairs. + (Michel Tourn via cutting) + +35. HADOOP-409. Streaming contrib module: make configuration + properties available to commands as environment variables. + (Michel Tourn via cutting) + +36. HADOOP-369. Add -getmerge option to dfs command that appends all + files in a directory into a single local file. + (Johan Oskarson via cutting) + +37. HADOOP-410. Replace some TreeMaps with HashMaps in DFS, for + a 17% performance improvement. (Milind Bhandarkar via cutting) + +38. HADOOP-411. Add unit tests for command line parser. + (Hairong Kuang via cutting) + +39. HADOOP-412. Add MapReduce input formats that support filtering + of SequenceFile data, including sampling and regex matching. + Also, move JobConf.newInstance() to a new utility class. + (Hairong Kuang via cutting) + +40. HADOOP-226. Fix fsck command to properly consider replication + counts, now that these can vary per file. (Bryan Pendleton via cutting) + +41. HADOOP-425. Add a Python MapReduce example, using Jython. + (omalley via cutting) + + +Release 0.4.0 - 2006-06-28 + + 1. HADOOP-298. Improved progress reports for CopyFiles utility, the + distributed file copier. (omalley via cutting) + + 2. HADOOP-299. Fix the task tracker, permitting multiple jobs to + more easily execute at the same time. 
(omalley via cutting) + + 3. HADOOP-250. Add an HTTP user interface to the namenode, running + on port 50070. (Devaraj Das via cutting) + + 4. HADOOP-123. Add MapReduce unit tests that run a jobtracker and + tasktracker, greatly increasing code coverage. + (Milind Bhandarkar via cutting) + + 5. HADOOP-271. Add links from jobtracker's web ui to tasktracker's + web ui. Also attempt to log a thread dump of child processes + before they're killed. (omalley via cutting) + + 6. HADOOP-210. Change RPC server to use a selector instead of a + thread per connection. This should make it easier to scale to + larger clusters. Note that this incompatibly changes the RPC + protocol: clients and servers must both be upgraded to the new + version to ensure correct operation. (Devaraj Das via cutting) + + 7. HADOOP-311. Change DFS client to retry failed reads, so that a + single read failure will not alone cause failure of a task. + (omalley via cutting) + + 8. HADOOP-314. Remove the "append" phase when reducing. Map output + files are now directly passed to the sorter, without first + appending them into a single file. Now, the first third of reduce + progress is "copy" (transferring map output to reduce nodes), the + middle third is "sort" (sorting map output) and the last third is + "reduce" (generating output). Long-term, the "sort" phase will + also be removed. (omalley via cutting) + + 9. HADOOP-316. Fix a potential deadlock in the jobtracker. + (omalley via cutting) + +10. HADOOP-319. Fix FileSystem.close() to remove the FileSystem + instance from the cache. (Hairong Kuang via cutting) + +11. HADOOP-135. Fix potential deadlock in JobTracker by acquiring + locks in a consistent order. (omalley via cutting) + +12. HADOOP-278. Check for existence of input directories before + starting MapReduce jobs, making it easier to debug this common + error. (omalley via cutting) + +13. HADOOP-304. Improve error message for + UnregisterdDatanodeException to include expected node name. + (Konstantin Shvachko via cutting) + +14. HADOOP-305. Fix TaskTracker to ask for new tasks as soon as a + task is finished, rather than waiting for the next heartbeat. + This improves performance when tasks are short. + (Mahadev Konar via cutting) + +15. HADOOP-59. Add support for generic command line options. One may + now specify the filesystem (-fs), the MapReduce jobtracker (-jt), + a config file (-conf) or any configuration property (-D). The + "dfs", "fsck", "job", and "distcp" commands currently support + this, with more to be added. (Hairong Kuang via cutting) + +16. HADOOP-296. Permit specification of the amount of reserved space + on a DFS datanode. One may specify both the percentage free and + the number of bytes. (Johan Oskarson via cutting) + +17. HADOOP-325. Fix a problem initializing RPC parameter classes, and + remove the workaround used to initialize classes. + (omalley via cutting) + +18. HADOOP-328. Add an option to the "distcp" command to ignore read + errors while copying. (omalley via cutting) + +19. HADOOP-27. Don't allocate tasks to trackers whose local free + space is too low. (Johan Oskarson via cutting) + +20. HADOOP-318. Keep slow DFS output from causing task timeouts. + This incompatibly changes some public interfaces, adding a + parameter to OutputFormat.getRecordWriter() and the new method + Reporter.progress(), but it makes lots of tasks succeed that were + previously failing. (Milind Bhandarkar via cutting) + + +Release 0.3.2 - 2006-06-09 + + 1. HADOOP-275. 
Update the streaming contrib module to use log4j for + its logging. (Michel Tourn via cutting) + + 2. HADOOP-279. Provide defaults for log4j logging parameters, so + that things still work reasonably when Hadoop-specific system + properties are not provided. (omalley via cutting) + + 3. HADOOP-280. Fix a typo in AllTestDriver which caused the wrong + test to be run when "DistributedFSCheck" was specified. + (Konstantin Shvachko via cutting) + + 4. HADOOP-240. DFS's mkdirs() implementation no longer logs a warning + when the directory already exists. (Hairong Kuang via cutting) + + 5. HADOOP-285. Fix DFS datanodes to be able to re-join the cluster + after the connection to the namenode is lost. (omalley via cutting) + + 6. HADOOP-277. Fix a race condition when creating directories. + (Sameer Paranjpye via cutting) + + 7. HADOOP-289. Improved exception handling in DFS datanode. + (Konstantin Shvachko via cutting) + + 8. HADOOP-292. Fix client-side logging to go to standard error + rather than standard output, so that it can be distinguished from + application output. (omalley via cutting) + + 9. HADOOP-294. Fixed bug where conditions for retrying after errors + in the DFS client were reversed. (omalley via cutting) + + +Release 0.3.1 - 2006-06-05 + + 1. HADOOP-272. Fix a bug in bin/hadoop setting log + parameters. (omalley & cutting) + + 2. HADOOP-274. Change applications to log to standard output rather + than to a rolling log file like daemons. (omalley via cutting) + + 3. HADOOP-262. Fix reduce tasks to report progress while they're + waiting for map outputs, so that they do not time out. + (Mahadev Konar via cutting) + + 4. HADOOP-245 and HADOOP-246. Improvements to record io package. + (Mahadev Konar via cutting) + + 5. HADOOP-276. Add logging config files to jar file so that they're + always found. (omalley via cutting) + + +Release 0.3.0 - 2006-06-02 + + 1. HADOOP-208. Enhance MapReduce web interface, adding new pages + for failed tasks, and tasktrackers. (omalley via cutting) + + 2. HADOOP-204. Tweaks to metrics package. (David Bowen via cutting) + + 3. HADOOP-209. Add a MapReduce-based file copier. This will + copy files within or between file systems in parallel. + (Milind Bhandarkar via cutting) + + 4. HADOOP-146. Fix DFS to check when randomly generating a new block + id that no existing blocks already have that id. + (Milind Bhandarkar via cutting) + + 5. HADOOP-180. Make a daemon thread that does the actual task clean ups, so + that the main offerService thread in the taskTracker doesn't get stuck + and miss his heartbeat window. This was killing many task trackers as + big jobs finished (300+ tasks / node). (omalley via cutting) + + 6. HADOOP-200. Avoid transmitting entire list of map task names to + reduce tasks. Instead just transmit the number of map tasks and + henceforth refer to them by number when collecting map output. + (omalley via cutting) + + 7. HADOOP-219. Fix a NullPointerException when handling a checksum + exception under SequenceFile.Sorter.sort(). (cutting & stack) + + 8. HADOOP-212. Permit alteration of the file block size in DFS. The + default block size for new files may now be specified in the + configuration with the dfs.block.size property. The block size + may also be specified when files are opened. + (omalley via cutting) + + 9. HADOOP-218. Avoid accessing configuration while looping through + tasks in JobTracker. (Mahadev Konar via cutting) + +10. HADOOP-161. Add hashCode() method to DFS's Block. + (Milind Bhandarkar via cutting) + +11. HADOOP-115. 
Map output types may now be specified. These are also + used as reduce input types, thus permitting reduce input types to + differ from reduce output types. (Runping Qi via cutting) + +12. HADOOP-216. Add task progress to task status page. + (Bryan Pendleton via cutting) + +13. HADOOP-233. Add web server to task tracker that shows running + tasks and logs. Also add log access to job tracker web interface. + (omalley via cutting) + +14. HADOOP-205. Incorporate pending tasks into tasktracker load + calculations. (Mahadev Konar via cutting) + +15. HADOOP-247. Fix sort progress to better handle exceptions. + (Mahadev Konar via cutting) + +16. HADOOP-195. Improve performance of the transfer of map outputs to + reduce nodes by performing multiple transfers in parallel, each on + a separate socket. (Sameer Paranjpye via cutting) + +17. HADOOP-251. Fix task processes to be tolerant of failed progress + reports to their parent process. (omalley via cutting) + +18. HADOOP-325. Improve the FileNotFound exceptions thrown by + LocalFileSystem to include the name of the file. + (Benjamin Reed via cutting) + +19. HADOOP-254. Use HTTP to transfer map output data to reduce + nodes. This, together with HADOOP-195, greatly improves the + performance of these transfers. (omalley via cutting) + +20. HADOOP-163. Cause datanodes that are unable to either read or + write data to exit, so that the namenode will no longer target + them for new blocks and will replicate their data on other nodes. + (Hairong Kuang via cutting) + +21. HADOOP-222. Add a -setrep option to the dfs commands that alters + file replication levels. (Johan Oskarson via cutting) + +22. HADOOP-75. In DFS, only check for a complete file when the file + is closed, rather than as each block is written. + (Milind Bhandarkar via cutting) + +23. HADOOP-124. Change DFS so that datanodes are identified by a + persistent ID rather than by host and port. This solves a number + of filesystem integrity problems, when, e.g., datanodes are + restarted. (Konstantin Shvachko via cutting) + +24. HADOOP-256. Add a C API for DFS. (Arun C Murthy via cutting) + +25. HADOOP-211. Switch to use the Jakarta Commons logging internally, + configured to use log4j by default. (Arun C Murthy and cutting) + +26. HADOOP-265. Tasktracker now fails to start if it does not have a + writable local directory for temporary files. In this case, it + logs a message to the JobTracker and exits. (Hairong Kuang via cutting) + +27. HADOOP-270. Fix potential deadlock in datanode shutdown. + (Hairong Kuang via cutting) + +Release 0.2.1 - 2006-05-12 + + 1. HADOOP-199. Fix reduce progress (broken by HADOOP-182). + (omalley via cutting) + + 2. HADOOP-201. Fix 'bin/hadoop dfs -report'. (cutting) + + 3. HADOOP-207. Fix JDK 1.4 incompatibility introduced by HADOOP-96. + System.getenv() does not work in JDK 1.4. (Hairong Kuang via cutting) + + +Release 0.2.0 - 2006-05-05 + + 1. Fix HADOOP-126. 'bin/hadoop dfs -cp' now correctly copies .crc + files. (Konstantin Shvachko via cutting) + + 2. Fix HADOOP-51. Change DFS to support per-file replication counts. + (Konstantin Shvachko via cutting) + + 3. Fix HADOOP-131. Add scripts to start/stop dfs and mapred daemons. + Use these in start/stop-all scripts. (Chris Mattmann via cutting) + + 4. Stop using ssh options by default that are not yet in widely used + versions of ssh. Folks can still enable their use by uncommenting + a line in conf/hadoop-env.sh. (cutting) + + 5. Fix HADOOP-92. 
Show information about all attempts to run each + task in the web ui. (Mahadev Konar via cutting) + + 6. Fix HADOOP-128. Improved DFS error handling. (Owen O'Malley via cutting) + + 7. Fix HADOOP-129. Replace uses of java.io.File with new class named + Path. This fixes bugs where java.io.File methods were called + directly when FileSystem methods were desired, and reduces the + likelihood of such bugs in the future. It also makes the handling + of pathnames more consistent between local and dfs FileSystems and + between Windows and Unix. java.io.File-based methods are still + available for back-compatibility, but are deprecated and will be + removed once 0.2 is released. (cutting) + + 8. Change dfs.data.dir and mapred.local.dir to be comma-separated + lists of directories, no longer be space-separated. This fixes + several bugs on Windows. (cutting) + + 9. Fix HADOOP-144. Use mapred task id for dfs client id, to + facilitate debugging. (omalley via cutting) + +10. Fix HADOOP-143. Do not line-wrap stack-traces in web ui. + (omalley via cutting) + +11. Fix HADOOP-118. In DFS, improve clean up of abandoned file + creations. (omalley via cutting) + +12. Fix HADOOP-138. Stop multiple tasks in a single heartbeat, rather + than one per heartbeat. (Stefan via cutting) + +13. Fix HADOOP-139. Remove a potential deadlock in + LocalFileSystem.lock(). (Igor Bolotin via cutting) + +14. Fix HADOOP-134. Don't hang jobs when the tasktracker is + misconfigured to use an un-writable local directory. (omalley via cutting) + +15. Fix HADOOP-115. Correct an error message. (Stack via cutting) + +16. Fix HADOOP-133. Retry pings from child to parent, in case of + (local) communication problems. Also log exit status, so that one + can distinguish patricide from other deaths. (omalley via cutting) + +17. Fix HADOOP-142. Avoid re-running a task on a host where it has + previously failed. (omalley via cutting) + +18. Fix HADOOP-148. Maintain a task failure count for each + tasktracker and display it in the web ui. (omalley via cutting) + +19. Fix HADOOP-151. Close a potential socket leak, where new IPC + connection pools were created per configuration instance that RPCs + use. Now a global RPC connection pool is used again, as + originally intended. (cutting) + +20. Fix HADOOP-69. Don't throw a NullPointerException when getting + hints for non-existing file split. (Bryan Pendleton via cutting) + +21. Fix HADOOP-157. When a task that writes dfs files (e.g., a reduce + task) failed and was retried, it would fail again and again, + eventually failing the job. The problem was that dfs did not yet + know that the failed task had abandoned the files, and would not + yet let another task create files with the same names. Dfs now + retries when creating a file long enough for locks on abandoned + files to expire. (omalley via cutting) + +22. Fix HADOOP-150. Improved task names that include job + names. (omalley via cutting) + +23. Fix HADOOP-162. Fix ConcurrentModificationException when + releasing file locks. (omalley via cutting) + +24. Fix HADOOP-132. Initial check-in of new Metrics API, including + implementations for writing metric data to a file and for sending + it to Ganglia. (David Bowen via cutting) + +25. Fix HADOOP-160. Remove some unneeded synchronization around + time-consuming operations in the TaskTracker. (omalley via cutting) + +26. Fix HADOOP-166. RPCs failed when passed subclasses of a declared + parameter type. 
This is fixed by changing ObjectWritable to store + both the declared type and the instance type for Writables. Note + that this incompatibly changes the format of ObjectWritable and + will render unreadable any ObjectWritables stored in files. + Nutch only uses ObjectWritable in intermediate files, so this + should not be a problem for Nutch. (Stefan & cutting) + +27. Fix HADOOP-168. MapReduce RPC protocol methods should all declare + IOException, so that timeouts are handled appropriately. + (omalley via cutting) + +28. Fix HADOOP-169. Don't fail a reduce task if a call to the + jobtracker to locate map outputs fails. (omalley via cutting) + +29. Fix HADOOP-170. Permit FileSystem clients to examine and modify + the replication count of individual files. Also fix a few + replication-related bugs. (Konstantin Shvachko via cutting) + +30. Permit specification of a higher replication levels for job + submission files (job.xml and job.jar). This helps with large + clusters, since these files are read by every node. (cutting) + +31. HADOOP-173. Optimize allocation of tasks with local data. (cutting) + +32. HADOOP-167. Reduce number of Configurations and JobConf's + created. (omalley via cutting) + +33. NUTCH-256. Change FileSystem#createNewFile() to create a .crc + file. The lack of a .crc file was causing warnings. (cutting) + +34. HADOOP-174. Change JobClient to not abort job until it has failed + to contact the job tracker for five attempts, not just one as + before. (omalley via cutting) + +35. HADOOP-177. Change MapReduce web interface to page through tasks. + Previously, when jobs had more than a few thousand tasks they + could crash web browsers. (Mahadev Konar via cutting) + +36. HADOOP-178. In DFS, piggyback blockwork requests from datanodes + on heartbeat responses from namenode. This reduces the volume of + RPC traffic. Also move startup delay in blockwork from datanode + to namenode. This fixes a problem where restarting the namenode + triggered a lot of uneeded replication. (Hairong Kuang via cutting) + +37. HADOOP-183. If the DFS namenode is restarted with different + minimum and/or maximum replication counts, existing files' + replication counts are now automatically adjusted to be within the + newly configured bounds. (Hairong Kuang via cutting) + +38. HADOOP-186. Better error handling in TaskTracker's top-level + loop. Also improve calculation of time to send next heartbeat. + (omalley via cutting) + +39. HADOOP-187. Add two MapReduce examples/benchmarks. One creates + files containing random data. The second sorts the output of the + first. (omalley via cutting) + +40. HADOOP-185. Fix so that, when a task tracker times out making the + RPC asking for a new task to run, the job tracker does not think + that it is actually running the task returned. (omalley via cutting) + +41. HADOOP-190. If a child process hangs after it has reported + completion, its output should not be lost. (Stack via cutting) + +42. HADOOP-184. Re-structure some test code to better support testing + on a cluster. (Mahadev Konar via cutting) + +43. HADOOP-191 Add streaming package, Hadoop's first contrib module. + This permits folks to easily submit MapReduce jobs whose map and + reduce functions are implemented by shell commands. Use + 'bin/hadoop jar build/hadoop-streaming.jar' to get details. + (Michel Tourn via cutting) + +44. HADOOP-189. Fix MapReduce in standalone configuration to + correctly handle job jar files that contain a lib directory with + nested jar files. (cutting) + +45. HADOOP-65. 
Initial version of record I/O framework that enables + the specification of record types and generates marshalling code + in both Java and C++. Generated Java code implements + WritableComparable, but is not yet otherwise used by + Hadoop. (Milind Bhandarkar via cutting) + +46. HADOOP-193. Add a MapReduce-based FileSystem benchmark. + (Konstantin Shvachko via cutting) + +47. HADOOP-194. Add a MapReduce-based FileSystem checker. This reads + every block in every file in the filesystem. (Konstantin Shvachko + via cutting) + +48. HADOOP-182. Fix so that lost task trackers to not change the + status of reduce tasks or completed jobs. Also fixes the progress + meter so that failed tasks are subtracted. (omalley via cutting) + +49. HADOOP-96. Logging improvements. Log files are now separate from + standard output and standard error files. Logs are now rolled. + Logging of all DFS state changes can be enabled, to facilitate + debugging. (Hairong Kuang via cutting) + + +Release 0.1.1 - 2006-04-08 + + 1. Added CHANGES.txt, logging all significant changes to Hadoop. (cutting) + + 2. Fix MapReduceBase.close() to throw IOException, as declared in the + Closeable interface. This permits subclasses which override this + method to throw that exception. (cutting) + + 3. Fix HADOOP-117. Pathnames were mistakenly transposed in + JobConf.getLocalFile() causing many mapred temporary files to not + be removed. (Raghavendra Prabhu via cutting) + + 4. Fix HADOOP-116. Clean up job submission files when jobs complete. + (cutting) + + 5. Fix HADOOP-125. Fix handling of absolute paths on Windows (cutting) + +Release 0.1.0 - 2006-04-01 + + 1. The first release of Hadoop. + diff --git a/FB-CHANGES.txt b/FB-CHANGES.txt new file mode 100644 index 0000000..03e44c2 --- /dev/null +++ b/FB-CHANGES.txt @@ -0,0 +1,127 @@ +Patches from the following Apache Jira issues have been applied +to this release in the order indicated. This is in addition to +the patches applied from issues referenced in CHANGES.txt. + +Release 0.20.3 + FB - Unreleased. + + MAPREDUCE-2141 Add an "extra data" field to Task for use by Mesos + MAPREDUCE-2118 optimize getJobSetupAndCleanupTasks (by removing global lock - r9768) + MAPREDUCE-2157 taskLauncher threads in TT can die because of unexpected interrupts + MAPREDUCE-2116 optimize GetTasksToKill + MAPREDUCE-2114 finer grained locking for getCounters implementation + MAPREDUCE-2100 log split information for map task + MAPREDUCE-2085 job submissions to a tracker with different filesystem fails + MAPREDUCE-2062 speculative execution is too aggressive + MAPREDUCE-2047/2048. performance improvements in heartbeat processing + HDFS-1109. Fix url encoding with HFTP protocol + HDFS-1250. Namenode accepts block report from dead datanodes + + MAPREDUCE-1442. Fixing JobHistory regular expression parsing + MAPREDUCE-1873. Add a metrics instrumentation class to collect + metrics about fair share scheduler + HDFS-1140 Speedup INode.getPathComponents + HDFS-1110 Namenode heap optimization + HADOOP-5124 A few optimizations to FsNamesystem#RecentInvalidateSets + HDFS-1295 Improve namenode restart times by short-circuiting + the first block reports from datanodes + HADOOP-6904 A baby step towards inter-version communications between + dfs client and NameNode + HDFS-1335 HDFS side of HADOOP-6904: first step towards inter-version + communications between dfs client and NameNode. 
+ HDFS-1348 Improve NameNode reponsiveness while it is checking if + datanode decommissions are complete + HDFS-946 NameNode should not return full path name when listing + a directory or getting the status of a file. + MAPREDUCE-1463 Reducer should start faster for smaller jobs + HDFS-985 HDFS should issue multiple RPCs for listing a large + directory + HDFS-1368 Add a block counter to DatanodeDescriptor + MAPREDUCE-2046 CombineFileInputFormat should not create splits larger than + the specified maxSplitSize. + HDFS-202 Add a bulk FIleSystem.getFileBlockLocations + HADOOP-6870/6890/6900 Add FileSystem#listLocatedStatus to list a + directory's content together with each file's block + locations + MAPREDUCE-2021 CombineFileInputFormat returns + duplicate hostnames in split locations. + HDFS-173 Recursively deleting a directory with millions of files + makes NameNode unresponsive for other commands until the + deletion completes + HDFS-278 Add timeout to DFSOutputStream.close() + HDFS-1391 Reduce the time needed to exit safemode. + MAPREDUCE-1981 Improve getSplits performance by using listLocatedStatus, + the new FileSystem API + HDFS-96 integer overflow for blocks > 2GB (DFS client) + HADOOP-6975 integer overflow for blocks > 2GB (S3 client) + MAPREDUCE-1597 CombinefileInputformat does not work with non-splittable + files + HDFS-1429 Make lease expiration limit configurable + HADOOP-6974 Configurable header buffer size for Hadoop HTTP server + HDFS-1436 Lease renew RPC does not need to grab fsnamesytem write + lock + MAPREDUCE-2099 Purge Outdated RAID parity HARs. + MAPREDUCE-2108 Allow TaskScheduler manage number slots on TaskTrackers + (Here we use an alternative approach. We make TT read the + number of CPUs and change number of slots.) + MAPREDUCE-2110 add getArchiveIndex to HarFileSystem + MAPREDUCE-2111 make getPathInHar public in HarFileSystem + MAPREDUCE-961 ResourceAwareLoadManager to dynamically decide new tasks + based on current CPU/memory load on TaskTracker(s) + HDFS-1432 HDFS across data centers: HighTide + MAPREDUCE-2124 Add job counters for measuring time spent in three + different phases in reducers + MAPREDUCE-1819 RaidNode is now smarter in submitting RAID jobs. + MAPREDUCE-1894 Fixed a bug in DistributedRaidFileSystem.readFully() that + was causing it to loop infinitely. + + MAPREDUCE-1838 Reduce the time needed for raiding a bunch of files + by randomly assigning files to map tasks. + MAPREDUCE-1670 RAID policies should not scan their own destination path. + MAPREDUCE-1668 RaidNode Hars a directory only if all its parity files + have been created. + MAPREDUCE-2029 DistributedRaidFileSystem removes itself from FileSystem + cache when it is closed. + MAPREDUCE-1816 HAR files used for RAID parity-bite have configurable + partfile size. + MAPREDUCE-1908 DistributedRaidFileSystem now handles ChecksumException + correctly. + MAPREDUCE-1783 Task Initialization should be delayed till when a job can + be run. + MAPREDUCE-2142 Refactor RaidNode to remove map reduce dependency. + HDFS-1463 accessTime updates should not occur in safeMode + HDFS-1435 Provide an option to store fsimage compressed + MAPREDUCE-2143 HarFileSystem should be able to handle spaces in its path. + HDFS-222 Support for concatenating of files into a single file. 
+ MAPREDUCE-2150 RaidNode should periodically fix corrupt blocks + MAPREDUCE-2155 RaidNode should optionally dispatch map reduce jobs to fix + corrupt blocks (instead of fixing locally) + MAPREDUCE-2156 Raid-aware FSCK + HDFS-903 NN should verify images and edit los on startup + MAPREDUCE-1892 RaidNode can allow layered policies more efficiently. + HDFS-1458 Improve checkpoint performance by avoiding unncessary + image downloads & loading. + HDFS-1031 Enhance the webUi to list a few of the corrupted files + in HDFS + HDFS-1472 Refactor DFSck to allow programmatic access to output + HDFS-1111 Iterative listCorruptFilesBlocks() returns all corrupt + files. + HADOOP-7023 Add listCorruptFileBlocks to FileSystem. + HDFS-1482 Add listCorruptFileBlocks to DistributedFileSystem. + MAPREDUCE-2146 Raid does not affect access time of a source file. + HDFS-1457 Limit transmission rate when transfering image between + primary and secondary NNs + MAPREDUCE-2167 Faster directory traversal for RAID. + MAPREDUCE-2185 Infinite loop at creating splits using + CombineFileInputFormat + MAPREDUCE-2189 RAID Parallel traversal needs to synchronize stats + HDFS-1476 Configurable threshold for initializing replication queues + (before leaving safe mode). + HADOOP-7047 RPC client gets stuck + HADOOP-7013 Add field is Corrupt to BlockLocation. + HDFS-1483 Populate BlockLocation.isCorrupt in + DFSUtil.locatedBlocks2Locations. + HDFS-1458 Improve checkpoint performance by avoiding unnecessary + image downloads. + HADOOP-7001 Allow run-time configuration of configured nodes. + HADOOP-7049 Fixed TestReconfiguration. + MAPREDUCE-1752 HarFileSystem.getFileBlockLocations() diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 0000000..59bcdbc --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,244 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +APACHE HADOOP SUBCOMPONENTS: + +The Apache Hadoop project contains subcomponents with separate copyright +notices and license terms. Your use of the source code for the these +subcomponents is subject to the terms and conditions of the following +licenses. + +For the org.apache.hadoop.util.bloom.* classes: + +/** + * + * Copyright (c) 2005, European Commission project OneLab under contract + * 034819 (http://www.one-lab.org) + * All rights reserved. + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the distribution. 
+ * - Neither the name of the University Catholique de Louvain - UCL + * nor the names of its contributors may be used to endorse or + * promote products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/NOTICE.txt b/NOTICE.txt new file mode 100644 index 0000000..ded4f61 --- /dev/null +++ b/NOTICE.txt @@ -0,0 +1,7 @@ +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by Yahoo! Inc., +powering the largest Hadoop clusters in the Universe! +(http://developer.yahoo.com/hadoop). + diff --git a/README.txt b/README.txt new file mode 100644 index 0000000..cbb7c0e --- /dev/null +++ b/README.txt @@ -0,0 +1,24 @@ +This is a version of code that runs on Facebook's data warehouse clusters and +is powered by Apache Hadoop. The biggest single cluster stores 30 PB of data +and has 3000 nodes. + +This code is based on Apache Hadoop 0.20. + +FB-CHANGES.txt contains the additional patches that have been committed to +the original code base. + +PLEASE NOTE: + + * This distribution includes cryptographic software that + is subject to U.S. export control laws and applicable + export and import laws of other countries. BEFORE using + any software made available from this site, it is your + responsibility to understand and comply with these laws. + This software is being exported in accordance with the + Export Administration Regulations. As of June 2009, you + are prohibited from exporting and re-exporting this + software to Cuba, Iran, North Korea, Sudan, Syria and + any other countries specified by regulatory update to + the U.S. export control laws and regulations. Diversion + contrary to U.S. law is prohibited. + diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt new file mode 100644 index 0000000..4c8243b --- /dev/null +++ b/YAHOO-CHANGES.txt @@ -0,0 +1,506 @@ +Yahoo! Distribution of Hadoop Change Log + +Patches from the following Apache Jira issues have been applied +to this release in the order indicated. This is in addition to +the patches applied from issues referenced in CHANGES.txt. + +yahoo-hadoop-0.20.1-3195383008 + HADOOP-6521. Fix backward compatibility issue with umask when applications + use deprecated param dfs.umask in configuration or use + FsPermission.setUMask(). (suresh) + + MAPREDUCE-1372. Fixed a ConcurrentModificationException in jobtracker. + (Arun C Murthy via yhemanth) + + MAPREDUCE-1316. Fix jobs' retirement from the JobTracker to prevent memory + leaks via stale references. (Amar Kamat via acmurthy) + + MAPREDUCE-1342. Fixed deadlock in global blacklisting of tasktrackers. + (Amareshwari Sriramadasu via acmurthy) + + HADOOP-6460.
Reinitializes buffers used for serializing responses in ipc + server on exceeding maximum response size to free up Java heap. (suresh) + + MAPREDUCE-1100. Truncate user logs to prevent TaskTrackers' disks from + filling up. (Vinod Kumar Vavilapalli via acmurthy) + + MAPREDUCE-1143. Fix running task counters to be updated correctly + when speculative attempts are running for a TIP. + (Rahul Kumar Singh via yhemanth) + + HADOOP-6151, 6281, 6285, 6441. Add HTML quoting of the parameters to all + of the servlets to prevent XSS attacks. (omalley) + + MAPREDUCE-896. Fix bug in earlier implementation to prevent + spurious logging in tasktracker logs for absent file paths. + (Ravi Gummadi via yhemanth) + + MAPREDUCE-676. Fix Hadoop Vaidya to ensure it works for map-only jobs. + (Suhas Gogate via acmurthy) + + HADOOP-5582. Fix Hadoop Vaidya to use new Counters in + org.apache.hadoop.mapreduce package. (Suhas Gogate via acmurthy) + + HDFS-595. umask settings in configuration may now use octal or + symbolic instead of decimal. Update HDFS tests as such. (jghoman) + + MAPREDUCE-1068. Added a verbose error message when user specifies an + incorrect -file parameter. (Amareshwari Sriramadasu via acmurthy) + + MAPREDUCE-1171. Allow the read-error notification in shuffle to be + configurable. (Amareshwari Sriramadasu via acmurthy) + + MAPREDUCE-353. Allow shuffle read and connection timeouts to be + configurable. (Amareshwari Sriramadasu via acmurthy) + + HADOOP-6428. HttpServer sleeps with negative values (cos) + + HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: + IllegalArgumentException is thrown. (cos) + + HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) + + MAPREDUCE-1185. Redirect running job url to history url if job is already + retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) + + MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat + via sharad) + + MAPREDUCE-896. Modify permissions for local files on tasktracker before + deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) + + HADOOP-5771. Implements unit tests for LinuxTaskController. + (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) + + MAPREDUCE-1124. Import Gridmix3 and Rumen. (cdouglas) + + MAPREDUCE-1063. Document gridmix benchmark. (cdouglas) + + HDFS-758. Changes to report status of decommissioining on the namenode web + UI. (jitendra) + + HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration + to use octal or symbolic instead of decimal. (Jakob Homan via suresh) + + MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via + cdouglas) + + MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the + configured threshold. (cdouglas) + + HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up + when the history viewer is accessed concurrently. + (Amar Kamat via ddas) + + MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for + unreferenced files in error conditions. + (Amareshwari Sriramadasu via yhemanth) + + HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota + and suggests using -skpTrash, when moving to trash fails. + (Boris Shkolnik via suresh) + + HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo + (Nicholas), SZE via cdouglas) + + HDFS-457. Better handling of volume failure in Data Node storage, + This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. 
+ Contributed by Erik Steffl + + HDFS-625. Fix NullPointerException thrown from ListPathServlet. + Contributed by Suresh Srinivas. + + HADOOP-6343. Log unexpected throwable object caught in RPC. + Contributed by Jitendra Nath Pandey + +yahoo-hadoop-0.20.1-3092118007: + + MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the + per-cache directory, not all of mapred.local.dir. + (Amareshwari Sriramadasu via acmurthy) + + MAPREDUCE-1231. Add an option to distcp to ignore checksums when used with + the upgrade option. + (Jothi Padmanabhan via yhemanth) + +yahoo-hadoop-0.20.1-3092118006: + + MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus + easing load on it. (Amareshwari Sriramadasu via acmurthy) + + HDFS-761. Fix failure to process rename operation from edits log due to + quota verification. (suresh) + +yahoo-hadoop-0.20.1-3092118005: + + MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob + api correctly. (acmurthy) + +yahoo-hadoop-0.20.1-3092118004: + + HADOOP-6344. rm and rmr immediately delete files rather than sending + to trash, despite trash being enabled, if a user is over-quota. (jhoman) + + MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes + to avoid filling up jobtracker logs on a busy cluster. + (Ravi Gummadi and Hong Tang via yhemanth) + + HDFS-587. Add ability to run HDFS with MR test on non-default queue, + also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make + it possible to use Configured and Tool to process command line to + be able to specify a queue). Contributed by Erik Steffl. + + MAPREDUCE-1158. Fix JT running maps and running reduces metrics. + (sharad) + + MAPREDUCE-947. Fix bug in earlier implementation that was + causing unit tests to fail. + (Ravi Gummadi via yhemanth) + + MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs + (Contributed by Sreekanth Ramakrishnan) + + MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to + include task attempt id. (yhemanth) + + MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while + holding a global lock. (Amareshwari Sriramadasu via acmurthy) + + MAPREDUCE-1048. Add occupied/reserved slot usage summary on + jobtracker UI. (Amareshwari Sriramadasu via sharad) + + MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) + + MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. + Enhanced FileOutputCommitter to create a _SUCCESS file for successful + jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) + + MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in + favor of max capacity percentage thus allowing the limit to go over + queue capacity. (Rahul Kumar Singh via yhemanth) + + MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to + task related parameters. (Ravi Gummadi via yhemanth) + + MAPREDUCE-739. Allow relative paths to be created inside archives. + (mahadev) + + HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) + + HADOOP-6231. Allow caching of filesystem instances to be disabled on a + per-instance basis (ben slusky via mahadev) + + MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even + if the job fails with exception (koji via mahadev) + + HDFS-686. NullPointerException is thrown while merging edit log and + image. (hairong) + + HDFS-709. Fix TestDFSShell failure due to rename bug introduced by + HDFS-677. (suresh) + + HDFS-677. 
Rename failure when both source and destination quota exceeds + results in deletion of source. (suresh) + + HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to + hadoop-config.sh so that it allows setting java command options for + JAVA_PLATFORM. (Koji Noguchi via szetszwo) + + MAPREDUCE-732. Removed spurious log statements in the node + blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) + + MAPREDUCE-144. Includes dump of the process tree in task diagnostics when + a task is killed due to exceeding memory limits. + (Vinod Kumar Vavilapalli via yhemanth) + + MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to + return values of new configuration variables when deprecated + variables are disabled. (Sreekanth Ramakrishnan via yhemanth) + + MAPREDUCE-277. Makes job history counters available on the job history + viewers. (Jothi Padmanabhan via ddas) + + HADOOP-5625. Add operation duration to clienttrace. (Lei Xu + via cdouglas) + + HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) + + HADOOP-6218. Adds a feature where TFile can be split by Record + Sequence number. Contributed by Hong Tang and Raghu Angadi. + +yahoo-hadoop-0.20.1-3041192001 + + MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to + 0744. Contributed by Arun C. Murthy. + + HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where + possible in RawLocalFileSystem. Contributed by Arun C. Murthy. + +yahoo-hadoop-0.20.1-3041192000 + + MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band + heartbeat on task-completion for better job-latency. Contributed by + Arun C. Murthy + Configuration changes: + add mapreduce.tasktracker.outofband.heartbeat + + MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task + per-heartbeat. Contributed by Rahuk K Singh. + + MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one + irrespective of slot size for the job. Contributed by Ravi Gummadi. + + MAPREDUCE-964. Fixed start and finish times of TaskStatus to be + consistent, thereby fixing inconsistencies in metering tasks. + Contributed by Sreekanth Ramakrishnan. + + HADOOP-5976. Add a new command, classpath, to the hadoop + script. Contributed by Owen O'Malley and Gary Murry + + HADOOP-5784. Makes the number of heartbeats that should arrive + a second at the JobTracker configurable. Contributed by + Amareshwari Sriramadasu. + + MAPREDUCE-945. Modifies MRBench and TestMapRed to use + ToolRunner so that options such as queue name can be + passed via command line. Contributed by Sreekanth Ramakrishnan. + +yahoo-hadoop-0.20.0-3006291003 + + HADOOP:5420 Correct bug in earlier implementation + by Arun C. Murthy + + HADOOP-5363 Add support for proxying connections to multiple + clusters with different versions to hdfsproxy. Contributed + by Zhiyong Zhang + + HADOOP-5780. Improve per block message prited by -metaSave + in HDFS. (Raghu Angadi) + +yahoo-hadoop-0.20.0-2957040010 + + HADOOP-6227. Fix Configuration to allow final parameters to be set + to null and prevent them from being overridden. Contributed by + Amareshwari Sriramadasu. + +yahoo-hadoop-0.20.0-2957040007 + + MAPREDUCE-430 Added patch supplied by Amar Kamat to allow + roll forward on branch to includ externally committed + patch. + +yahoo-hadoop-0.20.0-2957040006 + + MAPREDUCE-768. Provide an option to dump jobtracker configuration in + JSON format to standard output. 
Contributed by V.V.Chaitanya + +yahoo-hadoop-0.20.0-2957040004 + + MAPREDUCE-834 Correct an issue created by merging this issue with + patch attached to external Jira. + +yahoo-hadoop-0.20.0-2957040003 + + HADOOP-6184 Provide an API to dump Configuration in a JSON format. + Contributed by V.V.Chaitanya Krishna. + + MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to + merge cleanly. + +yahoo-hadoop-0.20.0-2957040000 + + MAPREDUCE:478 Allow map and reduce jvm parameters, environment + variables and ulimit to be set separately. + + MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. + Contributed by Sreekanth Ramakrishnan. + + HADOOP:5420 Support killing of process groups in LinuxTaskController + binary + + HADOOP-5488 Removes the pidfile management for the Task JVM from the + framework and instead passes the PID back and forth between the + TaskTracker and the Task processes. Contributed by Ravi Gummadi. + + MAPREDUCE:467 Provide ability to collect statistics about total tasks and + succeeded tasks in different time windows. + +yahoo-hadoop-0.20.0.2949784002: + + MAPREDUCE-817. Add a cache for retired jobs with minimal job + info and provide a way to access history file url + + MAPREDUCE-814. Provide a way to configure completed job history + files to be on HDFS. + + MAPREDUCE-838 Fixes a problem in the way commit of task outputs + happens. The bug was that even if commit failed, the task would be + declared as successful. Contributed by Amareshwari Sriramadasu. + +yahoo-hadoop-0.20.0.2902658004: + + MAPREDUCE-809 Fix job-summary logs to correctly record final status of + FAILED and KILLED jobs. + http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch + + MAPREDUCE-740 Log a job-summary at the end of a job, while + allowing it to be configured to use a custom appender if desired. + http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch + + MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of + high-ram jobs. + http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch + + HADOOP-5420 Support setsid based kill in LinuxTaskController. + http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt + + MAPREDUCE-733 Fixes a bug that when a task tracker is killed , + it throws exception. Instead it should catch it and process it and + allow the rest of the flow to go through + http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch + + MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being + removed from the scheduler queue. + http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch + + MAPREDUCE-693 Fixes a bug that when a job is submitted and the + JT is restarted (before job files have been written) and the job + is killed after recovery, the conf files fail to be moved to the + "done" subdirectory. + http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch + + MAPREDUCE-722 Fixes a bug where more slots are getting reserved + for HiRAM job tasks than required. + http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt + + MAPREDUCE-683 TestJobTrackerRestart failed because of stale + filemanager cache (which was created once per jvm). This patch makes + sure that the filemanager is inited upon every JobHistory.init() + and hence upon every restart. 
Note that this wont happen in production + as upon a restart the new jobtracker will start in a new jvm and + hence a new cache will be created. + http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch + + MAPREDUCE-709 Fixes a bug where node health check script does + not display the correct message on timeout. + http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch + + MAPREDUCE-708 Fixes a bug where node health check script does + not refresh the "reason for blacklisting". + http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch + + MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler + and avoid timeout errors. + http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch + + MAPREDUCE-532 Provided ability in the capacity scheduler to + limit the number of slots that can be concurrently used per queue + at any given time. + http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch + + MAPREDUCE-211 Provides ability to run a health check script on + the tasktracker nodes and blacklist nodes if they are unhealthy. + Contributed by Sreekanth Ramakrishnan. + http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch + + MAPREDUCE-516 Remove .orig file included by mistake. + http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch + + MAPREDUCE-416 Moves the history file to a "done" folder whenever + a job completes. + http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch + + HADOOP-5980 Previously, task spawned off by LinuxTaskController + didn't get LD_LIBRARY_PATH in their environment. The tasks will now + get same LD_LIBRARY_PATH value as when spawned off by + DefaultTaskController. + http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch + + HADOOP-5981 This issue completes the feature mentioned in + HADOOP-2838. HADOOP-2838 provided a way to set env variables in + child process. This issue provides a way to inherit tt's env variables + and append or reset it. So now X=$X:y will inherit X (if there) and + append y to it. + http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch + + HADOOP-5419 This issue is to provide an improvement on the + existing M/R framework to let users know which queues they have + access to, and for what operations. One use case for this would + that currently there is no easy way to know if the user has access + to submit jobs to a queue, until it fails with an access control + exception. + http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch + + HADOOP-5420 Support setsid based kill in LinuxTaskController. + http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt + + HADOOP-5643 Added the functionality to refresh jobtrackers node + list via command line (bin/hadoop mradmin -refreshNodes). The command + should be run as the jobtracker owner (jobtracker process owner) + or from a super group (mapred.permissions.supergroup). + http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final + + + HADOOP-2838 Now the users can set environment variables using + mapred.child.env. 
They can do the following X=Y : set X to Y X=$X:Y + : Append Y to X (which should be taken from the tasktracker) + http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch + + HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege + to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) + https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch + + HADOOP-5801. Fixes the problem: If the hosts file is changed across restart + then it should be refreshed upon recovery so that the excluded hosts are + lost and the maps are re-executed. (Amar Kamat via ddas) + https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch + + HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers + while the JobTracker is running. (Amar Kamat via ddas) + https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 + + HADOOP-5419. Provide a facility to query the Queue ACLs for the + current user. (Rahul Kumar Singh via yhemanth) + http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch + + HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to + JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) + http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch + + HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and + waiting reduces. (Sreekanth Ramakrishnan via cdouglas) + https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch + + HADOOP-4842. Streaming now allows specifiying a command for the combiner. + (Amareshwari Sriramadasu via ddas) + http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt + + HADOOP-4490. Provide ability to run tasks as job owners. + (Sreekanth Ramakrishnan via yhemanth) + http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch + https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch + + HADOOP-5442. Paginate jobhistory display and added some search + capabilities. (Amar Kamat via acmurthy) + http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch + + HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. + (Amareshwari Sriramadasu via ddas) + http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt + + HADOOP-5113. Fixed logcondense to remove files for usernames + beginning with characters specified in the -l option. + (Peeyush Bishnoi via yhemanth) + http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt + + HADOOP-2898. Provide an option to specify a port range for + Hadoop services provisioned by HOD. + (Peeyush Bishnoi via yhemanth) + http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt + + HADOOP-4930. Implement a Linux native executable that can be used to + launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) + http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch + + + diff --git a/bin/hadoop b/bin/hadoop new file mode 100755 index 0000000..cc2020d --- /dev/null +++ b/bin/hadoop @@ -0,0 +1,312 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# The Hadoop command script +# +# Environment Variables +# +# JAVA_HOME The java implementation to use. Overrides JAVA_HOME. +# +# HADOOP_CLASSPATH Extra Java CLASSPATH entries. +# +# HADOOP_HEAPSIZE The maximum amount of heap to use, in MB. +# Default is 1000. +# +# HADOOP_OPTS Extra Java runtime options. +# +# HADOOP_NAMENODE_OPTS These options are added to HADOOP_OPTS +# HADOOP_CLIENT_OPTS when the respective command is run. +# HADOOP_{COMMAND}_OPTS etc HADOOP_JT_OPTS applies to JobTracker +# for e.g. HADOOP_CLIENT_OPTS applies to +# more than one command (fs, dfs, fsck, +# dfsadmin etc) +# +# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf. +# +# HADOOP_ROOT_LOGGER The root appender. Default is INFO,console +# + +bin=`dirname "$0"` +bin=`cd "$bin"; pwd` + +. "$bin"/hadoop-config.sh + +cygwin=false +case "`uname`" in +CYGWIN*) cygwin=true;; +esac + +# if no args specified, show usage +if [ $# = 0 ]; then + echo "Usage: hadoop [--config confdir] COMMAND" + echo "where COMMAND is one of:" + echo " namenode -format format the DFS filesystem" + echo " secondarynamenode run the DFS secondary namenode" + echo " namenode run the DFS namenode" + echo " datanode run a DFS datanode" + echo " dfsadmin run a DFS admin client" + echo " mradmin run a Map-Reduce admin client" + echo " fsck run a DFS filesystem checking utility" + echo " raidfsck [path] run RAID-aware filesystem checking utility" + echo " fs run a generic filesystem user client" + echo " balancer run a cluster balancing utility" + echo " jmxget get JMX exported values from NameNode or DataNode." + echo " Use -help to see options" + echo " jobtracker run the MapReduce job Tracker node" + echo " pipes run a Pipes job" + echo " tasktracker run a MapReduce task Tracker node" + echo " job manipulate MapReduce jobs" + echo " queue get information regarding JobQueues" + echo " version print the version" + echo " jar run a jar file" + echo " distcp copy file or directories recursively" + echo " archive -archiveName NAME -p * create a hadoop archive" + echo " daemonlog get/set the log level for each daemon" + echo " or" + echo " CLASSNAME run the class named CLASSNAME" + echo "Most commands print help when invoked w/o parameters." + exit 1 +fi + +# get arguments +COMMAND=$1 +shift + +if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then + . "${HADOOP_CONF_DIR}/hadoop-env.sh" +fi + +# some Java parameters +if [ "$JAVA_HOME" != "" ]; then + #echo "run java in $JAVA_HOME" + JAVA_HOME=$JAVA_HOME +fi + +if [ "$JAVA_HOME" = "" ]; then + echo "Error: JAVA_HOME is not set." 
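+  # JAVA_HOME is normally exported from conf/hadoop-env.sh (sourced above) when it is not already set in the environment; an illustrative setting (the path is an example only, adjust for your system) is: export JAVA_HOME=/usr/lib/jvm/java-6-sun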
+ exit 1 +fi + +JAVA=$JAVA_HOME/bin/java +JAVA_HEAP_MAX=-Xmx1000m + +# check envvars which might override default args +if [ "$HADOOP_HEAPSIZE" != "" ]; then + #echo "run with heapsize $HADOOP_HEAPSIZE" + JAVA_HEAP_MAX="-Xmx""$HADOOP_HEAPSIZE""m" + #echo $JAVA_HEAP_MAX +fi + +# CLASSPATH initially contains $HADOOP_CONF_DIR +CLASSPATH="${HADOOP_CONF_DIR}" +CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar + +# for developers, add Hadoop classes to CLASSPATH +if [ -d "$HADOOP_HOME/build/classes" ]; then + CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/classes +fi +if [ -d "$HADOOP_HOME/build/webapps" ]; then + CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build +fi +if [ -d "$HADOOP_HOME/build/test/classes" ]; then + CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/test/classes +fi +if [ -d "$HADOOP_HOME/build/tools" ]; then + CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/tools +fi + +# so that filenames w/ spaces are handled correctly in loops below +IFS= + +# for releases, add core hadoop jar & webapps to CLASSPATH +if [ -d "$HADOOP_HOME/webapps" ]; then + CLASSPATH=${CLASSPATH}:$HADOOP_HOME +fi +for f in $HADOOP_HOME/hadoop-*-core.jar; do + CLASSPATH=${CLASSPATH}:$f; +done + +# add libs to CLASSPATH +for f in $HADOOP_HOME/lib/*.jar; do + CLASSPATH=${CLASSPATH}:$f; +done + +if [ -d "$HADOOP_HOME/build/ivy/lib/Hadoop/common" ]; then +for f in $HADOOP_HOME/build/ivy/lib/Hadoop/common/*.jar; do + CLASSPATH=${CLASSPATH}:$f; +done +fi + +for f in $HADOOP_HOME/lib/jsp-2.1/*.jar; do + CLASSPATH=${CLASSPATH}:$f; +done + +for f in $HADOOP_HOME/hadoop-*-tools.jar; do + TOOL_PATH=${TOOL_PATH}:$f; +done +for f in $HADOOP_HOME/build/hadoop-*-tools.jar; do + TOOL_PATH=${TOOL_PATH}:$f; +done + +# add user-specified CLASSPATH last +if [ "$HADOOP_CLASSPATH" != "" ]; then + CLASSPATH=${CLASSPATH}:${HADOOP_CLASSPATH} +fi + +# default log directory & file +if [ "$HADOOP_LOG_DIR" = "" ]; then + HADOOP_LOG_DIR="$HADOOP_HOME/logs" +fi +if [ "$HADOOP_LOGFILE" = "" ]; then + HADOOP_LOGFILE='hadoop.log' +fi + +# default policy file for service-level authorization +if [ "$HADOOP_POLICYFILE" = "" ]; then + HADOOP_POLICYFILE="hadoop-policy.xml" +fi + +if [ "$HADOOP_GC_LOG_OPTS" != "" ]; then + HADOOP_GC_LOG_OPTS="${HADOOP_GC_LOG_OPTS}$HADOOP_LOG_DIR/hadoop-$HADOOP_IDENT_STRING-$COMMAND-gc.log" +fi + +# restore ordinary behaviour +unset IFS + +# figure out which class to run +if [ "$COMMAND" = "namenode" ] ; then + CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode' + HADOOP_OPTS="$HADOOP_OPTS $HADOOP_GC_LOG_OPTS $HADOOP_NAMENODE_OPTS" +elif [ "$COMMAND" = "avatarnode" ] ; then + CLASS='org.apache.hadoop.hdfs.server.namenode.AvatarNode' + HADOOP_OPTS="$HADOOP_OPTS $HADOOP_GC_LOG_OPTS $HADOOP_NAMENODE_OPTS" +elif [ "$COMMAND" = "secondarynamenode" ] ; then + CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode' + HADOOP_OPTS="$HADOOP_OPTS $HADOOP_GC_LOG_OPTS $HADOOP_SECONDARYNAMENODE_OPTS" +elif [ "$COMMAND" = "avatardatanode" ] ; then + CLASS='org.apache.hadoop.hdfs.server.datanode.AvatarDataNode' + HADOOP_OPTS="$HADOOP_OPTS $HADOOP_GC_LOG_OPTS $HADOOP_DATANODE_OPTS" +elif [ "$COMMAND" = "datanode" ] ; then + CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode' + HADOOP_OPTS="$HADOOP_OPTS $HADOOP_GC_LOG_OPTS $HADOOP_DATANODE_OPTS" +elif [ "$COMMAND" = "fs" ] ; then + CLASS=org.apache.hadoop.fs.FsShell + HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" +elif [ "$COMMAND" = "dfs" ] ; then + CLASS=org.apache.hadoop.fs.FsShell + HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" +elif [ "$COMMAND" = "dfsadmin" ] ; then + 
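+  # dfsadmin, like the other client commands in this chain, folds HADOOP_CLIENT_OPTS into HADOOP_OPTS (see the header above); e.g. HADOOP_CLIENT_OPTS=-Xmx512m bin/hadoop dfsadmin -report (the heap value here is only an example).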
CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin + HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" +elif [ "$COMMAND" = "mradmin" ] ; then + CLASS=org.apache.hadoop.mapred.tools.MRAdmin + HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" +elif [ "$COMMAND" = "fsck" ] ; then + CLASS=org.apache.hadoop.hdfs.tools.DFSck + HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" +elif [ "$COMMAND" = "raidfsck" ] ; then + CLASS=org.apache.hadoop.raid.RaidShell + HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" + CMDLINE_OPTS="-fsck $CMDLINE_OPTS" +elif [ "$COMMAND" = "balancer" ] ; then + CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer + HADOOP_OPTS="$HADOOP_OPTS $HADOOP_BALANCER_OPTS" + CMDLINE_OPTS="$CMDLINE_OPTS $BALANCER_CMDLINE_OPTS" +elif [ "$COMMAND" = "jmxget" ] ; then + CLASS=org.apache.hadoop.hdfs.tools.JMXGet + HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" +elif [ "$COMMAND" = "jobtracker" ] ; then + CLASS=org.apache.hadoop.mapred.JobTracker + HADOOP_OPTS="$HADOOP_OPTS $HADOOP_GC_LOG_OPTS $HADOOP_JOBTRACKER_OPTS" +elif [ "$COMMAND" = "tasktracker" ] ; then + CLASS=org.apache.hadoop.mapred.TaskTracker + HADOOP_OPTS="$HADOOP_OPTS $HADOOP_GC_LOG_OPTS $HADOOP_TASKTRACKER_OPTS" +elif [ "$COMMAND" = "job" ] ; then + CLASS=org.apache.hadoop.mapred.JobClient +elif [ "$COMMAND" = "queue" ] ; then + CLASS=org.apache.hadoop.mapred.JobQueueClient +elif [ "$COMMAND" = "pipes" ] ; then + CLASS=org.apache.hadoop.mapred.pipes.Submitter + HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" +elif [ "$COMMAND" = "version" ] ; then + CLASS=org.apache.hadoop.util.VersionInfo + HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" +elif [ "$COMMAND" = "jar" ] ; then + CLASS=org.apache.hadoop.util.RunJar +elif [ "$COMMAND" = "distcp" ] ; then + CLASS=org.apache.hadoop.tools.DistCp + CLASSPATH=${CLASSPATH}:${TOOL_PATH} + HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" +elif [ "$COMMAND" = "daemonlog" ] ; then + CLASS=org.apache.hadoop.log.LogLevel + HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" +elif [ "$COMMAND" = "archive" ] ; then + CLASS=org.apache.hadoop.tools.HadoopArchives + CLASSPATH=${CLASSPATH}:${TOOL_PATH} + HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" +elif [ "$COMMAND" = "sampler" ] ; then + CLASS=org.apache.hadoop.mapred.lib.InputSampler + HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" +else + CLASS=$COMMAND +fi + +# cygwin path translation +if $cygwin; then + CLASSPATH=`cygpath -p -w "$CLASSPATH"` + HADOOP_HOME=`cygpath -w "$HADOOP_HOME"` + HADOOP_LOG_DIR=`cygpath -w "$HADOOP_LOG_DIR"` + TOOL_PATH=`cygpath -p -w "$TOOL_PATH"` +fi +# setup 'java.library.path' for native-hadoop code if necessary +JAVA_LIBRARY_PATH='' +if [ -d "${HADOOP_HOME}/build/native" -o -d "${HADOOP_HOME}/lib/native" ]; then + JAVA_PLATFORM=`CLASSPATH=${CLASSPATH} ${JAVA} -Xmx32m ${HADOOP_JAVA_PLATFORM_OPTS} org.apache.hadoop.util.PlatformName | sed -e "s/ /_/g"` + + if [ -d "$HADOOP_HOME/build/native" ]; then + JAVA_LIBRARY_PATH=${HADOOP_HOME}/build/native/${JAVA_PLATFORM}/lib + fi + + if [ -d "${HADOOP_HOME}/lib/native" ]; then + if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then + JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:${HADOOP_HOME}/lib/native/${JAVA_PLATFORM} + else + JAVA_LIBRARY_PATH=${HADOOP_HOME}/lib/native/${JAVA_PLATFORM} + fi + fi +fi + +# cygwin path translation +if $cygwin; then + JAVA_LIBRARY_PATH=`cygpath -p "$JAVA_LIBRARY_PATH"` +fi +export LD_LIBRARY_PATH="$JAVA_LIBRARY_PATH" + +HADOOP_OPTS="$HADOOP_OPTS $HADOOP_DAEMON_OPTS" +HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR" +HADOOP_OPTS="$HADOOP_OPTS 
-Dhadoop.log.file=$HADOOP_LOGFILE" +HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.home.dir=$HADOOP_HOME" +HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING" +HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.root.logger=${HADOOP_ROOT_LOGGER:-INFO,console}" +if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then + HADOOP_OPTS="$HADOOP_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH" +fi +HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.policy.file=$HADOOP_POLICYFILE" + +# run it +exec "$JAVA" $JAVA_HEAP_MAX $HADOOP_OPTS -classpath "$CLASSPATH" $CLASS $CMDLINE_OPTS "$@" diff --git a/bin/hadoop-config.sh b/bin/hadoop-config.sh new file mode 100644 index 0000000..1f9d52d --- /dev/null +++ b/bin/hadoop-config.sh @@ -0,0 +1,68 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# included in all the hadoop scripts with source command +# should not be executable directly +# also should not be passed any arguments, since we need original $* + +# resolve links - $0 may be a softlink + +this="$0" +while [ -h "$this" ]; do + ls=`ls -ld "$this"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '.*/.*' > /dev/null; then + this="$link" + else + this=`dirname "$this"`/"$link" + fi +done + +# convert relative path to absolute path +bin=`dirname "$this"` +script=`basename "$this"` +bin=`cd "$bin"; pwd` +this="$bin/$script" + +# the root of the Hadoop installation +export HADOOP_HOME=`dirname "$this"`/.. + +#check to see if the conf dir is given as an optional argument +if [ $# -gt 1 ] +then + if [ "--config" = "$1" ] + then + shift + confdir=$1 + shift + HADOOP_CONF_DIR=$confdir + fi +fi + +# Allow alternate conf dir location. +HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-$HADOOP_HOME/conf}" + +#check to see it is specified whether to use the slaves or the +# masters file +if [ $# -gt 1 ] +then + if [ "--hosts" = "$1" ] + then + shift + slavesfile=$1 + shift + export HADOOP_SLAVES="${HADOOP_CONF_DIR}/$slavesfile" + fi +fi diff --git a/bin/hadoop-daemon.sh b/bin/hadoop-daemon.sh new file mode 100755 index 0000000..aadb726 --- /dev/null +++ b/bin/hadoop-daemon.sh @@ -0,0 +1,151 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +# Runs a Hadoop command as a daemon. +# +# Environment Variables +# +# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf. +# HADOOP_LOG_DIR Where log files are stored. PWD by default. +# HADOOP_MASTER host:path where hadoop code should be rsync'd from +# HADOOP_PID_DIR The pid files are stored. /tmp by default. +# HADOOP_IDENT_STRING A string representing this instance of hadoop. $USER by default +# HADOOP_NICENESS The scheduling priority for daemons. Defaults to 0. +## + +usage="Usage: hadoop-daemon.sh [--config ] [--hosts hostlistfile] (start|stop) " + +# if no args specified, show usage +if [ $# -le 1 ]; then + echo $usage + exit 1 +fi + +bin=`dirname "$0"` +bin=`cd "$bin"; pwd` + +. "$bin"/hadoop-config.sh + +# get arguments +startStop=$1 +shift +command=$1 +shift + +hadoop_rotate_log () +{ + rlog=$1; + num=5; + if [ -n "$2" ]; then + num=$2 + fi + if [ -f "$rlog" ]; then # rotate logs + while [ $num -gt 1 ]; do + prev=`expr $num - 1` + [ -f "$rlog.$prev" ] && mv "$rlog.$prev" "$rlog.$num" + num=$prev + done + mv "$rlog" "$rlog.$num"; + fi +} + +if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then + . "${HADOOP_CONF_DIR}/hadoop-env.sh" +fi + +# Make sure we start the daemons with the correct username +if [ -n "${HADOOP_USERNAME}" -a "$(whoami)" != "${HADOOP_USERNAME}" ]; then + echo "Must be run as ${HADOOP_USERNAME}. You are $(whoami)" + exit 1 +fi + +# get log directory +if [ "$HADOOP_LOG_DIR" = "" ]; then + export HADOOP_LOG_DIR="$HADOOP_HOME/logs" +fi +mkdir -p "$HADOOP_LOG_DIR" + +if [ "$HADOOP_PID_DIR" = "" ]; then + HADOOP_PID_DIR=/tmp +fi + +if [ "$HADOOP_IDENT_STRING" = "" ]; then + export HADOOP_IDENT_STRING="$USER" +fi + +# some variables +export HADOOP_LOGFILE=hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.log +export HADOOP_ROOT_LOGGER="INFO,DRFA" +log=$HADOOP_LOG_DIR/hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.out +pid=$HADOOP_PID_DIR/hadoop-$HADOOP_IDENT_STRING-$command.pid +gc_log=$HADOOP_LOG_DIR/hadoop-$HADOOP_IDENT_STRING-$command-gc.log + +# Set default scheduling priority +if [ "$HADOOP_NICENESS" = "" ]; then + export HADOOP_NICENESS=0 +fi + +case $startStop in + + (start) + + mkdir -p "$HADOOP_PID_DIR" + + if [ -f $pid ]; then + if kill -0 `cat $pid` > /dev/null 2>&1; then + echo $command running as process `cat $pid`. Stop it first. + exit 1 + fi + fi + + if [ "$HADOOP_MASTER" != "" ]; then + echo rsync from $HADOOP_MASTER + rsync -a -e ssh --delete --exclude=.svn --exclude='logs/*' --exclude='contrib/hod/logs/*' $HADOOP_MASTER/ "$HADOOP_HOME" + fi + + hadoop_rotate_log $log + hadoop_rotate_log $gc_log + echo starting $command, logging to $log + cd "$HADOOP_HOME" + nohup nice -n $HADOOP_NICENESS "$HADOOP_HOME"/bin/hadoop --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null & + echo $! > $pid + sleep 1; head "$log" + ;; + + (stop) + + if [ -f $pid ]; then + if kill -0 `cat $pid` > /dev/null 2>&1; then + echo stopping $command + kill `cat $pid` + else + echo no $command to stop + fi + else + echo no $command to stop + fi + ;; + + (*) + echo $usage + exit 1 + ;; + +esac + + diff --git a/bin/hadoop-daemons.sh b/bin/hadoop-daemons.sh new file mode 100755 index 0000000..894d8ab --- /dev/null +++ b/bin/hadoop-daemons.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Run a Hadoop command on all slave hosts. + +usage="Usage: hadoop-daemons.sh [--config confdir] [--hosts hostlistfile] [start|stop] command args..." + +# if no args specified, show usage +if [ $# -le 1 ]; then + echo $usage + exit 1 +fi + +bin=`dirname "$0"` +bin=`cd "$bin"; pwd` + +. $bin/hadoop-config.sh + +exec "$bin/slaves.sh" --config $HADOOP_CONF_DIR cd "$HADOOP_HOME" \; "$bin/hadoop-daemon.sh" --config $HADOOP_CONF_DIR "$@" diff --git a/bin/rcc b/bin/rcc new file mode 100755 index 0000000..a39745b --- /dev/null +++ b/bin/rcc @@ -0,0 +1,99 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# The Hadoop record compiler +# +# Environment Variables +# +# JAVA_HOME The java implementation to use. Overrides JAVA_HOME. +# +# HADOOP_OPTS Extra Java runtime options. +# +# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf. +# + +bin=`dirname "$0"` +bin=`cd "$bin"; pwd` + +. "$bin"/hadoop-config.sh + +if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then + . "${HADOOP_CONF_DIR}/hadoop-env.sh" +fi + +# some Java parameters +if [ "$JAVA_HOME" != "" ]; then + #echo "run java in $JAVA_HOME" + JAVA_HOME=$JAVA_HOME +fi + +if [ "$JAVA_HOME" = "" ]; then + echo "Error: JAVA_HOME is not set." 
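+  # As in bin/hadoop, JAVA_HOME is normally picked up from conf/hadoop-env.sh (sourced above) when it is not already set in the environment.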
+ exit 1 +fi + +JAVA=$JAVA_HOME/bin/java +JAVA_HEAP_MAX=-Xmx1000m + +# CLASSPATH initially contains $HADOOP_CONF_DIR +CLASSPATH="${HADOOP_CONF_DIR}" +CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar + +# for developers, add Hadoop classes to CLASSPATH +if [ -d "$HADOOP_HOME/build/classes" ]; then + CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/classes +fi +if [ -d "$HADOOP_HOME/build/webapps" ]; then + CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build +fi +if [ -d "$HADOOP_HOME/build/test/classes" ]; then + CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/test/classes +fi + +# so that filenames w/ spaces are handled correctly in loops below +IFS= + +# for releases, add core hadoop jar & webapps to CLASSPATH +if [ -d "$HADOOP_HOME/webapps" ]; then + CLASSPATH=${CLASSPATH}:$HADOOP_HOME +fi +for f in $HADOOP_HOME/hadoop-*-core.jar; do + CLASSPATH=${CLASSPATH}:$f; +done + +# add libs to CLASSPATH +for f in $HADOOP_HOME/lib/*.jar; do + CLASSPATH=${CLASSPATH}:$f; +done + +for f in $HADOOP_HOME/lib/jetty-ext/*.jar; do + CLASSPATH=${CLASSPATH}:$f; +done + +# restore ordinary behaviour +unset IFS + +CLASS='org.apache.hadoop.record.compiler.generated.Rcc' + +# cygwin path translation +if expr `uname` : 'CYGWIN*' > /dev/null; then + CLASSPATH=`cygpath -p -w "$CLASSPATH"` +fi + +# run it +exec "$JAVA" $HADOOP_OPTS -classpath "$CLASSPATH" $CLASS "$@" diff --git a/bin/slaves.sh b/bin/slaves.sh new file mode 100755 index 0000000..fc9f720 --- /dev/null +++ b/bin/slaves.sh @@ -0,0 +1,68 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Run a shell command on all slave hosts. +# +# Environment Variables +# +# HADOOP_SLAVES File naming remote hosts. +# Default is ${HADOOP_CONF_DIR}/slaves. +# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf. +# HADOOP_SLAVE_SLEEP Seconds to sleep between spawning remote commands. +# HADOOP_SSH_OPTS Options passed to ssh when running remote commands. +## + +usage="Usage: slaves.sh [--config confdir] command..." + +# if no args specified, show usage +if [ $# -le 0 ]; then + echo $usage + exit 1 +fi + +bin=`dirname "$0"` +bin=`cd "$bin"; pwd` + +. "$bin"/hadoop-config.sh + +# If the slaves file is specified in the command line, +# then it takes precedence over the definition in +# hadoop-env.sh. Save it here. +HOSTLIST=$HADOOP_SLAVES + +if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then + . 
"${HADOOP_CONF_DIR}/hadoop-env.sh" +fi + +if [ "$HOSTLIST" = "" ]; then + if [ "$HADOOP_SLAVES" = "" ]; then + export HOSTLIST="${HADOOP_CONF_DIR}/slaves" + else + export HOSTLIST="${HADOOP_SLAVES}" + fi +fi + +for slave in `cat "$HOSTLIST"|sed "s/#.*$//;/^$/d"`; do + ssh $HADOOP_SSH_OPTS $slave $"${@// /\\ }" \ + 2>&1 | sed "s/^/$slave: /" & + if [ "$HADOOP_SLAVE_SLEEP" != "" ]; then + sleep $HADOOP_SLAVE_SLEEP + fi +done + +wait diff --git a/bin/start-all.sh b/bin/start-all.sh new file mode 100755 index 0000000..b1eefc8 --- /dev/null +++ b/bin/start-all.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Start all hadoop daemons. Run this on master node. + +bin=`dirname "$0"` +bin=`cd "$bin"; pwd` + +. "$bin"/hadoop-config.sh + +# start dfs daemons +"$bin"/start-dfs.sh --config $HADOOP_CONF_DIR + +# start mapred daemons +"$bin"/start-mapred.sh --config $HADOOP_CONF_DIR diff --git a/bin/start-avatar.sh b/bin/start-avatar.sh new file mode 100755 index 0000000..c30238c --- /dev/null +++ b/bin/start-avatar.sh @@ -0,0 +1,136 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Start hadoop avatar daemons. +# Optinally upgrade or rollback dfs state. +# Run this on master node. + +usage="Usage: start-avatar.sh [-upgrade|-rollback|-zero|-one|-help]" + +params=$# +bin=`dirname "$0"` +bin=`cd "$bin"; pwd` +thishost=`hostname` + +. "$bin"/hadoop-config.sh + +# get arguments +if [ $# -ge 1 ]; then + nameStartOpt=$1 + shift + case $nameStartOpt in + (-help) + echo $usage + echo "-zero: Copy over transaction logs from remote machine to local machine." + echo " Start the instance of AvatarNode in standby avatar." + echo "-one: Copy transaction logs from this machine to remote machine." + echo " Start the instance of AvatarNode on remote machine in standby avatar." + echo " If no parameters are specified then start the first instance of AvatarNode" + echo " in primary Avatar and the second instance in standby avatar." 
+ exit 1 + ;; + (-upgrade) + ;; + (-rollback) + dataStartOpt=$nameStartOpt + ;; + (-zero) + instance0="-zero $*" + instance1="" + ;; + (-one) + instance0="" + instance1="-one $*" + ;; + (*) + echo $usage + exit 1 + ;; + esac +fi + +# If no options are provided then start both AvatarNodes +if [ $params -eq 0 ]; then + instance0="-zero" + instance1="-one -sync -standby" +fi + +# start avatar daemons +# start namenode after datanodes, to minimize time namenode is up w/o data +# note: datanodes will log connection errors until namenode starts +if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then + . "${HADOOP_CONF_DIR}/hadoop-env.sh" +fi + +# read the contents of the masters file +mastersfile="${HADOOP_CONF_DIR}/masters" +slavesfile=${HADOOP_SLAVES} + +let numhost=0 +host0=="" +host1="" +for hosts in `cat "$mastersfile"|sed "s/#.*$//;/^$/d"`; do + if [ $numhost -ge 2 ] ; then + echo "You must list only two entries in the masters file." + echo "The first entry is the zero-th instance of the AvatarNode." + echo "The second entry is the one-th instance of the AvatarNode." + exit; + fi + if [ $numhost -eq 0 ] ; then + host0=$hosts + else + host1=$hosts + fi + ((numhost++)) +done + +# check that there are only two elements in the masters file +if [ $numhost -ne 2 ] ; then + echo "You must list only two entries in the masters file." + echo "The first entry is the zero-th instance of the AvatarNode." + echo "The second entry is the one-th instance of the AvatarNode." + exit; +fi + +# start the zero-th of AvatarNode +if [ "x$instance0" != "x" ]; then + if [ "x$thishost" == "x$host0" ]; then + "$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR start avatarnode $instance0 + else + export HADOOP_SLAVES="/tmp/hadoop.avatarnode.tmpfile.0" + echo $host0 > ${HADOOP_SLAVES} + "$bin"/slaves.sh "$bin"/start-avatar.sh $instance0 + fi +fi + +# start the one-th of AvatarNode +if [ "x$instance1" != "x" ]; then + if [ "x$thishost" == "x$host1" ]; then + "$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR start avatarnode $instance1 + else + export HADOOP_SLAVES="/tmp/hadoop.avatarnode.tmpfile.1" + echo $host1 > ${HADOOP_SLAVES} + "$bin"/slaves.sh "$bin"/start-avatar.sh $instance1 + fi +fi + +# start the AvatarDataNodes +if [ $params -eq 0 ]; then + export HADOOP_SLAVES=$slavesfile + "$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR start avatardatanode $dataStartOpt +fi diff --git a/bin/start-balancer.sh b/bin/start-balancer.sh new file mode 100755 index 0000000..e8c93f9 --- /dev/null +++ b/bin/start-balancer.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +bin=`dirname "$0"` +bin=`cd "$bin"; pwd` + +. "$bin"/hadoop-config.sh + +# Start balancer daemon. 
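+# Any additional command-line arguments are passed through ($@) to the balancer.
+# For example, on builds whose balancer accepts a utilization threshold, this
+# script could be invoked as "start-balancer.sh -threshold 10" (flag shown for
+# illustration only; check the balancer shipped with this build for the options
+# it actually supports).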
+ +"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR start balancer $@ diff --git a/bin/start-dfs.sh b/bin/start-dfs.sh new file mode 100755 index 0000000..bda2035 --- /dev/null +++ b/bin/start-dfs.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Start hadoop dfs daemons. +# Optinally upgrade or rollback dfs state. +# Run this on master node. + +usage="Usage: start-dfs.sh [-upgrade|-rollback]" + +bin=`dirname "$0"` +bin=`cd "$bin"; pwd` + +. "$bin"/hadoop-config.sh + +# get arguments +if [ $# -ge 1 ]; then + nameStartOpt=$1 + shift + case $nameStartOpt in + (-upgrade) + ;; + (-rollback) + dataStartOpt=$nameStartOpt + ;; + (*) + echo $usage + exit 1 + ;; + esac +fi + +# start dfs daemons +# start namenode after datanodes, to minimize time namenode is up w/o data +# note: datanodes will log connection errors until namenode starts +"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR start namenode $nameStartOpt +"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR start datanode $dataStartOpt +"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR --hosts masters start secondarynamenode diff --git a/bin/start-hmon-remote.sh b/bin/start-hmon-remote.sh new file mode 100755 index 0000000..c3bf60e --- /dev/null +++ b/bin/start-hmon-remote.sh @@ -0,0 +1,44 @@ + +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Start hadoop Hmon process on machine specified on file conf/hmon + +usage="Usage: start-hmon-remote.sh" + +params=$# +bin=`dirname "$0"` +bin=`cd "$bin"; pwd` + +. 
"$bin"/hadoop-config.sh + +# get arguments +if [ $# -ge 1 ]; then + echo $usage + exit +fi + +if [ -f "${HADOOP_CONF_DIR}/hmon" ]; then + export HADOOP_SLAVES="${HADOOP_CONF_DIR}/hmon" + echo "Starting hmon at "`cat ${HADOOP_SLAVES}` + "$bin"/slaves.sh --config $HADOOP_CONF_DIR cd "$HADOOP_HOME" \; "$bin/start-hmon.sh" +else + echo "NOTE: Not starting hmon because there is no hmon file in ${HADOOP_CONF_DIR}/hmon" +fi + + diff --git a/bin/start-hmon.sh b/bin/start-hmon.sh new file mode 100755 index 0000000..b304f4b --- /dev/null +++ b/bin/start-hmon.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Start hadoop resource monitoring daemons. +# Run this on master node. + +usage="Usage: start-hmon.sh" + +params=$# +bin=`dirname "$0"` +bin=`cd "$bin"; pwd` + +. "$bin"/hadoop-config.sh + +# get arguments +if [ $# -ge 1 ]; then + echo $usage +fi + +export HADOOP_OPTS="$HADOOP_OPTS" + +"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR start \ +org.apache.hadoop.mapred.UtilizationCollector + +"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR start \ +org.apache.hadoop.mapred.UtilizationReporter diff --git a/bin/start-mapred.sh b/bin/start-mapred.sh new file mode 100755 index 0000000..a340649 --- /dev/null +++ b/bin/start-mapred.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Start hadoop map reduce daemons. Run this on master node. + +bin=`dirname "$0"` +bin=`cd "$bin"; pwd` + +. 
"$bin"/hadoop-config.sh + +# start mapred daemons +# start jobtracker first to minimize connection errors at startup +"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR start jobtracker +"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR start tasktracker +"$bin"/start-raidnode-remote.sh --config $HADOOP_CONF_DIR +"$bin"/start-hmon-remote.sh --config $HADOOP_CONF_DIR diff --git a/bin/start-raidnode-remote.sh b/bin/start-raidnode-remote.sh new file mode 100755 index 0000000..76bd356 --- /dev/null +++ b/bin/start-raidnode-remote.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Start hadoop RaidNode process on machine specified on file conf/raidnode + +usage="Usage: start-raidnode-remote.sh" + +params=$# +bin=`dirname "$0"` +bin=`cd "$bin"; pwd` + +. "$bin"/hadoop-config.sh + +# get arguments +if [ $# -ge 1 ]; then + echo $usage + exit +fi + +if [ -f "${HADOOP_CONF_DIR}/raidnode" ]; then + export HADOOP_SLAVES="${HADOOP_CONF_DIR}/raidnode" + echo "Starting raidnode at "`cat ${HADOOP_SLAVES}` + "$bin"/slaves.sh --config $HADOOP_CONF_DIR cd "$HADOOP_HOME" \; "$bin/start-raidnode.sh" +else + echo "No raidnode file in ${HADOOP_CONF_DIR}/raidnode" +fi + + diff --git a/bin/start-raidnode.sh b/bin/start-raidnode.sh new file mode 100755 index 0000000..72f5cc1 --- /dev/null +++ b/bin/start-raidnode.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Start hadoop RaidNode process +# Run this on RaidNode machine + +usage="Usage: start-raidnode.sh" + +params=$# +bin=`dirname "$0"` +bin=`cd "$bin"; pwd` + +. "$bin"/hadoop-config.sh + +# get arguments +if [ $# -ge 1 ]; then + echo $usage +fi + +if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then + . 
"${HADOOP_CONF_DIR}/hadoop-env.sh" +fi +export HADOOP_DAEMON_OPTS=$HADOOP_RAIDNODE_OPTS + +"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR start org.apache.hadoop.raid.RaidNode diff --git a/bin/stop-all.sh b/bin/stop-all.sh new file mode 100755 index 0000000..033f2fe --- /dev/null +++ b/bin/stop-all.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Stop all hadoop daemons. Run this on master node. + +bin=`dirname "$0"` +bin=`cd "$bin"; pwd` + +. "$bin"/hadoop-config.sh + +"$bin"/stop-mapred.sh --config $HADOOP_CONF_DIR +"$bin"/stop-dfs.sh --config $HADOOP_CONF_DIR diff --git a/bin/stop-avatar.sh b/bin/stop-avatar.sh new file mode 100755 index 0000000..7fadfe9 --- /dev/null +++ b/bin/stop-avatar.sh @@ -0,0 +1,119 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Start hadoop avatar daemons. +# Optinally upgrade or rollback dfs state. +# Run this on master node. + +usage="Usage: start-avatar.sh [-upgrade|-rollback|-zero|-one|-help]" + +params=$# +bin=`dirname "$0"` +bin=`cd "$bin"; pwd` +thishost=`hostname` + +. "$bin"/hadoop-config.sh + +# If no options are provided then start both AvatarNodes +instance0="-zero" +instance1="-one -sync -standby" + +# get arguments +if [ $# -ge 1 ]; then + nameStartOpt=$1 + shift + case $nameStartOpt in + (-help) + echo $usage + echo "-zero: Copy over transaction logs from remote machine to local machine." + echo " Start the instance of AvatarNode in standby avatar." + echo "-one: Copy transaction logs from this machine to remote machine." + echo " Start the instance of AvatarNode on remote machine in standby avatar." + echo " If no parameters are specified then start the first instance of AvatarNode" + echo " in primary Avatar and the second instance in standby avatar." 
+ exit 1 + ;; + (-upgrade) + ;; + (-rollback) + dataStartOpt=$nameStartOpt + ;; + (-zero) + instance0="-zero -standby" + instance1="" + ;; + (-one) + instance0="" + instance1="-one -standby" + ;; + (*) + echo $usage + exit 1 + ;; + esac +fi + +# start avatar daemons +# start namenode after datanodes, to minimize time namenode is up w/o data +# note: datanodes will log connection errors until namenode starts +if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then + . "${HADOOP_CONF_DIR}/hadoop-env.sh" +fi +export HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NAMENODE_OPTS" + +# read the contents of the masters file +mastersfile="${HADOOP_CONF_DIR}/masters" + +let numhost=0 +host0=="" +host1="" +for hosts in `cat "$mastersfile"|sed "s/#.*$//;/^$/d"`; do + if [ $numhost -ge 2 ] ; then + echo "You must list only two entries in the masters file." + echo "The first entry is the zero-th instance of the AvatarNode." + echo "The second entry is the one-th instance of the AvatarNode." + exit; + fi + if [ $numhost -eq 0 ] ; then + host0=$hosts + else + host1=$hosts + fi + ((numhost++)) +done + +# check that there are only two elements in the masters file +if [ $numhost -ne 2 ] ; then + echo "You must list only two entries in the masters file." + echo "The first entry is the zero-th instance of the AvatarNode." + echo "The second entry is the one-th instance of the AvatarNode." + exit; +fi + +# stop the zero-th of AvatarNode +if [ "x$instance0" != "x" ]; then + ssh $host0 "$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR stop org.apache.hadoop.hdfs.server.namenode.AvatarNode +fi + +# stop the one-th of AvatarNode +if [ "x$instance1" != "x" ]; then + ssh $host1 "$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR stop org.apache.hadoop.hdfs.server.namenode.AvatarNode +fi + +# stop the AvatarDataNodes +"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR stop org.apache.hadoop.hdfs.server.datanode.AvatarDataNode diff --git a/bin/stop-balancer.sh b/bin/stop-balancer.sh new file mode 100755 index 0000000..483a9c2 --- /dev/null +++ b/bin/stop-balancer.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +bin=`dirname "$0"` +bin=`cd "$bin"; pwd` + +. "$bin"/hadoop-config.sh + +# Stop balancer daemon. +# Run this on the machine where the balancer is running + +"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR stop balancer diff --git a/bin/stop-dfs.sh b/bin/stop-dfs.sh new file mode 100755 index 0000000..14fe61d --- /dev/null +++ b/bin/stop-dfs.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Stop hadoop DFS daemons. Run this on master node. + +bin=`dirname "$0"` +bin=`cd "$bin"; pwd` + +. "$bin"/hadoop-config.sh + +"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR stop namenode +"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR stop datanode +"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR --hosts masters stop secondarynamenode + diff --git a/bin/stop-hmon-remote.sh b/bin/stop-hmon-remote.sh new file mode 100755 index 0000000..24cbacb --- /dev/null +++ b/bin/stop-hmon-remote.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Stop hadoop Hmon process on machine specified on file conf/hmon + +usage="Usage: stop-hmon-remote.sh" + +params=$# +bin=`dirname "$0"` +bin=`cd "$bin"; pwd` + +. "$bin"/hadoop-config.sh + +# get arguments +if [ $# -ge 1 ]; then + echo $usage +fi + +if [ -f "${HADOOP_CONF_DIR}/hmon" ]; then + export HADOOP_SLAVES="${HADOOP_CONF_DIR}/hmon" + echo "Stopping hmon at "`cat ${HADOOP_SLAVES}` + "$bin"/slaves.sh --config $HADOOP_CONF_DIR cd "$HADOOP_HOME" \; "$bin/stop-hmon.sh" +else + echo "No hmon file in ${HADOOP_CONF_DIR}/hmon" +fi diff --git a/bin/stop-hmon.sh b/bin/stop-hmon.sh new file mode 100755 index 0000000..685ed7f --- /dev/null +++ b/bin/stop-hmon.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +usage="Usage: stop-hmon.sh" + +params=$# +bin=`dirname "$0"` +bin=`cd "$bin"; pwd` + +. 
"$bin"/hadoop-config.sh + +# get arguments +if [ $# -ge 1 ]; then + echo $usage +fi + +export HADOOP_OPTS="$HADOOP_OPTS" + +"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR stop \ +org.apache.hadoop.mapred.UtilizationCollector + +"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR stop \ +org.apache.hadoop.mapred.UtilizationReporter diff --git a/bin/stop-mapred.sh b/bin/stop-mapred.sh new file mode 100755 index 0000000..5c3ec11 --- /dev/null +++ b/bin/stop-mapred.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Stop hadoop map reduce daemons. Run this on master node. + +bin=`dirname "$0"` +bin=`cd "$bin"; pwd` + +. "$bin"/hadoop-config.sh + +"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR stop jobtracker +"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR stop tasktracker +"$bin"/stop-raidnode-remote.sh --config $HADOOP_CONF_DIR +"$bin"/stop-hmon-remote.sh --config $HADOOP_CONF_DIR + diff --git a/bin/stop-raidnode-remote.sh b/bin/stop-raidnode-remote.sh new file mode 100755 index 0000000..18d91ff --- /dev/null +++ b/bin/stop-raidnode-remote.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Stop hadoop RaidNode process on machine specified on file conf/raidnode + +usage="Usage: stop-raidnode-remote.sh" + +params=$# +bin=`dirname "$0"` +bin=`cd "$bin"; pwd` + +. "$bin"/hadoop-config.sh + +# get arguments +if [ $# -ge 1 ]; then + echo $usage +fi + +if [ -f "${HADOOP_CONF_DIR}/raidnode" ]; then + export HADOOP_SLAVES="${HADOOP_CONF_DIR}/raidnode" + echo "Stopping raidnode at "`cat ${HADOOP_SLAVES}` + "$bin"/slaves.sh --config $HADOOP_CONF_DIR cd "$HADOOP_HOME" \; "$bin/stop-raidnode.sh" +else + echo "No raidnode file in ${HADOOP_CONF_DIR}/raidnode" +fi diff --git a/bin/stop-raidnode.sh b/bin/stop-raidnode.sh new file mode 100755 index 0000000..d207e5a --- /dev/null +++ b/bin/stop-raidnode.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Stop hadoop RaidNode process
+# Run this on RaidNode machine.
+
+usage="Usage: stop-raidnode.sh"
+
+params=$#
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hadoop-config.sh
+
+# get arguments
+if [ $# -ge 1 ]; then
+  echo $usage
+fi
+
+export HADOOP_OPTS="$HADOOP_OPTS $HADOOP_RAIDNODE_OPTS"
+
+"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR stop org.apache.hadoop.raid.RaidNode
diff --git a/build.xml b/build.xml
new file mode 100644
index 0000000..e447b2d
--- /dev/null
+++ b/build.xml
@@ -0,0 +1,1885 @@
[Extraction damage: the 1,885 added lines of build.xml (Ant XML), the diffs that followed it up to src/contrib/hod/hodlib/Common/socketServers.py, and the HTML string literals emitted by the report-writing code (hodHTTPHandler / writeUserData) in socketServers.py were stripped of their angle-bracketed markup and cannot be reconstructed here. The surviving tail of that hunk resumes below.]
+#    self.w("<pre>")
+#    self.w(pprint.pformat(serviceInfo))
+#    self.w("</pre>
") + +class baseSocketServer: + def __init__(self, host, ports): + self.host = host + self.ports = ports + self.__stopForever = threading.Event() + self.__stopForever.clear() + self.__run = threading.Event() + self.__run.set() + self.server_address = () + self.mThread = None + + def server_bind(self): + """server_bind() method binds to a random range of ports.""" + + self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + + if len(self.ports) > 1: + randomPort = Random(os.getpid()) + portSequence = range(self.ports[0], self.ports[1]) + + maxTryCount = abs(self.ports[0] - self.ports[1]) + tryCount = 0 + while True: + somePort = randomPort.choice(portSequence) + self.server_address = (self.host, somePort) + try: + self.socket.bind(self.server_address) + except socket.gaierror, errData: + raise socket.gaierror, errData + except: + tryCount = tryCount + 1 + if tryCount > maxTryCount: + bindError = "bind failure for port range %s:%d" % ( + self.ports) + + raise socket.error, bindError + else: + break + else: + self.server_address = (self.host, int(self.ports[0])) + self.socket.bind(self.server_address) + + if self.host == '': + self.server_address = (local_fqdn(), self.server_address[1]) + + def _serve_forever(self): + """Replacement for serve_forever loop. + + All baseSocketServers run within a master thread; that thread + imitates serve_forever, but checks an event (self.__stopForever) + before processing new connections. + """ + + while not self.__stopForever.isSet(): + (rlist, wlist, xlist) = select([self.socket], [], [], + 1) + + if (len(rlist) > 0 and self.socket == rlist[0]): + self.handle_request() + + while not self.__run.isSet(): + if self.__stopForever.isSet(): + break + time.sleep(1) + + self.server_close() + + return True + + def serve_forever(self): + """Handle requests until stopForever event flag indicates stop.""" + + self.mThread = threading.Thread(name="baseSocketServer", + target=self._serve_forever) + self.mThread.start() + + return self.mThread + + def pause(self): + """Temporarily stop servicing requests.""" + + self.__run.clear() + + def cont(self): + """Resume servicing requests.""" + + self.__run.set() + + def stop(self): + """Set the stopForever flag to tell serve_forever() to exit.""" + + self.__stopForever.set() + if self.mThread: self.mThread.join() + return True + + def is_alive(self): + if self.mThread != None: + return self.mThread.isAlive() + else: + return False + +class threadedHTTPServer(baseSocketServer, ThreadingMixIn, HTTPServer): + def __init__(self, host, ports): + baseSocketServer.__init__(self, host, ports) + HTTPServer.__init__(self, self.server_address, SimpleHTTPRequestHandler) + +class forkingHTTPServer(baseSocketServer, ForkingMixIn, HTTPServer): + def __init__(self, host, ports): + baseSocketServer.__init__(self, host, ports) + HTTPServer.__init__(self, self.server_address, SimpleHTTPRequestHandler) + +class hodHTTPServer(baseSocketServer, ThreadingMixIn, HTTPServer): + service = None + def __init__(self, host, ports, serviceobj = None): + self.service = serviceobj + baseSocketServer.__init__(self, host, ports) + HTTPServer.__init__(self, self.server_address, hodHTTPHandler) + + def finish_request(self, request, client_address): + self.RequestHandlerClass(request, client_address, self, self.service) + +class hodXMLRPCServer(baseSocketServer, ThreadingMixIn, SimpleXMLRPCServer): + def __init__(self, host, ports, + requestHandler=SimpleXMLRPCRequestHandler, + logRequests=False, allow_none=False, encoding=None): + 
baseSocketServer.__init__(self, host, ports) + SimpleXMLRPCServer.__init__(self, self.server_address, requestHandler, + logRequests) + + self.register_function(self.stop, 'stop') + +try: + from twisted.web import server, xmlrpc + from twisted.internet import reactor, defer + from twisted.internet.threads import deferToThread + from twisted.python import log + + class twistedXMLRPC(xmlrpc.XMLRPC): + def __init__(self, logger): + xmlrpc.XMLRPC.__init__(self) + + self.__XRMethods = {} + self.__numRequests = 0 + self.__logger = logger + self.__pause = False + + def render(self, request): + request.content.seek(0, 0) + args, functionPath = xmlrpclib.loads(request.content.read()) + try: + function = self._getFunction(functionPath) + except Fault, f: + self._cbRender(f, request) + else: + request.setHeader("content-type", "text/xml") + defer.maybeDeferred(function, *args).addErrback( + self._ebRender).addCallback(self._cbRender, request) + + return server.NOT_DONE_YET + + def _cbRender(self, result, request): + if isinstance(result, xmlrpc.Handler): + result = result.result + if not isinstance(result, Fault): + result = (result,) + try: + s = xmlrpclib.dumps(result, methodresponse=1) + except: + f = Fault(self.FAILURE, "can't serialize output") + s = xmlrpclib.dumps(f, methodresponse=1) + request.setHeader("content-length", str(len(s))) + request.write(s) + request.finish() + + def _ebRender(self, failure): + if isinstance(failure.value, Fault): + return failure.value + log.err(failure) + return Fault(self.FAILURE, "error") + + def _getFunction(self, methodName): + while self.__pause: + time.sleep(1) + + self.__numRequests = self.__numRequests + 1 + function = None + try: + def defer_function(*args): + return deferToThread(self.__XRMethods[methodName], + *args) + function = defer_function + self.__logger.info( + "[%s] processing defered XML-RPC call to: %s ..." % + (self.__numRequests, methodName)) + except KeyError: + self.__logger.warn( + "[%s] fault %s on XML-RPC call to %s, method not found." % ( + self.__numRequests, self.NOT_FOUND, methodName)) + raise xmlrpc.NoSuchFunction(self.NOT_FOUND, + "method %s not found" % methodName) + + return function + + def register_function(self, functionRef, methodName): + self.__XRMethods[methodName] = functionRef + + def list_methods(self): + return self.__XRMethods.keys() + + def num_requests(self): + return self.__numRequests + + def pause(self): + self.__pause = True + + def cont(self): + self.__pause = False + + class twistedXMLRPCServer: + def __init__(self, host, ports, logger=None, threadPoolSize=100): + self.__host = host + self.__ports = ports + + if logger == None: + logger = hodDummyLogger() + + self.__logger = logger + + self.server_address = ['', ''] + reactor.suggestThreadPoolSize(threadPoolSize) + + self.__stopForever = threading.Event() + self.__stopForever.clear() + self.__mThread = None + + self.__xmlrpc = twistedXMLRPC(self.__logger) + + def _serve_forever(self): + if len(self.__ports) > 1: + randomPort = Random(os.getpid()) + portSequence = range(self.__ports[0], self.__ports[1]) + + maxTryCount = abs(self.__ports[0] - self.__ports[1]) + tryCount = 0 + while True: + somePort = randomPort.choice(portSequence) + self.server_address = (self.__host, int(somePort)) + if self.__host == '': + self.server_address = (local_fqdn(), self.server_address[1]) + try: + reactor.listenTCP(int(somePort), server.Site( + self.__xmlrpc), interface=self.__host) + reactor.run(installSignalHandlers=0) + except: + self.__logger.debug("Failed to bind to: %s:%s." 
% ( + self.__host, somePort)) + tryCount = tryCount + 1 + if tryCount > maxTryCount: + self.__logger.warn("Failed to bind to: %s:%s" % ( + self.__host, self.__ports)) + sys.exit(1) + else: + break + else: + try: + self.server_address = (self.__host, int(self.__ports[0])) + if self.__host == '': + self.server_address = (local_fqdn(), self.server_address[1]) + reactor.listenTCP(int(self.__ports[0]), server.Site(self.__xmlrpc), + interface=self.__host) + reactor.run(installSignalHandlers=0) + except: + self.__logger.warn("Failed to bind to: %s:%s."% ( + self.__host, self.__ports[0])) + sys.exit(1) + + def serve_forever(self): + """Handle requests until stopForever event flag indicates stop.""" + + self.__mThread = threading.Thread(name="XRServer", + target=self._serve_forever) + self.__mThread.start() + + if not self.__mThread.isAlive(): + raise Exception("Twisted XMLRPC server thread dead.") + + def register_function(self, functionRef, methodName): + self.__xmlrpc.register_function(functionRef, methodName) + + def register_introspection_functions(self): + pass + + def register_instance(self, instance): + for method in dir(instance): + if not method.startswith('_'): + self.register_function(getattr(instance, method), method) + + def pause(self): + self.__xmlrpc.pause() + + def cont(self): + self.__xmlrpc.cont() + + def stop(self): + def stop_thread(): + time.sleep(2) + reactor.stop() + + self.__stopForever.set() + + stopThread = threading.Thread(name='XRStop', target=stop_thread) + stopThread.start() + + return True + + def is_alive(self): + status = False + if reactor.running == 1: + status = True + + return status + + def status(self): + """Return status information on running XMLRPC Server.""" + stat = { 'XR server address' : self.server_address, + 'XR methods' : self.system_listMethods(), + 'XR server alive' : self.is_alive(), + 'XR requests processed' : self.__xmlrpc.num_requests(), + 'XR server stop flag' : self.__stopForever.isSet()} + return(stat) + + def system_listMethods(self): + return self.__xmlrpc.list_methods() + + def get_server_address(self): + waitCount = 0 + while self.server_address == '': + if waitCount == 9: + break + time.sleep(1) + waitCount = waitCount + 1 + + return self.server_address +except ImportError: + pass diff --git a/src/contrib/hod/hodlib/Common/tcp.py b/src/contrib/hod/hodlib/Common/tcp.py new file mode 100755 index 0000000..a118a67 --- /dev/null +++ b/src/contrib/hod/hodlib/Common/tcp.py @@ -0,0 +1,176 @@ +#Licensed to the Apache Software Foundation (ASF) under one +#or more contributor license agreements. See the NOTICE file +#distributed with this work for additional information +#regarding copyright ownership. The ASF licenses this file +#to you under the Apache License, Version 2.0 (the +#"License"); you may not use this file except in compliance +#with the License. You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. +# $Id:tcp.py 6172 2007-05-22 20:26:54Z zim $ +# +#------------------------------------------------------------------------------ + +""" TCP related classes. 
""" + +import socket, re, string +reAddress = re.compile(":") +reMayBeIp = re.compile("^\d+\.\d+\.\d+\.\d+$") +reValidPort = re.compile("^\d+$") + +class Error(Exception): + def __init__(self, msg=''): + self.message = msg + Exception.__init__(self, msg) + + def __repr__(self): + return self.message + +class tcpError(Error): + def __init__(self, message): + Error.__init__(self, message) + +class tcpSocket: + def __init__(self, address, timeout=30, autoflush=0): + """Constructs a tcpSocket object. + + address - standard tcp address (HOST:PORT) + timeout - socket timeout""" + + self.address = address + self.__autoFlush = autoflush + self.__remoteSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.__remoteSock.settimeout(timeout) + self.host = None + self.port = None + splitAddress = address + if isinstance(address, (tuple, list)): + self.host = address[0] + self.port = int(address[1]) + else: + splitAddress = get_address_tuple(address) + if not splitAddress[0]: + self.host = 'localhost' + else: + self.host = splitAddress[0] + + self.port = int(splitAddress[1]) + + self.__fileObjectOut = '' + self.__fileObjectIn = '' + + def __repr__(self): + return self.address + + def __iter__(self): + return self + + def next(self): + sockLine = self.read() + if not sockLine: + raise StopIteration + + return sockLine + + def open(self): + """Attempts to open a socket to the specified address.""" + + socketAddress = (self.host, self.port) + + try: + self.__remoteSock.connect(socketAddress) + if self.__autoFlush: + self.__fileObjectOut = self.__remoteSock.makefile('wb', 0) + else: + self.__fileObjectOut = self.__remoteSock.makefile('wb') + + self.__fileObjectIn = self.__remoteSock.makefile('rb', 0) + except: + raise tcpError, "connection failure: %s" % self.address + + def flush(self): + """Flushes write buffer.""" + self.__fileObjectOut.flush() + + def close(self): + """Attempts to close and open socket connection""" + + try: + self.__remoteSock.close() + self.__fileObjectOut.close() + self.__fileObjectIn.close() + except socket.error, exceptionObject: + exceptionMessage = "close failure %s %s" % (self.address, + exceptionObject.__str__()) + raise tcpError, exceptionMessage + + def verify(self): + """Verifies that a given IP address/host and port are valid. This + method will not attempt to open a socket to the specified address. + """ + + isValidAddress = False + if reMayBeIp.match(self.host): + if check_ip_address(self.host): + if reValidPort.match(str(self.port)): + isValidAddress = True + else: + if reValidPort.match(str(self.port)): + isValidAddress = True + + return(isValidAddress) + + def read(self): + """Reads a line off of the active socket.""" + + return self.__fileObjectIn.readline() + + def write(self, string): + """Writes a string to the active socket.""" + + print >> self.__fileObjectOut, string + +def check_net_address(address): + valid = True + pieces = string.split(address, '.') + if len(pieces) != 4: + valid = False + else: + for piece in pieces: + if int(piece) < 0 or int(piece) > 255: + valid = False + + return valid + +def check_ip_address(address): + valid = True + pieces = string.split(address, '.') + if len(pieces) != 4: + valid = False + else: + if int(pieces[0]) < 1 or int(pieces[0]) > 254: + valid = False + for i in range(1,4): + if int(pieces[i]) < 0 or int(pieces[i]) > 255: + valid = False + + return valid + +def get_address_tuple(address): + """ Returns an address tuple for TCP address. 
+ + address - TCP address of the form host:port + + returns address tuple (host, port) + """ + + addressList = reAddress.split(address) + addressTuple = (addressList[0], int(addressList[1])) + + return addressTuple diff --git a/src/contrib/hod/hodlib/Common/threads.py b/src/contrib/hod/hodlib/Common/threads.py new file mode 100755 index 0000000..0d19042 --- /dev/null +++ b/src/contrib/hod/hodlib/Common/threads.py @@ -0,0 +1,389 @@ +#Licensed to the Apache Software Foundation (ASF) under one +#or more contributor license agreements. See the NOTICE file +#distributed with this work for additional information +#regarding copyright ownership. The ASF licenses this file +#to you under the Apache License, Version 2.0 (the +#"License"); you may not use this file except in compliance +#with the License. You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. + +import threading, time, os, sys, pprint + +from popen2 import Popen4, Popen3, MAXFD +from signal import SIGTERM, SIGKILL + +class baseThread(threading.Thread): + """Base CAM threading class. The run method should be overridden.""" + + def __init__(self, name): + threading.Thread.__init__(self, name=name) + self.stopFlag = threading.Event() + self.stopFlag.clear() + self.running = threading.Event() + self.running.set() + self.isFinished = threading.Event() + self.isFinished.clear() + + def join(self, timeout=None): + self.stopFlag.set() + threading.Thread.join(self, timeout) + + def pause(self): + """Pause thread.""" + + self.running.clear() + + def cont(self): + """Resume thread operation.""" + + self.running.set() + +class simpleCommand(baseThread): + """Command execution object. Command output and exit status are captured. + + Public class attributes: + + cmdString - command to be executed + outputBuffer - command output, stdout + stderr + status - exit status, as returned by wait + + stdin - standard input for command + stdout - standard output of command when buffer == False + stderr - standard error of command when mode == 3 and buffer == False + + """ + + def __init__(self, name, cmdString, env=os.environ, mode=4, buffer=True, + wait=True, chdir=None): + """Class initialization. + + name - thread name to use when running the command + cmdString - command string to execute + inputString - string to print to command's stdin + env - shell environment dictionary + mode - 3 for popen3 and 4 for popen4 + buffer - out put to be retrieved with output() method + wait - return immediately after start() is called and output + command results as they come to stdout""" + + baseThread.__init__(self, name=name) + + self.cmdString = cmdString + self.__mode = mode + self.__buffer = buffer + self.__wait = wait + self.__chdir = chdir + self.__outputBuffer = [] + self.__status = None + self.__pid = None + self.__isFinished = threading.Event() + self.__isFinished.clear() + + self.stdin = None + self.stdout = None + self.stderr = None + + self.__env = env + + def run(self): + """ Overridden run method. Most of the work happens here. 
start() + should be called in place of this method.""" + + oldDir = None + if self.__chdir: + if os.path.exists(self.__chdir): + oldDir = os.getcwd() + os.chdir(self.__chdir) + else: + raise Exception( + "simpleCommand: invalid chdir specified: %s" % + self.__chdir) + + cmd = None + if self.__mode == 3: + cmd = _Popen3Env(self.cmdString, env=self.__env) + else: + cmd = _Popen4Env(self.cmdString, env=self.__env) + self.__pid = cmd.pid + + self.stdin = cmd.tochild + + if self.__mode == 3: + self.stderr = cmd.childerr + + while cmd.fromchild == None: + time.sleep(1) + + if self.__buffer == True: + output = cmd.fromchild.readline() + while output != '': + while not self.running.isSet(): + if self.stopFlag.isSet(): + break + time.sleep(1) + self.__outputBuffer.append(output) + output = cmd.fromchild.readline() + + elif self.__wait == False: + output = cmd.fromchild.readline() + while output != '': + while not self.running.isSet(): + if self.stopFlag.isSet(): + break + time.sleep(1) + print output, + if self.stopFlag.isSet(): + break + output = cmd.fromchild.readline() + else: + self.stdout = cmd.fromchild + + self.__status = cmd.poll() + while self.__status == -1: + while not self.running.isSet(): + if self.stopFlag.isSet(): + break + time.sleep(1) + + self.__status = cmd.poll() + time.sleep(1) + + if oldDir: + os.chdir(oldDir) + + self.__isFinished.set() + + sys.exit(0) + + def getPid(self): + """return pid of the launches process""" + return self.__pid + + def output(self): + return self.__outputBuffer[:] + + def wait(self): + """Wait blocking until command execution completes.""" + + self.__isFinished.wait() + + return os.WEXITSTATUS(self.__status) + + def is_running(self): + """Returns boolean, are we running?""" + + status = True + if self.__isFinished.isSet(): + status = False + + return status + + def exit_code(self): + """ Returns process exit code.""" + + if self.__status != None: + return os.WEXITSTATUS(self.__status) + else: + return None + + def exit_status_string(self): + """Return a string representation of the command's exit status.""" + + statusString = None + if self.__status: + exitStatus = os.WEXITSTATUS(self.__status) + exitSignal = os.WIFSIGNALED(self.__status) + coreDump = os.WCOREDUMP(self.__status) + + statusString = "exit code: %s | signal: %s | core %s" % \ + (exitStatus, exitSignal, coreDump) + + return(statusString) + + def stop(self): + """Stop the running command and join it's execution thread.""" + + self.join() + + def kill(self): + count = 0 + while self.is_running(): + try: + if count > 20: + os.kill(self.__pid, SIGKILL) + break + else: + os.kill(self.__pid, SIGTERM) + except: + break + + time.sleep(.1) + count = count + 1 + + self.stop() + +class _Popen3Env(Popen3): + def __init__(self, cmd, capturestderr=False, bufsize=-1, env=os.environ): + self._env = env + Popen3.__init__(self, cmd, capturestderr, bufsize) + + def _run_child(self, cmd): + if isinstance(cmd, basestring): + cmd = ['/bin/sh', '-c', cmd] + for i in xrange(3, MAXFD): + try: + os.close(i) + except OSError: + pass + + try: + os.execvpe(cmd[0], cmd, self._env) + finally: + os._exit(1) + +class _Popen4Env(_Popen3Env, Popen4): + childerr = None + + def __init__(self, cmd, bufsize=-1, env=os.environ): + self._env = env + Popen4.__init__(self, cmd, bufsize) + +class loop(baseThread): + """ A simple extension of the threading.Thread class which continuously + executes a block of code until join(). 
+ """ + + def __init__(self, name, functionRef, functionArgs=None, sleep=1, wait=0, + offset=False): + """Initialize a loop object. + + name - thread name + functionRef - a function reference + functionArgs - function arguments in the form of a tuple, + sleep - time to wait between function execs + wait - time to wait before executing the first time + offset - set true to sleep as an offset of the start of the + last func exec instead of the end of the last func + exec + """ + + self.__functionRef = functionRef + self.__functionArgs = functionArgs + self.__sleep = sleep + self.__wait = wait + self.__offset = offset + + baseThread.__init__(self, name=name) + + def run(self): + """Do not call this directly. Call self.start().""" + + startTime = None + while not self.stopFlag.isSet(): + sleep = self.__sleep + if self.__wait > 0: + startWaitCount = 0 + while not self.stopFlag.isSet(): + while not self.running.isSet(): + if self.stopFlag.isSet(): + break + time.sleep(1) + time.sleep(0.5) + startWaitCount = startWaitCount + .5 + if startWaitCount >= self.__wait: + self.__wait = 0 + break + startTime = time.time() + + if not self.stopFlag.isSet(): + if self.running.isSet(): + if self.__functionArgs: + self.__functionRef(self.__functionArgs) + else: + self.__functionRef() + endTime = time.time() + + while not self.running.isSet(): + time.sleep(1) + + while not self.stopFlag.isSet(): + while not self.running.isSet(): + if self.stopFlag.isSet(): + break + time.sleep(1) + + currentTime = time.time() + if self.__offset: + elapsed = time.time() - startTime + else: + elapsed = time.time() - endTime + + if elapsed >= self.__sleep: + break + + time.sleep(0.5) + + self.isFinished.set() + + def set_sleep(self, sleep, wait=None, offset=None): + """Modify loop frequency paramaters. + + sleep - time to wait between function execs + wait - time to wait before executing the first time + offset - set true to sleep as an offset of the start of the + last func exec instead of the end of the last func + exec + """ + + self.__sleep = sleep + if wait != None: + self.__wait = wait + if offset != None: + self.__offset = offset + + def get_sleep(self): + """Get loop frequency paramaters. + Returns a dictionary with sleep, wait, offset. + """ + + return { + 'sleep' : self.__sleep, + 'wait' : self.__wait, + 'offset' : self.__offset, + } + +class func(baseThread): + """ A simple extension of the threading.Thread class which executes + a function in a separate thread. + """ + + def __init__(self, name, functionRef, functionArgs=None): + """Initialize a func object. + + name - thread name + functionRef - a function reference + functionArgs - function arguments in the form of a tuple, + """ + + self.__functionRef = functionRef + self.__functionArgs = functionArgs + + baseThread.__init__(self, name=name) + + def run(self): + """Do not call this directly. Call self.start().""" + + if not self.stopFlag.isSet(): + if self.running.isSet(): + if self.__functionArgs: + self.__functionRef(self.__functionArgs) + else: + self.__functionRef() + sys.exit(0) diff --git a/src/contrib/hod/hodlib/Common/types.py b/src/contrib/hod/hodlib/Common/types.py new file mode 100755 index 0000000..9612ce4 --- /dev/null +++ b/src/contrib/hod/hodlib/Common/types.py @@ -0,0 +1,1266 @@ +#Licensed to the Apache Software Foundation (ASF) under one +#or more contributor license agreements. See the NOTICE file +#distributed with this work for additional information +#regarding copyright ownership. 
The ASF licenses this file +#to you under the Apache License, Version 2.0 (the +#"License"); you may not use this file except in compliance +#with the License. You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. +# $Id:types.py 6172 2007-05-22 20:26:54Z zim $ +# +#------------------------------------------------------------------------------ + +""" Higher level data types and type related classes. + + Supported Types (Verification and Display): + + address - validates ip:port and host:port tcp addresses + ip_address - validates and IP address + net_address - validates an IP like address, ie netmask + hostname - validates a hostname with DNS + eaddress - validates a single email address or a comma + seperated list of email addresses + http_version - validates a value is a http version (1.0/1.1) + tcp_port - validates a value to be a valid tcp port (2-65535) + bool - validates value is (0, 1, true, false) / converts + true -> 1 and false -> 0 + directory - validates a values is a directory / resolves path to + absolute path + file - validates a value is a file / resolves path to absolute + path + float - validates a value is a float, converts string to float + pos_float - validates a value is a float and >= 0, converts string + to float + pos_num - same as pos_float + neg_float - validates a value is a float and < 0, converts string to + float + int - validates a value is an integer, converts string to + integer + pos_int - validates a value is an integer and >= 0, converts + string to integer + neg_int - validates a values is an integer and < 0, converts + striing to integer + freq - frequency, positive integer + size - validates a size in bytes, kb, mb, kb, and tb + (int > 0 post fixed with K, M, G, or T) also converts + value to integer bytes + range - numeric range, x-y normalized to a tuple, if a single + number is supplie a single element tuple is returned + timestamp - utc timestamp of the form YYYYMMDDHHMMSS + user_account - UNIX user account name + user_group - UNIX group name + string - arbitrarily long string + list - comma seperated list of strings of arbitrary length, + keyval - comma seperated list of key=value pairs, key does not + need to be unique. 
+ uri - a uri """ + +import sys, os, socket, pwd, grp, stat, re, re, string, pprint, urlparse + +from tcp import tcpSocket, check_net_address, check_ip_address +from util import check_timestamp + +types = { 'directory' : { 'db' : 'string', + 'units' : None }, + + 'address' : { 'db' : 'string', + 'units' : None }, + + 'ip_address' : { 'db' : 'string', + 'units' : None }, + + 'net_address' : { 'db' : 'string', + 'units' : None }, + + 'bool' : { 'db' : 'bool', + 'units' : None }, + + 'int' : { 'db' : 'integer', + 'units' : None }, + + 'float' : { 'db' : 'float', + 'units' : None }, + + 'pos_int' : { 'db' : 'integer', + 'units' : None }, + + 'neg_int' : { 'db' : 'integer', + 'units' : None }, + + 'pos_num' : { 'db' : 'float', + 'units' : None }, + + 'pos_float' : { 'db' : 'float', + 'units' : None }, + + 'neg_float' : { 'db' : 'float', + 'units' : None }, + + 'string' : { 'db' : 'string', + 'units' : None }, + + 'list' : { 'db' : 'string', + 'units' : None }, + + 'file' : { 'db' : 'string', + 'units' : None }, + + 'size' : { 'db' : 'integer', + 'units' : 'bytes' }, + + 'freq' : { 'db' : 'integer', + 'units' : 'hz' }, + + 'eaddress' : { 'db' : 'string', + 'units' : None }, + + 'tcp_port' : { 'db' : 'integer', + 'units' : None }, + + 'http_version' : { 'db' : 'float', + 'units' : None }, + + 'range' : { 'db' : 'string', + 'units' : None }, + + 'hostname' : { 'db' : 'string', + 'units' : None }, + + 'user_account' : { 'db' : 'string', + 'units' : None }, + + 'user_group' : { 'db' : 'string', + 'units' : None }, + + 'timestamp' : { 'db' : 'timestamp', + 'units' : None }, + + 'keyval' : { 'db' : 'string', + 'units' : None }, + + 'uri' : { 'db' : 'string', + 'units' : None }, + + '' : { 'db' : 'string', + 'units' : None }} + +dbTypes = { 'string' : { 'type' : 'varchar', + 'store' : 'type_strings_0', + 'table' : True }, + + 'integer' : { 'type' : 'bigint', + 'store' : 'integers', + 'table' : False }, + + 'float' : { 'type' : 'real', + 'store' : 'floats', + 'table' : False }, + + 'bool' : { 'type' : 'boolean', + 'store' : 'bools', + 'table' : False }, + + 'timestamp' : { 'type' : 'timestamp(0)', + 'store' : 'timestamps', + 'table' : False }} + +reSizeFormat = re.compile("^(\d+)(k|m|g|t|p|kb|mb|gb|tb|pb)$", flags=2) +reDash = re.compile("\s*-\s*") + +sizeFactors = { 'b' : 1, + 'bytes' : 1, + 'k' : 1024, + 'kb' : 1024, + 'm' : 1048576, + 'mb' : 1048576, + 'g' : 1073741824, + 'gb' : 1073741824, + 't' : 1099511627776, + 'tb' : 1099511627776, + 'p' : 1125899906842624, + 'pb' : 1125899906842624 } + +freqFactors = { 'hz' : 1, + 'khz' : 1000, + 'mhz' : 1000000, + 'ghz' : 1000000000, + 'thz' : 1000000000000, + 'phz' : 1000000000000000 } + +sizeMap = [ { 'factor' : sizeFactors['b'], + 'long' : 'byte', + 'short' : 'byte' }, + + { 'factor' : sizeFactors['k'], + 'long' : 'Kilobyte', + 'short' : 'KB' }, + + { 'factor' : sizeFactors['m'], + 'long' : 'Megabyte', + 'short' : 'MB' }, + + { 'factor' : sizeFactors['g'], + 'long' : 'Gigabyte', + 'short' : 'GB' }, + + { 'factor' : sizeFactors['t'], + 'long' : 'Terabyte', + 'short' : 'TB' }, + + { 'factor' : sizeFactors['p'], + 'long' : 'Petabyte', + 'short' : 'PB' } ] + +freqMap = [ { 'factor' : freqFactors['hz'], + 'long' : 'Hertz', + 'short' : 'Hz' }, + + { 'factor' : freqFactors['khz'], + 'long' : 'Kilohertz', + 'short' : 'KHz' }, + + { 'factor' : freqFactors['mhz'], + 'long' : 'Megahertz', + 'short' : 'MHz' }, + + { 'factor' : freqFactors['ghz'], + 'long' : 'Gigahertz', + 'short' : 'GHz' }, + + { 'factor' : freqFactors['thz'], + 'long' : 'Terahertz', + 'short' : 'THz' 
}, + + { 'factor' : freqFactors['phz'], + 'long' : 'Petahertz', + 'short' : 'PHz' } ] + +reListString = r"(?, + 'name' : 'SA_COMMON.old_xml_dir', + 'value': 'var/data/old' }, + + { 'func' : , + 'name' : 'SA_COMMON.log_level', + 'value': '4' } ] + + validatedInfo = [ { # name supplied to add() + 'name' : 'SA_COMMON.tmp_xml_dir', + + # is valid or not + 'isValid' : 1 + + # normalized value + 'normalized' : /var/data/tmp, + + # error string ? + 'errorData' : 0 }, + + { 'name' : 'SA_COMMON.new_xml_dir', + 'isValid' : 1 + 'normalized' : /var/data/new, + 'errorData' : 0 } ]""" + + if attrname == "validateList": + return self.validateList # list of items to be validated + elif attrname == "validatedInfo": + return self.validatedInfo # list of validation results + else: raise AttributeError, attrname + + def __build_verify_functions(self): + functions = {} + for function in dir(self): + functions[function] = 1 + + for type in types.keys(): + # kinda bad, need to find out how to know the name of the class + # I'm in. But it works. + functionName = "_typeValidator__verify_%s" % type + if functions.has_key(functionName): + self.verifyFunctions[type] = getattr(self, functionName) + else: + if type == '': + self.verifyFunctions[type] = self.__verify_nothing + else: + error = "Verify function %s for type %s does not exist." \ + % (functionName, type) + raise Exception(error) + sys.exit(1) + + def __get_value_info(self): + valueInfo = { 'isValid' : 0, 'normalized' : 0, 'errorData' : 0 } + + return valueInfo + + def __set_value_info(self, valueInfo, **valueData): + try: + valueInfo['normalized'] = valueData['normalized'] + valueInfo['isValid'] = 1 + except KeyError: + valueInfo['isValid'] = 0 + try: + valueInfo['errorData'] = valueData['errorData'] + except: + pass + + # start of 'private' verification methods, each one should correspond to a + # type string (see self.verify_config()) + def __verify_directory(self, type, value): + valueInfo = self.__get_value_info() + + if os.path.isdir(value): + self.__set_value_info(valueInfo, normalized=self.normalize(type, + value)) + else: + self.__set_value_info(valueInfo) + + return valueInfo + + def __norm_directory(self, value): + return self.__normalizedPath(value) + + def __verify_address(self, type, value): + valueInfo = self.__get_value_info() + + try: + socket = tcpSocket(value) + if socket.verify(): + self.__set_value_info(valueInfo, normalized=self.normalize(type, + value)) + else: + self.__set_value_info(valueInfo) + except: + self.__set_value_info(valueInfo) + + return valueInfo + + def __norm_address(self, value): + return value.split(':') + + def __verify_ip_address(self, type, value): + valueInfo = self.__get_value_info() + + if check_ip_address(value): + self.__set_value_info(valueInfo, normalized=self.normalize(type, + value)) + else: + self.__set_value_info(valueInfo) + + return valueInfo + + def __verify_net_address(self, type, value): + valueInfo = self.__get_value_info() + + if check_net_address(value): + self.__set_value_info(valueInfo, normalized=self.normalize(type, + value)) + else: + self.__set_value_info(valueInfo) + + return valueInfo + + def __verify_bool(self, type, value): + valueInfo = self.__get_value_info() + + value = str(value) + if re.match("^false|0|f|no$", value, 2): + self.__set_value_info(valueInfo, normalized=False) + elif re.match("^true|1|t|yes$", value, 2): + self.__set_value_info(valueInfo, normalized=True) + else: + self.__set_value_info(valueInfo) + + return valueInfo + + def __norm_bool(self, value): + value = 
str(value) + norm = "" + if re.match("^false|0|f|no$", value, 2): + norm = False + elif re.match("^true|1|t|yes$", value, 2): + norm = True + else: + raise Exception("invalid bool specified: %s" % value) + + return norm + + def __verify_int(self, type, value): + valueInfo = self.__get_value_info() + + try: + self.__set_value_info(valueInfo, normalized=self.normalize(type, + value)) + except: + self.__set_value_info(valueInfo) + + return valueInfo + + def __norm_int(self, value): + return int(value) + + def __verify_float(self, type, value): + valueInfo = self.__get_value_info() + + try: + self.__set_value_info(valueInfo, normalized=self.normalize(type, + value)) + except: + self.__set_value_info(valueInfo) + + return valueInfo + + def __norm_float(self, value): + return float(value) + + def __verify_pos_int(self, type, value): + valueInfo = self.__get_value_info() + + try: + value = self.normalize(type, value) + except: + self.__set_value_info(valueInfo) + else: + self.__set_value_info(valueInfo, normalized=value) + + return valueInfo + + def __norm_pos_int(self, value): + value = int(value) + if value < 0: + raise Exception("value is not positive: %s" % value) + + return value + + def __verify_neg_int(self, type, value): + valueInfo = self.__get_value_info() + + try: + value = self.normalize(type, value) + except: + self.__set_value_info(valueInfo) + else: + self.__set_value_info(valueInfo, normalized=value) + + return valueInfo + + def __norm_neg_int(self, type, value): + value = int(value) + if value > 0: + raise Exception("value is not negative: %s" % value) + + return value + + def __verify_freq(self, type, value): + return self.__verify_pos_int(type, value) + + def __norm_freq(self, value): + return self.__norm_pos_int(value) + + def __verify_pos_float(self, type, value): + valueInfo = self.__get_value_info() + + try: + value = self.normalize(type, value) + except: + self.__set_value_info(valueInfo) + else: + self.__set_value_info(valueInfo, normalized=value) + + return valueInfo + + def __norm_pos_float(self, value): + value = float(value) + if value < 0: + raise Exception("value is not positive: %s" % value) + + return value + + def __verify_pos_num(self, type, value): + return self.__verify_pos_float(value) + + def __norm_pos_num(self, value): + return self.__norm_pos_float(value) + + def __verify_neg_float(self, type, value): + valueInfo = self.__get_value_info() + + try: + value = self.normalize(type, value) + except: + self.__set_value_info(valueInfo) + else: + self.__set_value_info(valueInfo, normalized=value) + + return valueInfo + + def __norm_neg_float(self, value): + value = float(value) + if value >= 0: + raise Exception("value is not negative: %s" % value) + + return value + + def __verify_string(self, type, value): + valueInfo = self.__get_value_info() + self.__set_value_info(valueInfo, normalized=self.normalize(type, + value)) + + return valueInfo + + def __norm_string(self, value): + return str(value) + + def __verify_keyval(self, type, value): + valueInfo = self.__get_value_info() + + if reKeyVal.search(value): + try: + self.__set_value_info(valueInfo, normalized=self.normalize(type, + value)) + except: + self.__set_value_info(valueInfo, errorData = \ + "invalid list of key-value pairs : [ %s ]" % value) + else: + msg = "No key value pairs found?" 
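+ # no key=value delimiter was found in the value, so an error is
+ # recorded instead of a normalized dictionary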
+ self.__set_value_info(valueInfo, errorData=msg) + + return valueInfo + + def __norm_keyval(self, value): + list = self.__norm_list(value) + keyValue = {} + for item in list: + (key, value) = reKeyVal.split(item) + #if not keyValue.has_key(key): + # keyValue[key] = [] + #keyValue[key].append(value) + keyValue[key] = value + return keyValue + + def __verify_list(self, type, value): + valueInfo = self.__get_value_info() + + self.__set_value_info(valueInfo, normalized=self.normalize(type,value)) + + return valueInfo + + def __norm_list(self, value): + norm = [] + if reList.search(value): + norm = reList.split(value) + else: + norm = [value,] + + return norm + + def __verify_file(self, type, value): + valueInfo = self.__get_value_info() + + if os.path.isfile(value): + self.__set_value_info(valueInfo, normalized=self.normalize(type, + value)) + else: + self.__set_value_info(valueInfo) + + return valueInfo + + def __norm_file(self, value): + return self.__normalizedPath(value) + + def __verify_size(self, type, value): + valueInfo = self.__get_value_info() + + value = str(value) + if reSizeFormat.match(value): + numberPart = int(reSizeFormat.sub("\g<1>", value)) + factorPart = reSizeFormat.sub("\g<2>", value) + try: + normalized = normalize_size(numberPart, factorPart) + self.__set_value_info(valueInfo, + normalized=normalized) + except: + self.__set_value_info(valueInfo) + else: + try: + value = int(value) + except: + self.__set_value_info(valueInfo) + else: + if value >= 0: + self.__set_value_info(valueInfo, normalized=value) + else: + self.__set_value_info(valueInfo) + + return valueInfo + + def __norm_size(self, file): + norm = None + if reSizeFormat.match(value): + numberPart = int(reSizeFormat.sub("\g<1>", value)) + factorPart = reSizeFormat.sub("\g<2>", value) + norm = normalize_size(numberPart, factorPart) + else: + norm = int(value) + + return norm + + + def __verify_eaddress(self, type, value): + valueInfo = self.__get_value_info() + + emailList = reComma.split(value) + + for emailAddress in emailList: + if reEmailAddress.match(emailAddress): + emailParts = reEmailDelimit.split(emailAddress) + try: + socket.gethostbyname(emailParts[1]) + self.__set_value_info(valueInfo, normalized=self.normalize( + type, value)) + except: + errorString = "%s is invalid (domain lookup failed)" % \ + emailAddress + self.__set_value_info(valueInfo, errorData=errorString) + else: + errorString = "%s is invalid" % emailAddress + self.__set_value_info(valueInfo, errorData=errorString) + + return valueInfo + + def __verify_tcp_port(self, type, value): + valueInfo = self.__get_value_info() + + try: + value = self.__norm_tcp_port(value) + except: + self.__set_value_info(valueInfo) + else: + if value in range(2, 65536): + self.__set_value_info(valueInfo, normalized=value) + else: + self.__set_value_info(valueInfo) + + return valueInfo + + def __norm_tcp_port(self, value): + return int(value) + + def __verify_http_version(self, type, value): + valueInfo = self.__get_value_info() + + if value in ('1.0', '1.1'): + self.__set_value_info(valueInfo, normalized=float(value)) + else: + self.__set_value_info(valueInfo) + + return valueInfo + + def __verify_range(self, type, value): + valueInfo = self.__get_value_info() + + range = reDash.split(value) + + try: + if len(range) > 1: + start = int(range[0]) + end = int(range[1]) + else: + start = int(range[0]) + end = None + except: + self.__set_value_info(valueInfo) + else: + if end: + if end - start != 0: + self.__set_value_info(valueInfo, normalized=(start, end)) + else: 
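+ # a zero-length range such as "5-5" is treated as invalid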
+ self.__set_value_info(valueInfo) + else: + self.__set_value_info(valueInfo, normalized=(start,)) + + return valueInfo + + def __norm_range(self, value): + range = reDash.split(value) + if len(range) > 1: + start = int(range[0]) + end = int(range[1]) + else: + start = int(range[0]) + end = None + + return (start, end) + + def __verify_uri(self, type, value): + valueInfo = self.__get_value_info() + + _norm = None + try: + uriComponents = urlparse.urlparse(value) + if uriComponents[0] == '' or uriComponents[0] == 'file': + # if scheme is '' or 'file' + if not os.path.isfile(uriComponents[2]) and \ + not os.path.isdir(uriComponents[2]): + raise Exception("Invalid local URI") + else: + self.__set_value_info(valueInfo, normalized=self.normalize( + type,value)) + else: + # other schemes + # currently not checking anything. TODO + self.__set_value_info(valueInfo, normalized=self.normalize( + type,value)) + except: + errorString = "%s is an invalid uri" % value + self.__set_value_info(valueInfo, errorData=errorString) + + return valueInfo + + def __norm_uri(self, value): + uriComponents = list(urlparse.urlparse(value)) + if uriComponents[0] == '': + # if scheme is ''' + return self.__normalizedPath(uriComponents[2]) + elif uriComponents[0] == 'file': + # if scheme is 'file' + normalizedPath = self.__normalizedPath(uriComponents[2]) + return urlparse.urlunsplit(uriComponents[0:1] + [normalizedPath] + uriComponents[3:]) + + # Not dealing with any other case right now + return value + + def __verify_timestamp(self, type, value): + valueInfo = self.__get_value_info() + + if check_timestamp(value): + self.__set_value_info(valueInfo, normalized=self.normalize(type, + value)) + else: + self.__set_value_info(valueInfo) + + return valueInfo + + def __verify_hostname(self, type, value): + valueInfo = self.__get_value_info() + + try: + socket.gethostbyname(value) + self.__set_value_info(valueInfo, normalized=self.normalize(type, + value)) + except: + errorString = "%s is invalid (domain lookup failed)" % value + self.__set_value_info(valueInfo, errorData=errorString) + + return valueInfo + + def __verify_user_account(self, type, value): + valueInfo = self.__get_value_info() + + try: + pwd.getpwnam(value) + except: + errorString = "'%s' user account does not exist" % value + self.__set_value_info(valueInfo, errorData=errorString) + else: + self.__set_value_info(valueInfo, normalized=self.normalize(type, + value)) + + return valueInfo + + def __verify_user_group(self, type, value): + valueInfo = self.__get_value_info() + + try: + grp.getgrnam(value) + except: + errorString = "'%s' group does not exist" % value + self.__set_value_info(valueInfo, errorData=errorString) + else: + self.__set_value_info(valueInfo, normalized=self.normalize(type, + value)) + + return valueInfo + + def __verify_nothing(self, type, value): + valueInfo = self.__get_value_info() + + self.__set_value_info(valueInfo, normalized=self.normalize(type, + value)) + + return valueInfo + + #-------------------------------------------------------------------------- + + def normalize(self, type, value): + try: + normFunc = getattr(self, "_typeValidator__norm_%s" % type) + return normFunc(value) + except AttributeError, A: + # this exception should occur only when we don't have corresponding normalize function + return value + + def verify(self, type, value, allowNone=False): + """Verifies a value based on its type. 
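+ Dispatch goes through the per-type __verify_* method registered in
+ self.verifyFunctions by __build_verify_functions().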
+ + type - supported configValidator type + value - data to be validated + allowNone - don't freak out if None or '' is supplied + + returns a valueInfo dictionary: + + valueInfo = { 'isValid' : 1, 'normalized' : 5, 'errorData' : 0 } + + where: + + isValid - true or false (0/1) + normalized - the normalized value + errorData - if invalid an error string + + supported types: + + see top level""" + + result = None + if allowNone: + if value == '' or value == None: + result = self.__verify_nothing(None, None) + result['normalized'] = None + else: + result = self.verifyFunctions[type](type, value) + else: + result = self.verifyFunctions[type](type, value) + + return result + + def is_valid_type(self, type): + """Returns true if type is valid.""" + + return types.has_key(type) + + def type_info(self, type): + """Returns type info dictionary.""" + + dbInfo = dbTypes[types[type]['db']] + typeInfo = types[type].copy() + typeInfo['db'] = dbInfo + + return typeInfo + + def add(self, name, type, value): + """Adds a value and type by name to the configValidate object to be + verified using validate(). + + name - name used to key values and access the results of the + validation + type - configValidator type + value - data to be verified""" + + self.validateList.append({ 'name' : name, + 'type' : type, + 'value': value }) + + def validate(self, allowNone=False): + """Validates configValidate object populating validatedInfo with + valueInfo dictionaries for each value added to the object.""" + + for valItem in self.validateList: + valueInfo = self.verify(valItem['type'], valItem['value'], + allowNone) + if valueInfo: + valueInfo['name'] = valItem['name'] + self.validatedInfo.append(valueInfo) + else: + raise Exception("\nMissing a return value: valueInfo\n%s" % \ + self.verifyFunctions[valItem['type']](valItem['value'])) + + def __normalizedPath(self, value): + oldWd = os.getcwd() + if self.__originalDir: + os.chdir(self.__originalDir) + normPath = os.path.realpath(value) + os.chdir(oldWd) + return normPath + + +class display: + def __init__(self): + self.displayFunctions = {} + self.__build_dispaly_functions() + + def __build_dispaly_functions(self): + functions = {} + for function in dir(self): + functions[function] = 1 + + for type in types.keys(): + # kinda bad, need to find out how to know the name of the class + # I'm in. But it works. + functionName = "_cisplay__display_%s" % type + if functions.has_key(functionName): + self.displayFunctions[type] = getattr(self, functionName) + else: + if type == '': + self.displayFunctions[type] = self.__display_default + else: + error = "Display function %s for type %s does not exist." 
\ + % (functionName, type) + raise Exception(error) + sys.exit(1) + + def __display_default(self, value, style): + return value + + def __display_generic_number(self, value): + displayNumber = '' + splitNum = string.split(str(value), sep='.') + numList = list(str(splitNum[0])) + numList.reverse() + length = len(numList) + counter = 0 + for char in numList: + counter = counter + 1 + if counter % 3 or counter == length: + displayNumber = "%s%s" % (char, displayNumber) + else: + displayNumber = ",%s%s" % (char, displayNumber) + + if len(splitNum) > 1: + displayNumber = "%s.%s" % (displayNumber, splitNum[1]) + + return displayNumber + + def __display_generic_mappable(self, map, value, style, plural=True): + displayValue = '' + length = len(str(value)) + if length > 3: + for factorSet in map: + displayValue = float(value) / factorSet['factor'] + if len(str(int(displayValue))) <= 3 or \ + factorSet['factor'] == map[-1]['factor']: + displayValue = "%10.2f" % displayValue + if displayValue[-1] == '0': + if displayValue > 1 and style != 'short' and plural: + displayValue = "%s %ss" % (displayValue[:-1], + factorSet[style]) + else: + displayValue = "%s %s" % (displayValue[:-1], + factorSet[style]) + else: + if displayValue > 1 and style != 'short' and plural: + displayValue = "%s %ss" % (displayValue, + factorSet[style]) + else: + displayValue = "%s %s" % (displayValue, + factorSet[style]) + break + + return displayValue + + def __display_directory(self, value, style): + return self.__display_default(value, style) + + def __display_address(self, value, style): + return self.__display_default(value, style) + + def __display_ip_address(self, value, style): + return self.__display_default(value, style) + + def __display_net_address(self, value, style): + return self.__display_default(value, style) + + def __display_bool(self, value, style): + displayValue = value + + if not isinstance(displayValue, bool): + if re.match("^false|0|f|no$", value, 2): + displayValue=False + elif re.match("^true|1|t|yes$", value, 2): + displayValue=True + + return displayValue + + def __display_int(self, value, style): + return self.__display_generic_number(value) + + def __display_float(self, value, style): + return self.__display_generic_number(value) + + def __display_pos_int(self, value, style): + return self.__display_generic_number(value) + + def __display_neg_int(self, value, style): + return self.__display_generic_number(value) + + def __display_pos_num(self, value, style): + return self.__display_generic_number(value) + + def __display_pos_float(self, value, style): + return self.__display_generic_number(value) + + def __display_neg_float(self, value, style): + return self.__display_generic_number(value) + + def __display_string(self, value, style): + return self.__display_default(value, style) + + def __display_list(self, value, style): + value = value.rstrip() + return value.rstrip(',') + + def __display_keyval(self, value, style): + value = value.rstrip() + return value.rstrip(',') + + def __display_file(self, value, style): + return self.__display_default(value, style) + + def __display_size(self, value, style): + return self.__display_generic_mappable(sizeMap, value, style) + + def __display_freq(self, value, style): + return self.__display_generic_mappable(freqMap, value, style, False) + + def __display_eaddress(self, value, style): + return self.__display_default(value, style) + + def __display_tcp_port(self, value, style): + return self.__display_default(value, style) + + def __display_http_version(self, 
value, style): + return self.__display_default(value, style) + + def __display_range(self, value, style): + return self.__display_default(value, style) + + def __display_hostname(self, value, style): + return self.__display_default(value, style) + + def __display_user_account(self, value, style): + return self.__display_default(value, style) + + def __display_user_group(self, value, style): + return self.__display_default(value, style) + + def __display_timestamp(self, value, style): + return self.__display_default(value, style) + + def display(self, type, value, style='short'): + displayValue = value + if value != None: + displayValue = self.displayFunctions[type](value, style) + + return displayValue + +typeValidatorInstance = typeValidator() + +def is_valid_type(type): + """Returns true if type is valid.""" + + return typeValidatorInstance.is_valid_type(type) + +def type_info(type): + """Returns type info dictionary.""" + + return typeValidatorInstance.type_info(type) + +def verify(type, value, allowNone=False): + """Returns a normalized valueInfo dictionary.""" + + return typeValidatorInstance.verify(type, value, allowNone) + +def __normalize(map, val, factor): + normFactor = string.lower(factor) + normVal = float(val) + return int(normVal * map[normFactor]) + +def normalize_size(size, factor): + """ Normalize a size to bytes. + + size - number of B, KB, MB, GB, TB, or PB + factor - size factor (case insensitive): + b | bytes - bytes + k | kb - kilobytes + m | mb - megabytes + g | gb - gigabytes + t | tb - terabytes + p | pb - petabytes + """ + + return __normalize(sizeFactors, size, factor) + +def normalize_freq(freq, factor): + """ Normalize a frequency to hertz. + + freq - number of Hz, Khz, Mhz, Ghz, Thz, or Phz + factor - size factor (case insensitive): + Hz - Hertz + Mhz - Megahertz + Ghz - Gigahertz + Thz - Terahertz + Phz - Petahertz + """ + + return __normalize(freqFactors, freq, factor) diff --git a/src/contrib/hod/hodlib/Common/util.py b/src/contrib/hod/hodlib/Common/util.py new file mode 100755 index 0000000..3d5cb6f --- /dev/null +++ b/src/contrib/hod/hodlib/Common/util.py @@ -0,0 +1,309 @@ +#Licensed to the Apache Software Foundation (ASF) under one +#or more contributor license agreements. See the NOTICE file +#distributed with this work for additional information +#regarding copyright ownership. The ASF licenses this file +#to you under the Apache License, Version 2.0 (the +#"License"); you may not use this file except in compliance +#with the License. You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. +import errno, sys, os, traceback, stat, socket, re, warnings, signal + +from hodlib.Common.tcp import tcpSocket, tcpError +from hodlib.Common.threads import simpleCommand + +setUGV = { 'S_ISUID' : 2, 'S_ISGID' : 1, 'S_ISVTX' : 0 } +reEscapeSeq = r"\\(.)?" +reEscapeSeq = re.compile(reEscapeSeq) + +HOD_INTERRUPTED_CODE = 127 +HOD_INTERRUPTED_MESG = "Hod interrupted. Cleaning up and exiting" +TORQUE_USER_LIMITS_COMMENT_FIELD = "User-limits exceeded. " + \ + "Requested:([0-9]*) Used:([0-9]*) MaxLimit:([0-9]*)" +TORQUE_USER_LIMITS_EXCEEDED_MSG = "Requested number of nodes exceeded " + \ + "maximum user limits. 
" + +class AlarmException(Exception): + def __init__(self, msg=''): + self.message = msg + Exception.__init__(self, msg) + + def __repr__(self): + return self.message + +def isProcessRunning(pid): + '''Check if a process is running, by sending it a 0 signal, and checking for errors''' + # This method is documented in some email threads on the python mailing list. + # For e.g.: http://mail.python.org/pipermail/python-list/2002-May/144522.html + try: + os.kill(pid, 0) + return True + except OSError, err: + return err.errno == errno.EPERM + +def untar(file, targetDir): + status = False + command = 'tar -C %s -zxf %s' % (targetDir, file) + commandObj = simpleCommand('untar', command) + commandObj.start() + commandObj.wait() + commandObj.join() + if commandObj.exit_code() == 0: + status = True + + return status + +def tar(tarFile, tarDirectory, tarList): + currentDir = os.getcwd() + os.chdir(tarDirectory) + status = False + command = 'tar -czf %s ' % (tarFile) + + for file in tarList: + command = "%s%s " % (command, file) + + commandObj = simpleCommand('tar', command) + commandObj.start() + commandObj.wait() + commandObj.join() + if commandObj.exit_code() == 0: + status = True + else: + status = commandObj.exit_status_string() + + os.chdir(currentDir) + + return status + +def to_http_url(list): + """convert [hostname, port] to a http url""" + str = '' + str = "http://%s:%s" % (list[0], list[1]) + + return str + +def get_exception_string(): + (type, value, tb) = sys.exc_info() + exceptList = traceback.format_exception(type, value, tb) + exceptString = '' + for line in exceptList: + exceptString = "%s%s" % (exceptString, line) + + return exceptString + +def get_exception_error_string(): + (type, value, tb) = sys.exc_info() + if value: + exceptString = "%s %s" % (type, value) + else: + exceptString = type + + return exceptString + +def check_timestamp(timeStamp): + """ Checks the validity of a timeStamp. 
+ + timeStamp - (YYYY-MM-DD HH:MM:SS in UTC) + + returns True or False + """ + isValid = True + + try: + timeStruct = time.strptime(timeStamp, "%Y-%m-%d %H:%M:%S") + except: + isValid = False + + return isValid + +def sig_wrapper(sigNum, handler, *args): + if args: + handler(args) + else: + handler() + +def get_perms(filename): + mode = stat.S_IMODE(os.stat(filename)[stat.ST_MODE]) + permsString = '' + permSet = 0 + place = 2 + for who in "USR", "GRP", "OTH": + for what in "R", "W", "X": + if mode & getattr(stat,"S_I"+what+who): + permSet = permSet + 2**place + place = place - 1 + + permsString = "%s%s" % (permsString, permSet) + permSet = 0 + place = 2 + + permSet = 0 + for permFlag in setUGV.keys(): + if mode & getattr(stat, permFlag): + permSet = permSet + 2**setUGV[permFlag] + + permsString = "%s%s" % (permSet, permsString) + + return permsString + +def local_fqdn(): + """Return a system's true FQDN rather than any aliases, which are + occasionally returned by socket.gethostname.""" + + fqdn = None + me = os.uname()[1] + nameInfo=socket.gethostbyname_ex(me) + nameInfo[1].append(nameInfo[0]) + for name in nameInfo[1]: + if name.count(".") and name.startswith(me): + fqdn = name + if fqdn == None: + fqdn = me + return(fqdn) + +def need_to_allocate(allocated, config, command): + status = True + + if allocated.isSet(): + status = False + elif re.search("\s*dfs.*$", command) and \ + config['gridservice-hdfs']['external']: + status = False + elif config['gridservice-mapred']['external']: + status = False + + return status + +def filter_warnings(): + warnings.filterwarnings('ignore', + message=".*?'with' will become a reserved keyword.*") + +def args_to_string(list): + """return a string argument space seperated""" + arg = '' + for item in list: + arg = "%s%s " % (arg, item) + return arg[:-1] + +def replace_escapes(object): + """ replace any escaped character. e.g \, with , \= with = and so on """ + # here object is either a config object or a options object + for section in object._mySections: + for option in object._configDef[section].keys(): + if object[section].has_key(option): + if object._configDef[section][option]['type'] == 'keyval': + keyValDict = object[section][option] + object[section][option] = {} + for (key,value) in keyValDict.iteritems(): + match = reEscapeSeq.search(value) + if match: + value = reEscapeSeq.sub(r"\1", value) + object[section][option][key] = value + +def hadoopVersion(hadoopDir, java_home, log): + # Determine the version of hadoop being used by executing the + # hadoop version command. 
Code earlier in idleTracker.py + hadoopVersion = { 'major' : None, 'minor' : None } + hadoopPath = os.path.join(hadoopDir, 'bin', 'hadoop') + cmd = "%s version" % hadoopPath + log.debug('Executing command %s to find hadoop version' % cmd) + env = os.environ + env['JAVA_HOME'] = java_home + hadoopVerCmd = simpleCommand('HadoopVersion', cmd, env) + hadoopVerCmd.start() + hadoopVerCmd.wait() + hadoopVerCmd.join() + if hadoopVerCmd.exit_code() == 0: + verLine = hadoopVerCmd.output()[0] + log.debug('Version from hadoop command: %s' % verLine) + hadoopVerRegExp = re.compile("Hadoop ([0-9]+)\.([0-9]+).*") + verMatch = hadoopVerRegExp.match(verLine) + if verMatch != None: + hadoopVersion['major'] = verMatch.group(1) + hadoopVersion['minor'] = verMatch.group(2) + return hadoopVersion + + +def get_cluster_status(hdfsAddress, mapredAddress): + """Determine the status of the cluster based on socket availability + of HDFS and Map/Reduce.""" + status = 0 + + mapredSocket = tcpSocket(mapredAddress) + try: + mapredSocket.open() + mapredSocket.close() + except tcpError: + status = 14 + + hdfsSocket = tcpSocket(hdfsAddress) + try: + hdfsSocket.open() + hdfsSocket.close() + except tcpError: + if status > 0: + status = 10 + else: + status = 13 + + return status + +def parseEquals(list): + # takes in a list of keyval pairs e.g ['a=b','c=d'] and returns a + # dict e.g {'a'='b','c'='d'}. Used in GridService/{mapred.py/hdfs.py} and + # HodRing/hodring.py. No need for specially treating escaped =. as in \=, + # since all keys are generated by hod and don't contain such anomalies + dict = {} + for elems in list: + splits = elems.split('=') + dict[splits[0]] = splits[1] + return dict + +def getMapredSystemDirectory(mrSysDirRoot, userid, jobid): + return os.path.join(mrSysDirRoot, userid, 'mapredsystem', jobid) + +class HodInterrupt: + def __init__(self): + self.HodInterruptFlag = False + self.log = None + + def set_log(self, log): + self.log = log + + def init_signals(self): + + def sigStop(sigNum, handler): + sig_wrapper(sigNum, self.setFlag) + + signal.signal(signal.SIGTERM, sigStop) # 15 : software termination signal + signal.signal(signal.SIGQUIT, sigStop) # 3 : Quit program + signal.signal(signal.SIGINT, sigStop) # 2 ^C : Interrupt program + + def sig_wrapper(sigNum, handler, *args): + self.log.critical("Caught signal %s." % sigNum ) + + if args: + handler(args) + else: + handler() + + def setFlag(self, val = True): + self.HodInterruptFlag = val + + def isSet(self): + return self.HodInterruptFlag + +class HodInterruptException(Exception): + def __init__(self, value = ""): + self.value = value + + def __str__(self): + return repr(self.value) + +hodInterrupt = HodInterrupt() diff --git a/src/contrib/hod/hodlib/Common/xmlrpc.py b/src/contrib/hod/hodlib/Common/xmlrpc.py new file mode 100755 index 0000000..bb7ef8b --- /dev/null +++ b/src/contrib/hod/hodlib/Common/xmlrpc.py @@ -0,0 +1,57 @@ +#Licensed to the Apache Software Foundation (ASF) under one +#or more contributor license agreements. See the NOTICE file +#distributed with this work for additional information +#regarding copyright ownership. The ASF licenses this file +#to you under the Apache License, Version 2.0 (the +#"License"); you may not use this file except in compliance +#with the License. 
You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. +import xmlrpclib, time, random, signal +from hodlib.Common.util import hodInterrupt, HodInterruptException + +class hodXRClient(xmlrpclib.ServerProxy): + def __init__(self, uri, transport=None, encoding=None, verbose=0, + allow_none=0, installSignalHandlers=1, retryRequests=True, timeOut=15): + xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding, verbose, + allow_none) + self.__retryRequests = retryRequests + self.__timeOut = timeOut + if (installSignalHandlers!=0): + self.__set_alarm() + + def __set_alarm(self): + def alarm_handler(sigNum, sigHandler): + raise Exception("XML-RPC socket timeout.") + + signal.signal(signal.SIGALRM, alarm_handler) + + def __request(self, methodname, params): + response = None + retryWaitTime = 5 + random.randint(0, 5) + for i in range(0, 30): + signal.alarm(self.__timeOut) + try: + response = self._ServerProxy__request(methodname, params) + signal.alarm(0) + break + except Exception: + if self.__retryRequests: + if hodInterrupt.isSet(): + raise HodInterruptException() + time.sleep(retryWaitTime) + else: + raise Exception("hodXRClientTimeout") + + return response + + def __getattr__(self, name): + # magic method dispatcher + return xmlrpclib._Method(self.__request, name) + diff --git a/src/contrib/hod/hodlib/GridServices/__init__.py b/src/contrib/hod/hodlib/GridServices/__init__.py new file mode 100755 index 0000000..52138f2 --- /dev/null +++ b/src/contrib/hod/hodlib/GridServices/__init__.py @@ -0,0 +1,18 @@ +#Licensed to the Apache Software Foundation (ASF) under one +#or more contributor license agreements. See the NOTICE file +#distributed with this work for additional information +#regarding copyright ownership. The ASF licenses this file +#to you under the Apache License, Version 2.0 (the +#"License"); you may not use this file except in compliance +#with the License. You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. + +from mapred import MapReduce, MapReduceExternal +from hdfs import Hdfs, HdfsExternal diff --git a/src/contrib/hod/hodlib/GridServices/hdfs.py b/src/contrib/hod/hodlib/GridServices/hdfs.py new file mode 100755 index 0000000..11efd11 --- /dev/null +++ b/src/contrib/hod/hodlib/GridServices/hdfs.py @@ -0,0 +1,310 @@ +#Licensed to the Apache Software Foundation (ASF) under one +#or more contributor license agreements. See the NOTICE file +#distributed with this work for additional information +#regarding copyright ownership. The ASF licenses this file +#to you under the Apache License, Version 2.0 (the +#"License"); you may not use this file except in compliance +#with the License. 
You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. +"""define Hdfs as subclass of Service""" + +# -*- python -*- + +import os + +from service import * +from hodlib.Hod.nodePool import * +from hodlib.Common.desc import CommandDesc +from hodlib.Common.util import get_exception_string, parseEquals + +class HdfsExternal(MasterSlave): + """dummy proxy to external HDFS instance""" + + def __init__(self, serviceDesc, workDirs, version): + MasterSlave.__init__(self, serviceDesc, workDirs,None) + self.launchedMaster = True + self.masterInitialized = True + self.version = version + + def getMasterRequest(self): + return None + + def getMasterCommands(self, serviceDict): + return [] + + def getAdminCommands(self, serviceDict): + return [] + + def getWorkerCommands(self, serviceDict): + return [] + + def getMasterAddrs(self): + attrs = self.serviceDesc.getfinalAttrs() + addr = attrs['fs.default.name'] + return [addr] + + def setMasterParams(self, dict): + self.serviceDesc.dict['final-attrs']['fs.default.name'] = "%s:%s" % \ + (dict['host'], dict['fs_port']) + + if self.version < 16: + self.serviceDesc.dict['final-attrs']['dfs.info.port'] = \ + str(self.serviceDesc.dict['info_port']) + else: + # After Hadoop-2185 + self.serviceDesc.dict['final-attrs']['dfs.http.address'] = "%s:%s" % \ + (dict['host'], dict['info_port']) + + def getInfoAddrs(self): + attrs = self.serviceDesc.getfinalAttrs() + if self.version < 16: + addr = attrs['fs.default.name'] + k,v = addr.split( ":") + infoaddr = k + ':' + attrs['dfs.info.port'] + else: + # After Hadoop-2185 + infoaddr = attrs['dfs.http.address'] + return [infoaddr] + +class Hdfs(MasterSlave): + + def __init__(self, serviceDesc, nodePool, required_node, version, \ + format=True, upgrade=False, + workers_per_ring = 1): + MasterSlave.__init__(self, serviceDesc, nodePool, required_node) + self.masterNode = None + self.masterAddr = None + self.runAdminCommands = True + self.infoAddr = None + self._isLost = False + self.format = format + self.upgrade = upgrade + self.workers = [] + self.version = version + self.workers_per_ring = workers_per_ring + + def getMasterRequest(self): + req = NodeRequest(1, [], False) + return req + + def getMasterCommands(self, serviceDict): + + masterCommands = [] + if self.format: + masterCommands.append(self._getNameNodeCommand(True)) + + if self.upgrade: + masterCommands.append(self._getNameNodeCommand(False, True)) + else: + masterCommands.append(self._getNameNodeCommand(False)) + + return masterCommands + + def getAdminCommands(self, serviceDict): + + adminCommands = [] + if self.upgrade and self.runAdminCommands: + adminCommands.append(self._getNameNodeAdminCommand('-safemode wait')) + adminCommands.append(self._getNameNodeAdminCommand('-finalizeUpgrade', + True, True)) + + self.runAdminCommands = False + return adminCommands + + def getWorkerCommands(self, serviceDict): + workerCmds = [] + for id in range(1, self.workers_per_ring + 1): + workerCmds.append(self._getDataNodeCommand(str(id))) + + return workerCmds + + def setMasterNodes(self, list): + node = list[0] + self.masterNode = node + + def getMasterAddrs(self): + return [self.masterAddr] + + def getInfoAddrs(self): + return 
[self.infoAddr] + + def getWorkers(self): + return self.workers + + def setMasterParams(self, list): + dict = self._parseEquals(list) + self.masterAddr = dict['fs.default.name'] + k,v = self.masterAddr.split( ":") + self.masterNode = k + if self.version < 16: + self.infoAddr = self.masterNode + ':' + dict['dfs.info.port'] + else: + # After Hadoop-2185 + self.infoAddr = dict['dfs.http.address'] + + def _parseEquals(self, list): + return parseEquals(list) + + def _setWorkDirs(self, workDirs, envs, attrs, parentDirs, subDir): + namedir = None + hadooptmpdir = None + datadir = [] + + for p in parentDirs: + workDirs.append(p) + workDirs.append(os.path.join(p, subDir)) + dir = os.path.join(p, subDir, 'dfs-data') + datadir.append(dir) + if not hadooptmpdir: + # Not used currently, generating hadooptmpdir just in case + hadooptmpdir = os.path.join(p, subDir, 'hadoop-tmp') + + if not namedir: + namedir = os.path.join(p, subDir, 'dfs-name') + + workDirs.append(namedir) + workDirs.extend(datadir) + + # FIXME!! use csv + attrs['dfs.name.dir'] = namedir + attrs['hadoop.tmp.dir'] = hadooptmpdir + attrs['dfs.data.dir'] = ','.join(datadir) + envs['HADOOP_ROOT_LOGGER'] = "INFO,DRFA" + + + def _getNameNodeCommand(self, format=False, upgrade=False): + sd = self.serviceDesc + + parentDirs = self.workDirs + workDirs = [] + attrs = sd.getfinalAttrs().copy() + envs = sd.getEnvs().copy() + + if 'fs.default.name' not in attrs: + attrs['fs.default.name'] = 'fillinhostport' + + if self.version < 16: + if 'dfs.info.port' not in attrs: + attrs['dfs.info.port'] = 'fillinport' + else: + # Addressing Hadoop-2185, added the following. Earlier versions don't + # care about this + if 'dfs.http.address' not in attrs: + attrs['dfs.http.address'] = 'fillinhostport' + + self._setWorkDirs(workDirs, envs, attrs, parentDirs, 'hdfs-nn') + + dict = { 'name' : 'namenode' } + dict['program'] = os.path.join('bin', 'hadoop') + argv = ['namenode'] + if format: + argv.append('-format') + elif upgrade: + argv.append('-upgrade') + dict['argv'] = argv + dict['envs'] = envs + dict['pkgdirs'] = sd.getPkgDirs() + dict['workdirs'] = workDirs + dict['final-attrs'] = attrs + dict['attrs'] = sd.getAttrs() + if format: + dict['fg'] = 'true' + dict['stdin'] = 'Y' + cmd = CommandDesc(dict) + return cmd + + def _getNameNodeAdminCommand(self, adminCommand, wait=True, ignoreFailures=False): + sd = self.serviceDesc + + parentDirs = self.workDirs + workDirs = [] + attrs = sd.getfinalAttrs().copy() + envs = sd.getEnvs().copy() + nn = self.masterAddr + + if nn == None: + raise ValueError, "Can't get namenode address" + + attrs['fs.default.name'] = nn + + self._setWorkDirs(workDirs, envs, attrs, parentDirs, 'hdfs-nn') + + dict = { 'name' : 'dfsadmin' } + dict['program'] = os.path.join('bin', 'hadoop') + argv = ['dfsadmin'] + argv.append(adminCommand) + dict['argv'] = argv + dict['envs'] = envs + dict['pkgdirs'] = sd.getPkgDirs() + dict['workdirs'] = workDirs + dict['final-attrs'] = attrs + dict['attrs'] = sd.getAttrs() + if wait: + dict['fg'] = 'true' + dict['stdin'] = 'Y' + if ignoreFailures: + dict['ignorefailures'] = 'Y' + cmd = CommandDesc(dict) + return cmd + + def _getDataNodeCommand(self, id): + + sd = self.serviceDesc + + parentDirs = self.workDirs + workDirs = [] + attrs = sd.getfinalAttrs().copy() + envs = sd.getEnvs().copy() + nn = self.masterAddr + + if nn == None: + raise ValueError, "Can't get namenode address" + + attrs['fs.default.name'] = nn + + if self.version < 16: + if 'dfs.datanode.port' not in attrs: + attrs['dfs.datanode.port'] = 
'fillinport' + if 'dfs.datanode.info.port' not in attrs: + attrs['dfs.datanode.info.port'] = 'fillinport' + else: + # Adding the following. Hadoop-2185 + if 'dfs.datanode.address' not in attrs: + attrs['dfs.datanode.address'] = 'fillinhostport' + if 'dfs.datanode.http.address' not in attrs: + attrs['dfs.datanode.http.address'] = 'fillinhostport' + + if self.version >= 18: + # After HADOOP-3283 + # TODO: check for major as well as minor versions + attrs['dfs.datanode.ipc.address'] = 'fillinhostport' + + # unique workdirs in case of multiple datanodes per hodring + pd = [] + for dir in parentDirs: + dir = dir + "-" + id + pd.append(dir) + parentDirs = pd + # end of unique workdirs + + self._setWorkDirs(workDirs, envs, attrs, parentDirs, 'hdfs-dn') + + dict = { 'name' : 'datanode' } + dict['program'] = os.path.join('bin', 'hadoop') + dict['argv'] = ['datanode'] + dict['envs'] = envs + dict['pkgdirs'] = sd.getPkgDirs() + dict['workdirs'] = workDirs + dict['final-attrs'] = attrs + dict['attrs'] = sd.getAttrs() + + cmd = CommandDesc(dict) + return cmd + diff --git a/src/contrib/hod/hodlib/GridServices/mapred.py b/src/contrib/hod/hodlib/GridServices/mapred.py new file mode 100755 index 0000000..086f052 --- /dev/null +++ b/src/contrib/hod/hodlib/GridServices/mapred.py @@ -0,0 +1,272 @@ +#Licensed to the Apache Software Foundation (ASF) under one +#or more contributor license agreements. See the NOTICE file +#distributed with this work for additional information +#regarding copyright ownership. The ASF licenses this file +#to you under the Apache License, Version 2.0 (the +#"License"); you may not use this file except in compliance +#with the License. You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
+"""define MapReduce as subclass of Service""" + +# -*- python -*- + +import os, copy, time + +from service import * +from hodlib.Hod.nodePool import * +from hodlib.Common.desc import CommandDesc +from hodlib.Common.util import get_exception_string, parseEquals + +class MapReduceExternal(MasterSlave): + """dummy proxy to external MapReduce instance""" + + def __init__(self, serviceDesc, workDirs, version): + MasterSlave.__init__(self, serviceDesc, workDirs,None) + self.launchedMaster = True + self.masterInitialized = True + self.version = version + + def getMasterRequest(self): + return None + + def getMasterCommands(self, serviceDict): + return [] + + def getAdminCommands(self, serviceDict): + return [] + + def getWorkerCommands(self, serviceDict): + return [] + + def getMasterAddrs(self): + attrs = self.serviceDesc.getfinalAttrs() + addr = attrs['mapred.job.tracker'] + return [addr] + + def needsMore(self): + return 0 + + def needsLess(self): + return 0 + + def setMasterParams(self, dict): + self.serviceDesc['final-attrs']['mapred.job.tracker'] = "%s:%s" % (dict['host'], + dict['tracker_port']) + + if self.version < 16: + self.serviceDesc.dict['final-attrs']['mapred.job.tracker.info.port'] = \ + str(self.serviceDesc.dict['info_port']) + else: + # After Hadoop-2185 + self.serviceDesc['final-attrs']['mapred.job.tracker.http.address'] = \ + "%s:%s" %(dict['host'], dict['info_port']) + + def getInfoAddrs(self): + attrs = self.serviceDesc.getfinalAttrs() + if self.version < 16: + addr = attrs['mapred.job.tracker'] + k,v = addr.split( ":") + infoaddr = k + ':' + attrs['mapred.job.tracker.info.port'] + else: + # After Hadoop-2185 + # Note: earlier,we never respected mapred.job.tracker.http.address + infoaddr = attrs['mapred.job.tracker.http.address'] + return [infoaddr] + +class MapReduce(MasterSlave): + + def __init__(self, serviceDesc, workDirs,required_node, version, + workers_per_ring = 1): + MasterSlave.__init__(self, serviceDesc, workDirs,required_node) + + self.masterNode = None + self.masterAddr = None + self.infoAddr = None + self.workers = [] + self.required_node = required_node + self.version = version + self.workers_per_ring = workers_per_ring + + def isLaunchable(self, serviceDict): + hdfs = serviceDict['hdfs'] + if (hdfs.isMasterInitialized()): + return True + return False + + def getMasterRequest(self): + req = NodeRequest(1, [], False) + return req + + def getMasterCommands(self, serviceDict): + + hdfs = serviceDict['hdfs'] + + cmdDesc = self._getJobTrackerCommand(hdfs) + return [cmdDesc] + + def getAdminCommands(self, serviceDict): + return [] + + def getWorkerCommands(self, serviceDict): + + hdfs = serviceDict['hdfs'] + + workerCmds = [] + for id in range(1, self.workers_per_ring + 1): + workerCmds.append(self._getTaskTrackerCommand(str(id), hdfs)) + + return workerCmds + + def setMasterNodes(self, list): + node = list[0] + self.masterNode = node + + def getMasterAddrs(self): + return [self.masterAddr] + + def getInfoAddrs(self): + return [self.infoAddr] + + def getWorkers(self): + return self.workers + + def requiredNode(self): + return self.required_host + + def setMasterParams(self, list): + dict = self._parseEquals(list) + self.masterAddr = dict['mapred.job.tracker'] + k,v = self.masterAddr.split(":") + self.masterNode = k + if self.version < 16: + self.infoAddr = self.masterNode + ':' + dict['mapred.job.tracker.info.port'] + else: + # After Hadoop-2185 + self.infoAddr = dict['mapred.job.tracker.http.address'] + + def _parseEquals(self, list): + return parseEquals(list) + 
+ def _setWorkDirs(self, workDirs, envs, attrs, parentDirs, subDir): + local = [] + system = None + temp = None + hadooptmpdir = None + dfsclient = [] + + for p in parentDirs: + workDirs.append(p) + workDirs.append(os.path.join(p, subDir)) + dir = os.path.join(p, subDir, 'mapred-local') + local.append(dir) + if not system: + system = os.path.join(p, subDir, 'mapred-system') + if not temp: + temp = os.path.join(p, subDir, 'mapred-temp') + if not hadooptmpdir: + # Not used currently, generating hadooptmpdir just in case + hadooptmpdir = os.path.join(p, subDir, 'hadoop-tmp') + dfsclientdir = os.path.join(p, subDir, 'dfs-client') + dfsclient.append(dfsclientdir) + workDirs.append(dfsclientdir) + # FIXME!! use csv + attrs['mapred.local.dir'] = ','.join(local) + attrs['mapred.system.dir'] = 'fillindir' + attrs['mapred.temp.dir'] = temp + attrs['hadoop.tmp.dir'] = hadooptmpdir + + + envs['HADOOP_ROOT_LOGGER'] = "INFO,DRFA" + + + def _getJobTrackerCommand(self, hdfs): + sd = self.serviceDesc + + parentDirs = self.workDirs + workDirs = [] + attrs = sd.getfinalAttrs().copy() + envs = sd.getEnvs().copy() + + if 'mapred.job.tracker' not in attrs: + attrs['mapred.job.tracker'] = 'fillinhostport' + + if self.version < 16: + if 'mapred.job.tracker.info.port' not in attrs: + attrs['mapred.job.tracker.info.port'] = 'fillinport' + else: + # Addressing Hadoop-2185, + if 'mapred.job.tracker.http.address' not in attrs: + attrs['mapred.job.tracker.http.address'] = 'fillinhostport' + + attrs['fs.default.name'] = hdfs.getMasterAddrs()[0] + + self._setWorkDirs(workDirs, envs, attrs, parentDirs, 'mapred-jt') + + dict = { 'name' : 'jobtracker' } + dict['version'] = self.version + dict['program'] = os.path.join('bin', 'hadoop') + dict['argv'] = ['jobtracker'] + dict['envs'] = envs + dict['pkgdirs'] = sd.getPkgDirs() + dict['workdirs'] = workDirs + dict['final-attrs'] = attrs + dict['attrs'] = sd.getAttrs() + cmd = CommandDesc(dict) + return cmd + + def _getTaskTrackerCommand(self, id, hdfs): + + sd = self.serviceDesc + + parentDirs = self.workDirs + workDirs = [] + attrs = sd.getfinalAttrs().copy() + envs = sd.getEnvs().copy() + jt = self.masterAddr + + if jt == None: + raise ValueError, "Can't get job tracker address" + + attrs['mapred.job.tracker'] = jt + attrs['fs.default.name'] = hdfs.getMasterAddrs()[0] + + if self.version < 16: + if 'tasktracker.http.port' not in attrs: + attrs['tasktracker.http.port'] = 'fillinport' + # earlier to 16, tasktrackers always took ephemeral port 0 for + # tasktracker.report.bindAddress + else: + # Adding the following. 
Hadoop-2185 + if 'mapred.task.tracker.report.address' not in attrs: + attrs['mapred.task.tracker.report.address'] = 'fillinhostport' + if 'mapred.task.tracker.http.address' not in attrs: + attrs['mapred.task.tracker.http.address'] = 'fillinhostport' + + # unique parentDirs in case of multiple tasktrackers per hodring + pd = [] + for dir in parentDirs: + dir = dir + "-" + id + pd.append(dir) + parentDirs = pd + # end of unique workdirs + + self._setWorkDirs(workDirs, envs, attrs, parentDirs, 'mapred-tt') + + dict = { 'name' : 'tasktracker' } + dict['program'] = os.path.join('bin', 'hadoop') + dict['argv'] = ['tasktracker'] + dict['envs'] = envs + dict['pkgdirs'] = sd.getPkgDirs() + dict['workdirs'] = workDirs + dict['final-attrs'] = attrs + dict['attrs'] = sd.getAttrs() + cmd = CommandDesc(dict) + return cmd + diff --git a/src/contrib/hod/hodlib/GridServices/service.py b/src/contrib/hod/hodlib/GridServices/service.py new file mode 100755 index 0000000..f0c7f5c --- /dev/null +++ b/src/contrib/hod/hodlib/GridServices/service.py @@ -0,0 +1,266 @@ +#Licensed to the Apache Software Foundation (ASF) under one +#or more contributor license agreements. See the NOTICE file +#distributed with this work for additional information +#regarding copyright ownership. The ASF licenses this file +#to you under the Apache License, Version 2.0 (the +#"License"); you may not use this file except in compliance +#with the License. You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. +"""defines Service as abstract interface""" + +# -*- python -*- +import random, socket + +class Service: + """ the service base class that all the + other services inherit from. """ + def __init__(self, serviceDesc, workDirs): + self.serviceDesc = serviceDesc + self.workDirs = workDirs + + def getName(self): + return self.serviceDesc.getName() + + def getInfoAddrs(self): + """Return a list of addresses that provide + information about the servie""" + return [] + + def isLost(self): + """True if the service is down""" + raise NotImplementedError + + def addNodes(self, nodeList): + """add nodeSet""" + raise NotImplementedError + + def removeNodes(self, nodeList): + """remove a nodeset""" + raise NotImplementedError + + def getWorkers(self): + raise NotImplementedError + + def needsMore(self): + """return number of nodes the service wants to add""" + raise NotImplementedError + + def needsLess(self): + """return number of nodes the service wants to remove""" + raise NotImplementedError + +class MasterSlave(Service): + """ the base class for a master slave + service architecture. """ + def __init__(self, serviceDesc, workDirs,requiredNode): + Service.__init__(self, serviceDesc, workDirs) + self.launchedMaster = False + self.masterInitialized = False + self.masterAddress = 'none' + self.requiredNode = requiredNode + self.failedMsg = None + self.masterFailureCount = 0 + + def getRequiredNode(self): + return self.requiredNode + + def getMasterRequest(self): + """ the number of master you need + to run for this service. """ + raise NotImplementedError + + def isLaunchable(self, serviceDict): + """ if your service does not depend on + other services. is set to true by default. 
""" + return True + + def getMasterCommands(self, serviceDict): + """ a list of master commands you + want to run for this service. """ + raise NotImplementedError + + def getAdminCommands(self, serviceDict): + """ a list of admin commands you + want to run for this service. """ + raise NotImplementedError + + def getWorkerCommands(self, serviceDict): + """ a list of worker commands you want to + run for this service. """ + raise NotImplementedError + + def setMasterNodes(self, list): + """ set the status of master nodes + after they start running on a node cluster. """ + raise NotImplementedError + + def addNodes(self, list): + """ add nodes to a service. Not implemented + currently. """ + raise NotImplementedError + + def getMasterAddrs(self): + """ return the addresses of master. the + hostname:port to which worker nodes should + connect. """ + raise NotImplementedError + + def setMasterParams(self, list): + """ set the various master params + depending on what each hodring set + the master params to. """ + raise NotImplementedError + + def setlaunchedMaster(self): + """ set the status of master launched + to true. """ + self.launchedMaster = True + + def isMasterLaunched(self): + """ return if a master has been launched + for the service or not. """ + return self.launchedMaster + + def isMasterInitialized(self): + """ return if a master if launched + has been initialized or not. """ + return self.masterInitialized + + def setMasterInitialized(self): + """ set the master initialized to + true. """ + self.masterInitialized = True + # Reset failure related variables, as master is initialized successfully. + self.masterFailureCount = 0 + self.failedMsg = None + + def getMasterAddress(self): + """ it needs to change to reflect + more that one masters. Currently it + keeps a knowledge of where the master + was launched and to keep track if it was actually + up or not. """ + return self.masterAddress + + def setMasterAddress(self, addr): + self.masterAddress = addr + + def isExternal(self): + return self.serviceDesc.isExternal() + + def setMasterFailed(self, err): + """Sets variables related to Master failure""" + self.masterFailureCount += 1 + self.failedMsg = err + # When command is sent to HodRings, this would have been set to True. + # Reset it to reflect the correct status. + self.launchedMaster = False + + def getMasterFailed(self): + return self.failedMsg + + def getMasterFailureCount(self): + return self.masterFailureCount + +class NodeRequest: + """ A class to define + a node request. """ + def __init__(self, n, required = [], preferred = [], isPreemptee = True): + self.numNodes = n + self.preferred = preferred + self.isPreemptee = isPreemptee + self.required = required + + def setNumNodes(self, n): + self.numNodes = n + + def setPreferredList(self, list): + self.preferred = list + + def setIsPreemptee(self, flag): + self.isPreemptee = flag + + +class ServiceUtil: + """ this class should be moved out of + service.py to a util file""" + localPortUsed = {} + + def getUniqRandomPort(h=None, low=50000, high=60000, retry=900, log=None): + """This allocates a randome free port between low and high""" + # We use a default value of 900 retries, which takes an agreeable + # time limit of ~ 6.2 seconds to check 900 ports, in the worse case + # of no available port in those 900. 
+ + while retry > 0: + n = random.randint(low, high) + if n in ServiceUtil.localPortUsed: + continue + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + if not h: + h = socket.gethostname() + avail = False + if log: log.debug("Trying to see if port %s is available"% n) + try: + s.bind((h, n)) + if log: log.debug("Yes, port %s is available" % n) + avail = True + except socket.error,e: + if log: log.debug("Could not bind to the port %s. Reason %s" % (n,e)) + retry -= 1 + pass + # The earlier code that used to be here had syntax errors. The code path + # couldn't be followd anytime, so the error remained uncaught. + # This time I stumbled upon the error + s.close() + + if avail: + ServiceUtil.localPortUsed[n] = True + return n + raise ValueError, "Can't find unique local port between %d and %d" % (low, high) + + getUniqRandomPort = staticmethod(getUniqRandomPort) + + def getUniqPort(h=None, low=40000, high=60000, retry=900, log=None): + """get unique port on a host that can be used by service + This and its consumer code should disappear when master + nodes get allocatet by nodepool""" + + # We use a default value of 900 retries, which takes an agreeable + # time limit of ~ 6.2 seconds to check 900 ports, in the worse case + # of no available port in those 900. + + n = low + while retry > 0: + n = n + 1 + if n in ServiceUtil.localPortUsed: + continue + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + if not h: + h = socket.gethostname() + avail = False + if log: log.debug("Trying to see if port %s is available"% n) + try: + s.bind((h, n)) + if log: log.debug("Yes, port %s is available" % n) + avail = True + except socket.error,e: + if log: log.debug("Could not bind to the port %s. Reason %s" % (n,e)) + retry -= 1 + pass + s.close() + + if avail: + ServiceUtil.localPortUsed[n] = True + return n + + raise ValueError, "Can't find unique local port between %d and %d" % (low, high) + + getUniqPort = staticmethod(getUniqPort) diff --git a/src/contrib/hod/hodlib/Hod/__init__.py b/src/contrib/hod/hodlib/Hod/__init__.py new file mode 100755 index 0000000..12c2f1e --- /dev/null +++ b/src/contrib/hod/hodlib/Hod/__init__.py @@ -0,0 +1,15 @@ +#Licensed to the Apache Software Foundation (ASF) under one +#or more contributor license agreements. See the NOTICE file +#distributed with this work for additional information +#regarding copyright ownership. The ASF licenses this file +#to you under the Apache License, Version 2.0 (the +#"License"); you may not use this file except in compliance +#with the License. You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. diff --git a/src/contrib/hod/hodlib/Hod/hadoop.py b/src/contrib/hod/hodlib/Hod/hadoop.py new file mode 100755 index 0000000..616d775 --- /dev/null +++ b/src/contrib/hod/hodlib/Hod/hadoop.py @@ -0,0 +1,747 @@ +#Licensed to the Apache Software Foundation (ASF) under one +#or more contributor license agreements. See the NOTICE file +#distributed with this work for additional information +#regarding copyright ownership. The ASF licenses this file +#to you under the Apache License, Version 2.0 (the +#"License"); you may not use this file except in compliance +#with the License. 
You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. +"""define WorkLoad as abstract interface for user job""" +# -*- python -*- + +import os, time, sys, shutil, exceptions, re, threading, signal, urllib, pprint, math + +from HTMLParser import HTMLParser + +import xml.dom.minidom +import xml.dom.pulldom +from xml.dom import getDOMImplementation + +from hodlib.Common.util import * +from hodlib.Common.xmlrpc import hodXRClient +from hodlib.Common.miniHTMLParser import miniHTMLParser +from hodlib.Common.nodepoolutil import NodePoolUtil +from hodlib.Common.tcp import tcpError, tcpSocket + +reCommandDelimeterString = r"(?> sitefile, topElement.toxml() + sitefile.close() + +class hadoopCluster: + def __init__(self, cfg, log): + self.__cfg = cfg + self.__log = log + self.__changedClusterParams = [] + + self.__hostname = local_fqdn() + self.__svcrgyClient = None + self.__nodePool = NodePoolUtil.getNodePool(self.__cfg['nodepooldesc'], + self.__cfg, self.__log) + self.__hadoopCfg = hadoopConfig() + self.jobId = None + self.mapredInfo = None + self.hdfsInfo = None + self.ringmasterXRS = None + + def __get_svcrgy_client(self): + svcrgyUrl = to_http_url(self.__cfg['hod']['xrs-address']) + return hodXRClient(svcrgyUrl) + + def __get_service_status(self): + serviceData = self.__get_service_data() + + status = True + hdfs = False + mapred = False + + for host in serviceData.keys(): + for item in serviceData[host]: + service = item.keys() + if service[0] == 'hdfs.grid' and \ + self.__cfg['gridservice-hdfs']['external'] == False: + hdfs = True + elif service[0] == 'mapred.grid': + mapred = True + + if not mapred: + status = "mapred" + + if not hdfs and self.__cfg['gridservice-hdfs']['external'] == False: + if status != True: + status = "mapred and hdfs" + else: + status = "hdfs" + + return status + + def __get_service_data(self): + registry = to_http_url(self.__cfg['hod']['xrs-address']) + serviceData = self.__svcrgyClient.getServiceInfo( + self.__cfg['hod']['userid'], self.__setup.np.getNodePoolId()) + + return serviceData + + def __check_job_status(self): + failureCount = 0 + status = False + state = 'Q' + userLimitsFirstFlag = True + + while (state=='Q') or (state==False): + if hodInterrupt.isSet(): + raise HodInterruptException() + + jobInfo = self.__nodePool.getJobInfo() + state = jobInfo['job_state'] + self.__log.debug('job state %s' % state) + if state == False: + failureCount += 1 + if (failureCount >= self.__cfg['hod']['job-status-query-failure-retries']): + self.__log.debug('Number of retries reached max limit while querying job status') + break + time.sleep(self.__cfg['hod']['job-command-failure-interval']) + elif state!='Q': + break + else: + self.__log.debug('querying for job status after job-status-query-interval') + time.sleep(self.__cfg['hod']['job-status-query-interval']) + + if self.__cfg['hod'].has_key('job-feasibility-attr') and \ + self.__cfg['hod']['job-feasibility-attr']: + (status, msg) = self.__isJobFeasible() + if status == "Never": + self.__log.critical(TORQUE_USER_LIMITS_EXCEEDED_MSG + msg + \ + "This cluster cannot be allocated now.") + return -1 + elif status == False: + if userLimitsFirstFlag: + 
self.__log.critical(TORQUE_USER_LIMITS_EXCEEDED_MSG + msg + \ + "This cluster allocation will succeed only after other " + \ + "clusters are deallocated.") + userLimitsFirstFlag = False + + if state and state != 'C': + status = True + + return status + + def __isJobFeasible(self): + return self.__nodePool.isJobFeasible() + + def __get_ringmaster_client(self): + ringmasterXRS = None + + ringList = self.__svcrgyClient.getServiceInfo( + self.__cfg['ringmaster']['userid'], self.__nodePool.getServiceId(), + 'ringmaster', 'hod') + + if ringList and len(ringList): + if isinstance(ringList, list): + ringmasterXRS = ringList[0]['xrs'] + else: + count = 0 + waitTime = self.__cfg['hod']['allocate-wait-time'] + + while count < waitTime: + if hodInterrupt.isSet(): + raise HodInterruptException() + + ringList = self.__svcrgyClient.getServiceInfo( + self.__cfg['ringmaster']['userid'], self.__nodePool.getServiceId(), + 'ringmaster', + 'hod') + + if ringList and len(ringList): + if isinstance(ringList, list): + ringmasterXRS = ringList[0]['xrs'] + + if ringmasterXRS is not None: + break + else: + time.sleep(1) + count = count + 1 + # check to see if the job exited by any chance in that time: + if (count % self.__cfg['hod']['job-status-query-interval'] == 0): + if not self.__check_job_status(): + break + return ringmasterXRS + + def __init_hadoop_service(self, serviceName, xmlrpcClient): + status = True + serviceAddress = None + serviceInfo = None + + for i in range(0, 250): + try: + if hodInterrupt.isSet(): + raise HodInterruptException() + + serviceAddress = xmlrpcClient.getServiceAddr(serviceName) + if serviceAddress: + if serviceAddress == 'not found': + time.sleep(1) + # check to see if the job exited by any chance in that time: + if ((i+1) % self.__cfg['hod']['job-status-query-interval'] == 0): + if not self.__check_job_status(): + break + else: + serviceInfo = xmlrpcClient.getURLs(serviceName) + break + except HodInterruptException,h : + raise h + except: + self.__log.critical("'%s': ringmaster xmlrpc error." % serviceName) + self.__log.debug(get_exception_string()) + status = False + break + + if serviceAddress == 'not found' or not serviceAddress: + self.__log.critical("Failed to retrieve '%s' service address." % + serviceName) + status = False + elif serviceAddress.startswith("Error: "): + errs = serviceAddress[len("Error: "):] + self.__log.critical("Cluster could not be allocated because of the following errors.\n%s" % \ + errs) + status = False + else: + try: + self.__svcrgyClient.registerService(self.__cfg['hodring']['userid'], + self.jobId, self.__hostname, + serviceName, 'grid', serviceInfo) + + except HodInterruptException, h: + raise h + except: + self.__log.critical("'%s': registry xmlrpc error." 
% serviceName) + self.__log.debug(get_exception_string()) + status = False + + return status, serviceAddress, serviceInfo + + def __collect_jobtracker_ui(self, dir): + + link = self.mapredInfo + "/jobtracker.jsp" + parser = miniHTMLParser() + parser.setBaseUrl(self.mapredInfo) + node_cache = {} + + self.__log.debug("collect_jobtracker_ui seeded with " + link) + + def alarm_handler(number, stack): + raise AlarmException("timeout") + + signal.signal(signal.SIGALRM, alarm_handler) + + input = None + while link: + self.__log.debug("link: %s" % link) + # taskstats.jsp,taskdetails.jsp not included since too many to collect + if re.search( + "jobfailures\.jsp|jobtracker\.jsp|jobdetails\.jsp|jobtasks\.jsp", + link): + + for i in range(1,5): + if hodInterrupt.isSet(): + raise HodInterruptException() + try: + input = urllib.urlopen(link) + break + except: + self.__log.debug(get_exception_string()) + time.sleep(1) + + if input: + out = None + + self.__log.debug("collecting " + link + "...") + filename = re.sub(self.mapredInfo, "", link) + filename = dir + "/" + filename + filename = re.sub("http://","", filename) + filename = re.sub("[\?\&=:]","_",filename) + filename = filename + ".html" + + try: + tempdir, tail = os.path.split(filename) + if not os.path.exists(tempdir): + os.makedirs(tempdir) + except: + self.__log.debug(get_exception_string()) + + out = open(filename, 'w') + + bufSz = 8192 + + signal.alarm(10) + + try: + self.__log.debug("Starting to grab: %s" % link) + buf = input.read(bufSz) + + while len(buf) > 0: + # Feed the file into the HTML parser + parser.feed(buf) + + # Re-write the hrefs in the file + p = re.compile("\?(.+?)=(.+?)") + buf = p.sub(r"_\1_\2",buf) + p= re.compile("&(.+?)=(.+?)") + buf = p.sub(r"_\1_\2",buf) + p = re.compile("http://(.+?):(\d+)?") + buf = p.sub(r"\1_\2/",buf) + buf = re.sub("href=\"/","href=\"",buf) + p = re.compile("href=\"(.+?)\"") + buf = p.sub(r"href=\1.html",buf) + + out.write(buf) + buf = input.read(bufSz) + + signal.alarm(0) + input.close() + if out: + out.close() + + self.__log.debug("Finished grabbing: %s" % link) + except AlarmException: + if hodInterrupt.isSet(): + raise HodInterruptException() + if out: out.close() + if input: input.close() + + self.__log.debug("Failed to retrieve: %s" % link) + else: + self.__log.debug("Failed to retrieve: %s" % link) + + # Get the next link in level traversal order + link = parser.getNextLink() + + parser.close() + + def check_cluster(self, clusterInfo): + status = 0 + + if 'mapred' in clusterInfo: + mapredAddress = clusterInfo['mapred'][7:] + hdfsAddress = clusterInfo['hdfs'][7:] + status = get_cluster_status(hdfsAddress, mapredAddress) + if status == 0: + status = 12 + else: + status = 15 + + return status + + def is_cluster_deallocated(self, jobId): + """Returns True if the JobId that represents this cluster + is in the Completed or exiting state.""" + jobInfo = self.__nodePool.getJobInfo(jobId) + state = None + if jobInfo is not None and jobInfo.has_key('job_state'): + state = jobInfo['job_state'] + return ((state == 'C') or (state == 'E')) + + def cleanup(self): + if self.__nodePool: self.__nodePool.finalize() + + def get_job_id(self): + return self.jobId + + def delete_job(self, jobId): + '''Delete a job given it's ID''' + ret = 0 + if self.__nodePool: + ret = self.__nodePool.deleteJob(jobId) + else: + raise Exception("Invalid state: Node pool is not initialized to delete the given job.") + return ret + + def is_valid_account(self): + """Verify if the account being used to submit the job is a valid 
account. + This code looks for a file /bin/verify-account. + If the file is present, it executes the file, passing as argument + the account name. It returns the exit code and output from the + script on non-zero exit code.""" + + accountValidationScript = os.path.abspath('./verify-account') + if not os.path.exists(accountValidationScript): + return (0, None) + + account = self.__nodePool.getAccountString() + exitCode = 0 + errMsg = None + try: + accountValidationCmd = simpleCommand('Account Validation Command',\ + '%s %s' % (accountValidationScript, + account)) + accountValidationCmd.start() + accountValidationCmd.wait() + accountValidationCmd.join() + exitCode = accountValidationCmd.exit_code() + self.__log.debug('account validation script is run %d' \ + % exitCode) + errMsg = None + if exitCode is not 0: + errMsg = accountValidationCmd.output() + except Exception, e: + exitCode = 0 + self.__log.warn('Error executing account script: %s ' \ + 'Accounting is disabled.' \ + % get_exception_error_string()) + self.__log.debug(get_exception_string()) + return (exitCode, errMsg) + + def allocate(self, clusterDir, min, max=None): + status = 0 + failureCount = 0 + self.__svcrgyClient = self.__get_svcrgy_client() + + self.__log.debug("allocate %s %s %s" % (clusterDir, min, max)) + + if min < 3: + self.__log.critical("Minimum nodes must be greater than 2.") + status = 2 + else: + nodeSet = self.__nodePool.newNodeSet(min) + walltime = None + if self.__cfg['hod'].has_key('walltime'): + walltime = self.__cfg['hod']['walltime'] + self.jobId, exitCode = self.__nodePool.submitNodeSet(nodeSet, walltime) + # if the job submission returned an error other than no resources + # retry a couple of times + while (self.jobId is False) and (exitCode != 188): + if hodInterrupt.isSet(): + raise HodInterruptException() + + failureCount += 1 + if (failureCount >= self.__cfg['hod']['job-status-query-failure-retries']): + self.__log.debug("failed submitting job more than the retries. exiting") + break + else: + # wait a bit before retrying + time.sleep(self.__cfg['hod']['job-command-failure-interval']) + if hodInterrupt.isSet(): + raise HodInterruptException() + self.jobId, exitCode = self.__nodePool.submitNodeSet(nodeSet, walltime) + + if self.jobId: + jobStatus = None + try: + jobStatus = self.__check_job_status() + except HodInterruptException, h: + self.__log.info(HOD_INTERRUPTED_MESG) + self.delete_job(self.jobId) + self.__log.info("Cluster %s removed from queue." 
% self.jobId) + raise h + else: + if jobStatus == -1: + self.delete_job(self.jobId); + status = 4 + return status + + if jobStatus: + self.__log.info("Cluster Id %s" \ + % self.jobId) + try: + self.ringmasterXRS = self.__get_ringmaster_client() + + self.__log.debug("Ringmaster at : %s" % self.ringmasterXRS ) + ringClient = None + if self.ringmasterXRS: + ringClient = hodXRClient(self.ringmasterXRS) + + hdfsStatus, hdfsAddr, self.hdfsInfo = \ + self.__init_hadoop_service('hdfs', ringClient) + + if hdfsStatus: + self.__log.info("HDFS UI at http://%s" % self.hdfsInfo) + + mapredStatus, mapredAddr, self.mapredInfo = \ + self.__init_hadoop_service('mapred', ringClient) + + if mapredStatus: + self.__log.info("Mapred UI at http://%s" % self.mapredInfo) + + if self.__cfg['hod'].has_key('update-worker-info') \ + and self.__cfg['hod']['update-worker-info']: + workerInfoMap = {} + workerInfoMap['HDFS UI'] = 'http://%s' % self.hdfsInfo + workerInfoMap['Mapred UI'] = 'http://%s' % self.mapredInfo + # Ringmaster URL sample format : http://hostname:port/ + workerInfoMap['RM RPC Port'] = '%s' % self.ringmasterXRS.split(":")[2].strip("/") + if mapredAddr.find(':') != -1: + workerInfoMap['Mapred RPC Port'] = mapredAddr.split(':')[1] + ret = self.__nodePool.updateWorkerInfo(workerInfoMap, self.jobId) + if ret != 0: + self.__log.warn('Could not update HDFS and Mapred information.' \ + 'User Portal may not show relevant information.' \ + 'Error code=%s' % ret) + + self.__cfg.replace_escape_seqs() + + # Go generate the client side hadoop-site.xml now + # adding final-params as well, just so that conf on + # client-side and server-side are (almost) the same + clientParams = None + serverParams = {} + finalServerParams = {} + + # client-params + if self.__cfg['hod'].has_key('client-params'): + clientParams = self.__cfg['hod']['client-params'] + + # server-params + if self.__cfg['gridservice-mapred'].has_key('server-params'): + serverParams.update(\ + self.__cfg['gridservice-mapred']['server-params']) + if self.__cfg['gridservice-hdfs'].has_key('server-params'): + # note that if there are params in both mapred and hdfs + # sections, the ones in hdfs overwirte the ones in mapred + serverParams.update(\ + self.__cfg['gridservice-hdfs']['server-params']) + + # final-server-params + if self.__cfg['gridservice-mapred'].has_key(\ + 'final-server-params'): + finalServerParams.update(\ + self.__cfg['gridservice-mapred']['final-server-params']) + if self.__cfg['gridservice-hdfs'].has_key( + 'final-server-params'): + finalServerParams.update(\ + self.__cfg['gridservice-hdfs']['final-server-params']) + + clusterFactor = self.__cfg['hod']['cluster-factor'] + tempDir = self.__cfg['hod']['temp-dir'] + if not os.path.exists(tempDir): + os.makedirs(tempDir) + tempDir = os.path.join( tempDir, self.__cfg['hod']['userid']\ + + "." + self.jobId ) + mrSysDir = getMapredSystemDirectory(self.__cfg['hodring']['mapred-system-dir-root'],\ + self.__cfg['hod']['userid'], self.jobId) + self.__hadoopCfg.gen_site_conf(clusterDir, tempDir, min,\ + hdfsAddr, mrSysDir, mapredAddr, clientParams,\ + serverParams, finalServerParams,\ + clusterFactor) + self.__log.info("hadoop-site.xml at %s" % clusterDir) + # end of hadoop-site.xml generation + else: + status = 8 + else: + status = 7 + else: + status = 6 + if status != 0: + self.__log.debug("Cleaning up cluster id %s, as cluster could not be allocated." 
% self.jobId) + if ringClient is None: + self.delete_job(self.jobId) + else: + self.__log.debug("Calling rm.stop()") + ringClient.stopRM() + self.__log.debug("Returning from rm.stop()") + except HodInterruptException, h: + self.__log.info(HOD_INTERRUPTED_MESG) + if self.ringmasterXRS: + if ringClient is None: + ringClient = hodXRClient(self.ringmasterXRS) + self.__log.debug("Calling rm.stop()") + ringClient.stopRM() + self.__log.debug("Returning from rm.stop()") + self.__log.info("Cluster Shutdown by informing ringmaster.") + else: + self.delete_job(self.jobId) + self.__log.info("Cluster %s removed from queue directly." % self.jobId) + raise h + else: + self.__log.critical("No cluster found, ringmaster failed to run.") + status = 5 + + elif self.jobId == False: + if exitCode == 188: + self.__log.critical("Request execeeded maximum resource allocation.") + else: + self.__log.critical("Job submission failed with exit code %s" % exitCode) + status = 4 + else: + self.__log.critical("Scheduler failure, allocation failed.\n\n") + status = 4 + + if status == 5 or status == 6: + ringMasterErrors = self.__svcrgyClient.getRMError() + if ringMasterErrors: + self.__log.critical("Cluster could not be allocated because" \ + " of the following errors on the "\ + "ringmaster host %s.\n%s" % \ + (ringMasterErrors[0], ringMasterErrors[1])) + self.__log.debug("Stack trace on ringmaster: %s" % ringMasterErrors[2]) + return status + + def __isRingMasterAlive(self, rmAddr): + ret = True + rmSocket = tcpSocket(rmAddr) + try: + rmSocket.open() + rmSocket.close() + except tcpError: + ret = False + + return ret + + def deallocate(self, clusterDir, clusterInfo): + status = 0 + + nodeSet = self.__nodePool.newNodeSet(clusterInfo['min'], + id=clusterInfo['jobid']) + self.mapredInfo = clusterInfo['mapred'] + self.hdfsInfo = clusterInfo['hdfs'] + + try: + if self.__cfg['hod'].has_key('hadoop-ui-log-dir'): + clusterStatus = self.check_cluster(clusterInfo) + if clusterStatus != 14 and clusterStatus != 10: + # If JT is still alive + self.__collect_jobtracker_ui(self.__cfg['hod']['hadoop-ui-log-dir']) + else: + self.__log.debug('hadoop-ui-log-dir not specified. Skipping Hadoop UI log collection.') + except HodInterruptException, h: + # got an interrupt. just pass and proceed to qdel + pass + except: + self.__log.info("Exception in collecting Job tracker logs. Ignoring.") + + rmAddr = None + if clusterInfo.has_key('ring'): + # format is http://host:port/ We need host:port + rmAddr = clusterInfo['ring'][7:] + if rmAddr.endswith('/'): + rmAddr = rmAddr[:-1] + + if (rmAddr is None) or (not self.__isRingMasterAlive(rmAddr)): + # Cluster is already dead, don't try to contact ringmaster. + self.__nodePool.finalize() + status = 10 # As cluster is dead, we just set the status to 'cluster dead'. + else: + xrsAddr = clusterInfo['ring'] + rmClient = hodXRClient(xrsAddr) + self.__log.debug('calling rm.stop') + rmClient.stopRM() + self.__log.debug('completed rm.stop') + + # cleanup hod temp dirs + tempDir = os.path.join( self.__cfg['hod']['temp-dir'], \ + self.__cfg['hod']['userid'] + "." 
+ clusterInfo['jobid'] ) + if os.path.exists(tempDir): + shutil.rmtree(tempDir) + + return status + +class hadoopScript: + def __init__(self, conf, execDir): + self.__environ = os.environ.copy() + self.__environ['HADOOP_CONF_DIR'] = conf + self.__execDir = execDir + + def run(self, script): + scriptThread = simpleCommand(script, script, self.__environ, 4, False, + False, self.__execDir) + scriptThread.start() + scriptThread.wait() + scriptThread.join() + + return scriptThread.exit_code() diff --git a/src/contrib/hod/hodlib/Hod/hod.py b/src/contrib/hod/hodlib/Hod/hod.py new file mode 100755 index 0000000..b2587bb --- /dev/null +++ b/src/contrib/hod/hodlib/Hod/hod.py @@ -0,0 +1,754 @@ +#Licensed to the Apache Software Foundation (ASF) under one +#or more contributor license agreements. See the NOTICE file +#distributed with this work for additional information +#regarding copyright ownership. The ASF licenses this file +#to you under the Apache License, Version 2.0 (the +#"License"); you may not use this file except in compliance +#with the License. You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. +# -*- python -*- + +import sys, os, getpass, pprint, re, cPickle, random, shutil, time, errno + +import hodlib.Common.logger + +from hodlib.ServiceRegistry.serviceRegistry import svcrgy +from hodlib.Common.xmlrpc import hodXRClient +from hodlib.Common.util import to_http_url, get_exception_string +from hodlib.Common.util import get_exception_error_string +from hodlib.Common.util import hodInterrupt, HodInterruptException +from hodlib.Common.util import HOD_INTERRUPTED_CODE + +from hodlib.Common.nodepoolutil import NodePoolUtil +from hodlib.Hod.hadoop import hadoopCluster, hadoopScript + +CLUSTER_DATA_FILE = 'clusters' +INVALID_STATE_FILE_MSGS = \ + [ + + "Requested operation cannot be performed. Cannot read %s: " + \ + "Permission denied.", + + "Requested operation cannot be performed. " + \ + "Cannot write to %s: Permission denied.", + + "Requested operation cannot be performed. " + \ + "Cannot read/write to %s: Permission denied.", + + "Cannot update %s: Permission denied. " + \ + "Cluster is deallocated, but info and list " + \ + "operations might show incorrect information.", + + ] + +class hodState: + def __init__(self, store): + self.__store = store + self.__stateFile = None + self.__init_store() + self.__STORE_EXT = ".state" + + def __init_store(self): + if not os.path.exists(self.__store): + os.mkdir(self.__store) + + def __set_state_file(self, id=None): + if id: + self.__stateFile = os.path.join(self.__store, "%s%s" % (id, + self.__STORE_EXT)) + else: + for item in os.listdir(self.__store): + if item.endswith(self.__STORE_EXT): + self.__stateFile = os.path.join(self.__store, item) + + def get_state_file(self): + return self.__stateFile + + def checkStateFile(self, id=None, modes=(os.R_OK,)): + # is state file exists/readable/writable/both? 
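+    # Returns True if the state file does not exist yet (HOD can create it and
+    # will then own the needed permissions), or if it exists and is accessible
+    # in every mode listed in 'modes'; returns False otherwise.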
+ self.__set_state_file(id) + + # return true if file doesn't exist, because HOD CAN create + # state file and so WILL have permissions to read and/or write + try: + os.stat(self.__stateFile) + except OSError, err: + if err.errno == errno.ENOENT: # error 2 (no such file) + return True + + # file exists + ret = True + for mode in modes: + ret = ret and os.access(self.__stateFile, mode) + return ret + + def read(self, id=None): + info = {} + + self.__set_state_file(id) + + if self.__stateFile: + if os.path.isfile(self.__stateFile): + stateFile = open(self.__stateFile, 'r') + try: + info = cPickle.load(stateFile) + except EOFError: + pass + + stateFile.close() + + return info + + def write(self, id, info): + self.__set_state_file(id) + if not os.path.exists(self.__stateFile): + self.clear(id) + + stateFile = open(self.__stateFile, 'w') + cPickle.dump(info, stateFile) + stateFile.close() + + def clear(self, id=None): + self.__set_state_file(id) + if self.__stateFile and os.path.exists(self.__stateFile): + os.remove(self.__stateFile) + else: + for item in os.listdir(self.__store): + if item.endswith(self.__STORE_EXT): + os.remove(item) + +class hodRunner: + + def __init__(self, cfg, log=None, cluster=None): + self.__hodhelp = hodHelp() + self.__ops = self.__hodhelp.ops + self.__cfg = cfg + self.__npd = self.__cfg['nodepooldesc'] + self.__opCode = 0 + self.__user = getpass.getuser() + self.__registry = None + self.__baseLogger = None + # Allowing to pass in log object to help testing - a stub can be passed in + if log is None: + self.__setup_logger() + else: + self.__log = log + + self.__userState = hodState(self.__cfg['hod']['user_state']) + + self.__clusterState = None + self.__clusterStateInfo = { 'env' : None, 'hdfs' : None, 'mapred' : None } + + # Allowing to pass in log object to help testing - a stib can be passed in + if cluster is None: + self.__cluster = hadoopCluster(self.__cfg, self.__log) + else: + self.__cluster = cluster + + def __setup_logger(self): + self.__baseLogger = hodlib.Common.logger.hodLog('hod') + self.__log = self.__baseLogger.add_logger(self.__user ) + + if self.__cfg['hod']['stream']: + self.__baseLogger.add_stream(level=self.__cfg['hod']['debug'], + addToLoggerNames=(self.__user ,)) + + if self.__cfg['hod'].has_key('syslog-address'): + self.__baseLogger.add_syslog(self.__cfg['hod']['syslog-address'], + level=self.__cfg['hod']['debug'], + addToLoggerNames=(self.__user ,)) + + def get_logger(self): + return self.__log + + def __setup_cluster_logger(self, directory): + self.__baseLogger.add_file(logDirectory=directory, level=4, + backupCount=self.__cfg['hod']['log-rollover-count'], + addToLoggerNames=(self.__user ,)) + + def __setup_cluster_state(self, directory): + self.__clusterState = hodState(directory) + + def __norm_cluster_dir(self, directory): + directory = os.path.expanduser(directory) + if not os.path.isabs(directory): + directory = os.path.join(self.__cfg['hod']['original-dir'], directory) + directory = os.path.abspath(directory) + + return directory + + def __setup_service_registry(self): + cfg = self.__cfg['hod'].copy() + cfg['debug'] = 0 + self.__registry = svcrgy(cfg, self.__log) + self.__registry.start() + self.__log.debug(self.__registry.getXMLRPCAddr()) + self.__cfg['hod']['xrs-address'] = self.__registry.getXMLRPCAddr() + self.__cfg['ringmaster']['svcrgy-addr'] = self.__cfg['hod']['xrs-address'] + + def __set_cluster_state_info(self, env, hdfs, mapred, ring, jobid, min, max): + self.__clusterStateInfo['env'] = env + self.__clusterStateInfo['hdfs'] = 
"http://%s" % hdfs + self.__clusterStateInfo['mapred'] = "http://%s" % mapred + self.__clusterStateInfo['ring'] = ring + self.__clusterStateInfo['jobid'] = jobid + self.__clusterStateInfo['min'] = min + self.__clusterStateInfo['max'] = max + + def __set_user_state_info(self, info): + userState = self.__userState.read(CLUSTER_DATA_FILE) + for key in info.keys(): + userState[key] = info[key] + + self.__userState.write(CLUSTER_DATA_FILE, userState) + + def __remove_cluster(self, clusterDir): + clusterInfo = self.__userState.read(CLUSTER_DATA_FILE) + if clusterDir in clusterInfo: + del(clusterInfo[clusterDir]) + self.__userState.write(CLUSTER_DATA_FILE, clusterInfo) + + def __cleanup(self): + if self.__registry: self.__registry.stop() + + def __check_operation(self, operation): + opList = operation.split() + + if not opList[0] in self.__ops: + self.__log.critical("Invalid hod operation specified: %s" % operation) + self._op_help(None) + self.__opCode = 2 + + return opList + + def __adjustMasterFailureCountConfig(self, nodeCount): + # This method adjusts the ringmaster.max-master-failures variable + # to a value that is bounded by the a function of the number of + # nodes. + + maxFailures = self.__cfg['ringmaster']['max-master-failures'] + # Count number of masters required - depends on which services + # are external + masters = 0 + if not self.__cfg['gridservice-hdfs']['external']: + masters += 1 + if not self.__cfg['gridservice-mapred']['external']: + masters += 1 + + # So, if there are n nodes and m masters, we look atleast for + # all masters to come up. Therefore, atleast m nodes should be + # good, which means a maximum of n-m master nodes can fail. + maxFailedNodes = nodeCount - masters + + # The configured max number of failures is now bounded by this + # number. + self.__cfg['ringmaster']['max-master-failures'] = \ + min(maxFailures, maxFailedNodes) + + def _op_allocate(self, args): + operation = "allocate" + argLength = len(args) + min = 0 + max = 0 + errorFlag = False + errorMsgs = [] + + if argLength == 3: + nodes = args[2] + clusterDir = self.__norm_cluster_dir(args[1]) + + if not os.path.exists(clusterDir): + try: + os.makedirs(clusterDir) + except OSError, err: + errorFlag = True + errorMsgs.append("Could not create cluster directory. %s" \ + % (str(err))) + elif not os.path.isdir(clusterDir): + errorFlag = True + errorMsgs.append( \ + "Invalid cluster directory (--hod.clusterdir or -d) : " + \ + clusterDir + " : Not a directory") + + if int(nodes) < 3 : + errorFlag = True + errorMsgs.append("Invalid nodecount (--hod.nodecount or -n) : " + \ + "Must be >= 3. Given nodes: %s" % nodes) + if errorFlag: + for msg in errorMsgs: + self.__log.critical(msg) + self.__opCode = 3 + return + + if not self.__userState.checkStateFile(CLUSTER_DATA_FILE, \ + (os.R_OK, os.W_OK)): + self.__log.critical(INVALID_STATE_FILE_MSGS[2] % \ + self.__userState.get_state_file()) + self.__opCode = 1 + return + + clusterList = self.__userState.read(CLUSTER_DATA_FILE) + if clusterDir in clusterList.keys(): + self.__setup_cluster_state(clusterDir) + clusterInfo = self.__clusterState.read() + # Check if the job is not running. Only then can we safely + # allocate another cluster. Otherwise the user would need + # to deallocate and free up resources himself. + if clusterInfo.has_key('jobid') and \ + self.__cluster.is_cluster_deallocated(clusterInfo['jobid']): + self.__log.warn("Found a dead cluster at cluster directory '%s'. Deallocating it to allocate a new one." 
% (clusterDir)) + self.__remove_cluster(clusterDir) + self.__clusterState.clear() + else: + self.__log.critical("Found a previously allocated cluster at cluster directory '%s'. HOD cannot determine if this cluster can be automatically deallocated. Deallocate the cluster if it is unused." % (clusterDir)) + self.__opCode = 12 + return + + self.__setup_cluster_logger(clusterDir) + + (status, message) = self.__cluster.is_valid_account() + if status is not 0: + if message: + for line in message: + self.__log.critical("verify-account output: %s" % line) + self.__log.critical("Cluster cannot be allocated because account verification failed. " \ + + "verify-account returned exit code: %s." % status) + self.__opCode = 4 + return + else: + self.__log.debug("verify-account returned zero exit code.") + if message: + self.__log.debug("verify-account output: %s" % message) + + if re.match('\d+-\d+', nodes): + (min, max) = nodes.split("-") + min = int(min) + max = int(max) + else: + try: + nodes = int(nodes) + min = nodes + max = nodes + except ValueError: + print self.__hodhelp.help(operation) + self.__log.critical( + "%s operation requires a pos_int value for n(nodecount)." % + operation) + self.__opCode = 3 + else: + self.__setup_cluster_state(clusterDir) + clusterInfo = self.__clusterState.read() + self.__opCode = self.__cluster.check_cluster(clusterInfo) + if self.__opCode == 0 or self.__opCode == 15: + self.__setup_service_registry() + if hodInterrupt.isSet(): + self.__cleanup() + raise HodInterruptException() + self.__log.debug("Service Registry started.") + + self.__adjustMasterFailureCountConfig(nodes) + + try: + allocateStatus = self.__cluster.allocate(clusterDir, min, max) + except HodInterruptException, h: + self.__cleanup() + raise h + # Allocation has gone through. + # Don't care about interrupts any more + + try: + if allocateStatus == 0: + self.__set_cluster_state_info(os.environ, + self.__cluster.hdfsInfo, + self.__cluster.mapredInfo, + self.__cluster.ringmasterXRS, + self.__cluster.jobId, + min, max) + self.__setup_cluster_state(clusterDir) + self.__clusterState.write(self.__cluster.jobId, + self.__clusterStateInfo) + # Do we need to check for interrupts here ?? + + self.__set_user_state_info( + { clusterDir : self.__cluster.jobId, } ) + self.__opCode = allocateStatus + except Exception, e: + # Some unknown problem. + self.__cleanup() + self.__cluster.deallocate(clusterDir, self.__clusterStateInfo) + self.__opCode = 1 + raise Exception(e) + elif self.__opCode == 12: + self.__log.critical("Cluster %s already allocated." % clusterDir) + elif self.__opCode == 10: + self.__log.critical("dead\t%s\t%s" % (clusterInfo['jobid'], + clusterDir)) + elif self.__opCode == 13: + self.__log.warn("hdfs dead\t%s\t%s" % (clusterInfo['jobid'], + clusterDir)) + elif self.__opCode == 14: + self.__log.warn("mapred dead\t%s\t%s" % (clusterInfo['jobid'], + clusterDir)) + + if self.__opCode > 0 and self.__opCode != 15: + self.__log.critical("Cannot allocate cluster %s" % clusterDir) + else: + print self.__hodhelp.help(operation) + self.__log.critical("%s operation requires two arguments. 
" % operation + + "A cluster directory and a nodecount.") + self.__opCode = 3 + + def _is_cluster_allocated(self, clusterDir): + if os.path.isdir(clusterDir): + self.__setup_cluster_state(clusterDir) + clusterInfo = self.__clusterState.read() + if clusterInfo != {}: + return True + return False + + def _op_deallocate(self, args): + operation = "deallocate" + argLength = len(args) + if argLength == 2: + clusterDir = self.__norm_cluster_dir(args[1]) + if os.path.isdir(clusterDir): + self.__setup_cluster_state(clusterDir) + clusterInfo = self.__clusterState.read() + if clusterInfo == {}: + self.__handle_invalid_cluster_directory(clusterDir, cleanUp=True) + else: + self.__opCode = \ + self.__cluster.deallocate(clusterDir, clusterInfo) + # irrespective of whether deallocate failed or not\ + # remove the cluster state. + self.__clusterState.clear() + if not self.__userState.checkStateFile(CLUSTER_DATA_FILE, (os.W_OK,)): + self.__log.critical(INVALID_STATE_FILE_MSGS[3] % \ + self.__userState.get_state_file()) + self.__opCode = 1 + return + self.__remove_cluster(clusterDir) + else: + self.__handle_invalid_cluster_directory(clusterDir, cleanUp=True) + else: + print self.__hodhelp.help(operation) + self.__log.critical("%s operation requires one argument. " % operation + + "A cluster path.") + self.__opCode = 3 + + def _op_list(self, args): + operation = 'list' + clusterList = self.__userState.read(CLUSTER_DATA_FILE) + for path in clusterList.keys(): + if not os.path.isdir(path): + self.__log.info("cluster state unknown\t%s\t%s" % (clusterList[path], path)) + continue + self.__setup_cluster_state(path) + clusterInfo = self.__clusterState.read() + if clusterInfo == {}: + # something wrong with the cluster directory. + self.__log.info("cluster state unknown\t%s\t%s" % (clusterList[path], path)) + continue + clusterStatus = self.__cluster.check_cluster(clusterInfo) + if clusterStatus == 12: + self.__log.info("alive\t%s\t%s" % (clusterList[path], path)) + elif clusterStatus == 10: + self.__log.info("dead\t%s\t%s" % (clusterList[path], path)) + elif clusterStatus == 13: + self.__log.info("hdfs dead\t%s\t%s" % (clusterList[path], path)) + elif clusterStatus == 14: + self.__log.info("mapred dead\t%s\t%s" % (clusterList[path], path)) + + def _op_info(self, args): + operation = 'info' + argLength = len(args) + if argLength == 2: + clusterDir = self.__norm_cluster_dir(args[1]) + if os.path.isdir(clusterDir): + self.__setup_cluster_state(clusterDir) + clusterInfo = self.__clusterState.read() + if clusterInfo == {}: + # something wrong with the cluster directory. + self.__handle_invalid_cluster_directory(clusterDir) + else: + clusterStatus = self.__cluster.check_cluster(clusterInfo) + if clusterStatus == 12: + self.__print_cluster_info(clusterInfo) + self.__log.info("hadoop-site.xml at %s" % clusterDir) + elif clusterStatus == 10: + self.__log.critical("%s cluster is dead" % clusterDir) + elif clusterStatus == 13: + self.__log.warn("%s cluster hdfs is dead" % clusterDir) + elif clusterStatus == 14: + self.__log.warn("%s cluster mapred is dead" % clusterDir) + + if clusterStatus != 12: + if clusterStatus == 15: + self.__log.critical("Cluster %s not allocated." % clusterDir) + else: + self.__print_cluster_info(clusterInfo) + self.__log.info("hadoop-site.xml at %s" % clusterDir) + + self.__opCode = clusterStatus + else: + self.__handle_invalid_cluster_directory(clusterDir) + else: + print self.__hodhelp.help(operation) + self.__log.critical("%s operation requires one argument. 
" % operation + + "A cluster path.") + self.__opCode = 3 + + def __handle_invalid_cluster_directory(self, clusterDir, cleanUp=False): + if not self.__userState.checkStateFile(CLUSTER_DATA_FILE, (os.R_OK,)): + self.__log.critical(INVALID_STATE_FILE_MSGS[0] % \ + self.__userState.get_state_file()) + self.__opCode = 1 + return + + clusterList = self.__userState.read(CLUSTER_DATA_FILE) + if clusterDir in clusterList.keys(): + # previously allocated cluster. + self.__log.critical("Cannot find information for cluster with id '%s' in previously allocated cluster directory '%s'." % (clusterList[clusterDir], clusterDir)) + if cleanUp: + self.__cluster.delete_job(clusterList[clusterDir]) + self.__log.critical("Freeing resources allocated to the cluster.") + if not self.__userState.checkStateFile(CLUSTER_DATA_FILE, (os.W_OK,)): + self.__log.critical(INVALID_STATE_FILE_MSGS[1] % \ + self.__userState.get_state_file()) + self.__opCode = 1 + return + self.__remove_cluster(clusterDir) + self.__opCode = 3 + else: + if not os.path.exists(clusterDir): + self.__log.critical( \ + "Invalid hod.clusterdir(--hod.clusterdir or -d). " + \ + clusterDir + " : No such directory") + elif not os.path.isdir(clusterDir): + self.__log.critical( \ + "Invalid hod.clusterdir(--hod.clusterdir or -d). " + \ + clusterDir + " : Not a directory") + else: + self.__log.critical( \ + "Invalid hod.clusterdir(--hod.clusterdir or -d). " + \ + clusterDir + " : Not tied to any allocated cluster.") + self.__opCode = 15 + + def __print_cluster_info(self, clusterInfo): + keys = clusterInfo.keys() + + _dict = { + 'jobid' : 'Cluster Id', 'min' : 'Nodecount', + 'hdfs' : 'HDFS UI at' , 'mapred' : 'Mapred UI at' + } + + for key in _dict.keys(): + if clusterInfo.has_key(key): + self.__log.info("%s %s" % (_dict[key], clusterInfo[key])) + + if clusterInfo.has_key('ring'): + self.__log.debug("%s\t%s" % ('Ringmaster at ', clusterInfo['ring'])) + + if self.__cfg['hod']['debug'] == 4: + for var in clusterInfo['env'].keys(): + self.__log.debug("%s = %s" % (var, clusterInfo['env'][var])) + + def _op_help(self, arg): + if arg == None or arg.__len__() != 2: + print "hod commands:\n" + for op in self.__ops: + print self.__hodhelp.help(op) + else: + if arg[1] not in self.__ops: + print self.__hodhelp.help('help') + self.__log.critical("Help requested for invalid operation : %s"%arg[1]) + self.__opCode = 3 + else: print self.__hodhelp.help(arg[1]) + + def operation(self): + operation = self.__cfg['hod']['operation'] + try: + opList = self.__check_operation(operation) + if self.__opCode == 0: + if not self.__userState.checkStateFile(CLUSTER_DATA_FILE, (os.R_OK,)): + self.__log.critical(INVALID_STATE_FILE_MSGS[0] % \ + self.__userState.get_state_file()) + self.__opCode = 1 + return self.__opCode + getattr(self, "_op_%s" % opList[0])(opList) + except HodInterruptException, h: + self.__log.critical("op: %s failed because of a process interrupt." 
\ + % operation) + self.__opCode = HOD_INTERRUPTED_CODE + except: + self.__log.critical("op: %s failed: %s" % (operation, + get_exception_error_string())) + self.__log.debug(get_exception_string()) + + self.__cleanup() + + self.__log.debug("return code: %s" % self.__opCode) + + return self.__opCode + + def script(self): + errorFlag = False + errorMsgs = [] + scriptRet = 0 # return from the script, if run + + script = self.__cfg['hod']['script'] + nodes = self.__cfg['hod']['nodecount'] + clusterDir = self.__cfg['hod']['clusterdir'] + + if not os.path.exists(script): + errorFlag = True + errorMsgs.append("Invalid script file (--hod.script or -s) : " + \ + script + " : No such file") + elif not os.path.isfile(script): + errorFlag = True + errorMsgs.append("Invalid script file (--hod.script or -s) : " + \ + script + " : Not a file.") + else: + isExecutable = os.access(script, os.X_OK) + if not isExecutable: + errorFlag = True + errorMsgs.append("Invalid script file (--hod.script or -s) : " + \ + script + " : Not an executable.") + + if not os.path.exists(clusterDir): + try: + os.makedirs(clusterDir) + except OSError, err: + errorFlag = True + errorMsgs.append("Could not create cluster directory. %s" % (str(err))) + elif not os.path.isdir(clusterDir): + errorFlag = True + errorMsgs.append( \ + "Invalid cluster directory (--hod.clusterdir or -d) : " + \ + clusterDir + " : Not a directory") + + if int(self.__cfg['hod']['nodecount']) < 3 : + errorFlag = True + errorMsgs.append("Invalid nodecount (--hod.nodecount or -n) : " + \ + "Must be >= 3. Given nodes: %s" % nodes) + + if errorFlag: + for msg in errorMsgs: + self.__log.critical(msg) + self.handle_script_exit_code(scriptRet, clusterDir) + sys.exit(3) + + try: + self._op_allocate(('allocate', clusterDir, str(nodes))) + if self.__opCode == 0: + if self.__cfg['hod'].has_key('script-wait-time'): + time.sleep(self.__cfg['hod']['script-wait-time']) + self.__log.debug('Slept for %d time. Now going to run the script' % self.__cfg['hod']['script-wait-time']) + if hodInterrupt.isSet(): + self.__log.debug('Hod interrupted - not executing script') + else: + scriptRunner = hadoopScript(clusterDir, + self.__cfg['hod']['original-dir']) + self.__opCode = scriptRunner.run(script) + scriptRet = self.__opCode + self.__log.info("Exit code from running the script: %d" % self.__opCode) + else: + self.__log.critical("Error %d in allocating the cluster. Cannot run the script." % self.__opCode) + + if hodInterrupt.isSet(): + # Got interrupt while executing script. Unsetting it for deallocating + hodInterrupt.setFlag(False) + if self._is_cluster_allocated(clusterDir): + self._op_deallocate(('deallocate', clusterDir)) + except HodInterruptException, h: + self.__log.critical("Script failed because of a process interrupt.") + self.__opCode = HOD_INTERRUPTED_CODE + except: + self.__log.critical("script: %s failed: %s" % (script, + get_exception_error_string())) + self.__log.debug(get_exception_string()) + + self.__cleanup() + + self.handle_script_exit_code(scriptRet, clusterDir) + + return self.__opCode + + def handle_script_exit_code(self, scriptRet, clusterDir): + # We want to give importance to a failed script's exit code, and write out exit code to a file separately + # so users can easily get it if required. This way they can differentiate between the script's exit code + # and hod's exit code. 
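+    # Concretely: when the script exits non-zero, script.exitcode under
+    # clusterDir is written with that code and it also becomes hod's exit
+    # code; when it exits zero, any stale script.exitcode file is removed.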
+ if os.path.exists(clusterDir): + exit_code_file_name = (os.path.join(clusterDir, 'script.exitcode')) + if scriptRet != 0: + exit_code_file = open(exit_code_file_name, 'w') + print >>exit_code_file, scriptRet + exit_code_file.close() + self.__opCode = scriptRet + else: + #ensure script exit code file is not there: + if (os.path.exists(exit_code_file_name)): + os.remove(exit_code_file_name) + +class hodHelp: + def __init__(self): + self.ops = ['allocate', 'deallocate', 'info', 'list','script', 'help'] + + self.usage_strings = \ + { + 'allocate' : 'hod allocate -d -n [OPTIONS]', + 'deallocate' : 'hod deallocate -d [OPTIONS]', + 'list' : 'hod list [OPTIONS]', + 'info' : 'hod info -d [OPTIONS]', + 'script' : + 'hod script -d -n -s + + + +Hadoop +

$title

+ +__HTML_HEADER__ + +my $heading; +my $relcnt = 0; +my $header = 'h2'; +for my $rel (@releases) { + if (++$relcnt == 3) { + $header = 'h3'; + print "

"; + print "Older Releases"; + print "

\n"; + print "
    \n" + } + + ($release, $sections) = @$rel; + + # The first section heading is undefined for the older sectionless releases + my $has_release_sections = $sections->[0][0]; + + (my $relid = lc($release)) =~ s/\s+/_/g; + print "<$header>"; + print "$release"; + print "\n"; + print "
      \n" + if ($has_release_sections); + + for my $section (@$sections) { + ($heading, $items) = @$section; + (my $sectid = lc($heading)) =~ s/\s+/_/g; + my $numItemsStr = $#{$items} > 0 ? "($#{$items})" : "(none)"; + + print "
    • ", + ($heading || ''), "   $numItemsStr\n" + if ($has_release_sections); + + my $list_type = $items->[0] || ''; + my $list = ($has_release_sections || $list_type eq 'numbered' ? 'ol' : 'ul'); + my $listid = $sectid ? "$relid.$sectid" : $relid; + print " <$list id=\"$listid\">\n"; + + for my $itemnum (1..$#{$items}) { + my $item = $items->[$itemnum]; + $item =~ s:&:&:g; # Escape HTML metachars + $item =~ s:<:<:g; + $item =~ s:>:>:g; + + $item =~ s:\s*(\([^)"]+?\))\s*$:
      $1:; # Separate attribution + $item =~ s:\n{2,}:\n

      \n:g; # Keep paragraph breaks + $item =~ s{(?:${jira_url_prefix})?(HADOOP-\d+)} # Link to JIRA + {$1}g; + print "

    • $item
    • \n"; + } + print " \n"; + print " \n" if ($has_release_sections); + } + print "
    \n" if ($has_release_sections); +} +print "
\n" if ($relcnt > 3); +print "\n\n"; + + +# +# Subroutine: get_list_type +# +# Takes one parameter: +# +# - The first line of a sub-section/point +# +# Returns one scalar: +# +# - The list type: 'numbered'; or one of the bulleted types '-', or '.' or +# 'paragraph'. +# +sub get_list_type { + my $first_list_item_line = shift; + my $type = 'paragraph'; # Default to paragraph type + + if ($first_list_item_line =~ /^\s{0,2}\d+\.\s+\S+/) { + $type = 'numbered'; + } elsif ($first_list_item_line =~ /^\s*([-.])\s+\S+/) { + $type = $1; + } + return $type; +} + +1; diff --git a/src/docs/forrest.properties b/src/docs/forrest.properties new file mode 100644 index 0000000..a814b3e --- /dev/null +++ b/src/docs/forrest.properties @@ -0,0 +1,106 @@ +# Copyright 2002-2004 The Apache Software Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +############## +# Properties used by forrest.build.xml for building the website +# These are the defaults, un-comment them if you need to change them. +############## + +# Prints out a summary of Forrest settings for this project +#forrest.echo=true + +# Project name (used to name .war file) +#project.name=my-project + +# Specifies name of Forrest skin to use +#project.skin=tigris +#project.skin=pelt + +# comma separated list, file:// is supported +#forrest.skins.descriptors=http://forrest.apache.org/skins/skins.xml,file:///c:/myskins/skins.xml + +############## +# behavioural properties +#project.menu-scheme=tab_attributes +#project.menu-scheme=directories + +############## +# layout properties + +# Properties that can be set to override the default locations +# +# Parent properties must be set. This usually means uncommenting +# project.content-dir if any other property using it is uncommented + +#project.status=status.xml +#project.content-dir=src/documentation +#project.raw-content-dir=${project.content-dir}/content +#project.conf-dir=${project.content-dir}/conf +#project.sitemap-dir=${project.content-dir} +#project.xdocs-dir=${project.content-dir}/content/xdocs +#project.resources-dir=${project.content-dir}/resources +#project.stylesheets-dir=${project.resources-dir}/stylesheets +#project.images-dir=${project.resources-dir}/images +#project.schema-dir=${project.resources-dir}/schema +#project.skins-dir=${project.content-dir}/skins +#project.skinconf=${project.content-dir}/skinconf.xml +#project.lib-dir=${project.content-dir}/lib +#project.classes-dir=${project.content-dir}/classes +#project.translations-dir=${project.content-dir}/translations + +############## +# validation properties + +# This set of properties determine if validation is performed +# Values are inherited unless overridden. +# e.g. if forrest.validate=false then all others are false unless set to true. 
+#forrest.validate=true +#forrest.validate.xdocs=${forrest.validate} +#forrest.validate.skinconf=${forrest.validate} +#forrest.validate.sitemap=${forrest.validate} +#forrest.validate.stylesheets=${forrest.validate} +#forrest.validate.skins=${forrest.validate} +#forrest.validate.skins.stylesheets=${forrest.validate.skins} + +# *.failonerror=(true|false) - stop when an XML file is invalid +#forrest.validate.failonerror=true + +# *.excludes=(pattern) - comma-separated list of path patterns to not validate +# e.g. +#forrest.validate.xdocs.excludes=samples/subdir/**, samples/faq.xml +#forrest.validate.xdocs.excludes= + + +############## +# General Forrest properties + +# The URL to start crawling from +#project.start-uri=linkmap.html +# Set logging level for messages printed to the console +# (DEBUG, INFO, WARN, ERROR, FATAL_ERROR) +#project.debuglevel=ERROR +# Max memory to allocate to Java +#forrest.maxmemory=64m +# Any other arguments to pass to the JVM. For example, to run on an X-less +# server, set to -Djava.awt.headless=true +#forrest.jvmargs= +# The bugtracking URL - the issue number will be appended +#project.bugtracking-url=http://issues.apache.org/bugzilla/show_bug.cgi?id= +#project.bugtracking-url=http://issues.apache.org/jira/browse/ +# The issues list as rss +#project.issues-rss-url= +#I18n Property only works for the "forrest run" target. +#project.i18n=true +project.configfile=${project.home}/src/documentation/conf/cli.xconf + diff --git a/src/docs/releasenotes.html b/src/docs/releasenotes.html new file mode 100644 index 0000000..7783bb4 --- /dev/null +++ b/src/docs/releasenotes.html @@ -0,0 +1,348 @@ + + + + +Hadoop 0.20.1 Release Notes + + + +

Hadoop 0.20.1 Release Notes

+ These release notes include new developer and user-facing incompatibilities, features, and major improvements. The table below is sorted by Component. + + +

Changes Since Hadoop 0.20.0

+ +

Common

+ +

Sub-task +

+
    +
  • [HADOOP-6213] - Remove commons dependency on commons-cli2 +
  • +
+ +

Bug +

+
    +
  • [HADOOP-4626] - API link in forrest doc should point to the same version of hadoop. +
  • +
  • [HADOOP-4674] - hadoop fs -help should list detailed help info for the following commands: test, text, tail, stat & touchz +
  • +
  • [HADOOP-4856] - Document JobInitializationPoller configuration in capacity scheduler forrest documentation. +
  • +
  • [HADOOP-4931] - Document TaskTracker's memory management functionality and CapacityScheduler's memory based scheduling. +
  • +
  • [HADOOP-5210] - Reduce Task Progress shows > 100% when the total size of map outputs (for a single reducer) is high +
  • +
  • [HADOOP-5213] - BZip2CompressionOutputStream NullPointerException +
  • +
  • [HADOOP-5349] - When the size required for a path is -1, LocalDirAllocator.getLocalPathForWrite fails with a DiskCheckerException when the disk it selects is bad. +
  • +
  • [HADOOP-5533] - Recovery duration shown on the jobtracker webpage is inaccurate +
  • +
  • [HADOOP-5539] - o.a.h.mapred.Merger not maintaining map out compression on intermediate files +
  • +
  • [HADOOP-5636] - Job is left in Running state after a killJob +
  • +
  • [HADOOP-5641] - Possible NPE in CapacityScheduler's MemoryMatcher +
  • +
  • [HADOOP-5646] - TestQueueCapacities is failing Hudson tests for the last few builds +
  • +
  • [HADOOP-5648] - Not able to generate gridmix.jar on already compiled version of hadoop +
  • +
  • [HADOOP-5654] - TestReplicationPolicy.<init> fails on java.net.BindException +
  • +
  • [HADOOP-5655] - TestMRServerPorts fails on java.net.BindException +
  • +
  • [HADOOP-5688] - HftpFileSystem.getChecksum(..) does not work for the paths with scheme and authority +
  • +
  • [HADOOP-5691] - org.apache.hadoop.mapreduce.Reducer should not be abstract. +
  • +
  • [HADOOP-5711] - Change Namenode file close log to info +
  • +
  • [HADOOP-5718] - Capacity Scheduler should not check for presence of default queue while starting up. +
  • +
  • [HADOOP-5719] - Jobs failed during job initalization are never removed from Capacity Schedulers waiting list +
  • +
  • [HADOOP-5736] - Update CapacityScheduler documentation to reflect latest changes +
  • +
  • [HADOOP-5746] - Errors encountered in MROutputThread after the last map/reduce call can go undetected +
  • +
  • [HADOOP-5796] - DFS Write pipeline does not detect defective datanode correctly in some cases (HADOOP-3339) +
  • +
  • [HADOOP-5828] - Use absolute path for JobTracker's mapred.local.dir in MiniMRCluster +
  • +
  • [HADOOP-5850] - map/reduce doesn't run jobs with 0 maps +
  • +
  • [HADOOP-5863] - mapred metrics shows negative count of waiting maps and reduces +
  • +
  • [HADOOP-5869] - TestQueueCapacities +
  • +
  • [HADOOP-6017] - NameNode and SecondaryNameNode fail to restart because of abnormal filenames. +
  • +
  • [HADOOP-6097] - Multiple bugs w/ Hadoop archives +
  • +
  • [HADOOP-6139] - Incomplete help message is displayed for rm and rmr options. +
  • +
  • [HADOOP-6141] - hadoop 0.20 branch "test-patch" is broken +
  • +
  • [HADOOP-6145] - No error message for deleting non-existant file or directory. +
  • +
  • [HADOOP-6215] - fix GenericOptionParser to deal with -D with '=' in the value +
  • +
+ +

Improvement +

+
    +
  • [HADOOP-5726] - Remove pre-emption from the capacity scheduler code base +
  • +
+ +

New Feature +

+
    +
  • [HADOOP-3315] - New binary file format +
  • +
  • [HADOOP-5714] - Metric to show number of fs.exists (or number of getFileInfo) calls +
  • +
  • [HADOOP-6080] - Handling of Trash with quota +
  • +
+ +

HDFS

+ +

Bug +

+
    +
  • [HDFS-26] - HADOOP-5862 for version .20 (Namespace quota exceeded message unclear) +
  • +
  • [HDFS-167] - DFSClient continues to retry indefinitely +
  • +
  • [HDFS-438] - Improve help message for quotas +
  • +
  • [HDFS-442] - dfsthroughput in test.jar throws NPE +
  • +
  • [HDFS-485] - error : too many fetch failures +
  • +
  • [HDFS-495] - Hadoop FSNamesystem startFileInternal() getLease() has bug +
  • +
  • [HDFS-525] - ListPathsServlet.java uses static SimpleDateFormat that has threading issues +
  • +
+ +

Improvement +

+
    +
  • [HDFS-504] - HDFS updates the modification time of a file when the file is closed. +
  • +
  • [HDFS-527] - Refactor DFSClient constructors +
  • +
+ +

Map/Reduce

+ +

Bug +

+
    +
  • [MAPREDUCE-2] - ArrayOutOfIndex error in KeyFieldBasedPartitioner on empty key +
  • +
  • [MAPREDUCE-18] - Under load the shuffle sometimes gets incorrect data +
  • +
  • [MAPREDUCE-40] - Memory management variables need a backwards compatibility option after HADOOP-5881 +
  • +
  • [MAPREDUCE-112] - Reduce Input Records and Reduce Output Records counters are not being set when using the new Mapreduce reducer API +
  • +
  • [MAPREDUCE-124] - When abortTask of OutputCommitter fails with an Exception for a map-only job, the task is marked as success +
  • +
  • [MAPREDUCE-130] - Delete the jobconf copy from the log directory of the JobTracker when the job is retired +
  • +
  • [MAPREDUCE-179] - setProgress not called for new RecordReaders +
  • +
  • [MAPREDUCE-383] - pipes combiner does not reset properly after a spill +
  • +
  • [MAPREDUCE-421] - mapred pipes might return exit code 0 even when failing +
  • +
  • [MAPREDUCE-430] - Task stuck in cleanup with OutOfMemoryErrors +
  • +
  • [MAPREDUCE-565] - Partitioner does not work with new API +
  • +
  • [MAPREDUCE-657] - CompletedJobStatusStore hardcodes filesystem to hdfs +
  • +
  • [MAPREDUCE-687] - TestMiniMRMapRedDebugScript fails sometimes +
  • +
  • [MAPREDUCE-735] - ArrayIndexOutOfBoundsException is thrown by KeyFieldBasedPartitioner +
  • +
  • [MAPREDUCE-745] - TestRecoveryManager fails sometimes +
  • +
  • [MAPREDUCE-796] - Encountered "ClassCastException" on tasktracker while running wordcount with MultithreadedMapRunner +
  • +
  • [MAPREDUCE-805] - Deadlock in Jobtracker +
  • +
  • [MAPREDUCE-806] - WordCount example does not compile given the current instructions +
  • +
  • [MAPREDUCE-807] - Stray user files in mapred.system.dir with permissions other than 777 can prevent the jobtracker from starting up. +
  • +
  • [MAPREDUCE-818] - org.apache.hadoop.mapreduce.Counters.getGroup returns null if the group name doesnt exist. +
  • +
  • [MAPREDUCE-827] - "hadoop job -status <jobid>" command should display job's completion status also. +
  • +
  • [MAPREDUCE-832] - Too many WARN messages about deprecated memorty config variables in JobTacker log +
  • +
  • [MAPREDUCE-834] - When TaskTracker config use old memory management values its memory monitoring is diabled. +
  • +
  • [MAPREDUCE-838] - Task succeeds even when committer.commitTask fails with IOException +
  • +
  • [MAPREDUCE-911] - TestTaskFail fail sometimes +
  • +
  • [MAPREDUCE-924] - TestPipes crashes on trunk +
  • +
+ +

Improvement +

+
    +
  • [MAPREDUCE-465] - Deprecate org.apache.hadoop.mapred.lib.MultithreadedMapRunner +
  • +
  • [MAPREDUCE-487] - DBInputFormat support for Oracle +
  • +
  • [MAPREDUCE-767] - to remove mapreduce dependency on commons-cli2 +
  • +
+ +

Changes Since Hadoop 0.19.1

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Issue | Component | Notes
HADOOP-3344 | build | Changed build procedure for libhdfs to build correctly for different platforms. Build instructions are in the Jira item.
HADOOP-4253 | conf | Removed from class org.apache.hadoop.fs.RawLocalFileSystem deprecated methods public String getName(), public void lock(Path p, boolean shared) and public void release(Path p).
HADOOP-4454 | conf | Changed processing of conf/slaves file to allow # to begin a comment.
HADOOP-4631 | conf | Split hadoop-default.xml into core-default.xml, hdfs-default.xml and mapreduce-default.xml.
HADOOP-4035 | contrib/capacity-sched | Changed capacity scheduler policy to take note of task memory requirements and task tracker memory availability.
HADOOP-4445 | contrib/capacity-sched | Changed JobTracker UI to better present the number of active tasks.
HADOOP-4576 | contrib/capacity-sched | Changed capacity scheduler UI to better present number of running and pending tasks.
HADOOP-4179 | contrib/chukwa | Introduced Vaidya rule based performance diagnostic tool for Map/Reduce jobs.
HADOOP-4827 | contrib/chukwa | Improved framework for data aggregation in Chukwa.
HADOOP-4843 | contrib/chukwa | Introduced Chukwa collection of job history.
HADOOP-5030 | contrib/chukwa | Changed RPM install location to the value specified by build.properties file.
HADOOP-5531 | contrib/chukwa | Disabled Chukwa unit tests for 0.20 branch only.
HADOOP-4789 | contrib/fair-share | Changed fair scheduler to divide resources equally between pools, not jobs.
HADOOP-4873 | contrib/fair-share | Changed fair scheduler UI to display minMaps and minReduces variables.
HADOOP-3750 | dfs | Removed deprecated method parseArgs from org.apache.hadoop.fs.FileSystem.
HADOOP-4029 | dfs | Added name node storage information to the dfshealth page, and moved data node information to a separate page.
HADOOP-4103 | dfs | Modified dfsadmin -report to report under-replicated blocks, blocks with corrupt replicas, and missing blocks.
HADOOP-4567 | dfs | Changed GetFileBlockLocations to return topology information for nodes that host the block replicas.
HADOOP-4572 | dfs | Moved org.apache.hadoop.hdfs.{CreateEditsLog, NNThroughputBenchmark} to org.apache.hadoop.hdfs.server.namenode.
HADOOP-4618 | dfs | Moved HTTP server from FSNameSystem to NameNode. Removed FSNamesystem.getNameNodeInfoPort(). Replaced FSNamesystem.getDFSNameNodeMachine() and FSNamesystem.getDFSNameNodePort() with new method FSNamesystem.getDFSNameNodeAddress(). Removed constructor NameNode(bindAddress, conf).
HADOOP-4826 | dfs | Introduced new dfsadmin command saveNamespace to command the name service to do an immediate save of the file system image.
HADOOP-4970 | dfs | Changed trash facility to use absolute path of the deleted file.
HADOOP-5468 | documentation | Reformatted HTML documentation for Hadoop to use submenus at the left column.
HADOOP-3497 | fs | Changed the semantics of file globbing with a PathFilter (using the globStatus method of FileSystem). Previously, the filtering was too restrictive, so that a glob of /*/* and a filter that only accepts /a/b would not have matched /a/b. With this change /a/b does match.
HADOOP-4234 | fs | Changed KFS glue layer to allow applications to interface with multiple KFS metaservers.
HADOOP-4422 | fs/s3 | Modified Hadoop file system to no longer create S3 buckets. Applications can create buckets for their S3 file systems by other means, for example, using the JetS3t API.
HADOOP-3063 | io | Introduced BloomMapFile subclass of MapFile that creates a Bloom filter from all keys.
HADOOP-1230 | mapred | Replaced parameters with context objects in Mapper, Reducer, Partitioner, InputFormat, and OutputFormat classes.
HADOOP-1650 | mapred | Upgraded all core servers to use Jetty 6.
HADOOP-3923 | mapred | Moved class org.apache.hadoop.mapred.StatusHttpServer to org.apache.hadoop.http.HttpServer.
HADOOP-3986 | mapred | Removed classes org.apache.hadoop.mapred.JobShell and org.apache.hadoop.mapred.TestJobShell. Removed from JobClient methods static void setCommandLineConfig(Configuration conf) and public static Configuration getCommandLineConfig().
HADOOP-4188 | mapred | Removed Task's dependency on concrete file systems by taking list from FileSystem class. Added statistics table to FileSystem class. Deprecated FileSystem method getStatistics(Class<? extends FileSystem> cls).
HADOOP-4210 | mapred | Changed public class org.apache.hadoop.mapreduce.ID to be an abstract class. Removed from class org.apache.hadoop.mapreduce.ID the methods public static ID read(DataInput in) and public static ID forName(String str).
HADOOP-4305 | mapred | Improved TaskTracker blacklisting strategy to better exclude faulty trackers from executing tasks.
HADOOP-4435 | mapred | Changed JobTracker web status page to display the amount of heap memory in use. This changes the JobSubmissionProtocol.
HADOOP-4565 | mapred | Improved MultiFileInputFormat so that multiple blocks from the same node or same rack can be combined into a single split.
HADOOP-4749 | mapred | Added a new counter REDUCE_INPUT_BYTES.
HADOOP-4783 | mapred | Changed history directory permissions to 750 and history file permissions to 740.
HADOOP-3422 | metrics | Changed names of ganglia metrics to avoid conflicts and to better identify source function.
HADOOP-4284 | security | Introduced HttpServer method to support global filters.
HADOOP-4575 | security | Introduced independent HSFTP proxy server for authenticated access to clusters.
HADOOP-4661 | tools/distcp | Introduced distch tool for parallel ch{mod, own, grp}.
+ + diff --git a/src/docs/src/documentation/README.txt b/src/docs/src/documentation/README.txt new file mode 100644 index 0000000..9bc261b --- /dev/null +++ b/src/docs/src/documentation/README.txt @@ -0,0 +1,7 @@ +This is the base documentation directory. + +skinconf.xml # This file customizes Forrest for your project. In it, you + # tell forrest the project name, logo, copyright info, etc + +sitemap.xmap # Optional. This sitemap is consulted before all core sitemaps. + # See http://forrest.apache.org/docs/project-sitemap.html diff --git a/src/docs/src/documentation/classes/CatalogManager.properties b/src/docs/src/documentation/classes/CatalogManager.properties new file mode 100644 index 0000000..ac060b9 --- /dev/null +++ b/src/docs/src/documentation/classes/CatalogManager.properties @@ -0,0 +1,37 @@ +# Copyright 2002-2004 The Apache Software Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#======================================================================= +# CatalogManager.properties +# +# This is the default properties file for Apache Forrest. +# This facilitates local configuration of application-specific catalogs. +# +# See the Apache Forrest documentation: +# http://forrest.apache.org/docs/your-project.html +# http://forrest.apache.org/docs/validation.html + +# verbosity ... level of messages for status/debug +# See forrest/src/core/context/WEB-INF/cocoon.xconf + +# catalogs ... list of additional catalogs to load +# (Note that Apache Forrest will automatically load its own default catalog +# from src/core/context/resources/schema/catalog.xcat) +# use full pathnames +# pathname separator is always semi-colon (;) regardless of operating system +# directory separator is always slash (/) regardless of operating system +# +#catalogs=/home/me/forrest/my-site/src/documentation/resources/schema/catalog.xcat +catalogs= + diff --git a/src/docs/src/documentation/conf/cli.xconf b/src/docs/src/documentation/conf/cli.xconf new file mode 100644 index 0000000..5c6e245 --- /dev/null +++ b/src/docs/src/documentation/conf/cli.xconf @@ -0,0 +1,327 @@ + + + + + + + + . + WEB-INF/cocoon.xconf + ../tmp/cocoon-work + ../site + + + + + + + + + + + + + + + index.html + + + + + + + */* + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/docs/src/documentation/content/xdocs/SLG_user_guide.xml b/src/docs/src/documentation/content/xdocs/SLG_user_guide.xml new file mode 100644 index 0000000..96ac622 --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/SLG_user_guide.xml @@ -0,0 +1,192 @@ + + + + +
+ HDFS Synthetic Load Generator Guide +
+ +
+ Description +

+ The synthetic load generator (SLG) is a tool for testing NameNode behavior + under different client loads. The user can generate different mixes + of read, write, and list requests by specifying the probabilities of + read and write. The user controls the intensity of the load by adjusting + parameters for the number of worker threads and the delay between + operations. While load generators are running, the user can profile and + monitor the running of the NameNode. When a load generator exits, it + prints some NameNode statistics like the average execution time of each + kind of operation and the NameNode throughput. +

+
+
+ Synopsis +

+ java LoadGenerator [options]
+

+

+ Options include:
+   -readProbability <read probability>
+     the probability of the read operation; + default is 0.3333.
+   -writeProbability <write probability>
+     the probability of the write + operations; default is 0.3333.
+   -root <test space root>
+     the root of the test space; + default is /testLoadSpace.
+   -maxDelayBetweenOps + <maxDelayBetweenOpsInMillis>
+     the maximum delay between two consecutive + operations in a thread; default is 0 indicating no delay. +
+   -numOfThreads <numOfThreads>
+     the number of threads to spawn; + default is 200.
+   -elapsedTime <elapsedTimeInSecs>
+     the number of seconds that the program + will run; A value of zero indicates that the program runs + forever. The default value is 0.
+   -startTime <startTimeInMillis>
+     the time that all worker threads + start to run. By default it is 10 seconds after the main + program starts running.This creates a barrier if more than + one load generator is running. +
+   -seed <seed>
+     the random generator seed for repeating + requests to NameNode when running with a single thread; + default is the current time.
+

+

+ After command line argument parsing, the load generator traverses + the test space and builds a table of all directories and another table + of all files in the test space. It then waits until the start time to + spawn the number of worker threads as specified by the user. Each + thread sends a stream of requests to the NameNode. At each iteration, + it first decides if it is going to read a file, create a file, or + list a directory following the read and write probabilities specified + by the user. The listing probability is equal to + 1 - read probability - write probability. When reading, + it randomly picks a file in the test space and reads the entire file. + When writing, it randomly picks a directory in the test space and + creates a file there. To prevent two threads in the same load + generator, or in two different load generators, from creating the same + file, the file name consists of the current machine's host name + and the thread id. The length of the file follows a Gaussian + distribution with an average size of 2 blocks and a standard + deviation of 1. The new file is filled with byte 'a'. To keep + the test space from growing indefinitely, the file is deleted immediately + after the file creation completes. When listing, it randomly + picks a directory in the test space and lists its content. + After an operation completes, the thread pauses for a random + amount of time in the range of [0, maxDelayBetweenOps] if the + specified maximum delay is not zero. All threads are stopped when + the specified elapsed time has passed. Before exiting, the program + prints the average execution time for each kind of NameNode operation, + and the number of requests served by the NameNode per second. +

+
+
+ Test Space Population +

+ The user needs to populate a test space before she runs a + load generator. The structure generator generates a random + test space structure and the data generator creates the files + and directories of the test space in Hadoop distributed file system. +

+
+ Structure Generator +

+ This tool generates a random namespace structure with the + following constraints: +

+
    +
  1. The number of subdirectories that a directory can have is + a random number in [minWidth, maxWidth].
  2. +
  3. The maximum depth of each subdirectory is a random number + [2*maxDepth/3, maxDepth].
  4. +
  5. Files are randomly placed in leaf directories. The size of + each file follows Gaussian distribution with an average size + of 1 block and a standard deviation of 1.
  6. +
+

+ The generated namespace structure is described by two files in + the output directory. Each line of the first file contains the + full name of a leaf directory. Each line of the second file + contains the full name of a file and its size, separated by a blank. +

+

+ The synopsis of the command is +

+

+ java StructureGenerator [options] +

+

+ Options include:
+   -maxDepth <maxDepth>
+     maximum depth of the directory tree; + default is 5.
+   -minWidth <minWidth>
+     minimum number of subdirectories per + directories; default is 1.
+   -maxWidth <maxWidth>
+     maximum number of subdirectories per + directories; default is 5.
+   -numOfFiles <#OfFiles>
+     the total number of files in the test + space; default is 10.
+   -avgFileSize <avgFileSizeInBlocks>
+     average size of blocks; default is 1. +
+   -outDir <outDir>
+     output directory; default is the + current directory.
+   -seed <seed>
+     random number generator seed; + default is the current time.
+

+
+
+ Test Space Generator +

+ This tool reads the directory structure and file structure from + the input directory and creates the namespace in Hadoop distributed + file system. All files are filled with byte 'a'. +

+

+ The synopsis of the command is +

+

+ java DataGenerator [options] +

+

+ Options include:
+   -inDir <inDir>
+     input directory name where directory/file + structures are stored; default is the current directory. +
+   -root <test space root>
+     the name of the root directory which the + new namespace is going to be placed under; + default is "/testLoadSpace".
+

+
+
+ +
diff --git a/src/docs/src/documentation/content/xdocs/capacity_scheduler.xml b/src/docs/src/documentation/content/xdocs/capacity_scheduler.xml new file mode 100644 index 0000000..f3a356f --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/capacity_scheduler.xml @@ -0,0 +1,379 @@ + + + + + + + +
+ Capacity Scheduler Guide +
+ + + +
+ Purpose + +

This document describes the Capacity Scheduler, a pluggable + Map/Reduce scheduler for Hadoop which provides a way to share + large clusters.

+
+ +
+ Features + +

The Capacity Scheduler supports the following features:

+
    +
  • + Support for multiple queues, where a job is submitted to a queue. +
  • +
  • + Queues are allocated a fraction of the capacity of the grid in the + sense that a certain capacity of resources will be at their + disposal. All jobs submitted to a queue will have access to the + capacity allocated to the queue. +
  • +
  • + Free resources can be allocated to any queue beyond its capacity. + When there is demand for these resources from queues running below + capacity at a future point in time, as tasks scheduled on these + resources complete, they will be assigned to jobs on queues + running below the capacity. +
  • +
  • + Queues optionally support job priorities (disabled by default). +
  • +
  • + Within a queue, jobs with higher priority will have access to the + queue's resources before jobs with lower priority. However, once a + job is running, it will not be preempted for a higher priority job, + though new tasks from the higher priority job will be + preferentially scheduled. +
  • +
  • + In order to prevent one or more users from monopolizing its + resources, each queue enforces a limit on the percentage of + resources allocated to a user at any given time, if there is + competition for them. +
  • +
  • + Support for memory-intensive jobs, wherein a job can optionally + specify higher memory-requirements than the default, and the tasks + of the job will only be run on TaskTrackers that have enough memory + to spare. +
  • +
+
+ +
+ Picking a task to run + +

Note that many of these steps can be, and will be, enhanced over time + to provide better algorithms.

+ +

Whenever a TaskTracker is free, the Capacity Scheduler picks + a queue which has most free space (whose ratio of # of running slots to + capacity is the lowest).

+ +

Once a queue is selected, the Scheduler picks a job in the queue. Jobs + are sorted based on when they're submitted and their priorities (if the + queue supports priorities). Jobs are considered in order, and a job is + selected if its user is within the user-quota for the queue, i.e., the + user is not already using queue resources above his/her limit. The + Scheduler also makes sure that there is enough free memory in the + TaskTracker to run the job's task, in case the job has special memory + requirements.

+ +

Once a job is selected, the Scheduler picks a task to run. This logic + to pick a task remains unchanged from earlier versions.

+ +
+ +
+ Installation + +

The Capacity Scheduler is available as a JAR file in the Hadoop + tarball under the contrib/capacity-scheduler directory. The name of + the JAR file would be on the lines of hadoop-*-capacity-scheduler.jar.

+

You can also build the Scheduler from source by executing + ant package, in which case it would be available under + build/contrib/capacity-scheduler.

+

To run the Capacity Scheduler in your Hadoop installation, you need + to put it on the CLASSPATH. The easiest way is to copy the + hadoop-*-capacity-scheduler.jar from + to HADOOP_HOME/lib. Alternatively, you can modify + HADOOP_CLASSPATH to include this jar, in + conf/hadoop-env.sh.

+
+ +
+ Configuration + +
+ Using the Capacity Scheduler +

+ To make the Hadoop framework use the Capacity Scheduler, set up + the following property in the site configuration:

+ + + + + + + + + +
Property | Value
mapred.jobtracker.taskScheduler | org.apache.hadoop.mapred.CapacityTaskScheduler
+
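For illustration only, a minimal sketch of what the corresponding site-configuration entry might look like (the property name and value are taken from the table above; placing it in conf/mapred-site.xml is an assumption about which site file is meant):

  <property>
    <name>mapred.jobtracker.taskScheduler</name>
    <value>org.apache.hadoop.mapred.CapacityTaskScheduler</value>
  </property>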
+ +
+ Setting up queues +

+ You can define multiple queues to which users can submit jobs with + the Capacity Scheduler. To define multiple queues, you should edit + the site configuration for Hadoop and modify the + mapred.queue.names property. +

+

+ You can also configure ACLs for controlling which users or groups + have access to the queues. +

+

+ For more details, refer to + Cluster + Setup documentation. +

+
+ +
+ Configuring properties for queues + +

The Capacity Scheduler can be configured with several properties + for each queue that control the behavior of the Scheduler. This + configuration is in the conf/capacity-scheduler.xml. By + default, the configuration is set up for one queue, named + default.

+

To specify a property for a queue that is defined in the site + configuration, you should use the property name as + mapred.capacity-scheduler.queue.<queue-name>.<property-name>. +

+

For example, to define the property capacity + for queue named research, you should specify the property + name as + mapred.capacity-scheduler.queue.research.capacity. +

+ +
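As a hedged sketch of the example above: the queue list would go in the site configuration and the per-queue capacity in conf/capacity-scheduler.xml. The queue names default and research come from this section; the capacity value of 20 is a made-up placeholder, not a recommendation.

  <!-- site configuration: declare the queues (value list is illustrative) -->
  <property>
    <name>mapred.queue.names</name>
    <value>default,research</value>
  </property>

  <!-- conf/capacity-scheduler.xml: capacity for the research queue (20 is a placeholder) -->
  <property>
    <name>mapred.capacity-scheduler.queue.research.capacity</name>
    <value>20</value>
  </property>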

The properties defined for queues and their descriptions are + listed in the table below:

+ + + + + + + + + + + + + + + +
Name | Description
mapred.capacity-scheduler.queue.<queue-name>.capacity | Percentage of the number of slots in the cluster that are made available for jobs in this queue. The sum of capacities for all queues should be less than or equal to 100.
mapred.capacity-scheduler.queue.<queue-name>.supports-priority | If true, priorities of jobs will be taken into account in scheduling decisions.
mapred.capacity-scheduler.queue.<queue-name>.minimum-user-limit-percent | Each queue enforces a limit on the percentage of resources allocated to a user at any given time, if there is competition for them. This user limit can vary between a minimum and maximum value. The former depends on the number of users who have submitted jobs, and the latter is set to this property value. For example, suppose the value of this property is 25. If two users have submitted jobs to a queue, no single user can use more than 50% of the queue resources. If a third user submits a job, no single user can use more than 33% of the queue resources. With 4 or more users, no user can use more than 25% of the queue's resources. A value of 100 implies no user limits are imposed.
mapred.capacity-scheduler.queue.<queue-name>.maximum-capacity | maximum-capacity defines a limit beyond which a queue cannot use the capacity of the cluster. This provides a means to limit how much excess capacity a queue can use. By default, there is no limit. The maximum-capacity of a queue can only be greater than or equal to its minimum capacity. The default value of -1 implies a queue can use the complete capacity of the cluster. This property can be used to keep long-running jobs from occupying more than a certain percentage of the cluster, which, in the absence of pre-emption, could lead to the capacity guarantees of other queues being affected. One important thing to note is that maximum-capacity is a percentage, so it tracks the cluster's capacity: if a large number of nodes or racks is added to the cluster, the maximum capacity in absolute terms increases accordingly.
+
+ +
+ Memory management + +

The Capacity Scheduler supports scheduling of tasks on a + TaskTracker(TT) based on a job's memory requirements + and the availability of RAM and Virtual Memory (VMEM) on the TT node. + See the Hadoop + Map/Reduce tutorial for details on how the TT monitors + memory usage.

+

Currently, memory-based scheduling is only supported + on the Linux platform.

+

Memory-based scheduling works as follows:

+
    +
  1. The absence of any one or more of three config parameters + or -1 being set as value of any of the parameters, + mapred.tasktracker.vmem.reserved, + mapred.task.default.maxvmem, or + mapred.task.limit.maxvmem, disables memory-based + scheduling, just as it disables memory monitoring for a TT. These + config parameters are described in the + Hadoop Map/Reduce + tutorial. The value of + mapred.tasktracker.vmem.reserved is + obtained from the TT via its heartbeat. +
  2. +
  3. If all the three mandatory parameters are set, the Scheduler + enables VMEM-based scheduling. First, the Scheduler computes the free + VMEM on the TT. This is the difference between the available VMEM on the + TT (the node's total VMEM minus the offset, both of which are sent by + the TT on each heartbeat)and the sum of VMs already allocated to + running tasks (i.e., sum of the VMEM task-limits). Next, the Scheduler + looks at the VMEM requirements for the job that's first in line to + run. If the job's VMEM requirements are less than the available VMEM on + the node, the job's task can be scheduled. If not, the Scheduler + ensures that the TT does not get a task to run (provided the job + has tasks to run). This way, the Scheduler ensures that jobs with + high memory requirements are not starved, as eventually, the TT + will have enough VMEM available. If the high-mem job does not have + any task to run, the Scheduler moves on to the next job. +
  4. +
  5. In addition to VMEM, the Capacity Scheduler can also consider + RAM on the TT node. RAM is considered the same way as VMEM. TTs report + the total RAM available on their node, and an offset. If both are + set, the Scheduler computes the available RAM on the node. Next, + the Scheduler figures out the RAM requirements of the job, if any. + As with VMEM, users can optionally specify a RAM limit for their job + (mapred.task.maxpmem, described in the Map/Reduce + tutorial). The Scheduler also maintains a limit for this value + (mapred.capacity-scheduler.task.default-pmem-percentage-in-vmem, + described below). All these three values must be set for the + Scheduler to schedule tasks based on RAM constraints. +
  6. +
  7. The Scheduler ensures that jobs cannot ask for RAM or VMEM higher + than configured limits. If this happens, the job is failed when it + is submitted. +
  8. +
+ +

As described above, the additional scheduler-based config + parameters are as follows:

+ + + + + + + + + +
Name | Description
mapred.capacity-scheduler.task.default-pmem-percentage-in-vmem | A percentage of the default VMEM limit for jobs (mapred.task.default.maxvmem). This is the default RAM task-limit associated with a task. Unless overridden by a job's setting, this number defines the RAM task-limit.
mapred.capacity-scheduler.task.limit.maxpmem | Configuration which provides an upper limit to the maximum physical memory which can be specified by a job. If a job requires more physical memory than what is specified in this limit then the same is rejected.
+
+
+ Job Initialization Parameters +

The Capacity Scheduler lazily initializes jobs before they are + scheduled, to reduce the memory footprint on the JobTracker. + The laziness of job initialization is controlled by the + following parameters, which can be + configured in capacity-scheduler.xml. +

+ + + + + + + + + + + + + + + +
Name | Description
mapred.capacity-scheduler.queue.<queue-name>.maximum-initialized-jobs-per-user | Maximum number of jobs which are allowed to be pre-initialized for a particular user in the queue. Once a job is scheduled, i.e. it starts running, that job is no longer considered when the scheduler computes the maximum number of jobs a user is allowed to initialize.
mapred.capacity-scheduler.init-poll-interval | Amount of time in milliseconds used to poll the scheduler job queue to look for jobs to be initialized.
mapred.capacity-scheduler.init-worker-threads | Number of worker threads used by the initialization poller to initialize jobs in a set of queues. If the number mentioned in the property is equal to the number of job queues, then each thread is assigned jobs from one queue. If the number configured is less than the number of queues, then a thread can get jobs from more than one queue, which it initializes in a round robin fashion. If the number configured is greater than the number of queues, then the number of threads spawned is equal to the number of job queues.
+
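A hedged sketch of what these entries might look like in capacity-scheduler.xml; the property names come from the table above, but the numeric values below are arbitrary placeholders rather than recommended settings, and the queue name default is only an example.

  <property>
    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-jobs-per-user</name>
    <value>2</value>      <!-- placeholder -->
  </property>
  <property>
    <name>mapred.capacity-scheduler.init-poll-interval</name>
    <value>5000</value>   <!-- placeholder, in milliseconds -->
  </property>
  <property>
    <name>mapred.capacity-scheduler.init-worker-threads</name>
    <value>5</value>      <!-- placeholder -->
  </property>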
+
+ Reviewing the configuration of the Capacity Scheduler +

+ Once the installation and configuration is completed, you can review + it after starting the Map/Reduce cluster from the admin UI. +

+
    +
  • Start the Map/Reduce cluster as usual.
  • +
  • Open the JobTracker web UI.
  • +
  • The queues you have configured should be listed under the Scheduling + Information section of the page.
  • +
  • The properties for the queues should be visible in the Scheduling + Information column against each queue.
  • +
+
+ +
+ + +
diff --git a/src/docs/src/documentation/content/xdocs/cluster_setup.xml b/src/docs/src/documentation/content/xdocs/cluster_setup.xml new file mode 100644 index 0000000..e5c7009 --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/cluster_setup.xml @@ -0,0 +1,929 @@ + + + + + + + +
+ Cluster Setup +
+ + + +
+ Purpose + +

This document describes how to install, configure and manage non-trivial + Hadoop clusters ranging from a few nodes to extremely large clusters with + thousands of nodes.

+

+ To play with Hadoop, you may first want to install Hadoop on a single machine (see Hadoop Quick Start). +

+
+ +
+ Pre-requisites + +
    +
  1. + Make sure all requisite software + is installed on all nodes in your cluster. +
  2. +
  3. + Get the Hadoop software. +
  4. +
+
+ +
+ Installation + +

Installing a Hadoop cluster typically involves unpacking the software + on all the machines in the cluster.

+ +

Typically one machine in the cluster is designated as the + NameNode and another machine as the JobTracker, + exclusively. These are the masters. The rest of the machines in + the cluster act as both DataNode and + TaskTracker. These are the slaves.

+ +

The root of the distribution is referred to as + HADOOP_HOME. All machines in the cluster usually have the same + HADOOP_HOME path.

+
+ +
+ Configuration + +

The following sections describe how to configure a Hadoop cluster.

+ +
+ Configuration Files + +

Hadoop configuration is driven by two types of important + configuration files:

+
    +
  1. + Read-only default configuration - + src/core/core-default.xml, + src/hdfs/hdfs-default.xml and + src/mapred/mapred-default.xml. +
  2. +
  3. + Site-specific configuration - + conf/core-site.xml, + conf/hdfs-site.xml and + conf/mapred-site.xml. +
  4. +
+ +

To learn more about how the Hadoop framework is controlled by these + configuration files, look + here.

+ +

Additionally, you can control the Hadoop scripts found in the + bin/ directory of the distribution, by setting site-specific + values via the conf/hadoop-env.sh.

+
+ +
+ Site Configuration + +

To configure the Hadoop cluster you will need to configure the + environment in which the Hadoop daemons execute as well as + the configuration parameters for the Hadoop daemons.

+ +

The Hadoop daemons are NameNode/DataNode + and JobTracker/TaskTracker.

+ +
+ Configuring the Environment of the Hadoop Daemons + +

Administrators should use the conf/hadoop-env.sh script + to do site-specific customization of the Hadoop daemons' process + environment.

+ +

At the very least you should specify the + JAVA_HOME so that it is correctly defined on each + remote node.

+ +

Administrators can configure individual daemons using the + configuration options HADOOP_*_OPTS. Various options + available are shown below in the table.

+ + + + + + + + +
Daemon | Configure Options
NameNode | HADOOP_NAMENODE_OPTS
DataNode | HADOOP_DATANODE_OPTS
SecondaryNamenode | HADOOP_SECONDARYNAMENODE_OPTS
JobTracker | HADOOP_JOBTRACKER_OPTS
TaskTracker | HADOOP_TASKTRACKER_OPTS
+ +

For example, To configure Namenode to use parallelGC, the + following statement should be added in hadoop-env.sh : +
+ export HADOOP_NAMENODE_OPTS="-XX:+UseParallelGC ${HADOOP_NAMENODE_OPTS}" +

+ +

Other useful configuration parameters that you can customize + include:

+
    +
  • + HADOOP_LOG_DIR - The directory where the daemons' + log files are stored. They are automatically created if they don't + exist. +
  • +
  • + HADOOP_HEAPSIZE - The maximum amount of heapsize + to use, in MB e.g. 1000MB. This is used to + configure the heap size for the hadoop daemon. By default, + the value is 1000MB. +
  • +
+
+ +
+ Configuring the Hadoop Daemons + +

This section deals with important parameters to be specified in the + following: +
+ conf/core-site.xml:

+ + + + + + + + + + + + +
Parameter | Value | Notes
fs.default.name | URI of NameNode. | hdfs://hostname/
+ +
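For example, a minimal conf/core-site.xml entry might look like the following; the host name is a placeholder chosen for illustration.

  <property>
    <name>fs.default.name</name>
    <value>hdfs://namenode.example.com/</value>   <!-- placeholder host -->
  </property>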


conf/hdfs-site.xml:

+ + + + + + + + + + + + + + + + + +
ParameterValueNotes
dfs.name.dir + Path on the local filesystem where the NameNode + stores the namespace and transactions logs persistently. + If this is a comma-delimited list of directories then the name + table is replicated in all of the directories, for redundancy. +
dfs.data.dir + Comma separated list of paths on the local filesystem of a + DataNode where it should store its blocks. + + If this is a comma-delimited list of directories, then data will + be stored in all named directories, typically on different + devices. +
+ +
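A sketch of the corresponding conf/hdfs-site.xml entries; the local paths below are placeholders chosen for illustration, not prescribed locations.

  <property>
    <name>dfs.name.dir</name>
    <value>/var/hadoop/dfs/name</value>                          <!-- placeholder path -->
  </property>
  <property>
    <name>dfs.data.dir</name>
    <value>/mnt/disk1/dfs/data,/mnt/disk2/dfs/data</value>       <!-- placeholder paths -->
  </property>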


conf/mapred-site.xml:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Parameter | Value | Notes
mapred.job.tracker | Host or IP and port of JobTracker. | host:port pair.
mapred.system.dir | Path on the HDFS where the Map/Reduce framework stores system files, e.g. /hadoop/mapred/system/. | This is in the default filesystem (HDFS) and must be accessible from both the server and client machines.
mapred.local.dir | Comma-separated list of paths on the local filesystem where temporary Map/Reduce data is written. | Multiple paths help spread disk i/o.
mapred.tasktracker.{map|reduce}.tasks.maximum | The maximum number of Map/Reduce tasks, which are run simultaneously on a given TaskTracker, individually. | Defaults to 2 (2 maps and 2 reduces), but vary it depending on your hardware.
dfs.hosts/dfs.hosts.exclude | List of permitted/excluded DataNodes. | If necessary, use these files to control the list of allowable datanodes.
mapred.hosts/mapred.hosts.exclude | List of permitted/excluded TaskTrackers. | If necessary, use these files to control the list of allowable TaskTrackers.
mapred.queue.names | Comma separated list of queues to which jobs can be submitted. | The Map/Reduce system always supports at least one queue with the name default. Hence, this parameter's value should always contain the string default. Some job schedulers supported in Hadoop, like the Capacity Scheduler, support multiple queues. If such a scheduler is being used, the list of configured queue names must be specified here. Once queues are defined, users can submit jobs to a queue using the property name mapred.job.queue.name in the job configuration. There could be a separate configuration file for configuring properties of these queues that is managed by the scheduler. Refer to the documentation of the scheduler for information on the same.
mapred.acls.enabled | Specifies whether ACLs are supported for controlling job submission and administration. | If true, ACLs would be checked while submitting and administering jobs. ACLs can be specified using the configuration parameters of the form mapred.queue.queue-name.acl-name, defined below.
+ +
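A hedged conf/mapred-site.xml sketch tying a few of these parameters together. The host, port and local paths are placeholders; /hadoop/mapred/system/ is the example path given in the table above.

  <property>
    <name>mapred.job.tracker</name>
    <value>jobtracker.example.com:9001</value>                           <!-- placeholder host:port -->
  </property>
  <property>
    <name>mapred.system.dir</name>
    <value>/hadoop/mapred/system/</value>
  </property>
  <property>
    <name>mapred.local.dir</name>
    <value>/mnt/disk1/mapred/local,/mnt/disk2/mapred/local</value>       <!-- placeholder paths -->
  </property>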


conf/mapred-queue-acls.xml

+ + + + + + + + + + + + + + + + + +
Parameter | Value | Notes
mapred.queue.queue-name.acl-submit-job | List of users and groups that can submit jobs to the specified queue-name. | The list of users and the list of groups are both comma separated lists of names. The two lists are separated by a blank. Example: user1,user2 group1,group2. If you wish to define only a list of groups, provide a blank at the beginning of the value.
mapred.queue.queue-name.acl-administer-job | List of users and groups that can change the priority of, or kill, jobs that have been submitted to the specified queue-name. | The list of users and the list of groups are both comma separated lists of names. The two lists are separated by a blank. Example: user1,user2 group1,group2. If you wish to define only a list of groups, provide a blank at the beginning of the value. Note that an owner of a job can always change the priority of or kill his/her own job, irrespective of the ACLs.
+ + +
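For illustration, a sketch of a conf/mapred-queue-acls.xml entry for the default queue, reusing the example user and group lists from the table above (user1,user2 group1,group2 are placeholders).

  <property>
    <name>mapred.queue.default.acl-submit-job</name>
    <value>user1,user2 group1,group2</value>
  </property>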

Typically all the above parameters are marked as + + final to ensure that they cannot be overridden by user-applications. +
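As a hedged illustration of marking a parameter final, using mapred.system.dir from the table above with the table's example path:

  <property>
    <name>mapred.system.dir</name>
    <value>/hadoop/mapred/system/</value>
    <final>true</final>
  </property>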

+ +
+ Real-World Cluster Configurations + +

This section lists some non-default configuration parameters which + have been used to run the sort benchmark on very large + clusters.

+ +
    +
  • +

    Some non-default configuration values used to run sort900, + that is 9TB of data sorted on a cluster with 900 nodes:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Configuration File | Parameter | Value | Notes
    conf/hdfs-site.xml | dfs.block.size | 134217728 | HDFS blocksize of 128MB for large file-systems.
    conf/hdfs-site.xml | dfs.namenode.handler.count | 40 | More NameNode server threads to handle RPCs from large number of DataNodes.
    conf/mapred-site.xml | mapred.reduce.parallel.copies | 20 | Higher number of parallel copies run by reduces to fetch outputs from very large number of maps.
    conf/mapred-site.xml | mapred.map.child.java.opts | -Xmx512M | Larger heap-size for child jvms of maps.
    conf/mapred-site.xml | mapred.reduce.child.java.opts | -Xmx512M | Larger heap-size for child jvms of reduces.
    conf/core-site.xml | fs.inmemory.size.mb | 200 | Larger amount of memory allocated for the in-memory file-system used to merge map-outputs at the reduces.
    conf/core-site.xml | io.sort.factor | 100 | More streams merged at once while sorting files.
    conf/core-site.xml | io.sort.mb | 200 | Higher memory-limit while sorting data.
    conf/core-site.xml | io.file.buffer.size | 131072 | Size of read/write buffer used in SequenceFiles.
    +
  • +
  • +

    Updates to some configuration values to run sort1400 and + sort2000, that is 14TB of data sorted on 1400 nodes and 20TB of + data sorted on 2000 nodes:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Configuration File | Parameter | Value | Notes
    conf/mapred-site.xml | mapred.job.tracker.handler.count | 60 | More JobTracker server threads to handle RPCs from large number of TaskTrackers.
    conf/mapred-site.xml | mapred.reduce.parallel.copies | 50 |
    conf/mapred-site.xml | tasktracker.http.threads | 50 | More worker threads for the TaskTracker's http server. The http server is used by reduces to fetch intermediate map-outputs.
    conf/mapred-site.xml | mapred.map.child.java.opts | -Xmx512M | Larger heap-size for child jvms of maps.
    conf/mapred-site.xml | mapred.reduce.child.java.opts | -Xmx1024M | Larger heap-size for child jvms of reduces.
    +
  • +
+
+ +
+ Task Controllers +

Task controllers are classes in the Hadoop Map/Reduce + framework that define how user's map and reduce tasks + are launched and controlled. They can + be used in clusters that require some customization in + the process of launching or controlling the user tasks. + For example, in some + clusters, there may be a requirement to run tasks as + the user who submitted the job, instead of as the task + tracker user, which is how tasks are launched by default. + This section describes how to configure and use + task controllers.

+

The following task controllers are available in + Hadoop. +

+ + + + + + + + + + + + +
Name | Class Name | Description
DefaultTaskController | org.apache.hadoop.mapred.DefaultTaskController | The default task controller which Hadoop uses to manage task execution. The tasks run as the task tracker user.
LinuxTaskController | org.apache.hadoop.mapred.LinuxTaskController | This task controller, which is supported only on Linux, runs the tasks as the user who submitted the job. It requires these user accounts to be created on the cluster nodes where the tasks are launched. It uses a setuid executable that is included in the Hadoop distribution. The task tracker uses this executable to launch and kill tasks. The setuid executable switches to the user who has submitted the job and launches or kills the tasks. Currently, this task controller opens up permissions to local files and directories used by the tasks such as the job jar files, distributed archive files, intermediate files and task log files. In future, it is expected that stricter file permissions are used.
+
+ Configuring Task Controllers +

The task controller to be used can be configured by setting the + value of the following key in mapred-site.xml

+ + + + + + + + + +
Property | Value | Notes
mapred.task.tracker.task-controller | Fully qualified class name of the task controller class | Currently there are two implementations of task controller in the Hadoop system, DefaultTaskController and LinuxTaskController. Refer to the class names mentioned above to determine the value to set for the class of choice.
+
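A minimal sketch of the mapred-site.xml entry, here selecting the LinuxTaskController class listed above (DefaultTaskController would be configured the same way):

  <property>
    <name>mapred.task.tracker.task-controller</name>
    <value>org.apache.hadoop.mapred.LinuxTaskController</value>
  </property>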
+
+ Using the LinuxTaskController +

This section of the document describes the steps required to + use the LinuxTaskController.

+ +

In order to use the LinuxTaskController, a setuid executable + should be built and deployed on the compute nodes. The + executable is named task-controller. To build the executable, + execute + ant task-controller -Dhadoop.conf.dir=/path/to/conf/dir. + + The path passed in -Dhadoop.conf.dir should be the path + on the cluster nodes where a configuration file for the setuid + executable would be located. The executable would be built to + build.dir/dist.dir/bin and should be installed to + $HADOOP_HOME/bin. +

+ +

+ The executable must be deployed as a setuid executable, by changing + the ownership to root, group ownership to that of tasktracker + and giving it permissions 4510. Please note that the + group which owns task-controller should contain only tasktracker + as its member and not users who submit jobs. +

+ +

The executable requires a configuration file called + taskcontroller.cfg to be + present in the configuration directory passed to the ant target + mentioned above. If the binary was not built with a specific + conf directory, the path defaults to /path-to-binary/../conf. +

+ +

The executable requires following configuration items to be + present in the taskcontroller.cfg file. The items should + be mentioned as simple key=value pairs. +

+ + + + + +
Name | Description
mapred.local.dir | Path to mapred local directories. Should be the same as the value which was provided to this key in mapred-site.xml. This is required to validate paths passed to the setuid executable in order to prevent arbitrary paths being passed to it.
+ +

+ The LinuxTaskController requires that the paths leading up to + the directories specified in + mapred.local.dir and hadoop.log.dir have 755 permissions, + and that the directories themselves have 777 permissions. +

+
+ +
+
+ Monitoring Health of TaskTracker Nodes +

Hadoop Map/Reduce provides a mechanism by which administrators + can configure the TaskTracker to run an administrator supplied + script periodically to determine if a node is healthy or not. + Administrators can determine if the node is in a healthy state + by performing any checks of their choice in the script. If the + script detects the node to be in an unhealthy state, it must print + a line to standard output beginning with the string ERROR. + The TaskTracker spawns the script periodically and checks its + output. If the script's output contains the string ERROR, + as described above, the node's status is reported as 'unhealthy' + and the node is black-listed on the JobTracker. No further tasks + will be assigned to this node. However, the + TaskTracker continues to run the script, so that if the node + becomes healthy again, it will be removed from the blacklisted + nodes on the JobTracker automatically. The node's health + along with the output of the script, if it is unhealthy, is + available to the administrator in the JobTracker's web interface. + The time since the node was healthy is also displayed on the + web interface. +

+ +
+ Configuring the Node Health Check Script +

The following parameters can be used to control the node health + monitoring script in mapred-site.xml.

+ + + + + + + + + + + + + + + + + +
Name | Description
mapred.healthChecker.script.path | Absolute path to the script which is periodically run by the TaskTracker to determine if the node is healthy or not. The file should be executable by the TaskTracker. If the value of this key is empty or the file does not exist or is not executable, node health monitoring is not started.
mapred.healthChecker.interval | Frequency at which the node health script is run, in milliseconds.
mapred.healthChecker.script.timeout | Time after which the node health script will be killed by the TaskTracker if unresponsive. The node is marked unhealthy if the node health script times out.
mapred.healthChecker.script.args | Extra arguments that can be passed to the node health script when launched. These should be a comma separated list of arguments.
+
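A hedged mapred-site.xml sketch for the health-check parameters; the script path and the numeric values are placeholders chosen only to show the shape of the configuration.

  <property>
    <name>mapred.healthChecker.script.path</name>
    <value>/usr/local/hadoop/bin/node-health.sh</value>   <!-- placeholder path -->
  </property>
  <property>
    <name>mapred.healthChecker.interval</name>
    <value>60000</value>                                  <!-- placeholder, in milliseconds -->
  </property>
  <property>
    <name>mapred.healthChecker.script.timeout</name>
    <value>600000</value>                                 <!-- placeholder, in milliseconds -->
  </property>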
+
+ +
+
+ Memory monitoring +

A TaskTracker(TT) can be configured to monitor memory + usage of tasks it spawns, so that badly-behaved jobs do not bring + down a machine due to excess memory consumption. With monitoring + enabled, every task is assigned a task-limit for virtual memory (VMEM). + In addition, every node is assigned a node-limit for VMEM usage. + A TT ensures that a task is killed if it, and + its descendants, use VMEM over the task's per-task limit. It also + ensures that one or more tasks are killed if the sum total of VMEM + usage by all tasks, and their descendents, cross the node-limit.

+ +

Users can, optionally, specify the VMEM task-limit per job. If no + such limit is provided, a default limit is used. A node-limit can be + set per node.
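As a sketch only: assuming the per-job limit is exposed through the mapred.task.maxvmem job property (an assumption; check your release), and assuming the job's main class uses ToolRunner so that generic options are honored, a 2 GB per-task VMEM limit could be requested at submission time. The jar name, class name and paths below are placeholders.

# Hypothetical submission requesting a 2 GB (2147483648 bytes) per-task VMEM limit.
$ bin/hadoop jar myjob.jar MyJob -D mapred.task.maxvmem=2147483648 input output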

+

Currently, memory monitoring and management is supported + only on the Linux platform.

+

To enable monitoring for a TT, the + following parameters all need to be set:

+ + + + + + + + + +
NameTypeDescription
mapred.tasktracker.vmem.reservedlongA number, in bytes, that represents an offset. The total VMEM on + the machine, minus this offset, is the VMEM node-limit for all + tasks, and their descendants, spawned by the TT. +
mapred.task.default.maxvmemlongA number, in bytes, that represents the default VMEM task-limit + associated with a task. Unless overridden by a job's setting, + this number defines the VMEM task-limit. +
mapred.task.limit.maxvmemlongA number, in bytes, that represents the upper VMEM task-limit + associated with a task. Users, when specifying a VMEM task-limit + for their tasks, should not specify a limit which exceeds this amount. +
+ +

In addition, the following parameters can also be configured.

+ + + + + + +
NameTypeDescription
mapred.tasktracker.taskmemorymanager.monitoring-intervallongThe time interval, in milliseconds, at which the TT + checks for any memory violation. The default value is 5000 msec + (5 seconds). +
+ +

Here's how the memory monitoring works for a TT.

+
    +
  1. If one or more of the configuration parameters described + above are missing or -1 is specified, memory monitoring is + disabled for the TT. +
  2. +
  3. In addition, monitoring is disabled if + mapred.task.default.maxvmem is greater than + mapred.task.limit.maxvmem. +
  4. +
  5. If a TT receives a task whose task-limit is set by the user + to a value larger than mapred.task.limit.maxvmem, it + logs a warning but executes the task. +
  6. +
  7. Periodically, the TT checks the following: +
      +
    • If any task's current VMEM usage is greater than that task's + VMEM task-limit, the task is killed and the reason for killing + the task is logged in the task diagnostics. Such a task is considered + failed, i.e., the killing counts towards the task's failure count. +
    • +
    • If the sum total of VMEM used by all tasks and descendants is + greater than the node-limit, the TT kills enough tasks, in the + order of least progress made, until the overall VMEM usage falls + below the node-limit. Such killed tasks are not considered failed + and their killing does not count towards the tasks' failure counts. +
    • +
    +
  8. +
+ +

Schedulers can choose to ease the monitoring pressure on the TT by + preventing too many tasks from running on a node and by scheduling + tasks only if the TT has enough VMEM free. In addition, Schedulers may + choose to consider the physical memory (RAM) available on the node + as well. To enable Scheduler support, TTs report their memory settings + to the JobTracker in every heartbeat. Before getting into details, + consider the following additional memory-related parameters that can be + configured to enable better scheduling:

+ + + + + +
NameTypeDescription
mapred.tasktracker.pmem.reservedintA number, in bytes, that represents an offset. The total + physical memory (RAM) on the machine, minus this offset, is the + recommended RAM node-limit. The RAM node-limit is a hint to a + Scheduler to schedule only so many tasks such that the sum + total of their RAM requirements does not exceed this limit. + RAM usage is not monitored by a TT. +
+ +

A TT reports the following memory-related numbers in every + heartbeat:

+
    +
  • The total VMEM available on the node.
  • +
  • The value of mapred.tasktracker.vmem.reserved, + if set.
  • +
  • The total RAM available on the node.
  • +
  • The value of mapred.tasktracker.pmem.reserved, + if set.
  • +
+
+ +
+ Slaves + +

Typically you choose one machine in the cluster to act as the + NameNode and one machine to act as the + JobTracker, exclusively. The rest of the machines act as + both a DataNode and TaskTracker and are + referred to as slaves.

+ +

List all slave hostnames or IP addresses in your + conf/slaves file, one per line.
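For example, a conf/slaves file for a three-node cluster might contain the following (the hostnames are placeholders):

slave01.example.com
slave02.example.com
slave03.example.com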

+
+ +
+ Logging + +

Hadoop uses the Apache + log4j via the Apache + Commons Logging framework for logging. Edit the + conf/log4j.properties file to customize the Hadoop + daemons' logging configuration (log-formats and so on).

+ +
+ History Logging + +

The job history files are stored in the central location + hadoop.job.history.location, which can also be on DFS; + its default value is ${HADOOP_LOG_DIR}/history. + The history web UI is accessible from the job tracker web UI.

+ +

The history files are also logged to the user-specified directory + hadoop.job.history.user.location, + which defaults to the job output directory. The files are stored in + "_logs/history/" in the specified directory. Hence, by default + they will be in "mapred.output.dir/_logs/history/". Users can stop + logging by giving the value none for + hadoop.job.history.user.location.

+ +

Users can view a summary of the history logs in the specified directory + using the following command:
+ $ bin/hadoop job -history output-dir
+ This command will print job details, failed and killed tip + details.
+ More details about the job, such as successful tasks and + task attempts made for each task, can be viewed using the + following command:
+ $ bin/hadoop job -history all output-dir

+
+
+
+ +

Once all the necessary configuration is complete, distribute the files + to the HADOOP_CONF_DIR directory on all the machines, + typically ${HADOOP_HOME}/conf.

+
+
+ Cluster Restartability +
+ Map/Reduce +

The job tracker can recover running jobs on restart if + mapred.jobtracker.restart.recover is set to true and + JobHistory logging is enabled. Also, the + mapred.jobtracker.job.history.block.size value should be + set to an optimal value to dump job history to disk as soon as + possible; the typical value is 3145728 (3 MB).

+
+
+ +
+ Hadoop Rack Awareness +

The HDFS and the Map/Reduce components are rack-aware.

+

The NameNode and the JobTracker obtain the + rack id of the slaves in the cluster by invoking an API + resolve in an administrator-configured + module. The API resolves the slave's DNS name (or IP address) to a + rack id. Which module to use can be configured using the configuration + item topology.node.switch.mapping.impl. The default + implementation runs a script/command configured using + topology.script.file.name. If topology.script.file.name is + not set, the rack id /default-rack is returned for any + passed IP address. The additional configuration in the Map/Reduce + part is mapred.cache.task.levels, which determines the number + of levels (in the network topology) of caches. So, for example, if it is + the default value of 2, two levels of caches will be constructed - + one for hosts (host -> task mapping) and another for racks + (rack -> task mapping). +
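As an illustration, a minimal topology script could look like the sketch below. The mapping file /etc/hadoop/topology.data (lines of the form "host rack-id") is an assumed local convention, not something Hadoop provides; the script only has to print one rack id per argument it is given.

#!/bin/bash
# Sketch of a script usable via topology.script.file.name.
# Arguments are IP addresses or hostnames; output is one rack id per argument.
for host in "$@"; do
  # /etc/hadoop/topology.data is a hypothetical "host rack-id" mapping file.
  rack=$(awk -v h="$host" '$1 == h {print $2; exit}' /etc/hadoop/topology.data)
  echo "${rack:-/default-rack}"
done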

+
+ +
+ Hadoop Startup + +

To start a Hadoop cluster you will need to start both the HDFS and + Map/Reduce cluster.

+ +

+ Format a new distributed filesystem:
+ $ bin/hadoop namenode -format +

+ +

+ Start the HDFS with the following command, run on the designated + NameNode:
+ $ bin/start-dfs.sh +

+

The bin/start-dfs.sh script also consults the + ${HADOOP_CONF_DIR}/slaves file on the NameNode + and starts the DataNode daemon on all the listed slaves.

+ +

+ Start Map-Reduce with the following command, run on the designated + JobTracker:
+ $ bin/start-mapred.sh +

+

The bin/start-mapred.sh script also consults the + ${HADOOP_CONF_DIR}/slaves file on the JobTracker + and starts the TaskTracker daemon on all the listed slaves. +

+
+ +
+ Hadoop Shutdown + +

+ Stop HDFS with the following command, run on the designated + NameNode:
+ $ bin/stop-dfs.sh +

+

The bin/stop-dfs.sh script also consults the + ${HADOOP_CONF_DIR}/slaves file on the NameNode + and stops the DataNode daemon on all the listed slaves.

+ +

+ Stop Map/Reduce with the following command, run on the designated + JobTracker:
+ $ bin/stop-mapred.sh
+

+

The bin/stop-mapred.sh script also consults the + ${HADOOP_CONF_DIR}/slaves file on the JobTracker + and stops the TaskTracker daemon on all the listed slaves.

+
+ + +
diff --git a/src/docs/src/documentation/content/xdocs/commands_manual.xml b/src/docs/src/documentation/content/xdocs/commands_manual.xml new file mode 100644 index 0000000..d17730f --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/commands_manual.xml @@ -0,0 +1,685 @@ + + + + + +
+ Commands Guide +
+ + +
+ Overview +

+ All hadoop commands are invoked by the bin/hadoop script. Running the hadoop + script without any arguments prints the description for all commands. +

+

+ Usage: hadoop [--config confdir] [COMMAND] [GENERIC_OPTIONS] [COMMAND_OPTIONS] +

+

+ Hadoop has an option parsing framework that handles generic options as well as running classes. +

+ + + + + + + + + + + + + + + +
COMMAND_OPTION Description
--config confdirOverwrites the default Configuration directory. Default is ${HADOOP_HOME}/conf.
GENERIC_OPTIONSThe common set of options supported by multiple commands.
COMMAND
COMMAND_OPTIONS
Various commands with their options are described in the following sections. The commands + have been grouped into User Commands + and Administration Commands.
+
+ Generic Options +

+ The following options are supported by dfsadmin, + fs, fsck and + job. + Applications should implement + Tool to support + + GenericOptions. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
GENERIC_OPTION Description
-conf <configuration file>Specify an application configuration file.
-D <property=value>Use value for given property.
-fs <local|namenode:port>Specify a namenode.
-jt <local|jobtracker:port>Specify a job tracker. Applies only to job.
-files <comma separated list of files>Specify comma separated files to be copied to the map reduce cluster. + Applies only to job.
-libjars <comma separated list of jars>Specify comma separated jar files to include in the classpath. + Applies only to job.
-archives <comma separated list of archives>Specify comma separated archives to be unarchived on the compute machines. + Applies only to job.
+
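For example, generic options are placed before the command-specific options; the host names and ports below are placeholders:

$ bin/hadoop fs -fs hdfs://namenode.example.com:8020 -ls /user
$ bin/hadoop job -jt jobtracker.example.com:9001 -list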
+
+ +
+ User Commands +

Commands useful for users of a hadoop cluster.

+
+ archive +

+ Creates a hadoop archive. More information can be found at Hadoop Archives. +

+

+ Usage: hadoop archive -archiveName NAME <src>* <dest> +

+ + + + + + + + + + + + + + +
COMMAND_OPTION Description
-archiveName NAMEName of the archive to be created.
srcFilesystem pathnames which work as usual with regular expressions.
destDestination directory which would contain the archive.
+
+ +
+ distcp +

+ Copies files or directories recursively. More information can be found in the Hadoop DistCp Guide. +

+

+ Usage: hadoop distcp <srcurl> <desturl> +

+ + + + + + + + + + + +
COMMAND_OPTION Description
srcurlSource Url
desturlDestination Url
+
+ +
+ fs +

+ Usage: hadoop fs [GENERIC_OPTIONS] + [COMMAND_OPTIONS] +

+

+ Runs a generic filesystem user client. +

+

+ The various COMMAND_OPTIONS can be found at Hadoop FS Shell Guide. +

+
+ +
+ fsck +

+ Runs an HDFS filesystem checking utility. See Fsck for more info. +

+

Usage: hadoop fsck [GENERIC_OPTIONS] + <path> [-move | -delete | -openforwrite] [-files [-blocks + [-locations | -racks]]]

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
COMMAND_OPTION Description
<path>Start checking from this path.
-moveMove corrupted files to /lost+found
-deleteDelete corrupted files.
-openforwritePrint out files opened for write.
-filesPrint out files being checked.
-blocksPrint out block report.
-locationsPrint out locations for every block.
-racksPrint out network topology for data-node locations.
+
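For example, to check a user directory and print its files, their blocks and the block locations (the path is a placeholder):

$ bin/hadoop fsck /user/hadoop -files -blocks -locations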
+ +
+ jar +

+ Runs a jar file. Users can bundle their Map Reduce code in a jar file and execute it using this command. +

+

+ Usage: hadoop jar <jar> [mainClass] args... +

+

+ Streaming jobs are run via this command. For examples, see the + Streaming examples +

+

+ The word count example is also run using the jar command; see the + Wordcount example +
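As a sketch, with the examples jar name and the input/output paths as placeholders, it can be invoked like this:

$ bin/hadoop jar hadoop-*-examples.jar wordcount /user/hadoop/input /user/hadoop/output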

+
+ +
+ job +

+ Command to interact with Map Reduce Jobs. +

+

+ Usage: hadoop job [GENERIC_OPTIONS] + [-submit <job-file>] | [-status <job-id>] | + [-counter <job-id> <group-name> <counter-name>] | [-kill <job-id>] | + [-events <job-id> <from-event-#> <#-of-events>] | [-history [all] <jobOutputDir>] | + [-list [all]] | [-kill-task <task-id>] | [-fail-task <task-id>] | + [-set-priority <job-id> <priority>] +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
COMMAND_OPTION Description
-submit <job-file>Submits the job.
-status <job-id>Prints the map and reduce completion percentage and all job counters.
-counter <job-id> <group-name> <counter-name>Prints the counter value.
-kill <job-id>Kills the job.
-events <job-id> <from-event-#> <#-of-events>Prints the events' details received by jobtracker for the given range.
-history [all] <jobOutputDir>-history <jobOutputDir> prints job details, failed and killed tip details. More details + about the job such as successful tasks and task attempts made for each task can be viewed by + specifying the [all] option.
-list [all]-list all displays all jobs. -list displays only jobs which are yet to complete.
-kill-task <task-id>Kills the task. Killed tasks are NOT counted against failed attempts.
-fail-task <task-id>Fails the task. Failed tasks are counted against failed attempts.
-set-priority <job-id> <priority>Changes the priority of the job. + Allowed priority values are VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
+
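For example (the job id below is a placeholder):

$ bin/hadoop job -list all
$ bin/hadoop job -status job_200912310101_0001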
+ +
+ pipes +

+ Runs a pipes job. +

+

+ Usage: hadoop pipes [-conf <path>] [-jobconf <key=value>, <key=value>, ...] + [-input <path>] [-output <path>] [-jar <jar file>] [-inputformat <class>] + [-map <class>] [-partitioner <class>] [-reduce <class>] [-writer <class>] + [-program <executable>] [-reduces <num>] +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
COMMAND_OPTION Description
-conf <path>Configuration for job
-jobconf <key=value>, <key=value>, ...Add/override configuration for job
-input <path>Input directory
-output <path>Output directory
-jar <jar file>Jar filename
-inputformat <class>InputFormat class
-map <class>Java Map class
-partitioner <class>Java Partitioner
-reduce <class>Java Reduce class
-writer <class>Java RecordWriter
-program <executable>Executable URI
-reduces <num>Number of reduces
+
+
+ queue +

+ Command to interact with and view Job Queue information. +

+

+ Usage: hadoop queue [-list] | [-info <job-queue-name> [-showJobs]] | [-showacls] +

+ + + + + + + + + + + + + + + + +
COMMAND_OPTION Description
-list Gets the list of Job Queues configured in the system, along with the scheduling information + associated with the job queues. +
-info <job-queue-name> [-showJobs] + Displays the job queue information and associated scheduling information of the particular + job queue. If the -showJobs option is present, a list of jobs submitted to the particular job + queue is displayed. +
-showaclsDisplays the queue name and associated queue operations allowed for the current user. + The list consists of only those queues to which the user has access. +
+
+
+ version +

+ Prints the version. +

+

+ Usage: hadoop version +

+
+
+ CLASSNAME +

+ The hadoop script can be used to invoke any class. +

+

+ Usage: hadoop CLASSNAME +

+

+ Runs the class named CLASSNAME. +

+
+
+ classpath +

+ Prints the class path needed to get the Hadoop jar and the required libraries. +

+

+ Usage: hadoop classpath +

+
+
+
+ Administration Commands +

Commands useful for administrators of a hadoop cluster.

+
+ balancer +

+ Runs a cluster balancing utility. An administrator can simply press Ctrl-C to stop the + rebalancing process. See Rebalancer for more details. +

+

+ Usage: hadoop balancer [-threshold <threshold>] +

+ + + + + + + +
COMMAND_OPTION Description
-threshold <threshold>Percentage of disk capacity. This overwrites the default threshold.
+
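For example, to run the balancer with a threshold of 5% instead of the default:

$ bin/hadoop balancer -threshold 5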
+ +
+ daemonlog +

+ Get/Set the log level for each daemon. +

+

+ Usage: hadoop daemonlog -getlevel <host:port> <name>
+ Usage: hadoop daemonlog -setlevel <host:port> <name> <level> +

+ + + + + + + + + + + +
COMMAND_OPTION Description
-getlevel <host:port> <name>Prints the log level of the daemon running at <host:port>. + This command internally connects to http://<host:port>/logLevel?log=<name>
-setlevel <host:port> <name> <level>Sets the log level of the daemon running at <host:port>. + This command internally connects to http://<host:port>/logLevel?log=<name>
+
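For example, assuming a NameNode whose web UI listens on namenode.example.com:50070 (the host name is a placeholder) and taking the daemon's class name as the logger name:

$ bin/hadoop daemonlog -getlevel namenode.example.com:50070 org.apache.hadoop.hdfs.server.namenode.NameNode
$ bin/hadoop daemonlog -setlevel namenode.example.com:50070 org.apache.hadoop.hdfs.server.namenode.NameNode DEBUG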
+ +
+ datanode +

+ Runs an HDFS datanode. +

+

+ Usage: hadoop datanode [-rollback] +

+ + + + + + + +
COMMAND_OPTION Description
-rollbackRolls back the datanode to the previous version. This should be used after stopping the datanode + and distributing the old hadoop version.
+
+ +
+ dfsadmin +

+ Runs an HDFS dfsadmin client. +

+

+ Usage: hadoop dfsadmin [GENERIC_OPTIONS] [-report] [-safemode enter | leave | get | wait] [-refreshNodes] + [-finalizeUpgrade] [-upgradeProgress status | details | force] [-metasave filename] + [-setQuota <quota> <dirname>...<dirname>] [-clrQuota <dirname>...<dirname>] + [-help [cmd]] +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
COMMAND_OPTION Description
-reportReports basic filesystem information and statistics.
-safemode enter | leave | get | waitSafe mode maintenance command. + Safe mode is a Namenode state in which it
+ 1. does not accept changes to the name space (read-only)
+ 2. does not replicate or delete blocks.
+ Safe mode is entered automatically at Namenode startup, and + leaves safe mode automatically when the configured minimum + percentage of blocks satisfies the minimum replication + condition. Safe mode can also be entered manually, but then + it can only be turned off manually as well.
-refreshNodesRe-read the hosts and exclude files to update the set + of Datanodes that are allowed to connect to the Namenode + and those that should be decommissioned or recommissioned.
-finalizeUpgradeFinalize upgrade of HDFS. + Datanodes delete their previous version working directories, + followed by Namenode doing the same. + This completes the upgrade process.
-upgradeProgress status | details | forceRequest current distributed upgrade status, + a detailed status or force the upgrade to proceed.
-metasave filenameSave Namenode's primary data structures + to <filename> in the directory specified by hadoop.log.dir property. + <filename> will contain one line for each of the following
+ 1. Datanodes heart beating with Namenode
+ 2. Blocks waiting to be replicated
+ 3. Blocks currently being replicated
+ 4. Blocks waiting to be deleted
-setQuota <quota> <dirname>...<dirname>Set the quota <quota> for each directory <dirname>. + The directory quota is a long integer that puts a hard limit on the number of names in the directory tree.
+ Best effort for the directory, with faults reported if
+ 1. N is not a positive integer, or
+ 2. user is not an administrator, or
+ 3. the directory does not exist or is a file, or
+ 4. the directory would immediately exceed the new quota.
-clrQuota <dirname>...<dirname>Clear the quota for each directory <dirname>.
+ Best effort for the directory, with faults reported if
+ 1. the directory does not exist or is a file, or
+ 2. user is not an administrator.
+ It does not fault if the directory has no quota.
-help [cmd] Displays help for the given command or all commands if none + is specified.
+
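For example (the directory name is a placeholder):

$ bin/hadoop dfsadmin -report
$ bin/hadoop dfsadmin -safemode get
$ bin/hadoop dfsadmin -setQuota 10000 /user/hadoop/project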
+
+ mradmin +

Runs the MR admin client.

+

Usage: hadoop mradmin [ + GENERIC_OPTIONS + ] [-refreshQueueAcls]

+ + + + + + + + +
COMMAND_OPTION Description
-refreshQueueAcls Refresh the queue acls used by hadoop, to check access during submissions + and administration of the job by the user. The properties present in + mapred-queue-acls.xml are reloaded by the queue manager.
+
+
+ jobtracker +

Runs the MapReduce job Tracker node.

+

Usage: hadoop jobtracker [-dumpConfiguration]

+ + + + + + + + +
COMMAND_OPTION Description
-dumpConfiguration Dumps the configuration used by the JobTracker, along with the queue + configuration, in JSON format to standard output, and then exits.
+ +
+ +
+ namenode +

+ Runs the namenode. More info about the upgrade, rollback and finalize is at + Upgrade Rollback +

+

+ Usage: hadoop namenode [-format] | [-upgrade] | [-rollback] | [-finalize] | [-importCheckpoint] +

+ + + + + + + + + + + + + + + + + + + + + + + +
COMMAND_OPTION Description
-formatFormats the namenode. It starts the namenode, formats it and then shuts it down.
-upgradeNamenode should be started with upgrade option after the distribution of new hadoop version.
-rollbackRolls back the namenode to the previous version. This should be used after stopping the cluster + and distributing the old hadoop version.
-finalizeFinalize will remove the previous state of the file system. The most recent upgrade will become permanent. + The rollback option will not be available anymore. After finalization it shuts the namenode down.
-importCheckpointLoads the image from a checkpoint directory and saves it into the current one. The checkpoint directory + is read from the property fs.checkpoint.dir.
+
+ +
+ secondarynamenode +

+ Runs the HDFS secondary namenode. See Secondary Namenode + for more info. +

+

+ Usage: hadoop secondarynamenode [-checkpoint [force]] | [-geteditsize] +

+ + + + + + + + + + + +
COMMAND_OPTION Description
-checkpoint [force]Checkpoints the Secondary namenode if EditLog size >= fs.checkpoint.size. + If -force is used, checkpoint irrespective of EditLog size.
-geteditsizePrints the EditLog size.
+
+ +
+ tasktracker +

+ Runs a MapReduce task Tracker node. +

+

+ Usage: hadoop tasktracker +

+
+ +
+ + + + + +
diff --git a/src/docs/src/documentation/content/xdocs/distcp.xml b/src/docs/src/documentation/content/xdocs/distcp.xml new file mode 100644 index 0000000..a4899a5 --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/distcp.xml @@ -0,0 +1,351 @@ + + + + + + + +
+ DistCp Guide +
+ + + +
+ Overview + +

DistCp (distributed copy) is a tool used for large inter/intra-cluster + copying. It uses Map/Reduce to effect its distribution, error + handling and recovery, and reporting. It expands a list of files and + directories into input to map tasks, each of which will copy a partition + of the files specified in the source list. Its Map/Reduce pedigree has + endowed it with some quirks in both its semantics and execution. The + purpose of this document is to offer guidance for common tasks and to + elucidate its model.

+ +
+ +
+ Usage + +
+ Basic +

The most common invocation of DistCp is an inter-cluster copy:

+

bash$ hadoop distcp hdfs://nn1:8020/foo/bar \
+           +           + hdfs://nn2:8020/bar/foo

+ +

This will expand the namespace under /foo/bar on nn1 + into a temporary file, partition its contents among a set of map + tasks, and start a copy on each TaskTracker from nn1 to nn2. Note + that DistCp expects absolute paths.

+ +

One can also specify multiple source directories on the command + line:

+

bash$ hadoop distcp hdfs://nn1:8020/foo/a \
+           +           + hdfs://nn1:8020/foo/b \
+           +           + hdfs://nn2:8020/bar/foo

+ +

Or, equivalently, from a file using the -f option:
+ bash$ hadoop distcp -f hdfs://nn1:8020/srclist \
+            +            +  hdfs://nn2:8020/bar/foo

+ +

Where srclist contains
+     hdfs://nn1:8020/foo/a
+     hdfs://nn1:8020/foo/b

+ +

When copying from multiple sources, DistCp will abort the copy with + an error message if two sources collide, but collisions at the + destination are resolved per the options + specified. By default, files already existing at the destination are + skipped (i.e. not replaced by the source file). A count of skipped + files is reported at the end of each job, but it may be inaccurate if a + copier failed for some subset of its files, but succeeded on a later + attempt (see Appendix).

+ +

It is important that each TaskTracker can reach and communicate with + both the source and destination file systems. For HDFS, both the source + and destination must be running the same version of the protocol or use + a backwards-compatible protocol (see Copying Between + Versions).

+ +

After a copy, it is recommended that one generates and cross-checks + a listing of the source and destination to verify that the copy was + truly successful. Since DistCp employs both Map/Reduce and the + FileSystem API, issues in or between any of the three could adversely + and silently affect the copy. Some have had success running with + -update enabled to perform a second pass, but users should + be acquainted with its semantics before attempting this.

+ +

It's also worth noting that if another client is still writing to a + source file, the copy will likely fail. Attempting to overwrite a file + being written at the destination should also fail on HDFS. If a source + file is (re)moved before it is copied, the copy will fail with a + FileNotFoundException.

+ +
+ +
+ Options + +
+ Option Index + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Flag Description Notes
-p[rbugp]Preserve
+   r: replication number
+   b: block size
+   u: user
+   g: group
+   p: permission
Modification times are not preserved. Also, when + -update is specified, status updates will + not be synchronized unless the file sizes + also differ (i.e. unless the file is re-created). +
-iIgnore failuresAs explained in the Appendix, this option + will keep more accurate statistics about the copy than the + default case. It also preserves logs from failed copies, which + can be valuable for debugging. Finally, a failing map will not + cause the job to fail before all splits are attempted. +
-log <logdir>Write logs to <logdir>DistCp keeps logs of each file it attempts to copy as map + output. If a map fails, the log output will not be retained if + it is re-executed. +
-m <num_maps>Maximum number of simultaneous copiesSpecify the number of maps to copy data. Note that more maps + may not necessarily improve throughput. +
-overwriteOverwrite destinationIf a map fails and -i is not specified, all the + files in the split, not only those that failed, will be recopied. + As discussed in the following, it also changes + the semantics for generating destination paths, so users should + use this carefully. +
-updateOverwrite if src size different from dst sizeAs noted in the preceding, this is not a "sync" + operation. The only criterion examined is the source and + destination file sizes; if they differ, the source file + replaces the destination file. As discussed in the + following, it also changes the semantics for + generating destination paths, so users should use this carefully. +
-f <urilist_uri>Use list at <urilist_uri> as src listThis is equivalent to listing each source on the command + line. The urilist_uri list should be a fully + qualified URI. +
-filelimit <n>Limit the total number of files to be <= nSee also Symbolic + Representations. +
-sizelimit <n>Limit the total size to be <= n bytesSee also Symbolic + Representations. +
-deleteDelete the files existing in the dst but not in srcThe deletion is done by FS Shell, so the trash will be used + if it is enabled. +
+ +
+ +
+ Symbolic Representations +

+ The parameter <n> in -filelimit + and -sizelimit can be specified with a symbolic + representation. For example, +

+
    +
  • 1230k = 1230 * 1024 = 1259520
  • +
  • 891g = 891 * 1024^3 = 956703965184
  • +
+
+ +
+ Update and Overwrite + +

It's worth giving some examples of -update and + -overwrite. Consider a copy from /foo/a and + /foo/b to /bar/foo, where the sources contain + the following:

+ +

    hdfs://nn1:8020/foo/a
+     hdfs://nn1:8020/foo/a/aa
+     hdfs://nn1:8020/foo/a/ab
+     hdfs://nn1:8020/foo/b
+     hdfs://nn1:8020/foo/b/ba
+     hdfs://nn1:8020/foo/b/ab

+ +

If either -update or -overwrite is set, + then both sources will map an entry to /bar/foo/ab at the + destination. For both options, the contents of each source directory + are compared with the contents of the destination + directory. Rather than permit this conflict, DistCp will abort.

+ +

In the default case, both /bar/foo/a and + /bar/foo/b will be created and neither will collide.

+ +

Now consider a legal copy using -update:
+ distcp -update hdfs://nn1:8020/foo/a \
+         +        + hdfs://nn1:8020/foo/b \
+         +        + hdfs://nn2:8020/bar

+ +

With sources/sizes:

+ +

    hdfs://nn1:8020/foo/a
+     hdfs://nn1:8020/foo/a/aa 32
+     hdfs://nn1:8020/foo/a/ab 32
+     hdfs://nn1:8020/foo/b
+     hdfs://nn1:8020/foo/b/ba 64
+     hdfs://nn1:8020/foo/b/bb 32

+ +

And destination/sizes:

+ +

    hdfs://nn2:8020/bar
+     hdfs://nn2:8020/bar/aa 32
+     hdfs://nn2:8020/bar/ba 32
+     hdfs://nn2:8020/bar/bb 64

+ +

Will effect:

+ +

    hdfs://nn2:8020/bar
+     hdfs://nn2:8020/bar/aa 32
+     hdfs://nn2:8020/bar/ab 32
+     hdfs://nn2:8020/bar/ba 64
+     hdfs://nn2:8020/bar/bb 32

+ +

Only aa is not overwritten on nn2. If + -overwrite were specified, all elements would be + overwritten.

+ +
+ +
+ +
+ +
+ Appendix + +
+ Map sizing + +

DistCp makes a faint attempt to size each map comparably so that + each copies roughly the same number of bytes. Note that files are the + finest level of granularity, so increasing the number of simultaneous + copiers (i.e. maps) may not always increase the number of + simultaneous copies nor the overall throughput.

+ +

If -m is not specified, DistCp will attempt to + schedule work for min (total_bytes / bytes.per.map, 20 * + num_task_trackers) where bytes.per.map defaults + to 256MB.

+ +

Tuning the number of maps to the size of the source and + destination clusters, the size of the copy, and the available + bandwidth is recommended for long-running and regularly run jobs.

+ +
+ +
+ Copying between versions of HDFS + +

For copying between two different versions of Hadoop, one will + usually use HftpFileSystem. This is a read-only FileSystem, so DistCp + must be run on the destination cluster (more specifically, on + TaskTrackers that can write to the destination cluster). Each source is + specified as hftp://<dfs.http.address>/<path> + (the default dfs.http.address is + <namenode>:50070).
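For instance, a copy from an older source cluster (namenode nn1) into a newer destination cluster (namenode nn2) might look like the following; hosts and paths are placeholders:

bash$ hadoop distcp hftp://nn1:50070/foo/bar \
                    hdfs://nn2:8020/bar/foo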

+ +
+ +
+ Map/Reduce and other side-effects + +

As has been mentioned in the preceding, should a map fail to copy + one of its inputs, there will be several side-effects.

+ +
    + +
  • Unless -i is specified, the logs generated by that + task attempt will be replaced by the previous attempt.
  • + +
  • Unless -overwrite is specified, files successfully + copied by a previous map on a re-execution will be marked as + "skipped".
  • + +
  • If a map fails mapred.map.max.attempts times, the + remaining map tasks will be killed (unless -i is + set).
  • + +
  • If mapred.speculative.execution is set + final and true, the result of the copy is + undefined.
  • + +
+ +
+ + + +
+ + + +
diff --git a/src/docs/src/documentation/content/xdocs/fair_scheduler.xml b/src/docs/src/documentation/content/xdocs/fair_scheduler.xml new file mode 100644 index 0000000..6066196 --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/fair_scheduler.xml @@ -0,0 +1,371 @@ + + + + + +
+ Fair Scheduler Guide +
+ + +
+ Purpose + +

This document describes the Fair Scheduler, a pluggable + Map/Reduce scheduler for Hadoop which provides a way to share + large clusters.

+
+ +
+ Introduction +

Fair scheduling is a method of assigning resources to jobs + such that all jobs get, on average, an equal share of resources + over time. When there is a single job running, that job uses the + entire cluster. When other jobs are submitted, task slots that + free up are assigned to the new jobs, so that each job gets + roughly the same amount of CPU time. Unlike the default Hadoop + scheduler, which forms a queue of jobs, this lets short jobs finish + in reasonable time while not starving long jobs. It is also a + reasonable way to share a cluster between a number of users. Finally, + fair sharing can also work with job priorities - the priorities are + used as weights to determine the fraction of total compute time that + each job should get. +

+

+ The scheduler actually organizes jobs further into "pools", and + shares resources fairly between these pools. By default, there is a + separate pool for each user, so that each user gets the same share + of the cluster no matter how many jobs they submit. However, it is + also possible to set a job's pool based on the user's Unix group or + any other jobconf property, such as the queue name property used by + Capacity Scheduler. + Within each pool, fair sharing is used to share capacity between + the running jobs. Pools can also be given weights to share the + cluster non-proportionally in the config file. +

+

+ In addition to providing fair sharing, the Fair Scheduler allows + assigning guaranteed minimum shares to pools, which is useful for + ensuring that certain users, groups or production applications + always get sufficient resources. When a pool contains jobs, it gets + at least its minimum share, but when the pool does not need its full + guaranteed share, the excess is split between other running jobs. + This lets the scheduler guarantee capacity for pools while utilizing + resources efficiently when these pools don't contain jobs. +

+

+ The Fair Scheduler lets all jobs run by default, but it is also + possible to limit the number of running jobs per user and per pool + through the config file. This can be useful when a user must submit + hundreds of jobs at once, or in general to improve performance if + running too many jobs at once would cause too much intermediate data + to be created or too much context-switching. Limiting the jobs does + not cause any subsequently submitted jobs to fail, only to wait in the + scheduler's queue until some of the user's earlier jobs finish. Jobs to + run from each user/pool are chosen in order of priority and then + submit time, as in the default FIFO scheduler in Hadoop. +

+

+ Finally, the fair scheduler provides several extension points where + the basic functionality can be extended. For example, the weight + calculation can be modified to give a priority boost to new jobs, + implementing a "shortest job first" policy which reduces response + times for interactive jobs even further. +

+
+ +
+ Installation +

+ To run the fair scheduler in your Hadoop installation, you need to put + it on the CLASSPATH. The easiest way is to copy the + hadoop-*-fairscheduler.jar from + HADOOP_HOME/contrib/fairscheduler to HADOOP_HOME/lib. + Alternatively, you can modify HADOOP_CLASSPATH to include this jar, in + HADOOP_CONF_DIR/hadoop-env.sh. +

+

+ To compile the fair scheduler from source, execute ant + package in the source folder and copy + build/contrib/fair-scheduler/hadoop-*-fairscheduler.jar + to HADOOP_HOME/lib. +

+

+ You will also need to set the following property in the Hadoop config + file HADOOP_CONF_DIR/mapred-site.xml to have Hadoop use + the fair scheduler:
+ <property>
+   <name>mapred.jobtracker.taskScheduler</name>
+   <value>org.apache.hadoop.mapred.FairScheduler</value>
+ </property> +

+

+ Once you restart the cluster, you can check that the fair scheduler + is running by going to http://<jobtracker URL>/scheduler + on the JobTracker's web UI. A "job scheduler administration" page should + be visible there. This page is described in the Administration section. +

+
+ +
+ Configuring the Fair scheduler +

+ The following properties can be set in mapred-site.xml to configure + the fair scheduler: +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameDescription
+ mapred.fairscheduler.allocation.file + + Specifies an absolute path to an XML file which contains the + allocations for each pool, as well as the per-pool and per-user + limits on number of running jobs. If this property is not + provided, allocations are not used.
+ This file must be in XML format, and can contain three types of + elements: +
    +
  • pool elements, which may contain elements for minMaps, + minReduces, maxRunningJobs (limit the number of jobs from the + pool to run at once), and weight (to share the cluster + non-proportionally with other pools). +
  • +
  • user elements, which may contain a maxRunningJobs to limit + jobs. Note that by default, there is a separate pool for each + user, so these may not be necessary; they are useful, however, + if you create a pool per user group or manually assign jobs + to pools.
  • +
  • A userMaxJobsDefault element, which sets the default running + job limit for any users whose limit is not specified.
  • +
+
+ Example Allocation file is listed below :
+ <?xml version="1.0"?>
+ <allocations>
+   <pool name="sample_pool">
+     <minMaps>5</minMaps>
+     <minReduces>5</minReduces>
+     <weight>2.0</weight>
+   </pool>
+   <user name="sample_user">
+     <maxRunningJobs>6</maxRunningJobs>
+   </user>
+   <userMaxJobsDefault>3</userMaxJobsDefault>
+ </allocations> +
+ This example creates a pool sample_pool with a guarantee of 5 map + slots and 5 reduce slots. The pool also has a weight of 2.0, meaning + it has a 2x higher share of the cluster than other pools (the default + weight is 1). Finally, the example limits the number of running jobs + per user to 3, except for sample_user, who can run 6 jobs concurrently. + Any pool not defined in the allocations file will have no guaranteed + capacity and a weight of 1.0. Also, any pool or user with no max + running jobs set in the file will be allowed to run an unlimited + number of jobs. +
+ mapred.fairscheduler.assignmultiple + + Allows the scheduler to assign both a map task and a reduce task + on each heartbeat, which improves cluster throughput when there + are many small tasks to run. Boolean value, default: false. +
+ mapred.fairscheduler.sizebasedweight + + Take into account job sizes in calculating their weights for fair + sharing. By default, weights are only based on job priorities. + Setting this flag to true will make them based on the size of the + job (number of tasks needed) as well, though not linearly + (the weight will be proportional to the log of the number of tasks + needed). This lets larger jobs get larger fair shares while still + providing enough of a share to small jobs to let them finish fast. + Boolean value, default: false. +
+ mapred.fairscheduler.poolnameproperty + + Specify which jobconf property is used to determine the pool that a + job belongs in. String, default: user.name (i.e. one pool for each + user). Some other useful values to set this to are:
+
    +
  • group.name (to create a pool per Unix group).
  • +
  • mapred.job.queue.name (the same property as the queue name in + Capacity Scheduler).
  • +
+
+ mapred.fairscheduler.weightadjuster + + An extensibility point that lets you specify a class to adjust the + weights of running jobs. This class should implement the + WeightAdjuster interface. There is currently one example + implementation - NewJobWeightBooster, which increases the + weight of jobs for the first 5 minutes of their lifetime to let + short jobs finish faster. To use it, set the weightadjuster + property to the full class name, + org.apache.hadoop.mapred.NewJobWeightBooster. + NewJobWeightBooster itself provides two parameters for setting the + duration and boost factor.
+
    +
  1. mapred.newjobweightbooster.factor + Factor by which new jobs' weight should be boosted. Default is 3.
  2. +
  3. mapred.newjobweightbooster.duration + Duration in milliseconds, default 300000 for 5 minutes
  4. +
+
+ mapred.fairscheduler.loadmanager + + An extensibility point that lets you specify a class that determines + how many maps and reduces can run on a given TaskTracker. This class + should implement the LoadManager interface. By default the task caps + in the Hadoop config file are used, but this option could be used to + make the load based on available memory and CPU utilization for example. +
+ mapred.fairscheduler.taskselector + + An extensibility point that lets you specify a class that determines + which task from within a job to launch on a given tracker. This can be + used to change either the locality policy (e.g. keep some jobs within + a particular rack) or the speculative execution algorithm (select + when to launch speculative tasks). The default implementation uses + Hadoop's default algorithms from JobInProgress. +
+
+
+ Administration +

+ The fair scheduler provides support for administration at runtime + through two mechanisms: +

+
    +
  1. + It is possible to modify pools' allocations + and user and pool running job limits at runtime by editing the allocation + config file. The scheduler will reload this file 10-15 seconds after it + sees that it was modified. +
  2. +
  3. + Current jobs, pools, and fair shares can be examined through the + JobTracker's web interface, at http://<jobtracker URL>/scheduler. + On this interface, it is also possible to modify jobs' priorities or + move jobs from one pool to another and see the effects on the fair + shares (this requires JavaScript). +
  4. +
+

+ The following fields can be seen for each job on the web interface: +

+
    +
  • Submitted - Date and time job was submitted.
  • +
  • JobID, User, Name - Job identifiers as on the standard + web UI.
  • +
  • Pool - Current pool of job. Select another value to move job to + another pool.
  • +
  • Priority - Current priority. Select another value to change the + job's priority
  • +
  • Maps/Reduces Finished: Number of tasks finished / total tasks.
  • +
  • Maps/Reduces Running: Tasks currently running.
  • +
  • Map/Reduce Fair Share: The average number of task slots that this + job should have at any given time according to fair sharing. The actual + number of tasks will go up and down depending on how much compute time + the job has had, but on average it will get its fair share amount.
  • +
+

+ In addition, it is possible to turn on an "advanced" view for the web UI, + by going to http://<jobtracker URL>/scheduler?advanced. This view shows + four more columns used for calculations internally: +

+
    +
  • Maps/Reduce Weight: Weight of the job in the fair sharing + calculations. This depends on priority and potentially also on + job size and job age if the sizebasedweight and + NewJobWeightBooster are enabled.
  • +
  • Map/Reduce Deficit: The job's scheduling deficit in machine- + seconds - the amount of resources it should have gotten according to + its fair share, minus how many it actually got. Positive deficit means + the job will be scheduled again in the near future because it needs to + catch up to its fair share. The scheduler schedules jobs with higher + deficit ahead of others. Please see the Implementation section of + this document for details.
  • +
+
+
+ Implementation +

There are two aspects to implementing fair scheduling: Calculating + each job's fair share, and choosing which job to run when a task slot + becomes available.

+

To select jobs to run, the scheduler keeps track of a + "deficit" for each job - the difference between the amount of + compute time it should have gotten on an ideal scheduler, and the amount + of compute time it actually got. This is a measure of how + "unfair" we've been to the job. Every few hundred + milliseconds, the scheduler updates the deficit of each job by looking + at how many tasks each job had running during this interval vs. its + fair share. Whenever a task slot becomes available, it is assigned to + the job with the highest deficit. There is one exception - if there + were one or more jobs that were not meeting their pool capacity + guarantees, we only choose among these "needy" jobs (based + again on their deficit), to ensure that the scheduler meets pool + guarantees as soon as possible.

+

+ The fair shares are calculated by dividing the capacity of the cluster + among runnable jobs according to a "weight" for each job. By + default the weight is based on priority, with each level of priority + having 2x higher weight than the next (for example, VERY_HIGH has 4x the + weight of NORMAL). However, weights can also be based on job sizes and ages, + as described in the Configuring section. For jobs that are in a pool, + fair shares also take into account the minimum guarantee for that pool. + This capacity is divided among the jobs in that pool according again to + their weights. +

+

Finally, when limits on a user's running jobs or a pool's running jobs + are in place, we choose which jobs get to run by sorting all jobs in order + of priority and then submit time, as in the standard Hadoop scheduler. Any + jobs that fall after the user/pool's limit in this ordering are queued up + and wait idle until they can be run. During this time, they are ignored + from the fair sharing calculations and do not gain or lose deficit (their + fair share is set to zero).

+
+ +
diff --git a/src/docs/src/documentation/content/xdocs/gridmix.xml b/src/docs/src/documentation/content/xdocs/gridmix.xml new file mode 100644 index 0000000..8b2edfb --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/gridmix.xml @@ -0,0 +1,164 @@ + + + + + + + +
+ Gridmix +
+ + + +
+ Overview + +

Gridmix is a benchmark for live clusters. It submits a mix of synthetic + jobs, modeling a profile mined from production loads.

+ +

There exist three versions of the Gridmix tool. This document discusses + the third (checked into contrib), distinct from the two checked into the + benchmarks subdirectory. While the first two versions of the tool included + stripped-down versions of common jobs, both were principally saturation + tools for stressing the framework at scale. In support of a broader range of + deployments and finer-tuned job mixes, this version of the tool will attempt + to model the resource profiles of production jobs to identify bottlenecks, + guide development, and serve as a replacement for the existing gridmix + benchmarks.

+ +
+ +
+ + Usage + +

To run Gridmix, one requires a job trace describing the job mix for a + given cluster. Such traces are typically generated by Rumen (see related + documentation). Gridmix also requires input data from which the synthetic + jobs will draw bytes. The input data need not be in any particular format, + as the synthetic jobs are currently binary readers. If one is running on a + new cluster, an optional step generating input data may precede the run.

+ +

Basic command line usage:

+ + +bin/mapred org.apache.hadoop.mapred.gridmix.Gridmix [-generate <MiB>] <iopath> <trace> + + +

The -generate parameter accepts standard units, e.g. + 100g will generate 100 * 2^30 bytes. The + <iopath> parameter is the destination directory for generated and/or + the directory from which input data will be read. The <trace> + parameter is a path to a job trace. The following configuration parameters + are also accepted in the standard idiom, before other Gridmix + parameters.
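Putting this together, a concrete invocation might look like the following sketch; the iopath and the (assumed Rumen-generated) trace path are placeholders:

bin/mapred org.apache.hadoop.mapred.gridmix.Gridmix -generate 100g /gridmix/io /gridmix/trace.json.gz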

+ +
+ Configuration parameters +

+ + + + + + + + + + + + + + +
Parameter Description Notes
gridmix.output.directoryThe directory into which output will be written. If specified, the + iopath will be relative to this parameter.The submitting user must have read/write access to this + directory. The user should also be mindful of any quota issues that + may arise during a run.
gridmix.client.submit.threadsThe number of threads submitting jobs to the cluster. This also + controls how many splits will be loaded into memory at a given time, + pending the submit time in the trace.Splits are pregenerated to hit submission deadlines, so + particularly dense traces may want more submitting threads. However, + storing splits in memory is reasonably expensive, so one should raise + this cautiously.
gridmix.client.pending.queue.depthThe depth of the queue of job descriptions awaiting split + generation.The jobs read from the trace occupy a queue of this depth before + being processed by the submission threads. It is unusual to configure + this.
gridmix.min.key.lengthThe key size for jobs submitted to the cluster.While this is clearly a job-specific, even task-specific property, + no data on key length is currently available. Since the intermediate + data are random, memcomparable data, not even the sort is likely + affected. It exists as a tunable as no default value is appropriate, + but future versions will likely replace it with trace data.
+ +
+
+ +
+ + Simplifying Assumptions + +

Gridmix will be developed in stages, incorporating feedback and patches + from the community. Currently, its intent is to evaluate Map/Reduce and HDFS + performance and not the layers on top of them (i.e. the extensive lib and + subproject space). Given these two limitations, the following + characteristics of job load are not currently captured in job traces and + cannot be accurately reproduced in Gridmix.

+ + + + + + + + + + +
PropertyNotes
CPU usageWe have no data for per-task CPU usage, so we + cannot attempt even an approximation. Gridmix tasks are never CPU bound + independent of I/O, though this surely happens in practice.
Filesystem propertiesNo attempt is made to match block + sizes, namespace hierarchies, or any property of input, intermediate, or + output data other than the bytes/records consumed and emitted from a given + task. This implies that some of the most heavily used parts of the system- + the compression libraries, text processing, streaming, etc.- cannot be + meaningfully tested with the current implementation.
I/O ratesThe rate at which records are consumed/emitted is + assumed to be limited only by the speed of the reader/writer and constant + throughout the task.
Memory profileNo data on tasks' memory usage over time is + available, though the max heap size is retained.
SkewThe records consumed and emitted to/from a given task + are assumed to follow observed averages, i.e. records will be more regular + than may be seen in the wild. Each map also generates a proportional + percentage of data for each reduce, so a job with unbalanced input will be + flattened.
Job failureUser code is assumed to be correct.
Job independenceThe output or outcome of one job does not + affect when or whether a subsequent job will run.
+ +
+ +
+ + Appendix + +

Issues tracking the implementations of gridmix1, gridmix2, and + gridmix3. + Other issues tracking the development of Gridmix can be found by searching + the Map/Reduce JIRA

+ +
+ + + +
diff --git a/src/docs/src/documentation/content/xdocs/hadoop_archives.xml b/src/docs/src/documentation/content/xdocs/hadoop_archives.xml new file mode 100644 index 0000000..7f1dbf6 --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/hadoop_archives.xml @@ -0,0 +1,116 @@ + + + + +
+ Archives Guide +
+ +
+ What are Hadoop archives? +

+ Hadoop archives are special format archives. A Hadoop archive + maps to a file system directory. A Hadoop archive always has a *.har + extension. A Hadoop archive directory contains metadata (in the form + of _index and _masterindex) and data (part-*) files. The _index file contains + the name of the files that are part of the archive and the location + within the part files. +

+
+ +
+ How to create an archive? +

+ Usage: hadoop archive -archiveName name -p <parent> <src>* <dest> +

+

+ -archiveName is the name of the archive you would like to create. + An example would be foo.har. The name should have a *.har extension. + The parent argument specifies the relative path to which the files should be + archived. An example would be: +

-p /foo/bar a/b/c e/f/g

+ Here /foo/bar is the parent path and a/b/c, e/f/g are relative paths to the parent. + Note that this is a Map/Reduce job that creates the archives. You would + need a map reduce cluster to run this. For a detailed example, see the later sections.

+

If you just want to archive a single directory /foo/bar then you can just use

+

hadoop archive -archiveName zoo.har -p /foo/bar /outputdir

+
+ +
+ How to look up files in archives? +

+ The archive exposes itself as a file system layer. So all the fs shell + commands in the archives work but with a different URI. Also, note that + archives are immutable. So, renames, deletes and creates return + an error. The URI for Hadoop Archives is +

har://scheme-hostname:port/archivepath/fileinarchive

+ If no scheme is provided it assumes the underlying filesystem. + In that case the URI would look like

+

har:///archivepath/fileinarchive

+
+ +
+ Example on creating and looking up archives +

hadoop archive -archiveName foo.har -p /user/hadoop dir1 dir2 /user/zoo

+

+ The above example is creating an archive using /user/hadoop as the relative archive directory. + The directories /user/hadoop/dir1 and /user/hadoop/dir2 will be + archived in the following file system directory -- /user/zoo/foo.har. Archiving does not delete the input + files. If you want to delete the input files after creating the archives (to reduce namespace), you + will have to do it on your own. +

+ +
+ Looking up files and understanding the -p option +

Looking up files in hadoop archives is as easy as doing an ls on the filesystem. After you have + archived the directories /user/hadoop/dir1 and /user/hadoop/dir2 as in the example above, to see all + the files in the archives you can just run:

+

hadoop dfs -lsr har:///user/zoo/foo.har/

+

To understand the significance of the -p argument, let's go through the above example again. If you just do + an ls (not lsr) on the hadoop archive using

+

hadoop dfs -ls har:///user/zoo/foo.har

+

The output should be:

+ +har:///user/zoo/foo.har/dir1 +har:///user/zoo/foo.har/dir2 + +

As you can recall the archives were created with the following command

+

hadoop archive -archiveName foo.har -p /user/hadoop dir1 dir2 /user/zoo

+

If we were to change the command to:

+

hadoop archive -archiveName foo.har -p /user/ hadoop/dir1 hadoop/dir2 /user/zoo

+

then a ls on the hadoop archive using

+

hadoop dfs -ls har:///user/zoo/foo.har

+

would give you

+ +har:///user/zoo/foo.har/hadoop/dir1 +har:///user/zoo/foo.har/hadoop/dir2 + +

+ Notice that the archived files have been archived relative to /user/ rather than /user/hadoop. +

+
+
+ +
+ Using Hadoop Archives with Map Reduce +

Using Hadoop Archives in Map Reduce is as easy as specifying a different input filesystem than the default file system. + If you have a hadoop archive stored in HDFS in /user/zoo/foo.har, then to use this archive for Map Reduce input, all + you need to do is specify the input directory as har:///user/zoo/foo.har. Since Hadoop Archives are exposed as a file system, + Map Reduce will be able to use all the logical input files in Hadoop Archives as input.
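For instance, the bundled word count example could be run directly over one of the archived directories; the examples jar name and the output path are placeholders:

hadoop jar hadoop-*-examples.jar wordcount har:///user/zoo/foo.har/dir1 /user/zoo/wc-out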

+
+ +
diff --git a/src/docs/src/documentation/content/xdocs/hdfs_design.xml b/src/docs/src/documentation/content/xdocs/hdfs_design.xml new file mode 100644 index 0000000..22df739 --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/hdfs_design.xml @@ -0,0 +1,367 @@ + + + + + + + + +
+ + HDFS Architecture + + + + +
+ + +
+ Introduction +

+ The Hadoop Distributed File System (HDFS) is a distributed file system designed to run on commodity hardware. It has many similarities with existing distributed file systems. However, the differences from other distributed file systems are significant. HDFS is highly fault-tolerant and is designed to be deployed on low-cost hardware. HDFS provides high throughput access to application data and is suitable for applications that have large data sets. HDFS relaxes a few POSIX requirements to enable streaming access to file system data. HDFS was originally built as infrastructure for the Apache Nutch web search engine project. HDFS is part of the Apache Hadoop Core project. The project URL is http://hadoop.apache.org/core/. +

+
+ +
+ Assumptions and Goals + +
+ Hardware Failure +

+ Hardware failure is the norm rather than the exception. An HDFS instance may consist of hundreds or thousands of server machines, each storing part of the file system’s data. The fact that there are a huge number of components and that each component has a non-trivial probability of failure means that some component of HDFS is always non-functional. Therefore, detection of faults and quick, automatic recovery from them is a core architectural goal of HDFS. +

+
+ + +
+ Streaming Data Access +

+ Applications that run on HDFS need streaming access to their data sets. They are not general purpose applications that typically run on general purpose file systems. HDFS is designed more for batch processing rather than interactive use by users. The emphasis is on high throughput of data access rather than low latency of data access. POSIX imposes many hard requirements that are not needed for applications that are targeted for HDFS. POSIX semantics in a few key areas has been traded to increase data throughput rates. +

+
+ +
+ Large Data Sets +

+ Applications that run on HDFS have large data sets. A typical file in HDFS is gigabytes to terabytes in size. Thus, HDFS is tuned to support large files. It should provide high aggregate data bandwidth and scale to hundreds of nodes in a single cluster. It should support tens of millions of files in a single instance. +

+
+ + +
+ Simple Coherency Model +

+ HDFS applications need a write-once-read-many access model for files. A file once created, written, and closed need not be changed. This assumption simplifies data coherency issues and enables high throughput data access. A Map/Reduce application or a web crawler application fits perfectly with this model. There is a plan to support appending-writes to files in the future. +

+
+ + +
+ “Moving Computation is Cheaper than Moving Data” +

+ A computation requested by an application is much more efficient if it is executed near the data it operates on. This is especially true when the size of the data set is huge. This minimizes network congestion and increases the overall throughput of the system. The assumption is that it is often better to migrate the computation closer to where the data is located rather than moving the data to where the application is running. HDFS provides interfaces for applications to move themselves closer to where the data is located. +

+
+ + +
+ Portability Across Heterogeneous Hardware and Software Platforms +

+ HDFS has been designed to be easily portable from one platform to another. This facilitates widespread adoption of HDFS as a platform of choice for a large set of applications. +

+
+
+ + +
+ NameNode and DataNodes +

+ HDFS has a master/slave architecture. An HDFS cluster consists of a single NameNode, a master server that manages the file system namespace and regulates access to files by clients. In addition, there are a number of DataNodes, usually one per node in the cluster, which manage storage attached to the nodes that they run on. HDFS exposes a file system namespace and allows user data to be stored in files. Internally, a file is split into one or more blocks and these blocks are stored in a set of DataNodes. The NameNode executes file system namespace operations like opening, closing, and renaming files and directories. It also determines the mapping of blocks to DataNodes. The DataNodes are responsible for serving read and write requests from the file system’s clients. The DataNodes also perform block creation, deletion, and replication upon instruction from the NameNode. +

+
+

+ The NameNode and DataNode are pieces of software designed to run on commodity machines. These machines typically run a GNU/Linux operating system (OS). HDFS is built using the Java language; any machine that supports Java can run the NameNode or the DataNode software. Usage of the highly portable Java language means that HDFS can be deployed on a wide range of machines. A typical deployment has a dedicated machine that runs only the NameNode software. Each of the other machines in the cluster runs one instance of the DataNode software. The architecture does not preclude running multiple DataNodes on the same machine but in a real deployment that is rarely the case. +

+

+ The existence of a single NameNode in a cluster greatly simplifies the architecture of the system. The NameNode is the arbitrator and repository for all HDFS metadata. The system is designed in such a way that user data never flows through the NameNode. +

+
+ + + +
+ The File System Namespace +

+ HDFS supports a traditional hierarchical file organization. A user or an application can create directories and store files inside these directories. The file system namespace hierarchy is similar to most other existing file systems; one can create and remove files, move a file from one directory to another, or rename a file. HDFS does not yet implement user quotas or access permissions. HDFS does not support hard links or soft links. However, the HDFS architecture does not preclude implementing these features. +

+

+ The NameNode maintains the file system namespace. Any change to the file system namespace or its properties is recorded by the NameNode. An application can specify the number of replicas of a file that should be maintained by HDFS. The number of copies of a file is called the replication factor of that file. This information is stored by the NameNode. +

+
+ + + +
+ Data Replication +

+ HDFS is designed to reliably store very large files across machines in a large cluster. It stores each file as a sequence of blocks; all blocks in a file except the last block are the same size. The blocks of a file are replicated for fault tolerance. The block size and replication factor are configurable per file. An application can specify the number of replicas of a file. The replication factor can be specified at file creation time and can be changed later. Files in HDFS are write-once and have strictly one writer at any time. +
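As a concrete illustration (the file names are hypothetical; the cluster-wide default replication factor comes from the dfs.replication configuration property):

hadoop dfs -put localfile /user/hadoop/datafile      (created with the default replication factor)
hadoop dfs -setrep -w 5 /user/hadoop/datafile        (raise it to 5 and wait until the replicas exist)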

+

+ The NameNode makes all decisions regarding replication of blocks. It periodically receives a Heartbeat and a Blockreport from each of the DataNodes in the cluster. Receipt of a Heartbeat implies that the DataNode is functioning properly. A Blockreport contains a list of all blocks on a DataNode. +

+
+ +
+ Replica Placement: The First Baby Steps +

+ The placement of replicas is critical to HDFS reliability and performance. Optimizing replica placement distinguishes HDFS from most other distributed file systems. This is a feature that needs lots of tuning and experience. The purpose of a rack-aware replica placement policy is to improve data reliability, availability, and network bandwidth utilization. The current implementation for the replica placement policy is a first effort in this direction. The short-term goals of implementing this policy are to validate it on production systems, learn more about its behavior, and build a foundation to test and research more sophisticated policies. +

+

+ Large HDFS instances run on a cluster of computers that commonly spread across many racks. Communication between two nodes in different racks has to go through switches. In most cases, network bandwidth between machines in the same rack is greater than network bandwidth between machines in different racks. +

+

+ The NameNode determines the rack id each DataNode belongs to via the process outlined in Rack Awareness. A simple but non-optimal policy is to place replicas on unique racks. This prevents losing data when an entire rack fails and allows use of bandwidth from multiple racks when reading data. This policy evenly distributes replicas in the cluster which makes it easy to balance load on component failure. However, this policy increases the cost of writes because a write needs to transfer blocks to multiple racks. +

+

+ For the common case, when the replication factor is three, HDFS’s placement policy is to put one replica on one node in the local rack, another on a different node in the local rack, and the last on a different node in a different rack. This policy cuts the inter-rack write traffic which generally improves write performance. The chance of rack failure is far less than that of node failure; this policy does not impact data reliability and availability guarantees. However, it does reduce the aggregate network bandwidth used when reading data since a block is placed in only two unique racks rather than three. With this policy, the replicas of a file do not evenly distribute across the racks. One third of replicas are on one node, two thirds of replicas are on one rack, and the other third are evenly distributed across the remaining racks. This policy improves write performance without compromising data reliability or read performance. +

+

+ The current, default replica placement policy described here is a work in progress. +

+
+ +
+ Replica Selection +

+ To minimize global bandwidth consumption and read latency, HDFS tries to satisfy a read request from a replica that is closest to the reader. If there exists a replica on the same rack as the reader node, then that replica is preferred to satisfy the read request. If an HDFS cluster spans multiple data centers, then a replica that is resident in the local data center is preferred over any remote replica. +

+
+ +
+ Safemode +

+ On startup, the NameNode enters a special state called Safemode. Replication of data blocks does not occur when the NameNode is in the Safemode state. The NameNode receives Heartbeat and Blockreport messages from the DataNodes. A Blockreport contains the list of data blocks that a DataNode is hosting. Each block has a specified minimum number of replicas. A block is considered safely replicated when the minimum number of replicas of that data block has checked in with the NameNode. After a configurable percentage of safely replicated data blocks checks in with the NameNode (plus an additional 30 seconds), the NameNode exits the Safemode state. It then determines the list of data blocks (if any) that still have fewer than the specified number of replicas. The NameNode then replicates these blocks to other DataNodes. +
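Administrators can also inspect or control Safemode explicitly with the dfsadmin command, for example:

hadoop dfsadmin -safemode get      (report whether Safemode is on or off)
hadoop dfsadmin -safemode enter    (force the NameNode into Safemode)
hadoop dfsadmin -safemode leave    (leave Safemode)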

+
+ +
+ +
+ The Persistence of File System Metadata +

+ The HDFS namespace is stored by the NameNode. The NameNode uses a transaction log called the EditLog to persistently record every change that occurs to file system metadata. For example, creating a new file in HDFS causes the NameNode to insert a record into the EditLog indicating this. Similarly, changing the replication factor of a file causes a new record to be inserted into the EditLog. The NameNode uses a file in its local host OS file system to store the EditLog. The entire file system namespace, including the mapping of blocks to files and file system properties, is stored in a file called the FsImage. The FsImage is stored as a file in the NameNode’s local file system too. +

+

+ The NameNode keeps an image of the entire file system namespace and file Blockmap in memory. This key metadata item is designed to be compact, such that a NameNode with 4 GB of RAM is plenty to support a huge number of files and directories. When the NameNode starts up, it reads the FsImage and EditLog from disk, applies all the transactions from the EditLog to the in-memory representation of the FsImage, and flushes out this new version into a new FsImage on disk. It can then truncate the old EditLog because its transactions have been applied to the persistent FsImage. This process is called a checkpoint. In the current implementation, a checkpoint only occurs when the NameNode starts up. Work is in progress to support periodic checkpointing in the near future. +

+

+ The DataNode stores HDFS data in files in its local file system. The DataNode has no knowledge about HDFS files. It stores each block of HDFS data in a separate file in its local file system. The DataNode does not create all files in the same directory. Instead, it uses a heuristic to determine the optimal number of files per directory and creates subdirectories appropriately. It is not optimal to create all local files in the same directory because the local file system might not be able to efficiently support a huge number of files in a single directory. When a DataNode starts up, it scans through its local file system, generates a list of all HDFS data blocks that correspond to each of these local files and sends this report to the NameNode: this is the Blockreport. +

+
+ + +
+ The Communication Protocols +

+ All HDFS communication protocols are layered on top of the TCP/IP protocol. A client establishes a connection to a configurable TCP port on the NameNode machine. It talks the ClientProtocol with the NameNode. The DataNodes talk to the NameNode using the DataNode Protocol. A Remote Procedure Call (RPC) abstraction wraps both the Client Protocol and the DataNode Protocol. By design, the NameNode never initiates any RPCs. Instead, it only responds to RPC requests issued by DataNodes or clients. +

+
+ + +
+ Robustness +

+ The primary objective of HDFS is to store data reliably even in the presence of failures. The three common types of failures are NameNode failures, DataNode failures and network partitions. +

+ +
+ Data Disk Failure, Heartbeats and Re-Replication +

+ Each DataNode sends a Heartbeat message to the NameNode periodically. A network partition can cause a subset of DataNodes to lose connectivity with the NameNode. The NameNode detects this condition by the absence of a Heartbeat message. The NameNode marks DataNodes without recent Heartbeats as dead and does not forward any new IO requests to them. Any data that was registered to a dead DataNode is not available to HDFS any more. DataNode death may cause the replication factor of some blocks to fall below their specified value. The NameNode constantly tracks which blocks need to be replicated and initiates replication whenever necessary. The necessity for re-replication may arise due to many reasons: a DataNode may become unavailable, a replica may become corrupted, a hard disk on a DataNode may fail, or the replication factor of a file may be increased. +

+
+ +
+ Cluster Rebalancing +

+ The HDFS architecture is compatible with data rebalancing schemes. A scheme might automatically move data from one DataNode to another if the free space on a DataNode falls below a certain threshold. In the event of a sudden high demand for a particular file, a scheme might dynamically create additional replicas and rebalance other data in the cluster. These types of data rebalancing schemes are not yet implemented. +

+
+ +
+ Data Integrity +

+ + It is possible that a block of data fetched from a DataNode arrives corrupted. This corruption can occur because of faults in a storage device, network faults, or buggy software. The HDFS client software implements checksum checking on the contents of HDFS files. When a client creates an HDFS file, it computes a checksum of each block of the file and stores these checksums in a separate hidden file in the same HDFS namespace. When a client retrieves file contents it verifies that the data it received from each DataNode matches the checksum stored in the associated checksum file. If not, then the client can opt to retrieve that block from another DataNode that has a replica of that block. +

+
+ + +
+ Metadata Disk Failure +

+ The FsImage and the EditLog are central data structures of HDFS. A corruption of these files can cause the HDFS instance to be non-functional. For this reason, the NameNode can be configured to support maintaining multiple copies of the FsImage and EditLog. Any update to either the FsImage or EditLog causes each of the FsImages and EditLogs to get updated synchronously. This synchronous updating of multiple copies of the FsImage and EditLog may degrade the rate of namespace transactions per second that a NameNode can support. However, this degradation is acceptable because even though HDFS applications are very data intensive in nature, they are not metadata intensive. When a NameNode restarts, it selects the latest consistent FsImage and EditLog to use. +

+

+ The NameNode machine is a single point of failure for an HDFS cluster. If the NameNode machine fails, manual intervention is necessary. Currently, automatic restart and failover of the NameNode software to another machine is not supported. +

+
+ +
+ Snapshots +

+ Snapshots support storing a copy of data at a particular instant of time. One usage of the snapshot feature may be to roll back a corrupted HDFS instance to a previously known good point in time. HDFS does not currently support snapshots but will in a future release. +

+
+ +
+ + +
+ + Data Organization + +
+ Data Blocks +

+ HDFS is designed to support very large files. Applications that are compatible with HDFS are those that deal with large data sets. These applications write their data only once but they read it one or more times and require these reads to be satisfied at streaming speeds. HDFS supports write-once-read-many semantics on files. A typical block size used by HDFS is 64 MB. Thus, an HDFS file is chopped up into 64 MB chunks, and if possible, each chunk will reside on a different DataNode. +

+
+ + +
+ + Staging +

+ A client request to create a file does not reach the NameNode immediately. In fact, initially the HDFS client caches the file data into a temporary local file. Application writes are transparently redirected to this temporary local file. When the local file accumulates data worth over one HDFS block size, the client contacts the NameNode. The NameNode inserts the file name into the file system hierarchy and allocates a data block for it. The NameNode responds to the client request with the identity of the DataNode and the destination data block. Then the client flushes the block of data from the local temporary file to the specified DataNode. When a file is closed, the remaining un-flushed data in the temporary local file is transferred to the DataNode. The client then tells the NameNode that the file is closed. At this point, the NameNode commits the file creation operation into a persistent store. If the NameNode dies before the file is closed, the file is lost. +

+

+ The above approach has been adopted after careful consideration of target applications that run on HDFS. These applications need streaming writes to files. If a client writes to a remote file directly without any client side buffering, the network speed and the congestion in the network impacts throughput considerably. This approach is not without precedent. Earlier distributed file systems, e.g. AFS, have used client side caching to improve performance. A POSIX requirement has been relaxed to achieve higher performance of data uploads. +

+
+ +
+ Replication Pipelining +

+ When a client is writing data to an HDFS file, its data is first written to a local file as explained in the previous section. Suppose the HDFS file has a replication factor of three. When the local file accumulates a full block of user data, the client retrieves a list of DataNodes from the NameNode. This list contains the DataNodes that will host a replica of that block. The client then flushes the data block to the first DataNode. The first DataNode starts receiving the data in small portions (4 KB), writes each portion to its local repository and transfers that portion to the second DataNode in the list. The second DataNode, in turn starts receiving each portion of the data block, writes that portion to its repository and then flushes that portion to the third DataNode. Finally, the third DataNode writes the data to its local repository. Thus, a DataNode can be receiving data from the previous one in the pipeline and at the same time forwarding data to the next one in the pipeline. Thus, the data is pipelined from one DataNode to the next. +

+
+ +
+ +
+ + Accessibility + +

+ HDFS can be accessed from applications in many different ways. Natively, HDFS provides a FileSystem Java API for applications to use. A C language wrapper for this Java API is also available. In addition, an HTTP browser can also be used to browse the files of an HDFS instance. Work is in progress to expose HDFS through the WebDAV protocol. +

+ +
+ FS Shell +

+ HDFS allows user data to be organized in the form of files and directories. It provides a commandline interface called FS shell that lets a user interact with the data in HDFS. The syntax of this command set is similar to other shells (e.g. bash, csh) that users are already familiar with. Here are some sample action/command pairs: +

Action                                                  Command
Create a directory named /foodir                        bin/hadoop dfs -mkdir /foodir
Remove a directory named /foodir                        bin/hadoop dfs -rmr /foodir
View the contents of a file named /foodir/myfile.txt    bin/hadoop dfs -cat /foodir/myfile.txt
+

+ FS shell is targeted for applications that need a scripting language to interact with the stored data. +
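A small illustrative sequence of such shell interaction (the paths are hypothetical):

bin/hadoop dfs -mkdir /foodir
bin/hadoop dfs -put localfile.txt /foodir/myfile.txt
bin/hadoop dfs -cat /foodir/myfile.txt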

+
+ +
+ DFSAdmin +

+ The DFSAdmin command set is used for administering an HDFS cluster. These are commands that are used only by an HDFS administrator. Here are some sample action/command pairs: +

Action                                        Command
Put the cluster in Safemode                   bin/hadoop dfsadmin -safemode enter
Generate a list of DataNodes                  bin/hadoop dfsadmin -report
Recommission or decommission DataNode(s)      bin/hadoop dfsadmin -refreshNodes
+
+ +
+ Browser Interface +

+ A typical HDFS install configures a web server to expose the HDFS namespace through a configurable TCP port. This allows a user to navigate the HDFS namespace and view the contents of its files using a web browser. +

+
+ +
+ +
+ Space Reclamation + +
+ File Deletes and Undeletes +

+ When a file is deleted by a user or an application, it is not immediately removed from HDFS. Instead, HDFS first renames it to a file in the /trash directory. The file can be restored quickly as long as it remains in /trash. A file remains in /trash for a configurable amount of time. After the expiry of its life in /trash, the NameNode deletes the file from the HDFS namespace. The deletion of a file causes the blocks associated with the file to be freed. Note that there could be an appreciable time delay between the time a file is deleted by a user and the time of the corresponding increase in free space in HDFS. +

+

+ A user can Undelete a file after deleting it as long as it remains in the /trash directory. If a user wants to undelete a file that he/she has deleted, he/she can navigate the /trash directory and retrieve the file. The /trash directory contains only the latest copy of the file that was deleted. The /trash directory is just like any other directory with one special feature: HDFS applies specified policies to automatically delete files from this directory. The current default policy is to delete files from /trash that are more than 6 hours old. In the future, this policy will be configurable through a well defined interface. +
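As an illustration, a deleted file can be restored by moving it back out of the trash before it expires; the exact layout under /trash shown here is an assumption, and the message printed by the delete command reports the actual trash location:

hadoop dfs -rm /user/hadoop/file1                             (the file is moved to trash, not removed)
hadoop dfs -mv /trash/user/hadoop/file1 /user/hadoop/file1    (restore it before it expires)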

+
+ +
+ Decrease Replication Factor +

+ When the replication factor of a file is reduced, the NameNode selects excess replicas that can be deleted. The next Heartbeat transfers this information to the DataNode. The DataNode then removes the corresponding blocks and the corresponding free space appears in the cluster. Once again, there might be a time delay between the completion of the setReplication API call and the appearance of free space in the cluster. +
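A sketch of that sequence (the path is hypothetical):

hadoop dfs -setrep 2 /user/hadoop/file1    (request fewer replicas)
hadoop dfs -ls /user/hadoop/file1          (the replication column shows the new factor; free space reappears only after the DataNodes act on the next Heartbeat)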

+
+
+ + +
+ References +

+ Hadoop JavaDoc API. +

+

+ HDFS source code: + + http://hadoop.apache.org/core/version_control.html + +

+
+ + +
+ diff --git a/src/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml b/src/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml new file mode 100644 index 0000000..8899c37 --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml @@ -0,0 +1,191 @@ + + + + + + + + +
+ + HDFS Permissions Guide + +
+ + +
Overview +

+ The Hadoop Distributed File System (HDFS) implements a permissions model for files and directories that shares much of the POSIX model. Each file and directory is associated with an owner and a group. The file or directory has separate permissions for the user that is the owner, for other users that are members of the group, and for all other users. For files, the r permission is required to read the file, and the w permission is required to write or append to the file. For directories, the r permission is required to list the contents of the directory, the w permission is required to create or delete files or directories, and the x permission is required to access a child of the directory. In contrast to the POSIX model, there are no sticky, setuid or setgid bits for files as there is no notion of executable files. For directories, there are no sticky, setuid or setgid bits as a simplification. Collectively, the permissions of a file or directory are its mode. In general, Unix customs for representing and displaying modes will be used, including the use of octal numbers in this description. When a file or directory is created, its owner is the user identity of the client process, and its group is the group of the parent directory (the BSD rule). +

+

+ Each client process that accesses HDFS has a two-part identity composed of the user name, and groups list. Whenever HDFS must do a permissions check for a file or directory foo accessed by a client process, +

+
    +
  • + If the user name matches the owner of foo, then the owner permissions are tested; +
  • +
  • + Else if the group of foo matches any member of the groups list, then the group permissions are tested;
  • +
  • + Otherwise the other permissions of foo are tested. +
  • +
+ +

+ If a permissions check fails, the client operation fails. +

+
+ +
User Identity +

+In this release of Hadoop the identity of a client process is just whatever the host operating system says it is. For Unix-like systems, +

+
    +
  • + The user name is the equivalent of `whoami`; +
  • +
  • + The group list is the equivalent of `bash -c groups`. +
  • +
+ +

+In the future there will be other ways of establishing user identity (think Kerberos, LDAP, and others). There is no expectation that this first method is secure in protecting one user from impersonating another. This user identity mechanism combined with the permissions model allows a cooperative community to share file system resources in an organized fashion. +

+

+In any case, the user identity mechanism is extrinsic to HDFS itself. There is no provision within HDFS for creating user identities, establishing groups, or processing user credentials. +

+
+ +
Understanding the Implementation +

+Each file or directory operation passes the full path name to the name node, and the permissions checks are applied along the path for each operation. The client framework will implicitly associate the user identity with the connection to the name node, reducing the need for changes to the existing client API. It has always been the case that when one operation on a file succeeds, the operation might fail when repeated because the file, or some directory on the path, no longer exists. For instance, when the client first begins reading a file, it makes a first request to the name node to discover the location of the first blocks of the file. A second request made to find additional blocks may fail. On the other hand, deleting a file does not revoke access by a client that already knows the blocks of the file. With the addition of permissions, a client's access to a file may be withdrawn between requests. Again, changing permissions does not revoke the access of a client that already knows the file's blocks. +

+

+The map-reduce framework delegates the user identity by passing strings without special concern for confidentiality. The owner and group of a file or directory are stored as strings; there is no conversion from user and group identity numbers as is conventional in Unix. +

+

+The permissions features of this release did not require any changes to the behavior of data nodes. Blocks on the data nodes do not have any of the Hadoop ownership or permissions attributes associated with them. +

+
+ +
Changes to the File System API +

+ All methods that use a path parameter will throw AccessControlException if permission checking fails. +

+

New methods:

+
    +
  • + public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException; +
  • +
  • + public boolean mkdirs(Path f, FsPermission permission) throws IOException; +
  • +
  • + public void setPermission(Path p, FsPermission permission) throws IOException; +
  • +
  • + public void setOwner(Path p, String username, String groupname) throws IOException; +
  • +
  • + public FileStatus getFileStatus(Path f) throws IOException; will additionally return the user, group and mode associated with the path. +
  • + +
+

+The mode of a new file or directory is restricted by the umask set as a configuration parameter. When the existing create(path, …) method (without the permission parameter) is used, the mode of the new file is 666 & ^umask. When the new create(path, permission, …) method (with the permission parameter P) is used, the mode of the new file is P & ^umask & 666. When a new directory is created with the existing mkdirs(path) method (without the permission parameter), the mode of the new directory is 777 & ^umask. When the new mkdirs(path, permission) method (with the permission parameter P) is used, the mode of the new directory is P & ^umask & 777. +
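A short worked example, assuming a configured umask of 022 (the permission arguments 640 and 750 are hypothetical):

create(path, ...)        : 666 & ^022 = 644   (rw-r--r--)
create(path, 640, ...)   : 640 & ^022 & 666 = 640   (rw-r-----)
mkdirs(path)             : 777 & ^022 = 755   (rwxr-xr-x)
mkdirs(path, 750)        : 750 & ^022 & 777 = 750   (rwxr-x---)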

+
+ + +
Changes to the Application Shell +

New operations (illustrative examples follow the list):

+
+
chmod [-R] mode file …
+
+ Only the owner of a file or the super-user is permitted to change the mode of a file. +
+
chgrp [-R] group file …
+
+ The user invoking chgrp must belong to the specified group and be the owner of the file, or be the super-user. +
+
chown [-R] [owner][:[group]] file …
+
+ The owner of a file may only be altered by a super-user. +
+
ls file …
+
lsr file …
+
+ The output is reformatted to display the owner, group and mode. +
+
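Illustrative invocations of these operations (user, group and path names are hypothetical):

hadoop dfs -chmod -R 750 /user/alice/project
hadoop dfs -chgrp -R engineering /user/alice/project
hadoop dfs -chown -R alice:engineering /user/alice/project
hadoop dfs -lsr /user/alice/project      (owner, group and mode appear in the listing)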
+ + +
The Super-User +

+ The super-user is the user with the same identity as the name node process itself. Loosely, if you started the name node, then you are the super-user. The super-user can do anything in that permissions checks never fail for the super-user. There is no persistent notion of who was the super-user; when the name node is started, the process identity determines who is the super-user for now. The HDFS super-user does not have to be the super-user of the name node host, nor is it necessary that all clusters have the same super-user. Also, an experimenter running HDFS on a personal workstation conveniently becomes that installation's super-user without any configuration. +

+

+ In addition, the administrator may identify a distinguished group using a configuration parameter. If set, members of this group are also super-users. +

+
+ +
The Web Server +

+The identity of the web server is a configuration parameter. That is, the name node has no notion of the identity of the real user, but the web server behaves as if it has the identity (user and groups) of a user chosen by the administrator. Unless the chosen identity matches the super-user, parts of the name space may be invisible to the web server.

+
+ +
On-line Upgrade +

+If a cluster starts with a version 0.15 data set (fsimage), all files and directories will have owner O, group G, and mode M, where O and G are the user and group identity of the super-user, and M is a configuration parameter.

+
+ +
Configuration Parameters +
+
dfs.permissions = true
+
+ If yes use the permissions system as described here. If no, permission checking is turned off, but all other behavior is unchanged. Switching from one parameter value to the other does not change the mode, owner or group of files or directories. +

+

+ Regardless of whether permissions are on or off, chmod, chgrp and chown always check permissions. These functions are only useful in the permissions context, and so there is no backwards compatibility issue. Furthermore, this allows administrators to reliably set owners and permissions in advance of turning on regular permissions checking. +
+
dfs.web.ugi = webuser,webgroup
+
+ The user name to be used by the web server. Setting this to the name of the super-user allows any web client to see everything. Changing this to an otherwise unused identity allows web clients to see only those things visible using "other" permissions. Additional groups may be added to the comma-separated list. +
+
dfs.permissions.supergroup = supergroup
+
+ The name of the group of super-users. +
+
dfs.upgrade.permission = 777
+
+ The choice of initial mode during upgrade. The x permission is never set for files. For configuration files, the decimal value 511 (equal to 0777 octal) may be used. +
+
dfs.umaskmode = 022
+
+ The umask used when creating files and directories. May be specified either via three octal digits or symbolic values, with the same constraints as the dfs chmod command. +
+
+
+ + + +
+ + diff --git a/src/docs/src/documentation/content/xdocs/hdfs_quota_admin_guide.xml b/src/docs/src/documentation/content/xdocs/hdfs_quota_admin_guide.xml new file mode 100644 index 0000000..343a6b1 --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/hdfs_quota_admin_guide.xml @@ -0,0 +1,105 @@ + + + + + + + + +
HDFS Quotas Guide
+ + + +

The Hadoop Distributed File System (HDFS) allows the administrator to set quotas for the number of names used and the +amount of space used for individual directories. Name quotas and space quotas operate independently, but the administration and +implementation of the two types of quotas are closely parallel.

+ +
Name Quotas + +

The name quota is a hard limit on the number of file and directory names in the tree rooted at that directory. File and directory creations fail if the quota would be exceeded. Quotas stick with renamed directories; the rename operation fails if the operation would result in a quota violation. The attempt to set a quota will still succeed even if the directory would be in violation of the new quota. A newly created directory has no associated quota. The largest quota is Long.Max_Value. A quota of one forces a directory to remain empty. (Yes, a directory counts against its own quota!)

+ +

Quotas are persistent with the fsimage. When starting, if the fsimage is immediately in violation of a quota (perhaps the fsimage was surreptitiously modified), a warning is printed for each such violation. Setting or removing a quota creates a journal entry.

+ +
Space Quotas + +

The space quota is a hard limit on the number of bytes used by files in the tree rooted at that directory. Block +allocations fail if the quota would not allow a full block to be written. Each replica of a block counts against the quota. Quotas +stick with renamed directories; the rename operation fails if the operation would result in a quota violation. A newly created directory has no associated quota. +The largest quota is Long.Max_Value. A quota of zero still permits files to be created, but no blocks can be added to the files. +Directories don't use host file system space and don't count against the space quota. The host file system space used to save +the file meta data is not counted against the quota. Quotas are charged at the intended replication factor for the file; +changing the replication factor for a file will credit or debit quotas.

+ +

Quotas are persistent with the fsimage. When starting, if the fsimage is immediately in violation of a quota (perhaps the fsimage was surreptitiously modified), a warning is printed for each such violation. Setting or removing a quota creates a journal entry.

+ +
+ +
+ + Administrative Commands + +

Quotas are managed by a set of commands available only to the administrator; an illustrative example follows the list below.

+ +
    + +
  • dfsadmin -setQuota <N> <directory>...<directory>
    Set the name quota to be N for +each directory. Best effort for each directory, with faults reported if N is not a positive long integer, the +directory does not exist or it is a file, or the directory would immediately exceed the new quota.
  • + +
  • dfsadmin -clrQuota <directory>...<directory>
    Remove any name quota for each directory. Best +effort for each directory, with faults reported if the directory does not exist or it is a file. It is not a fault if the +directory has no quota.
  • + +
  • dfsadmin -setSpaceQuota <N> <directory>...<directory>
    Set the space quota to be N bytes for each directory. This is a hard limit on the total size of all the files under the directory tree. The space quota takes replication into account, i.e. one GB of data with replication of 3 consumes 3 GB of quota. N can also be specified with a binary prefix for convenience, e.g. 50g for 50 gigabytes and 2t for 2 terabytes. Best effort for each directory, with faults reported if N is neither zero nor a positive integer, the directory does not exist or it is a file, or the directory would immediately exceed the new quota.
  • + +
  • dfsadmin -clrSpaceQuota <directory>...<directory>
    Remove any space quota for each directory. Best +effort for each directory, with faults reported if the directory does not exist or it is a file. It is not a fault if the +directory has no quota.
  • + +
+ +
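A sketch of typical administrator usage (the directory and the limits are hypothetical):

hadoop dfsadmin -setQuota 10000 /user/alice       (at most 10000 names under /user/alice)
hadoop dfsadmin -setSpaceQuota 1t /user/alice     (at most one terabyte, counting all replicas)
hadoop fs -count -q /user/alice                   (verify, using the reporting command below)
hadoop dfsadmin -clrSpaceQuota /user/alice        (remove the space quota again)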
+ +
+ + Reporting Command + +

An extension to the count command of the HDFS shell reports quota values and the current count of names and bytes in use.

+ +
    + +
  • + + fs -count -q <directory>...<directory>
    With the -q option, also report the name quota +value set for each directory, the available name quota remaining, the space quota value set, and the available space quota +remaining. If the directory does not have a quota set, the reported values are none and inf. + +
  • + +
+ + + +
diff --git a/src/docs/src/documentation/content/xdocs/hdfs_shell.xml b/src/docs/src/documentation/content/xdocs/hdfs_shell.xml new file mode 100644 index 0000000..04794be --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/hdfs_shell.xml @@ -0,0 +1,470 @@ + + + + +
+ HDFS File System Shell Guide +
+ +
+ Overview +

+ The FileSystem (FS) shell is invoked by + bin/hadoop fs <args>. + All FS shell commands take path URIs as arguments. The URI + format is scheme://autority/path. For HDFS the scheme + is hdfs, and for the local filesystem the scheme + is file. The scheme and authority are optional. If not + specified, the default scheme specified in the configuration is + used. An HDFS file or directory such as /parent/child + can be specified as hdfs://namenodehost/parent/child or + simply as /parent/child (given that your configuration + is set to point to hdfs://namenodehost). Most of the + commands in FS shell behave like corresponding Unix + commands. Differences are described with each of the + commands. Error information is sent to stderr and the + output is sent to stdout. +

+
+ cat +

+ Usage: hadoop fs -cat URI [URI …] +

+

+ Copies source paths to stdout. +

+

Example:

+
    +
  • + hadoop fs -cat hdfs://nn1.example.com/file1 hdfs://nn2.example.com/file2 + +
  • +
  • + hadoop fs -cat file:///file3 /user/hadoop/file4 +
  • +
+

Exit Code:
+ Returns 0 on success and -1 on error.

+
+
+ chgrp +

+ Usage: hadoop fs -chgrp [-R] GROUP URI [URI …] +

+

+ Change group association of files. With -R, make the change recursively through the directory structure. The user must be the owner of files, or else a super-user. Additional information is in the HDFS Admin Guide: Permissions. +

+
+
+ chmod +

+ Usage: hadoop fs -chmod [-R] <MODE[,MODE]... | OCTALMODE> URI [URI …] +

+

+ Change the permissions of files. With -R, make the change recursively through the directory structure. The user must be the owner of the file, or else a super-user. Additional information is in the HDFS Admin Guide: Permissions. +

+
+
+ chown +

+ Usage: hadoop fs -chown [-R] [OWNER][:[GROUP]] URI [URI ] +

+

+ Change the owner of files. With -R, make the change recursively through the directory structure. The user must be a super-user. Additional information is in the HDFS Admin Guide: Permissions. +

+
+
+ copyFromLocal +

+ Usage: hadoop fs -copyFromLocal <localsrc> URI +

+

Similar to put command, except that the source is restricted to a local file reference.

+
+
+ copyToLocal +

+ Usage: hadoop fs -copyToLocal [-ignorecrc] [-crc] URI <localdst> +

+

Similar to get command, except that the destination is restricted to a local file reference.

+
+
+ count +

+ Usage: hadoop fs -count [-q] <paths> +

+

+ Count the number of directories, files and bytes under the paths that match the specified file pattern. The output columns are:
DIR_COUNT, FILE_COUNT, CONTENT_SIZE, FILE_NAME.

The output columns with -q are:
QUOTA, REMAINING_QUOTA, SPACE_QUOTA, REMAINING_SPACE_QUOTA, DIR_COUNT, FILE_COUNT, CONTENT_SIZE, FILE_NAME. +

+

Example:

+
    +
  • + hadoop fs -count hdfs://nn1.example.com/file1 hdfs://nn2.example.com/file2 + +
  • +
  • + hadoop fs -count -q hdfs://nn1.example.com/file1 + +
  • +
+

Exit Code:

+

+ Returns 0 on success and -1 on error. +

+
+
+ cp +

+ Usage: hadoop fs -cp URI [URI …] <dest> +

+

+ Copy files from source to destination. This command allows multiple sources as well in which case the destination must be a directory. +
+ Example:

+
    +
  • + hadoop fs -cp /user/hadoop/file1 /user/hadoop/file2 +
  • +
  • + hadoop fs -cp /user/hadoop/file1 /user/hadoop/file2 /user/hadoop/dir +
  • +
+

Exit Code:

+

+ Returns 0 on success and -1 on error. +

+
+
+ du +

+ Usage: hadoop fs -du URI [URI …] +

+

+ Displays the aggregate length of files contained in the directory, or the length of a file in case it's just a file.
+ Example:
hadoop fs -du /user/hadoop/dir1 /user/hadoop/file1 hdfs://nn.example.com/user/hadoop/dir1
+ Exit Code:
Returns 0 on success and -1 on error.

+
+
+ dus +

+ Usage: hadoop fs -dus <args> +

+

+ Displays a summary of file lengths. +

+
+
+ expunge +

+ Usage: hadoop fs -expunge +

+

Empty the Trash. Refer to HDFS Architecture for more information on Trash feature. +

+
+
+ get +

+ Usage: hadoop fs -get [-ignorecrc] [-crc] <src> <localdst> +
+

+

+ Copy files to the local file system. Files that fail the CRC check may be copied with the + -ignorecrc option. Files and CRCs may be copied using the + -crc option. +

+

Example:

+
    +
  • + hadoop fs -get /user/hadoop/file localfile +
  • +
  • + hadoop fs -get hdfs://nn.example.com/user/hadoop/file localfile +
  • +
+

Exit Code:

+

+ Returns 0 on success and -1 on error. +

+
+
+ getmerge +

+ Usage: hadoop fs -getmerge <src> <localdst> [addnl] +

+

+ Takes a source directory and a destination file as input and concatenates files in src into the destination local file. Optionally addnl can be set to enable adding a newline character at the end of each file. +

+
+
+ ls +

+ Usage: hadoop fs -ls <args> +

+

For a file returns stat on the file with the following format:

+

+ permissions number_of_replicas userid groupid filesize modification_date modification_time filename +

+

For a directory it returns the list of its direct children, as in Unix. A directory is listed as:

+

+ permissions userid groupid modification_date modification_time dirname +

+

Example:

+

+ hadoop fs -ls /user/hadoop/file1 +

+

Exit Code:

+

+ Returns 0 on success and -1 on error. +

+
+
+ lsr +

Usage: hadoop fs -lsr <args>
+ Recursive version of ls. Similar to Unix ls -R. +

+
+
+ mkdir +

+ Usage: hadoop fs -mkdir <paths> +
+

+

+ Takes path URIs as arguments and creates directories. The behavior is much like Unix mkdir -p, creating parent directories along the path. +

+

Example:

+
    +
  • + hadoop fs -mkdir /user/hadoop/dir1 /user/hadoop/dir2 +
  • +
  • + hadoop fs -mkdir hdfs://nn1.example.com/user/hadoop/dir hdfs://nn2.example.com/user/hadoop/dir + +
  • +
+

Exit Code:

+

+ Returns 0 on success and -1 on error. +

+
+
+ moveFromLocal +

+ Usage: hadoop fs -moveFromLocal <localsrc> <dst> +

+

Similar to put command, except that the source localsrc is deleted after it's copied.

+
+
+ moveToLocal +

+ Usage: hadoop fs -moveToLocal [-crc] <src> <dst> +

+

Displays a "Not implemented yet" message.

+
+
+ mv +

+ Usage: hadoop fs -mv URI [URI …] <dest> +

+

+ Moves files from source to destination. This command allows multiple sources as well in which case the destination needs to be a directory. Moving files across filesystems is not permitted. +
+ Example: +

+
    +
  • + hadoop fs -mv /user/hadoop/file1 /user/hadoop/file2 +
  • +
  • + hadoop fs -mv hdfs://nn.example.com/file1 hdfs://nn.example.com/file2 hdfs://nn.example.com/file3 hdfs://nn.example.com/dir1 +
  • +
+

Exit Code:

+

+ Returns 0 on success and -1 on error. +

+
+
+ put +

+ Usage: hadoop fs -put <localsrc> ... <dst> +

+

Copy single src, or multiple srcs from local file system to the destination filesystem. Also reads input from stdin and writes to destination filesystem.
+

+
    +
  • + hadoop fs -put localfile /user/hadoop/hadoopfile +
  • +
  • + hadoop fs -put localfile1 localfile2 /user/hadoop/hadoopdir +
  • +
  • + hadoop fs -put localfile hdfs://nn.example.com/hadoop/hadoopfile +
  • +
  • hadoop fs -put - hdfs://nn.example.com/hadoop/hadoopfile
    Reads the input from stdin.
  • +
+

Exit Code:

+

+ Returns 0 on success and -1 on error. +

+
+
+ rm +

+ Usage: hadoop fs -rm [-skipTrash] URI [URI …] +

+

+ Delete files specified as args. Only deletes non empty directory and files. If the -skipTrash option + is specified, the trash, if enabled, will be bypassed and the specified file(s) deleted immediately. This can be + useful when it is necessary to delete files from an over-quota directory. + Refer to rmr for recursive deletes.
+ Example: +

+
    +
  • + hadoop fs -rm hdfs://nn.example.com/file /user/hadoop/emptydir +
  • +
+

Exit Code:

+

+ Returns 0 on success and -1 on error. +

+
+
+ rmr +

+ Usage: hadoop fs -rmr [-skipTrash] URI [URI …] +

+

Recursive version of delete. If the -skipTrash option + is specified, the trash, if enabled, will be bypassed and the specified file(s) deleted immediately. This can be + useful when it is necessary to delete files from an over-quota directory.
+ + Example: +

+
    +
  • + hadoop fs -rmr /user/hadoop/dir +
  • +
  • + hadoop fs -rmr hdfs://nn.example.com/user/hadoop/dir +
  • +
+

Exit Code:

+

+ Returns 0 on success and -1 on error. +

+
+
+ setrep +

+ Usage: hadoop fs -setrep [-R] <path> +

+

+ Changes the replication factor of a file. -R option is for recursively increasing the replication factor of files within a directory. +

+

Example:

+
    +
  • + hadoop fs -setrep -w 3 -R /user/hadoop/dir1 +
  • +
+

Exit Code:

+

+ Returns 0 on success and -1 on error. +

+
+
+ stat +

+ Usage: hadoop fs -stat URI [URI …] +

+

+ Returns the stat information on the path. +

+

Example:

+
    +
  • + hadoop fs -stat path +
  • +
+

Exit Code:
+ Returns 0 on success and -1 on error.

+
+
+ tail +

+ Usage: hadoop fs -tail [-f] URI +

+

+ Displays last kilobyte of the file to stdout. -f option can be used as in Unix. +

+

Example:

+
    +
  • + hadoop fs -tail pathname +
  • +
+

Exit Code:
+ Returns 0 on success and -1 on error.

+
+
+ test +

+ Usage: hadoop fs -test -[ezd] URI +

+

+ Options:
+ -e check to see if the file exists. Return 0 if true.
+ -z check to see if the file is zero length. Return 0 if true.
+ -d check to see if the path is directory. Return 0 if true.

+

Example:

+
    +
  • + hadoop fs -test -e filename +
  • +
+
+
+ text +

+ Usage: hadoop fs -text <src> +
+

+

+ Takes a source file and outputs the file in text format. The allowed formats are zip and TextRecordInputStream. +

+
+
+ touchz +

+ Usage: hadoop fs -touchz URI [URI …] +
+

+

+ Create a file of zero length. +

+

Example:

+
    +
  • + hadoop fs -touchz pathname
  • +
+

Exit Code:
+ Returns 0 on success and -1 on error.

+
+
+ +
diff --git a/src/docs/src/documentation/content/xdocs/hdfs_user_guide.xml b/src/docs/src/documentation/content/xdocs/hdfs_user_guide.xml new file mode 100644 index 0000000..33f3867 --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/hdfs_user_guide.xml @@ -0,0 +1,489 @@ + + + + + + + + +
+ + HDFS User Guide + +
+ + +
Purpose +

+ This document is a starting point for users working with + Hadoop Distributed File System (HDFS) either as a part of a + Hadoop + cluster or as a stand-alone general purpose distributed file system. + While HDFS is designed to "just work" in many environments, a working + knowledge of HDFS helps greatly with configuration improvements and + diagnostics on a specific cluster. +

+
+ +
Overview +

+ HDFS is the primary distributed storage used by Hadoop applications. A + HDFS cluster primarily consists of a NameNode that manages the + file system metadata and DataNodes that store the actual data. The + HDFS Architecture describes HDFS in detail. This user guide primarily deals with + the interaction of users and administrators with HDFS clusters. + The HDFS architecture diagram depicts + basic interactions among NameNode, the DataNodes, and the clients. + Clients contact NameNode for file metadata or file modifications and perform + actual file I/O directly with the DataNodes. +

+

+ The following are some of the salient features that could be of + interest to many users. +

+
    +
  • + Hadoop, including HDFS, is well suited for distributed storage + and distributed processing using commodity hardware. It is fault + tolerant, scalable, and extremely simple to expand. + Map/Reduce, + well known for its simplicity and applicability for large set of + distributed applications, is an integral part of Hadoop. +
  • +
  • + HDFS is highly configurable with a default configuration well + suited for many installations. Most of the time, configuration + needs to be tuned only for very large clusters. +
  • +
  • + Hadoop is written in Java and is supported on all major platforms. +
  • +
  • + Hadoop supports shell-like commands to interact with HDFS directly. +
  • +
  • + The NameNode and DataNodes have built-in web servers that make it easy to check the current status of the cluster.
  • +
  • + New features and improvements are regularly implemented in HDFS. + The following is a subset of useful features in HDFS: +
      +
    • + File permissions and authentication. +
    • +
    • + Rack awareness: to take a node's physical location into + account while scheduling tasks and allocating storage. +
    • +
    • + Safemode: an administrative mode for maintenance. +
    • +
    • + fsck: a utility to diagnose health of the file system, to + find missing files or blocks. +
    • +
    • + Rebalancer: tool to balance the cluster when the data is + unevenly distributed among DataNodes. +
    • +
    • + Upgrade and rollback: after a software upgrade, + it is possible to + rollback to HDFS' state before the upgrade in case of unexpected + problems. +
    • +
    • + Secondary NameNode: performs periodic checkpoints of the + namespace and helps keep the size of file containing log of HDFS + modifications within certain limits at the NameNode. +
    • +
    +
  • +
+ +
Pre-requisites +

+ The following documents describe installation and set up of a + Hadoop cluster : +

+ +

+ The rest of this document assumes the user is able to set up and run a + HDFS with at least one DataNode. For the purpose of this document, + both the NameNode and DataNode could be running on the same physical + machine. +

+ +
Web Interface +

+ NameNode and DataNode each run an internal web server in order to + display basic information about the current status of the cluster. + With the default configuration, the NameNode front page is at + http://namenode-name:50070/. + It lists the DataNodes in the cluster and basic statistics of the + cluster. The web interface can also be used to browse the file + system (using "Browse the file system" link on the NameNode front + page). +

+ +
Shell Commands +

+ Hadoop includes various shell-like commands that directly interact with HDFS and other file systems that Hadoop supports. The command bin/hadoop fs -help lists the commands supported by the Hadoop shell. Furthermore, the command bin/hadoop fs -help command-name displays more detailed help for a command. These commands support most of the normal file system operations like copying files, changing file permissions, etc. They also support a few HDFS specific operations like changing the replication of files. +

+ +
DFSAdmin Command +

+ The bin/hadoop dfsadmin command supports a few HDFS administration related operations. The bin/hadoop dfsadmin -help command lists all the commands currently supported. For example: +

+
    +
  • + -report + : reports basic statistics of HDFS. Some of this information is + also available on the NameNode front page. +
  • +
  • + -safemode + : though usually not required, an administrator can manually enter + or leave Safemode. +
  • +
  • + -finalizeUpgrade + : removes previous backup of the cluster made during last upgrade. +
  • +
  • + -refreshNodes : Updates the set of hosts allowed to connect to the namenode. Re-reads the config file to update values defined by dfs.hosts and dfs.hosts.exclude and reads the entries (hostnames) in those files. Each entry not defined in dfs.hosts but in dfs.hosts.exclude is decommissioned. Each entry defined in dfs.hosts and also in dfs.hosts.exclude is stopped from decommissioning if it has already been marked for decommission. Entries not present in both the lists are decommissioned.
  • +
+

+ For command usage, see dfsadmin command. +
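For instance, the operations listed above are invoked as:

bin/hadoop dfsadmin -report
bin/hadoop dfsadmin -safemode get
bin/hadoop dfsadmin -refreshNodes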

+
+ +
Secondary NameNode +

+ The NameNode stores modifications to the file system as a log + appended to a native file system file (edits). + When a NameNode starts up, it reads HDFS state from an image + file (fsimage) and then applies edits from the + edits log file. It then writes new HDFS state to the fsimage + and starts normal + operation with an empty edits file. Since NameNode merges + fsimage and edits files only during start up, + the edits log file could get very large over time on a busy cluster. + Another side effect of a larger edits file is that next + restart of NameNode takes longer. +

+

+ The secondary NameNode merges the fsimage and the edits log files periodically + and keeps edits log size within a limit. It is usually run on a + different machine than the primary NameNode since its memory requirements + are on the same order as the primary NameNode. The secondary + NameNode is started by bin/start-dfs.sh on the nodes + specified in conf/masters file. +

+

+ The start of the checkpoint process on the secondary NameNode is + controlled by two configuration parameters. +

+
    +
  • + fs.checkpoint.period, set to 1 hour by default, specifies + the maximum delay between two consecutive checkpoints, and +
  • +
  • + fs.checkpoint.size, set to 64MB by default, defines the + size of the edits log file that forces an urgent checkpoint even if + the maximum checkpoint delay is not reached. +
  • +
+

+ The secondary NameNode stores the latest checkpoint in a directory which is structured the same way as the primary NameNode's directory, so that the checkpointed image is always ready to be read by the primary NameNode if necessary. +

+

+ The latest checkpoint can be imported to the primary NameNode if + all other copies of the image and the edits files are lost. + In order to do that one should: +

+
    +
  • + Create an empty directory specified in the + dfs.name.dir configuration variable; +
  • +
  • + Specify the location of the checkpoint directory in the + configuration variable fs.checkpoint.dir; +
  • +
  • + and start the NameNode with -importCheckpoint option. +
  • +
+

+ The NameNode will upload the checkpoint from the + fs.checkpoint.dir directory and then save it to the NameNode + directory(s) set in dfs.name.dir. + The NameNode will fail if a legal image is contained in + dfs.name.dir. + The NameNode verifies that the image in fs.checkpoint.dir is + consistent, but does not modify it in any way. +
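A sketch of the recovery sequence, after the empty dfs.name.dir and the fs.checkpoint.dir location have been configured as described above:

bin/hadoop namenode -importCheckpoint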

+

+ For command usage, see secondarynamenode command. +

+ +
Rebalancer +

+ HDFS data might not always be placed uniformly across the DataNodes. One common reason is the addition of new DataNodes to an existing cluster. While placing new blocks (data for a file is stored as a series of blocks), the NameNode considers various parameters before choosing the DataNodes to receive these blocks. Some of the considerations are: +

+
    +
  • + Policy to keep one of the replicas of a block on the same node + as the node that is writing the block. +
  • +
  • + Need to spread different replicas of a block across the racks so + that cluster can survive loss of whole rack. +
  • +
  • + One of the replicas is usually placed on the same rack as the + node writing to the file so that cross-rack network I/O is + reduced. +
  • +
  • + Spread HDFS data uniformly across the DataNodes in the cluster. +
  • +
+

+ Due to multiple competing considerations, data might not be uniformly placed across the DataNodes. HDFS provides a tool for administrators that analyzes block placement and rebalances data across the DataNodes. A brief administrator's guide for the rebalancer is attached as a PDF to HADOOP-1652. +
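The rebalancer is typically started and stopped with the scripts shipped in bin/; the threshold value below is illustrative:

bin/start-balancer.sh -threshold 5    (move blocks until each DataNode's utilization is within 5% of the cluster average)
bin/stop-balancer.sh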

+

+ For command usage, see balancer command. +

+ +
Rack Awareness +

+ Typically large Hadoop clusters are arranged in racks, and network traffic between different nodes within the same rack is much more desirable than network traffic across racks. In addition, the NameNode tries to place replicas of a block on multiple racks for improved fault tolerance. Hadoop lets the cluster administrators decide which rack a node belongs to through the configuration variable dfs.network.script. When this script is configured, each node runs the script to determine its rack id. A default installation assumes all the nodes belong to the same rack. This feature and configuration is further described in the PDF attached to HADOOP-692. +

+ +
Safemode +

+ During start up the NameNode loads the file system state from the + fsimage and the edits log file. It then waits for DataNodes + to report their blocks so that it does not prematurely start + replicating the blocks even though enough replicas may already exist in the + cluster. During this time the NameNode stays in Safemode. + Safemode + for the NameNode is essentially a read-only mode for the HDFS cluster, + where it does not allow any modifications to the file system or blocks. + Normally the NameNode leaves Safemode automatically after the DataNodes + have reported that most file system blocks are available. + If required, HDFS can be placed in Safemode explicitly + using the 'bin/hadoop dfsadmin -safemode' command. The NameNode front + page shows whether Safemode is on or off. A more detailed + description and configuration is maintained as JavaDoc for + setSafeMode(). +
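For example, the Safemode state can be inspected or toggled with the dfsadmin command mentioned above:

$ bin/hadoop dfsadmin -safemode get      # report whether Safemode is ON or OFF
$ bin/hadoop dfsadmin -safemode enter    # place HDFS in Safemode explicitly
$ bin/hadoop dfsadmin -safemode leave    # leave Safemode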

+ +
fsck +

+ HDFS supports the fsck command to check for various + inconsistencies. + It is designed for reporting problems with various + files, for example, missing blocks for a file or under-replicated + blocks. Unlike a traditional fsck utility for native file systems, + this command does not correct the errors it detects. Normally the NameNode + automatically corrects most of the recoverable failures. By default + fsck ignores open files but provides an option to select all files during reporting. + The HDFS fsck command is not a + Hadoop shell command. It can be run as 'bin/hadoop fsck'. + For command usage, see the fsck command. + fsck can be run on the whole file system or on a subset of files. +
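For example (the path /user/foo below is only a placeholder):

$ bin/hadoop fsck /                          # check the whole file system
$ bin/hadoop fsck /user/foo -files -blocks   # report files and blocks under a subtree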

+ +
Upgrade and Rollback +

+ When Hadoop is upgraded on an existing cluster, as with any + software upgrade, it is possible there are new bugs or + incompatible changes that affect existing applications and were + not discovered earlier. In any non-trivial HDFS installation, it + is not an option to lose any data, let alone to restart HDFS from + scratch. HDFS allows administrators to go back to an earlier version + of Hadoop and roll back the cluster to the state it was in + before + the upgrade. HDFS upgrade is described in more detail in the + upgrade wiki. + HDFS can have one such backup at a time. Before upgrading, + administrators need to remove the existing backup using the bin/hadoop + dfsadmin -finalizeUpgrade command. The following + briefly describes the typical upgrade procedure: +

+
    +
  • + Before upgrading the Hadoop software, + finalize if there is an existing backup. + dfsadmin -upgradeProgress status + can tell if the cluster needs to be finalized. +
  • +
  • Stop the cluster and distribute new version of Hadoop.
  • +
  • + Run the new version with -upgrade option + (bin/start-dfs.sh -upgrade). +
  • +
  • + Most of the time, the cluster works just fine. Once the new HDFS is + considered to be working well (perhaps after a few days of operation), + finalize the upgrade. Note that until the cluster is finalized, + deleting the files that existed before the upgrade does not free + up real disk space on the DataNodes. +
  • +
  • + If there is a need to move back to the old version, +
      +
    • stop the cluster and distribute earlier version of Hadoop.
    • +
    • start the cluster with the rollback option + (bin/start-dfs.sh -rollback). +
    • +
    +
  • +
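Putting the commands mentioned in this section together, a sketch of the procedure might look like the following (illustrative only; distribute the new or old Hadoop version between the stop and start steps as described above):

$ bin/hadoop dfsadmin -upgradeProgress status   # check whether an earlier upgrade needs finalizing
$ bin/hadoop dfsadmin -finalizeUpgrade          # remove the existing backup, if any
$ bin/stop-dfs.sh                               # stop the cluster, then distribute the new version
$ bin/start-dfs.sh -upgrade                     # start the new version with the -upgrade option
# ... once the new version is considered to be working well:
$ bin/hadoop dfsadmin -finalizeUpgrade
# ... or, to move back to the old version instead:
$ bin/stop-dfs.sh                               # stop the cluster, then distribute the earlier version
$ bin/start-dfs.sh -rollback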
+ +
File Permissions and Security +

+ The file permissions are designed to be similar to file permissions on + other familiar platforms like Linux. Currently, security is limited + to simple file permissions. The user that starts NameNode is + treated as the superuser for HDFS. Future versions of HDFS will + support network authentication protocols like Kerberos for user + authentication and encryption of data transfers. The details are discussed in the + HDFS Admin Guide: Permissions. +

+ +
Scalability +

+ Hadoop currently runs on clusters with thousands of nodes. + Powered By Hadoop + lists some of the organizations that deploy Hadoop on large + clusters. HDFS has one NameNode for each cluster. Currently + the total memory available on the NameNode is the primary scalability + limitation. On very large clusters, increasing the average size of + files stored in HDFS helps with increasing cluster size without + increasing memory requirements on the NameNode. + + The default configuration may not suit very large clusters. The + Hadoop FAQ page lists + suggested configuration improvements for large Hadoop clusters. +

+ +
Related Documentation +

+ This user guide is a good starting point for + working with HDFS. While the user guide continues to improve, + there is a wealth of documentation about Hadoop and HDFS. + The following list is a starting point for further exploration: +

+
    +
  • + Hadoop Home Page: The start page for everything Hadoop. +
  • +
  • + Hadoop Wiki + : Front page for the Hadoop Wiki documentation. Unlike this + guide, which is part of the Hadoop source tree, the Hadoop Wiki is + regularly edited by the Hadoop community. +
  • +
  • FAQ from Hadoop Wiki. +
  • +
  • + Hadoop JavaDoc API. +
  • +
  • + Hadoop User Mailing List : + core-user[at]hadoop.apache.org. +
  • +
  • + Explore src/hdfs/hdfs-default.xml. + It includes brief + description of most of the configuration variables available. +
  • +
  • + Hadoop Command Guide: commands usage. +
  • +
+
+ + +
+ + diff --git a/src/docs/src/documentation/content/xdocs/hod_admin_guide.xml b/src/docs/src/documentation/content/xdocs/hod_admin_guide.xml new file mode 100644 index 0000000..0dda1f3 --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/hod_admin_guide.xml @@ -0,0 +1,386 @@ + + + + + + + +
+ + HOD Administrator Guide + +
+ + +
+Overview +

Hadoop On Demand (HOD) is a system for provisioning and +managing independent Hadoop Map/Reduce and Hadoop Distributed File System (HDFS) +instances on a shared cluster +of nodes. HOD is a tool that makes it easy for administrators and users to +quickly set up and use Hadoop. HOD is also a very useful tool for Hadoop developers +and testers who need to share a physical cluster for testing their own Hadoop +versions.

+ +

HOD relies on a resource manager (RM) for allocation of nodes that it can use for +running Hadoop instances. At present it runs with the Torque +resource manager. +

+ +

+The basic system architecture of HOD includes these components:

+
    +
  • A Resource manager (possibly together with a scheduler)
  • +
  • Various HOD components
  • +
  • Hadoop Map/Reduce and HDFS daemons
  • +
+ +

+HOD provisions and maintains Hadoop Map/Reduce and, optionally, HDFS instances +through interaction with the above components on a given cluster of nodes. A cluster of +nodes can be thought of as comprising two sets of nodes:

+
    +
  • Submit nodes: Users use the HOD client on these nodes to allocate clusters, and then +use the Hadoop client to submit Hadoop jobs.
  • +
  • Compute nodes: Using the resource manager, HOD components are run on these nodes to +provision the Hadoop daemons. After that Hadoop jobs run on them.
  • +
+ +

+Here is a brief description of the sequence of operations involved in allocating a cluster and +running jobs on it. +

+ +
    +
  • The user uses the HOD client on the Submit node to allocate a desired number of +cluster nodes and to provision Hadoop on them.
  • +
  • The HOD client uses a resource manager interface (qsub, in Torque) to submit a HOD +process, called the RingMaster, as a Resource Manager job, to request the user's desired number +of nodes. This job is submitted to the central server of the resource manager (pbs_server, in Torque).
  • +
  • On the compute nodes, the resource manager slave daemons (pbs_moms in Torque) accept +and run jobs that they are assigned by the central server (pbs_server in Torque). The RingMaster +process is started on one of the compute nodes (mother superior, in Torque).
  • +
  • The RingMaster then uses another resource manager interface (pbsdsh, in Torque) to run +the second HOD component, HodRing, as distributed tasks on each of the compute +nodes allocated.
  • +
  • The HodRings, after initializing, communicate with the RingMaster to get Hadoop commands, +and run them accordingly. Once the Hadoop commands are started, they register with the RingMaster, +giving information about the daemons.
  • +
  • All the configuration files needed for the Hadoop instances are generated by HOD itself, +some derived from options given by the user in the HOD configuration file.
  • +
  • The HOD client keeps communicating with the RingMaster to find out the location of the +JobTracker and HDFS daemons.
  • +
+ +

This guide shows you how to get started using HOD, reviews various HOD features and command line options, and provides detailed troubleshooting help.

+ +
+ +
+Pre-requisites +

To use HOD, your system should include the following hardware and software +components.

+

Operating System: HOD is currently tested on RHEL4.
+Nodes : HOD requires a minimum of three nodes configured through a resource manager.

+ +

Software

+

The following components must be installed on ALL nodes before using HOD:

+
    +
  • Torque: The resource manager that HOD uses for node allocation and job submission (see the Resource Manager section below).
  • +
  • Python: HOD and its components are implemented in Python, so a Python installation is required.
  • +
+

The following components are optional and can be installed to obtain better +functionality from HOD:

+
    +
  • Twisted Python: This can be + used for improving the scalability of HOD. If this module is detected to be + installed, HOD uses it, else it falls back to default modules.
  • +
  • Hadoop: HOD can automatically + distribute Hadoop to all nodes in the cluster. However, it can also use a + pre-installed version of Hadoop, if it is available on all nodes in the cluster. + HOD currently supports Hadoop 0.15 and above.
  • +
+ +

NOTE: HOD requires these components to be installed at the same +location on all nodes in the cluster. Configuration is also +simpler if the same location is used on the submit +nodes. +

+
+ +
+Resource Manager +

Currently HOD works with the Torque resource manager, which it uses for its node + allocation and job submission. Torque is an open source resource manager from + Cluster Resources, a community effort + based on the PBS project. It provides control over batch jobs and distributed compute nodes. Torque is + freely available for download from here. +

+ +

All documentation related to Torque can be found under + the section TORQUE Resource Manager here. You can + get the wiki documentation from here. + Users may wish to subscribe to TORQUE's mailing list or view the archive for questions and + comments here. +

+ +

To use HOD with Torque:

+
    +
  • Install Torque components: pbs_server on one node (head node), pbs_mom on all + compute nodes, and PBS client tools on all compute nodes and submit + nodes. Perform at least a basic configuration so that the Torque system is up and + running, that is, pbs_server knows which machines to talk to. Look here + for basic configuration. + + For advanced configuration, see here
  • +
  • Create a queue for submitting jobs on the pbs_server. The name of the queue is the + same as the HOD configuration parameter, resource-manager.queue. The HOD client uses this queue to + submit the RingMaster process as a Torque job.
  • +
  • Specify a cluster name as a property for all nodes in the cluster. + This can be done by using the qmgr command. For example: + qmgr -c "set node node properties=cluster-name". The name of the cluster is the same as + the HOD configuration parameter, hod.cluster.
  • +
  • Make sure that jobs can be submitted to the nodes. This can be done by + using the qsub command. For example: + echo "sleep 30" | qsub -l nodes=3
  • +
+ +
+ +
+Installing HOD + +

Once the resource manager is set up, you can obtain and +install HOD.

+
    +
  • If you are getting HOD from the Hadoop tarball, it is available under the + 'contrib' section of Hadoop, under the root directory 'hod'.
  • +
  • If you are building from source, you can run ant tar from the Hadoop root + directory to generate the Hadoop tarball, and then get HOD from there, + as described above.
  • +
  • Distribute the files under this directory to all the nodes in the + cluster. Note that the location where the files are copied should be + the same on all the nodes.
  • +
  • Note that compiling Hadoop builds HOD with the appropriate permissions + set on all the required HOD script files.
  • +
+
+ +
+Configuring HOD + +

You can configure HOD once it is installed. The minimal configuration needed +to run HOD is described below. More advanced configuration options are discussed +in the HOD Configuration Guide.

+
+ Minimal Configuration +

To get started using HOD, the following minimal configuration is + required:

+
    +
  • On the node from where you want to run HOD, edit the file hodrc + located in the <install dir>/conf directory. This file + contains the minimal set of values required to run hod.
  • +
  • +

    Specify values suitable to your environment for the following + variables defined in the configuration file. Note that some of these + variables are defined at more than one place in the file.

    + +
      +
    • ${JAVA_HOME}: Location of Java for Hadoop. Hadoop supports Sun JDK + 1.6.x and above.
    • +
    • ${CLUSTER_NAME}: Name of the cluster which is specified in the + 'node property' as mentioned in resource manager configuration.
    • +
    • ${HADOOP_HOME}: Location of Hadoop installation on the compute and + submit nodes.
    • +
    • ${RM_QUEUE}: Queue configured for submitting jobs in the resource + manager configuration.
    • +
    • ${RM_HOME}: Location of the resource manager installation on the + compute and submit nodes.
    • +
    +
  • + +
  • +

    The following environment variables may need to be set depending on + your environment. These variables must be defined where you run the + HOD client and must also be specified in the HOD configuration file as the + value of the key resource_manager.env-vars. Multiple variables can be + specified as a comma separated list of key=value pairs.

    + +
      +
    • HOD_PYTHON_HOME: If you install python in a non-default location + on the compute nodes or submit nodes, then this variable must be + defined to point to the python executable in the non-standard + location.
    • +
    +
  • +
+
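To make the variables above concrete, a minimal hodrc might look roughly like the sketch below. The section and option names are taken from the HOD Configuration Guide later in this document; the right-hand sides are the placeholders described above and must be replaced with real paths and names:

[hod]
java-home        = ${JAVA_HOME}
cluster          = ${CLUSTER_NAME}

[resource_manager]
queue            = ${RM_QUEUE}
batch-home       = ${RM_HOME}

[gridservice-mapred]
pkgs             = ${HADOOP_HOME}

[gridservice-hdfs]
pkgs             = ${HADOOP_HOME}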
+ +
+ Advanced Configuration +

You can review and modify other configuration options to suit + your specific needs. Refer to the HOD Configuration + Guide for more information.

+
+
+ +
+ Running HOD +

You can run HOD once it is configured. Refer to the HOD User Guide for more information.

+
+ +
+ Supporting Tools and Utilities +

This section describes supporting tools and utilities that can be used to + manage HOD deployments.

+ +
+ logcondense.py - Manage Log Files +

As mentioned in the + HOD User Guide, + HOD can be configured to upload + Hadoop logs to a statically configured HDFS. Over time, the number of logs uploaded + to HDFS could increase. logcondense.py is a tool that helps + administrators to remove log files uploaded to HDFS.

+
+ Running logcondense.py +

logcondense.py is available under the hod_install_location/support folder. You can either + run it using python, for example, python logcondense.py, or give execute permissions + to the file, and directly run it as logcondense.py. logcondense.py needs to be + run by a user who has sufficient permissions to remove files from locations where log + files are uploaded in the HDFS, if permissions are enabled. For example, as mentioned in the + HOD Configuration Guide, the logs could + be configured to come under the user's home directory in HDFS. In that case, the user + running logcondense.py should have superuser privileges to remove the files from under + all user home directories.

+
+
+ Command Line Options for logcondense.py +

The following command line options are supported for logcondense.py.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Short Option | Long Option | Meaning | Example
-p | --package | Complete path to the hadoop script. The version of hadoop must be the same as the one running HDFS. | /usr/bin/hadoop
-d | --days | Delete log files older than the specified number of days. | 7
-c | --config | Path to the Hadoop configuration directory, under which hadoop-site.xml resides. The hadoop-site.xml must point to the HDFS NameNode from where logs are to be removed. | /home/foo/hadoop/conf
-l | --logs | An HDFS path; this must be the same HDFS path as specified for the log-destination-uri, as mentioned in the HOD Configuration Guide, without the hdfs:// URI string. | /user
-n | --dynamicdfs | If true, this indicates that the logcondense.py script should delete HDFS logs in addition to Map/Reduce logs. Otherwise, it only deletes Map/Reduce logs, which is also the default if this option is not specified. This option is useful if dynamic HDFS installations are being provisioned by HOD, and the static HDFS installation is being used only to collect logs - a scenario that may be common in test clusters. | false
+

So, for example, to delete all log files older than 7 days using a hadoop-site.xml stored in + ~/hadoop-conf, using the hadoop installation under ~/hadoop-0.17.0, you could say:

+

python logcondense.py -p ~/hadoop-0.17.0/bin/hadoop -d 7 -c ~/hadoop-conf -l /user

+
+
+
+ checklimits.sh - Monitor Resource Limits +

checklimits.sh is a HOD tool specific to the Torque/Maui environment + (Maui Cluster Scheduler is an open source job + scheduler for clusters and supercomputers, from Cluster Resources). The + checklimits.sh script + updates the Torque comment field when newly submitted jobs violate or + exceed + the user limits set up in the Maui scheduler. It uses qstat, does one pass + over the Torque job-list to determine queued or unfinished jobs, runs the Maui + tool checkjob on each job to see if user limits are violated and then + runs Torque's qalter utility to update the job attribute 'comment'. Currently + it updates the comment as User-limits exceeded. Requested:([0-9]*) + Used:([0-9]*) MaxLimit:([0-9]*) for those jobs that violate limits. + This comment field is then used by HOD to behave accordingly depending on + the type of violation.

+
+ Running checklimits.sh +

checklimits.sh is available under the hod_install_location/support + folder. This shell script can be run directly as sh + checklimits.sh or as ./checklimits.sh after enabling + execute permissions. Torque and Maui binaries should be available + on the machine where the tool is run and should be in the path + of the shell script process. To update the + comment field of jobs from different users, this tool must be run with + Torque administrative privileges. It must be run at regular + intervals, for example via cron, so that jobs violating + constraints are updated promptly. Please note that the resource manager + and scheduler commands used in this script can be expensive and so + it is better not to run this inside a tight loop without sleeping.
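For instance (illustrative only; the interval and install path are assumptions), a crontab entry on a machine with Torque administrative privileges might look like:

# run checklimits.sh every five minutes
*/5 * * * * sh /path/to/hod_install_location/support/checklimits.sh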

+
+
+ +
+ verify-account - Script to verify an account under which + jobs are submitted +

Production systems use accounting packages to charge users for using + shared compute resources. HOD supports a parameter + resource_manager.pbs-account to allow users to identify the + account under which they would like to submit jobs. It may be necessary + to verify that this account is a valid one configured in an accounting + system. The hod-install-dir/bin/verify-account script + provides a mechanism to plug-in a custom script that can do this + verification.

+ +
+ Integrating the verify-account script with HOD +

HOD runs the verify-account script, passing in the + resource_manager.pbs-account value as an argument to the script, + before allocating a cluster. Sites can write a script that verifies this + account against their accounting systems. Returning a non-zero exit + code from this script will cause HOD to fail allocation. Also, in + case of an error, HOD will print the output of the script to the user. + Any descriptive error message can be passed to the user from the + script in this manner.

+

The default script that comes with the HOD installation does not + do any validation, and returns a zero exit code.

+

If the verify-account script is not found, then HOD treats + verification as disabled and continues with the allocation as is.
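As an illustration only, a site-specific verify-account script might follow the sketch below; the file /etc/hod/valid-accounts used here is purely hypothetical and stands in for whatever accounting system a site actually uses:

#!/bin/sh
# Illustrative sketch of hod-install-dir/bin/verify-account.
# HOD passes the resource_manager.pbs-account value as the first argument.
ACCOUNT="$1"
VALID_ACCOUNTS_FILE=/etc/hod/valid-accounts   # hypothetical list of valid accounts, one per line

if grep -qx "$ACCOUNT" "$VALID_ACCOUNTS_FILE" 2>/dev/null; then
  exit 0    # account is valid; HOD proceeds with the allocation
else
  echo "Account '$ACCOUNT' is not recognized by the accounting system."
  exit 1    # a non-zero exit code makes HOD fail the allocation
fi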

+
+
+ +
+ + +
diff --git a/src/docs/src/documentation/content/xdocs/hod_config_guide.xml b/src/docs/src/documentation/content/xdocs/hod_config_guide.xml new file mode 100644 index 0000000..559893f --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/hod_config_guide.xml @@ -0,0 +1,328 @@ + + + + + + + +
+ + HOD Configuration Guide + +
+ + +
+ 1. Introduction +

This guide discusses Hadoop on Demand (HOD) configuration sections and shows you how to work with the most important + and commonly used HOD configuration options.

+

Configuration options + can be specified in two ways: a configuration file + in the INI format, and as command line options to the HOD shell, + specified in the format --section.option[=value]. If the same option is + specified in both places, the value specified on the command line + overrides the value in the configuration file.

+ +

+ To get a simple description of all configuration options, type: +

+
$ hod --verbose-help
+ + +
+ +
+ 2. Sections + +

HOD organizes configuration options into these sections:

+ +
    +
  • hod: Options for the HOD client
  • +
  • resource_manager: Options for specifying which resource manager + to use, and other parameters for using that resource manager
  • +
  • ringmaster: Options for the RingMaster process.
  • +
  • hodring: Options for the HodRing processes
  • +
  • gridservice-mapred: Options for the Map/Reduce daemons
  • +
  • gridservice-hdfs: Options for the HDFS daemons.
  • +
+ +
+ +
+ 3. HOD Configuration Options + +

The following section describes configuration options common to most + HOD sections followed by sections that describe configuration options + specific to each HOD section.

+ +
+ 3.1 Common configuration options + +

Certain configuration options are defined in most of the sections of + the HOD configuration. Options defined in a section are used by the + process for which that section applies. These options have the same + meaning, but can have different values in each section. +

+ +
    +
  • temp-dir: Temporary directory for usage by the HOD processes. Make + sure that the users who will run hod have rights to create + directories under the directory specified here. If you + wish to make this directory vary across allocations, + you can make use of the environment variables which will + be made available by the resource manager to the HOD + processes. For example, in a Torque setup, having + --ringmaster.temp-dir=/tmp/hod-temp-dir.$PBS_JOBID would + let the ringmaster use a different temp-dir for each + allocation; Torque expands this variable before starting + the ringmaster.
  • + +
  • debug: Numeric value from 1-4. 4 produces the most log information, + and 1 the least.
  • + +
  • log-dir: Directory where log files are stored. By default, this is + <install-location>/logs/. The restrictions and notes for the + temp-dir variable apply here too. +
  • + +
  • xrs-port-range: Range of ports, among which an available port shall + be picked for use to run an XML-RPC server.
  • + +
  • http-port-range: Range of ports, among which an available port shall + be picked for use to run an HTTP server.
  • + +
  • java-home: Location of Java to be used by Hadoop.
  • +
  • syslog-address: Address to which a syslog daemon is bound. The format + of the value is host:port. If configured, HOD log messages + will be logged to syslog using this value.
  • + +
+
+ +
+ 3.2 hod options + +
    +
  • cluster: Descriptive name given to the cluster. For Torque, this is + specified as a 'Node property' for every node in the cluster. + HOD uses this value to compute the number of available nodes.
  • + +
  • client-params: Comma-separated list of hadoop config parameters + specified as key-value pairs. These will be used to + generate a hadoop-site.xml on the submit node that + should be used for running Map/Reduce jobs.
  • +
  • job-feasibility-attr: Regular expression string that specifies + whether and how to check job feasibility - resource + manager or scheduler limits. The current + implementation corresponds to the Torque job + attribute 'comment' and by default is disabled. + When set, HOD uses it to decide what type + of limit violation is triggered and either + deallocates the cluster or stays in the queued state, + depending on whether the request is beyond the maximum limits or + the cumulative usage has crossed the maximum limits. + The Torque comment attribute may be updated + periodically by an external mechanism. For example, the + comment attribute can be updated by running the + checklimits.sh script in the hod/support directory; + then setting job-feasibility-attr equal to the + value TORQUE_USER_LIMITS_COMMENT_FIELD, + "User-limits exceeded. Requested:([0-9]*) + Used:([0-9]*) MaxLimit:([0-9]*)", will make HOD + behave accordingly.
  • +
+
+ +
+ 3.3 resource_manager options + +
    +
  • queue: Name of the queue configured in the resource manager to which + jobs are to be submitted.
  • + +
  • batch-home: Install directory to which 'bin' is appended and under + which the executables of the resource manager can be + found.
  • + +
  • env-vars: Comma-separated list of key-value pairs, + expressed as key=value, which would be passed to the jobs + launched on the compute nodes. + For example, if the python installation is + in a non-standard location, one can set the environment + variable 'HOD_PYTHON_HOME' to the path to the python + executable. The HOD processes launched on the compute nodes + can then use this variable.
  • +
  • options: Comma-separated list of key-value pairs, + expressed as + <option>:<sub-option>=<value>. When + passing these to the job submission program, they are expanded + as -<option> <sub-option>=<value>. These + are generally used for specifying additional resource + constraints for scheduling. For instance, with a Torque + setup, one can specify + --resource_manager.options='l:arch=x86_64' for + constraining the nodes being allocated to a particular + architecture; this option will be passed to Torque's qsub + command as "-l arch=x86_64".
  • +
+
+ +
+ 3.4 ringmaster options + +
    +
  • work-dirs: Comma-separated list of paths that will serve + as the root for directories that HOD generates and passes + to Hadoop for use to store DFS and Map/Reduce data. For + example, + this is where DFS data blocks will be stored. Typically, + as many paths are specified as there are disks available + to ensure all disks are being utilized. The restrictions + and notes for the temp-dir variable apply here too.
  • +
  • max-master-failures: Number of times a hadoop master + daemon can fail to launch, beyond which HOD will fail + the cluster allocation altogether. In HOD clusters, + sometimes there might be a single or few "bad" nodes due + to issues like missing java, missing or incorrect version + of Hadoop etc. When this configuration variable is set + to a positive integer, the RingMaster returns an error + to the client only when the number of times a hadoop + master (JobTracker or NameNode) fails to start on these + bad nodes because of above issues, exceeds the specified + value. If the number is not exceeded, the next HodRing + which requests for a command to launch is given the same + hadoop master again. This way, HOD tries its best for a + successful allocation even in the presence of a few bad + nodes in the cluster. +
  • +
  • workers_per_ring: Number of workers per service per HodRing. + By default this is set to 1. If this configuration + variable is set to a value 'n', the HodRing will run + 'n' instances of the workers (TaskTrackers or DataNodes) + on each node acting as a slave. This can be used to run + multiple workers per HodRing, so that the total number of + workers in a HOD cluster is not limited by the total + number of nodes requested during allocation. However, note + that this will mean each worker should be configured to use + only a proportional fraction of the capacity of the + resources on the node. In general, this feature is only + useful for testing and simulation purposes, and not for + production use.
  • +
+
+ +
+ 3.5 gridservice-hdfs options + +
    +
  • external: If false, indicates that an HDFS cluster must be + brought up by the HOD system, on the nodes which it + allocates via the allocate command. Note that in that case, + when the cluster is de-allocated, it will bring down the + HDFS cluster, and all the data will be lost. + If true, it will try and connect to an externally configured + HDFS system. + Typically, because input for jobs is placed into HDFS + before jobs are run, and the output from jobs in HDFS + is also required to be persistent, an internal HDFS cluster is + of little value in a production system. However, it allows + for quick testing.
  • + +
  • host: Hostname of the externally configured NameNode, if any
  • + +
  • fs_port: Port to which NameNode RPC server is bound.
  • + +
  • info_port: Port to which the NameNode web UI server is bound.
  • + +
  • pkgs: Installation directory, under which bin/hadoop executable is + located. This can be used to use a pre-installed version of + Hadoop on the cluster.
  • + +
  • server-params: Comma-separated list of hadoop config parameters + specified as key-value pairs. These will be used to + generate a hadoop-site.xml that will be used by the + NameNode and DataNodes.
  • + +
  • final-server-params: Same as above, except they will be marked final.
  • +
+
+ +
+ 3.6 gridservice-mapred options + +
    +
  • external: If false, indicates that a Map/Reduce cluster must be + brought up by the HOD system on the nodes which it allocates + via the allocate command. + If true, it will try and connect to an externally + configured Map/Reduce system.
  • + +
  • host: Hostname of the externally configured JobTracker, if any
  • + +
  • tracker_port: Port to which the JobTracker RPC server is bound
  • + +
  • info_port: Port to which the JobTracker web UI server is bound.
  • + +
  • pkgs: Installation directory, under which bin/hadoop executable is + located
  • + +
  • server-params: Comma-separated list of hadoop config parameters + specified as key-value pairs. These will be used to + generate a hadoop-site.xml that will be used by the + JobTracker and TaskTrackers.
  • + +
  • final-server-params: Same as above, except they will be marked final.
  • +
+
+ +
+ 3.7 hodring options + +
    +
  • mapred-system-dir-root: Directory in the DFS under which HOD will + generate sub-directory names and pass the full path + as the value of the 'mapred.system.dir' configuration + parameter to Hadoop daemons. The format of the full + path will be value-of-this-option/userid/mapredsystem/cluster-id. + Note that the directory specified here should be such + that all users can create directories under this, if + permissions are enabled in HDFS. Setting the value of + this option to /user will make HOD use the user's + home directory to generate the mapred.system.dir value.
  • + +
  • log-destination-uri: URL describing a path in an external, static DFS or the + cluster node's local file system where HOD will upload + Hadoop logs when a cluster is deallocated. To specify a + DFS path, use the format 'hdfs://path'. To specify a + cluster node's local file path, use the format 'file://path'. + + When clusters are deallocated by HOD, the hadoop logs will + be deleted as part of HOD's cleanup process. To ensure these + logs persist, you can use this configuration option. + + The format of the path is + value-of-this-option/userid/hod-logs/cluster-id + + Note that the directory you specify here must be such that all + users can create sub-directories under this. Setting this value + to hdfs://user will make the logs come in the user's home directory + in DFS.
  • + +
  • pkgs: Installation directory, under which bin/hadoop executable is located. This will + be used by HOD to upload logs if a HDFS URL is specified in log-destination-uri + option. Note that this is useful if the users are using a tarball whose version + may differ from the external, static HDFS version.
  • + +
  • hadoop-port-range: Range of ports, among which an available port shall + be picked for use to run a Hadoop Service, like JobTracker or TaskTracker.
  • + + +
+
+
+ +
+ diff --git a/src/docs/src/documentation/content/xdocs/hod_user_guide.xml b/src/docs/src/documentation/content/xdocs/hod_user_guide.xml new file mode 100644 index 0000000..060e7bf --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/hod_user_guide.xml @@ -0,0 +1,545 @@ + + + + +
+ + HOD User Guide + +
+ + +
+ Introduction +

Hadoop On Demand (HOD) is a system for provisioning virtual Hadoop clusters over a large physical cluster. It uses the Torque resource manager to do node allocation. On the allocated nodes, it can start Hadoop Map/Reduce and HDFS daemons. It automatically generates the appropriate configuration files (hadoop-site.xml) for the Hadoop daemons and client. HOD also has the capability to distribute Hadoop to the nodes in the virtual cluster that it allocates. In short, HOD makes it easy for administrators and users to quickly set up and use Hadoop. It is also a very useful tool for Hadoop developers and testers who need to share a physical cluster for testing their own Hadoop versions.

+

HOD supports Hadoop from version 0.15 onwards.

+

This guide shows you how to get started using HOD, reviews various HOD features and command line options, and provides detailed troubleshooting help.

+
+
+ Getting Started Using HOD +

In this section, we shall see a step-by-step introduction on how to use HOD for the most basic operations. Before following these steps, it is assumed that HOD and its dependent hardware and software components are set up and configured correctly. This is a step that is generally performed by system administrators of the cluster.

+

The HOD user interface is a command line utility called hod. It is driven by a configuration file that is typically set up for users by system administrators. Users can override this configuration when using hod, which is described later in this documentation. The configuration file can be specified in two ways when using hod, as described below:

+
    +
  • Specify it on command line, using the -c option. Such as hod <operation> <required-args> -c path-to-the-configuration-file [other-options]
  • +
  • Set up an environment variable HOD_CONF_DIR where hod will be run. This should be pointed to a directory on the local file system, containing a file called hodrc. Note that this is analogous to the HADOOP_CONF_DIR and hadoop-site.xml file for Hadoop. If no configuration file is specified on the command line, hod shall look for the HOD_CONF_DIR environment variable and a hodrc file under that.
  • +
+

In examples listed below, we shall not explicitly point to the configuration option, assuming it is correctly specified.

+
A typical HOD session +

A typical session of HOD will involve at least three steps: allocate, run hadoop jobs, deallocate. In order to do this, perform the following steps.

+

Create a Cluster Directory

+

The cluster directory is a directory on the local file system where hod will generate the Hadoop configuration, hadoop-site.xml, corresponding to the cluster it allocates. Pass this directory to the hod operations as stated below. If the cluster directory passed doesn't already exist, HOD will automatically try to create it and use it. Once a cluster is allocated, a user can utilize it to run Hadoop jobs by specifying the cluster directory as the Hadoop --config option.

+

Operation allocate

+

The allocate operation is used to allocate a set of nodes and install and provision Hadoop on them. It has the following syntax. Note that it requires a cluster_dir ( -d, --hod.clusterdir) and the number of nodes (-n, --hod.nodecount) needed to be allocated:

+ + + + + + +
$ hod allocate -d cluster_dir -n number_of_nodes [OPTIONS]
+

If the command completes successfully, then cluster_dir/hadoop-site.xml will be generated and will contain information about the allocated cluster. It will also print out the information about the Hadoop web UIs.

+

An example run of this command produces the following output. Note in this example that ~/hod-clusters/test is the cluster directory, and we are allocating 5 nodes:

+ + + + +
$ hod allocate -d ~/hod-clusters/test -n 5
+ INFO - HDFS UI on http://foo1.bar.com:53422
+ INFO - Mapred UI on http://foo2.bar.com:55380
+

Running Hadoop jobs using the allocated cluster

+

Now, one can run Hadoop jobs using the allocated cluster in the usual manner. This assumes that variables like JAVA_HOME and the path to the Hadoop installation are set up correctly:

+ + + + + + +
$ hadoop --config cluster_dir hadoop_command hadoop_command_args
+

or

+ + + + + + +
$ export HADOOP_CONF_DIR=cluster_dir
+ $ hadoop hadoop_command hadoop_command_args
+

Continuing our example, the following command will run a wordcount example on the allocated cluster:

+
$ hadoop --config ~/hod-clusters/test jar /path/to/hadoop/hadoop-examples.jar wordcount /path/to/input /path/to/output
+

or

+ + + +
$ export HADOOP_CONF_DIR=~/hod-clusters/test
+ $ hadoop jar /path/to/hadoop/hadoop-examples.jar wordcount /path/to/input /path/to/output
+

Operation deallocate

+

The deallocate operation is used to release an allocated cluster. When finished with a cluster, deallocate must be run so that the nodes become free for others to use. The deallocate operation has the following syntax. Note that it requires the cluster_dir (-d, --hod.clusterdir) argument:

+ + + + + + +
$ hod deallocate -d cluster_dir
+

Continuing our example, the following command will deallocate the cluster:

+
$ hod deallocate -d ~/hod-clusters/test
+

As can be seen, HOD allows the users to allocate a cluster, and use it flexibly for running Hadoop jobs. For example, users can run multiple jobs in parallel on the same cluster, by running hadoop from multiple shells pointing to the same configuration.

+
+
Running hadoop scripts using HOD +

The HOD script operation combines the operations of allocating, using and deallocating a cluster into a single operation. This is very useful for users who want to run a script of hadoop jobs and let HOD handle the cleanup automatically once the script completes. In order to run hadoop scripts using hod, do the following:

+

Create a script file

+

This will be a regular shell script that will typically contain hadoop commands, such as:

+ +
$ hadoop jar jar_file options
+

However, the user can add any valid commands as part of the script. HOD will execute this script setting HADOOP_CONF_DIR automatically to point to the allocated cluster. So users do not need to worry about this. The users however need to specify a cluster directory just like when using the allocate operation.

+

Running the script

+

The syntax for the script operation is as follows. Note that it requires a cluster directory ( -d, --hod.clusterdir), number of nodes (-n, --hod.nodecount) and a script file (-s, --hod.script):

+ + + + + + +
$ hod script -d cluster_directory -n number_of_nodes -s script_file
+

Note that HOD will deallocate the cluster as soon as the script completes, and this means that the script must not complete until the hadoop jobs themselves are completed. Users must take care of this while writing the script.

+
+
+
+ HOD Features +
Provisioning and Managing Hadoop Clusters +

The primary feature of HOD is to provision Hadoop Map/Reduce and HDFS clusters. This is described above in the Getting Started section. Also, as long as nodes are available, and organizational policies allow, a user can use HOD to allocate multiple Map/Reduce clusters simultaneously. The user would need to specify different paths for the cluster_dir parameter mentioned above for each cluster he/she allocates. HOD provides the list and the info operations to enable managing multiple clusters.

+

Operation list

+

The list operation lists all the clusters allocated so far by a user. The cluster directory where the hadoop-site.xml is stored for the cluster, and its status vis-a-vis connectivity with the JobTracker and/or HDFS is shown. The list operation has the following syntax:

+ + + + + + +
$ hod list
+

Operation info

+

The info operation shows information about a given cluster. The information shown includes the Torque job id, and locations of the important daemons like the HOD Ringmaster process, and the Hadoop JobTracker and NameNode daemons. The info operation has the following syntax. Note that it requires a cluster directory (-d, --hod.clusterdir):

+ + + + + + +
$ hod info -d cluster_dir
+

The cluster_dir should be a valid cluster directory specified in an earlier allocate operation.

+
+
Using a tarball to distribute Hadoop +

When provisioning Hadoop, HOD can either use a pre-installed Hadoop on the cluster nodes or distribute and install a Hadoop tarball as part of the provisioning operation. If the tarball option is used, there is no need for Hadoop to be pre-installed on the cluster nodes. This is especially useful in a development / QE environment where individual developers may have different versions of Hadoop to test on a shared cluster.

+

In order to use a pre-installed Hadoop, you must specify, in the hodrc, the pkgs option in the gridservice-hdfs and gridservice-mapred sections. This must point to the path where Hadoop is installed on all nodes of the cluster.

+

The syntax for specifying tarball is as follows:

+ + + + +
$ hod allocate -d cluster_dir -n number_of_nodes -t hadoop_tarball_location
+

For example, the following command allocates Hadoop provided by the tarball ~/share/hadoop.tar.gz:

+
$ hod allocate -d ~/hadoop-cluster -n 10 -t ~/share/hadoop.tar.gz
+

Similarly, when using hod script, the syntax is as follows:

+ + + + +
$ hod script -d cluster_directory -s script_file -n number_of_nodes -t hadoop_tarball_location
+

The hadoop_tarball specified in the syntax above should point to a path on a shared file system that is accessible from all the compute nodes. Currently, HOD only supports NFS mounted file systems.

+

Note:

+
    +
  • For better distribution performance it is recommended that the Hadoop tarball contain only the libraries and binaries, and not the source or documentation.
  • +
  • When you want to run jobs against a cluster allocated using the tarball, you must use a compatible version of hadoop to submit your jobs. The best would be to untar and use the version that is present in the tarball itself.
  • +
  • You need to make sure that there are no Hadoop configuration files, hadoop-env.sh and hadoop-site.xml, present in the conf directory of the tarred distribution. The presence of these files with incorrect values could cause the cluster allocation to fail.
  • +
+
+
Using an external HDFS +

In typical Hadoop clusters provisioned by HOD, HDFS is already set up statically (without using HOD). This allows data to persist in HDFS after the HOD-provisioned cluster is deallocated. To use a statically configured HDFS, your hodrc must point to an external HDFS. Specifically, set the following options to the correct values in the section gridservice-hdfs of the hodrc:

+
external = true
host = Hostname of the HDFS NameNode
fs_port = Port number of the HDFS NameNode
info_port = Port number of the HDFS NameNode web UI
+

Note: You can also enable this option from command line. That is, to use a static HDFS, you will need to say:
+

+ + + + +
$ hod allocate -d cluster_dir -n number_of_nodes --gridservice-hdfs.external
+

HOD can be used to provision an HDFS cluster as well as a Map/Reduce cluster, if required. To do so, set the following option in the section gridservice-hdfs of the hodrc:

+
external = false
+
+
Options for Configuring Hadoop +

HOD provides a very convenient mechanism to configure both the Hadoop daemons that it provisions and also the hadoop-site.xml that it generates on the client side. This is done by specifying Hadoop configuration parameters in either the HOD configuration file, or from the command line when allocating clusters.

+

Configuring Hadoop Daemons

+

For configuring the Hadoop daemons, you can do the following:

+

For Map/Reduce, specify the options as a comma separated list of key-value pairs to the server-params option in the gridservice-mapred section. Likewise for a dynamically provisioned HDFS cluster, specify the options in the server-params option in the gridservice-hdfs section. If these parameters should be marked as final, then include these in the final-server-params option of the appropriate section.

+

For example:

+ +
server-params = mapred.reduce.parallel.copies=20,io.sort.factor=100,io.sort.mb=128,io.file.buffer.size=131072
final-server-params = mapred.child.java.opts=-Xmx512m,dfs.block.size=134217728,fs.inmemory.size.mb=128
+

In order to provide the options from command line, you can use the following syntax:

+

For configuring the Map/Reduce daemons use:

+ + + + +
$ hod allocate -d cluster_dir -n number_of_nodes -Mmapred.reduce.parallel.copies=20 -Mio.sort.factor=100
+

In the example above, the mapred.reduce.parallel.copies parameter and the io.sort.factor parameter will be appended to the other server-params or if they already exist in server-params, will override them. In order to specify these are final parameters, you can use:

+ + + + +
$ hod allocate -d cluster_dir -n number_of_nodes -Fmapred.reduce.parallel.copies=20 -Fio.sort.factor=100
+

However, note that final parameters cannot be overwritten from command line. They can only be appended if not already specified.

+

Similar options exist for configuring dynamically provisioned HDFS daemons. For doing so, replace -M with -H and -F with -S.

+

Configuring Hadoop Job Submission (Client) Programs

+

As mentioned above, if the allocation operation completes successfully then cluster_dir/hadoop-site.xml will be generated and will contain information about the allocated cluster's JobTracker and NameNode. This configuration is used when submitting jobs to the cluster. HOD provides an option to include additional Hadoop configuration parameters into this file. The syntax for doing so is as follows:

+ + + + +
$ hod allocate -d cluster_dir -n number_of_nodes -Cmapred.userlog.limit.kb=200 -Cmapred.child.java.opts=-Xmx512m
+

In this example, the mapred.userlog.limit.kb and mapred.child.java.opts options will be included into the hadoop-site.xml that is generated by HOD.

+
+
Viewing Hadoop Web-UIs +

The HOD allocation operation prints the JobTracker and NameNode web UI URLs. For example:

+
$ hod allocate -d ~/hadoop-cluster -n 10 -c ~/hod-conf-dir/hodrc
+ INFO - HDFS UI on http://host242.foo.com:55391
+ INFO - Mapred UI on http://host521.foo.com:54874 +
+

The same information is also available via the info operation described above.

+
+
Collecting and Viewing Hadoop Logs +

To get the Hadoop logs of the daemons running on one of the allocated nodes:

+
    +
  • Log into the node of interest. If you want to look at the logs of the JobTracker or NameNode, then you can find the node running these by using the list and info operations mentioned above.
  • +
  • Get the process information of the daemon of interest (for example, ps ux | grep TaskTracker)
  • +
  • In the process information, search for the value of the variable -Dhadoop.log.dir. Typically this will be a descendant directory of the hodring.temp-dir value from the hod configuration file.
  • +
  • Change to the hadoop.log.dir directory to view daemon and user logs.
  • +
+

HOD also provides a mechanism to collect logs when a cluster is being deallocated and persist them into a file system, or an externally configured HDFS. By doing so, these logs can be viewed after the jobs are completed and the nodes are released. In order to do so, configure the log-destination-uri to a URI as follows:

+ + +
log-destination-uri = hdfs://host123:45678/user/hod/logs or
log-destination-uri = file://path/to/store/log/files
+

Under the root directory specified above in the path, HOD will create a path user_name/torque_jobid and store gzipped log files for each node that was part of the job.

+

Note that to store the files to HDFS, you may need to configure the hodring.pkgs option with the Hadoop version that matches the HDFS mentioned. If not, HOD will try to use the Hadoop version that it is using to provision the Hadoop cluster itself.

+
+
Auto-deallocation of Idle Clusters +

HOD automatically deallocates clusters that are not running Hadoop jobs for a given period of time. Each HOD allocation includes a monitoring facility that constantly checks for running Hadoop jobs. If it detects no running Hadoop jobs for a given period, it will automatically deallocate its own cluster and thus free up nodes which are not being used effectively.

+

Note: While the cluster is deallocated automatically, the cluster directory is not cleaned up. The user must deallocate this cluster through the regular deallocate operation to clean this up.

+
+
Specifying Additional Job Attributes +

HOD allows the user to specify a wallclock time and a name (or title) for a Torque job.

+

The wallclock time is the estimated amount of time for which the Torque job will be valid. After this time has expired, Torque will automatically delete the job and free up the nodes. Specifying the wallclock time can also help the job scheduler to better schedule jobs, and help improve utilization of cluster resources.

+

To specify the wallclock time, use the following syntax:

+ + + + +
$ hod allocate -d cluster_dir -n number_of_nodes -l time_in_seconds
+

The name or title of a Torque job helps in user friendly identification of the job. The string specified here will show up in all information where Torque job attributes are displayed, including the qstat command.

+

To specify the name or title, use the following syntax:

+ + + + +
$ hod allocate -d cluster_dir -n number_of_nodes -N name_of_job
+

Note: Due to a restriction in the underlying Torque resource manager, names that do not start with an alphabetic character or that contain a space will cause the job to fail. The failure message points to the problem being in the specified job name.

+
+
Capturing HOD exit codes in Torque +

HOD exit codes are captured in the Torque exit_status field. This will help users and system administrators to distinguish successful runs from unsuccessful runs of HOD. The exit codes are 0 if allocation succeeded and all hadoop jobs ran on the allocated cluster correctly. They are non-zero if allocation failed or some of the hadoop jobs failed on the allocated cluster. The possible exit codes are listed in the table below. Note: Hadoop job status is captured only if the version of Hadoop used is 0.16 or above.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Exit Code Meaning
6 Ringmaster failure
7 HDFS failure
8 Job tracker failure
10 Cluster dead
12 Cluster already allocated
13 HDFS dead
14 Mapred dead
16 All Map/Reduce jobs that ran on the cluster failed. Refer to hadoop logs for more details.
17 Some of the Map/Reduce jobs that ran on the cluster failed. Refer to hadoop logs for more details.
+
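For instance (a hedged example; how long Torque retains completed job information and the exact output format depend on the site's Torque configuration), the recorded code can be inspected with qstat after the job completes:

$ qstat -f torque_job_id    # look for the exit_status attribute in the output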
+
+ Command Line +

HOD command line has the following general syntax:
+ hod <operation> [ARGS] [OPTIONS]
+ Allowed operations are 'allocate', 'deallocate', 'info', 'list', 'script' and 'help'. For help on a particular operation one can do : hod help <operation>. To have a look at possible options one can do a hod help options.

+

allocate
+ Usage : hod allocate -d cluster_dir -n number_of_nodes [OPTIONS]
+ Allocates a cluster on the given number of cluster nodes, and stores the allocation information in cluster_dir for use with subsequent hadoop commands. Note that the cluster_dir must exist before running the command.

+

list
+ Usage : hod list [OPTIONS]
+ Lists the clusters allocated by this user. Information provided includes the Torque job id corresponding to the cluster, the cluster directory where the allocation information is stored, and whether the Map/Reduce daemon is still active or not.

+

info
+ Usage : hod info -d cluster_dir [OPTIONS]
+ Lists information about the cluster whose allocation information is stored in the specified cluster directory.

+

deallocate
+ Usage : hod deallocate -d cluster_dir [OPTIONS]
+ Deallocates the cluster whose allocation information is stored in the specified cluster directory.

+

script
+ Usage : hod script -s script_file -d cluster_directory -n number_of_nodes [OPTIONS]
+ Runs a hadoop script using the HOD script operation. Provisions Hadoop on a given number of nodes, executes the given script from the submitting node, and deallocates the cluster when the script completes.

+

help
+ Usage : hod help [operation | 'options']
+ When no argument is specified, hod help gives the usage and basic options, and is equivalent to hod --help (see below). When 'options' is given as an argument, hod displays only the basic options that hod takes. When an operation is specified, it displays the usage and description corresponding to that particular operation. For example, to learn about the allocate operation, one can do hod help allocate.

+

Besides the operations, HOD can take the following command line options.

+

--help
+ Prints out the help message to see the usage and basic options.

+

--verbose-help
+ All configuration options provided in the hodrc file can be passed on the command line, using the syntax --section_name.option_name[=value]. When provided this way, the value provided on command line overrides the option provided in hodrc. The verbose-help command lists all the available options in the hodrc file. This is also a nice way to see the meaning of the configuration options.

+

See the next section for a description of most important hod configuration options. For basic options, one can do a hod help options and for all options possible in hod configuration, one can see hod --verbose-help. See config guide for a description of all options.

+
+ +
Options Configuring HOD +

As described above, HOD is configured using a configuration file that is usually set up by system administrators. This is an INI-style configuration file that is divided into sections, and options inside each section. Each section relates to one of the HOD processes: client, ringmaster, hodring, mapreduce or hdfs. The options inside a section comprise an option name and a value.

+

Users can override the configuration defined in the default configuration in two ways:

+
    +
  • Users can supply their own configuration file to HOD in each of the commands, using the -c option
  • +
  • Users can supply specific configuration options to HOD. Options provided on the command line override the values provided in the configuration file being used.
  • +
+

This section describes some of the most commonly used configuration options. These commonly used options are provided with a short option for convenience of specification. All other options can be specified using a long option that is also described below.

+

-c config_file
+ Provides the configuration file to use. Can be used with all other options of HOD. Alternatively, the HOD_CONF_DIR environment variable can be defined to specify a directory that contains a file named hodrc, alleviating the need to specify the configuration file in each HOD command.

+

-d cluster_dir
+ This is required for most of the hod operations. As described here, the cluster directory is a directory on the local file system where hod will generate the Hadoop configuration, hadoop-site.xml, corresponding to the cluster it allocates. Pass it to the hod operations as an argument to -d or --hod.clusterdir. If it doesn't already exist, HOD will automatically try to create it and use it. Once a cluster is allocated, a user can utilize it to run Hadoop jobs by specifying the cluster directory as the Hadoop --config option.

+

-n number_of_nodes
+ This is required for the hod allocate operation and for the script operation. This denotes the number of nodes to be allocated.

+

-s script-file
+ Required when using script operation, specifies the script file to execute.

+

-b 1|2|3|4
+ Enables the given debug level. Can be used with all other options of HOD. 4 is most verbose.

+

-t hadoop_tarball
+ Provisions Hadoop from the given tar.gz file. This option is only applicable to the allocate operation. For better distribution performance it is strongly recommended that the Hadoop tarball is created after removing the source or documentation.

+

-N job-name
+ The name to give to the resource manager job that HOD uses underneath. For example, in the case of Torque, this translates to the qsub -N option, and can be seen as the job name using the qstat command.

+

-l wall-clock-time
+ The amount of time for which the user expects to have work on the allocated cluster. This is passed to the resource manager underneath HOD, and can be used in more efficient scheduling and utilization of the cluster. Note that in the case of Torque, the cluster is automatically deallocated after this time expires.

+

-j java-home
+ Path to be set as the JAVA_HOME environment variable. This is used in the script operation. HOD sets the JAVA_HOME environment variable to this value and launches the user script in that environment.

+

-A account-string
+ Accounting information to pass to underlying resource manager.

+

-Q queue-name
+ Name of the queue in the underlying resource manager to which the job must be submitted.

+

-Mkey1=value1 -Mkey2=value2
+ Provides configuration parameters for the provisioned Map/Reduce daemons (JobTracker and TaskTrackers). A hadoop-site.xml is generated with these values on the cluster nodes.
+ Note: Values which have the following characters: space, comma, equal-to, semi-colon need to be escaped with a '\' character, and need to be enclosed within quotes. You can escape a '\' with a '\' too.

+

-Hkey1=value1 -Hkey2=value2
+ Provides configuration parameters for the provisioned HDFS daemons (NameNode and DataNodes). A hadoop-site.xml is generated with these values on the cluster nodes
+ Note: Values which have the following characters: space, comma, equal-to, semi-colon need to be escaped with a '\' character, and need to be enclosed within quotes. You can escape a '\' with a '\' too.

+

-Ckey1=value1 -Ckey2=value2
+ Provides configuration parameters for the client from where jobs can be submitted. A hadoop-site.xml is generated with these values on the submit node.
+ Note: Values which have the following characters: space, comma, equal-to, semi-colon need to be escaped with a '\' character, and need to be enclosed within quotes. You can escape a '\' with a '\' too.

+

--section-name.option-name=value
+ This is the method to provide options using the long format. For example, you could specify --hod.script-wait-time=20

+
+
+
+ Troubleshooting +

The following section identifies some of the most likely error conditions users can run into when using HOD, and ways to troubleshoot them.

+
<code>hod</code> Hangs During Allocation +

Possible Cause: One of the HOD or Hadoop components have failed to come up. In such a case, the hod command will return after a few minutes (typically 2-3 minutes) with an error code of either 7 or 8 as defined in the Error Codes section. Refer to that section for further details.

+

Possible Cause: A large allocation is fired with a tarball. Sometimes, due to load on the network or on the allocated nodes, the tarball distribution might be significantly slow and the command can take a few minutes to return. Wait for completion. Also check that the tarball does not include the Hadoop sources or documentation.

+

Possible Cause: A Torque related problem. If the cause is Torque related, the hod command will not return for more than 5 minutes. Running hod in debug mode may show the qstat command being executed repeatedly. Executing the qstat command from a separate shell may show that the job is in the Q (Queued) state. This usually indicates a problem with Torque. Possible causes could include some nodes being down, or new nodes added that Torque is not aware of. Generally, system administrator help is needed to resolve this problem.

+
+
<code>hod</code> Hangs During Deallocation +

Possible Cause: A Torque related problem, usually load on the Torque server, or the allocation is very large. Generally, waiting for the command to complete is the only option.

+
+
<code>hod</code> Fails With an Error Code and Error Message +

If the exit code of the hod command is not 0, then refer to the following table of error exit codes to determine why the code may have occurred and how to debug the situation.

+

Error Codes

Error Code | Meaning | Possible Causes and Remedial Actions
1 Configuration error Incorrect configuration values specified in hodrc, or other errors related to HOD configuration. The error messages in this case must be sufficient to debug and fix the problem.
2 Invalid operation Do hod help for the list of valid operations.
3 Invalid operation arguments Do hod help operation for listing the usage of a particular operation.
4 Scheduler failure 1. Requested more resources than available. Run checknodes cluster_name to see if enough nodes are available.
+ 2. Requested resources exceed resource manager limits.
+ 3. Torque is misconfigured, the path to Torque binaries is misconfigured, or other Torque problems. Contact system administrator.
5 Job execution failure 1. Torque Job was deleted from outside. Execute the Torque qstat command to see if you have any jobs in the R (Running) state. If none exist, try re-executing HOD.
+ 2. Torque problems such as the server momentarily going down, or becoming unresponsive. Contact system administrator.
+ 3. The system administrator might have configured account verification, and an invalid account is specified. Contact system administrator.
6 Ringmaster failure HOD prints the message "Cluster could not be allocated because of the following errors on the ringmaster host <hostname>". The actual error message may indicate one of the following:
+ 1. Invalid configuration on the node running the ringmaster, specified by the hostname in the error message.
+ 2. Invalid configuration in the ringmaster section,
+ 3. Invalid pkgs option in gridservice-mapred or gridservice-hdfs section,
+ 4. An invalid hadoop tarball, or a tarball which has bundled an invalid configuration file in the conf directory,
+ 5. Mismatched version in Hadoop between the MapReduce and an external HDFS.
+ The Torque qstat command will most likely show a job in the C (Completed) state.
+ One can log in to the ringmaster host as given by the HOD failure message and debug the problem with the help of the error message. If the error message doesn't give complete information, the ringmaster logs should help in finding the root cause of the problem. Refer to the section Locating Ringmaster Logs below for more information.
7 HDFS failure When HOD fails to allocate due to HDFS failures (or Job tracker failures, error code 8, see below), it prints a failure message "Hodring at <hostname> failed with following errors:" and then gives the actual error message, which may indicate one of the following:
+ 1. Problem in starting Hadoop clusters. Usually the actual cause in the error message will indicate the problem on the hostname mentioned. Also, review the Hadoop related configuration in the HOD configuration files. Look at the Hadoop logs using information specified in Collecting and Viewing Hadoop Logs section above.
+ 2. Invalid configuration on the node running the hodring, specified by the hostname in the error message
+ 3. Invalid configuration in the hodring section of hodrc. ssh to the hostname specified in the error message and grep for ERROR or CRITICAL in hodring logs. Refer to the section Locating Hodring Logs below for more information.
+ 4. Invalid tarball specified which is not packaged correctly.
+ 5. Cannot communicate with an externally configured HDFS.
+ When such an HDFS or Job tracker failure occurs, one can log in to the host with the hostname mentioned in the HOD failure message and debug the problem. While fixing the problem, one should also review other log messages in the ringmaster log to see which other machines might have had problems bringing up the jobtracker/namenode, apart from the hostname reported in the failure message. Other machines can also have had problems because HOD continues to try and launch hadoop daemons on multiple machines one after another, depending upon the value of the configuration variable ringmaster.max-master-failures. Refer to the section Locating Ringmaster Logs below to find more about ringmaster logs. +
8 Job tracker failure Similar to the causes in DFS failure case.
10 Cluster dead 1. Cluster was auto-deallocated because it was idle for a long time.
+ 2. Cluster was auto-deallocated because the wallclock time specified by the system administrator or user was exceeded.
+ 3. Cannot communicate with the JobTracker and HDFS NameNode which were successfully allocated. Deallocate the cluster, and allocate again.
12 Cluster already allocated The cluster directory specified has been used in a previous allocate operation and is not yet deallocated. Specify a different directory, or deallocate the previous allocation first.
13 HDFS dead Cannot communicate with the HDFS NameNode. HDFS NameNode went down.
14 Mapred dead 1. Cluster was auto-deallocated because it was idle for a long time.
+ 2. Cluster was auto-deallocated because the wallclock time specified by the system administrator or user was exceeded.
+ 3. Cannot communicate with the Map/Reduce JobTracker. JobTracker node went down.
+
15 Cluster not allocated An operation which requires an allocated cluster is given a cluster directory with no state information.
Any non-zero exit code HOD script error If the hod script option was used, it is likely that the exit code is from the script. Unfortunately, this could clash with the exit codes of the hod command itself. In order to help users differentiate these two, hod writes the script's exit code to a file called script.exitcode in the cluster directory, if the script returned an exit code. You can cat this file to determine the script's exit code. If it does not exist, then it is a hod command exit code.
+
+
Hadoop DFSClient Warns with a + NotReplicatedYetException +

Sometimes, when you try to upload a file to the HDFS immediately after + allocating a HOD cluster, DFSClient warns with a NotReplicatedYetException. It + usually shows a message something like -

WARN + hdfs.DFSClient: NotReplicatedYetException sleeping <filename> retries + left 3
08/01/25 16:31:40 INFO hdfs.DFSClient: + org.apache.hadoop.ipc.RemoteException: java.io.IOException: File + <filename> could only be replicated to 0 nodes, instead of + 1

This scenario arises when you try to upload a file + to the HDFS while the DataNodes are still in the process of contacting the + NameNode. This can be resolved by waiting for some time before uploading a new + file to the HDFS, so that enough DataNodes start and contact the + NameNode.

+
+
Hadoop Jobs Not Running on a Successfully Allocated Cluster +

This scenario generally occurs when a cluster is allocated and left inactive for some time, and Hadoop jobs are then attempted on it. The Hadoop jobs fail with the following exception:

+
08/01/25 16:31:40 INFO ipc.Client: Retrying connect to server: foo.bar.com/1.1.1.1:53567. Already tried 1 time(s).
+

Possible Cause: No Hadoop jobs were run for a significant portion of time. Thus the cluster would have been deallocated as described in the section Auto-deallocation of Idle Clusters. Deallocate the cluster and allocate it again.

+

Possible Cause: The wallclock limit specified by the Torque administrator or the -l option defined in the section Specifying Additional Job Attributes was exceeded since allocation time. Thus the cluster would have been released. Deallocate the cluster and allocate it again.

+

Possible Cause: There is a version mismatch between the version of the hadoop being used in provisioning (typically via the tarball option) and the external HDFS. Ensure compatible versions are being used.

+

Possible Cause: There is a version mismatch between the version of the hadoop client being used to submit jobs and the hadoop used in provisioning (typically via the tarball option). Ensure compatible versions are being used.

+

Possible Cause: You used one of the options for specifying Hadoop configuration -M or -H, which had special characters like space or comma that were not escaped correctly. Refer to the section Options Configuring HOD for checking how to specify such options correctly.

+
+
My Hadoop Job Got Killed +

Possible Cause: The wallclock limit specified by the Torque administrator or the -l option defined in the section Specifying Additional Job Attributes was exceeded since allocation time. Thus the cluster would have been released. Deallocate the cluster and allocate it again, this time with a larger wallclock time.

+

Possible Cause: Problems with the JobTracker node. Refer to the section in Collecting and Viewing Hadoop Logs to get more information.

+
+
Hadoop Job Fails with Message: 'Job tracker still initializing' +

Possible Cause: The hadoop job was being run as part of the HOD script command, and it started before the JobTracker could come up fully. Allocate the cluster using a large value for the configuration option --hod.script-wait-time. A value of 120 should typically work, though it is usually unnecessary to be that large.

+
+
The Exit Codes For HOD Are Not Getting Into Torque +

Possible Cause: Version 0.16 of Hadoop is required for this functionality to work, and the version of Hadoop being used does not match. Use the required version of Hadoop.

+

Possible Cause: The deallocation was done without using the hod command; for example, by directly using qdel. When the cluster is deallocated in this manner, the HOD processes are terminated using signals. This results in the exit code being based on the signal number, rather than the exit code of the program.

+
+
The Hadoop Logs are Not Uploaded to HDFS +

Possible Cause: There is a version mismatch between the version of the hadoop being used for uploading the logs and the external HDFS. Ensure that the correct version is specified in the hodring.pkgs option.

+
+
Locating Ringmaster Logs +

To locate the ringmaster logs, follow these steps:

+
    +
  • Execute hod in the debug mode using the -b option. This will print the Torque job id for the current run.
  • +
  • Execute qstat -f torque_job_id and look up the value of the exec_host parameter in the output. The first host in this list is the ringmaster node.
  • +
  • Login to this node.
  • +
  • The ringmaster log location is specified by the ringmaster.log-dir option in the hodrc. The name of the log file will be username.torque_job_id/ringmaster-main.log.
  • +
  • If you don't get enough information, you may want to set the ringmaster debug level to 4. This can be done by passing --ringmaster.debug 4 to the hod command line.
  • +
+
+
Locating Hodring Logs +

To locate hodring logs, follow the steps below:

+
    +
  • Execute hod in the debug mode using the -b option. This will print the Torque job id for the current run.
  • +
  • Execute qstat -f torque_job_id and look up the value of the exec_host parameter in the output. All nodes in this list should have a hodring on them.
  • +
  • Login to any of these nodes.
  • +
  • The hodring log location is specified by the hodring.log-dir option in the hodrc. The name of the log file will be username.torque_job_id/hodring-main.log.
  • +
  • If you don't get enough information, you may want to set the hodring debug level to 4. This can be done by passing --hodring.debug 4 to the hod command line.
  • +
+
+
+ +
diff --git a/src/docs/src/documentation/content/xdocs/index.xml b/src/docs/src/documentation/content/xdocs/index.xml new file mode 100644 index 0000000..72ed5f4 --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/index.xml @@ -0,0 +1,36 @@ + + + + + + + +
+ Overview +
+ + +

+ The Hadoop Documentation provides the information you need to get started using Hadoop, the Hadoop Distributed File System (HDFS), and Hadoop on Demand (HOD). +

+Begin with the Hadoop Quick Start which shows you how to set up a single-node Hadoop installation. Then move on to the Hadoop Cluster Setup to learn how to set up a multi-node Hadoop installation. Once your Hadoop installation is in place, try out the Hadoop Map/Reduce Tutorial. +

+If you have more questions, you can ask on the Hadoop Core Mailing Lists or browse the Mailing List Archives. +

+ + +
diff --git a/src/docs/src/documentation/content/xdocs/libhdfs.xml b/src/docs/src/documentation/content/xdocs/libhdfs.xml new file mode 100644 index 0000000..d1e9684 --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/libhdfs.xml @@ -0,0 +1,96 @@ + + + + + + + +
+C API to HDFS: libhdfs +Content-Type +text/html; +utf-8 +
+ +
+C API to HDFS: libhdfs + +

+libhdfs is a JNI-based C API for Hadoop's DFS. It provides C APIs to a subset of the HDFS APIs to manipulate DFS files and the filesystem. libhdfs is part of the Hadoop distribution and comes pre-compiled in ${HADOOP_HOME}/libhdfs/libhdfs.so .

+ +
+
+The APIs + +

+The libhdfs APIs are a subset of: hadoop fs APIs. +

+

+The header file for libhdfs describes each API in detail and is available in ${HADOOP_HOME}/src/c++/libhdfs/hdfs.h +

+
+
+A sample program

#include "hdfs.h"
#include <fcntl.h>    /* O_WRONLY, O_CREAT */
#include <stdio.h>    /* fprintf */
#include <stdlib.h>   /* exit */
#include <string.h>   /* strlen */

int main(int argc, char **argv) {

    /* Connect to the default (configured) filesystem. */
    hdfsFS fs = hdfsConnect("default", 0);
    const char* writePath = "/tmp/testfile.txt";
    hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
    if(!writeFile) {
        fprintf(stderr, "Failed to open %s for writing!\n", writePath);
        exit(-1);
    }
    char* buffer = "Hello, World!";
    tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer)+1);
    if (hdfsFlush(fs, writeFile)) {
        fprintf(stderr, "Failed to 'flush' %s\n", writePath);
        exit(-1);
    }
    hdfsCloseFile(fs, writeFile);
    hdfsDisconnect(fs);
    return 0;
}
+ +
+How to link with the library +

+See the Makefile for hdfs_test.c in the libhdfs source directory (${HADOOP_HOME}/src/c++/libhdfs/Makefile) or something like: +gcc above_sample.c -I${HADOOP_HOME}/src/c++/libhdfs -L${HADOOP_HOME}/libhdfs -lhdfs -o above_sample +

+
+
+Common problems +

+The most common problem is the CLASSPATH is not set properly when calling a program that uses libhdfs. Make sure you set it to all the hadoop jars needed to run Hadoop itself. Currently, there is no way to programmatically generate the classpath, but a good bet is to include all the jar files in ${HADOOP_HOME} and ${HADOOP_HOME}/lib as well as the right configuration directory containing hdfs-site.xml +

+
+
+libhdfs is thread safe +

Concurrency and Hadoop FS "handles" - the hadoop FS implementation includes a FS handle cache which caches based on the URI of the namenode along with the user connecting. So, all calls to hdfsConnect will return the same handle but calls to hdfsConnectAsUser with different users will return different handles. But, since HDFS client handles are completely thread safe, this has no bearing on concurrency. +

+

Concurrency and libhdfs/JNI - the libhdfs calls to JNI should always be creating thread local storage, so (in theory), libhdfs should be as thread safe as the underlying calls to the Hadoop FS. +

+
+ +
diff --git a/src/docs/src/documentation/content/xdocs/mapred_tutorial.xml b/src/docs/src/documentation/content/xdocs/mapred_tutorial.xml new file mode 100644 index 0000000..efb7305 --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/mapred_tutorial.xml @@ -0,0 +1,3126 @@ + + + + + + + +
+ Map/Reduce Tutorial +
+ + + +
+ Purpose + +

This document comprehensively describes all user-facing facets of the + Hadoop Map/Reduce framework and serves as a tutorial. +

+
+ +
+ Pre-requisites + +

Ensure that Hadoop is installed, configured and running. More details:

+ +
+ +
+ Overview + +

Hadoop Map/Reduce is a software framework for easily writing + applications which process vast amounts of data (multi-terabyte data-sets) + in-parallel on large clusters (thousands of nodes) of commodity + hardware in a reliable, fault-tolerant manner.

+ +

A Map/Reduce job usually splits the input data-set into + independent chunks which are processed by the map tasks in a + completely parallel manner. The framework sorts the outputs of the maps, + which are then input to the reduce tasks. Typically both the + input and the output of the job are stored in a file-system. The framework + takes care of scheduling tasks, monitoring them and re-executes the failed + tasks.

+ +

Typically the compute nodes and the storage nodes are the same, that is, + the Map/Reduce framework and the Hadoop Distributed File System (see HDFS Architecture ) + are running on the same set of nodes. This configuration + allows the framework to effectively schedule tasks on the nodes where data + is already present, resulting in very high aggregate bandwidth across the + cluster.

+ +

The Map/Reduce framework consists of a single master + JobTracker and one slave TaskTracker per + cluster-node. The master is responsible for scheduling the jobs' component + tasks on the slaves, monitoring them and re-executing the failed tasks. The + slaves execute the tasks as directed by the master.

+ +

Minimally, applications specify the input/output locations and supply + map and reduce functions via implementations of + appropriate interfaces and/or abstract-classes. These, and other job + parameters, comprise the job configuration. The Hadoop + job client then submits the job (jar/executable etc.) and + configuration to the JobTracker which then assumes the + responsibility of distributing the software/configuration to the slaves, + scheduling tasks and monitoring them, providing status and diagnostic + information to the job-client.

+ +

Although the Hadoop framework is implemented in JavaTM, + Map/Reduce applications need not be written in Java.

+
    +
  • + + Hadoop Streaming is a utility which allows users to create and run + jobs with any executables (e.g. shell utilities) as the mapper and/or + the reducer. +
  • +
  • + + Hadoop Pipes is a SWIG- + compatible C++ API to implement Map/Reduce applications (non + JNITM based). +
  • +
+
+ +
+ Inputs and Outputs + +

The Map/Reduce framework operates exclusively on + <key, value> pairs, that is, the framework views the + input to the job as a set of <key, value> pairs and + produces a set of <key, value> pairs as the output of + the job, conceivably of different types.

+ +

The key and value classes have to be + serializable by the framework and hence need to implement the + Writable + interface. Additionally, the key classes have to implement the + + WritableComparable interface to facilitate sorting by the framework. +

+ +

Input and Output types of a Map/Reduce job:

+

+ (input) <k1, v1> + -> + map + -> + <k2, v2> + -> + combine + -> + <k2, v2> + -> + reduce + -> + <k3, v3> (output) +

+
+ +
+ Example: WordCount v1.0 + +

Before we jump into the details, let's walk through an example Map/Reduce + application to get a flavour for how they work.

+ +

WordCount is a simple application that counts the number of + occurrences of each word in a given input set.

+ +

This works with a local-standalone, pseudo-distributed or fully-distributed + Hadoop installation (see Hadoop Quick Start).

+ +
+ Source Code +
WordCount.java
1.  package org.myorg;
2.
3.  import java.io.IOException;
4.  import java.util.*;
5.
6.  import org.apache.hadoop.fs.Path;
7.  import org.apache.hadoop.conf.*;
8.  import org.apache.hadoop.io.*;
9.  import org.apache.hadoop.mapred.*;
10. import org.apache.hadoop.util.*;
11.
12. public class WordCount {
13.
14.   public static class Map extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> {
15.     private final static IntWritable one = new IntWritable(1);
16.     private Text word = new Text();
17.
18.     public void map(LongWritable key, Text value, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
19.       String line = value.toString();
20.       StringTokenizer tokenizer = new StringTokenizer(line);
21.       while (tokenizer.hasMoreTokens()) {
22.         word.set(tokenizer.nextToken());
23.         output.collect(word, one);
24.       }
25.     }
26.   }
27.
28.   public static class Reduce extends MapReduceBase implements Reducer<Text, IntWritable, Text, IntWritable> {
29.     public void reduce(Text key, Iterator<IntWritable> values, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
30.       int sum = 0;
31.       while (values.hasNext()) {
32.         sum += values.next().get();
33.       }
34.       output.collect(key, new IntWritable(sum));
35.     }
36.   }
37.
38.   public static void main(String[] args) throws Exception {
39.     JobConf conf = new JobConf(WordCount.class);
40.     conf.setJobName("wordcount");
41.
42.     conf.setOutputKeyClass(Text.class);
43.     conf.setOutputValueClass(IntWritable.class);
44.
45.     conf.setMapperClass(Map.class);
46.     conf.setCombinerClass(Reduce.class);
47.     conf.setReducerClass(Reduce.class);
48.
49.     conf.setInputFormat(TextInputFormat.class);
50.     conf.setOutputFormat(TextOutputFormat.class);
51.
52.     FileInputFormat.setInputPaths(conf, new Path(args[0]));
53.     FileOutputFormat.setOutputPath(conf, new Path(args[1]));
54.
55.     JobClient.runJob(conf);
56.   }
57. }
+
+ +
+ Usage + +

Assuming HADOOP_HOME is the root of the installation and + HADOOP_VERSION is the Hadoop version installed, compile + WordCount.java and create a jar:

+

+ $ mkdir wordcount_classes
+ + $ javac -classpath ${HADOOP_HOME}/hadoop-${HADOOP_VERSION}-core.jar + -d wordcount_classes WordCount.java +
+ $ jar -cvf /usr/joe/wordcount.jar -C wordcount_classes/ . +

+ +

Assuming that:

+
    +
  • + /usr/joe/wordcount/input - input directory in HDFS +
  • +
  • + /usr/joe/wordcount/output - output directory in HDFS +
  • +
+ +

Sample text-files as input:

+

+ $ bin/hadoop dfs -ls /usr/joe/wordcount/input/
+ /usr/joe/wordcount/input/file01
+ /usr/joe/wordcount/input/file02
+
+ $ bin/hadoop dfs -cat /usr/joe/wordcount/input/file01
+ Hello World Bye World
+
+ $ bin/hadoop dfs -cat /usr/joe/wordcount/input/file02
+ Hello Hadoop Goodbye Hadoop +

+ +

Run the application:

+

+ + $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount + /usr/joe/wordcount/input /usr/joe/wordcount/output + +

+ +

Output:

+

+ + $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000 + +
+ Bye 1
+ Goodbye 1
+ Hadoop 2
+ Hello 2
+ World 2
+

+ +

Applications can specify a comma separated list of paths which + would be present in the current working directory of the task + using the option -files. The -libjars + option allows applications to add jars to the classpaths of the maps + and reduces. The -archives option allows them to pass archives + as arguments; these are unzipped/unjarred and a link with the name of the + jar/zip is created in the current working directory of tasks. More + details about the command line options are available at + Hadoop Command Guide.

+ +

Running wordcount example with + -libjars and -files:
+ hadoop jar hadoop-examples.jar wordcount -files cachefile.txt + -libjars mylib.jar input output +

+
+ +
+ Walk-through + +

The WordCount application is quite straight-forward.

+ +

The Mapper implementation (lines 14-26), via the + map method (lines 18-25), processes one line at a time, + as provided by the specified TextInputFormat (line 49). + It then splits the line into tokens separated by whitespaces, via the + StringTokenizer, and emits a key-value pair of + < <word>, 1>.

+ +

+ For the given sample input the first map emits:
+ < Hello, 1>
+ < World, 1>
+ < Bye, 1>
+ < World, 1>
+

+ +

+ The second map emits:
+ < Hello, 1>
+ < Hadoop, 1>
+ < Goodbye, 1>
+ < Hadoop, 1>
+

+ +

We'll learn more about the number of maps spawned for a given job, and + how to control them in a fine-grained manner, a bit later in the + tutorial.

+ +

WordCount also specifies a combiner (line + 46). Hence, the output of each map is passed through the local combiner + (which is same as the Reducer as per the job + configuration) for local aggregation, after being sorted on the + keys.

+ +

+ The output of the first map:
+ < Bye, 1>
+ < Hello, 1>
+ < World, 2>
+

+ +

+ The output of the second map:
+ < Goodbye, 1>
+ < Hadoop, 2>
+ < Hello, 1>
+

+ +

The Reducer implementation (lines 28-36), via the + reduce method (lines 29-35) just sums up the values, + which are the occurence counts for each key (i.e. words in this example). +

+ +

+ Thus the output of the job is:
+ < Bye, 1>
+ < Goodbye, 1>
+ < Hadoop, 2>
+ < Hello, 2>
+ < World, 2>
+

+ +

The main method specifies various facets of the job, such + as the input/output paths (passed via the command line), key/value + types, input/output formats etc., in the JobConf. + It then calls JobClient.runJob (line 55) to submit the + job and monitor its progress.

+ +

We'll learn more about JobConf, JobClient, + Tool and other interfaces and classes a bit later in the + tutorial.

+
+
+ +
+ Map/Reduce - User Interfaces + +

This section provides a reasonable amount of detail on every user-facing + aspect of the Map/Reduce framework. This should help users implement, + configure and tune their jobs in a fine-grained manner. However, please + note that the javadoc for each class/interface remains the most + comprehensive documentation available; this is only meant to be a tutorial. +

+ +

Let us first take the Mapper and Reducer + interfaces. Applications typically implement them to provide the + map and reduce methods.

+ +

We will then discuss other core interfaces including + JobConf, JobClient, Partitioner, + OutputCollector, Reporter, + InputFormat, OutputFormat, + OutputCommitter and others.

+ +

Finally, we will wrap up by discussing some useful features of the + framework such as the DistributedCache, + IsolationRunner etc.

+ +
+ Payload + +

Applications typically implement the Mapper and + Reducer interfaces to provide the map and + reduce methods. These form the core of the job.

+ +
+ Mapper + +

+ Mapper maps input key/value pairs to a set of intermediate + key/value pairs.

+ +

Maps are the individual tasks that transform input records into + intermediate records. The transformed intermediate records do not need + to be of the same type as the input records. A given input pair may + map to zero or many output pairs.

+ +

The Hadoop Map/Reduce framework spawns one map task for each + InputSplit generated by the InputFormat for + the job.

+ +

Overall, Mapper implementations are passed the + JobConf for the job via the + + JobConfigurable.configure(JobConf) method and override it to + initialize themselves. The framework then calls + + map(WritableComparable, Writable, OutputCollector, Reporter) for + each key/value pair in the InputSplit for that task. + Applications can then override the + + Closeable.close() method to perform any required cleanup.

+ + +

Output pairs do not need to be of the same types as input pairs. A + given input pair may map to zero or many output pairs. Output pairs + are collected with calls to + + OutputCollector.collect(WritableComparable,Writable).

+ +

Applications can use the Reporter to report + progress, set application-level status messages and update + Counters, or just indicate that they are alive.

+ +

All intermediate values associated with a given output key are + subsequently grouped by the framework, and passed to the + Reducer(s) to determine the final output. Users can + control the grouping by specifying a Comparator via + + JobConf.setOutputKeyComparatorClass(Class).

+ +

The Mapper outputs are sorted and then + partitioned per Reducer. The total number of partitions is + the same as the number of reduce tasks for the job. Users can control + which keys (and hence records) go to which Reducer by + implementing a custom Partitioner.

+ +

Users can optionally specify a combiner, via + + JobConf.setCombinerClass(Class), to perform local aggregation of + the intermediate outputs, which helps to cut down the amount of data + transferred from the Mapper to the Reducer. +

+ +

The intermediate, sorted outputs are always stored in a simple + (key-len, key, value-len, value) format. + Applications can control if, and how, the + intermediate outputs are to be compressed and the + + CompressionCodec to be used via the JobConf. +
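As a concrete illustration, here is a minimal sketch, assuming the org.apache.hadoop.mapred API used throughout this tutorial and gzip as the codec (the class name is illustrative), of turning on compression for the intermediate map outputs:

import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapred.JobConf;

public class MapOutputCompression {
  public static void configure(JobConf conf) {
    // Compress the intermediate (map-side) outputs before they are shuffled.
    conf.setCompressMapOutput(true);
    // Gzip is used here; any available CompressionCodec implementation could be substituted.
    conf.setMapOutputCompressorClass(GzipCodec.class);
  }
}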

+ +
+ How Many Maps? + +

The number of maps is usually driven by the total size of the + inputs, that is, the total number of blocks of the input files.

+ +

The right level of parallelism for maps seems to be around 10-100 + maps per node, although it has been set up to 300 maps for very + cpu-light map tasks. Task setup takes a while, so it is best if the + maps take at least a minute to execute.

+ +

Thus, if you expect 10TB of input data and have a blocksize of + 128MB, you'll end up with 82,000 maps, unless + + setNumMapTasks(int) (which only provides a hint to the framework) + is used to set it even higher.
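As a small sketch of passing such a hint (the class name and the figure of 100,000 are purely illustrative):

import org.apache.hadoop.mapred.JobConf;

public class MapCountHint {
  public static void configure(JobConf conf) {
    // The framework would already create roughly 82,000 maps from the block count;
    // this call merely hints that an even higher number of maps is desired.
    conf.setNumMapTasks(100000);
  }
}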

+
+
+ +
+ Reducer + +

+ Reducer reduces a set of intermediate values which share a key to + a smaller set of values.

+ +

The number of reduces for the job is set by the user + via + JobConf.setNumReduceTasks(int).

+ +

Overall, Reducer implementations are passed the + JobConf for the job via the + + JobConfigurable.configure(JobConf) method and can override it to + initialize themselves. The framework then calls + + reduce(WritableComparable, Iterator, OutputCollector, Reporter) + method for each <key, (list of values)> + pair in the grouped inputs. Applications can then override the + + Closeable.close() method to perform any required cleanup.

+ +

Reducer has 3 primary phases: shuffle, sort and reduce. +

+ +
+ Shuffle + +

Input to the Reducer is the sorted output of the + mappers. In this phase the framework fetches the relevant partition + of the output of all the mappers, via HTTP.

+
+ +
+ Sort + +

The framework groups Reducer inputs by keys (since + different mappers may have output the same key) in this stage.

+ +

The shuffle and sort phases occur simultaneously; while + map-outputs are being fetched they are merged.

+ +
+ Secondary Sort + +

If equivalence rules for grouping the intermediate keys are + required to be different from those for grouping keys before + reduction, then one may specify a Comparator via + JobConf.setOutputValueGroupingComparator(Class). Since + JobConf.setOutputKeyComparatorClass(Class) can be used to + control how intermediate keys are sorted, these can be used in + conjunction to simulate a secondary sort on values.
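As an illustrative sketch only (the composite key layout naturalKey#secondaryField and the class name are assumptions, not part of the framework), grouping on the natural key while sorting on the full key could be wired up roughly as follows:

import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapred.JobConf;

// Forms reduce groups using only the portion of the key before '#', while the
// full composite key (including the secondary field) still drives the sort order.
public class NaturalKeyGroupingComparator extends WritableComparator {

  public NaturalKeyGroupingComparator() {
    super(Text.class, true);
  }

  public int compare(WritableComparable a, WritableComparable b) {
    // Compare only the natural-key prefix of each composite key.
    String left = ((Text) a).toString().split("#", 2)[0];
    String right = ((Text) b).toString().split("#", 2)[0];
    return left.compareTo(right);
  }

  public static void configure(JobConf conf) {
    // Sort on the full composite key using Text's default comparator ...
    conf.setOutputKeyComparatorClass(Text.Comparator.class);
    // ... but group values for a single reduce() call by the natural key alone.
    conf.setOutputValueGroupingComparator(NaturalKeyGroupingComparator.class);
  }
}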

+
+
+ +
+ Reduce + +

In this phase the + + reduce(WritableComparable, Iterator, OutputCollector, Reporter) + method is called for each <key, (list of values)> + pair in the grouped inputs.

+ +

The output of the reduce task is typically written to the + + FileSystem via + + OutputCollector.collect(WritableComparable, Writable).

+ +

Applications can use the Reporter to report + progress, set application-level status messages and update + Counters, or just indicate that they are alive.

+ +

The output of the Reducer is not sorted.

+
+ +
+ How Many Reduces? + +

The right number of reduces seems to be 0.95 or + 1.75 multiplied by (<no. of nodes> * + mapred.tasktracker.reduce.tasks.maximum).

+ +

With 0.95 all of the reduces can launch immediately + and start transferring map outputs as the maps finish. With + 1.75 the faster nodes will finish their first round of + reduces and launch a second wave of reduces, doing a much better job + of load balancing.

+ +

Increasing the number of reduces increases the framework overhead, + but improves load balancing and lowers the cost of failures.

+ +

The scaling factors above are slightly less than whole numbers to + reserve a few reduce slots in the framework for speculative-tasks and + failed tasks.

+
+ +
+ Reducer NONE + +

It is legal to set the number of reduce-tasks to zero if + no reduction is desired.

+ +

In this case the outputs of the map-tasks go directly to the + FileSystem, into the output path set by + + setOutputPath(Path). The framework does not sort the + map-outputs before writing them out to the FileSystem. +

+
+
+ +
+ Partitioner + +

+ Partitioner partitions the key space.

+ +

Partitioner controls the partitioning of the keys of the + intermediate map-outputs. The key (or a subset of the key) is used to + derive the partition, typically by a hash function. The total + number of partitions is the same as the number of reduce tasks for the + job. Hence this controls which of the m reduce tasks the + intermediate key (and hence the record) is sent to for reduction.

+ +

+ HashPartitioner is the default Partitioner.
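A minimal sketch of a custom Partitioner follows; the class name and the '#'-separated key layout are assumptions made purely for illustration:

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Partitioner;

// Sends every record whose key shares the same first '#'-separated field
// to the same reduce task, regardless of the rest of the key.
public class FirstFieldPartitioner implements Partitioner<Text, IntWritable> {

  public void configure(JobConf conf) {
    // No job-specific configuration is needed for this example.
  }

  public int getPartition(Text key, IntWritable value, int numReduceTasks) {
    String firstField = key.toString().split("#", 2)[0];
    // Mask the sign bit so the modulus is always non-negative.
    return (firstField.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
  }
}

Such a class would be registered on the job via JobConf.setPartitionerClass.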

+
+ +
+ Reporter + +

+ Reporter is a facility for Map/Reduce applications to report + progress, set application-level status messages and update + Counters.

+ +

Mapper and Reducer implementations can use + the Reporter to report progress or just indicate + that they are alive. In scenarios where the application takes a + significant amount of time to process individual key/value pairs, + this is crucial since the framework might assume that the task has + timed-out and kill that task. Another way to avoid this is to + set the configuration parameter mapred.task.timeout to a + high-enough value (or even set it to zero for no time-outs). +

+ +

Applications can also update Counters using the + Reporter.
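For example, here is a minimal sketch of a Mapper that reports progress, a status message and a counter; the class name, counter group and counter name are illustrative:

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class ReportingMapper extends MapReduceBase
    implements Mapper<LongWritable, Text, Text, IntWritable> {

  private final static IntWritable one = new IntWritable(1);

  public void map(LongWritable key, Text value,
                  OutputCollector<Text, IntWritable> output, Reporter reporter)
      throws IOException {
    // Indicate liveness so the framework does not assume the task has timed out.
    reporter.progress();
    // Set an application-level status message, visible on the web UI.
    reporter.setStatus("processing offset " + key.get());
    // Update an application-defined counter.
    reporter.incrCounter("MyApp", "RECORDS_SEEN", 1);
    output.collect(value, one);
  }
}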

+
+ +
+ OutputCollector + +

+ OutputCollector is a generalization of the facility provided by + the Map/Reduce framework to collect data output by the + Mapper or the Reducer (either the + intermediate outputs or the output of the job).

+
+ +

Hadoop Map/Reduce comes bundled with a + + library of generally useful mappers, reducers, and partitioners.

+
+ +
+ Job Configuration + +

+ JobConf represents a Map/Reduce job configuration.

+ +

JobConf is the primary interface for a user to describe + a Map/Reduce job to the Hadoop framework for execution. The framework + tries to faithfully execute the job as described by JobConf, + however:

+
    +
  • Some configuration parameters may have been marked as + final by administrators and hence cannot be altered.
  • +
  • + While some job parameters are straight-forward to set (e.g. + + setNumReduceTasks(int)), other parameters interact subtly with + the rest of the framework and/or job configuration and are + more complex to set (e.g. + + setNumMapTasks(int)). +
  • +
+ +

JobConf is typically used to specify the + Mapper, combiner (if any), Partitioner, + Reducer, InputFormat, + OutputFormat and OutputCommitter + implementations. JobConf also + indicates the set of input files + (setInputPaths(JobConf, Path...) + /addInputPath(JobConf, Path)) + and (setInputPaths(JobConf, String) + /addInputPaths(JobConf, String)) + and where the output files should be written + (setOutputPath(Path)).

+ +

Optionally, JobConf is used to specify other advanced + facets of the job such as the Comparator to be used, files + to be put in the DistributedCache, whether intermediate + and/or job outputs are to be compressed (and how), debugging via + user-provided scripts + (setMapDebugScript(String)/setReduceDebugScript(String)) + , whether job tasks can be executed in a speculative manner + (setMapSpeculativeExecution(boolean))/(setReduceSpeculativeExecution(boolean)) + , maximum number of attempts per task + (setMaxMapAttempts(int)/setMaxReduceAttempts(int)) + , percentage of tasks failure which can be tolerated by the job + (setMaxMapTaskFailuresPercent(int)/setMaxReduceTaskFailuresPercent(int)) + etc.

+ +

Of course, users can use + set(String, String)/get(String, String) + to set/get arbitrary parameters needed by applications. However, use the + DistributedCache for large amounts of (read-only) data.
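For instance, a brief sketch combining a few of these facets; the parameter name and the values are illustrative, not recommendations:

import org.apache.hadoop.mapred.JobConf;

public class AdvancedJobConf {
  public static void configure(JobConf conf) {
    // Turn off speculative execution for the map tasks only.
    conf.setMapSpeculativeExecution(false);
    // Allow each map task to be attempted up to 8 times before the job fails.
    conf.setMaxMapAttempts(8);
    // Tolerate up to 5% failed map tasks without failing the whole job.
    conf.setMaxMapTaskFailuresPercent(5);
    // Arbitrary application parameters can be stored and read back by tasks.
    conf.set("myapp.dictionary.path", "/user/joe/dictionary.txt");
  }
}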

+
+ +
+ Task Execution & Environment + +

The TaskTracker executes the Mapper/ + Reducer task as a child process in a separate jvm. +

+ +

The child-task inherits the environment of the parent + TaskTracker. The user can specify additional options to the + child-jvm via the mapred.{map|reduce}.child.java.opts + configuration parameter in the JobConf such as non-standard + paths for the run-time linker to search shared libraries via + -Djava.library.path=<> etc. If the + mapred.{map|reduce}.child.java.opts parameters contains the + symbol @taskid@ it is interpolated with value of + taskid of the map/reduce task.

+ +

Here is an example with multiple arguments and substitutions, + showing jvm GC logging, and start of a passwordless JVM JMX agent so that + it can connect with jconsole and the likes to watch child memory, + threads and get thread dumps. It also sets the maximum heap-size of the + map and reduce child jvm to 512MB & 1024MB respectively. It also + adds an additional path to the java.library.path of the + child-jvm.

+ +

+ <property>
+   <name>mapred.map.child.java.opts</name>
+   <value>
+      + -Xmx512M -Djava.library.path=/home/mycompany/lib + -verbose:gc -Xloggc:/tmp/@taskid@.gc
+      + -Dcom.sun.management.jmxremote.authenticate=false + -Dcom.sun.management.jmxremote.ssl=false
+   </value>
+ </property> +

+ +

+ <property>
+   <name>mapred.reduce.child.java.opts</name>
+   <value>
+      + -Xmx1024M -Djava.library.path=/home/mycompany/lib + -verbose:gc -Xloggc:/tmp/@taskid@.gc
+      + -Dcom.sun.management.jmxremote.authenticate=false + -Dcom.sun.management.jmxremote.ssl=false
+   </value>
+ </property> +

+ +
+ Memory management +

Users/admins can also specify the maximum virtual memory + of the launched child-task, and any sub-process it launches + recursively, using mapred.{map|reduce}.child.ulimit. Note + that the value set here is a per-process limit. + The value for mapred.{map|reduce}.child.ulimit should be + specified in kilobytes (KB). The value must also be greater than + or equal to the -Xmx passed to the JVM, or else the VM might not start. +
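For example, a short sketch of setting the per-process limit for map tasks (the 2 GB figure is arbitrary):

import org.apache.hadoop.mapred.JobConf;

public class ChildUlimit {
  public static void configure(JobConf conf) {
    // Limit each map task, and any sub-process it spawns, to 2 GB of virtual
    // memory; the value is expressed in kilobytes (2 * 1024 * 1024 KB).
    conf.set("mapred.map.child.ulimit", "2097152");
  }
}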

+ +

Note: mapred.{map|reduce}.child.java.opts are used only + for configuring the launched child tasks from task tracker. Configuring + the memory options for daemons is documented in + + cluster_setup.html

+ +

The memory available to some parts of the framework is also + configurable. In map and reduce tasks, performance may be influenced + by adjusting parameters influencing the concurrency of operations and + the frequency with which data will hit disk. Monitoring the filesystem + counters for a job- particularly relative to byte counts from the map + and into the reduce- is invaluable to the tuning of these + parameters.

+ +

Users can choose to override default limits of Virtual Memory and RAM + enforced by the task tracker, if memory management is enabled. + Users can set the following parameter per job:

+ + + + + + + +
Name | Type | Description
mapred.task.maxvmemintA number, in bytes, that represents the maximum Virtual Memory + task-limit for each task of the job. A task will be killed if + it consumes more Virtual Memory than this number. +
mapred.task.maxpmemintA number, in bytes, that represents the maximum RAM task-limit + for each task of the job. This number can be optionally used by + Schedulers to prevent over-scheduling of tasks on a node based + on RAM needs. +
+
+
+ Map Parameters + +

A record emitted from a map will be serialized into a buffer and + metadata will be stored into accounting buffers. As described in the + following options, when either the serialization buffer or the + metadata exceed a threshold, the contents of the buffers will be + sorted and written to disk in the background while the map continues + to output records. If either buffer fills completely while the spill + is in progress, the map thread will block. When the map is finished, + any remaining records are written to disk and all on-disk segments + are merged into a single file. Minimizing the number of spills to + disk can decrease map time, but a larger buffer also decreases the + memory available to the mapper.

+ + + + + + + + + +
Name | Type | Description
io.sort.mbintThe cumulative size of the serialization and accounting + buffers storing records emitted from the map, in megabytes. +
io.sort.record.percentfloatThe ratio of serialization to accounting space can be + adjusted. Each serialized record requires 16 bytes of + accounting information in addition to its serialized size to + effect the sort. This percentage of space allocated from + io.sort.mb affects the probability of a spill to + disk being caused by either exhaustion of the serialization + buffer or the accounting space. Clearly, for a map outputting + small records, a higher value than the default will likely + decrease the number of spills to disk.
io.sort.spill.percentfloatThis is the threshold for the accounting and serialization + buffers. When this percentage of either buffer has filled, + their contents will be spilled to disk in the background. Let + io.sort.record.percent be r, + io.sort.mb be x, and this value be + q. The maximum number of records collected before the + collection thread will spill is r * x * q * 2^16. + Note that a higher value may decrease the number of- or even + eliminate- merges, but will also increase the probability of + the map task getting blocked. The lowest average map times are + usually obtained by accurately estimating the size of the map + output and preventing multiple spills.
+ +

Other notes

+
    +
  • If either spill threshold is exceeded while a spill is in + progress, collection will continue until the spill is finished. + For example, if io.sort.spill.percent is set + to 0.33, and the remainder of the buffer is filled while the spill + runs, the next spill will include all the collected records, or + 0.66 of the buffer, and will not generate additional spills. In + other words, the thresholds are defining triggers, not + blocking.
  • +
  • A record larger than the serialization buffer will first + trigger a spill, then be spilled to a separate file. It is + undefined whether or not this record will first pass through the + combiner.
  • +
+
+ +
+ Shuffle/Reduce Parameters + +

As described previously, each reduce fetches the output assigned + to it by the Partitioner via HTTP into memory and periodically + merges these outputs to disk. If intermediate compression of map + outputs is turned on, each output is decompressed into memory. The + following options affect the frequency of these merges to disk prior + to the reduce and the memory allocated to map output during the + reduce.

+ + + + + + + + + + + + + +
Name | Type | Description
io.sort.factorintSpecifies the number of segments on disk to be merged at + the same time. It limits the number of open files and + compression codecs during the merge. If the number of files + exceeds this limit, the merge will proceed in several passes. + Though this limit also applies to the map, most jobs should be + configured so that hitting this limit is unlikely + there.
mapred.inmem.merge.thresholdintThe number of sorted map outputs fetched into memory + before being merged to disk. Like the spill thresholds in the + preceding note, this is not defining a unit of partition, but + a trigger. In practice, this is usually set very high (1000) + or disabled (0), since merging in-memory segments is often + less expensive than merging from disk (see notes following + this table). This threshold influences only the frequency of + in-memory merges during the shuffle.
mapred.job.shuffle.merge.percentfloatThe memory threshold for fetched map outputs before an + in-memory merge is started, expressed as a percentage of + memory allocated to storing map outputs in memory. Since map + outputs that can't fit in memory can be stalled, setting this + high may decrease parallelism between the fetch and merge. + Conversely, values as high as 1.0 have been effective for + reduces whose input can fit entirely in memory. This parameter + influences only the frequency of in-memory merges during the + shuffle.
mapred.job.shuffle.input.buffer.percentfloatThe percentage of memory- relative to the maximum heapsize + as typically specified in mapred.reduce.child.java.opts- + that can be allocated to storing map outputs during the + shuffle. Though some memory should be set aside for the + framework, in general it is advantageous to set this high + enough to store large and numerous map outputs.
mapred.job.reduce.input.buffer.percentfloatThe percentage of memory relative to the maximum heapsize + in which map outputs may be retained during the reduce. When + the reduce begins, map outputs will be merged to disk until + those that remain are under the resource limit this defines. + By default, all map outputs are merged to disk before the + reduce begins to maximize the memory available to the reduce. + For less memory-intensive reduces, this should be increased to + avoid trips to disk.
+ +

Other notes

+
    +
  • If a map output is larger than 25 percent of the memory + allocated to copying map outputs, it will be written directly to + disk without first staging through memory.
  • +
  • When running with a combiner, the reasoning about high merge + thresholds and large buffers may not hold. For merges started + before all map outputs have been fetched, the combiner is run + while spilling to disk. In some cases, one can obtain better + reduce times by spending resources combining map outputs- making + disk spills small and parallelizing spilling and fetching- rather + than aggressively increasing buffer sizes.
  • +
  • When merging in-memory map outputs to disk to begin the + reduce, if an intermediate merge is necessary because there are + segments to spill and at least io.sort.factor + segments already on disk, the in-memory map outputs will be part + of the intermediate merge.
  • +
+ +
+ +
+ Directory Structure +

The task tracker has a local directory, + ${mapred.local.dir}/taskTracker/, to create the localized + cache and localized job. It can define multiple local directories + (spanning multiple disks) and then each filename is assigned to a + semi-random local directory. When the job starts, the task tracker + creates a localized job directory relative to the local directory + specified in the configuration. Thus the task tracker directory + structure looks as follows:

+
    +
  • ${mapred.local.dir}/taskTracker/archive/ : + The distributed cache. This directory holds the localized distributed + cache. Thus localized distributed cache is shared among all + the tasks and jobs
  • +
  • ${mapred.local.dir}/taskTracker/jobcache/$jobid/ : + The localized job directory +
      +
    • ${mapred.local.dir}/taskTracker/jobcache/$jobid/work/ + : The job-specific shared directory. The tasks can use this space as + scratch space and share files among them. This directory is exposed + to the users through the configuration property + job.local.dir. The directory can be accessed through the + API + JobConf.getJobLocalDir(). It is also available as a system property, + so users (streaming etc.) can call + System.getProperty("job.local.dir") to access the + directory.
    • +
    • ${mapred.local.dir}/taskTracker/jobcache/$jobid/jars/ + : The jars directory, which has the job jar file and expanded jar. + The job.jar is the application's jar file that is + automatically distributed to each machine. It is expanded in the jars + directory before the tasks for the job start. The job.jar location + is accessible to the application through the API + + JobConf.getJar() . To access the unjarred directory, + new File(JobConf.getJar()).getParent() can be called.
    • +
    • ${mapred.local.dir}/taskTracker/jobcache/$jobid/job.xml + : The job.xml file, the generic job configuration, localized for + the job.
    • +
    • ${mapred.local.dir}/taskTracker/jobcache/$jobid/$taskid + : The task directory for each task attempt. Each task directory + again has the following structure : +
        +
      • ${mapred.local.dir}/taskTracker/jobcache/$jobid/$taskid/job.xml + : A job.xml file, the task-localized job configuration. Task localization + means that properties have been set that are specific to + this particular task within the job. The properties localized for + each task are described below.
      • +
      • ${mapred.local.dir}/taskTracker/jobcache/$jobid/$taskid/output + : A directory for intermediate output files. This contains the + temporary map reduce data generated by the framework + such as map output files etc.
      • +
      • ${mapred.local.dir}/taskTracker/jobcache/$jobid/$taskid/work + : The current working directory of the task. + With JVM reuse enabled for tasks, this + directory will be the directory in which the JVM was started.
      • +
      • ${mapred.local.dir}/taskTracker/jobcache/$jobid/$taskid/work/tmp + : The temporary directory for the task. + (The user can specify the property mapred.child.tmp to set + the value of the temporary directory for map and reduce tasks. This + defaults to ./tmp. If the value is not an absolute path, + it is prepended with the task's working directory. Otherwise, it is + used directly. The directory will be created if it doesn't exist. + Then, the child java tasks are executed with the option + -Djava.io.tmpdir='the absolute path of the tmp dir'. + Pipes and streaming are set with the environment variable + TMPDIR='the absolute path of the tmp dir'.) This + directory is created if mapred.child.tmp has the value + ./tmp
      • +
      +
    • +
    +
  • +
+
+ +
+ Task JVM Reuse +

Jobs can enable task JVMs to be reused by specifying the job + configuration mapred.job.reuse.jvm.num.tasks. If the + value is 1 (the default), then JVMs are not reused + (i.e. 1 task per JVM). If it is -1, there is no limit to the number + of tasks a JVM can run (of the same job). One can also specify some + value greater than 1 using the api + + JobConf.setNumTasksToExecutePerJvm(int)
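For example, a minimal sketch (the class name is illustrative):

import org.apache.hadoop.mapred.JobConf;

public class JvmReuse {
  public static void configure(JobConf conf) {
    // Let each launched JVM run an unlimited number of tasks of the same job.
    conf.setNumTasksToExecutePerJvm(-1);
  }
}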

+
+ +

The following properties are localized in the job configuration + for each task's execution:

+ + + + + + + + + + + + + + + + + + + + + + + +
Name | Type | Description
mapred.job.idStringThe job id
mapred.jarStringjob.jar location in job directory
job.local.dir String The job specific shared scratch space
mapred.tip.id String The task id
mapred.task.id String The task attempt id
mapred.task.is.map boolean Is this a map task
mapred.task.partition int The id of the task within the job
map.input.file String The filename that the map is reading from
map.input.start long The offset of the start of the map input split
map.input.length long The number of bytes in the map input split
mapred.work.output.dir String The task's temporary output directory
+ +

The standard output (stdout) and error (stderr) streams of the task + are read by the TaskTracker and logged to + ${HADOOP_LOG_DIR}/userlogs

+ +

The DistributedCache can also be used + to distribute both jars and native libraries for use in the map + and/or reduce tasks. The child-jvm always has its + current working directory added to the + java.library.path and LD_LIBRARY_PATH. + And hence the cached libraries can be loaded via + + System.loadLibrary or + + System.load. More details on how to load shared libraries through + distributed cache are documented at + + native_libraries.html

+
+ +
+ Job Submission and Monitoring + +

+ JobClient is the primary interface by which user-job interacts + with the JobTracker.

+ +

JobClient provides facilities to submit jobs, track their + progress, access component-tasks' reports and logs, get the Map/Reduce + cluster's status information and so on.

+ +

The job submission process involves:

+
    +
  1. Checking the input and output specifications of the job.
  2. +
  3. Computing the InputSplit values for the job.
  4. +
  5. + Setting up the requisite accounting information for the + DistributedCache of the job, if necessary. +
  6. +
  7. + Copying the job's jar and configuration to the Map/Reduce system + directory on the FileSystem. +
  8. +
  9. + Submitting the job to the JobTracker and optionally + monitoring its status. +
  10. +
+

Job history files are also logged to the user-specified directory + hadoop.job.history.user.location, + which defaults to the job output directory. The files are stored in + "_logs/history/" in the specified directory. Hence, by default they + will be in mapred.output.dir/_logs/history. The user can stop + logging by giving the value none for + hadoop.job.history.user.location

+ +

The user can view a summary of the history logs in the specified directory using the following command
+ $ bin/hadoop job -history output-dir
+ This command will print job details, failed and killed tip + details.
+ More details about the job such as successful tasks and + task attempts made for each task can be viewed using the + following command
+ $ bin/hadoop job -history all output-dir

+ +

User can use + OutputLogFilter + to filter log files from the output directory listing.

+ +

Normally the user creates the application, describes various facets + of the job via JobConf, and then uses the + JobClient to submit the job and monitor its progress.

+ +
+ Job Control + +

Users may need to chain Map/Reduce jobs to accomplish complex + tasks which cannot be done via a single Map/Reduce job. This is fairly + easy since the output of the job typically goes to distributed + file-system, and the output, in turn, can be used as the input for the + next job.

+ +

However, this also means that the onus of ensuring that jobs are complete (success/failure) lies squarely on the clients. In such cases, the various job-control options are:

+ +
+
+ +
+ Job Input + +

+ InputFormat describes the input-specification for a Map/Reduce job. +

+ +

The Map/Reduce framework relies on the InputFormat of + the job to:

+
    +
  1. Validate the input-specification of the job.
  2. +
  3. + Split-up the input file(s) into logical InputSplit + instances, each of which is then assigned to an individual + Mapper. +
  4. +
  5. + Provide the RecordReader implementation used to + glean input records from the logical InputSplit for + processing by the Mapper. +
  6. +
+ +

The default behavior of file-based InputFormat + implementations, typically sub-classes of + + FileInputFormat, is to split the input into logical + InputSplit instances based on the total size, in bytes, of + the input files. However, the FileSystem blocksize of the + input files is treated as an upper bound for input splits. A lower bound + on the split size can be set via mapred.min.split.size.
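As an illustration only (the property name comes from the paragraph above; the 64 MB figure is an arbitrary example), the lower bound could be set on the job configuration like this:

import org.apache.hadoop.mapred.JobConf;

public class MinSplitSizeExample {
  public static void main(String[] args) {
    JobConf conf = new JobConf(MinSplitSizeExample.class);
    // Ask FileInputFormat-based formats not to create splits smaller
    // than 64 MB (example value; tune to your data and block size).
    conf.setLong("mapred.min.split.size", 64L * 1024 * 1024);
  }
}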

+ +

Clearly, logical splits based on input-size are insufficient for many applications since record boundaries must be respected. In such cases, the application should implement a RecordReader, which is responsible for respecting record-boundaries and presenting a record-oriented view of the logical InputSplit to the individual task.

+ +

+ TextInputFormat is the default InputFormat.

+ +

If TextInputFormat is the InputFormat for a + given job, the framework detects input-files with the .gz + extensions and automatically decompresses them using the + appropriate CompressionCodec. However, it must be noted that + compressed files with the above extensions cannot be split and + each compressed file is processed in its entirety by a single mapper.

+ +
+ InputSplit + +

+ InputSplit represents the data to be processed by an individual + Mapper.

+ +

Typically InputSplit presents a byte-oriented view of + the input, and it is the responsibility of RecordReader + to process and present a record-oriented view.

+ +

+ FileSplit is the default InputSplit. It sets + map.input.file to the path of the input file for the + logical split.

+
+ +
+ RecordReader + +

+ RecordReader reads <key, value> pairs from an + InputSplit.

+ +

Typically the RecordReader converts the byte-oriented view of the input, provided by the InputSplit, and presents a record-oriented view to the Mapper implementations for processing. RecordReader thus assumes the responsibility of processing record boundaries and presents the tasks with keys and values.

+
+
+ +
+ Job Output + +

+ OutputFormat describes the output-specification for a Map/Reduce + job.

+ +

The Map/Reduce framework relies on the OutputFormat of + the job to:

+
    +
  1. + Validate the output-specification of the job; for example, check that + the output directory doesn't already exist. +
  2. +
  3. + Provide the RecordWriter implementation used to + write the output files of the job. Output files are stored in a + FileSystem. +
  4. +
+ +

TextOutputFormat is the default + OutputFormat.

+ +
+ OutputCommitter + +

+ OutputCommitter describes the commit of task output for a + Map/Reduce job.

+ +

The Map/Reduce framework relies on the OutputCommitter + of the job to:

+
    +
  1. + Setup the job during initialization. For example, create + the temporary output directory for the job during the + initialization of the job. + Job setup is done by a separate task when the job is + in PREP state and after initializing tasks. Once the setup task + completes, the job will be moved to RUNNING state. +
  2. +
3. Cleanup the job after the job completion. For example, remove the temporary output directory after the job completion. Job cleanup is done by a separate task at the end of the job. The job is declared SUCCEEDED/FAILED/KILLED after the cleanup task completes.
  4. +
  5. + Setup the task temporary output. + Task setup is done as part of the same task, during task initialization. +
  6. +
  7. + Check whether a task needs a commit. This is to avoid the commit + procedure if a task does not need commit. +
  8. +
9. Commit of the task output. Once the task is done, it will commit its output if required.
  10. +
11. Discard the task commit. If the task has failed or been killed, the output will be cleaned up. If the task could not clean up (in its exception block), a separate task will be launched with the same attempt-id to do the cleanup.
  12. +
+

FileOutputCommitter is the default OutputCommitter. Job setup/cleanup tasks occupy map or reduce slots, whichever is free on the TaskTracker. The JobCleanup task, TaskCleanup tasks and JobSetup task have the highest priority, in that order.

+
+ +
+ Task Side-Effect Files + +

In some applications, component tasks need to create and/or write to + side-files, which differ from the actual job-output files.

+ +

In such cases there could be issues with two instances of the same + Mapper or Reducer running simultaneously (for + example, speculative tasks) trying to open and/or write to the same + file (path) on the FileSystem. Hence the + application-writer will have to pick unique names per task-attempt + (using the attemptid, say attempt_200709221812_0001_m_000000_0), + not just per task.

+ +

To avoid these issues the Map/Reduce framework, when the + OutputCommitter is FileOutputCommitter, + maintains a special + ${mapred.output.dir}/_temporary/_${taskid} sub-directory + accessible via ${mapred.work.output.dir} + for each task-attempt on the FileSystem where the output + of the task-attempt is stored. On successful completion of the + task-attempt, the files in the + ${mapred.output.dir}/_temporary/_${taskid} (only) + are promoted to ${mapred.output.dir}. Of course, + the framework discards the sub-directory of unsuccessful task-attempts. + This process is completely transparent to the application.

+ +

The application-writer can take advantage of this feature by creating any side-files required in ${mapred.work.output.dir} during execution of a task via FileOutputFormat.getWorkOutputPath(), and the framework will promote them similarly for successful task-attempts, thus eliminating the need to pick unique paths per task-attempt.

+ +

Note: The value of ${mapred.work.output.dir} during execution of a particular task-attempt is actually ${mapred.output.dir}/_temporary/_${taskid}, and this value is set by the Map/Reduce framework. So, just create any side-files in the path returned by FileOutputFormat.getWorkOutputPath() from the map/reduce task to take advantage of this feature.
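A hedged sketch of the configure/close portion of a task that writes a side-file (built on MapReduceBase; the map/reduce logic is omitted and the file name side-file.txt is hypothetical):

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;

public class SideFileTaskBase extends MapReduceBase {
  private FSDataOutputStream sideFile;

  public void configure(JobConf job) {
    try {
      // ${mapred.work.output.dir} for this task-attempt
      Path workDir = FileOutputFormat.getWorkOutputPath(job);
      FileSystem fs = workDir.getFileSystem(job);
      // Hypothetical side-file; it is promoted to ${mapred.output.dir}
      // only if this task-attempt succeeds.
      sideFile = fs.create(new Path(workDir, "side-file.txt"));
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
  }

  public void close() throws IOException {
    if (sideFile != null) {
      sideFile.close();
    }
  }
}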

+ +

The entire discussion holds true for maps of jobs with + reducer=NONE (i.e. 0 reduces) since output of the map, in that case, + goes directly to HDFS.

+
+ +
+ RecordWriter + +

+ RecordWriter writes the output <key, value> + pairs to an output file.

+ +

RecordWriter implementations write the job outputs to the + FileSystem.

+
+
+ +
+ Other Useful Features + +
+ Submitting Jobs to Queues +

Users submit jobs to Queues. Queues, as collections of jobs, allow the system to provide specific functionality. For example, queues use ACLs to control which users can submit jobs to them. Queues are expected to be primarily used by Hadoop Schedulers.

+ +

Hadoop comes configured with a single mandatory queue, called + 'default'. Queue names are defined in the + mapred.queue.names property of the Hadoop site + configuration. Some job schedulers, such as the + Capacity Scheduler, + support multiple queues.

+ +

A job defines the queue it needs to be submitted to through the + mapred.job.queue.name property, or through the + setQueueName(String) + API. Setting the queue name is optional. If a job is submitted + without an associated queue name, it is submitted to the 'default' + queue.
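For instance (a minimal sketch; the queue name 'research' is hypothetical and would have to be listed in mapred.queue.names):

import org.apache.hadoop.mapred.JobConf;

public class QueueNameExample {
  public static void main(String[] args) {
    JobConf conf = new JobConf(QueueNameExample.class);
    // Submit to a hypothetical 'research' queue; omit this call and the
    // job goes to the 'default' queue.
    conf.setQueueName("research");
  }
}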

+
+
+ Counters + +

Counters represent global counters, defined either by + the Map/Reduce framework or applications. Each Counter can + be of any Enum type. Counters of a particular + Enum are bunched into groups of type + Counters.Group.

+ +

Applications can define arbitrary Counters (of type + Enum) and update them via + + Reporter.incrCounter(Enum, long) or + + Reporter.incrCounter(String, String, long) + in the map and/or + reduce methods. These counters are then globally + aggregated by the framework.

+
+ +
+ DistributedCache + +

+ DistributedCache distributes application-specific, large, read-only + files efficiently.

+ +

DistributedCache is a facility provided by the + Map/Reduce framework to cache files (text, archives, jars and so on) + needed by applications.

+ +

Applications specify the files to be cached via urls (hdfs://) + in the JobConf. The DistributedCache + assumes that the files specified via hdfs:// urls are already present + on the FileSystem.

+ +

The framework will copy the necessary files to the slave node before any tasks for the job are executed on that node. Its efficiency stems from the fact that the files are only copied once per job, and from the ability to cache archives which are un-archived on the slaves.

+ +

DistributedCache tracks the modification timestamps of + the cached files. Clearly the cache files should not be modified by + the application or externally while the job is executing.

+ +

DistributedCache can be used to distribute simple, + read-only data/text files and more complex types such as archives and + jars. Archives (zip, tar, tgz and tar.gz files) are + un-archived at the slave nodes. Files + have execution permissions set.

+ +

The files/archives can be distributed by setting the property + mapred.cache.{files|archives}. If more than one + file/archive has to be distributed, they can be added as comma + separated paths. The properties can also be set by APIs + + DistributedCache.addCacheFile(URI,conf)/ + + DistributedCache.addCacheArchive(URI,conf) and + + DistributedCache.setCacheFiles(URIs,conf)/ + + DistributedCache.setCacheArchives(URIs,conf) + where URI is of the form + hdfs://host:port/absolute-path#link-name. + In Streaming, the files can be distributed through command line + option -cacheFile/-cacheArchive.

+ +

Optionally users can also direct the DistributedCache + to symlink the cached file(s) into the current working + directory of the task via the + + DistributedCache.createSymlink(Configuration) api. Or by setting + the configuration property mapred.create.symlink + as yes. The DistributedCache will use the + fragment of the URI as the name of the symlink. + For example, the URI + hdfs://namenode:port/lib.so.1#lib.so + will have the symlink name as lib.so in task's cwd + for the file lib.so.1 in distributed cache.
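Putting the last two paragraphs together, a hedged sketch (the HDFS path, port and fragment name are made up) of caching a file and symlinking it into the task's working directory:

import java.net.URI;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.mapred.JobConf;

public class CacheFileExample {
  public static void main(String[] args) throws Exception {
    JobConf conf = new JobConf(CacheFileExample.class);
    // The file must already be on HDFS; '#lookup.dat' is the symlink
    // name the task will see in its cwd (hypothetical names/port).
    DistributedCache.addCacheFile(
        new URI("hdfs://namenode:9000/data/lookup.dat#lookup.dat"), conf);
    // Equivalent to setting mapred.create.symlink to yes.
    DistributedCache.createSymlink(conf);
  }
}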

+ +

The DistributedCache can also be used as a + rudimentary software distribution mechanism for use in the + map and/or reduce tasks. It can be used to distribute both + jars and native libraries. The + + DistributedCache.addArchiveToClassPath(Path, Configuration) or + + DistributedCache.addFileToClassPath(Path, Configuration) api + can be used to cache files/jars and also add them to the + classpath of child-jvm. The same can be done by setting + the configuration properties + mapred.job.classpath.{files|archives}. Similarly the + cached files that are symlinked into the working directory of the + task can be used to distribute native libraries and load them.

+ +
+ +
+ Tool + +

The Tool + interface supports the handling of generic Hadoop command-line options. +

+ +

Tool is the standard for any Map/Reduce tool or + application. The application should delegate the handling of + standard command-line options to + + GenericOptionsParser via + + ToolRunner.run(Tool, String[]) and only handle its custom + arguments.

+ +

+ The generic Hadoop command-line options are:
+ + -conf <configuration file> + +
+ + -D <property=value> + +
+ + -fs <local|namenode:port> + +
+ + -jt <local|jobtracker:port> + +

+
+ +
+ IsolationRunner + +

+ IsolationRunner is a utility to help debug Map/Reduce programs.

+ +

To use the IsolationRunner, first set + keep.failed.tasks.files to true + (also see keep.tasks.files.pattern).
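A minimal sketch of a job requesting that the files of failed tasks be kept (the JobConf setter shown here is the programmatic equivalent of setting keep.failed.tasks.files directly):

import org.apache.hadoop.mapred.JobConf;

public class KeepFailedTaskFilesExample {
  public static void main(String[] args) {
    JobConf conf = new JobConf(KeepFailedTaskFilesExample.class);
    // Keep the intermediate files of failed tasks on the TaskTracker
    // so they can be re-run later with IsolationRunner.
    conf.setKeepFailedTaskFiles(true);
  }
}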

+ +

+ Next, go to the node on which the failed task ran and go to the + TaskTracker's local directory and run the + IsolationRunner:
+ $ cd <local path>/taskTracker/${taskid}/work
+ + $ bin/hadoop org.apache.hadoop.mapred.IsolationRunner ../job.xml + +

+ +

IsolationRunner will run the failed task in a single + jvm, which can be in the debugger, over precisely the same input.

+
+ +
+ Profiling +

Profiling is a utility to get a representative (2 or 3) sample of built-in java profiler output for a sample of maps and reduces.

+ +

User can specify whether the system should collect profiler + information for some of the tasks in the job by setting the + configuration property mapred.task.profile. The + value can be set using the api + + JobConf.setProfileEnabled(boolean). If the value is set + true, the task profiling is enabled. The profiler + information is stored in the user log directory. By default, + profiling is not enabled for the job.

+ +

Once the user configures that profiling is needed, he/she can use the configuration property mapred.task.profile.{maps|reduces} to set the ranges of map/reduce tasks to profile. The value can be set using the api JobConf.setProfileTaskRange(boolean,String). By default, the specified range is 0-2.

+ +

User can also specify the profiler configuration arguments by + setting the configuration property + mapred.task.profile.params. The value can be specified + using the api + + JobConf.setProfileParams(String). If the string contains a + %s, it will be replaced with the name of the profiling + output file when the task runs. These parameters are passed to the + task child JVM on the command line. The default value for + the profiling parameters is + -agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s +
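Pulling the three profiling properties above into one hedged sketch (the 0-1 task range is an arbitrary example; the hprof string is the default quoted above):

import org.apache.hadoop.mapred.JobConf;

public class ProfilingExample {
  public static void main(String[] args) {
    JobConf conf = new JobConf(ProfilingExample.class);
    conf.setProfileEnabled(true);            // mapred.task.profile
    conf.setProfileTaskRange(true, "0-1");   // profile map tasks 0 and 1
    conf.setProfileTaskRange(false, "0-1");  // profile reduce tasks 0 and 1
    // %s is replaced with the name of the profiling output file at run time.
    conf.setProfileParams(
        "-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s");
  }
}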

+
+ +
+ Debugging +

The Map/Reduce framework provides a facility to run user-provided + scripts for debugging. When a map/reduce task fails, a user can run + a debug script, to process task logs for example. The script is + given access to the task's stdout and stderr outputs, syslog and + jobconf. The output from the debug script's stdout and stderr is + displayed on the console diagnostics and also as part of the + job UI.

+ +

In the following sections we discuss how to submit a debug script + with a job. The script file needs to be distributed and submitted to + the framework.

+
+ How to distribute the script file: +

+ The user needs to use + DistributedCache + to distribute and symlink the script file.

+
+
+ How to submit the script: +

A quick way to submit the debug script is to set values for the + properties mapred.map.task.debug.script and + mapred.reduce.task.debug.script, for debugging map and + reduce tasks respectively. These properties can also be set by using APIs + + JobConf.setMapDebugScript(String) and + + JobConf.setReduceDebugScript(String) . In streaming mode, a debug + script can be submitted with the command-line options + -mapdebug and -reducedebug, for debugging + map and reduce tasks respectively.
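A minimal sketch (the script paths are hypothetical and, as described in the previous section, the scripts must first be distributed and symlinked through the DistributedCache):

import org.apache.hadoop.mapred.JobConf;

public class DebugScriptExample {
  public static void main(String[] args) {
    JobConf conf = new JobConf(DebugScriptExample.class);
    // Symlink names created by the DistributedCache (hypothetical names).
    conf.setMapDebugScript("./map-debug.sh");
    conf.setReduceDebugScript("./reduce-debug.sh");
  }
}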

+ +

The arguments to the script are the task's stdout, stderr, + syslog and jobconf files. The debug command, run on the node where + the map/reduce task failed, is:
+ $script $stdout $stderr $syslog $jobconf

+ +

Pipes programs have the C++ program name as a fifth argument for the command. Thus for the pipes programs the command is
+ $script $stdout $stderr $syslog $jobconf $program +

+
+ +
+ Default Behavior: +

For pipes, a default script is run that processes core dumps under gdb, prints the stack trace and gives info about running threads.

+
+
+ +
+ JobControl + +

+ JobControl is a utility which encapsulates a set of Map/Reduce jobs + and their dependencies.

+
+ +
+ Data Compression + +

Hadoop Map/Reduce provides facilities for the application-writer to + specify compression for both intermediate map-outputs and the + job-outputs i.e. output of the reduces. It also comes bundled with + + CompressionCodec implementation for the + zlib compression + algorithm. The gzip file format is also + supported.

+ +

Hadoop also provides native implementations of the above compression + codecs for reasons of both performance (zlib) and non-availability of + Java libraries. More details on their usage and availability are + available here.

+ +
+ Intermediate Outputs + +

Applications can control compression of intermediate map-outputs + via the + + JobConf.setCompressMapOutput(boolean) api and the + CompressionCodec to be used via the + + JobConf.setMapOutputCompressorClass(Class) api.
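For example (a sketch only; GzipCodec is used here simply as one of the bundled codecs):

import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapred.JobConf;

public class MapOutputCompressionExample {
  public static void main(String[] args) {
    JobConf conf = new JobConf(MapOutputCompressionExample.class);
    conf.setCompressMapOutput(true);                  // compress map outputs
    conf.setMapOutputCompressorClass(GzipCodec.class);
  }
}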

+
+ +
+ Job Outputs + +

Applications can control compression of job-outputs via the + + FileOutputFormat.setCompressOutput(JobConf, boolean) api and the + CompressionCodec to be used can be specified via the + + FileOutputFormat.setOutputCompressorClass(JobConf, Class) api.

+ +

If the job outputs are to be stored in the + + SequenceFileOutputFormat, the required + SequenceFile.CompressionType (i.e. RECORD / + BLOCK - defaults to RECORD) can be + specified via the + + SequenceFileOutputFormat.setOutputCompressionType(JobConf, + SequenceFile.CompressionType) api.
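Putting the two paragraphs above together, a hedged sketch of a job writing block-compressed SequenceFile output (the codec and compression type are example choices, not requirements):

import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;

public class JobOutputCompressionExample {
  public static void main(String[] args) {
    JobConf conf = new JobConf(JobOutputCompressionExample.class);
    conf.setOutputFormat(SequenceFileOutputFormat.class);
    FileOutputFormat.setCompressOutput(conf, true);
    FileOutputFormat.setOutputCompressorClass(conf, GzipCodec.class);
    SequenceFileOutputFormat.setOutputCompressionType(
        conf, SequenceFile.CompressionType.BLOCK);
  }
}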

+
+
+ +
+ Skipping Bad Records +

Hadoop provides an option where a certain set of bad input + records can be skipped when processing map inputs. Applications + can control this feature through the + + SkipBadRecords class.

+ +

This feature can be used when map tasks crash deterministically + on certain input. This usually happens due to bugs in the + map function. Usually, the user would have to fix these bugs. + This is, however, not possible sometimes. The bug may be in third + party libraries, for example, for which the source code is not + available. In such cases, the task never completes successfully even + after multiple attempts, and the job fails. With this feature, only + a small portion of data surrounding the + bad records is lost, which may be acceptable for some applications + (those performing statistical analysis on very large data, for + example).

+ +

By default this feature is disabled. For enabling it, + refer to + SkipBadRecords.setMapperMaxSkipRecords(Configuration, long) and + + SkipBadRecords.setReducerMaxSkipGroups(Configuration, long). +

+ +

With this feature enabled, the framework gets into 'skipping + mode' after a certain number of map failures. For more details, + see + SkipBadRecords.setAttemptsToStartSkipping(Configuration, int). + In 'skipping mode', map tasks maintain the range of records being + processed. To do this, the framework relies on the processed record + counter. See + SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS and + + SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS. + This counter enables the framework to know how many records have + been processed successfully, and hence, what record range caused + a task to crash. On further attempts, this range of records is + skipped.

+ +

The number of records skipped depends on how frequently the + processed record counter is incremented by the application. + It is recommended that this counter be incremented after every + record is processed. This may not be possible in some applications + that typically batch their processing. In such cases, the framework + may skip additional records surrounding the bad record. Users can + control the number of skipped records through + + SkipBadRecords.setMapperMaxSkipRecords(Configuration, long) and + + SkipBadRecords.setReducerMaxSkipGroups(Configuration, long). + The framework tries to narrow the range of skipped records using a + binary search-like approach. The skipped range is divided into two + halves and only one half gets executed. On subsequent + failures, the framework figures out which half contains + bad records. A task will be re-executed till the + acceptable skipped value is met or all task attempts are exhausted. + To increase the number of task attempts, use + + JobConf.setMaxMapAttempts(int) and + + JobConf.setMaxReduceAttempts(int). +
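A hedged sketch that enables record skipping for a job (all numeric limits are arbitrary example values, not recommendations):

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.SkipBadRecords;

public class SkipBadRecordsExample {
  public static void main(String[] args) {
    JobConf conf = new JobConf(SkipBadRecordsExample.class);
    // Enter 'skipping mode' after 2 failed attempts of a task.
    SkipBadRecords.setAttemptsToStartSkipping(conf, 2);
    // Accept losing at most 100 map records / 10 reduce key groups
    // around a bad record.
    SkipBadRecords.setMapperMaxSkipRecords(conf, 100);
    SkipBadRecords.setReducerMaxSkipGroups(conf, 10);
    // Allow extra attempts so the binary search over the bad range can run.
    conf.setMaxMapAttempts(8);
    conf.setMaxReduceAttempts(8);
  }
}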

+ +

Skipped records are written to HDFS in the sequence file + format, for later analysis. The location can be changed through + + SkipBadRecords.setSkipOutputPath(JobConf, Path). +

+ +
+ +
+
+ +
+ Example: WordCount v2.0 + +

Here is a more complete WordCount which uses many of the + features provided by the Map/Reduce framework we discussed so far.

+ +

This needs the HDFS to be up and running, especially for the + DistributedCache-related features. Hence it only works with a + pseudo-distributed or + fully-distributed + Hadoop installation.

+ +
+ Source Code
WordCount.java
1. + package org.myorg; +
2.
3. + import java.io.*; +
4. + import java.util.*; +
5.
6. + import org.apache.hadoop.fs.Path; +
7. + import org.apache.hadoop.filecache.DistributedCache; +
8. + import org.apache.hadoop.conf.*; +
9. + import org.apache.hadoop.io.*; +
10. + import org.apache.hadoop.mapred.*; +
11. + import org.apache.hadoop.util.*; +
12.
13. + public class WordCount extends Configured implements Tool { +
14.
15. +    + + public static class Map extends MapReduceBase + implements Mapper<LongWritable, Text, Text, IntWritable> { + +
16.
17. +      + + static enum Counters { INPUT_WORDS } + +
18.
19. +      + + private final static IntWritable one = new IntWritable(1); + +
20. +      + private Text word = new Text(); +
21.
22. +      + private boolean caseSensitive = true; +
23. +      + private Set<String> patternsToSkip = new HashSet<String>(); +
24.
25. +      + private long numRecords = 0; +
26. +      + private String inputFile; +
27.
28. +      + public void configure(JobConf job) { +
29. +        + + caseSensitive = job.getBoolean("wordcount.case.sensitive", true); + +
30. +        + inputFile = job.get("map.input.file"); +
31.
32. +        + if (job.getBoolean("wordcount.skip.patterns", false)) { +
33. +          + Path[] patternsFiles = new Path[0]; +
34. +          + try { +
35. +            + + patternsFiles = DistributedCache.getLocalCacheFiles(job); + +
36. +          + } catch (IOException ioe) { +
37. +            + + System.err.println("Caught exception while getting cached files: " + + StringUtils.stringifyException(ioe)); + +
38. +          + } +
39. +          + for (Path patternsFile : patternsFiles) { +
40. +            + parseSkipFile(patternsFile); +
41. +          + } +
42. +        + } +
43. +      + } +
44.
45. +      + private void parseSkipFile(Path patternsFile) { +
46. +        + try { +
47. +          + + BufferedReader fis = + new BufferedReader(new FileReader(patternsFile.toString())); + +
48. +          + String pattern = null; +
49. +          + while ((pattern = fis.readLine()) != null) { +
50. +            + patternsToSkip.add(pattern); +
51. +          + } +
52. +        + } catch (IOException ioe) { +
53. +          + + System.err.println("Caught exception while parsing the cached file '" + + patternsFile + "' : " + + StringUtils.stringifyException(ioe)); + + +
54. +        + } +
55. +      + } +
56.
57. +      + + public void map(LongWritable key, Text value, + OutputCollector<Text, IntWritable> output, + Reporter reporter) throws IOException { + +
58. +        + + String line = + (caseSensitive) ? value.toString() : + value.toString().toLowerCase(); + +
59.
60. +        + for (String pattern : patternsToSkip) { +
61. +          + line = line.replaceAll(pattern, ""); +
62. +        + } +
63.
64. +        + StringTokenizer tokenizer = new StringTokenizer(line); +
65. +        + while (tokenizer.hasMoreTokens()) { +
66. +          + word.set(tokenizer.nextToken()); +
67. +          + output.collect(word, one); +
68. +          + reporter.incrCounter(Counters.INPUT_WORDS, 1); +
69. +        + } +
70.
71. +        + if ((++numRecords % 100) == 0) { +
72. +          + + reporter.setStatus("Finished processing " + numRecords + + " records " + "from the input file: " + + inputFile); + +
73. +        + } +
74. +      + } +
75. +    + } +
76.
77. +    + + public static class Reduce extends MapReduceBase implements + Reducer<Text, IntWritable, Text, IntWritable> { + +
78. +      + + public void reduce(Text key, Iterator<IntWritable> values, + OutputCollector<Text, IntWritable> output, + Reporter reporter) throws IOException { + +
79. +        + int sum = 0; +
80. +        + while (values.hasNext()) { +
81. +          + sum += values.next().get(); +
82. +        + } +
83. +        + output.collect(key, new IntWritable(sum)); +
84. +      + } +
85. +    + } +
86.
87. +    + public int run(String[] args) throws Exception { +
88. +      + + JobConf conf = new JobConf(getConf(), WordCount.class); + +
89. +      + conf.setJobName("wordcount"); +
90.
91. +      + conf.setOutputKeyClass(Text.class); +
92. +      + conf.setOutputValueClass(IntWritable.class); +
93.
94. +      + conf.setMapperClass(Map.class); +
95. +      + conf.setCombinerClass(Reduce.class); +
96. +      + conf.setReducerClass(Reduce.class); +
97.
98. +      + conf.setInputFormat(TextInputFormat.class); +
99. +      + conf.setOutputFormat(TextOutputFormat.class); +
100.
101. +      + + List<String> other_args = new ArrayList<String>(); + +
102. +      + for (int i=0; i < args.length; ++i) { +
103. +        + if ("-skip".equals(args[i])) { +
104. +          + + DistributedCache.addCacheFile(new Path(args[++i]).toUri(), conf); + +
105. +          + + conf.setBoolean("wordcount.skip.patterns", true); + +
106. +        + } else { +
107. +          + other_args.add(args[i]); +
108. +        + } +
109. +      + } +
110.
111. +      + FileInputFormat.setInputPaths(conf, new Path(other_args.get(0))); +
112. +      + FileOutputFormat.setOutputPath(conf, new Path(other_args.get(1))); +
113.
114. +      + JobClient.runJob(conf); +
115. +      + return 0; +
116. +    + } +
117.
118. +    + + public static void main(String[] args) throws Exception { + +
119. +      + + int res = ToolRunner.run(new Configuration(), new WordCount(), + args); + +
120. +      + System.exit(res); +
121. +    + } +
122. + } +
123.
+
+ +
+ Sample Runs + +

Sample text-files as input:

+

+ $ bin/hadoop dfs -ls /usr/joe/wordcount/input/
+ /usr/joe/wordcount/input/file01
+ /usr/joe/wordcount/input/file02
+
+ $ bin/hadoop dfs -cat /usr/joe/wordcount/input/file01
+ Hello World, Bye World!
+
+ $ bin/hadoop dfs -cat /usr/joe/wordcount/input/file02
+ Hello Hadoop, Goodbye to hadoop. +

+ +

Run the application:

+

+ + $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount + /usr/joe/wordcount/input /usr/joe/wordcount/output + +

+ +

Output:

+

+ + $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000 + +
+ Bye 1
+ Goodbye 1
+ Hadoop, 1
+ Hello 2
+ World! 1
+ World, 1
+ hadoop. 1
+ to 1
+

+ +

Notice that the inputs differ from the first version we looked at, + and how they affect the outputs.

+ +

Now, let's plug in a pattern-file which lists the word-patterns to be ignored, via the DistributedCache.

+ +

+ $ hadoop dfs -cat /user/joe/wordcount/patterns.txt
+ \.
+ \,
+ \!
+ to
+

+ +

Run it again, this time with more options:

+

+ + $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount + -Dwordcount.case.sensitive=true /usr/joe/wordcount/input + /usr/joe/wordcount/output -skip /user/joe/wordcount/patterns.txt + +

+ +

As expected, the output:

+

+ + $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000 + +
+ Bye 1
+ Goodbye 1
+ Hadoop 1
+ Hello 2
+ World 2
+ hadoop 1
+

+ +

Run it once more, this time switch-off case-sensitivity:

+

+ + $ bin/hadoop jar /usr/joe/wordcount.jar org.myorg.WordCount + -Dwordcount.case.sensitive=false /usr/joe/wordcount/input + /usr/joe/wordcount/output -skip /user/joe/wordcount/patterns.txt + +

+ +

Sure enough, the output:

+

+ + $ bin/hadoop dfs -cat /usr/joe/wordcount/output/part-00000 + +
+ bye 1
+ goodbye 1
+ hadoop 2
+ hello 2
+ world 2
+

+
+ +
+ Highlights + +

The second version of WordCount improves upon the + previous one by using some features offered by the Map/Reduce framework: +

+
    +
  • + Demonstrates how applications can access configuration parameters + in the configure method of the Mapper (and + Reducer) implementations (lines 28-43). +
  • +
  • + Demonstrates how the DistributedCache can be used to + distribute read-only data needed by the jobs. Here it allows the user + to specify word-patterns to skip while counting (line 104). +
  • +
  • + Demonstrates the utility of the Tool interface and the + GenericOptionsParser to handle generic Hadoop + command-line options (lines 87-116, 119). +
  • +
  • + Demonstrates how applications can use Counters (line 68) + and how they can set application-specific status information via + the Reporter instance passed to the map (and + reduce) method (line 72). +
  • +
+ +
+
+ +

+ Java and JNI are trademarks or registered trademarks of + Sun Microsystems, Inc. in the United States and other countries. +

+ + + +
diff --git a/src/docs/src/documentation/content/xdocs/native_libraries.xml b/src/docs/src/documentation/content/xdocs/native_libraries.xml new file mode 100644 index 0000000..19aacad --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/native_libraries.xml @@ -0,0 +1,211 @@ + + + + + + + +
+ Native Libraries Guide +
+ + + +
+ Purpose + +

Hadoop has native implementations of certain components for reasons of + both performance and non-availability of Java implementations. These + components are available in a single, dynamically-linked, native library. + On the *nix platform it is libhadoop.so. This document describes + the usage and details on how to build the native libraries.

+
+ +
+ Components + +

Hadoop currently has the following + + compression codecs as the native components:

+ + +

Of the above, the availability of native hadoop libraries is imperative + for the gzip and bzip2 compression codecs to work.

+
+ +
+ Usage + +

It is fairly simple to use the native hadoop libraries:

+ +
    +
  • + Take a look at the + supported platforms. +
  • +
  • + Either download the pre-built + 32-bit i386-Linux native hadoop libraries (available as part of hadoop + distribution in lib/native directory) or + build them yourself. +
  • +
• Make sure you have any or all of the zlib-1.2, gzip-1.2, and bzip2-1.0 (or newer) packages installed for your platform, depending on your needs.
  • +
+ +

The bin/hadoop script ensures that the native hadoop + library is on the library path via the system property + -Djava.library.path=<path>.

+ +

To check that everything went all right, check the hadoop log files for:

+ +

+ + DEBUG util.NativeCodeLoader - Trying to load the custom-built + native-hadoop library... +
+ + INFO util.NativeCodeLoader - Loaded the native-hadoop library + +

+ +

If something goes wrong, then:

+

+ + INFO util.NativeCodeLoader - Unable to load native-hadoop library for + your platform... using builtin-java classes where applicable + +

+
+ +
+ Supported Platforms + +

The Hadoop native library is supported on *nix platforms only. Unfortunately it is known not to work on Cygwin and Mac OS X, and it has mainly been used on the GNU/Linux platform.

+ +

It has been tested on the following GNU/Linux distributions:

+ + +

On all the above platforms a 32/64 bit Hadoop native library will work + with a respective 32/64 bit jvm.

+
+ +
+ Building Native Hadoop Libraries + +

Hadoop native library is written in + ANSI C and built using + the GNU autotools-chain (autoconf, autoheader, automake, autoscan, libtool). + This means it should be straight-forward to build them on any platform with + a standards compliant C compiler and the GNU autotools-chain. + See supported platforms.

+ +

In particular the various packages you would need on the target + platform are:

+ + +

Once you have the pre-requisites use the standard build.xml + and pass along the compile.native flag (set to + true) to build the native hadoop library:

+ +

$ ant -Dcompile.native=true <target>

+ +

The native hadoop library is not built by default since not everyone is interested in building it.

+ +

You should see the newly-built native hadoop library in:

+ +

$ build/native/<platform>/lib

+ +

where <platform> is a combination of the system properties ${os.name}-${os.arch}-${sun.arch.data.model}; e.g. Linux-i386-32.

+ +
+ Notes + +
    +
  • + It is mandatory to have the + zlib, gzip, and bzip2 + development packages on the target platform for building the + native hadoop library; however for deployment it is sufficient to + install one of them if you wish to use only one of them. +
  • +
• It is necessary to have the correct 32/64-bit zlib libraries, matching the 32/64-bit jvm on the target platform, for building and deploying the native hadoop library.
  • +
+
+
+
+ Loading native libraries through DistributedCache +

The user can load native shared libraries through the DistributedCache by distributing and symlinking the library files.

+ +

Here is an example, describing how to distribute the library and + load it from map/reduce task.

+
    +
  1. First copy the library to the HDFS.
    + bin/hadoop fs -copyFromLocal mylib.so.1 /libraries/mylib.so.1 +
  2. +
  3. The job launching program should contain the following:
    + DistributedCache.createSymlink(conf);
    + DistributedCache.addCacheFile("hdfs://host:port/libraries/mylib.so.1#mylib.so", conf); + +
  4. +
  5. The map/reduce task can contain:
    + System.loadLibrary("mylib.so"); +
  6. +
+
+ + +
diff --git a/src/docs/src/documentation/content/xdocs/quickstart.xml b/src/docs/src/documentation/content/xdocs/quickstart.xml new file mode 100644 index 0000000..3ed77f5 --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/quickstart.xml @@ -0,0 +1,295 @@ + + + + + + + +
+ Quick Start +
+ + + +
+ Purpose + +

The purpose of this document is to help you get a single-node Hadoop + installation up and running very quickly so that you can get a flavour + of the Hadoop Distributed File System + (see HDFS Architecture) and + the Map/Reduce framework; that is, perform simple operations on HDFS and + run example jobs.

+
+ +
+ Pre-requisites + +
+ Supported Platforms + +
    +
  • + GNU/Linux is supported as a development and production platform. + Hadoop has been demonstrated on GNU/Linux clusters with 2000 nodes. +
  • +
  • + Win32 is supported as a development platform. Distributed + operation has not been well tested on Win32, so it is not + supported as a production platform. +
  • +
+
+ +
+ Required Software +

Required software for Linux and Windows include:

+
    +
  1. + JavaTM 1.6.x, preferably from Sun, must be installed. +
  2. +
  3. + ssh must be installed and sshd must + be running to use the Hadoop scripts that manage remote Hadoop + daemons. +
  4. +
+

Additional requirements for Windows include:

+
    +
  1. + Cygwin - Required for shell + support in addition to the required software above. +
  2. +
+
+ +
+ Installing Software + +

If your cluster doesn't have the requisite software you will need to + install it.

+ +

For example on Ubuntu Linux:

+

+ $ sudo apt-get install ssh
+ $ sudo apt-get install rsync +

+ +

On Windows, if you did not install the required software when you + installed cygwin, start the cygwin installer and select the packages:

+
    +
  • openssh - the Net category
  • +
+
+ +
+ +
+ Download + +

+ To get a Hadoop distribution, download a recent + stable release from one of the Apache Download + Mirrors. +

+
+ +
+ Prepare to Start the Hadoop Cluster +

+ Unpack the downloaded Hadoop distribution. In the distribution, edit the + file conf/hadoop-env.sh to define at least + JAVA_HOME to be the root of your Java installation. +

+ +

+ Try the following command:
+ $ bin/hadoop
+ This will display the usage documentation for the hadoop + script. +

+ +

Now you are ready to start your Hadoop cluster in one of the three supported + modes: +

+
    +
  • Local (Standalone) Mode
  • +
  • Pseudo-Distributed Mode
  • +
  • Fully-Distributed Mode
  • +
+
+ +
+ Standalone Operation + +

By default, Hadoop is configured to run in a non-distributed + mode, as a single Java process. This is useful for debugging.

+ +

+ The following example copies the unpacked conf directory to + use as input and then finds and displays every match of the given regular + expression. Output is written to the given output directory. +
+ $ mkdir input
+ $ cp conf/*.xml input
+ + $ bin/hadoop jar hadoop-*-examples.jar grep input output 'dfs[a-z.]+' +
+ $ cat output/* +

+
+ +
+ Pseudo-Distributed Operation + +

Hadoop can also be run on a single-node in a pseudo-distributed mode + where each Hadoop daemon runs in a separate Java process.

+ +
+ Configuration +

Use the following: +
+ conf/core-site.xml:

+ + + + + + + + + +
<configuration>
  <property>
    <name>fs.default.name</name>
    <value>hdfs://localhost:9000</value>
  </property>
</configuration>
+ +


conf/hdfs-site.xml:

+ + + + + + + + + +
<configuration>
  <property>
    <name>dfs.replication</name>
    <value>1</value>
  </property>
</configuration>
+ +


conf/mapred-site.xml:

+ + + + + + + + + +
<configuration>
  <property>
    <name>mapred.job.tracker</name>
    <value>localhost:9001</value>
  </property>
</configuration>
+
+ +
+ Setup passphraseless <em>ssh</em> + +

+ Now check that you can ssh to the localhost without a passphrase:
+ $ ssh localhost +

+ +

+ If you cannot ssh to localhost without a passphrase, execute the + following commands:
+ $ ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa
+ $ cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys +

+
+ +
+ Execution + +

+ Format a new distributed-filesystem:
+ $ bin/hadoop namenode -format +

+ +

+ Start the hadoop daemons:
+ $ bin/start-all.sh +

+ +

The hadoop daemon log output is written to the + ${HADOOP_LOG_DIR} directory (defaults to + ${HADOOP_HOME}/logs).

+ +

Browse the web interface for the NameNode and the JobTracker; by + default they are available at:

• NameNode - http://localhost:50070/
• JobTracker - http://localhost:50030/

+ Copy the input files into the distributed filesystem:
+ $ bin/hadoop fs -put conf input +

+ +

+ Run some of the examples provided:
+ + $ bin/hadoop jar hadoop-*-examples.jar grep input output 'dfs[a-z.]+' + +

+ +

Examine the output files:

+

+ Copy the output files from the distributed filesystem to the local filesystem and examine them:
+ $ bin/hadoop fs -get output output
+ $ cat output/* +

+

or

+

+ View the output files on the distributed filesystem:
+ $ bin/hadoop fs -cat output/* +

+ +

+ When you're done, stop the daemons with:
+ $ bin/stop-all.sh +

+
+
+ +
+ Fully-Distributed Operation + +

For information on setting up fully-distributed, non-trivial clusters + see Hadoop Cluster Setup.

+
+ +

+ Java and JNI are trademarks or registered trademarks of + Sun Microsystems, Inc. in the United States and other countries. +

+ + + +
diff --git a/src/docs/src/documentation/content/xdocs/service_level_auth.xml b/src/docs/src/documentation/content/xdocs/service_level_auth.xml new file mode 100644 index 0000000..86ad486 --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/service_level_auth.xml @@ -0,0 +1,233 @@ + + + + + + + +
+ Service Level Authorization Guide +
+ + + +
+ Purpose + +

This document describes how to configure and manage Service Level + Authorization for Hadoop.

+
+ +
+ Pre-requisites + +

Ensure that Hadoop is installed, configured and setup correctly. More + details:

+ +
+ +
+ Overview + +

Service Level Authorization is the initial authorization mechanism to ensure clients connecting to a particular Hadoop service have the necessary, pre-configured, permissions and are authorized to access the given service. For example, a Map/Reduce cluster can use this mechanism to allow a configured list of users/groups to submit jobs.

+ +

The ${HADOOP_CONF_DIR}/hadoop-policy.xml configuration file + is used to define the access control lists for various Hadoop services.

+ +

Service Level Authorization is performed much before other access control checks such as file-permission checks, access control on job queues etc.

+
+ +
+ Configuration + +

This section describes how to configure service-level authorization via the configuration file ${HADOOP_CONF_DIR}/hadoop-policy.xml.

+ +
+ Enable Service Level Authorization + +

By default, service-level authorization is disabled for Hadoop. To + enable it set the configuration property + hadoop.security.authorization to true + in ${HADOOP_CONF_DIR}/core-site.xml.

+
+ +
+ Hadoop Services and Configuration Properties + +

This section lists the various Hadoop services and their configuration + knobs:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PropertyService
security.client.protocol.aclACL for ClientProtocol, which is used by user code via the + DistributedFileSystem.
security.client.datanode.protocol.aclACL for ClientDatanodeProtocol, the client-to-datanode protocol + for block recovery.
security.datanode.protocol.aclACL for DatanodeProtocol, which is used by datanodes to + communicate with the namenode.
security.inter.datanode.protocol.aclACL for InterDatanodeProtocol, the inter-datanode protocol + for updating generation timestamp.
security.namenode.protocol.aclACL for NamenodeProtocol, the protocol used by the secondary + namenode to communicate with the namenode.
security.inter.tracker.protocol.aclACL for InterTrackerProtocol, used by the tasktrackers to + communicate with the jobtracker.
security.job.submission.protocol.aclACL for JobSubmissionProtocol, used by job clients to communicate with the jobtracker for job submission, querying job status etc.
security.task.umbilical.protocol.aclACL for TaskUmbilicalProtocol, used by the map and reduce + tasks to communicate with the parent tasktracker.
security.refresh.policy.protocol.aclACL for RefreshAuthorizationPolicyProtocol, used by the + dfsadmin and mradmin commands to refresh the security policy in-effect. +
+
+ +
+ Access Control Lists + +

${HADOOP_CONF_DIR}/hadoop-policy.xml defines an access + control list for each Hadoop service. Every access control list has a + simple format:

+ +

The lists of users and groups are both comma-separated lists of names. The two lists are separated by a space.

+ +

Example: user1,user2 group1,group2.

+ +

Add a blank at the beginning of the line if only a list of groups is to be provided; equivalently, a comma-separated list of users followed by a space or nothing implies only a set of given users.

+ +

A special value of * implies that all users are + allowed to access the service.

+
+ +
+ Refreshing Service Level Authorization Configuration + +

The service-level authorization configuration for the NameNode and + JobTracker can be changed without restarting either of the Hadoop master + daemons. The cluster administrator can change + ${HADOOP_CONF_DIR}/hadoop-policy.xml on the master nodes and + instruct the NameNode and JobTracker to reload their respective + configurations via the -refreshServiceAcl switch to + dfsadmin and mradmin commands respectively.

+ +

Refresh the service-level authorization configuration for the + NameNode:

+

+ $ bin/hadoop dfsadmin -refreshServiceAcl +

+ +

Refresh the service-level authorization configuration for the + JobTracker:

+

+ $ bin/hadoop mradmin -refreshServiceAcl +

+ +

Of course, one can use the + security.refresh.policy.protocol.acl property in + ${HADOOP_CONF_DIR}/hadoop-policy.xml to restrict access to + the ability to refresh the service-level authorization configuration to + certain users/groups.

+ +
+ +
+ Examples + +

Allow only users alice, bob and users in the + mapreduce group to submit jobs to the Map/Reduce cluster:

+ + + + + + +
  <property>
    <name>security.job.submission.protocol.acl</name>
    <value>alice,bob mapreduce</value>
  </property>
+ +

Allow only DataNodes running as the users who belong to the + group datanodes to communicate with the NameNode:

+ + + + + + +
  <property>
    <name>security.datanode.protocol.acl</name>
    <value> datanodes</value>
  </property>
+ +

Allow any user to talk to the HDFS cluster as a DFSClient:

+ + + + + + +
  <property>
    <name>security.client.protocol.acl</name>
    <value>*</value>
  </property>
+ +
+
+ + + +
diff --git a/src/docs/src/documentation/content/xdocs/site.xml b/src/docs/src/documentation/content/xdocs/site.xml new file mode 100644 index 0000000..53eb99e --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/site.xml @@ -0,0 +1,281 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/docs/src/documentation/content/xdocs/streaming.xml b/src/docs/src/documentation/content/xdocs/streaming.xml new file mode 100644 index 0000000..d48acc1 --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/streaming.xml @@ -0,0 +1,668 @@ + + + + + + + +
+Hadoop Streaming
+ +
+Hadoop Streaming + +

+Hadoop streaming is a utility that comes with the Hadoop distribution. The utility allows you to create and run Map/Reduce jobs with any executable or script as the mapper and/or the reducer. For example: +

+ +$HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/hadoop-streaming.jar \ + -input myInputDirs \ + -output myOutputDir \ + -mapper /bin/cat \ + -reducer /bin/wc + +
+ +
+How Does Streaming Work +

+In the above example, both the mapper and the reducer are executables that read the input from stdin (line by line) and emit the output to stdout. The utility will create a Map/Reduce job, submit the job to an appropriate cluster, and monitor the progress of the job until it completes. +

When an executable is specified for mappers, each mapper task will launch the executable as a separate process when the mapper is initialized. As the mapper task runs, it converts its inputs into lines and feeds the lines to the stdin of the process. In the meantime, the mapper collects the line-oriented outputs from the stdout of the process and converts each line into a key/value pair, which is collected as the output of the mapper. By default, the prefix of a line up to the first tab character is the key and the rest of the line (excluding the tab character) will be the value. If there is no tab character in the line, then the entire line is considered as the key and the value is null. However, this can be customized, as discussed later.

+

+When an executable is specified for reducers, each reducer task will launch the executable as a separate process when the reducer is initialized. As the reducer task runs, it converts its input key/value pairs into lines and feeds the lines to the stdin of the process. In the meantime, the reducer collects the line-oriented outputs from the stdout of the process, converts each line into a key/value pair, which is collected as the output of the reducer. By default, the prefix of a line up to the first tab character is the key and the rest of the line (excluding the tab character) is the value. However, this can be customized, as discussed later.

+This is the basis for the communication protocol between the Map/Reduce framework and the streaming mapper/reducer. +

+You can supply a Java class as the mapper and/or the reducer. The above example is equivalent to: +

+ +$HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/hadoop-streaming.jar \ + -input myInputDirs \ + -output myOutputDir \ + -mapper org.apache.hadoop.mapred.lib.IdentityMapper \ + -reducer /bin/wc + +

The user can specify stream.non.zero.exit.is.failure as true or false to make a streaming task that exits with a non-zero status be considered a Failure or a Success respectively. By default, streaming tasks exiting with non-zero status are considered to be failed tasks.

+ +
+ +
+Package Files With Job Submissions +

+You can specify any executable as the mapper and/or the reducer. The executables do not need to pre-exist on the machines in the cluster; however, if they don't, you will need to use "-file" option to tell the framework to pack your executable files as a part of job submission. For example: +

+ +$HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/hadoop-streaming.jar \ + -input myInputDirs \ + -output myOutputDir \ + -mapper myPythonScript.py \ + -reducer /bin/wc \ + -file myPythonScript.py + +

+The above example specifies a user defined Python executable as the mapper. The option "-file myPythonScript.py" causes the python executable to be shipped to the cluster machines as a part of job submission.

+

+In addition to executable files, you can also package other auxiliary files (such as dictionaries, configuration files, etc) that may be used by the mapper and/or the reducer. For example: +

+ +$HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/hadoop-streaming.jar \ + -input myInputDirs \ + -output myOutputDir \ + -mapper myPythonScript.py \ + -reducer /bin/wc \ + -file myPythonScript.py \ + -file myDictionary.txt + +
+ +
+Streaming Options and Usage + +
+Mapper-Only Jobs +

+Often, you may want to process input data using a map function only. To do this, simply set mapred.reduce.tasks to zero. The Map/Reduce framework will not create any reducer tasks. Rather, the outputs of the mapper tasks will be the final output of the job. +

+To be backward compatible, Hadoop Streaming also supports the "-reduce NONE" option, which is equivalent to "-D mapred.reduce.tasks=0". +

+
+ +
+Specifying Other Plugins for Jobs +

+Just as with a normal Map/Reduce job, you can specify other plugins for a streaming job: +

+ + -inputformat JavaClassName + -outputformat JavaClassName + -partitioner JavaClassName + -combiner streamingCommand or JavaClassName + +

+The class you supply for the input format should return key/value pairs of Text class. If you do not specify an input format class, the TextInputFormat is used as the default. Since the TextInputFormat returns keys of LongWritable class, which are actually not part of the input data, the keys will be discarded; only the values will be piped to the streaming mapper. +

+The class you supply for the output format is expected to take key/value pairs of Text class. If you do not specify an output format class, the TextOutputFormat is used as the default. +

+
+ +
+Large files and archives in Hadoop Streaming + +

+The -files and -archives options allow you to make files and archives available to the tasks. The argument is a URI to the file or archive that you have already uploaded to HDFS. These files and archives are cached across jobs. You can retrieve the host and fs_port values from the fs.default.name config variable. +

+

+Here are examples of the -files option: +

+ +-files hdfs://host:fs_port/user/testfile.txt#testlink + +

+In the above example, the part of the url after # is used as the symlink name that is created in the current working directory of tasks. So the tasks will have a symlink called testlink in the cwd that points to a local copy of testfile.txt. Multiple entries can be specified as: +

+ +-files hdfs://host:fs_port/user/testfile1.txt#testlink1 -files hdfs://host:fs_port/user/testfile2.txt#testlink2 + +

+The -archives option allows you to copy jars locally to the cwd of tasks and automatically unjar the files. For example: +

+ +-archives hdfs://host:fs_port/user/testfile.jar#testlink3 + +

+In the example above, a symlink testlink3 is created in the current working directory of tasks. This symlink points to the directory that stores the unjarred contents of the uploaded jar file. +

+

+Here's another example of the -archives option. Here, the input.txt file has two lines specifying the names of the two files: testlink/cache.txt and testlink/cache2.txt. "testlink" is a symlink to the archived directory, which has the files "cache.txt" and "cache2.txt". +

+ +$HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/hadoop-streaming.jar \ + -input "/user/me/samples/cachefile/input.txt" \ + -mapper "xargs cat" \ + -reducer "cat" \ + -output "/user/me/samples/cachefile/out" \ + -archives 'hdfs://hadoop-nn1.example.com/user/me/samples/cachefile/cachedir.jar#testlink' \ + -D mapred.map.tasks=1 \ + -D mapred.reduce.tasks=1 \ + -D mapred.job.name="Experiment" + +$ ls test_jar/ +cache.txt cache2.txt + +$ jar cvf cachedir.jar -C test_jar/ . +added manifest +adding: cache.txt(in = 30) (out= 29)(deflated 3%) +adding: cache2.txt(in = 37) (out= 35)(deflated 5%) + +$ hadoop dfs -put cachedir.jar samples/cachefile + +$ hadoop dfs -cat /user/me/samples/cachefile/input.txt +testlink/cache.txt +testlink/cache2.txt + +$ cat test_jar/cache.txt +This is just the cache string + +$ cat test_jar/cache2.txt +This is just the second cache string + +$ hadoop dfs -ls /user/me/samples/cachefile/out +Found 1 items +/user/me/samples/cachefile/out/part-00000 <r 3> 69 + +$ hadoop dfs -cat /user/me/samples/cachefile/out/part-00000 +This is just the cache string +This is just the second cache string + + +
+ +
+Specifying Additional Configuration Variables for Jobs +

+You can specify additional configuration variables by using "-D <n>=<v>". For example: +

+ +$HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/hadoop-streaming.jar \ + -input myInputDirs \ + -output myOutputDir \ + -mapper org.apache.hadoop.mapred.lib.IdentityMapper\ + -reducer /bin/wc \ + -D mapred.reduce.tasks=2 + +

+The -D mapred.reduce.tasks=2 in the above example specifies to use two reducers for the job. +

+

+For more details on the jobconf parameters see: +mapred-default.html

+
+ +
+Other Supported Options +

+Other options you may specify for a streaming job are described here: +

+ + + + + + + +
ParameterOptional/Required Description
-cmdenv name=value Optional Pass env var to streaming commands
-inputreader JavaClassName Optional For backwards-compatibility: specifies a record reader class (instead of an input format class)
-verbose Optional Verbose output
+

+Streaming supports the Hadoop generic command line options. The supported parameters are listed in the table below. The general command line syntax is:
bin/hadoop command [genericOptions] [commandOptions] +

+ + + + + + + + + + + + +
ParameterOptional/Required Description
-conf configuration_file Optional specify an application configuration file
-D property=value Optional use value for given property
-fs host:port or local Optional specify a namenode
-jt host:port or local Optional specify a job tracker
-files Optional specify comma separated files to be copied to the map reduce cluster
-archives Optional specify comma separated archives to be unarchived on the compute machines
+ +

+To change the local temp directory use: +

+ + -D dfs.data.dir=/tmp + +

+To specify additional local temp directories use: +

+ + -D mapred.local.dir=/tmp/local + -D mapred.system.dir=/tmp/system + -D mapred.temp.dir=/tmp/temp + +

+For more details on jobconf parameters see: +mapred-default.html

+

+To set an environment variable in a streaming command use: +

+ +-cmdenv EXAMPLE_DIR=/home/example/dictionaries/ + +
+
+ +
+More usage examples + +
+Customizing the Way to Split Lines into Key/Value Pairs +

+As noted earlier, when the Map/Reduce framework reads a line from the stdout of the mapper, it splits the line into a key/value pair. By default, the prefix of the line up to the first tab character is the key and the rest of the line (excluding the tab character) is the value. +

+

+However, you can customize this default. You can specify a field separator other than the tab character (the default), and you can specify the nth (n >= 1) character rather than the first character in a line (the default) as the separator between the key and value. For example: +

+ + +$HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/hadoop-streaming.jar \ + -input myInputDirs \ + -output myOutputDir \ + -mapper org.apache.hadoop.mapred.lib.IdentityMapper \ + -reducer org.apache.hadoop.mapred.lib.IdentityReducer \ + -D stream.map.output.field.separator=. \ + -D stream.num.map.output.key.fields=4 + +

+In the above example, "-D stream.map.output.field.separator=." specifies "." as the field separator for the map outputs, and the prefix up to the fourth "." in a line will be the key and the rest of the line (excluding the fourth ".") will be the value. If a line has less than four "."s, then the whole line will be the key and the value will be an empty Text object (like the one created by new Text("")). +
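+As a purely illustrative sketch, for a hypothetical input line a.b.c.d.e.f the two settings above split the key and value as follows:
+
+# hypothetical illustration of stream.map.output.field.separator=. together
+# with stream.num.map.output.key.fields=4
+line = "a.b.c.d.e.f"
+parts = line.split(".")
+key = ".".join(parts[:4])     # "a.b.c.d" -- prefix up to the fourth "."
+value = ".".join(parts[4:])   # "e.f"     -- rest of the line
+print(key + "\t" + value)
+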

+Similarly, you can use "-D stream.reduce.output.field.separator=SEP" and "-D stream.num.reduce.output.fields=NUM" to specify the nth field separator in a line of the reduce outputs as the separator between the key and the value. +

+

Similarly, you can specify "stream.map.input.field.separator" and +"stream.reduce.input.field.separator" as the input separator for map/reduce +inputs. By default the separator is the tab character.

+
+ + +
+A Useful Partitioner Class (secondary sort, the -partitioner org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner option) +

+Hadoop has a library class, +KeyFieldBasedPartitioner, +that is useful for many applications. This class allows the Map/Reduce +framework to partition the map outputs based on certain key fields, not +the whole keys. For example: +

+ +$HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/hadoop-streaming.jar \ + -input myInputDirs \ + -output myOutputDir \ + -mapper org.apache.hadoop.mapred.lib.IdentityMapper \ + -reducer org.apache.hadoop.mapred.lib.IdentityReducer \ + -partitioner org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner \ + -D stream.map.output.field.separator=. \ + -D stream.num.map.output.key.fields=4 \ + -D map.output.key.field.separator=. \ + -D mapred.text.key.partitioner.options=-k1,2\ + -D mapred.reduce.tasks=12 + +

+Here, -D stream.map.output.field.separator=. and -D stream.num.map.output.key.fields=4 are as explained in the previous example. The two variables are used by streaming to identify the key/value pair of the mapper. +

+The map output keys of the above Map/Reduce job normally have four fields +separated by ".". However, the Map/Reduce framework will partition the map +outputs by the first two fields of the keys using the +-D mapred.text.key.partitioner.options=-k1,2 option. +Here, -D map.output.key.field.separator=. specifies the separator +for the partition. This guarantees that all the key/value pairs with the +same first two fields in the keys will be partitioned into the same reducer. +

+This is effectively equivalent to specifying the first two fields as the primary key and the next two fields as the secondary. The primary key is used for partitioning, and the combination of the primary and secondary keys is used for sorting. A simple illustration is shown here: +

+

+Output of map (the keys)

+11.12.1.2 +11.14.2.3 +11.11.4.1 +11.12.1.1 +11.14.2.2 + + +

+Partition into 3 reducers (the first 2 fields are used as keys for partition)

+11.11.4.1 +----------- +11.12.1.2 +11.12.1.1 +----------- +11.14.2.3 +11.14.2.2 + +

+Sorting within each partition for the reducer (all 4 fields used for sorting)

+11.11.4.1 +----------- +11.12.1.1 +11.12.1.2 +----------- +11.14.2.2 +11.14.2.3 + +
+
+A Useful Comparator Class +

+Hadoop has a library class, +KeyFieldBasedComparator, +that is useful for many applications. This class provides a subset of features +provided by the Unix/GNU Sort. For example: +

+ +$HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/hadoop-streaming.jar \ + -input myInputDirs \ + -output myOutputDir \ + -mapper org.apache.hadoop.mapred.lib.IdentityMapper \ + -reducer org.apache.hadoop.mapred.lib.IdentityReducer \ + -D mapred.output.key.comparator.class=org.apache.hadoop.mapred.lib.KeyFieldBasedComparator \ + -D stream.map.output.field.separator=. \ + -D stream.num.map.output.key.fields=4 \ + -D map.output.key.field.separator=. \ + -D mapred.text.key.comparator.options=-k2,2nr\ + -D mapred.reduce.tasks=12 + +

+The map output keys of the above Map/Reduce job normally have four fields +separated by ".". However, the Map/Reduce framework will sort the +outputs by the second field of the keys using the +-D mapred.text.key.comparator.options=-k2,2nr option. +Here, -n specifies that the sorting is numerical sorting and +-r specifies that the result should be reversed. A simple illustration +is shown below: +

+

+Output of map (the keys)

+ +11.12.1.2 +11.14.2.3 +11.11.4.1 +11.12.1.1 +11.14.2.2 + +

+Sorting output for the reducer (where the second field is used for sorting)

+ +11.14.2.3 +11.14.2.2 +11.12.1.2 +11.12.1.1 +11.11.4.1 + +
+ +
+Working with the Hadoop Aggregate Package (the -reduce aggregate option) +

+Hadoop has a library package called +Aggregate. +Aggregate provides a special reducer class and a special combiner class, and +a list of simple aggregators that perform aggregations such as "sum", "max", +"min" and so on over a sequence of values. Aggregate allows you to define a +mapper plugin class that is expected to generate "aggregatable items" for each +input key/value pair of the mappers. The combiner/reducer will aggregate those +aggregatable items by invoking the appropriate aggregators. +

+To use Aggregate, simply specify "-reducer aggregate": +

+ +$HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/hadoop-streaming.jar \ + -input myInputDirs \ + -output myOutputDir \ + -mapper myAggregatorForKeyCount.py \ + -reducer aggregate \ + -file myAggregatorForKeyCount.py \ + -D mapred.reduce.tasks=12 + +

+The Python program myAggregatorForKeyCount.py looks like this: +

+
+#!/usr/bin/python
+
+import sys
+
+def generateLongCountToken(id):
+    return "LongValueSum:" + id + "\t" + "1"
+
+def main(argv):
+    # Read lines from stdin and emit an aggregatable "LongValueSum" token
+    # keyed on the first tab-separated field of each line.
+    line = sys.stdin.readline()
+    while line:
+        line = line[:-1]
+        fields = line.split("\t")
+        print(generateLongCountToken(fields[0]))
+        line = sys.stdin.readline()
+
+if __name__ == "__main__":
+    main(sys.argv)
+
+ +
+Field Selection (similar to the Unix 'cut' command) +

+Hadoop has a library class, org.apache.hadoop.mapred.lib.FieldSelectionMapReduce, that effectively allows you to process text data like the unix "cut" utility. The map function defined in the class treats each input key/value pair as a list of fields. You can specify the field separator (the default is the tab character). You can select an arbitrary list of fields as the map output key, and an arbitrary list of fields as the map output value. Similarly, the reduce function defined in the class treats each input key/value pair as a list of fields. You can select an arbitrary list of fields as the reduce output key, and an arbitrary list of fields as the reduce output value. For example: +

+
+$HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/hadoop-streaming.jar \
+    -input myInputDirs \
+    -output myOutputDir \
+    -mapper org.apache.hadoop.mapred.lib.FieldSelectionMapReduce \
+    -reducer org.apache.hadoop.mapred.lib.FieldSelectionMapReduce \
+    -partitioner org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner \
+    -D map.output.key.field.separator=. \
+    -D mapred.text.key.partitioner.options=-k1,2 \
+    -D mapred.data.field.separator=. \
+    -D map.output.key.value.fields.spec=6,5,1-3:0- \
+    -D reduce.output.key.value.fields.spec=0-2:5- \
+    -D mapred.reduce.tasks=12
+

+The option "-D map.output.key.value.fields.spec=6,5,1-3:0-" specifies key/value selection for the map outputs. Key selection spec and value selection spec are separated by ":". In this case, the map output key will consist of fields 6, 5, 1, 2, and 3. The map output value will consist of all fields (0- means field 0 and all +the subsequent fields). +
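+As a purely illustrative sketch, applying the map output spec 6,5,1-3:0- to a hypothetical line whose '.'-separated fields are 0.1.2.3.4.5.6.7 gives:
+
+# hypothetical illustration of map.output.key.value.fields.spec=6,5,1-3:0-
+fields = "0.1.2.3.4.5.6.7".split(".")
+key = [fields[6], fields[5]] + fields[1:4]       # fields 6, 5, 1, 2, 3
+value = fields[0:]                               # field 0 and all later fields
+print(".".join(key) + "\t" + ".".join(value))    # -> 6.5.1.2.3   0.1.2.3.4.5.6.7
+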

+The option "-D reduce.output.key.value.fields.spec=0-2:5-" specifies +key/value selection for the reduce outputs. In this case, the reduce +output key will consist of fields 0, 1, 2 (corresponding to the original +fields 6, 5, 1). The reduce output value will consist of all fields starting +from field 5 (corresponding to all the original fields). +

+
+
+ +
+Frequently Asked Questions + +
+How do I use Hadoop Streaming to run an arbitrary set of (semi-)independent tasks? +

+Often you do not need the full power of Map Reduce, but only need to run multiple instances of the same program - either on different parts of the data, or on the same data, but with different parameters. You can use Hadoop Streaming to do this. +
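+One minimal sketch of this pattern (the mapper script run_one.py and the program ./mytool below are hypothetical placeholders): list one task parameter per line in an HDFS input file, submit a map-only streaming job with -D mapred.reduce.tasks=0, and let the mapper run the program once for each line it receives.
+
+#!/usr/bin/env python
+# run_one.py -- hypothetical mapper: each input line is one task parameter for
+# an independent run of a per-task program shipped to the cluster with -file.
+import subprocess
+import sys
+
+for line in sys.stdin:
+    param = line.strip()
+    if not param:
+        continue
+    # Run the per-task program on this parameter and emit its exit status so
+    # the job output records what happened to each task.
+    rc = subprocess.call(["./mytool", param])
+    print("%s\t%d" % (param, rc))
+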

+ +
+ +
+How do I process files, one per map? +

+As an example, consider the problem of zipping (compressing) a set of files across the Hadoop cluster. You can achieve this using either of these methods: +

    +
  1. Hadoop Streaming and custom mapper script:
      +
    • Generate a file containing the full HDFS path of the input files. Each map task would get one file name as input.
    • +
    • Create a mapper script which, given a filename, will fetch the file to local disk, gzip it, and put it back in the desired output directory (see the sketch after this list)
    • +
  2. +
  3. The existing Hadoop Framework:
      +
    • Add these commands to your main function:
+
+       FileOutputFormat.setCompressOutput(conf, true);
+       FileOutputFormat.setOutputCompressorClass(conf, org.apache.hadoop.io.compress.GzipCodec.class);
+       // Use a non-splittable input format so that each map task receives one whole file.
+       conf.setInputFormat(NonSplitableTextInputFormat.class);
+       conf.setNumReduceTasks(0);
    • +
    • Write your map function: + + + public void map(WritableComparable key, Writable value, + OutputCollector output, + Reporter reporter) throws IOException { + output.collect((Text)value, null); + } +
    • +
    • Note that the output filename will not be the same as the original filename
    • +
  4. +
+
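+A minimal sketch of the mapper script for method 1 above (the script name zip_one_file.py and the output directory /user/me/zipped are hypothetical placeholders):
+
+#!/usr/bin/env python
+# zip_one_file.py -- hypothetical mapper: each input line is the HDFS path of
+# one file; fetch it, gzip it locally, and put the result back into HDFS.
+import os
+import subprocess
+import sys
+
+for line in sys.stdin:
+    path = line.strip()
+    if not path:
+        continue
+    name = os.path.basename(path)
+    subprocess.check_call(["hadoop", "dfs", "-get", path, name])
+    subprocess.check_call(["gzip", name])
+    subprocess.check_call(["hadoop", "dfs", "-put", name + ".gz",
+                           "/user/me/zipped/" + name + ".gz"])
+    # Emit the original path so the job output records which files were zipped.
+    print("%s\tok" % path)
+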
+ +
+How many reducers should I use? +

+See the Hadoop Wiki for details: Reducer +

+
+ +
+If I set up an alias in my shell script, will that work after -mapper, i.e. say I do: alias c1='cut -f1'. Will -mapper "c1" work? +

+Using an alias will not work, but variable substitution is allowed as shown in this example: +

+ +$ hadoop dfs -cat samples/student_marks +alice 50 +bruce 70 +charlie 80 +dan 75 + +$ c2='cut -f2'; $HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/hadoop-streaming.jar \ + -input /user/me/samples/student_marks + -mapper \"$c2\" -reducer 'cat' + -output /user/me/samples/student_out + -D mapred.job.name='Experiment' + +$ hadoop dfs -ls samples/student_out +Found 1 items/user/me/samples/student_out/part-00000 <r 3> 16 + +$ hadoop dfs -cat samples/student_out/part-00000 +50 +70 +75 +80 + +
+ +
+Can I use UNIX pipes? For example, will -mapper "cut -f1 | sed s/foo/bar/g" work? +

+Currently this does not work and gives a "java.io.IOException: Broken pipe" error. This is probably a bug that needs to be investigated. +

+
+ +
+When I run a streaming job by <strong>distributing large executables</strong> (for example, 3.6G) through the -file option, I get a "No space left on device" error. What do I do? +

+The jar packaging happens in a directory pointed to by the configuration variable stream.tmpdir. The default value of stream.tmpdir is /tmp. Set the value to a directory with more space: +

+ +-D stream.tmpdir=/export/bigspace/... + +
+ +
+How do I specify multiple input directories? +

+You can specify multiple input directories with multiple '-input' options: +

+ hadoop jar hadoop-streaming.jar -input '/user/foo/dir1' -input '/user/foo/dir2' + +
+ +
+How do I generate output files with gzip format? +

+Instead of plain text files, you can generate gzip files as your output. Pass '-D mapred.output.compress=true -D mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec' as options to your streaming job. +

+
+ +
+How do I provide my own input/output format with streaming? +

+At least as late as version 0.14, Hadoop does not support multiple jar files. So, when specifying your own custom classes you will have to pack them along with the streaming jar and use the custom jar instead of the default hadoop streaming jar. +

+
+ +
+How do I parse XML documents using streaming? +

+You can use the record reader StreamXmlRecordReader to process XML documents. +

+ +hadoop jar hadoop-streaming.jar -inputreader "StreamXmlRecord,begin=BEGIN_STRING,end=END_STRING" ..... (rest of the command) + +

+Anything found between BEGIN_STRING and END_STRING would be treated as one record for map tasks. +

+
+ +
+How do I update counters in streaming applications? +

+A streaming process can use stderr to emit counter information. A line of the form reporter:counter:<group>,<counter>,<amount> should be sent to stderr to update the counter. +
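+As an illustrative sketch, a hypothetical Python mapper (counter_mapper.py, with a made-up counter group MyGroup) could update a counter while passing its input through unchanged:
+
+#!/usr/bin/env python
+# counter_mapper.py -- hypothetical example: counts empty input lines through a
+# streaming counter; regular key/value output still goes to stdout.
+import sys
+
+for line in sys.stdin:
+    if line.strip() == "":
+        # Counter updates are written to stderr in the reporter:counter format.
+        sys.stderr.write("reporter:counter:MyGroup,EmptyLines,1\n")
+    sys.stdout.write(line)
+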

+
+ +
+How do I update status in streaming applications? +

+A streaming process can use stderr to emit status information. To set a status, reporter:status:<message> should be sent to stderr. +
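+As an illustrative sketch, a hypothetical long-running reducer (status_reducer.py) could report its progress the same way:
+
+#!/usr/bin/env python
+# status_reducer.py -- hypothetical example: counts input records and reports a
+# status message to the framework every 100,000 records.
+import sys
+
+count = 0
+for line in sys.stdin:
+    count += 1
+    if count % 100000 == 0:
+        # Status messages are written to stderr in the reporter:status format.
+        sys.stderr.write("reporter:status:processed %d records\n" % count)
+print("total\t%d" % count)
+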

+
+ +
+ +
diff --git a/src/docs/src/documentation/content/xdocs/tabs.xml b/src/docs/src/documentation/content/xdocs/tabs.xml new file mode 100644 index 0000000..d55e3e4 --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/tabs.xml @@ -0,0 +1,36 @@ + + + + + + + + + + + + + + diff --git a/src/docs/src/documentation/content/xdocs/vaidya.xml b/src/docs/src/documentation/content/xdocs/vaidya.xml new file mode 100644 index 0000000..1de2401 --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/vaidya.xml @@ -0,0 +1,171 @@ + + + + + + + +
+ Vaidya Guide +
+ + + +
+ Purpose + +

This document describes various user-facing facets of Hadoop Vaidya, a performance diagnostic tool for map/reduce jobs. It + describes how to execute a default set of rules against your map/reduce job counters and + how to write and execute new rules to detect specific performance problems. +

+

A few sample test rules are provided with the tool, with the objective of growing the rules database over time. You are welcome to contribute new rules for everyone's benefit; to do so, follow the How to Contribute procedure specified on the Apache Hadoop website. +

+
+ +
+ Pre-requisites + +

Ensure that Hadoop is installed and configured. More details:

+
    +
  • + Make sure HADOOP_HOME environment variable is set. +
  • +
  • + Make sure Java is installed and configured as a part of the Hadoop installation. +
  • +
+
+ +
+ Overview + +

Hadoop Vaidya (Vaidya in Sanskrit means "one who knows", or "a physician") is a rule-based performance diagnostic tool for Map/Reduce jobs. It performs a post-execution analysis of a map/reduce job by parsing and collecting execution statistics from the job history and job configuration files. It runs a set of predefined tests/rules against the job execution statistics to diagnose various performance problems. Each test rule detects a specific performance problem with the Map/Reduce job and provides targeted advice to the user. The tool generates an XML report based on the evaluation results of the individual test rules. +

+ +
+ +
+ Terminology + +

This section describes the main concepts and terminology involved with Hadoop Vaidya:

+
    +
  • PostExPerformanceDiagnoser: This class extends the base Diagnoser class and acts as a driver for post execution performance analysis of Map/Reduce Jobs. + It detects performance inefficiencies by executing a set of performance diagnosis rules against the job execution statistics.
  • +
  • Job Statistics: This includes the job configuration information (job.xml) and the various counters logged by the Map/Reduce job as part of the job history log file. The counters are parsed and collected into the Job Statistics data structures, which contain global job-level aggregate counters and a set of counters for each Map and Reduce task.
  • +
  • Diagnostic Test/Rule: This is the program logic that detects an inefficiency of the M/R job based on the job statistics. The description of the test is specified as an XML element (DiagnosticTest) in a test description file, e.g. the default test description file $HADOOP_HOME/contrib/vaidya/conf/postex_diagnosis_tests.xml. The actual logic is coded as a Java class and referenced in the DiagnosticTest XML element.
  • +
+

+

The following section describes the DiagnosticTest XML element in a diagnostic test description file:

+
    +
  • DiagnosticTest{Title}: Specifies a short name/description of the test.
  • +
  • DiagnosticTest{ClassName}: Specifies fully qualified class name that implements the test logic.
  • +
  • DiagnosticTest{Description}: Specifies a full description of the test rule.
  • +
  • DiagnosticTest{Importance}: Specifies a declarative value for overall importance of the test rule. (Values: High, Medium, Low)
  • +
  • DiagnosticTest{SuccessThreshold}: This is a threshold value specified by the test case writer; if the impact level of the test case is less than this threshold, the test is declared PASSED (or NEGATIVE). The impact level is calculated and returned by each test's evaluate function and indicates the degree of the problem the job has with respect to the condition being evaluated.
  • +
  • DiagnosticTest{Prescription}: This is a targeted advice written by the test case adviser for the user to follow when test is not PASSED.
  • +
  • DiagnosticTest{InputElement}: This is a test-specific input that the test writer can optionally provide. It is supplied to the individual test case class so that the test writer can use it within the test case. This is typically test configuration information, so that the test writer need not change the Java code for the test case but can instead configure it using these input values.
  • +
+

+

The following section describes the performance analysis report generated by the tool in XML format:

+
    +
  • PostExPerformanceDiagnosticReport: This is a document (root) element from the XML report generated by the tool.
  • +
  • TestReportElement: This is an XML report element from the test report document, one for each individual test specified in the test description file.
  • +
  • TestReportElement{TestTitle}: Will be included from DiagnosticTest{Title}
  • +
  • TestReportElement{TestDescription}: Will be included from DiagnosticTest{Description}
  • +
  • TestReportElement{TestImportance}: Will be included from DiagnosticTest{Importance}
  • +
  • TestReportElement{TestSeverity}: This is a product of Test Impact level and Test Importance. It indicates overall severity of the test.
  • +
  • TestReportElement{ReferenceDetails}: This is test-specific runtime information provided by the test case to support the test result and severity. Typically the test writer should print the test impact level in this section.
  • +
  • TestReportElement{TestResults}: This is the boolean outcome of the test based on the SuccessThreshold specified by the test writer in the DiagnosticTest description. PASSED (NEGATIVE) indicates no problem, while FAILED (POSITIVE) indicates a potential problem with the job for the given test case.
  • +
  • TestReportElement{TestPrescription}: This will be included from DiagnosticTest{Prescription}, unless the test case writer overrides it in the test case class through the getPrescription() method.
  • +
+
+ +
+ How to Execute the Hadoop Vaidya Tool + +

The script to execute Hadoop Vaidya is in the $HADOOP_HOME/contrib/vaidya/bin/ directory. It comes with a default set of rules defined in the file $HADOOP_HOME/contrib/vaidya/conf/postex_diagnosis_tests.xml.

+
    +
  • Make sure HADOOP_HOME environment variable is set and Java is installed and configured.
  • +
  • Execute the Hadoop Vaidya script with -help (or without any arguments) to get the command line help. e.g. + =>sh $HADOOP_HOME/contrib/vaidya/bin/vaidya.sh -help
  • +
  • The user needs to supply the job's configuration file (-jobconf job_conf.xml), the job history log file (-joblog job_history_log_file), and optionally a test description file (-testconf postex_diagnostic_tests.xml). If the test description file is not specified, the default one is picked up from the Hadoop Vaidya jar ($HADOOP_HOME/contrib/vaidya/hadoop-{version}-vaidya.jar). This default test description file is also available at the following location for users to make a local copy, modify, and add new test rules: $HADOOP_HOME/contrib/vaidya/conf/postex_diagnostic_tests.xml
  • +
  • Use the -report report_file option to store the XML report in the specified report_file.
  • +
+
+ +
+ How to Write and Execute your own Tests +

Writing and executing your own test rules is not very hard. You can take a look at the Hadoop Vaidya source code for the existing set of tests, available in the Hadoop SVN repository. The default set of tests is under the "postexdiagnosis/tests/" folder.

+
    +
  • A test class for your new test case should extend the org.apache.hadoop.vaidya.DiagnosticTest class and override the following three methods from the base class:
      +
    • evaluate()
    • +
    • getPrescription()
    • +
    • getReferenceDetails()
    • +
    +
  • +
  • Make a local copy of the $HADOOP_HOME/contrib/vaidya/conf/postex_diagnostic_tests.xml file or create a new test description XML file.
  • +
  • Add the test description element for your new test case to this test description file.
  • +
  • Compile your new test class (or multiple classes), archive them into a Jar file and add it to the CLASSPATH e.g. (export CLASSPATH=$CLASSPATH:newtests.jar)
  • +
  • Execute the Hadoop Vaidya script with the job configuration, the job history log, and a reference to the newly created test description file using the -testconf option. + =>sh $HADOOP_HOME/contrib/vaidya/bin/vaidya.sh -joblog job_history_log_file -jobconf job.xml -testconf new_test_description_file -report report.xml
  • +
+
+ +

+

+

+ Java and JNI are trademarks or registered trademarks of + Sun Microsystems, Inc. in the United States and other countries. +

+ + + +
\ No newline at end of file diff --git a/src/docs/src/documentation/resources/images/architecture.gif b/src/docs/src/documentation/resources/images/architecture.gif new file mode 100644 index 0000000000000000000000000000000000000000..8d84a23b07a3f326f8b2a3eb912192048d15733c GIT binary patch literal 15461 zcmds-)mzjLu*Sce?6M0iAstKix^#%JbR#Y8f}kiNB_-&RlCpq=lr)mk(k>}2DUE=F zh=_oQh{5@t^Jkp7nyY!9xtQmjc|W78ucN5soBe zdwY9oYHDOMd1`8^u&|IwB-Yp0`}p{Dbac$k&1Gd}m6n!XzI^${jT}-F3|M>Wrg@t8(ef`?CYqPVnf`WpLjg4Qwe$CF#zH;RXKR^G~ zt5;DdR8dh84u@-MYMPv!tgf!^?CdNrFK=sWo1dS5_wJpChsV;=(vKfMbai#Bs;aiP zw<{_tHa9mdEG&Y8f_8Rxii?YLa&mHWb9;Mxo12@vySt5yj7myM-n@Bp|Ni~<_V%u> zuHoU~^z`(osHld91|A-s&!0aF3k%=9d-v6=SE{P2nVFe%backX#x*rH`uh5Qetu$N zV$;*p85tQzM@Myab@KA^Po6v(A0L;Ll$4T^diwOKjEv0I)>cbPOKWRuWo6~^^0JDG ziod^qPft%;TH4Ui(AwJC$;nAyU!Sb3Y;A4r!NI}Jn>W9F`4SQmGBYzHDk^GaWtE?w z9~c<;;>C++&z>nNDo#vHh=_=UhK9z)#Ysp={QC9l`}gl&US7MqyUNPSpFVx^_V!*` zSy^0MOifL-wY7D0bhNRtNk~Y@%ga+zQVI?ZHZwCD9UXObb#-@l=jP@Hfk1}PMA3uI%WMq8s-~ow5dj0w}000;N7yl*yZ-Vz16a)aBfW-fH^uIF! zATEF)B&S||VQ&TpBJn-r{O0;ecFFpw@{QROhw zST>eL<=>m9-&8)4D+c4S6vQq`E~NDLF+ni$N#?z^)$uHe%qwj|AXK~bDj1+dm;-`bLS)uL5D7*j zpJg4#*8Fjr(tl8(bmjVus9*@59QK77M4?5940jSX-j9Zqpd5Uv&v6GRYWxLxc;h<` zj69*mW%Dk;qPmeUnHR%8MWR5QA^?Wq5)=V^9HjUHHRTl8n0;x;$Ys1!l0)EplV9oua^+K_qr~XFxCrM58CW7za*{E65UBI^za? z6=c1kF72&XlKXZ0R$8#2D1oG<+EI}Udu8)(Hc|(EmnjV=sy`KMQ}AgUCllyye(wo2 zbm;*x^M!3`XZ<4Q-7lX5O>=i9;_TqGXKXy_Ve8UP@Qz=f84DwaF3uT?BrD@G;~^7^ z?w;hKTc2OPb_q=l=?r*g=e>n77LSJ;ZCr}Uj(g&Fs~g~?YWE$mOQWX#$K`x)>N!ej!Jd0WR}JhBW6h$FN_wk! zzK*^BxBG3Ju58hIb?vqW(dwkGG~J?GF!w)8RjB%RwS>Xc&_u}m;F}W2-}aD1FUy`c zr4Kgs10qH;&3;$3P*c$`_KRP_eAg0M3)}Q%d9Yj*D|uiJ#Il*{Z%u#VnqX5$y2p8h z4ehw7Kbka~`w+rj?#t1ky(4QunKmLEp51pyw#sAY;T>ZHH#S^_b6l;$^hI z_oXI&D1Q|mdGh<9)-Xd62>V8xdeHZF{lSqxeM;Bfo6B!Ak0()UVJElMuZ8!`-ng9k zV|gqm{HLQY-NVB7DJdCeI|??crd)`Pf4>jkT)y~ovcU6*3f*$=;@_X2bZY;Q>Nn0K zn^X$|NKk&82r3mk39ft?fk`Ta3hVWPBz$1}SP+2pqd;->kyvyQLUI>`zI%uTb8fg( z2dC2}5dlflXcqP@4E8%gd)CF~2sL-HM|$+xD!K)>P;i)*An z@V*{1DyN9f~fl zAT5menL;gp7%4suTtoOoHwQ2s0F#UV!gS`scJ0BdDOJ1~=hv0-%O3G+0KbVwF#dXla}* zW6#&m?prowCxNKusGL;%2Eo>y4-a<$nZyCMp)_} zlHS#4i4b&iA$8~A;)%vwG5e1fdd5}eS)+5sb^xT#g<9L5j|Ac*Ak@f&OT|hchf)C4 zHl=-ClN!QN6;R4Vfq~_TqrXiUDk>5MqF6s|mpRw)#P}nWH6GPp z4AhCfpqN+?f!2#HFpS;C_3nL-g$H(4&73dPZ>WuqP+JzZB&>SzZ6+*hJp?KyX*}!| z07pkpLVJA8zFWVm>4t0t?&;Xz$P;O+Opqobq# z<$an$5cR%n>f*e%ErpVVVFkDZQ0vRF=E}x8O zhnQ;eoz*9Z0^ish^ZK5`Y$#x4J$)T~=Igz;GOHRpO|R}hh^B=8`GBQsA67d6pY3UM z{=4X5rYJO}|2ruIr8Ffy`z_mXkkl?LevbfGma8sN`Bb@HG*brabD8m8-6|Xij_?W( z0C-qRO;bAND!6~tNr#TDPV85VZoi}v#X>;C1UPlQJmZp==PBRlXwcCQuiMokFdty? 
z5hJOHajnW`{_bq*x_;S$UE71#((_c;f?wD>riy$ze%;M^m`~Y}fn}THdSstYNCMKS zA4Zlfm`J%RrCr$^bdX{(j1fH0)PkVj~I4M}|pk z0_Sgp*oY)ybu(Hj--1_|(E+1LE#z%em=zYx-WI{R7{QtyA&80OQ;ZZ?j1Y>8V8K&A zo+ClbPWcz0x%g8lz;_!F2;JF#d@AW zmFs~3o5z8{kAveLD^o!3hY&vug17;XBq2E{NKF86pF((1V1;b^;5JE>z8f{A&&4$_H;T9V(c2k6^Y4W?^ratPMOLp1=9 zP)M3cNSah3kOm+CGK{evBCV7zYnv`_o344GM-@P%iY6e0eZbK~s(^Y(W<5L+k8~zK zHBdq(*26P5fSY94mHHH;4Y2VB7-o50_tH>^-x*70n?J( z{UQ~pX%M}l!e#pKGO&*BQ!R@*E#d|=sGjO79&vMu`l@BNl9r|n1rm&bsu5rhiP2bX z&|@sp(+B)p52T9+DN!I`HkgtRL}}_KSoG0Nw_s&c=;aNlED`$Psb6Dpn49y}cROJL zBMez@g#DKoxJn6VsD=&^>J7jnuvBy#kYZVlO9AR^b0A&}nBodlAAY|@%wE$QJXxDzuX#nT0Kw>;JV@0R0>>l|XBX#~o5p*cv zf(>dKiom~Fweh9ASyw=dg5)4U=GpGIV=~*PpsVruO4x$`bc}ig()tq+Y|ayDPT8_(7%0P@vc3ce3GtkS3I$;8Q+d+LC9-xU@&<+PT?>g=1fsr>96~M>iMGT7 zcG1k-%6A0pu>DP?bY?#Lzc3S9fi0ggT>80I5uq{}sG94AT2szfsh@GdN~0F6&=JMA zioy&+MW{DR=UdBRyk%j{0)Kob z3z#jfd@TfvYy->yGQiQ_aLiEC@hKC|TQ?{zbHlIhJheQBK5NUjE@v7L?L-}&H|AW> zH_33kl)aMp=e!m||J=(L^A;2cR5QcvYk(c%BR4>-48%xwjc2-xAXy;DVEt1N6R~Ba z#X8ZoVJxQM$q+}k9%Hr`b54h}3_t7BJ}!hi_i8FP%m>8XiK4|+-%V61xeM~W^6LHy z5HuDTV%UVNm$8Vf#!m+Z8DO|RHyQahCo;PwHrweZ-iY3|K7!x4g>P}>sD%Tqw7#I9 zl?-ki%m-}DcImpQ^z{%Bz(NKU*|9w7bV`f4{qqOtdq(r6SfKTavuJ&1!a2yn(A{^t zou;8$UaEs=)`7fvRk{Ii{=}I`-V~Mtuoyx4nU3{(~MA2#AkVPU>WK-*Z!W z%nVDV_>RhX@9|kH^MqJ5gP?abA4s74UjG$0yvTikEE3x@Y=ln(UWqQhk1ECc6}I-e z%9eqOJx+0dPAI<0hQSwyu9i0H7yivQs?OHH&L35+D2iqU0Dl3oD*=r%g+WaH3C$d6 z11`>11TQOZS1{F4tdAW6MJG%NgbaC!02C> z7$)b;Hv%>qp`{ETRIvrxV<|Et(MKY$rU$E!*q(>IK}-X0#myc#FxgN;2Mj8k9Ud@E zmHm8$sf_M&YzVw!!5!4!HxUF9@Z@gD;ij4aHZ;b2jri~t{v%!E(kon(Dl#>VQp)to z1D-=$yp$UIajM8v|Ef_#m*)yUN43nt&!I5ssY&eNlY!o9d)Q!F%Jr-Yn^>t?3HdAXn%KIqz1 zS_uH#m^SYZo6&pUL_dm^B;$W#V>BeeP; zXstj%OcAr$1MPjMhN6F%&RCQBrrS=SKU;q&E@`}NA#82o;nxMVLfGwRnI8HED6`?) zk|O`fZ$*E3^28!tol#FN&dbmb3BdO0(liYxv5mt4o-Mz(>7YyS$0kk%?=O+)3kHG|9P+=tK$6)yscY zM#DwFPxlXXchDsBX8*_H*&W(9!NJ5|JHw2_NqRlpH82CyQc(^2a zYmeM{H69!6ZJ5b5vu<>u&(`*%2wm}ce7LRA5DglswwL)=^uDesFRY0+_{HyYB3OsE za_aIWcuqpOWoSoo8bm)S_wR&$SZ#QjGE-NgGAj3Q^u7APJKiA6-mb=CHIVYQrAeu< z^N0QJa`Cz)bBmy^QI6BU+>L7hbs?M0Oh`&slRrDx$vLO1314-DP^~X}s)~%8$rAjD z;1wAefA1lKDn|Dn_PU}^S2jD$VB4QAo^IKiOllu7{o8xznMQg9Qbd{#3`QpY<{ip# z;tI&-C{SNTU4Q)d>*qM>-!_^MMB)16UQ`@Ce>%frGc2M~!^XiN6BfY^ z*t1U*r=p9x7WodSeHZM>s20xeQ5U+PR^noD@7Oy=_WA}scW+Qym(-S<6wkLKt;<<; zd6=AAZA6@k#B+>!a;yn@INueBC%k*y;M0BZg|_H>`a*}VKD}(Bct6$g$t17K_2VD* zE{3nK7(c?`zZzY`{4l4}hx<@#hO^R6@y(hXy{7$^;4eX2_=<`Lj5#pi#p>Hq5>(lbsj*#aahYxCS>J@vj)Qm%V;q&@R282ZGQNg0Z|B zxTp?lYS{>vDjegbVrE$qDhpHtHwrr=7FW;FAms5{y;HmcMZVhwu7}f~9FnykP!FyI zm9EFh`|dp*#(r3fi~^(g!)2}T{wpJ!_!oqTvAu_yxR1$!t( z0tOeDL<7PL3C=|q1n6Cw^5o^iq8l3z8H2}7v5NX5+37sDO)MLUiLW>n+q4Ox5P+Fk zq-~ILRtKYHI5fickYe^0_{E51N?+YEar~ltoa?U%m8cC`J}3$7EO)f$1BJM#~j$RHN=!V&!Y!X>35&0*VN&>qbdO zQwe^~7wAD5Itw3`KE#=~bUag>37@_=m>^=v^59ARsQ28<2y*c1APDB?H$4haS{Q4wXxQWm8w>f-c8koH`g!EQ}Bo^*d}rP#F}-&Liew z^08<==O{YNd7&l^+TBpzVVV7vY_!228na;jzQS3}4DCt20~|><;vlY>PW2&6n|4GR z)Jgirqu{uRy7kg2X8G5&>cj)Dg{eCuIR+!(Y=ncxZy6^v#Z;s*WB|xqDe~v%pQ5Q~@#;n}@ z{e4Ow_V?tn**gb5_TR1HWv>~_e&5Q4UkpbQ=E6u(18fTaf?usC|IFkU^Z6IDTaxuK zbg%mHzqsz8(XL{dOZdqLTvq?;0DTnpec1OB7*AL_<@z(yG18RquXdt_wFKOnwR@a? 
z#xwFL^$)c_^HTo9(pd)dVws@)2xX&rR<;bWNp(%r36}Dyt!8b@#A%aAb@l8*9TRIB z{oe_7`mb0mVa=N)Csm|ndB3^ewA4N68L#&EWBO5If)mS9eiJP#2^u;eD32U152RP* z`aAgud`JPs5@1-Sw~-ckcA6?Ax!^lK(e@u*Sd?t~E*-!gXII@Iirufgcsd>T#rNqt zPD3C{5DN;JrR9`@DHkBd!*<(eIDbA?xH?&pVq_1jHx4`p?@q^T|C*P?T`x4=CX1+B zS_0CCC4Am=@mwJrTG_+@XJeWkI*J;H7+97wWISo(d*8gPZ8LP7DCvr%{`&Z({jQ-4 z_3tcBE~WZND_ysCBi0C3Aud2g!iBL@5QSWu8%VFDPZUrjz$hML=%$?4@As`>LYG@`d#5D z-4A5!Y_C;DnsO|;E5GWfl=MvWd%__Ho82>gV<~?=UFGV=C_+zop|g!NcPpC%OI=bS zmnK;_?wJ=S+^I^I{;UPQJ`@*F2+8t+NHX!lF7JX63`7t6wR7vXA*(w73;;vMLqrF# zNJ&=p_f8|d)aV1a%HkH(k9USHEkMMv_O`L&W9fHnLl*%`N^6ov*XPq~O`g=QNbCdH zD+g;Se47F;5QKFZ;Gi#T04C^j5&(YNH_fWZ1ci8*eev*VGw#Q&4A0j3l^$JP1yTLG zzvfMx=)FYrYV@d}Sb(7hV5ABLaR8f-sa$yxE8E^d-M<~brDCI80(iCoKG};#ZCvza}56{`jmAc947H&(5cw^n`b@+FdgfKFt7M z*`8WqrvvZT{(SgFV$LNeO3dxOjX0)bzvq85?lUZHYRgl?ATr2ZJ+w8;k~H5qjW|-L z9!wWZK}7mMP-%yey!9ZM$gX;7a+s@tFx!aP1>#Pat_|q>Ijp+0?in|SLlV+;@;I~8$mUn9l)nC~Po$NNIqbddTc@T9ZDo8UsF?_H zawkC658*s7tiaa6eT)Hz?s~d*GwGQDd^!6cF&gdK7PR|Nk&VdSV=R^Yt_JNBQX)UG z9m(Ar^UOEm)~R_R*bF0AY5Y}vaLYs*=3b~fW${`^o4O`?iTCU93sIBJtibORbNUIj zskd(nET7%XsmoefrDu(Rn*^)l{JwihM{`hp5DIFa2=@;7K%~kVI-!zF<}>*Oo@*z9 zan|9{*FJ5`D;y>~b4VLTYYv{*CScY+@d8_15+nnI*q>(ZbNak6e)?#1q9YI&I zbiHwhHW{g74WmIHzWJSXcWqTaPJ`HwtY*tCq8gE)DI!v8QKKb*9-MdOXZw{;A#cO~ z`GZU&)^4m4VIos7+n(QAM?LAz83d@*HNsxoFzB_twADygS|F2zG~QP5A-x5K|UUWC5; z?Yw*E6fx7U@>^2OQ4m#62)DlAW=$?vUX0zba$%U zxMDsCKl;^HcW15ZBK(kr_RlMPS)bXt&08<)#**rNH?u5TxE8Z+K8?Y?`K{Y2*FS>U zs!bp#iZ3mHrN#M>V61eUBiXljIWA`7;V)9|iOItC2w)pZoI3@6R&H#TcA8B|ibte^ zNCPN}>+uuxpKc1!tyL5kzE|-mG;*$_xay8v&{z!T3x$yH(q!dG5eiHQ8_BPy8k8uV z`~t+I+NPGbGH%8$5VS-p0j>8VwE0LV;m&G6SNhHdsR3n-{ zlrW86K4Po?2gaZ9KEL>Z!T+;NIpREiKdFEwQ^V~OsqOjW0&mJn)-!1+W}rng9ScHrtk z;lH(b$K!4t<)ht|^wj$T{Tm$^OX68+m2>m2My38qROtuU+@&rFj?mj8**VlD&sc{B z@U$qsAt7G6Tb}6KPcx=LfIGTj%q-VsPOJb!NUYR)nU*OOS}bDFy+Da+nTiQ;h+D9E z!u`tg1Ym|+C1ZkP_&d9DVr93_)K1pUN^pN0Fi?g)c_o(0M;q$@~65G#Bcb zJQ|?wkgghw5jktNvH+jnm3xRXwEolfc2v-C6xPsmnaT$Xm=DJ=RmQ6Da#RV%u4E-b zvTyKZ$6sM}X>5D!ksTkEot*fjyE5O&VVp5QDL$KdOTQbrNwZ4QU0r0}fh79g$$A-_ zyi`KkomH6XYLpUhs_JZMN-70^Mzo?yJn>2AynS&3O6_@LHdTf<2DA;9DqqQPG>3?C z6?I;`Dym!`s5G7y&CoDPaWxvUG)g5Hye%w5)5lF8CeRQfkWN#MD%g3oQ8A(c?Y8$6HVqOwKttiGZZ5N9ysxxPxSWCFf|0qjwSx8G-$XvCMz7P6O{4Y9X zXWrVID0IsROvItfGHWp?YWKuykz^Gv#JfU(<}9I}6}7xeecU_|q4MMpNKMDfQa5}~ z_pkZ6U5zfiysY^%L$B5n{Z<9Z8O4@-B{_D%WE0V?0h2D!7k#TQypha*=W3j5o>Zqg z%#1z$U>I>^7{w;!!C>^LSMjlJwj1A!oBuq&+dvxcUcE>3vkGm#cccc{F&p=_u ztqqcCPtsqH@NtbP=R@60fBBof1xqS=ErdEM-^|D?)v^xB=L;En(Z%NBi`0#L5@NQgZ9ocn?gMGwtM>XJ+6p!- zoLq06dbpGf*d;I0wi6bav}{co1kJ^0%DwC=!tGup+f^ppL5XEqF-u8PgAP;gq?&D# z=H_x1^$hg+le>m}qAl2#Ii6@D6Kw2d?yRIIG88`*5Uy-W!rK{Isy{pyCMidWy#UiA z9D2DN`Xn9tC0AenUFn6bzP=iTLd0m|LELS8Nqu(i9C>nI?SR^+wrgc}Kg#TBgea@? 
zkk3|qbBgh7wgO&+T=#6tY1)He$!EQ}$EH%MX-`bd#Ihw0?h z#1T{XjzJjYJp}Jb8P_Cwd7>qkB=Q$WOujZ{CAL3X0RvN!Ji!2?sxy;`^Cg{43HMDY zR)|VAn61E>J=0l*5+yYZ79~gV>4DkZLHw)$FSiRH&V^rSi_<=W9t-+m#mm>67~HV* zopI{q<`nqHb*M2{grI3;jA>N<6ly|~BV#GiZ~_`djoc~s5oaP z53N@)>UY55m6?|QX|D)4=Q{S~hAgS=u@cOY0Blh$hxd~1alF|G>9#k?otE{kQ}#WB zt~5oQ`zfJCU&W1^)#G?aHE>)FE~|bs0up>X_{r^%g4-c*h_POz;37o(KQ>mV@#x=e z(hh`U1HwtRzJ1j_+GKYX50P|t=`oWM9wA+?*}e!fUd;Q^Y4nWeg9uN$n+^Xar@dTSbZmi}UfSK-9{TPn}l=Z*Bj2FKn1@PZVbqRaDtpF#LY=E4X8 z5wH>f;{^72O|}FNfV`WwtHWT1#U|KP)-B%AoR)1VKKrC3dBY4|ldJaES{!zW>m_rD ze9apz>+Q!sNHlE_c}k28fz-0DRPpfci=kwdUGb}5hLg0IM4g_570o-fSjCUcCy1}b z9L@Kk-#K%8&@~GdOvH^0#f|pFopVw*6*ScN;^^?v1OR$j4+@u z%$zDW^f1!eoY$!-e_@?dZ0Gvv6;sl@%S7WAPAC&$wZQ@!sb=Gy3F5(182A#&n>K;*i>vN^Cd4;SpU|f{h?ne8 zchX)@e$|k7ZO?Sl{pZQs)4=y_PS`EpOSje^3Vn$yzy7Q|Jul4zTmi~t@|G56eN6P* zEB7zf8MUbpX-`cHt{7%zyvua>$@L)C3bIRE$wV}8bZ?}|Hh8HRcf|~)K4DN3;L}9G zAg6)t1}?Rax%)%57lWFGkyS;0z=hh=n-$b)Wd;LspV+jX-i)vvC}ygw>eG>3s8YIi zy9Dp|c7p^-F<;L%io#g>(_5-2#c3*#Xr`!R-C0&Qj;klwTV@XvCg+?PSL(O-jOS0@ zaKB5yO6F!u&Y9QR-l^Rft&Lf#UCxPLNT{`{kZkefml^BP%JOn#AtK z@~h^TAO8K}$l`VI*M-yH-e(*(=ltI;!|&G#V{>ZhGbnqBR`J|iNEn3_(*7%Wjd~D3XW%Yf&sE9c}ElW3b)^pvB4Y#e#MS3I`r?Agvq9g&4sADLkj;R?QPot zFarlg3k3&%5$t_Q2kH{&{BoQ0%rYXA{a@d8(?vwnx#QG*@60HA)j6FVZ-a9H zgaEPL5H$T7Jq})6YZCst*5vVA5ZE~LUi5a1l3dIqf>!hxWwB)WuQ0ILPi90Rr5A_3 zTWQb28juz9ZS&3bpa-hn%hiwO6W_NGd+XMgN^+7@a*Aw%+{kMNX=Tjm05>NddWql; zWsj!MoFh?*a=9~t;7-HKkMJ$b))2G^nF8Y%np(lZhCeabiv63nh!ak5XG)-%8s4%J z2>s!3O@)@p3XBSSJcpx|m?EW~tp$#OkxFDB*q9b+ZPh}5==zk_NYpf=_1DvfII4!ZpJbQ4r|Z@*y+b?h_he9g%Q-sB1JK@cVk&Dy<ZlG^p_RYqv-iv#eE?^{;Hg4`UuxHSNpFRztS?WB`wdIrr-%Ej(Q(pGlt`# z;#RzPPw=LM=TskX_r^XDtTp!4>v7QKTPHI)7h$sMNNlO;d((7$A2oL{9{5t*`A>Pj zVNCTmQ1ucIMtayMfn4w1?wQ#Rv{-Hb#2iKI2t3GI_o4zTUZ|W*8>(IXF|Ygb_aK{? z>#uK^Psdl!HWSrUe{JVJ>iLz&^62LgN|LOOTY0JW+DIe(zS_TE{m(AcE`Eu(yC3fA zK08I4+Cv5OV!16SFdZm>D+HkYBE3lS`bc_}KEFLC0^B7jm@{b}giK3>+iZyP{J{c3 zq{K^&^)a|GUbL-E1lrp@O86;amwM`I+@BP|s}3V{R$#z-Ngx{HJDjn2zaLzVk2MK% zLESzR5|eC zF_KE~m8b`<&_>GxdMI;6w+QP<$K=Nop|YJ~c_+!i zTa;Xc!9retjJJXRc>`v#%?HMm zK^ngz4eL706JX9oQE3FTj+qf$XitWqn^;=HV5)n?W2&<=|1CYpLO=b|8*u-`HiQoT zy;PDdnL&!58tVF7z*Xgu{AA!xA53>c9p_CP4d~d&W2}!r`4OU7IxM0N$3;kOu<_q= z1ko=@z>AWWuLj2Q9#$F{uW4kXPo4s5TnWaoexoA?k_3&2nC9>AZ}Lt9t{P`l7!|zB z@9wp9;TY70`Kd>zC0NE>GVP=BBPAj~lBk$2h6hFMl?Jl{=an!aJ3av6DSTf_G~tN< z;ejSo{#irNPvm}TrlpPU0RtdB^OO%}KLYm|eR*4H@@4~j`b<2I_rBbMoo!B{p9u+J z#9E1RASbru$gy1VaW?eOTX<`J(zMb%a!l<5LY+*uO5Z8?k*6DTzqKA&ZB@%4QfS_` zOl)EFiLl?Kwuh?FJt&KC6H$L9`izt=xaK$#WCgAQg&y*bG3r6!0&4I7^PejW*Y@1A zU|WrcwQa;ml+V#Sk<#XN4`VOdNu<5>+$7}HnB(30E4ow84Jiyoo1t8-WW)oHOqV<# zHeZhPAy;hNzM$S5Agnk<|DQ?aCVb(MZv*1MO!|`3^X}fVUnt?ClJXQIyph$>vMSSl zZ0Vd0?ij1&)?^&kIH0wrpTLbvOH!~CODP=#f*m`C51glSMXMtbGV>to>QAM~U7BX8 zlyojQtNGhjeKU(|nxCpAup$X{$M~@o@`!+$i9K`Nu-+|c^hU8vk+zh$IOW-sjYzsH ziANQ+ify zl15HCVc^f#*H^wXlTql(BN?A(sOn4@ZE7xS!F)1QXHFkY1PiVTh#I@vGB)x1WA90i zw;pf-1t(A#o1%jN4<48+!TqST=-#uqrv7S^0@kjyUHx!+Zh+`1nsiwfqR`-$XP=zT z(g0`;cT<{dTv59&f<;&nSm1%38hyLkZ(BQSz0igES!{B@`HIRGYS3ZS!a`CS|9A2FVyTza zruU;#|rez?iQP>dC#gF^{8 zkuB^gFRvYiUuTQku2|5(1z_F6s=5f@<>T>o;W^#J`ZCr8u&YOpHBXG8ZE`OS73q_; zKhFI*Sia;eeB;Yz5&JWKb>>p_&>{IiLx%2wpEd7stNI1l-kFRK>XTf!eFEE*$ZUWu(AB_?s7Dcm2Yxi$>N78=C@XJUO}ZGHxYc|WB*KNR-8BqI zdh2I1r0oM7%jHY$X~4QN_5E&CdJTXdlBDel^t?&%hd0z3mmM=fX7CH4)*o(00i0Cv zLU&cSi9-fCMBvvj(#2)Krohcrz8n|)(Fpe*m+21zN>6`!2>bv+64 zcI!rWop!>Xy9GjT32nE8ILSM$LeYH?SrYduiXvN8qFP14B=wS3xLQBwI_>EV-02^$ zbdnclJ|%GiU|ea^_|vM&03VOL@l2HKMtv`Hg{wy>}9Wa67dOeOx3E3fuS1QM{G?NuoAwqtKVehhM5? 
ztLsSHbYZtE%FEZ3A~;xkw?qp%dpshzY+i+mx4pag5)Ylf0i4#pe?r&YTD$QuX=ism&P{yle7*HRl9XNg zL^*eg_Mqba*YTdL&0g^x?t-ryCS#}3A9gfEKHjbU7|FFgG`b`xu~+W+iFx1?t^4j1 w-2MSrD3kB&ySlIIxPzU#g9F^R(}S;e6c*NmL)yzn)T8@5IJ0FBSldv)islOiHa^=AyT+)8a^*G zRJvAC^u8*hsGAn=#{Is(&p+V&aLy0s@p!s|bTDJJTggdNJ7cxobEaa^8JDmowH6Q7cj6(;_=Su6sg zt_#FJua-C65U+UM?tLO&@mrgR#Vh-t4{s8GHS>=P60c~q-Hfffaa;V=;LY>xF=?TH zzm+^2Z@+ch1)yj=o|t~pZy(?PQM^Lz?T%eru=#hLx9nx- zl%Ec3=s9;WU~M6w@hY=>`1y~wp!i?G%*@R6h|S+ma?2~`nHt_5=2 zzlr)qv!4I{`Wi$YZvx=N?30;U**UqV@=oU$6c!bia7xS0lvkWR zcfPWez^$&i$g90nS6>~xJ(ysyaeG{Suz@wj{(${8i-wMSwN;$^|3f?6Y?KXTWvyFl zH`|kfi25U=`nDuWd)HLW!;+V;yWCu?4}>2$RO?5PwH;et4!8IFr)#?S>(BZSNoCnL zJPUhGa%DenC{r`kS@)f6L^nCkPFbbv+|^wh^C|YrlO~ru^}YR$61f2lj2gN1YSDp} zvl)*mn;3MJQlD}cSBr#wv$eBlat+5i-|=O6J-Nj7W%}Fhm38A}r0a|Z))wbAnkfed zzTCP5nk$4W77FBqnIjEeK{x8jaQrQO12JEl-|YVBp~8;MajbRQpnQP6iMQ?Qodz!n zP2yzUGG7HB{9gO~Cl|kS1iiB?b+IJ(_0v~(cSsyRA$mHZU6<@M@ZMwL zM(~343@1G|>+6oHJohzRMcu$`w3_GjY(+2qYC%_J>g1Ccrezm%(X$=tUVmNU6B0G09%iKmD6C4|(VNsP@T*j)eD)p2=TXwt) z1`$@QM1zjxtsWq`&NJwP&PKGcIb}U91FN_xPvlsVSEJO88|xOuw%mDRnPjmj*qudlW7~ppY`i{$ zhG`*fC0_59^Jer=meQ{~>Bu!xdz4rVQ8t6f^STjssCV><*{65>T$WpMEhH^qFuNyV z96lk^i9v&V?B9NX+8uCVCgiR3F+kzBvsnh9#wPd5vG`}4uG2jG88YD=U!Om}Xz~Sv zw|-3f@g(M#B~Ox?J>rJfe*P~F=-4h}B1kq!>O-`jBr8jgvZ*MjJ-}njtUGBtGH#)I zon17V?$Zm|I6EOhp7tn48hf7XO29IQbu-kn)nTRTW1r00>v1!WxMOVesChU80L4pA zbShig++2o)J*aeHOv*+i^#KY|~dy+MWkU*5p&#e}w661@K2rL1*{wUIK~FQtb6zkMii&L_Q?nezr{b zfQN7l9HykAI?PLBv0H^s&r||IJ)~Azws1YEjFu_%h=P*NJ0Yf4pd$Af4r0^SMPJHN z7VcBq=&mAN#?M_rSa$mEjsQ=1k=V<^Lqy8@_!v4gk}pj=U*4mZhSAOt;#Axt1yV^F zK%j%NZ5l*6%hyuW73OadW%NapNu+e2^>JkR97WbvGg}j}|1YmW$y^ruI-&4XXGVsq zQvWF>Ya*ZkXP7g{ryxH%M` zpHSEqJRdW&-_KxeGFaJ1kHI)I$h%ORnumkijxF^>UN*@#;m)4*cn6TE^k>EcD-s;d z7)=`#+@K=F`L11(dlylu=9i((OX~jFNtchu@`&$(-d+lng_X(=u=cXxYg+)oxC7;u zzpMkI7mM)%p+vD^H6V{dN^D^XCB$({!JzjBIuLa_2tYIAHe-H$6k4|LL?Q?E-@ z?N%jE{yhU)CPnPsblO4MMj{iRe(R{ZI8g|jTy8hqhJv!1DAGyGPIYv`ntcbi7R8O3 z;N775mzh{D6GLpr+>5nMIDJ;?m>f(QFLh43Mt`OM zy(Ppr0+JJS;jeMBV}JV)4UAMt-7HAaFat+rJCP}TtK+Q`=Uw@6Dr&yS)>MhMFE*J; z1I?L+X-0y_!KlTc45Fpfgs=xF^ zmn>Vat71EVNKnyrDF6ekCzZ8XWTxBD*~W(2k|#cZCi!%x4lx?PN!QVwT*WrRx8KbO z+j#QH3czhBH3QuV;->?a{u`_Fz=ginLmPBk^ZRBo%*h!QC}>wifDiZ~0m6fa`c{Wt z5WitE4UY(Q2rBB5OIbxp40i6iXa~e`)|n(V&E9?>CO6-cpcr_U{k}ZT$|QDI&IH1i zZ32+iy{tnvPa^VVP;f~(3lJ(1)w3*G%9g*mrqH0o1+q-Ts2Fq z92Lv$wv>+>oj3??pwJM#=yQ3Y#UU6KdKU9SCG5M*jA@``Ag2NQj-4Sx>mP7Az1>$$1YJqG zEG)SrMdaEW+=;Uw65Aa&Xl$P!Rp9lmJEe)J&>7%w6oDu}Zim5ZX=M$EEm0WBrwddm|$_6Am#er=KesUXj@+pOum$%OCXf{$ka@?;|r zP0qO1cHyJ84jt^=p5PnDlH{UjMa1?&tR@})$KadtW7G3>y(|-9>Mb^l|1&YwYfJ$> zPmCqE8q2+Y>N?=g09l8W7X|3+%fr1xT@ z?uI7XEuH&w66owu7@NJkf5VgMk2RN7*y!gCThk%;8Xz)>o+!G#_fcITxw6siC?|muB?nD?2+F0Coll0hC}RTVpUOFN^|CAsmW|QkP@S)Dz4k_SFKSDovk5rZ0kt6_#~3@o0uvk$ zn;{#RtrS!ggJK{tj-OY-W|za#^N?#AdO86m!)DUY6b zGmfF-)3Z|@ z6WWJSphLz<82uzn+1%mVfw}SoZVgrK)uMK1zc0s-2>m_4ZXC7UW~0vIX4 zges9-*hj7em3Hv=TsZ(vH6CwWr-!2+do@_CcLgZF0+mu~5au~~UBFOi4$;7CbAU<` zb`LLL-o45*4Ui!mx|nJFQ|W96P2Hd7^k>n@Ce1(r#o3~OFO4T%pBSA4IG~#2s2KY4 z9?Wp|u9!?V6asswz~Hw{{~fs0D&oy5?r}?h>jcXv;iS~U~!d6?hsCO9H+*dmoNZ52If65>)&? zyT{+Bfk({`7h=}&8YTwmFa1y1(oG_QdDB_$mQ4S1|K(^iqFB&LvmI^zwFVlEno8h7 z02_ruhBWL!)cv2ab^o(yYW9$@Y=iO}XZLGvNO|gDya-A|V8Ab4|jAPDsTxZ?WY=$~4IXn=epn=kfXIQS(!q$j2CJRaH0fxpyfdFF2vhn9nzI`bbl*K4AclPM^UDfL z6xstsdDDHDMb)OWM^?I8s@d=u4U*>5gHkVq-boApvT2io!p^hMC)>7I&*T&(#``_U z7agodwtHYm?MG+wL1Dnw92r3e?mI-X;fq0IUPS!kxFn!ugobPtwJIM25OVOFreg@X z=CuN;<`T?e)s9NU{*8ny4CI2)okWd01Rj&s+C)N|$yyrt+?L|v~S_K_n! 
zlpqu~{=K<88BwP7^a;d-R6cCM(b>-Lv77GMo`ksZA%~?N=joo!A_jqj4dHjXO!pWt z;UB*UzqE_)5P)=nmFL?o6aDO%{^YJBCla(g7D6_gGEeD?j4O3I^5HG`1~Bhtf0D|h zd?#e-D{jEx5&1f_V}+neA!&<@Dh~7keK1R+PVOrbZ_Rl8vd%RRRZB|O<3LJv67&oN z&j#X?2G3s`tV|zt6k)dW;VpTHBXj6-`q1SA5508axZxldm{cnVVy z7p54o5@M(Ah9|Ef-Rj6Y#_+ry*_|`|wh(bb@1YoyCF?@Sb6l8c-zHmL^2TfJDakkv zWG<-tlG@2#E&o)-fGaSHG~lpEKvo@*wFma0K$-xOG8$1PjlgU`pNA+t98sQms?P>A zj*lAgf_QWc`~|ZqKM@CDyRRd^RWf8m@N323PXY07C*v2gI>Q8B$>fXM^1$uvVVUY- znHeaYhgEMGS82h93m}zk!yZLI8hP|EFdNJVO8h8*p@^UG8jPxJ`RF0?6hM;jW;ck zH!-L8oj_ssEL>D%em@@0WBhv$blXz zJ+Wb8p3F?`3B@6;kH`6wH&v!yU(+#Q5l4Yh>9CRI^cip2q5s8$`#7_nD7=q4QvF~u zo;?|I9M061xTi?6ej2g*XP}@KHm9X|I0G?H#DFa`pduc&Bx8;ANtVoiHsYd0hXp$W zu9a}9dfAhjMoqzVNlC!Y2C%aOW~Ga8y=4HdAIN?{p2IL;usZ=WW&&V{exEXbIu&P5;gP93Asnco;5N#c7q`|Q*OI;&sKD{Tws z!#GV69RnmeH%A3&ru5pKYmu7kPKMFV^_d+jLvQq%h}>U+Efc8`5N!m24S(tJuBF^W z^C$ySf7s+5yG4CErho?Z?GjIZdN4y|_g@YA5Fvt-g{_UJc_D`G_f6lAiwJ+UNKU-S zGUw!4Lc*|Z{|$3Qq`dUwyTx%MVegxkLYYZ-bVy17=_VFj;R@mZ#xupmRIP&>KP2HCc3YGqDMIW1CHWRWuvi6}nJY%jjY(~8Z zhw;Jq3%^;h#mnG6aO-Wnfe6D@ojDMU_zQqXv=0peIPj~f?pAlL2gN`}qEjb+ zBz{faS7oZcVE6geq~x*@6|o}!x*@%qGXvS7(;+QLnjnefhB&hkZp)B{+V{=;Wh>5j zyztl1=Uhm*gt9I+6FN+;w$TiNV~B!AUjK^&<3%Ui87C$8!2& z{ON*`(flGiqS4q?0jxMINOAGVd3(~v{pAAeE}2#Rsro>xw!-CxE#_Tnkx{n#&VJkv z(aGPP0OPQj*fd@;aB&OOi{5b~!ZEiHPdOLM1@hhNJ*ZCG9WT8pkS*2K2@=+JcU!3{ z4=5Iy29%DPI!rN1+M0|)Zc!|n~Rgw0o9 zw1uS#ed_A3P2G%Vd8hcDSgjC>r!tCJAtDoZw# zXKS-geg4NTk38sDpO?!xkhc&x{J=^yNx3rUw-+w);Y*@n#dXar$8|fu-yM?vvwBb^$NYI-O}6!XY!nn? zE>wUc&I_DX=qWz+6gnS}kOr#p<@xmXg$+CP&3RMBfiabD&yr(0fQnsTSdPdxwu!5R zcDt|h`l4BZznse!^L}!xIcKgq@W2%vo5QUFrv#I~_>i4^-upTUC&L(%TwHQJr%ECy z^z{3BsZViw-f5Q|aWCl#L~?wvcdAL@qmQo+mnA~@WAH(1B(1R^#A|> literal 0 HcmV?d00001 diff --git a/src/docs/src/documentation/resources/images/favicon.ico b/src/docs/src/documentation/resources/images/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..161bcf7841cb95d326d9729fb0000941cef010d2 GIT binary patch literal 766 zcmcIiyKVw85Znt$i7TjRC~{@~0Yy+;gB1ES`4%kk0ioT3FQGA2+AEUc3QLrfNHkYO z+061d+ZUu%tZ%$Kp7HuDnJB}^^FC?p?uZ7+Pe2Y#fQ-7Bi7d41VIe}X_LMNAB^)TsBsHWP6w-$U*}emq-5YDhJteeZ$Sa@ literal 0 HcmV?d00001 diff --git a/src/docs/src/documentation/resources/images/hadoop-logo-big.jpg b/src/docs/src/documentation/resources/images/hadoop-logo-big.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0c6996cdcb0ed8b4115aab112b8a411f25f165f6 GIT binary patch literal 127869 zcmeFa2UJu`vnad)0YwD^sALo+Nsycw5y>J-8iIi2%#el*Vl`~wJR0OFH002mX{{+YHWVEdhh z5TxM+z!;zeKTZUEf2RHXt$0(!km=As9#qh<@jd zItW+@|E&A0<8Rdg3g8oz#BWdh-((2FKrDU@ICs()Lm3r- z5WfzPpX313XU8S+DBu-v?)2%?XHK6xbLQN|vt(y4Qc;{cM?rP%@@1;am#XU~(Lzd%lYfs&k@obos#r~IwR#s5kL_>aKF(*OhTjELX{KzNaW=pq4r z0_^r_d@}(ASI5D}gtBqk*} zO-yu-A4FaxI(7XLFR}DvqsuoOpYxGWMx>S8W4=kH_Q{xE!0AP=4C$>%^`AdBO`HX< zyf1ypaz9GLRQ7Yai_q0R&1ti(vR6(0@R@Bn*657auNRQ`lv%Tw2RD#24511e>Zni3el`K36n0RQ3(y0r3}jAPo;#BG(zDOV+0b zJwv57&5X(=yA#G1N>+U!dxL^{RX8g0#^m1h=4&G5L%f@|khB~UKW*|QxLru4%`)#g z>`>AsC9DM#;LNa7`cU`dSH{^)Kga3s#j%X(2j?0`Zsblr@&;PQUN7lJbA z2pJSxCJFbN_;?~@m& zkO5rd(?J=O&4C=((4@A^v(lzTsOq8F!_unso%O0H|A-jXhb#0PZ40K2c;Ml;6?<;q zvHbKMon6>YZr)p0PZvD!Qs0fqX=v zlh;~y1maHNfg*YYqtxL8Tw3oYv29+5p_!Jz2-mK#Z|Hj3!yef|#AiGp(roQpxhwHT z?;%8caab1*r0c^)H+YLHf|p-^)slQ0ry=g$D?R@qtWVsq6M`MC(sVv3ey~(%QX8s& z&F({ZlTS#2NLXgsptBpdY^n~fqK=0n4w*GG{kylv3t<=efZbWCbR-?+JY=)5Pc zNH%q!b+L?}VX7e6Fw1OqH<+$!C%bmp@lHZT4_{jx#MNH!C^QS%b^qxX1uEU4j2llT zFPynFo!0U$4(*@+dgA~AUD8)Ytgd$OOS0`so~1RP=r2QLE!a(!bWCs8ljjcp+*odq zdmDW2!R=Dlv)k9u)xi#5letS0w4(=Ai)B;7_T@*gkne-5u!0G%J2ln%s_Ao*^+8sZ 
zUy^B&eajTiv^6}&Zg&ms?oQ(YVwt2CDD!88f<|T&&&2c(FVna)szVzEL&kOD9JrW% zM}3q0{6V`)qLQ!K&ZUe|uML-M^V<5lQ@d}>9GM+_if6dv87JT2EJ~$r^O42XGpi`g zB5Qcb4gc$h5~;*O&zYz&fElAip8qI!};ch zw=wWObWr22-^!a}icXGc9QrK&+tR_Lfj#5U6Yt}E>y2G~1 zR2J%*J-Y>YsiV=ph0BI}nTAD%{rStp*|Z$1Nwk`7bhNUv)5D`$+)~h$YjstkaZ>q^ zj{{w)ed?D=UBAPv|P2BkcpCs3^p`FoHJP?c=WT7`yv!d2C)0caw zY#)rOjelA*B)`~Bt@&+ArVsaK&pN?>2@lBQfz!3(4UA>T0i21^6rF9DTQ5#^J=gbY z$M?DB_xv+Gu?D&E@f{9vm`Ar-INvgMb2zc45o|pSi~z?r*--n#Nb4h8A9_gNQ=7X5 zkktZ{YeHUC-?_+ZLOZ3ZkMs^|pi6lN$N^Jj&~g8GgeBbT8-`%t4WF-@hqlHr1+=r zTp7N2q~AP%SfJe%I4U_fh@XzXiYt= zgE&S!a2EF=j-NSJ?i6`EiB_({d4-FN3++BCcYBjHD zR%S!>8Z7vN&1XexwA9)v2CDcMkxP>$cf>3-m#Tfd@^;Bu=w%ADH{`OaAvWvqn7-~^ ztT2R?tFcx5@X}yD9-s+RNr@YiluS4E?L$>c)007B>4sC&Z@!#y11t6H`pj9`d#SMu08PiqD-fHk;yZgo12?~TZ%Dx zNp+1obs#CC|0iRD0jsJnH__O#?vMdmux=Hr5(BYFe-kvZ%h&TWpXc3cy{o>TYbhsLrm0O67s1X*1o6DbFykolATe0FKoroY*z>& zd_V!kSUofe^5&JeuhqajiL8CAJ-0SJ{Z@qeItOhh>$>BnFBU(FU%M#xFBO3wR1rk( zDXN^hb+S*ra1#F|I>r(GrU+d4UD+V~<9Sk!<%F30Uqv9#fZu97e*!yJSQt(yssElQ z+CIsjIf=oD;I~p_CwWjkI4;Bb8$VcTOC@Pwb;6B-48$J`vm*n%;rn1F%ccSu@7 z;-3({6SyuI|3M>o0^#}>c@Fnm=dXiDR|ChQ+Eu3I*hrfYHU`|#RR`zE0a67QfIa&4N+Eq_rVE()j z+yy3YW^d+XG!`XIx+os&E%)v%gLpGFxyW{V&jS#xNIfLj5oBv)~Y0{zvkY@8ZSdh5&-! zSEFMS%x@Ey-~@Z_ID0k(jE;r%4E%uKPXh4C1tviQ$2HJA@ynp`%b+p8_sgL1%b@Yg zpz+I~@ynp`%b@Ygpz+I~@ynp`KV{JPO)tI@1 zFbq_GVSw{V3RLrf%l{h+CO{bA2JReJ_*)CV>D#2>f$m8g_#c-PEL~h2#CUk@ow<#U zk6yV=V0Jw2Mh-l@+;?~Y35dIck%^6&3zM-KsAEd9Zqy-JnXF7DS)qa|cT^l?%q*=G zJe|zcJs)bAc-okVnzBNq$Rr>qRdIpUaG6`#x|lgRGnqM?NZyc9yK#(m7XvYNW-dld z?sm5J&SLJ8tS1G;!1OVghn4As#KlIERa@l|lMATW*;u(S@o|fA^KtR>b26Db8QGb+ z!JKTEc)0~x!780h&BfI2%l%#cTv1A^_#>@VJ2X+4u2e%|G|LZ{}%%i$K(6=%D`^% z9E?8w?aHC}5sH2kn@h|M~E%k$)uPU*h^Du74zfe?1h(;Q)h_CK-d4XAee**Z2wOrh$12-JWX&0AUk^jAS56H zUEGO|IU*!E11A8T-!BqhBDqd_`37*Bmr|OKnd;2rdsmEZ@?TYRe16S1;+9NW$xjvm zCo<|!y@ISi)P-JbM!vtV;VdgBEN{Z5sRgAe{oFUbW%|-3>Xlh~SyTTEyNLNVoI_Mh zK~YItN7usA%G%Y<-NW;>pMOALP;^XeTzo=eMrKxaPHtX#MP*fWO>J{aYulIhj)B3U z;gQj?*}3_J#iiw)-97X^<^b&eu}l6bBH~l0PLYrh+kq@!1YPopc}aj9m!%(*@)=P& zp1${-If63AM=}YKASw$T;xyx{C_#;D`;D~2gK)8e;HZa7gf};@(4`G zt!W=!!f=Qw=~{aRCFa$3j4dDh-G_c^W~vr#fL?DHZon49H+)wny#5(L7t%#B-(!7< z$XWUNYCmG&T5-6cfOsosin8%)Lw|fBszI^;$fRmHm(4A^ogxBTM^8Jr#+^8-SwrUe zG(tcVGakJnd+))U+b!!2g95vVZ9d#n$<2l`2sJWxN8AhZ!i|_gk1+tlb=XnIL!mx2 z)#?0RSVi_M6l&}Hd<4a!y+!|ArquE_bFfIKX_<$0#^ny1rCF%|gjfFe{ozd_H+a3s z#KsnMHq6euY9qH^=fTkGqzBuc#iO-?)e<}qqYfu+D-5o-uNW1fVcmJ$qpy-MenYT+ zR8%Jsw=zMeKUlk0*@8+){?SmyO=MxmRU<70by!axjBm4Bt9jniS5#XNYatw)o6%~# z$R3rzlA;kpdNT|ZJ3mUTOjyrsCE>1QZ*6<4>IChFZ5HR$UL`b(74zvH#n2&L zHrk&8r#W($tf0oW^@1`^J?92QO$~tXktk4zuEpC>Jn)*wyW69RQD+a*T{!wW|B9bs zNHT>^MP-8kck6v!@6^ZeB5r`N>XNd?R!qfsm{F0*!b8&?ro)AXnV4%K~9HMY8|I4b>TZ_VFz;sN2S4y|^A)ss74`%9$1L{UV% z`_lKN-q-J0E-qz87Te`BnQ+LvVaREXSr>Ze+kXp^stj?qA1Gd5LQc{iPLS1+;wW)(jUZ~2Ic~teog2^(?*QYqQ2ZkuUu0R>0 zSe?+qTd6{*BSVgDsi~T=Z}hHJ7slC0U+c0u-0owyTX?pVa#ao&&U`3oNAQyfN44~m zb#}t{s*!g-YIN$gWJGu<8GPI;o1I~A@kyvcGfsm(ETqkZ4=ZUNDXBDCufi=w^|aWa zyZxfeu%5Q<#8Mgds?F1?9>ev*nnoe5#Wv|UlwO^s`DR^|?fN;*M_j6rpBEf<1>?Ii z&wZ!A^yzlG-ph>%uS{Vie7ylo6nmi!Rwj>zYtg>||JTU_|Lxyi35z$jgHcBnTNWOx zu(|xrD_Qz6lZJwOenQ1_#lC$Xne!HV<~D|sipuEgO`jxanh*XIJpDNmZ#EdU zJ;#hV_h9!9xC+}tE;h2d_g_$M3TnPHAg?iSf{q{iv9rnpY+xoLoM0r95T}#7iSH|97QYbi%-@2ESn`_gTGW$HI;GppbObmAcV*Om&0#7s z*5dQNmEJqt&D{^<110j8_Yl@5$bM4u!A{!1^LE@btNhg@Be7PL{45Nd-f|x;L4R93 zumMBA#3p=<L>vha*?VYYRR;)I2}l^i0epzHqqvE;Jg-N61Esg>mSha=cnDDK8~6 zA7*1H;~D266yfhMF6=?4(;kJl=A}F%z>$~4?p=$WVab-1AXQk~{!qzkzSiZ}v+9#j zA&l$ql(_}Ixdu~d2%MlrFrUilZ9i|%D6FU-mK)L=aDC=R8!E|y5B8)1L9+5;b!c}f|?!XVhP@k%v&Kk6c_F??=e(Z;f#?#V-SH2n=i+)*dMp#v9}w( 
zu`SzddYxIhy>&!Xw-wPE#voLD5qc=^0B@S5;inliHjiUeGKBtYyIOVs$Pc$|=q$>D zwI7w74m)V;ApYF&MN_5Y)Xl-c;rmcs8?~sMm?hO*gBN<)ETiN3?NzBHGyOrA_Z$s7 z8M!NZ_sg~~V+s(TEpdwdyJ$!F`h3ok{vf~RdgIGU(Sx^DihHgM*6}#B$Id*uNi`Og zn^Zr?124Qh2s6 z&6uQO-6G<-tD*L+b9rC4tZMb0d7ULb?ZlcTj_aA7CKd&)nu;nMHzyTlLv9&_@0#L% zt@!^}|KK6D5DSJ|||)jqTBkX;x@X;bYfFFrEWnf^WNd;X`rA|a8zg)gv( zeY#B6aa-o_cZypRI)hZzV+qS_Zfg=$$|`TWO8mU(8g<1qb$27*hfR3pjpCH1e#Rbc zY@yB$8}i@~^L$vMnZ2FNtSLR7496JFa-X{KcrkQl`%^T@PPa1s9?7SPfhfTTzN}G_ zk?9epVXJDE*1C7*kq+-rx;{uNf(?x&i|IUX@V&{)L_sOvyc1u!RGz)J<8i0Khq5** zyEZ#bh-7lPD0-zZVNeoQ!8VhILa`dzt(uRZ;;S!GQNL?7&Ru%vnfKy(c5+kCmpmv*EiN76BXANEMh_wZ>Ii?beuCFqNnD_dq(-lWvwCGa?|1d`PZ_>{&;{YNI0AV z!_ju+Ur&og!>VXu6%xpD8J74e`lVB;eah~p(LAHml(4<@G#<|T3AV!jQ0 zV)#*svUlyabBR&^h>dsVp$&gBG zk8loAFaIbq$ionnW)fZ0l&N)eZ2c@0sZ00rUVpEOs?LM=sY~+fRcPd_YJKB3qmqEN z@eLSZ{hk_M1k?AeF-aRvh421agxc1%pR}NsUS1>Il~x>W`XvJABpQf3X&kns43c3j zhjEBhNE>W^qHNhUEiHXXHCCMeq9U~MfQR>OMxIKy{Q2{@&8jw3hgCQ=vFWBQ5pGIB z9wyWhqHGmPn-YTRb5r*v4S(%+|L@)GBk_Iz&QjOwgSoX;H{;ecS)a28i#i${o?!m) zG->U6&&I?jGj&|#LMeHL;a4t(pJ4zs+Ovm4kRSR8njBxL zjfZ789gps%M(l=HaOk)6^^m3#=enzw@P~w@LGM$qBP%mmHqz62?CqAQ;SVJEXO?oU z%whG(85eGG$=9NFEAYTgaC2D>mC&FXWV%Y9f!l3reqoPsb{ z&3tj*9^4R_&TcoovBJTDd7s376Q_tiuiU^?elJhe=%H2pq;a{8N@V-wvTWxw%V#sE zC<1~OXdK)lMC@+oB}Qa)n99uAFR53Xd~me|Lg`az;gv1GTvr$r1iXt+;)7maz#Dr*5F`6ZU69Y zW*E_2Q+j}2n;4Uv>y9StA+Y;&!fsLzR`yOXexhUqtM#fV&oiZcWYo7lC1;u7@g1*R zM^tRW=)>ZzAU1&R(zT20=OOM0WAYV-?yI#}WkY^qQoR605#R~s1;_18_U&vr!&)~< z5*ZVg)G|m}*w&I=L)x~{bk4nN>VvYk2K=-_%`7Vjxla$sOMMSR8gyc3M(wf_IOlY( zqueZS4P@|@L)rEYVq){;Duew_y`YfuE@SntZa+(QH8`xx;_&EHOT*@*URc*I6TuMQ zWT#nmG4y0vZI@F0Jpt*F)IQ#|8gjMu z0@Z0}KawX@^kWnoN2?m$Y+s1fKijP-W2nd%IsHOA*3#@=X@8eYb304yrMR+sR9Ag* zSMR!aMa{U*y3i|Cs%GxT-(wWWI-2%BadcQ0r7IIw(BBwm^0QQW@I2y!G}S5ic0A64 z5!gg@($))Q`yO7$Fh+mY`XT(lD-O0=uQmPk5jy++0&{MnH{6~AZuFGbVlR{P<|niu zmB6Ty%MFg4Jd=oQSO%g74@|}-`mBuN0T-{=9csE8v7675qLp&Zd@=w&AYup!Ih%c4yCF^I`3MgzkE%3DG9s! 
zy4$FVh4ylF<+>hqDhhLGMq)#p{CT)<&SA7bNGq0O)m-m#R9dVjXDC0jteDEFpLHz) z4~v2|j*Sx85YRyN4=sKF4ddXiBL76l`2PRy>a4MLIEQ%4qljRq2p!a0aIC!S>-~C4 zpu!#Zd=c7p6btnE0PkZvl_Ys}wdg_2l4B{Egb%+; ze9xd-c}0v09TinLX-!kg#3OjnKk6vuDc zsHq+xr7Fjk5c`U4TjeX91~jQ4&zwp_(A$*r24g+-0KYsB2@kLSvteh1MBD1^vP<}EK_ zDwS1{3G+*dJw4z+^@w1da9r4C?enx=9k$)P##zhF+|6WJC-CM`cDhE$BVtVgUwzT2 z)kvM6(mx))CCb*N>ni`&DXnIZepM|?bm@)$d(sF&X2C;sxsP`kGXoY5u(2LQlM_=h zm6B7aMstrU9;+(tV5pXC-cJ&y!mpDXkBOVPub4?6@Rbt7PpzD`uN+y36|c68TeL2m z^;KOy-Ih}%W)`JXmscGjbD@ZUUNO5cbJWO#pICL1{3d~ZEeaSkma)!u!F+FDIyDfy zgNkS|+KP^RG`o2%?|@pfPf4r`POl>I=GJb$i~zU4K4xZ20j<2E6YHfn`u?qhnX48h z=H+0}Z8E+WVneBzEW~Gb9IJOul&hPCx|-@Hww9l=@qr)AQvKtEN95PmOe;QY&(jt{ zSuQTG2r9TMpOVLZNaC)X?}EmaFe&9aM*I%5%|IjNbH< zo}g3@ydfI$;O+~8x&_t|!~Hm+nNKxRCO?XE4Sz<+ETrW0U}O>E$XLequ-Rw`fv-(x z>_jT1(6F)M2VuYQaXzitD%Q5L@WRkU^3Thhb2gtO4Ye6--f&4#M=aVnI#p((5{&rzWtyB&LY;^vdWPybG#LgpeCi-%Zto;?~C6RW=85- zcW`uAX(?oeB;1rcm+^w41oi+f3?3-ys^9_1;(d+K&3(fyvg~OHxB+O{H8@x$FU=`< zhdDcz9QdkZ{ivkUEiyh*s(56iOK1*NUQy?7OTfB$Fcz2hBC&ADPozaw^06v>Jr55| zyB%`z%#-v^QBqFv)ly_*!}4u7jKa7K`?bg0psv0o2`_K%?!&w7?Yc|Rx1}AXU%WqO z^)>|Jbz0277Hud^5rE{XY;F%4+n7H4X6}ID^wAhuT_2th zv0_jPzOsl)(6wUEXE$Yi5LhG>c{LP@HdyTVWzYXVd7ObC_y@n1-+0II0JAT|+ZnZG z58kk{zMj3LUq{C`aIVeDm1J-vnqQuRVlQ(2NtS?Cd;YWV6<1^6RJbH)o{?o_Lj2Hz zv^S-jx0`&B9B%ZzXW`h%SIM7t(Bx>IW04!|$&@*P`ff`frB7G5lNiVR_7S~zS*luxb-A(dK z($*)G_K}(JDm0hkc7cAS*nflvw&{YxO0oQoF#46cA<6cM9}yTwSw~1p48r2OD{F-q$Qy5>KHsQ zWe#bmBV_P@P&z;_&C!S8w#~JnnN(q15&ld;#elTJ$>d$axf%9UIeXKtKH+I9evM@% zM&SV-{`gwDp^d^0lP%ghf`RJs3W19KVCu1JWVKKHrdOXd((p!7_N!2F^Hbjx5W7&!8p2U3YaMo=JE$`kEn)enRdMgA zi>s=mQj6EW;YM~Bmpd6br<|{hXt9R;Phkvbdiwkvl9+9?-km#?%E#*?{-N@rkVMtf zJ^2HwWihUVGFk6Z=~t+c#m!_J!{o2_!8;#12BH!0{7)(Zecg19%eKwvxDjNH@I+_q z-b7ElWI>e6r2Q0mZ;m%UmKps11hcMCQWH} zSrhKf)M(LJDUWZ+&9`_!s)(XkEZu&$k>m|gP{Ftdji(U5hjU%JlSOu7W&oeYpyq-T z=0T{O$LXt)rJe~YuO~ke_#JkBbvtbGl3d-}>*WtpV>1dS-U8$Ta!50*-yAW#x`WD4 zqP*nWr@F7Y?|J#G7m>2)mLxk-5phFYCxnKfi1hw#OfVj}Qxt>KMbd5~u6xC8lyQ!7 z_~Z`jyS+$BIo~sy1m1(CtOI>BHe$u`@{{z#^61Nh&|!rzs`HeW6yNK3KT_uRnLK*E zpjsbRJ!CMo<)~mqtLo;)#zHHuVFpdPTh0j=%Xmykqe!ilSDSpcO22Q)qTD7w=xwit zqxlxm;Y|$1XfOLd^>8Y`ul;Hj=J8=u)!z2XTJBf`;==OO3w^zqcDTPje7z|;{Raou z1X9J?>KbPPJ2%KzsG+*ZpTR}KuP(iO+{W-QH7>SRYgVhQh}q)IRz@sy8wkdyBEUI z%TaP-eC^gisLn(nXP1idB_~Bu*06hKci(XYv8^Pv!!{us2AvGnKP8VhSFuQ8sa{24 zy4|N@0b9PRU|P_fp@8(Uf$9nlH6|Rluc}3qb&%cS<(RNa+!;alW2wo$WL#(pU}g%7m(){WdcixCj0b+cgUuDb*=Wq4)W-wEwEF6^ zq;1wp%k{6yDYxnTOj-EJPK9uJ#-7KqAr@k)#wqIUd>8!`~Gh?aiQ`o@? ztk+j@#wxnry31>A{IwjUM(;aa;*{k1qJGN4nJzr6I8FaPj^;@@yc%A%dJm^LMX8?yLp`3 z9s%vstbv^O<1r9A*NP|V=bIYcRPNs^JO6t1qSBQ(;-Ea1N{*n2@K_3BF;3jdgw0tz za0U6S zn-gtwcs9vlZ5}talQ4ArH z8g$e!u0gL~;%XpIfvjTG_Lkl#jHq<1dsrnJu+pJ(M@Mo7UzKDQ$EW>dJm8|XsJ7`N;(OoUs^u8G)2gPTEY@@eMRpy7 zYQS+GVkeQEo#k7*)hRU?W|q}fauEG?pF`t_8G42Mb6=YL$)alcsc2v!eOD_6{(LC6QpG} zi3$!!qCdCZ!0ir?%p@Y;1<#|0uGogztm`ga;7}@o_L*edTucvpOcJR~r(~c*^~fU< zILa=3_7GSqOGo^WWc=E>&?raVIn@BItCW@U6f-_b-WD(6OHy~#>Zd)Ok&8mbR6(42 z9)>3?Y`os&i%1JOdW9zUnCq^O5pLvs<`8UMJktCM^3;0gK<@z_@O!y{%iDEIg5WMz zlpZ7%q8VY{b2wuW$=7ABEaOEJ$0Fgoe&i$PJEqwu>{Cps6S#*=~fy3fa}s zid&HdcU>OQz$Om6@s6#DOS?Hs=jPcgy#sFVNj)U?L@bu)T7mX<=s}~tb={ERB}Ya{ zU#}mH+z9TY)X~&&@=sF^Yeh93Zs&bi<|p>K>s#9?slWbMr}OJXxqGurwW^uiNa798 zPEY?iGi~-hNGbQTq6p5XWiKST7@cdf*~{@sT%+WBV9tW%yQtKFZw?BVo1-2)?LX34 zS)u<)i}Cb!!)c{b3r)58KL6a;=vfpQR9B8QD5I5CL3+8lXK?c+Azf#gDAdY7d(8P? 
(GIT binary patch data: base85-encoded contents of bundled binary files, omitted here as unreadable/truncated.)
z35nUK;i#i$+l->$$x>;5v%vEpW zEiYZ{yd!yRmDwm|!{@IH)^ENG%<$S0UGITKcjSyu;M|dml#}rMFE)LTIK@NRYMSP@ z=WlZ8>J%7@OouX&b6=B|92r#>rgl|RdN6YtM$^fvX5}JoeQ?+AmS)k>$q3+ce4Rc#>z9rmjOahc-IfK|AA`;gs>?Zj< z!SXnqUOz2S?9kuBgx+}0Dj~EZT5{-G%Pm^)Pe3nkF@g`maEH(Z$P>xu03SO=PsRWk z?y%p#53E43K4gh3x-SQl%zqXU%uYqLa+?@0+^@RUKJI{G1zDD@Ao9{Fy%#qhLnR_aA(0{P8&cm z)L|Fnbn#mb*b;@*WAG^Caqwv3`B^K4J@AKQPmNCjQJHveb`XvTy^9Jw-^GBdqY9g> z%yt`HlqxSSBmuxkx4{f@?MR7$PPOgjt*mgGKSa}0*%`?F3)yyyB2bbYnnSWNUym-! z05g_aQB^>i`^$PJAhE9~A>ST$K8Qsjg$RmD_&_6Yg$3}t4R$CX=b_950MO$dNPUL= zxj2P@1Gt|7XxDL{)+xwtjizQ%Ly5W>|E{49TU2(}2kufldY2=&_G(JaI&fn%$-mAu z@IPGvT%mu2j~TTCy{nH2(G3 zKScA3rO=q5)pGMj?dHSS!Uxwi%;GtS*I(sT2S7)+(2bwh{j08y8`4s(&0LiMM#ihH zns&y00?6}@^i$YzN&rC04?SiMAg-@esh-Sqocz_RUr7pEk~N7oX@{kSx!4WpTlac_ zb-b!Z`!qr%Jw0p$?zrsa(_BU5mDT%f*+_0^o8`s&vhS}g@gl*a_D2A{U7j3s#=Fc@ zLT>e}_@?cmi+v10Z|CqcDdY6?x2|-n(O0v65|~qvelSp=$NN{8(~p+_(cX0jHJNSkpwevUxGD+~U5ZpuRzSso z%1XxwD0Klrx&l%pfIw7`Acz!EKnN(kNEeVIktQ9U(h>p*$WjytBtVKO?2Y5>=**k9 z>by7aAOB5q&wSs#=bqm=znr2fF;-Ezx5~OPytz5tO;#HH|21QnSKb6JCg2ASLhGnj%xx!<$^(>BN9;TwpPMz_TL)aSR+G+BsyAhlS2gX(-T}~GFghdYliZ#q6XfF=uwmCPDmCD#Y zmauX)9$u}|$_M%xVG^#3y--r*6=RNeX)UlH8$6VF6)eepN-TUrkrK+Ww(aVDHWMPp za>CWg1$XGeHnX$S^LqS*Tw#)bxNmV+kYW)n6dKbT&kr}XI%mJ`I;*d=MCLx61)PgT z;0pZRm0|78zmK`UN=skIt|T{4_JtTr@&~MFpF=48qVuL%e+2DV&Ri$F{Z8^F!{Iw! z0;i!L6oBZA`^X1vlpN(XNJ|W5mVTLL*#gkf5ntG<{=^f^YYoRU6;xUN3monn!K?#@ z&^`lJ@I9rqxtHb}%CYo|zRmLl%n02OU0HTT?n=VtC6PeXzQ{~U%>d_EqKUT$z+~Sl zE*gGv#=u*N-`G@4-R*ugx^L*^(+a;nyX&NtmJ8zuk4}QQLCE9?^T8JLr*2*~sZyqC zh0xfnN3Uk%Rd^B(S3tNkngFPCbMS&davC{)-Zl(2VMR7@%6(id*N?|saZuANd{5b_ z{2Tmaiky61icr0Zjr_AG8wK&o8ysA3_P6Yk1S6Vw>44u7h{g#=H=a#q_$-^KzEoi= zI4BLwtB22FRW2~}{2g@nZcJ+;Fx>l<<(2iaC3Ft!OB?GM6xqXuYe!M<4mLp}F&j?w z9?GF-zO7Ttt-2Q#{ookhM>N-K@#G`fTV4Y-3yeCsE$0B6OEwS=CTQhkTpXI%Sjes1 z&ouk;NWU!#`AQ3ZF8TUa{OA3!LDDav+GT2YFpjdKv)asp!;vp5U;*Sc$d+Gw-o?8G zFV?)T5b2GE9LTmLy%?!U+NaHTdt5XuKw>fY2|J%V>C_HC zJB$|*G?PHw7oBmMd0bHTel%)h9+lITe>Y;vzF_|nhoVNBe7*?{no1IEWV2t^z80jo znpbLf3*=K(&F|3aiH6nk-ff54B==ItIbI(gy+3Um7jS`HNSX722E+~Vd}`X`6Rp`E z(Y@Vb&;pCQs0*opHs$@vEJVw8^Xlw#85e7?2mNK3At7rJMl!{ulC@}MSjR*KAbDbs zBvoOXr$DWZmT<(NZetN2+P$a(%LeWHT7v+-o`ESHRbP7() zAFRS}n@T$~wJ%mGd4FjXXk8hugi55gcLy@nk&0po>60kH?V+r9Sn=d^uG+pXPR7;H z?}h9VN!d|)&C3Jf-;gg+cXOa_eQ%EH4A$ytwYCV?ZT|B9-}uK5kZW(h!OZ=$oS(n= z-ktw%?3fFPnm64hS$10IFNsV}N$Lu-2;-!*$JMAyg_1YUIa!oEME6I4iWH?h)h7 z)#j}`c_t(tf~+E#qOgXuStZGDPUKTRo5;nUhB2FP`Ynn{HLsqr>3nYrVfEN?8zVur zOa)ZBr~9#t*jUqmYAKtC#)Km6`@vz189x!I^fKQ}9x8I^OfD+GSiG;NGo}~4nO%#G zQCcQS&j%`Ndm#n9^f-@81B9^RrX`kwAbq?fCWey9coZZxi*w29u&+*Rzh7T8mQZ-k zjfLOsG;L43!Rp$XbX31WWiY_cQ!h{Q1wT3c@g@@m6UJ&U z2|?X7Msl6m>NI_z0`G5@Sl<4yHZnu0MN6PqM%W0RsI`d$#WNfLV}hF(V(+k8T!ZAn zA~3^qcp?Ly$hij6XDvU1e$;Iw8hcL>a=rk%yt+t7dQJgiPQ$rWW5v)D!+AnmT>VI; zaZErNKq^QW9clo)=Xz)zam5M5BjtI>zCqB^L3Ir>y-e1e+TDCF=699{A*IBdgr39J z{hE8dF$tUUz}47z9$j)s)7wyy=;S(nyV#;yMNK$kgJe5zL!_rLvx3y8-~*Rc?<%#{ zp(*u=l^PBByXnX4%g$J~oszSBFg4IIZ&nsG*OAKDt>ohvw%r0hq9-n+q+{^b^`4#E zsqN2`S@j$^+`f~i`-_G_A$oaYC|ACr8mPoX(S~z^Vht&wJRyw}d z5b#0=>lAXcTkY)HFUwHhbd_O`T%Idj#&&krN>lll8b#wxURlfgN{`$3x6GEK1!6X%L#zClsf|R0E1PJrexb7|9-Ig&* z5@Pdt5!sKpNKE`DSq*nJmLg_r3)I6I1V56dxYdX|9(_hN-oZ6t2U3&Y>4R!ll^ChlR3u6Iu+s0W;6bGh2@*Yq{qU&#EP<^TWy literal 0 HcmV?d00001 diff --git a/src/docs/src/documentation/resources/images/hadoop-logo.jpg b/src/docs/src/documentation/resources/images/hadoop-logo.jpg new file mode 100644 index 0000000000000000000000000000000000000000..809525d9f158f6163c7692b5e317153ae68bf23f GIT binary patch literal 9443 zcmbVxWmFv9vUU$H!6AX*7GMY@L4yZLuwVlTGLYad8Qcj3LIS}DcOBeaLvRR?;4rwm 
zyMB}Rob#S@@1O79yLHfSiDX8@`P$Wf;~i05zl zbD#x)@y}-yP-8pF5*6_;uYZk_#&(7#465dKj&=^_b{`l7xt{_&PoD}1KjjtX;bB1a z&-jI(3IPCm8t5OV2)6lRBay)+ejE|3xo=r&XIgcneFCWhzCm>v0 z+-8J|gFk*OJAGr`Dtd?V6vn5D{E zDImZ1S$GZY{c!Q_KcJ*~$ohzlode9rFCh3_NLX6tM0!~(E;BD=pTf}oH{N{~z6I8UKhXX{ z_CEvW_kTk6H?V)>ngZ^jgHW4?P69{(CwbSh4!nH?MYUDNbyv?KckJAy&#lP~t zIfs~$AFDq7t%p>xLL?;?Di}p-qL)CXc}iH02g@Xw!$Qr1+dU`|lx&$B!+P{H zYwP_BJrwqpw|oa!vAMDad;+miVui8y0EVV2g=+z=B zRp!A0-4ekr#u?cHoXD1L-W{78Z|p5U#%71eElMpDLnZipJhr9`

$=|}3K>raVyd0jn4a}r)A zqfJhOGqxrX&!gmDMxu6&`!Is#_E`@s+B%1<(RqdS*j^9Lt^$MtwWcfF*#DB~z{way zeq|^_^I996HZVr|K{FzPzZcxd2$+fy#*H5hs(m*?biKhm}m56#(@bQiEmztirrP~ z-jBf0C)V48)8C2Lm;oe8s*-|x8E6sti5*L}MIR6}qBIhp#nbNUSg{pIeM@Q z11UBKmnO@)D5XrLwlJRWmd*cY4i@6e4^+}M^Qz-BhPqlLqAf2UiY*F973>1!|%fcwbM z>U!yD>8%#g$8nVk%gxP3by3L7@^apb&;q=7!#$H_*2!8`u z9*^3H2GUCMw#`RI(^`&q?u?qk5}aG6Pt+x+Rd&8DFXiRbZ@Y_a<7=(o#*4M}g5P%h z1i#zer}{Z0K0nP8F$xO&y}YS8I5-fg+@+fTNKvyf+;`6EpXExxj(3|&#HJEfw)0I;+rD3j0wC~(#8}G@ zNYv5LrTi*UH`&6;kF+`~ucOJ{BVQ;ww=7%}aVO_K^4ClL3buf%kujL~ zqjSKDxu+Mlxs$j!GF+0XAyH>y=+XT{ws==?durYK#Es_w<%3V^f?vHJH*ywB2Vp%H zUzW|XMHQ7$^5@ZSgoLGKzOHKF8^Y~m`)46t;HMHd4ppt!jH9va>-Ud?-Cl3^dsgaA zFD_NF>|lC6!e>hi>MBi_o}K@A{QFYVs5xY4p>L_w zpf7Q;#i+QT?rq3=&uF{UJgW1ExUW;`X-$dA+e7@^sITBQn?!I<8*u!5mM_Hta?*X%pAewg4R85Fx)4peoZmhS$lrK zZ0Gk>R}+q!G^|vXJL-$T**o({+eqt`%aycKRs*pdN8F~>I;@dxw#F+RGNUL)cAbri z4Djkv&oODL!X;q%zUf8tE@0eDQrGxcpMp93f?H1AH1#CMv^M0CWM}ARLbB!WXjaIY1O0N1 z8m+G=`i#G?4;M^-=kuznS+^TijuR8kFA@DRVW>QD=xeK)Qti(a+1WLe}|2f~u)}o&a z@_nh*qquZsVIF~G+M~jAjkB_WqS2BKV@i@2+im|+Z%j6u<+dxqZ!0R_yLzFZ@$B8I zT>BQDCdSo-q*5=+fuGt^PaevzTeb(LN_ z9HLN|i!l)a#U={&ETK|)3TC$tq1)jBJb@R@p+bAg_w|l!Z|-`BPSsfvRSKLwJdq&( zVb(~YIoO)QNTM0>E6wJgJhsR)wSQ7F%D^l7*~Da_tF+COIUMb9_PE?A_~gJpD#6`! zM##h!(o|&8AasN;+u}Y6TkaMI*zOo27%8y0A$d0!xwb~}T$1P@5Ywo`BTw}50&Fy; zIdnWIdGEbXh72K9jMP^c)C!32f20D{Y;+67!zCHDUUH%R88mK0z66gUVB2t z83E#f_%TxUN{>fI={%Qc5hQ!-!K`m70w$08(0r7%=k+`^ zO23k_Jg{ufIj>#4Tlv;n9R9}uC_!~MZ+BZ^nZ*sKIUn9w@d|#4vpOId38&wDcrds` zgt&Ywc~=HiK-~n}c0Ea}dWXu1Wc5X7KAoJciY9qSd21g6<&I4IfNPX3x2y|UCi!tZpLWZdPqHqlByXI zzm0+}{CVSoN1bZSJRH^(h2bjj6;kWv}$;i-)RKS8vjT17TS($@Q~!lJ5(< zG0A*z+w}@OLugsU9)!Vb0*RK~V@OO}B2nOgA<|@bwK#`mt|6z{lrmAFjV5hAnA6B% zw?xBXXhjqXK(}MN%D_mzis5TOQ1!k8F_olnb>=-5#v|_i5y~?~av$wC0Y)5yYvPgA zwJS626~S8i?Qnp?-XtWn^07N4F@Ex}p8C)k79RjYZA zPODKP@N~lKH8Nv>(;^DD?A|)AD`!}^@8Q!t3bYF!_2-1fZxC?vm!+NOpa!~IBVXx( zXDMH#A5)n~T?^w>ylxs|{o#6Rg(a%!t6>B?p|7cp+PCH+gc_(N=r0w#;Oj|xy44j{ z=jP5u->$%w>HK)tuMTI=lph%A<$T)ebo9Ht^u*tg!mJi?h`qiI319R{qIJBny(gr( zeF%rfOsNd!v5ld4s-;Bh(;M^7FsrvDXSs`7B{Uizs$uc!dPQq-rL*c|EiAQmI)IY|&T)I0VA zm}@j^wCh=aA-Qre%U2exYC-#t_y2&+rZPU_@8IQ7B0oBV-p~N>2ZuP9Wb>@6C5L`) z)h(Cxca$|B3LveTOAQlO$M~ocM{I`2VW~Yf+i%*OeEq7V!x(2*(cJG!V#q;_(+}ru z^`&TmT=dMIetg)(XV+`r;%Tguo7Ii3cW<@V6+HX9)`|V1I0;K)BfP35HS-U=ezs`& zJLG*reI6xcO$*}--t`Hu01}CZY`hhAkylK6qq<$^x+t|%~ZxPriT?J z?G}&rsNq`%oQhbpwHIXm38^%ID53L=#ZTCs2zm3?PpM5=l{GJ1thGzYQS;QusMSpMP4bj!Goc(6vtiEPz!W6M=%3D0}r6>q5N*RzZJCo|3rCmsGnYONzp zy6UdAQ%f_2POOID)wM}ZS~m?73XAGt2y~lfgV|N?f>R1D>UOS48hzUK111l$nv)Bf z&fL*+BW#ilUum)35(&t0S$k9e?^^~7l^{7k=6UjMbQpJdnNFt;TyGEx>Sw$0F~T|r zc86MhUz7Wkvs{hho6gpBsiyGse=HfX<=R=Z!|P^kqWO~8fEqSW;ZsK zVGW7L@xhsB6?@t>$>KV-p=85OWl^qpx+~5YnL7G@s&2OsL?!HNe_zChVYXH3PrW@~ z>tfbbJ?bWsc=A)9p~PSAb~c6{hZ3PpF{=O{wmDtY#7p+=AYily##i}NvJ$>gjmT1# z{|9P&2lP4GwvRE>xQy0Wn4R+Et0rgD0)4bQt^?LvXFAKc>w@(7O1}WQiRi@&H$#Oqwq?cwZ^?IN8FM{)yd9W4^1WGCyKc|kwxJ%Sb1SZO=3i_>PugQ&E7sI( z_!xd;&L@_kzvYbfc)_j>jcxq6Gq`f7Aof`Gh8Gng6+wU<#MluTo80OkfhF?n!2|i4;6-uyf>+Z)M z^?tPMR0U!j`QyoOHY2j_#uPoOYQL=UGc9icjTh^~%EsLJs`St#93g#%4%tbSak;Uo zb4Y*&++G7QS@PrG)lM!Mu9OcS`=i)LpN}_3nE-@NMCHk)m6OJ4KT~swa^a2887U$( zvw2&@sif`KMvUfIYXKJi4qMEHq`6{av)nd4nf!1z*$n|`7L~bbp&*@XHtmP0@?PyS znNv%H48`ksMr1)x=P&yj$Am}ex-4Q}hu0XafI<-yB0f3yFaQivfr)^@hRAOS1D!TJ zpy}2hfH>irB0};_$FO;z20msW3b2Fz`bZEa6iry*M9-Y0vQ$1YLxH|INmaSd+{t92 zpHx-Efbk?874R7}y72@ML{PwmXyQz`RcJODlCoaWaCJz`G{r$6f3*J?*}`0~l_gm7 zv5rCT1yt@IE~MWg*i~C8X?3aTxjZyAjoOI_AnyRn9a#$g8wLV>_>S>PF?);}yqM-n zp%}c2{=mCu$1&5UZmN+!(Vza7#rN(C=6{DBE&T*gMV$!$ld}X>UM>&sUjX!3LGiy2 
z1^gtTM>!u4eIn8K1^kl1Ykr<2y;EsG?MwY9!`#@2mHPM&N8S5!gaZn530_H=cN@Yo zX|EJN9TTso`kj_KCR`h#eVQR!@05guZBzaN1z|6p}6=t>0VMYAvg)+@9xaZpPEnKMrOC6HdMKCz@7v$1d80Z z#CibJ1c(N2q))zKnQ1v1GrVieHjx~rUuBC+bD!F!aSD4ToVHy)bFqH$G&x@aFd)6B z{WR>NSuy8dEED6D^&S=2H-JSdTy0!EUhsWLY_m$AkYwRaTyonq$Kvuy z_XWQqkdRM3X0Iroq3)P*)#uTlF z)Ji7Os15xK3RwRLi|I{#4d2=ylxTFgLI?+%nKb94AB+r|Vs=J$%~nH=hcxlP-6G!f zTh|yRHU)8jVbV)@;a8g_pj@M4gQz-iM zl5IJlE#;N{1}PJ-=C+ve9k+JV;4e{2wP`8YJzYkZaGPgdkCh}4`|z3h7kC3*YSi(2 zwuWId`6->=a(I1ds0&sCja$A09ihe=gX}dOyGM+k^rolgnkn=3Wr7*>GR$L>wduYn zo$i{!Rv5L!x*N3`5ex$b_9**iZrJ+6h@SVz|y%y(E9$NN}V1 zhJ0T<^7n)W_P8inC~NwaGo-Q3WgIHILi|ha?;eEd9x(LslFC)?>9}0;6fPbB z7vQS>OLa6qY>Lv03bF#APKH1Rs*8EBy_IV7wEbC|fDmV_Wf{W#4&c57EJ}H~9q7W( z+E$V_$4=)G9(&@frocHjY-!CuZM-F7AnpOb%|#~$RQ#sUXztU4!{d@;<%^3J1%MxC zfxV^Zd!4q=I;Iw`YLyMM4yuL`z7Q_e+tgI2!$DRN+EmciC zH4h{pZj7dU4%?LU$cTRN(RxZyMKB?1l(m?WtRbo>ZCiO}MH2%N$J<|)z8AjRdS6K9 zTUjL)$8i$V!%C-Mx~MYjsSpa(St7HE8xJ5 zOlO)yO0^b~N!$NQ0yu+Jdr^q!T_4bMWvKcMl57D~+Pi+s599CqHaT<PYW64)cjr_vW6YvHx#VB+|WjQ{JFwp3u`@h)!VTaokd=->jd?dJKn}B$*?J0smiHjlJ zY@t#vBiT3ylc!RrHaVIJjM{3}j~6_hk^ul8AcqVD^K*Sm- z)lM#WT^nqwNdaD9M$n1n*Zyg0IHckK{E|k2cTRF?S=;sP#z}&Lbk8dS>qgVU&^Ysw ztu=-f#@}JsI6XZa(iZ>#*Y=Ocn?(F8ewD|wtver4FtCs`W}66pg94^>bHA9@_D0D#_m{q1i7%S~E4Kq}_&UN)kOMZ%L%I|l zyQ+W_Q<|4d2%+B|A#wJ(Wx2{$6R&9jOlE zE14;~1Lx@8n{uPH?*867o)fP#Ez87Y$z&<Gj2?tVfrvf>nBa|A(P~0?he+_IR31 zG5jS>1HDa0YoFHIFPVul9ia9PgzO{2MD*dhCf{gg zAF+`Xhq|dUy;m-$;Bdib{Y<#)$&sOaWBHAuL+}Xw&cYx7@9W{yk8=^|7E{8-}a@P=)fI z-pL*g1DZ_0L_SvTTtDTh0RbtwH;ES5CEuh692w#sv=R7&?KvQK>kW83=w*;v;V}ik z`xnikg=D}A+fs++(!fO+OOw7x^1Aj+x5}PHg=^L^@NrQcghM$2&P9SwC`~@g@MED| zIN6INJQ!vfL3Gb4$_qW;HKw}rqDPI@f@XPGwm`h}k&{Se0Kgqj@S1IY1DQf!qx~&) z`Pk>XQ@DwtHQDHs{VwY{iIV;Dl*Le&>d|kWA;#@vzyD)$Y=!TdE`r5r$ zg9wfry|kEYaAkG9@5@le%B?MMN_lKsmQo*Bx~W0ou4=*^$rZ2Iy2GMsxOlhsQgA-&y^Cx=oTJ}M?cxM2R4YmLXfqHmh0;ds#fll~=x&zV-08b!9o3oM`H*3pYM&vX#+kI&6D6Hdu9YZFn2I}0{zV3SjfC?# z`ZBDvfisKjMC2uEX2P*tQ+1J+#J!_oAQa`@Pac%+caf=xyn-LGz-LKq{Wr%(C$n_z zJ2RnvdKEWOh;p)gpN$m(9(e3%lgFoCbF}_dE);)`!H_+=7~n_xzPQ>VcN~e zH3%F^itZ$w@F6}p8YAB$F<!|8d}_nwA8- zDDB=)Oh)=$d$>YQ_hSk&>pNC%`qBcPP0O-{50QR#x??G1(R>6`lGOYd0&AwD@OfVu zyOZfB{vLg4*~jjMIPB6BUD|y{q$^MJ&NQCMSHh^YKv$C=`nTe$v zv?Yx-%oHJ+4_ljYci4~$OoaB)CCdxX^S2F&wyqZX1-3Hsb^vH)|0JpmFX>SNs_XQC zy3<8&;rflY8q|-wY2Et29SfL2;2BzUhzTVgqj}G@zofuaBLbZLKL-Lb4m0CAn{MjVqo*Gl4{v;M5f;Y-M5xH=`OQ%=2f|pAFT?E?=G0$O0_zeFko=d z8AuMcWmt|>ANw6*55`Q-Sk{<@(eqPI^qR{CmBOG59f13deA%2#<{76anJ#=9TzEx2 z`w9JT$Us?Pfo#j7d9nX7ygvYmb+lHpn(IHyKX2(GDd&f&Ci(@Yr$HMHRo}$*9B+nh zL_Q$HyUEfJb#02LQC)-5#0FM*LeB(c+uIrCyw&y&brqiBi2~JP-E8q?E+zv~akooj z=SfZI{{Rh)U1%RpgG|ve*KR3rNxrF$*Av0~TX6aSFn9SG0XrG)^ZPldYX?DR)tY_< zIJ$X5EI@7;51Z-A^=^EK8&tJ9An*4}ZLS_PAmQ6SYD!fguR%lB!~xtqTbQ^g;h!Vm zH%myrSR$X?dq0PG?a&Z8tp`vT|2-b2`Hp&0R0V9`X;C#Nk$a!&lljad#Cn+ zI>)$Q5ji2g)`arwmuj?M;B+xNrUdOp8UYS8>yHQ9g`ejUK}7$ZqW)u6{x7@U{@9+t zTg*V92@=&RsDC@1Xr2Ecalj@Cx&w{|HbSTs0L%w$_OSNAa1LPm&q0QV`8nILpXz>4 zUYzDZ_h#+(60y2TcpNO<@*runC}B4Xk{`=Sxxr<~XZ?7YS|aAT6~NAXuI^{uYXgDO zq0cBl=|!Vt1=e2Fs}NkCX=>7NtATd!1oI_`p9#tP3!DY$M~aHQCL?j{=bVS?o3 zcN=(!h-rE-tMxdLAKGp?>Q{Vn;+;{!&dWd4zPqj`k>Y!9pN1V5K(!O~J6^o4!vS7{ zGP&t=gU|{32z z#q!INMl)$({QeI~y(UOrSL;su&W9YaM)5M->j#8G~bg`TJ}Dz-?4c+`eU zy4e8<(xb<;+i8n=n%HUNQj)cuuObRL`R+IByT`zXWY`r72H_k3Q54o*RO2hfGe8_r zr5sf*CAdfCA1V?^gTzoBz)E7V>Jz?;olx zd4=%*Mlkda1$Ulsjv6?74ng+@MhAZzZ!7^XCiG8v;!Ur`$(RlI>lO|2bM!l@t*>6v|5B3Vx^rOw}PKDA7_#t-EP~d_$NXz z(R^`wl8^0QR{wOk(!hxc5e(maZ+QRcCx}i0WvIMjcpP;6i%cZ^fTBAO2lk1SDfy3r zG9hOx1q67XVC;giL=f6MjX~ko4c+U!T_B|pbY~C4OVQ^hws!;OSF|O=Hg>mVA|Cc0 
z?^|;-lIREqzw0=3TwL66;ot6gwES+4X1tr@{=wi)Alchi6jC&9Y{RC>m}?v5dafyS zPsH%!*Xu`KO5ZC6B2jLx$);|VQdNuf9y?~Fh(6?w)Zom$0;8Ax^ggB|_^K<9cWOtN z(CHeX9?Ln2S-Vzh*9~!`b!#N68e0^GW@sG*2+V#Dr!9hintkrq_yY2@M5dF4Ec;c0j^a=I~;o)}3!A>I8s`+9uN%8v}=!NR}W z@Qs_BblJrQ{$e!I-|i-YWppfRYTa|Zpk2kocO!V~Jy-gG>?6TjYDVDwduMWGqc|Mt zsH@nwO26luH(rM{LZ#>#_yI@v^5Gwo3QlI)9if5p2kGnBHyGpB1=;3xv3N>#{C1EJ zN8oCUhwJuKiE+{9hU3-xzB1>vMLui1exXXE8-pB%h2XT@Vu)^dAE8-Ca>ug1?{I}c zhikdynQHuY?fjQd@D1;|4;>4am_>Y+DtyrnuQK1=wq}cEdL6F0+mh0#2}F4RPR7IP zk^CirPADDqu?i}dq922N7L?i=alGCm(HXkMJ$R2ixnhYeCg)rrR@2{YYXG5GU3Dz6 zU>FwYZ}9e9ADPm3qmmW4(C>}MPopqKB|0w6UcZ!ZJ!+91RX?<%2*1tr#0BV1jhNUS zq4}OTh1sasHz7zab5`^F6+{$77Low}L!)<@f0+AevaZe5 zZ8b1lZP?5*XT^%Z@!2I_F=(YkMak<>Q={lDiMdh5XR2oShrC7I?Fh+D#Mco2I>irW zn#U_4mE$`6TNJ;N!Rjz{7fZOkILbIsjzojp_D=2z%a8iiC`2!Wozs;?G)v{Eph5^n zj%8apS8Am^Z_DoVld0#ldlZo6CQqJWP=ntng$VtI$B&Y{0& zH+u1-g4J(4^{o7-L0vX`F_+Dv3EdZF4SX zA6~yalywF#S~!*+n{)jp!W*lDEU6cKf*6PkF@9ccS*u>fNU^CPF8qf4^^o|AsTS|o zYi!#TU?)2{aOafMXs(XWYEmDo7Uyg<#}BRWxz5I_)|cv)Ri_LyUjJ%)Jg%-m$?3DX3!hy}4xc}(j}kA#S2amI?O3u` zIGD{6sdIfee%BctwZKw|5XP0BPwBB3DkRn;xzUy7_po*ajKDgLfS*5mZ?85;&~}?{ z-!-vFjIw+8R-a$5iSsRQSC&USxMUS0>|WD?%a%i?BfH7jYs#_0f0l(=r7`cU(%Zk1 zVmi7Kp)9Xj$aq(p3px83UB4U6Z2YV1?)Ci;$I>YyJ})pxfxt&1&FgNh%1;07`jDTb<6+({dzF7Z+W)ic-ZozQ7bPO|#9qpWwuq-TImL(V z-9pRjfp#2fL}D9T8MpcLQT&SaGp6M4@CBK>5R6H26}osZ*iGY=yt@ykT<8N4xa&3M z*hfm%DJ}1#4crZ4d3%>enB}RbMbCGd+THtM1VugQpV)vn5uw4z2$$u5SZ_AclTr1( zpEvzpA7`_Ly{|@xt4LfuRr$V`Y0R*#KZ`Ydg+P(7MVu)6G1r9A+g;oTjkkxFX0od0!bxgX06Rx zk0vsMcF7FtFUR2-`HKeEzRs>k-8PgqQP3$_nU9o?CKG4o5Svp_CGDp&FNn{y`!z5A zA>CW>u{qN^f-@r^dNkK(eR7_?GvC)$X4D;3DKtcbR`V3ZR4E$qBWihSx0ma~>^G09 zj46G;x7jvk--n6?9))bSYTuh`M=$hOeecU%17hqShaTjrk_#sMx#MqN_laJwyGrUa z_L}F{{q6@L(!c9WO~NaZ^tSVgxzc-gJxS_WAB$18(>ygtCgV_ba_4Fw$9yVP$N%Ks zC9`^rY5ZDR%4OaXu6%91e*2f0pr$~+>j3WptuAJ#SAZf3V@KkL(dL>Yv!9m+T=~91 z`&|p~N@(lNCHw9i1G`1rn+vD8ko(`)qH#ZEyAH2Az1X#$x3y6BH1U@Cw`VZNj)ke2 z(EVP-n=g~trpCB%Y0~u(V}#D@`AUS@D`wMV#wlFANGRVZb2vYk0g>U0%d^5;DT z*<(?L0pM_`hD;RJx!=q3aL0h-(j7_rm0ba~Mj=6J+uiTv4xj55i?2_#RerI<-I-0Q zi`BCPsS79RK6jO`xZBGH{C4{Nqg*x_wjQ@A+q(GbBMV_Pt%FzDt|HBMAKpw19ElG4 zt0LH|SAj*B*pG_x832FRgJ`!AzyiJ@DNDWx={S8T>i6d+>_cY6YzfD2UmcI5_TiKg zDq0n?&>&x03~<>Y;^5oViB_T|JxBKCrRpz(t)8EbS|cXYZM+%E6ssy_g@-a4!NdBB zvqLdYbD7!(t(I4$yW(z*QPVd)VK<*L(%~g_YEN(kqU(emM3YN-4#~f^K*4AI?o0m+M z;60vJkDU9s-|n0AZ3I<9pmYNm=9~VUA_?}(|tY|Xf~>vMmd zi7H%iruVaVDfg1y1-G+T3+p@l)TIC#NyOoRr9VSg9*u9MQ0WtuG4i}h10?0hr0QmA zJV^hv%AOvTHU{VK>Zg9%#-z5vy1dJYrqLs{pb!bT9x(pz7!|UPxwWgm5eAWobzov| z{b!13b1uddTf1|cW#V|%JNZ+tD^~66&AjoWQEg{jkUiU0UOsBf?q5waKLo8&1&D#% z&D6Z->9%N9zwV62dqR^2UDwD>Qw$GZ&z$v>bn;6YW7{hQ5U2@z%SaSCW^wk>Aj)kcu@oA=dYV(J7k&l|u-LPLZ#{uq^aF{hzmW^!;moAt4wF zg@S*4v4`yo`iNkN@&%9N3JXa)q<=QbE4HNexypBU{&FJW18y;1+~$W`L`xAEiUCm{rd=pyscrYT zKIEI19em69B$0M<=_}%c1~QQXO*;kM^W@H$lY6a^NyZUDQPPS*r^F5S_9NtfQR^o} z040z?$@ML6Z&Rw|g%cwD`XEZ_kHBN(Ef=_5uaZYM82eKSgp4~3$6{#~2EIayHUa(~ z1+IZx^}1~&q`j(ipI7&~=zT}D|M&A@wjWQ~L>OWJQ|ICu>x!eoB)!><%xksJiMtI! 
z#D2gX90>W}sd-u3d1|gBKa4hbUv<)uHjT)t4!@<>iHc|UAPR%5|r}V50f!k-1Mx$&Nf{&{C%aP5D&8d zHvce}dH}u-DX<-R6t{3yJxY^KNKjMMW2{U1wA*LbyW5v-B6!(z_Z~5(^B11cmbP!+ zkqL&bda&*l6jrC99*t0onN(CWi&p5Bq_)*exUXyOQ=Pil{ zvYXmB8Kb=JZ}jLpRCn(|t9pn0Rp$eq`UgDe8qsFj81d4jVe(~W&P;V52|(zFhIKM3 zOB_gP8p9cb(#pmf{lYUO^A)5^4YCjR-XBk38Vqh=Sh&2BF1$fvGdkXKMJLW#MYLnA zgJ~%@I`X`VwjVVx+QziLTI0Pkh?()E9ERzF&~dXJ)vZt@7LzQCBhIy$u?xJA2>Q9< z%Oq4~4ZpL~Wa%<-oe~qCwj?0@5P|K?M91o#^I+kSAB;>-bc-Vm;B8XAnUtRI8AuJi zNW+rUid1F@wAIb5h;!+0GHh>F5hQ%V(cH$z|yLjR;(vH&Vx)7B00(HqN9l*iTkI7ZM)W1*XFv zYQUKLlf7UC(!i>3Xl}>_l7g2N_FKVnsNPEmF4Z`>zeO+}_RNcECLIDX0r4gi&?k-V= zMi7vakZxuO=@1=;8XATY6+ya(29ahM;3G%6L0TFlyoc|1?^<`=Kkk2X-gWjl@7eLj z^SraSPuh=dHgv+6$_NEA%)4|7(#o)M#&E71@dB}5{)%DnS*KrW6oZ`hzS>b?HAHyc z6>R}EeUmBlDvSG`(Yk2t>-6XdLrEG*j+fjuaWy`o8fut#5@FI(*xBB^fdne05enLj zp2qlHmFj~Q#f%c$1MS9o>e|C?oFay|(<359hf(%f=mewJXy#R!%t4p!AN7$cbsts7 zaPzS3a@lV;wH~oDecKM(_v1+BBSN(MF{0)>f3x^Voz-4vPV+9zAIe8cyiQ8HamU*I zoT_^bO7@CM#aNicihgqpzx<2xt&pl8XdxrFZK9%)!~A>A924shvo^glw7zCQ!{5U&s@% zf9CBcD>iYPrS*m7hua(`lkaRU^kvzfgV?%V)j%5(4d;%~w4+70PLvGDs*5lYG~^(5 zP-PiY5YvLw{!A#1bdk}Xo_WBfUAhVvDFj&L0Ld{R6R&wnN2wGK0GL9=z&noZCJz$F zxWgI&W4`EJg8;-TfH)ft-0AfUj_+A}(F4}W1hpIc3YZH{T6bQ%SI%6xTV9&V&Xi$w zZ8p*s#Df*^Xq$oCKw&C$)jczhY8!Q)6;7}wDCVSm`7-GI@EKGRd`3c?U$VI|48lXY041C;31@cE5$Qt&EKv1$OU3Jd(eqExUAR%^JDDP-)KeG_#jLF#mEyyQ5%lFt=qn&la=5F^_nTePlCmk({RYtXZesCk7yf>W z9~Wt)6&=&LN>?as&u$5+C2PGeZ`ViDOV{&cW`V7?_HN;{Ip^=`KQJe62`n2cZsy0A z;i__WYufZKV?2@i&FSls54Asa&PTu#b`a2Z!>up|pn{{Ex^A@5Z^#xQ-oXBPYa4 zOp4C~%VK%D1}&bNfzm=lSGhqOU^;!l>HW1(^L*Z#ua@k;#R;Qn4Wn?{g%g+ySLYSMT%TvM{%I4zjoxDa7_48f2JijZd_?zfrWK}v%!@fXU@+S=%H-#tMaFJQC~k_i3^xcKK!$Cw@G(Qj$takfg!b1^HzBR|7ZGDwj{>S0#x}M0q#2 z2wx03asBl!xin45MGv?Lm_>C7z=#WmcFv5f0v#}21$Cdz5= zc?K^KZ+UU?W*M0xBz?X4OPqd+6^Y~`VZ!vPL8ZKgO{gV~nZkj~kVK7Tt*R%vI0P^B zYSZFaZF4F3olEQ|-h$C2Zy9jEDiK9{5b)oTWv;b8#`mFCo17fa2zCt z<5^uSY(mGeTf=ot&%Khg10_{uSWLI~Z-ThXDIGT@F=`m3gDH?lF-6Pq+VeU)2NSk% z6K==LKk&ix>0PVwQt&JYwx?@-RAk-L`|Kvkt9YQi#?E7Ho0IlKmXz$4#ivcJrNc*K zX_8reB_3tX&pN~n?F)@{#wKf=p2;e58eyhGs5ysQDlVDq49AaS9SzS=yowHEZ6yu@ z(TH|EPPw$;ok(;+@5RU>)PoQ6V4^6I}?p{)0%AQ zSZN%iVMzA?PiuO7TUcr%V6~8>@(LQMgpC^WJ7c;cBIgFt8mRt29e)-%W9g|Hf=K24;q*lYyUS$7Vhj#dxQ{XZw6u$zXrTSL%C_nDx18JF974Vui0ukx3&6$VJ=mE(8>3MM+-Z% z!+*uJ{NXKai7xbTJ!kpaer6tVxcB*%l-qF2>M^^Tp-4D9p67p^B?sXZweltWT#Eo%{pcahX zq}b1zK3kYxeJ`;4`$ksA;D6czs@$@gos{J?+m&N!^QV<3L$eHzr%R2L4d!zK2J%mX z`djhBhz=mEe=?;Gk(=tt5(QXwO{r*=^?<4d4H)#6?mN~Ys6O+eAK3QdyRNkhG}mQ} zAwqzWTSs!Fz{71;FTxq_CQ`vZuNWTM7-bLCkK%jLv*F{Uy`4ywt~BoYI4dt#$)yE7J!M^xYH zoxPSVn@w#XPYn8dIS48jVj3Jx@W%J$=S;4;-fvG7KzC3Z>I%TgLU_*NG@u6nsYOud zGU5{hzD|(=8k%!HZOpmwV9#q-4{#L`&$oYSw=}?7@iMz1n`Kt zXi4f;DRc#W@!p?~=m%f8^tPF2vTg}qJZzr@c>ir74mDbKA6cK2`nHjTNQ;=+LNdf# zOHVQo7T_MrRoT_PeV_Pn#G+zLSqm6YyE(xW$#Rt#5Z#@ue_o~K(rQMjlHM6O{PZV; zirwz1kA+?B6k`t4SgXr@Rd1HLqPHsQ6iSu=9iOTPtIdx~nA-W_s;!oohvs!o`N=6M z=q^|sk!GibU2x0u-k-=9wRY-tPlDua@nM{&griTpe^vo}pqVK_%0OU+U`~I}rHKLQ zC-Cije4QJ${gmT;Z(om1*|g+YpCRS&s2z_8o>AJ|xREqAb4p-sdgoKnfL`aqCa^7R zbdHPBI(t@1fg^g)ZBDM)=n2wD`8RqXS+R67baYjd1n&UQ z@LmGQiq~9mB-AXqpw@|2%!A)+9UH;_T4vyiU(c)2bn@2UbeV_1ezwXx#^4}) z&InupdLi-{NI$DMxLS{j|L*aqn`bQ0d;o!Hsebo8?qg}^LIil&V;TLXVZ?*4PRu-F zX)2)VNvMNCzQ@HGnExHL3S#ui`?k^{EM`m~C#wJP=1KAgFH6e@DK}ivk)L0m=}tGN z@6G6(H&b0d3MKlre;GX1Q_M}s+DrT{m>eEUJ5dtVeFo;Rd}uoPd+kN?(;)o2?{V2d zem9nLza#tnQBzlYJl)+tbItz^>27)T{qG*9 zi?FRP#G3BrA3oQ%K7E)ciUg?#WfQht28yU9fbOmx?h9@j*BsTo%^O4<3jE!)?3}o- z=hr;#xUSH%Z8}f(v4CyS>5-uXLgFvi#~Hs+mrMtqcZ)hYP7gi{VOVYJ+*OI9J~B60 zk8$vGy$9R}2snj!&*t)-qkmLxBorz8lMi=oe!9;uM4rWG<&^27xl^Q!w`b*N^$snk 
zq52>W#`ZY#oREa<4uBz@)P2944EUhVNIj9&x?7*)lKJ{HFq9!TWQL%4B5^z8@&C9F zTGo3BVdqq8Ay)2^LSna$>SWEj3U*(w9Rw5*g?|A>Ww1!$=BWhX!(#BGq}41ow&GrG zl7tDUGBamT;el45++`-O`KSptA%`_hKS(%+d!%P_PsV1pZoONqf&%WCDh_3G%2(BI z^mcIZcH`Sp7Md^Gzb;--+@jNbJ8lhj1Kr?5Txk) zYFhytIaDGQjS!Ckb2tV0ew+XGU0Nx9HX;8muoa_P0&SZsop^=5)iN;U!;*zi6i9|; zHX-^4EW+GhRjul77Xq=C8$M7^(*q2B`G;{r7BJwrX%p#j3IP+-_adyzE<2D;T+oJu zssbYbh|UvOAPRhNoPTcVSr&2J+gcpeC|ve{ALFW@Vvn4`+6ReT6iVQ1fgU@;qQrP5 zT6&9wIq`1rO5;bXgXPi>s3kO%TTiYNly9TA zubkH68EI1nzMnvgnMx0I+}M?uP5qXbdjE9Duc}!&3Y<=AdNLLq6!cVoJ!+|0d_((G zy88edjQKmx^fB0~2_3p}m4AQm7+3x=C5=$4V)lUSN%hj2gQ9;H#4jF4pnL(BJrT0l zBaS*COyTn6#P4l8z984Iu%5s=YpJB@9iwuq6d5T9lo?h z%T!*tA$%3}-iuA4qi|&^`sjh@J%xbJ&6C5C*$EK3E^w_QVPZ{AjJ)Rq7I64DeC0O* z;xUWDm*i=pnEMUw5Vbg|93Ev#UWjlP*gG}1Qc8uC`Fp`@LaTa z4Ty&p4FDXY1~4@RvAC-yz%;{90WsZ9CClB;A3@&ccMd3!4ePkJwI*fn)UFoY?weKk zQ}se_gAkP2hwW||*J*Ya)j7Qe3t6+yp~&~+aO>qeQ3C)^U$g$Fjizsw20+Qa4FoT7 z4kDi3mO}j+wXU6de{lb!2R!|aZgWJ19A8Wdn7RFVWh^}i4b10bacY*<-O1z+o!md) zx9Q`NVEwfrjTBj%c(&BSt-=o|nr)ofK#stAH*wTR08G&5zEEcz5&kDlKVJHKRcFwP z){N#coDU2-KQ=v-6C~+;LjY}QlJLtR_3iCaq}K59OWR!MwsH83xeDhJj*?EpQh30Z~plX43~@; zf5Z~BMnF5DlzT%fG=k`FuoUPL12QO4+B&oSRk6NL29v>W- zX9_MS8&G$Sx~k@gEpfz@=pxsDHAsGBfMXq-4rPM#`_8cE6M4+v>q@2)=!KQ?q(rv` zChmQO$0r+J6epC*jK647-zyG6V(Q9sY^xqWeWnB{M(V_%wU1C>0d?P2(JUHm%LotxWBw)D{|QGEnFT(z7DfhCQ)^=by#PWrexGp3CgZSM?JZy+}y9z3qswovp_Jo zMtJh;DCnK@9nKxgZuv$sT;u)}7_(UIT#0XrwaMf9sv5gsJcMbg$j3e+-8eM?$S1>I z6dG->9e*T#R4ywJCN3U0p5#1fZHYF%m!Rd=WvulJgNdTm29|0mskCwuJ&qL;a9ejH z?+Am0YymH~2YVT23oOxCpxhH#^xWr3&~EO3^HDOwyMTqUG6&kGh`;~eWYkmwp?Vxh zNFQjgKf1AG_h9=jvr zuW7YuSi92#V&i!@ zO>*z7+e{jOZ&-x^`YswjU<&o6Rz&~JEQYmhoI`Rvu4^!fGrigqZ9l${5P>29qYUeI zZVzB`0H1{nz-tK8`TGZi{Bl-x;$(-G@a8)Ar9Qh~Dt-M~znoj$l6Cr5V%K6DE=@!U zf<>GY#u^@@iB_1eE*%_OttD(3acJuEUmz)_s*D!3a2?5I23AjHMF^=Quq&l7+;$Am z8#q<3dA$^}nC*PWvEuuE40hZ>Yv;@cdj~1`Li0>?k{_;_p|CX`ZYA7yB_OS;g6>Q0 z@zNdY=-L-^Tf|VcLa8e-4A&H3%twGorj-HW8dDGH5dAw>BaKeRhD>G)J#2F*=@|Px zGoACu?o&|M1rRK=adL6mmlSIgzle@ep0-u~+oqL2G<;YkYv*nkP{h)~0m4uMc|g6b zAqwNAC__A^3SMpTi;Hoc6bdOT&)O7V_SH4AXg(7sdI%CS2Bwb^S!5@q#35;0);HgR zeb#g8cln@%xz811Z+Qd!IzKxm67OI2t)*p@br_~7{_QLhGNNkO4&i_L9FCvCCX3tt z5AcoQtjaRBU=S@k@G<7<&`bw7KwK`(xSZjEJ^{+BNY<09d*X+M1M_7?DDvOmdVt&} zNf$p`AA#YLqGZIedH{?zcdFOdtBs#uR#|V_==6cb0Tdn5)*p7PhJ)2vfihtr*Flnr zYskFPPFA<@hQ|V1#sJ@vS(~nj5w&a5X6_dmhHr)R$AE;?0lDfBgvecl74$DK^#4^< cFa54ek;oIKkbLkl^m_THMn6 z``?%M6Wn<*J8S3co;hdF?93xA`$I_hmlYoE!wV(Ae?kucFaZEO0Kf+TK>WY{E5L_W z|1}){jWGc@nAkaZczDzS0EJfo19kvF5+E)Qpf>}UNdkQ20e)rx1Bd@s0+|3n06-nU z?j$Y`WO8s4cW_cySND+z8aM#`c>pEqfOH3r5_R&%zzhTM!vK5|fd+|ADIh>f z3BccDuo=Fw)BqR;~v?BLMw$mrPk#N^cU z%zmuVd)z-%YB1nyrO`KrkO)%1 z@!GSvM*w$8%3qbQzR@d6a+z>i73FdrC*_%Wq z?-_R;T=bd-@E)`;`?FvqPbS{#H~8&}Ls^-FXEk9JV+jW0P$`8t?r%d@G!(0M^ds27(yU?rD`Q(LKubpP%Typ|<^Jn>vQbm*5g`jqAD5xJJg&445|2qU;OO zR^QjOZh3^^or!x(Lw;1wD@@le&ouBToyMfo8wqBA%vhQyJ2)<^Q!A@WNj(XqF?BJJ zC2Z52Gk-$YuWeUEEVIN+pwmDfLUFOR8vfScFOzFt#@5WUua<~d^P;DH%NF1SGop}A zctK*+`VOu~`#ZyrUSs9&83Hmsz%%zWFh8B_n?XGMik`PAt-WcpTH#UW4Vci;_wU+;w_k-Ow2 z_6j?4vGnLqg9^)YboNu|$zzx*B|Zt4sq(ebTU1b45hup#Ztfn`N0Z5`Xua2$CNjw9 zUe$TThmX!E4pMeotERTyZf92dJEo9T12Is znOcZErZ+uXu6~{h$GyDTrGImDU&|d2Tr1k1FM5}<+SLnKflUxx6o2%SvJnkcVZlXP zXFslIZeFiG5LJ9{?({ffBRA0bI^FKwCEv~VCPqeIm7^=|cSc`eCcN?a&b?`P`4&P% zJ*Xd(YuZx7uO4V>&Y&*~C{oOxahH#Lvx5K5W`_@Pw{Xg zmxs^JCcmxQKS|f6jrHc6xNIC6?0?h#alt;M!hU@sW%}Nyo+C-!f&fRywDt)0jemdKA{q^E7Rlv`DD`S}xGycj`K@NMs5% z=-PCeaU4rZB9bSSd_yJeU3)_JP2PWf(kOC~{t`9LX(l)QD3n);TZH!j@>X6RYLe zotGMsm1_0V50uB9msu#3>J;dxR+OK^9U@EhzRylJ&YqXMWB1euS2S90&MW-Mcm>sG zH9P4qDnSbSZ^U%8dPOd(VnTS0S>BccFw5cn^np8~$($u(Gz0{j6?M9&(K 
zQtSkzU_Ck_06^?(!Jn$z$sNk~P5twBu2AMhmn52`Evn5YBmjSg<@c@SdUnqJmim{0 z6ZOgj{NJpU=hrxX0=-Z1#R_6w?oM777RI z!J8V3Z(2KDioI9aE2=xYY-?Q!hUZBK=n~~JKX_EgKVdEX*^-GkZp=+5Y--=q6$J8@}a)iFNjY3h} zwxQK%@YmJ)&V0=+7aX=cF`q*#(1MD#F`B`n&njekg?&ySCiBZ5nHr^9Fn{-E=b6(fN+cjNux4N+R4tRm<$k;zgR|1gd>i+w8RnQzi;S&&M(R!C)>I6vB3r@qdF{-K4J&dbc zzqdUqc*b@?pyKU7r{uDz#CTca*HZEB-|f0t>tBqt_#QmN^ADl+Lhq5|w_J+FGf%<3 zr7fWfIFsAF{pEp!A!0T!+WxP6`rj!`C76bPHTOopy)R0rVd38Gh5giYdB4|Mfy>o} z-=TnhXqb0msPXQF(XN0YKBrUZx|<5rr$fL$Z_10nU4JOlKN8_rfe2_v;6DI)(!v5L zVLmUfte|Y-Q z>o=3w)|=Z1Y7e#8+A?YCGZrBV`YvQ6rKRwE+U#6f?fI3%N06@|XW*8rzWJp67=gnt zP^c-b2P;URLf^TD}ya7E?k_Gn}5B+kr#w5nD)u6Oi{jENq81 zDrnMVh{m(3$BMMUr&aCO9}R<%=`dVx^hW=KeW}p>Wc{h>;3H`8qGW{4dC2#PkTL04 zdg-X+#0aNt5XThd2z%7dvfoP-ahnc9i#bJuX_~x>K z%1O{iowyiYaEt&fI2iTEE5*)55Gt+=mGOZ36vt_zpc|k#2}`I9oyi;N-!_P3Y*D1P zELeFau>wEEa`O+@v@xX&7-E^g;{HI?KNY@iNn`Zj9g)iub(kaqtudU~g7iDafIlaFLma1RaD2PNQnB_P3m6CXtM}}tGfr_498?`r;A5NBI z5~6yqlM=b@xXir%^jp8YE=wiyo0cnG<#JgL%SUWQKTLx85a%Rr7WV2yy>IZ`PlN{w z2Zxcwgp^c@pevc?vfhTJNSK5Awkv0?N?(`caPym zD7#yMdRaGp$3keIj;(QX9ZOjKRM<19mqUlEOGHX?qUKXgfks8@LjBEJ4;aE_>ksm# zzP>R=%^ef~O&G6-m06{W5mb&%+3pnPS8s=x4kk$@190s+872imLs&5-Vh`@EF zGgEo|iv@l?{?Vd&^XIrYtdb)GpVr!vy4@Ywv>;+z8J=|tb*xDy085K9)xH9&UKb}Wu?tgcCT-Tt_*u~-RP{Na zllV{9f?79}UR9(87;(|{ZeNL+TJ^pEgBSD4LM;kX-aUMEJ$#?rspxu`?f?Smy0W~z zX^5m}B;8u`y#{x^!#%x5pZiQm`$+ZrEYkbFdG}e(_t^n;ZSMMN>pYo(eVDSk=Xr^G41Q{jok#FXf>XdT|dC*Md&^teD)Qis4bJ_x{;f2Fm`7&IN*$ z^?>E1LHs|wP$relVe*y2==73?mZV=puDvzD0o-ql>K9(6Des>%m;vs)+;+W$rFr@8 z`8*W8fsa`}q9>a0N3)Ta89)?!tTrI>sryuNh|_c^YaUy}g%l|4EttoM7Z$y-e*|#+(Oi*ieMCBR$TlSWsb?>xH8s4wRX~r>HIR+r&u(e^Bg-cv5s7E6O%EX> z`^lcT!jZW!tIl>ZTwd9kw4z3P{k@zim6iE8Sa5gwQg#!ioMZ=c(*Byn$)u+a+r5= z?!|;&s6_W3Q%s?cmw71Ww$JLKV0MQgcAePUMFLbK1HK4VcAGCP8^b=!+pgY9I>4l`7+{DX1ym2vYz3p z4GL3D8~WG)`>wGHn}UsztP{Ri2YN^6W4sc`lme${5Zk-oG&T#Z>dj#H3gv4%8Dp(w_v=h@RWkRXE&K24u!DAEGkr)$@|8~w zSkvUa${(4zcGdJ6<5KzvgyPZE{%$ui9T&O$R4IhV?6^enh~-U-VsbPuGHvkB8kn&~ z^7ilb?G^|ruMF9Ic%%@&`18tUC!)=8w)GftWiDoKtf7%_rFddvQ1&fr_e+z>g=Cg0O; z)wu%S;S26kg1Ok6q$ZCX7NLLXP`{ ze7C=`MvJLctFv6P1<({S%wO0=s|Q3TOr=)eiR9l=m5DVO@$o~7Dq#0$2qyK+t9iB~ zVrF)r`2taa6~4W_9#QpMC-hyYuP6#%Oz0R|L}mR|Qfj87KO4Q3%hb4tj5tdEa-ngv zK^MNCzoa1Q_j3^5tjrTA5pFL~dP7^W)JU7+auwJ${rAh`(W$-6C%V2SFO3D~aO=q! 
z|0j6aIkhGhG7kU_#+z-3w5%qFCzb>^SiEDubR)oW@3VSwq=5MK8O<1P54Z{f0*%_REwWQ_dciUxMR zQJLj!gXHDYsvX3CQ}x$=zKSX@DpZ%C`v)9t- zvNzirt~uY_;(Z&3Cs1vQ)M$`vJY%QY(ye#;w#iCxmA4WDlJC`VC7V@?P!Yt&)5L4OgJ#@?LfF%~tpxz3lk%noB+5HA;#v-RNk6$a=M zYV9eD=?-+1xSooD{z8`-TOVSWMd!=fjaamJ>&=i`Wlk@A7~@opLK%L8 zenYrZ*OnptZA7dh&&!3@0-pzirXp8WXp)6Q;?`35zVykM#PT34OycCaw2UDNuQ>4@ zO`cP2B-|uQWhJbYnVBZa`w|!W$b`z8rCf-CSyIi=&xVzp&#Mqy7Ijk`9lHI6Z=d^=_8j!IP!)o05*L1gc&zZ}6PFB@~qdMe(s<^LX10{w042s2= zq~uq%AUUs>RZJRgmzbS7JsJW>7w&rqJM& zDd-#)lGYyr|JL5CEd0xPay{XDz ziV)y7k_a#Dgr5LU8-Q|#-zo6&*VoWV+Vy!S{nUlWIXUCRrqGyAf_}hSmWwEEWDh0B z0q5A~IQe&o4IwNI2D2PAirLo^TMzo0&fH-?G2HE`4`PY*UFHxk_^e*_Crs)% zPe+ES)<*%K4a`G6an%3Jxk`#I@4hiAXW?HUrAI}&aRHWcXyY5~RX+97&t_dcd;NW+ zmBDE_Lj90`Yx00hlGmKDIZ0jYGmw1O2k$oHgO%64##C4(c3k_9bVh~o_vY4j^V#AF zBn$uAyx#Ua%&Bw3hnjS^jH^eg`D_iEezXCSMY=Y#_b1_6WNBpl-txWVG`l}x{?y%xq-gWGC}xElQCRpV9JCZ$6Jawr^Q|fMLHQD4-C)&Dm`w9O^-wU1l7f0gGLns- zC6s4mR^mAk`MbDxk$fR#c|R5SzLTIAsTVx?tP##s9$8j9yFc;n+8v|kZ)$NiG9 zF+&4`>Hdt+TT=Ege)HTk(OShW5ahiQQ<3xkOcbXU>`X~ls>}O;YcO_rHND>Q$FEwH z#77O|q*`usA;{_vi%lNpYYBawpVV?jJ92J)yo^k9k7`Y=x`Q7CZ(#-q_WhqP{()h{$>q7_yO--EZiZR>K+05Hl-04Y=;+cdeio|@Zz9se@ z#ysqz9nVf42zYRsMFkxA#IVa{Vnw0@4PJHTN3^<7<$hef}e& zkkM68Qb8bIMieop+6Q)ZAQ>>1@Sf;is7eFwgkTay^_tb3QnElkrAcJ$X(6d~40{mp zARlY3>XA27?^V6moQlpDslbyqlZa4)#|}OYL!C)ad6!wKztCtkJ1SokMNrKfpA`0H z$`&Q#x9+#R_u`6YajjpZgWAQuV-593n7&r6U`tq$j6opEv`l+E+#HvT>_Qsc3qsxd zsTU?YrqtUO2L0V`4OQ>XV{IZNI=>hjj5ie(p8FS5VVJG(}oMCDi@qsKKOMfW@X%8@nE2pgZFr zk`d(;Lmoehw2d&I1=OWjEuOG`C5;rUaCY9)2R^xdyqxvI)A{z^IqDgig5ue8G|7SU zyM(>8HGFV!ovP{YPf*LMjHzq`1L|bfy7s;i|TFK9WWx#@6ec1(^4TgbGy!d(9ka%>A3vwJDfUvD@&`ymQTHws2DP zU{Br`DcGiqTn|4~1oyKjbo?2Z4y!sZtO&L^KhP4>Sh&-mJt}8wz5dlRa78xM=8A1) z2^6E(kZQ|Fa^q)9N?IdzEDT_a>QHGBPSa~y5@26FcxcdW7nCM4?>qXbmT-)s|J}wq ztw+Xd4?$bFXbHtVEh~X^hu?!?j%~_9C%4R)1U#RWk6h+5k8w`X-l{v+uJX#GHz;8U z8;5EwS~2SHNV?Vnr&yz~^3Q74s}mEtt_KSI_M$w!Q67nAQXsd#6Lr%P{as(pL^TNF z4gkgXb%`^D+S}sJ>~(W0=-|Te5!d7=k(;m2W$)fF8u9gadoeJ6_VhWAWXG&kO(z@u z7D4qEEKP7AaWf;QKb^>~$>_RI2lKqe_C7E;D!U)Og0g<6|KSBNT@ZHOqIg7Q&HF-@ z=Tgf>-b3QW6&soOb>Yvk;J0Ksc-K-lh_RCgm#XBu3}u?L=D571qkCoHFY3d7%3)D* z&{ISCg*CdTd<$R5t%|?g?UxJROMUhw+V~>={jdDL)=1ycPT#Dz{%M;ecxChH4gB7lVz6k{@ za)zkNgy511LexPa+OQC2_YmFf5JRF+x42+^+E7!AP;(+yV_2{SGSqfElq5G)`CFR>zSh$#AmGx}5}`rIP=3>0|{i@rrh-*4lh{}IJJ z;EKVMjUlj%Aq2+|7sotC#gOg9P!PvH<%)eK8%t#wO9PIjD~@GA#lGB$Wg?Da;fiCG zjbpcrdkv1`Dvskp#qsULy&;Z&#})rUHvW@kydXGUxHw)E6)(0E|Ajc=8&|@2*#s%e z1Zi-BY;l4-DnVfMYah!D)@fY0ap#)}6F=;`C0g^lsVoUd!};a5}0ueE^k?-ANxJ&KTj! 
z7?aJIu*{eOXUr65%%L(Cb~2WTGgr7W*JLv{EHk&jnLEXqd#KEVoy;TRtP`%RQ`xL@ z%dAUq)^%~#Eh_7NC+i<^_5<#0Jh^NFt879@HgQSzV{|szZZ-u;&QtE3XL31IRyj0~ z9J-Pm26WEL-5e&8T$a@6SCwMKG5G&v9s-YJ|3ig1xO3k?plqq?{3X%+003QfE+Z=U zBT1eZHy{)b zrb2$a1ptom)FJsICHZ=gLi3%1Vwqf3E6{sSV$&P^@2M2V^uZ1#L{2yOM%;zUp2T)d zVDA#5&vIaa)FNO^K>{aCgBudgU1&*CEZ0~lfrdFZiMh%ZLEjdp>{7Tk75&QyP!cj-l1N`%!<`UM5Bnuj=t}}i;x3g=&1;e?_$HKZ$_+EM zf>p?sl#`UA>4O!XJcRfcLp$=`qwzv4U=!SB5Px_Jq^x!q{@JoLVY|q9w=~4FP)WIb zo4fRzMczkrk*8d44}IYry4+kOKNDSg2*H&mB*RcmrMo3XOWc)$bmcJmg0m*@JxQfz zD&!trL^P1w*Ho^wTr@pf*x6L}(X%RHy9{7d^-Q#qr8J&E7esYi@XND&XBNg;S`lpp z0?$@0cozNK#eW%Ixx1SyzymRmE4t#Y{$d@^#t^)w1B&8?>y%Vyc~bH1)x5ne6D%!P zm8-og$uDjavzx8DX0G`BI&``UIsmKC1yv0+-jdXR zOs#!BfWVP7py&|~2C72j!3@n!{d2KwkIM=k0~SQ`tHblN(<#WP&x%lF z(MOSdBHhLlbmeGx%Z7C?P*n zQZ_x(5k0bHJ@S|yoWg#O5^1jrZ?7t_SKX#pGon|!tXCJ)tH0lCNZR*{x6c^ZXKK@D z9?@r6)@P0Bv)%8rC+&CS?RN(D|FP+Ji|F?#>-V}#z1;5iBSjuxMFaqmAvQ?pa&%Z3 zGJHM?j6p_|q5>i!0S@?yHYjKWDy0mShCyZQqq0cRIlSmRAiBT?ov#<6?2azQpyB)I z3etfp8&olAcmnT0eZ)Xx*+3I;px9!dnH1B>i|LLSXypy=iNK)BFau?%+PeV^DRzVx zn_+`VyTXh`U}s)nCP*V^_pwW)gVIKrsTYGAHiO;!n60wGy$CcIGx%Q`c77go3>-Sw z!=cw~hOWzo($fcT_lK5wheBiq{{e@)UJRG$4HMrFUX=}#`3&Cg4?ndXKI9#txJm&@5zQAM{b|3S&IBV}kIp z-2G9}gRwN;F-HC9v-Ghqts_#lCgtz+MO#@{ecs47gT+fHakPW)ULUx^r3 zJQ&tLm=IqW`*lAa6fp{jnXrUUYPC)P;G-t+VI#7sk7AR~Vq?ni5sTJIuhvQ1d#sb~ z)D7u$K;&3Fa9SjCiW)xUjh)s!m|FZi9rkj_o^OU%e=1RI6s$iIzA&8upOC~U%p{Y| z-d|1U@{K@kXQgc?Ut?!77p4m`u(27l8@zKhFGpaub49JQDF?IVWOLJcbB%CpC*M3l z)Pcg1hV4=p-;&|WWgoG{TcPE< zjDaic^4|VZCh0Pv_{3woIfBLEXTB3uF%*={i}4Dx6y!6ve5=pxVmbNQ3kt>ea z>1WJO9{E2!&se@`SYgAiG3u~o%>ks%h zk`FfSu~YxilCPB~@QpT!qBcnw$54FOz{OFvOw@(UT(Iv3Mqxu|aMIRbEP{W0sCE6x z*EK)8wb}n-5wiI-{$(<|ZT-WYtig#qg9&Hyf!BjWg$64z{DX0f6Rj^dJ$*MHzuf(Y z-G%cnG~yn@aJwu~^DT>ewS4<+nQQio+l_Yn!TMYOUTkTSt+97Z4;WzT`B!=uchKa! z9fOP02D>ea2M$sDYzMP16!)Nuo7lslA%lru{0C#?8*5QG^rHCoa@&C=~`8( zj|qmx-nV0(wJ)^spZv%=p!qlT@e6vNe2r0jk7H;<%zo9|XZ_oovt-7Ne}lVJ|Nj2+ zJMzywQe--P`{k6*{v=ZWTvX|Jkn_T_ebU{RioIg>`U@{%F5K>dere{x(MiI;wvOy^kJvq^OO;wBDMIh47y9!w5sU0>Mx54Nq?t-ZW0Uq1MEdPO`?#Lvv`S5o=l ztpmV7H0a?o79g$viqP*Twokp2R9-c@p&tGE;Q!FLNBNmbYxw1nRDXQ9Fp-0h-&U{f7!*BzP!*^VQ?QMDipSdJ3fDb(5Ee`n^O5dc zMYG~bXcMos#@@36d0p|1K7`#une4&t=TbzvfvkI77nPMW|4g+vAz0olkzb>Jn4A4a z2;ad97kX^J)rhCz|Lx?{ zPub31LKZQ~CJt>ozM*85L3&XN#_8pVGDm6SfOn~ZMv^QVuSvnYvV_kK z2gNS+Vs^38i~;$z`N}fc2d{NKeM6{Mow&&B)*?A)kp+Ch9CRyjx+_;BArgNj^J9O! 
zDo&3xd2eAT+9xTPmZ+M`RhalDm2UIB;Umv++dzXjb``P+%_YE%HT*+4n@qx5O z=?dJ&5qS5eadYT<;81tX?;` z$EhAVXZb+1Y9izwZ_9?gl7l-=ZHk&8>TaC&YC#9|?2dPXGmFJrhg|HcLnpJP;elXo z`*JgsJW(&Es^qBeAs3o0nR;l>7HJ*Qyxu@=8Ms`_AkQ(ew-xvw&ExtaHIaLQA!um(t&?lSl~TI;+^vy^&wkU~6z4#gf` zpN*SVrs1yRn6)D-o|_x7Iv(0_#hUjRF>t!J^5rZ<_gNAiR?nY|id8Rcm66%lIONG7 zmIzW{(wK?^eqH5Wy6NE#7Pg zfAAHB!Myl9_8kuno3R}HcE3+ELEh{_lS4!Fv zK-j#!W#@n&_Z@Y4L-_SmNVN9D7uja6Pbouct+oHMEKK}^lX==h`0vHj)&QKs zn-HrT{Yws3E+sMEfLAY8Hs!P$a&nG>8&36hytM?G1LZ>J=MpkC*2{N=6~a2!a`?EV zrPLM*AX?t!lYyHhyq+qm26wB+&xN|%)HWqaUa>xRNzKf5<+pn{7*A=1F3lFSC%9Jy-OpqTphZ7@h*!fjo_vjzK3px{x<_D?E z8)S|`14h~2vrG<{a#JRpv-VexKWw~b8BO{4lSAMrcfhuUEVWWKx6uOpRlWo_lGeA+ zCeoi=_s5Aei4_R^)Hfq(#yFNCWWyoe*x2RcOPu-GhJ}6%_0(^(CgagYk@&Mzd82Bj z?DOZt!gRB6yPvhj)U7bC3cBIPcD32`bwwiI>AQX7#?#69SV^fohvG}@bNDyH1dO>y zldOjG5bK-@ctm3v56Rw7oE4M2=9W#T{P;!!tNbJ0N+J0DIgB~7KxIZdtLV_QSmV=k zjT+C11P;d%#WHTqcUGfSa+PIfd|WEu8%J3e-@^y@xb*5ElZ~T~)7kK?6=JQXvR3OV z`b&9?E*}i1At=ZG)e04r9gy+mS8sVJOdSEqGqFX#nU5z+6Py!O0+gO602{W zVK4tP!(ece3avZNfP1Hv8uVV&w_aiGoi4-Y|2>@SSRuAXh4^6 zuD;@FUwI(Se8i*2g58z`C*Df~zm_~3>R&NhLmq!#!PmU%rU`Y3U#uHue2s?En5f$P*9tpwx;LZAgSCZ%*d0Z$o3U*2I+(P=?$5NFahPIVi6(Ye zqxoi{GODi3TwzaV?q;%Ku&&$rs|IKhajZ{QbL$Uj2qPH`{QHa_i zg##E!O0L*c>Ra#2=^k%NMe+o=u4KTY4GtDVkn2e60EZT9-d&uhJSQQ~gh6u526 zit-@tE9Jh+g_EygEury@I!~IO9>Mr-HgkO4;Xy~0S(4t9vj5!Aq886WA9}y#GxVG^ zUpybvcib8wkQuIV9I)%X+l`*{L{(^9Bx>UBzpG>kjcgfQiEe)WYc6x!aez|*O|*<< zmHT9t-d|y$Cg+B9xQ1_E{%-JzD{q}fUp9g-ea`N&<BPNcBu{7q6+1^3h^Hak$)DV1;hE12a_NvRRW+PS%S}*hyvYA{Y!g-^C>mu+&x_z z+-8D74RXho_jxd)yjB^HDP5Ja-^+c8*9-=}m6 zJ#Q5VJVJt|WBjJc4{5$o;~M@$e4`$DfchJvha{o|e+O^KkP6^4vCn{KpdcYoB(5k{ zPbOZ>;#m?XzKu2(X#x2HhPZ8s7e5T(DApj}3F+#39C+*>m>mBqC9Kzh^c`1txk;2U zCxnVl@dub&hAW=RG$Kv%`^i&Ku6m#dS19@|C7EL)%02FDV+`B#7`_=!2d)Gnk7&-$ zKq)qww=xlESfJVr%WFV%^0X_bFgJG!oiA77UOo8XvqY2L*v^_nPJB?ZL`wIkL=H(( zCQ$OOR#N!&>qxqU=f9yUA5vnI-S3TZ=1Gb&|bI}I%y7h#zeOBXhS%BUxXv}-4a zJc*nm%E*#=VXOU=_f={j7gQ1)y>|7z6BVPMqPYal93g(?0;a0Y3ExTy-Vyd_^hkgc z$4qNy)xpwv8&j{jvTwOEDY$dyey2a-eucYEETT-(Dvl;jjYf25mss*4-BXb%?#wq5 z>(!Di$q6iUIVikjkC|k6ZX%b}H~i^rHj&Ih(-dV`_P~GN)D!Ztt8|)O*5_9A0oR%N z#0k-KxnD&%Pw13BlZ47&yQ#~G-kY=SAoJgdWMeqK-LlgpC(v@NRE9& z(x`BT%|qx%&lJI%g4*Pq7|(DE5*?S{+({&v6^-5*5bZ!nE`d(*WD5A6t00a$%B}>K zW#z-jRRW*=<%KRPveL2PrXr1p^2=qV$d%Bdv;1#>b#lIJDO@+#rP;Hk)v6`VV}370 zij;NAh9LR7DP=b6FqBFBlt^luNIK;~zrO)*)7FM2ILb6HBObDqg@`SxBfxn-w_y{9?XI**A zU2$9QBR~kpAEyDHmCxK%DI%dkkeVW`sxymPKJFwJIXE%0w(2r)&9nH+Zf&ei(YmYx zX=zOdtnS0((r-Mq^IO&WkCSz83s2}_4@yCfJYe2ib+hnVr@0I(c>|+?;zkh18{w)j z)f$r9k|4F}nSB1hflPDj-)>%YEb{fkG8xA#MIN3~|46ExtvQuQvXu8CKU+5*`8RgU zRax@X(uAj#RPsL+Elf74p{H#O>M6H*RrNBw$$PFoW&mMJQbVSiRtu@ikw)OAwxcw- zS~OCdsJ(LcAmwa(%_8P42kwQndplBF&XQP#)J@R@Mm4C^A^AjRr|-{U+n zlMGFc=5=RH8H)*RVy>+=6K$A1m(p<9IYi-X?$3pF>{WAfi$pC%y7q50{I% zUR19vcd%**VWrGD6tjzu(kIhg~85RW`42l>$wHeee8=Aok zIui{2iWq9=#hwF)-w_TP?_swv!*du+H|g-y{*bc<@9+@sh~xfnclbyt<|((}D4T-m zOU+TvjL||TC+>w&w*FE6mt*g3*{C(g1boJXGR8z&$38ENiQkWXeK{^6HZG|@{=;Wn VCSzQ#bsV@bu6RFQOict3{XfPQuL=MF literal 0 HcmV?d00001 diff --git a/src/docs/src/documentation/resources/images/hdfsdatanodes.odg b/src/docs/src/documentation/resources/images/hdfsdatanodes.odg new file mode 100644 index 0000000000000000000000000000000000000000..e5d922757192f9928961e2458b0fa8d968d9133c GIT binary patch literal 37296 zcmeFZc{tTw`!~F{ZK{MOM8?XTd7d^QsgRI)4w>h9*gGm+C`snbMG-Q~EEyuUAqg2G zL*`i-A&lf?T(PUwYQ_Ukh_PS+Z}gXUspRf zA0Y=XYmd8*w%#}YmjU4z|MQC({-o;eX7A|W>-A?SZ!uwSA8Q|9ZyReb;lKM)!)Zx` z{LfDC2cCb^&)U}3&czN6+9u3q9DN41qp6mry>y!$VeaX>;IV?CiwQhlVk7V 
zZtY|DKZqgq|L?^7yG3}od-!@lCiwnq5&y2=xqma8hnKs9m!0>2S^mH4`S0i?P4<5? z?SHqIkGs3e|9da?e?z6CtF?ojxA0|0A6IJ+@Bg;MM!;0hr2qiw`x5MY)6v$)7p_TI zROG*WN&M?)aS;(UF)4LPsml^V9&QeQ-PG1New6tr>~~!As@g38$l;GjKur>!LW!%|&>`JYW%Sn!a z=PnnEKTlK=iSEILyFYz%jHXYJze6=&pDDd`J?D|Czop@;+jjM0p0S#UP=A!pUWH@o z5dfY(bXh>aS@0*|2L~rAg9TDvI>J|mt&?Kh~6AbjE<3GKWL*vu(+!npZ z3FxKT#;%_PlQM;F2i!1GB>iXGa1%Bo?ATJ^axf1WFMlnN~nBjcbT10qM~XWq*h?z}I? z`uY|x^3M&VpK#JsQNorhbZ@#;^>I?tUqS#TWrN$k28MP9>HLFu&!8ysz&M9M`y{}@ zOa}TQtDfca7w(%0%pK7$?sRL9ODAiNdWjvFOi{*4f4BUP}c(Euyf`0onuW8 z*=Rw@MF0%-5{7U-#a)Xb3WbZB*AndlM0$XkX)<{d!MEgb^_JvxkIeJ@F zCCdh!wsIi{Q$3>zgsmTMt0EaI9ggrsvK+|3dP9C2VKeJPWUrmElxeU1mT{?|%dx)n zg15Amv@5LN0^@Y@^^PxHxDG&LLDyo1d?jmxYU~5GS#>2#Y_wgrDW*`K9UGg;jy~L} z?<^N|j!+3c{I1o_A&Q0^A1f6YLB^oPL&Y8<&lze;H8p@y ztBfbDBsWPkp1dZpni4Y~!a|cbB9S@ks?~o$?CE1cfPq^HL!QABYdkW~z?x4HSbwoA zSO3Xppo)Q-GA0E8&AUqnLpXYbCZ^km6=ZBlGTq?DDP-Q@F2}iurF>>6CuorAkb_6Y z!5LUb|AFD_A!Q1t-vxbWwyvpCU@UnV+SEj>;Fk9Bjrp4Y6COTZ2K7?@7hGK>k41n& z{&7le!B_I(+-kGVZ?KK}(>iipw#IDTyGr-k?6V~v^(Az8iKX0lGD+O8=1DwCSs%#? z9;xU_&t@yQ#a;`|x^z6XWRCkvh(H!ArrIgu-l^+>iU{!Gw&Bi?A5;45zxzWJt2vwF zA0eB)&Fq@Jp97WGqqj0kEYh9lq-^%BOmB&K ztV2PWH$~Ncg$7gjJE#=QoCnQTH}KY)VwEF88>XMfTtBMejzWCaLJ>qhA#2eK8g5t#vY;0SO zQJ9g*=zT`b?%dgkd%cyBhv(R6Rq4oU!Z(c)(TR{}JJzp6L57}SRCZ)@IXPJP>gX>Q zA}cd_ipzU6y*F|zMBH*hD3a}1OdRc$=qZpTFh3JWw6jbY>O&&rHomaG6$N2%`p+R! z9-&&}%T3EVb%Uf=dzGSA#&4C1%M@6GWJDNlZ6xDdrn;awH-5aHqk49Wt!q*L4Rv@T zB_{3^m~tv88y#Q!a;Tg-8kMzqvsmfv+t6n+$HDpeBNT>xUwR;izq3WMQH#7Qc*}-D z2}woIt_+-2xo{oX+=&aAYUu6QS`C{wv7Gwk87xRQyEs!|#DAYdU8?0uZwE5TdC}g# z$-A0yJ-4=?8tocwSK4Sgc9esU8MI>qDLk?_Zz>%gv@Em#rl31|5-#R&88oLE|E97i zDR!lJ%5WUaI8cF!_8>Nod%OLfN*Rz4A8lmwZ`}Q$$lm951k~YlN5Wny zc4B86n}=42Bbl)d=`CtkkjnIsit;X1-0%6^5ma9siAdZOr>cYmxPKX7GRKJJlZ475 zXFHT~Zn#SrQ=O3Y1au+H8U zhCvwERpRS?N2u=&0D5|r^0Wvg-^caUBU;O%fM3q&$4pP~O9NhZyrDtZoV>4@(k-B*` z%%L(oZV>j$WccJrH>$UbB>%bY@wzZ5w3;o5W8Iq>e3%~Ox-^^peR8dbaBu2`Uw)p` z*Ctu!NBxk5a~G8_{H&0FKYxtcFi?;kg_80W-&US#x(leE0A>DBB>Ev6MRT3L-E6?k zcDO_ja_}tc0qvuQGb(3Uj)RsH;L#*hDtbi@ZeMeEl)D{hS4Jv7A_q2KzdSx)agX;C zh`CRRp+5rD3Kf?p$4)#-plN1;qsIZ|7oQ3DPIm$t?LQs3?(EXNRHd|1W(H&z0X<7O znnO|ys6rZf5t!G>c%7bGqLnTkEJsZUF@y4IWWTeM0{#Z^I z@KHl5_mKm#s=CP(JW>H}>rm#|8Y(Z~j9P$0zwW?1kT()?<~8*C{=!I)P^j13fd1t; zezijWxZ+U^fP99oXF-=9vPtC>poTk+*a1~QLu+e`ZM*;^Rt|w_e&NS6HUivVIY#~K z$y0qh-`_UpS*A`3QB9dZWz_~8Ouhz?vrD66$NuY68m(~`B5A~=g17p*o7&8Piv!R< z9G@&OL=#}UD>XMjwN#)D`_@CXB%zoq--&+N)<^MahR zrb%b&Ik7)Oh&Xi_AZoKXle_PNM-iYofTW7`tU;%NXF-<}(GIoEOC3Xrk)Z}L!>eoQ zwrEg0W8*t<4XJ#N93rt9VP!f98G4Oob|k<@6<`(9rTS)rF^kIAd~MY~6%la!IOwMb zOzF@K?{(cm1l>wVNc*2pS)~bN+sieIwP!JmkjjUkxn`L-Qh0@8VUiG5LjTATD*AB* z=7jD(oyha5vsMebA{NZRh6{)=XpQsYshFM`Foi*zhLErQ!y6)qkXJgl)Rmi@3C-#h z0;4PA7OM?gnVHK+KF)+=oyxq}TG+Mt+iFmjf6yhB(cG0GGj{$7p{Mzd7+>tHXRw(I zgDTzq6Hp7!A}~KzRy9lx2YYR{jFo!w;y+#JE*EV2kY_w7!T7@G%NH%*#$Mc29b$CY z4)!$8XFndzy>KQgFAS$cV9eAuADT?K0`2I_=^URB8HVm@ejSDBECh!bqN)3@)*sd{ ztNZ5F!wx!JHHe!wj(m1t&}Barc#q_J6N%%N>yzQ0PRH&^WlBMEO5tkG{>b2bjNAXj zINXWgy3KaoFk<7v!ir_s1t4<4LHiVy>k+k2Lsk_~1{%OhB=-`*F=b#QNXzz_+Wo~^ zwiWJ6Bzs{;V649wgpOuFBRA_v&K0ch@C>eC4yVNYrUo>&=qC30Pbd!rBhQIaJmWCg zOK3nLF>roaI`gH-IL&| zhQaOpboq0qau6K1l??Z%CM>!1NP)=E`+}}(N9Z+W70%5q;rep3=%CtjgFY$^^vOiO zFjUHo#R50R-ika$FRF>3vXVS5GX%Mk@$!h9o+a3w9@brdWOx zSn8OXFL~g1rU`#Qg8%o1-yL~MeH@(F>K}MN6@<<_G#VE*{hT-GXz|lH!!9uKPv9b{ zz&-i4!*IR?y|waU!b#C5yLb_LjKI2cK^#8Uk;)%KDtwkg6e4}M=SwOJJ7<415g02) zv&{K$`)Eapk^;OfDzi7ljP2nbp{kC2k~LPb zCoeFrdW-@pICgW_kdoQfYLshc>z7Lo-3$a~P~u1EMUMl|Q()odoo=H0z?UAK>SJLv zl(omgB+|8G$8r~0naT%OF90WzlPY}czXlQpSD!C$5-CRYxX~sWU!=~#@aJXJBm3wPn{pX9yQ@sX 
z9f&sQKeow1?XA@w!sL=;-IQs0+gnA&HzFqUts_05{p;_v?tx~>N{tW46{B9RtooN`q&GF1b_7kFxL;hnG&s?U zqGSdqIe~|kJ<;|Z(V?ks61H6G-n=0WViH$NWbT5i_s!Xge^s0TSAJB;+XXUMj)#!K zxf2_48isL9irf+qmtyLCz)bqI8LyD}h-B8kh4C94D0Qi{~ojhbpD_j3ClA z&S>*j!S1_}Fox{3_bLH9-lux?viZ;sZd2HLP@<`^ai(MW^he`^e7q*0PS+734Z!_% zV9TTOfpVN37)BCPFMbPy$}qcDkbNmJN#6uIQBgDwqTI#SYbDw>65ypO9T?V4F!;FF z|4^XI`<{KYambL?u#Ni#8?Et}rUnfGk3Lc>FeN8QjJ^H*tW{v0kK(${rynQA&?B*^ zskXfeO~`T|$Z{A|&8vc)H=m-!kS<}9uO)u#`Z9oNeYP%+u&5PEDlLSy962EEfum>* zH~*$%Ie)G1z#t~n2r{u>bT8@_eH%(AoSOgeDpL6tY^jXbp|3)_pHE}$9p36Rc`;e? zCT+v;ul3t-n^)djP7J2nq161-bfWvAJ8x=j;Nc8~${wu=i8SJsp6zrhs` zgQeuE8SaVGoPFRacS`KBH^jeg1Z%FZuvZ(iwbnK-&Mbz=X`FHtTK`Q%*_W5F+toE? zGU9yXdhCtQv3o)uIVF86EOScbZgh7E`TPd4EYGeO^8`n9SH1*gaEW zN{$nZt$CC6CM{o*u=Q)XaL3uTA;mn`a{xRQWLI#XGpbfwUiBYzF%gWI(#4WzH@r%7w*fawk;U<09O*+#pJfpf&0tRMp z(zZ4ZPam0E3RG^ms5P!ov@y0D!s7~NULrW`$bkC{=zCd_gbevT97*jY3wyX7)@kba z9~cmp+Zp+O_J-*6^foB^lGzFES+&iYwZ1GqLk;5+p&7{k)YAxt(DPdJpMZ%N05~#cp zr73F95x3b5wOf|HFlooSF4`Xq-!5NcQhOK3biA8L4eApeq;+y+ne@e#{h{TyY#Eqt zuxrd*Bd=F$|9hBM$Yl3~NG~EbY(cj^$sR)rnp-mtM!xi{hUHJA^~o_C?~pJiaSrs` zN}t(fYH{RmD`V13a*}`;?H&5%%30!x5ZRd-MKQQBCc_yssH9O*GdHtz)Ai2Q;)0Zx z^SXZgSpSXFZ5@zNU$1$DnMHF$^5-9P=_rPU#Z0|w#A%5)pP`XIfQ zEl%{^M2i}jN;sF<;XP`z)F)7sKjO1BX1}0&-?b|FxXgh&-F^&5bbFB;4)f4~3iB6e z7LaX9v3%X-bg<_KS2OG)Y8q+=cb1zyFgbTppGD>T)oi|X{0i#`+D)wg#jDPJNe{?dd#h8!}ZIqu32+OT#pZMGgnJBnt6%v71 zBE*i(K&i|gNN_98}e}uZA%O)pbgw)v_+{JWRz`?;Tw6IHex0?{vJI z7F}4;w!uG2#{@>@i8lB@f(VasEy2S>O3&IgSH`6Cl8NVM})ZbVs%_ z%yNB;cIj|`Jpz58wr-vomQT7_l%P+PP8lbhcygLQZsAa|lATiH0{ zctH6OwO0ABeYbJ6r+)BgY*DAobB!LB);`c&O%5#PR<8(^*G)c{P`)J?O)#wMa7@*L zo_AZ}z<-p^EP58*2=nHwQCXJWJD#sRq`J$Y8Y@bxS0shVuJ9;jmmDMS@{Vyo0_s0M zxy0yf=%Fe+@$E3tM_k$BXEpl!uEGIoMOz3LWw1bZUwH&EWeU^xMFnqnEk6kTxLauX zkgZK5%Fc7^S046oFyjJC&?(ubJ_$1ANeQsdioh@$-J8K{*Xb?fOV2?Ep8S0UCRXd` zbDtKJ1wmvGbRg4|NB$a}NW?u6CKvXw7dM2P^qoM4?G=;gp6L*Ue7$EA#CxdO?3gJA zSticoAf$s9^u^H_D}=r@8Z@suD89@RYDfEZ?BG}cjS{IKe5(CIbFo;;FJ0;jNl z{3hr2Z>sEfhgh8>pjl+JoFt7pn7k+DopsHGgg4>ddGOXw&Mh{Z3Pm0L_n4G0wp%cm z>K?PpG$r$_nQ-CE9Um0w}lMsUM*yv6eE0I-Jj-Lw89$0mqkw*n_SQdjep7{D~k~wk13pGxlUl8j2G*cp6oeBW$T-y()`lXO> z*SuVCSZ<>OWZ=%Kvgf5N0nPEx>k|^>!tO~CB@M^Du){xo^kLnp4pZT@S^!+(ftuFi zt}2Dx4#67gNw4ChpE@(ivn0`NVNbx`Dr#IlUSFrO%|U9h6tvW zd++<-A)85a1UyiB5A!b#S{)A2BR%;nmzu~`0)z75y13v;TuD_;2z0;Q(1WzTghwMV zF8x)$VA(AklQ@gExBx&dOimCoF)ZQvBNaj2!Zeslh$=ykH9;i_SE*|GL*%go!&C{eQ9qCNa!Qh!^{4wpGh4=^HM3>yA~aNXvm zXR{!qU>=!}8QO>b{gn1NsDB8HCvp{@t7Ix{5$rKi>f3=>!{8~bXYlfhKlwx}JS5}< z>tw-|F>sIbAZ6CuW}%+gl-DdbjZFxBhZ=`OFI<)q-&=^H1Qws@IE9n76|XTRLEFzd zxM|2P>`1xLG+WJINK%bG3vq4x*W*x*0!M(d)#$@1l7D~^ihC;ZOn1(FqGdUnIu4Gb z35_~~JLaarZI!*3_JO zKndO zGa-;X#98g?pj`O;)sVp5#-^hirZC-zI^TML&I9F$&ji*z%}n_A<0k4~Z`*|zBAj8a z0U`vk6ljk2#E(74UY&<3bqW<({}y}ph*@6oVpbXgW;dWAhknHy+U?9vT7D+{N@2XF z0gVpA5@86t1h)*VmoK|hE_mB~vDju=%{cO-4B1BkwIKl#+?IM=g}UKv7P~!>rgQA& zF}M{Az&*XcN}V^{jdJ1M2ns27u=WU^m0yFWy)L6CqaYXL>~^v5>aWS!P+_V!b2|$I zYUrPm9z#)lCY()V#R%O6Q)Cd5Oux}Gl0b&cSIEOK3eM1Na*OfsjTZvWkYcZAT76$# zygA-)7iM~`p7qpg6zd=XJQnJ7e&3Y-?k?qB1G`!5J<-mx&zEm8E&~q)Dysgr5+k6F zvBy*``7h`;-^qDi!NLq`V*UWSQ#q}zoFn$Yfgr+vJFeCWm)T8%jQKR2PK(?;j`#JZ!;wC3&h!D@Ql(~ zqE{@~&EH+ZwvmGg4&hC~t#nR(cuu>bts0x78X2kjorpY=^u~$tBfpTnSjU)5$K$M( zrsXAgh;TZcmu{An;NhL(Df@oLYB(Q(*;_|`8Z&C+yj#jZ_I@{6bZeS4=LF1UxGH#3 zkK+QKlxVk?qGD1QHQdcFXp+MljLLdD-U7nl*Pvnz1j3 z59y##%}dhAeCQhStoW-%^Y#2Ld5CCI45+wZ26noJ8J?9ra+HqvZA4fLXB1t7e#<^% z?&FVjro#_`deR#^;`C=rVuZd$&8Ne4x}<7FKcJFpfw5^e`;j1}ws(K2d@K|6rt*M7 zT-PQ8Ny(QKgC41b(#nGcyxq@w!P+diwo{ZY)WwW zN&ZI@`C;nd=QS&)+Y79p)pP)fXT=}LpL7S-oT|;5@ACsp9Kn|z)KNNVeWn~rkMImE}i?}>%y6N&HP 
zftr75yXpcw0PRz)xjA@0^daAgw307GpUX%v43VplamJZrX91?9OhH^{mX2m^V{^%pwX8D!+P@v~2eYtUsEUx#Tg z7c%9{)X}q9F;8ZRt44(RT8Q6l8#A0K9!}Iy#GEKQ`Gxg3FiSeiVmPC46LY6$*jti+ zycF5&djy#EOt+8HDdz|h{WXxl5K2eVID-#RT)kP!dyb-?wVP;`4)cvYhGan5YmSAE zNb3lu7UB4gRmo?LrNg>7&wYnsXv)51z%cI&69-@S*leHbqZCR|%K<#>wsyWNu7kh& z@Iw$X`n0G$UMgoMH+N=cyeE3J7rg}00;yT36e7!6h|HD-D%Gwk9IlU|Ttd$J6yONj zdfeSq`wD@1LJfRAa0BJ-JL9Fu%@eECu)4+{_vVx`B>U)q3Ju;AO1*ojwtoGtL9zaEm{19d2R*XLtnJ=Tbegf>6<$_bcJ;WI5Y*oyenGGF(EX;NF;tf zLdAS&#ZqfXf4-nth>mi_ot*UqSWr!LfN?^NBCR6*BQ=Yku78nj+m{;4d|Q#l_6#o- z6k~TNS~%j;86HkAsq4VxxR>RwvGS4T$~z-Ea(4rD6CF@mS}>W)_{LD8CvSxrU3yr8)Eu*#icKy4l(g+oYRv0I zPOWY6Ga8zc?uG2*loJU)S|H0XE}fl9RhLaUQ=yhq%z|^(@TqZJu z6ZA#c=!W=TIZjViLjSO|R*#xo#KJi7{T`!@9c8;nnL?Vo53==vgxx@8TTw8Y(=+;T{b^+}i`znVqBp^6NXtJ86%I_1O4GAYl^X$n(9hNu-;H(q zXUsvz> zrme{XA&}>(!(Wm6n80|!Zg?!|b^d}juv&%%Wt_=EZ*c}sfCFRnMQq5V#fGrMZGL_>taOO;`=9pSg9uWoD|9)hHIFB1YD4t^qIRyz0O}6tr-U{+XjxdXK}_ zjzU==F@D8zZ1X0q;?Z`PmWBC&a4wjD;1HRg!NJT}U#`_MEH4w@90`$1!=B`O_NK5_ ziO9o}sWZPvl%K>2J1*w&7w*wT?A?vU>s~$*avg9e? z@;BH*2URuPgy4zbzQQW(fax<{{@rXoPDt=4P z_tjrbbQw>Zg#%Piz1ta=`|53eIe!sW%8k9bp)t4g9!?HJI&0I(q8>?&yHbW0)UD9K zAlmNB1y^9%g3AhpwOI4F*Tj1&uu7uDI^oGw<=H8i#yQo|p$agBR|=%;ST_;>%+45k zr(eVTAN>RdM)cOtqt1L(retDqH_Px&VCnj;+f`2dMBGmN;%m;n^y$v#%=ovqoMc#R zXC{LA1bNIX)*N6&h|IXCbdXs;?ek{SNC`jbKC7MqiwnC)ms;}&f9b4o-s_|WA5|%t zj$At1#{&nHf09(xRgkJ5KeN|Fa22`he%8NDMif?qgNqr*D2+2_ z%R^F$I{ckYZmec|{*lG7DC7zy(+hvjrYWv>0%8Z~0evl0ldky?3Qvn3Lo4*;;RG3J zn8Xnn>QA7x?0T;-H5j(pB^E6ztPc|26J|C}X9TXkxPw6$Z-*DB;L#2h5bU%} zMG|J*>9_~bH?ZU&=OU8kLl=XOYbmL23uY!B#rW+KorT$MRMZ%MyZP#TqtQo*wA_g` z=_T{3bQd@+4Ex?d>t80yuUrKEue%Bq5<6Bz%ZTPNQHGKmP*ir932JaSid*1%*BzLO z*B{*eg7^+glYBbt6*Zw}fxPlOX#LC0%yev;;dO0+rJ0v=>J{CQ_}z+lLqh9f+T{7} zvZ3(;5`MfBV}p76E8on z+*Ih##@>+lus-!f-m`8g4vS1WyWbog1x1Qf_b?XN-r`mafp!*ueV0 z&ET_1pqb)1XfYgg8J(yqCpw_EjWGH(vwP|K?jI<~!Ea#`GRy?bXyC_8MzFzZBgm&% zip#aH$f+BpO+UL4eU}m;Ab`+gZm@lseIXM7RR6@usdH{w$BYxnSapqc+u!uH%dJ^`*h3c_&;qeorYsXo zbDtvtT%@MS^WdV98z7604ES($m-mqRbM`czKQSv*Xd<=`YiA>)>~sZ>ol&8ul0?m% zfXb*w2SPVaNAy;nQGv8N`ZEU!HeWoqj*`+Er%}AGeIwrwEJOeh+PUcxTAfic*U^#N z4ye0r%8e$3hU{am`Bou8(kWQg0*_Ryl3^CDwN=Fy#AJFA`=g-N_P%6`n~@q2#Z8XVW<&?*tz+3WenD_9o~vz0k|7@ zIz?DrD*^)&6jOWa7Ziu?D!AdjgMy{_n0uA@mhQ1vOhDDM)X*d6ZKgh%nmajQq7Vg? 
z{n~iOvOsLMrda z#GOP=Esjb}+88JAQ}xh-`goXn+jXUpVei%5ZBU_mv3@F^SNN91LB0;GHqLsC&>BBi ztuY+gk_K(5F$Jb37w&mTYMuE(i@6RKVCR;*YsVq8VDZ3@AM~@#OlGh$INFk&NR`gyV7 zD;v0^*vR+DYZun{ggE|!(^v;P(ecu~s^xh0YLtg{N47Wr1smA^X?#R9*?us;HfxaZ z^V`jnakIt;J%&ngPmGdeVMN1&067C+DT{ASkRqK^&!9j zo-J$n7N^!8kzhLAZ3E4FL6>y9<(7WJkfT38-Kz`DGSGE@6Fj%@DY&8~t9Ws@B+se9 zTe3IOBKQ-8K%{$97qm`V>2Gqmn+7NR9)wkSOWukFR`2#}Ok5a{w3si{p!p8h5fR?4 zu=cDX4yV+)rLQgTi_DQ^V$$ACT7=S^QjiX_=1XyBpE6M8|y+EWk?tdKH7)v)OO86I>A%&+Uwruw$A{ zTuun-lPJ4Tyf^Sylw!p4CHJ-o3J9a5D% zTmJneJE_35FTUXIXw#y1QKSsDaUw)G*sc1`Ew16tWAs!oN!?x@*i2}?n!JI8K?b-2 zE4=p2s?(9K=Ue*DU%On&n}?GiLC8eL_i69JEe>dMJI~2s{aL}4p+5P_!ULT_yi20- z6csE&fGOe?{^G(~eBm4BN>xQ-8q*B@Jd<&it>__RVy-<4^n1 zM5&9W+$b}a>d7Njo!OhSV;Et|_w(r2@K1McrNao7J6_Y^cD(%np)0xMlC$9k4-A6k z9812W+Yjys?0lFikr%xOg?wu=lCnCgyK`l4DQ>T1cP!jRa zExUj2970|A=LY8g53ZQv(7A8u<#n9IIHAOWrnAEU+@-Q?1 znoG8~#}1)45Tzt1Bf|^t9n%mcSK(?w*LnECLAq{?;UL{g4yT46)TEop|2Yps6joW7 zvcLKj)=j!fk2Mi@57Ne^8&JRIsT>#`qH|u~Fvy$F6VOXAX^=9{EWTlICAba%ksx-~ zbLV0X*dFGMyHr=12AAxP_W0F}u&pK;=J5~i{2IYJln7Z2AptPE$*2Ts560&P=Sqab zV0`JfSNF=i`gTcAsH|a=amkSLEAtB8sHDe9?t7D87b^wy46ijx0Fcy7L5qP;OTvPN zLPpQ6x#~LPGoj|2^QWUxLmXbpqWp|0{HmJo1$D+B>cKUCMIJ| z8g|=;Ex)*b)sT~4xCvh5R6n!c-5SZ^Ef%r}-GhIf;6mM1MBk{JEB>A2t_C5$@Po)&sF}{U8IO%N?VlrgEr@Ot|bFy31X=ju?5#Ct8 zn7jbK1=a7Ymq*6SK3vuCZyGC1%3jly3QZsa2O7qt_1?P`^H=)_`v{Nk_Ru*-b&NRlISX}jIHxHJWNWY;-paNH=_z-<8 zXD!PdQ62CN`fxDT)@;NDQl?z?7d2*L0ln;P+qDhHu3>J5dXBj>Lx*YZM`Mkcn@Pia z*wSLY!Cz6Uhq*r|$7j%gKibr2KJ{uCW#<-nhcr};CuMW_T88;|y|Bnv)!a1|-GNYD z=-piYNJZ(}x#1z6YtD5By|OI~a7F7IM#b>To78O<9=xk>UeL@K%TUTO~fHV!p4Wy;iigiG2>=(&(ft-J6uQqE6+Mj|Gkd z$mUS4|7Hi6oI?<>JB6&O`^9>~VXQ?8I@u6wz2N%K4owYsP)dnZY(2T{+ES(zO~ zhs9+-=EBFjg}X&-?&-K6i#K-mtB0{Ke0=on3dGDaS3_HQ#4{nIf(~|MS9f1G%uVoZ zJ&~(RhgQm)C?5<3p{(l#mK8?Mel>JeoG2h<*ZKti`m7@~ZsF8$S29xywQ!KXG#I)2 z^F3i@hpw(IUh#1G2j4vV8tf7Tj36n>I)Qb-7hbv?pS7RqZP(5%$uY99B=lWu%N=9A zOA|4>lmhgr46?*np=lm%NPN}O$`JU({Q%X z>OO&cP;d5TzyC^FTJ_&c4K89V8XharvD^uM3;DFUze$DfOw(zS-Ak`XTk$I&4;g)N;I;Y^TTx|7QlHFOblwE{OKa8# ze!d%vHyfz{=6cpkNr0q%g#9wIqENfL@HsBju3&Qr)x^AR0y)y5M1Ys4q*qieJSOS%I+nc2Dzq) z5Pg)hM{qP^|2s>*n8EwK!-@WWwEHNQw^y1s%EoBzlprKzDs+~W{C4Vu9!@+y_jbun zIa8LLUc7$GQL)9 zZo1@<(sJPs(W*kOBsZ`VP?#CFUmA_L{nZfuqs3dh4-glgNU+I~ge~(^j7qtGGJFa; zk-$*gIPrEK|DTDSZbA<+ph?V~Iu4sfnr(swZxs)`edw>AUvjcTqiAq}2b2arf8g{Fs_ZPjqrdh0F7$X1N7b-X-L4PEO!_yStq zk=%hWwv{IQ@Yu=KpBLa*xpi@2^sz()kkNd@gAfjF(4U)qBDtG?;J|!n`RUKBx17M| zlFxL%T@D6Fc5jJTCJBOvPZIF!B?vbXay|=K$UqZUnv(vb7<|``Lq$q-6V@VJ$9TFT z^b3a>ZZl{TKOb%$LQ5EF8;Q3#{b%FgQxd`jH6#H%d#5DB%u$T#MI1fX_m`w2h;_KK3Z#1IK;nWqt}jhObUC=|IKwoT+1} z3@SQY+7<6@g!{&Q`9OzXy8{>vB0SH}Y-fO45we3V=|V3nHYuRz9J&PKO&aVWRp1qF zVQR`MCdplT3|bSv^>F$hL}y`SvEW_0{B@@2YxGw>>8j!HEQeKv;zI{b-=%TT8x6$B zf~cH;kpLXr?%y>Q$?2IJTk=IscYR5SQ|h#=tNJ>I-ue!i3%%zPJ2p5`Y5Tk53!Dv) zS0aZ1+@T+Qqu)B0OOz*iJJc$frxe`4YC(wDkl8@r1a&{&_7HKCrV=6aQMP8D2@myc z9PhT4uDa#8)i+5v;Kg7e1~oD=ehqP$e>ucq{HIQUwsq#Ip4GH?`?(4W8VTF^r*Qki zg{_B(qoGI8BUJ4PG##|LhxpHg>`{W|SJHflo2T18bX_j8VDZo*sd9d)_{TctVv`2H zJ>0nW+x^h;1N+l^NT8hR>4pr_hg&kS8{b_k=_qCz zC2tEiLgu~4GQ+h@PfN9h50VH3`PU>{;p#tLKIzmG7je<|9$^j7c? zcYvZ>dOoRyY=%$@XiN7a`t%BiR~4IvN8KcEvoKDp{Ql7OVaL6M&XX{}F$OCln-7HG zaXZu`!U~%Bb<|BUc-_Tjl9DELVRDd|f@~;nh8lH&ksAcrGdiqYnsAmLLlM`#w;a7M zvpqI+DA?-W%9RiIzf7w96uK8nOH11xN;eOEB|P1=#BUeiDr6H`U}&%?z2t4#I|M;? 
[base85-encoded GIT binary patch data omitted]
literal 0
HcmV?d00001
diff --git a/src/docs/src/documentation/resources/images/hdfsdatanodes.png b/src/docs/src/documentation/resources/images/hdfsdatanodes.png
new file mode 100644
index 0000000000000000000000000000000000000000..40ea2ec9d1cc164fba4f1ce3d254e46a5ca66361
GIT binary patch
literal 30012
[base85-encoded GIT binary patch data omitted]
literal 0
HcmV?d00001
diff --git a/src/docs/src/documentation/skinconf.xml b/src/docs/src/documentation/skinconf.xml
new file mode 100644
index 0000000..6a11801
--- /dev/null
+++ b/src/docs/src/documentation/skinconf.xml
@@ -0,0 +1,354 @@
+ true
+ false
+ true
+ true
+ true
+ true
+ true
+ false
+ true
+ Hadoop
+ Scalable Computing Platform
+ http://hadoop.apache.org/core/
+ images/core-logo.gif
+ Hadoop
+ Apache Hadoop
+ http://hadoop.apache.org/
+ images/hadoop-logo.jpg
+ images/favicon.ico
+ 2010
+ The Apache Software Foundation.
This release is based on the Facebook's version of Hadoop + + + + + + + + + + + + + + + + + + + p.quote { + margin-left: 2em; + padding: .5em; + background-color: #f0f0f0; + font-family: monospace; + } + + + #content h1 { + margin-bottom: .5em; + font-size: 200%; color: black; + font-family: arial; + } + h2, .h3 { font-size: 195%; color: black; font-family: arial; } + h3, .h4 { font-size: 140%; color: black; font-family: arial; margin-bottom: 0.5em; } + h4, .h5 { font-size: 125%; color: black; font-style: italic; font-weight: bold; font-family: arial; } + h5, h6 { font-size: 110%; color: #363636; font-weight: bold; } + + + pre.code { + margin-left: 0em; + padding: 0.5em; + background-color: rgb(241,239,231); + font-family: monospace; + } + + + + + + + + + + + + + + + + + + + + + + + + + + 1in + 1in + 1.25in + 1in + + + + false + + + false + + + + + + Built with Apache Forrest + http://forrest.apache.org/ + images/built-with-forrest-button.png + 88 + 31 + + + + + + diff --git a/src/docs/status.xml b/src/docs/status.xml new file mode 100644 index 0000000..3ac3fda --- /dev/null +++ b/src/docs/status.xml @@ -0,0 +1,74 @@ + + + + + + + + + + + + + + + + Initial Import + + + + + + + + + Customize this template project with your project's details. This + TODO list is generated from 'status.xml'. + + + Add lots of content. XML content goes in + src/documentation/content/xdocs, or wherever the + ${project.xdocs-dir} property (set in + forrest.properties) points. + + + Mail forrest-dev@xml.apache.org + with feedback. + + + + + + diff --git a/src/examples/org/apache/hadoop/examples/AggregateWordCount.java b/src/examples/org/apache/hadoop/examples/AggregateWordCount.java new file mode 100644 index 0000000..9e5794f --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/AggregateWordCount.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.examples; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.StringTokenizer; +import java.util.Map.Entry; + +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.JobClient; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor; +import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob; + +/** + * This is an example Aggregated Hadoop Map/Reduce application. It reads the + * text input files, breaks each line into words and counts them. The output is + * a locally sorted list of words and the count of how often they occurred. 
+ * + * To run: bin/hadoop jar hadoop-*-examples.jar aggregatewordcount in-dir + * out-dir numOfReducers textinputformat + * + */ +public class AggregateWordCount { + + public static class WordCountPlugInClass extends + ValueAggregatorBaseDescriptor { + @Override + public ArrayList> generateKeyValPairs(Object key, + Object val) { + String countType = LONG_VALUE_SUM; + ArrayList> retv = new ArrayList>(); + String line = val.toString(); + StringTokenizer itr = new StringTokenizer(line); + while (itr.hasMoreTokens()) { + Entry e = generateEntry(countType, itr.nextToken(), ONE); + if (e != null) { + retv.add(e); + } + } + return retv; + } + } + + /** + * The main driver for word count map/reduce program. Invoke this method to + * submit the map/reduce job. + * + * @throws IOException + * When there is communication problems with the job tracker. + */ + @SuppressWarnings("unchecked") + public static void main(String[] args) throws IOException { + JobConf conf = ValueAggregatorJob.createValueAggregatorJob(args + , new Class[] {WordCountPlugInClass.class}); + + JobClient.runJob(conf); + } + +} diff --git a/src/examples/org/apache/hadoop/examples/AggregateWordHistogram.java b/src/examples/org/apache/hadoop/examples/AggregateWordHistogram.java new file mode 100644 index 0000000..46edc20 --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/AggregateWordHistogram.java @@ -0,0 +1,81 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.examples; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Map.Entry; + +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.JobClient; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor; +import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob; + +/** + * This is an example Aggregated Hadoop Map/Reduce application. Computes the + * histogram of the words in the input texts. + * + * To run: bin/hadoop jar hadoop-*-examples.jar aggregatewordhist in-dir + * out-dir numOfReducers textinputformat + * + */ +public class AggregateWordHistogram { + + public static class AggregateWordHistogramPlugin + extends ValueAggregatorBaseDescriptor { + + /** + * Parse the given value, generate an aggregation-id/value pair per word. + * The ID is of type VALUE_HISTOGRAM, with WORD_HISTOGRAM as the real id. + * The value is WORD\t1. + * + * @return a list of the generated pairs. 
+ */ + @Override + public ArrayList> generateKeyValPairs(Object key, Object val) { + String words[] = val.toString().split(" |\t"); + ArrayList> retv = new ArrayList>(); + for (int i = 0; i < words.length; i++) { + Text valCount = new Text(words[i] + "\t" + "1"); + Entry en = generateEntry(VALUE_HISTOGRAM, "WORD_HISTOGRAM", + valCount); + retv.add(en); + } + return retv; + } + + } + + /** + * The main driver for word count map/reduce program. Invoke this method to + * submit the map/reduce job. + * + * @throws IOException + * When there is communication problems with the job tracker. + */ + @SuppressWarnings("unchecked") + public static void main(String[] args) throws IOException { + JobConf conf = ValueAggregatorJob.createValueAggregatorJob(args + , new Class[] {AggregateWordHistogramPlugin.class}); + + JobClient.runJob(conf); + } + +} diff --git a/src/examples/org/apache/hadoop/examples/DBCountPageView.java b/src/examples/org/apache/hadoop/examples/DBCountPageView.java new file mode 100644 index 0000000..fc56dd8 --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/DBCountPageView.java @@ -0,0 +1,428 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.examples; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Iterator; +import java.util.Random; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.NullWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.mapred.JobClient; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.MapReduceBase; +import org.apache.hadoop.mapred.Mapper; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.Reducer; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.mapred.lib.LongSumReducer; +import org.apache.hadoop.mapred.lib.db.DBConfiguration; +import org.apache.hadoop.mapred.lib.db.DBInputFormat; +import org.apache.hadoop.mapred.lib.db.DBOutputFormat; +import org.apache.hadoop.mapred.lib.db.DBWritable; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; +import org.hsqldb.Server; + +/** + * This is a demonstrative program, which uses DBInputFormat for reading + * the input data from a database, and DBOutputFormat for writing the data + * to the database. + *
+ * The program first creates the necessary tables, populates the input table, + * and runs the mapred job. + *
+ * The input data is a mini access log, with a <url,referrer,time> + * schema.The output is the number of pageviews of each url in the log, + * having the schema <url,pageview>. + * + * When called with no arguments the program starts a local HSQLDB server, and + * uses this database for storing/retrieving the data. + */ +public class DBCountPageView extends Configured implements Tool { + + private static final Log LOG = LogFactory.getLog(DBCountPageView.class); + + private Connection connection; + private boolean initialized = false; + + private static final String[] AccessFieldNames = {"url", "referrer", "time"}; + private static final String[] PageviewFieldNames = {"url", "pageview"}; + + private static final String DB_URL = "jdbc:hsqldb:hsql://localhost/URLAccess"; + private static final String DRIVER_CLASS = "org.hsqldb.jdbcDriver"; + + private Server server; + + private void startHsqldbServer() { + server = new Server(); + server.setDatabasePath(0, + System.getProperty("test.build.data",".") + "/URLAccess"); + server.setDatabaseName(0, "URLAccess"); + server.start(); + } + + private void createConnection(String driverClassName + , String url) throws Exception { + + Class.forName(driverClassName); + connection = DriverManager.getConnection(url); + connection.setAutoCommit(false); + } + + private void shutdown() { + try { + connection.commit(); + connection.close(); + }catch (Throwable ex) { + LOG.warn("Exception occurred while closing connection :" + + StringUtils.stringifyException(ex)); + } finally { + try { + if(server != null) { + server.shutdown(); + } + }catch (Throwable ex) { + LOG.warn("Exception occurred while shutting down HSQLDB :" + + StringUtils.stringifyException(ex)); + } + } + } + + private void initialize(String driverClassName, String url) + throws Exception { + if(!this.initialized) { + if(driverClassName.equals(DRIVER_CLASS)) { + startHsqldbServer(); + } + createConnection(driverClassName, url); + dropTables(); + createTables(); + populateAccess(); + this.initialized = true; + } + } + + private void dropTables() { + String dropAccess = "DROP TABLE Access"; + String dropPageview = "DROP TABLE Pageview"; + + try { + Statement st = connection.createStatement(); + st.executeUpdate(dropAccess); + st.executeUpdate(dropPageview); + connection.commit(); + st.close(); + }catch (SQLException ex) { + //ignore + } + } + + private void createTables() throws SQLException { + + String createAccess = + "CREATE TABLE " + + "Access(url VARCHAR(100) NOT NULL," + + " referrer VARCHAR(100)," + + " time BIGINT NOT NULL, " + + " PRIMARY KEY (url, time))"; + + String createPageview = + "CREATE TABLE " + + "Pageview(url VARCHAR(100) NOT NULL," + + " pageview BIGINT NOT NULL, " + + " PRIMARY KEY (url))"; + + Statement st = connection.createStatement(); + try { + st.executeUpdate(createAccess); + st.executeUpdate(createPageview); + connection.commit(); + } finally { + st.close(); + } + } + + /** + * Populates the Access table with generated records. 
+ */ + private void populateAccess() throws SQLException { + + PreparedStatement statement = null ; + try { + statement = connection.prepareStatement( + "INSERT INTO Access(url, referrer, time)" + + " VALUES (?, ?, ?)"); + + Random random = new Random(); + + int time = random.nextInt(50) + 50; + + final int PROBABILITY_PRECISION = 100; // 1 / 100 + final int NEW_PAGE_PROBABILITY = 15; // 15 / 100 + + + //Pages in the site : + String[] pages = {"/a", "/b", "/c", "/d", "/e", "/f", "/g", "/h", "/i", "/j"}; + //linkMatrix[i] is the array of pages(indexes) that page_i links to. + int[][] linkMatrix = {{1,5,7}, {0,7,4,6,}, {0,1,7,8}, {0,2,4,6,7,9}, {0,1}, + {0,3,5,9}, {0}, {0,1,3}, {0,2,6}, {0,2,6}}; + + //a mini model of user browsing a la pagerank + int currentPage = random.nextInt(pages.length); + String referrer = null; + + for(int i=0; i { + + LongWritable ONE = new LongWritable(1L); + @Override + public void map(LongWritable key, AccessRecord value, + OutputCollector output, Reporter reporter) + throws IOException { + + Text oKey = new Text(value.url); + output.collect(oKey, ONE); + } + } + + /** + * Reducer sums up the pageviews and emits a PageviewRecord, + * which will correspond to one tuple in the db. + */ + static class PageviewReducer extends MapReduceBase + implements Reducer { + + NullWritable n = NullWritable.get(); + @Override + public void reduce(Text key, Iterator values, + OutputCollector output, Reporter reporter) + throws IOException { + + long sum = 0L; + while(values.hasNext()) { + sum += values.next().get(); + } + output.collect(new PageviewRecord(key.toString(), sum), n); + } + } + + @Override + //Usage DBCountPageView [driverClass dburl] + public int run(String[] args) throws Exception { + + String driverClassName = DRIVER_CLASS; + String url = DB_URL; + + if(args.length > 1) { + driverClassName = args[0]; + url = args[1]; + } + + initialize(driverClassName, url); + + JobConf job = new JobConf(getConf(), DBCountPageView.class); + + job.setJobName("Count Pageviews of URLs"); + + job.setMapperClass(PageviewMapper.class); + job.setCombinerClass(LongSumReducer.class); + job.setReducerClass(PageviewReducer.class); + + DBConfiguration.configureDB(job, driverClassName, url); + + DBInputFormat.setInput(job, AccessRecord.class, "Access" + , null, "url", AccessFieldNames); + + DBOutputFormat.setOutput(job, "Pageview", PageviewFieldNames); + + job.setMapOutputKeyClass(Text.class); + job.setMapOutputValueClass(LongWritable.class); + + job.setOutputKeyClass(PageviewRecord.class); + job.setOutputValueClass(NullWritable.class); + + try { + JobClient.runJob(job); + + boolean correct = verify(); + if(!correct) { + throw new RuntimeException("Evaluation was not correct!"); + } + } finally { + shutdown(); + } + return 0; + } + + public static void main(String[] args) throws Exception { + int ret = ToolRunner.run(new DBCountPageView(), args); + System.exit(ret); + } + +} diff --git a/src/examples/org/apache/hadoop/examples/ExampleDriver.java b/src/examples/org/apache/hadoop/examples/ExampleDriver.java new file mode 100644 index 0000000..694becb --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/ExampleDriver.java @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.examples; + +import org.apache.hadoop.examples.dancing.DistributedPentomino; +import org.apache.hadoop.examples.dancing.Sudoku; +import org.apache.hadoop.examples.terasort.TeraGen; +import org.apache.hadoop.examples.terasort.TeraSort; +import org.apache.hadoop.examples.terasort.TeraValidate; +import org.apache.hadoop.util.ProgramDriver; + +/** + * A description of an example program based on its class and a + * human-readable description. + */ +public class ExampleDriver { + + public static void main(String argv[]){ + int exitCode = -1; + ProgramDriver pgd = new ProgramDriver(); + try { + pgd.addClass("wordcount", WordCount.class, + "A map/reduce program that counts the words in the input files."); + pgd.addClass("aggregatewordcount", AggregateWordCount.class, + "An Aggregate based map/reduce program that counts the words in the input files."); + pgd.addClass("aggregatewordhist", AggregateWordHistogram.class, + "An Aggregate based map/reduce program that computes the histogram of the words in the input files."); + pgd.addClass("grep", Grep.class, + "A map/reduce program that counts the matches of a regex in the input."); + pgd.addClass("randomwriter", RandomWriter.class, + "A map/reduce program that writes 10GB of random data per node."); + pgd.addClass("randomtextwriter", RandomTextWriter.class, + "A map/reduce program that writes 10GB of random textual data per node."); + pgd.addClass("sort", Sort.class, "A map/reduce program that sorts the data written by the random writer."); + pgd.addClass("pi", PiEstimator.class, "A map/reduce program that estimates Pi using monte-carlo method."); + pgd.addClass("pentomino", DistributedPentomino.class, + "A map/reduce tile laying program to find solutions to pentomino problems."); + pgd.addClass("secondarysort", SecondarySort.class, + "An example defining a secondary sort to the reduce."); + pgd.addClass("sudoku", Sudoku.class, "A sudoku solver."); + pgd.addClass("sleep", SleepJob.class, "A job that sleeps at each map and reduce task."); + pgd.addClass("join", Join.class, "A job that effects a join over sorted, equally partitioned datasets"); + pgd.addClass("multifilewc", MultiFileWordCount.class, "A job that counts words from several files."); + pgd.addClass("dbcount", DBCountPageView.class, "An example job that count the pageview counts from a database."); + pgd.addClass("teragen", TeraGen.class, "Generate data for the terasort"); + pgd.addClass("terasort", TeraSort.class, "Run the terasort"); + pgd.addClass("teravalidate", TeraValidate.class, "Checking results of terasort"); + pgd.driver(argv); + + // Success + exitCode = 0; + } + catch(Throwable e){ + e.printStackTrace(); + } + + System.exit(exitCode); + } +} + diff --git a/src/examples/org/apache/hadoop/examples/Grep.java b/src/examples/org/apache/hadoop/examples/Grep.java new file mode 100644 index 0000000..8f3a75b --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/Grep.java 
@@ -0,0 +1,97 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.examples; + +import java.util.Random; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.*; +import org.apache.hadoop.mapred.lib.*; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; + +/* Extracts matching regexs from input files and counts them. */ +public class Grep extends Configured implements Tool { + private Grep() {} // singleton + + public int run(String[] args) throws Exception { + if (args.length < 3) { + System.out.println("Grep []"); + ToolRunner.printGenericCommandUsage(System.out); + return -1; + } + + Path tempDir = + new Path("grep-temp-"+ + Integer.toString(new Random().nextInt(Integer.MAX_VALUE))); + + JobConf grepJob = new JobConf(getConf(), Grep.class); + + try { + + grepJob.setJobName("grep-search"); + + FileInputFormat.setInputPaths(grepJob, args[0]); + + grepJob.setMapperClass(RegexMapper.class); + grepJob.set("mapred.mapper.regex", args[2]); + if (args.length == 4) + grepJob.set("mapred.mapper.regex.group", args[3]); + + grepJob.setCombinerClass(LongSumReducer.class); + grepJob.setReducerClass(LongSumReducer.class); + + FileOutputFormat.setOutputPath(grepJob, tempDir); + grepJob.setOutputFormat(SequenceFileOutputFormat.class); + grepJob.setOutputKeyClass(Text.class); + grepJob.setOutputValueClass(LongWritable.class); + + JobClient.runJob(grepJob); + + JobConf sortJob = new JobConf(Grep.class); + sortJob.setJobName("grep-sort"); + + FileInputFormat.setInputPaths(sortJob, tempDir); + sortJob.setInputFormat(SequenceFileInputFormat.class); + + sortJob.setMapperClass(InverseMapper.class); + + sortJob.setNumReduceTasks(1); // write a single file + FileOutputFormat.setOutputPath(sortJob, new Path(args[1])); + sortJob.setOutputKeyComparatorClass // sort by decreasing freq + (LongWritable.DecreasingComparator.class); + + JobClient.runJob(sortJob); + } + finally { + FileSystem.get(grepJob).delete(tempDir, true); + } + return 0; + } + + public static void main(String[] args) throws Exception { + int res = ToolRunner.run(new Configuration(), new Grep(), args); + System.exit(res); + } + +} diff --git a/src/examples/org/apache/hadoop/examples/Join.java b/src/examples/org/apache/hadoop/examples/Join.java new file mode 100644 index 0000000..b4e6161 --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/Join.java @@ -0,0 +1,167 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.examples; + +import java.io.IOException; +import java.util.*; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.BytesWritable; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.mapred.*; +import org.apache.hadoop.mapred.join.*; +import org.apache.hadoop.mapred.lib.IdentityMapper; +import org.apache.hadoop.mapred.lib.IdentityReducer; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; + +/** + * This is the trivial map/reduce program that does absolutely nothing + * other than use the framework to fragment and sort the input values. + * + * To run: bin/hadoop jar build/hadoop-examples.jar join + * [-m maps] [-r reduces] + * [-inFormat input format class] + * [-outFormat output format class] + * [-outKey output key class] + * [-outValue output value class] + * [-joinOp <inner|outer|override>] + * [in-dir]* in-dir out-dir + */ +public class Join extends Configured implements Tool { + + static int printUsage() { + System.out.println("join [-m ] [-r ] " + + "[-inFormat ] " + + "[-outFormat ] " + + "[-outKey ] " + + "[-outValue ] " + + "[-joinOp ] " + + "[input]* "); + ToolRunner.printGenericCommandUsage(System.out); + return -1; + } + + /** + * The main driver for sort program. + * Invoke this method to submit the map/reduce job. + * @throws IOException When there is communication problems with the + * job tracker. 
+ */ + public int run(String[] args) throws Exception { + JobConf jobConf = new JobConf(getConf(), Sort.class); + jobConf.setJobName("join"); + + jobConf.setMapperClass(IdentityMapper.class); + jobConf.setReducerClass(IdentityReducer.class); + + JobClient client = new JobClient(jobConf); + ClusterStatus cluster = client.getClusterStatus(); + int num_maps = cluster.getTaskTrackers() * + jobConf.getInt("test.sort.maps_per_host", 10); + int num_reduces = (int) (cluster.getMaxReduceTasks() * 0.9); + String sort_reduces = jobConf.get("test.sort.reduces_per_host"); + if (sort_reduces != null) { + num_reduces = cluster.getTaskTrackers() * + Integer.parseInt(sort_reduces); + } + Class inputFormatClass = + SequenceFileInputFormat.class; + Class outputFormatClass = + SequenceFileOutputFormat.class; + Class outputKeyClass = BytesWritable.class; + Class outputValueClass = TupleWritable.class; + String op = "inner"; + List otherArgs = new ArrayList(); + for(int i=0; i < args.length; ++i) { + try { + if ("-m".equals(args[i])) { + num_maps = Integer.parseInt(args[++i]); + } else if ("-r".equals(args[i])) { + num_reduces = Integer.parseInt(args[++i]); + } else if ("-inFormat".equals(args[i])) { + inputFormatClass = + Class.forName(args[++i]).asSubclass(InputFormat.class); + } else if ("-outFormat".equals(args[i])) { + outputFormatClass = + Class.forName(args[++i]).asSubclass(OutputFormat.class); + } else if ("-outKey".equals(args[i])) { + outputKeyClass = + Class.forName(args[++i]).asSubclass(WritableComparable.class); + } else if ("-outValue".equals(args[i])) { + outputValueClass = + Class.forName(args[++i]).asSubclass(Writable.class); + } else if ("-joinOp".equals(args[i])) { + op = args[++i]; + } else { + otherArgs.add(args[i]); + } + } catch (NumberFormatException except) { + System.out.println("ERROR: Integer expected instead of " + args[i]); + return printUsage(); + } catch (ArrayIndexOutOfBoundsException except) { + System.out.println("ERROR: Required parameter missing from " + + args[i-1]); + return printUsage(); // exits + } + } + + // Set user-supplied (possibly default) job configs + jobConf.setNumMapTasks(num_maps); + jobConf.setNumReduceTasks(num_reduces); + + if (otherArgs.size() < 2) { + System.out.println("ERROR: Wrong number of parameters: "); + return printUsage(); + } + + FileOutputFormat.setOutputPath(jobConf, + new Path(otherArgs.remove(otherArgs.size() - 1))); + List plist = new ArrayList(otherArgs.size()); + for (String s : otherArgs) { + plist.add(new Path(s)); + } + + jobConf.setInputFormat(CompositeInputFormat.class); + jobConf.set("mapred.join.expr", CompositeInputFormat.compose( + op, inputFormatClass, plist.toArray(new Path[0]))); + jobConf.setOutputFormat(outputFormatClass); + + jobConf.setOutputKeyClass(outputKeyClass); + jobConf.setOutputValueClass(outputValueClass); + + Date startTime = new Date(); + System.out.println("Job started: " + startTime); + JobClient.runJob(jobConf); + Date end_time = new Date(); + System.out.println("Job ended: " + end_time); + System.out.println("The job took " + + (end_time.getTime() - startTime.getTime()) /1000 + " seconds."); + return 0; + } + + public static void main(String[] args) throws Exception { + int res = ToolRunner.run(new Configuration(), new Join(), args); + System.exit(res); + } + +} diff --git a/src/examples/org/apache/hadoop/examples/MultiFileWordCount.java b/src/examples/org/apache/hadoop/examples/MultiFileWordCount.java new file mode 100644 index 0000000..6b2b553 --- /dev/null +++ 
b/src/examples/org/apache/hadoop/examples/MultiFileWordCount.java @@ -0,0 +1,268 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.examples; + +import java.io.BufferedReader; +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.io.InputStreamReader; +import java.util.StringTokenizer; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.IntWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.mapred.FileInputFormat; +import org.apache.hadoop.mapred.FileOutputFormat; +import org.apache.hadoop.mapred.InputSplit; +import org.apache.hadoop.mapred.JobClient; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.MapReduceBase; +import org.apache.hadoop.mapred.Mapper; +import org.apache.hadoop.mapred.MultiFileInputFormat; +import org.apache.hadoop.mapred.MultiFileSplit; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.RecordReader; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.mapred.lib.LongSumReducer; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; + + +/** + * MultiFileWordCount is an example to demonstrate the usage of + * MultiFileInputFormat. This examples counts the occurrences of + * words in the text files under the given input directory. + */ +public class MultiFileWordCount extends Configured implements Tool { + + /** + * This record keeps <filename,offset> pairs. + */ + public static class WordOffset implements WritableComparable { + + private long offset; + private String fileName; + + public void readFields(DataInput in) throws IOException { + this.offset = in.readLong(); + this.fileName = Text.readString(in); + } + + public void write(DataOutput out) throws IOException { + out.writeLong(offset); + Text.writeString(out, fileName); + } + + public int compareTo(Object o) { + WordOffset that = (WordOffset)o; + + int f = this.fileName.compareTo(that.fileName); + if(f == 0) { + return (int)Math.signum((double)(this.offset - that.offset)); + } + return f; + } + @Override + public boolean equals(Object obj) { + if(obj instanceof WordOffset) + return this.compareTo(obj) == 0; + return false; + } + @Override + public int hashCode() { + assert false : "hashCode not designed"; + return 42; //an arbitrary constant + } + } + + + /** + * To use {@link MultiFileInputFormat}, one should extend it, to return a + * (custom) {@link RecordReader}. MultiFileInputFormat uses + * {@link MultiFileSplit}s. 
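+   *
+   * As a rough usage sketch (the input directory is a hypothetical
+   * placeholder), a driver only has to select the subclass as the job's
+   * input format; each map task then receives one {@link MultiFileSplit}
+   * covering several whole files:
+   * <pre>
+   *   JobConf conf = new JobConf(MultiFileWordCount.class);
+   *   conf.setInputFormat(MultiFileWordCount.MyInputFormat.class);
+   *   FileInputFormat.addInputPaths(conf, "/data/small-files");
+   *   conf.setNumMapTasks(4);  // asks for at most 4 splits, grouping files
+   * </pre>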
+ */ + public static class MyInputFormat + extends MultiFileInputFormat { + + @Override + public RecordReader getRecordReader(InputSplit split + , JobConf job, Reporter reporter) throws IOException { + return new MultiFileLineRecordReader(job, (MultiFileSplit)split); + } + } + + /** + * RecordReader is responsible from extracting records from the InputSplit. + * This record reader accepts a {@link MultiFileSplit}, which encapsulates several + * files, and no file is divided. + */ + public static class MultiFileLineRecordReader + implements RecordReader { + + private MultiFileSplit split; + private long offset; //total offset read so far; + private long totLength; + private FileSystem fs; + private int count = 0; + private Path[] paths; + + private FSDataInputStream currentStream; + private BufferedReader currentReader; + + public MultiFileLineRecordReader(Configuration conf, MultiFileSplit split) + throws IOException { + + this.split = split; + fs = FileSystem.get(conf); + this.paths = split.getPaths(); + this.totLength = split.getLength(); + this.offset = 0; + + //open the first file + Path file = paths[count]; + currentStream = fs.open(file); + currentReader = new BufferedReader(new InputStreamReader(currentStream)); + } + + public void close() throws IOException { } + + public long getPos() throws IOException { + long currentOffset = currentStream == null ? 0 : currentStream.getPos(); + return offset + currentOffset; + } + + public float getProgress() throws IOException { + return ((float)getPos()) / totLength; + } + + public boolean next(WordOffset key, Text value) throws IOException { + if(count >= split.getNumPaths()) + return false; + + /* Read from file, fill in key and value, if we reach the end of file, + * then open the next file and continue from there until all files are + * consumed. + */ + String line; + do { + line = currentReader.readLine(); + if(line == null) { + //close the file + currentReader.close(); + offset += split.getLength(count); + + if(++count >= split.getNumPaths()) //if we are done + return false; + + //open a new file + Path file = paths[count]; + currentStream = fs.open(file); + currentReader=new BufferedReader(new InputStreamReader(currentStream)); + key.fileName = file.getName(); + } + } while(line == null); + //update the key and value + key.offset = currentStream.getPos(); + value.set(line); + + return true; + } + + public WordOffset createKey() { + WordOffset wo = new WordOffset(); + wo.fileName = paths[0].toString(); //set as the first file + return wo; + } + + public Text createValue() { + return new Text(); + } + } + + /** + * This Mapper is similar to the one in {@link WordCount.MapClass}. 
+ */ + public static class MapClass extends MapReduceBase + implements Mapper { + + private final static IntWritable one = new IntWritable(1); + private Text word = new Text(); + + public void map(WordOffset key, Text value, + OutputCollector output, Reporter reporter) + throws IOException { + + String line = value.toString(); + StringTokenizer itr = new StringTokenizer(line); + while (itr.hasMoreTokens()) { + word.set(itr.nextToken()); + output.collect(word, one); + } + } + } + + + private void printUsage() { + System.out.println("Usage : multifilewc " ); + } + + public int run(String[] args) throws Exception { + + if(args.length < 2) { + printUsage(); + return 1; + } + + JobConf job = new JobConf(getConf(), MultiFileWordCount.class); + job.setJobName("MultiFileWordCount"); + + //set the InputFormat of the job to our InputFormat + job.setInputFormat(MyInputFormat.class); + + // the keys are words (strings) + job.setOutputKeyClass(Text.class); + // the values are counts (ints) + job.setOutputValueClass(IntWritable.class); + + //use the defined mapper + job.setMapperClass(MapClass.class); + //use the WordCount Reducer + job.setCombinerClass(LongSumReducer.class); + job.setReducerClass(LongSumReducer.class); + + FileInputFormat.addInputPaths(job, args[0]); + FileOutputFormat.setOutputPath(job, new Path(args[1])); + + JobClient.runJob(job); + + return 0; + } + + public static void main(String[] args) throws Exception { + int ret = ToolRunner.run(new MultiFileWordCount(), args); + System.exit(ret); + } + +} diff --git a/src/examples/org/apache/hadoop/examples/PiEstimator.java b/src/examples/org/apache/hadoop/examples/PiEstimator.java new file mode 100644 index 0000000..7c3dd3e --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/PiEstimator.java @@ -0,0 +1,353 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.examples; + +import java.io.IOException; +import java.math.BigDecimal; +import java.util.Iterator; + +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.BooleanWritable; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.io.SequenceFile.CompressionType; +import org.apache.hadoop.mapred.FileInputFormat; +import org.apache.hadoop.mapred.FileOutputFormat; +import org.apache.hadoop.mapred.JobClient; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.MapReduceBase; +import org.apache.hadoop.mapred.Mapper; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.Reducer; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.mapred.SequenceFileInputFormat; +import org.apache.hadoop.mapred.SequenceFileOutputFormat; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; + +/** + * A Map-reduce program to estimate the value of Pi + * using quasi-Monte Carlo method. + * + * Mapper: + * Generate points in a unit square + * and then count points inside/outside of the inscribed circle of the square. + * + * Reducer: + * Accumulate points inside/outside results from the mappers. + * + * Let numTotal = numInside + numOutside. + * The fraction numInside/numTotal is a rational approximation of + * the value (Area of the circle)/(Area of the square), + * where the area of the inscribed circle is Pi/4 + * and the area of unit square is 1. + * Then, Pi is estimated value to be 4(numInside/numTotal). + */ +public class PiEstimator extends Configured implements Tool { + /** tmp directory for input/output */ + static private final Path TMP_DIR = new Path( + PiEstimator.class.getSimpleName() + "_TMP_3_141592654"); + + /** 2-dimensional Halton sequence {H(i)}, + * where H(i) is a 2-dimensional point and i >= 1 is the index. + * Halton sequence is used to generate sample points for Pi estimation. + */ + private static class HaltonSequence { + /** Bases */ + static final int[] P = {2, 3}; + /** Maximum number of digits allowed */ + static final int[] K = {63, 40}; + + private long index; + private double[] x; + private double[][] q; + private int[][] d; + + /** Initialize to H(startindex), + * so the sequence begins with H(startindex+1). + */ + HaltonSequence(long startindex) { + index = startindex; + x = new double[K.length]; + q = new double[K.length][]; + d = new int[K.length][]; + for(int i = 0; i < K.length; i++) { + q[i] = new double[K[i]]; + d[i] = new int[K[i]]; + } + + for(int i = 0; i < K.length; i++) { + long k = index; + x[i] = 0; + + for(int j = 0; j < K[i]; j++) { + q[i][j] = (j == 0? 1.0: q[i][j-1])/P[i]; + d[i][j] = (int)(k % P[i]); + k = (k - d[i][j])/P[i]; + x[i] += d[i][j] * q[i][j]; + } + } + } + + /** Compute next point. + * Assume the current point is H(index). + * Compute H(index+1). + * + * @return a 2-dimensional point with coordinates in [0,1)^2 + */ + double[] nextPoint() { + index++; + for(int i = 0; i < K.length; i++) { + for(int j = 0; j < K[i]; j++) { + d[i][j]++; + x[i] += q[i][j]; + if (d[i][j] < P[i]) { + break; + } + d[i][j] = 0; + x[i] -= (j == 0? 1.0: q[i][j-1]); + } + } + return x; + } + } + + /** + * Mapper class for Pi estimation. 
+ * Generate points in a unit square + * and then count points inside/outside of the inscribed circle of the square. + */ + public static class PiMapper extends MapReduceBase + implements Mapper { + + /** Map method. + * @param offset samples starting from the (offset+1)th sample. + * @param size the number of samples for this map + * @param out output {ture->numInside, false->numOutside} + * @param reporter + */ + public void map(LongWritable offset, + LongWritable size, + OutputCollector out, + Reporter reporter) throws IOException { + + final HaltonSequence haltonsequence = new HaltonSequence(offset.get()); + long numInside = 0L; + long numOutside = 0L; + + for(long i = 0; i < size.get(); ) { + //generate points in a unit square + final double[] point = haltonsequence.nextPoint(); + + //count points inside/outside of the inscribed circle of the square + final double x = point[0] - 0.5; + final double y = point[1] - 0.5; + if (x*x + y*y > 0.25) { + numOutside++; + } else { + numInside++; + } + + //report status + i++; + if (i % 1000 == 0) { + reporter.setStatus("Generated " + i + " samples."); + } + } + + //output map results + out.collect(new BooleanWritable(true), new LongWritable(numInside)); + out.collect(new BooleanWritable(false), new LongWritable(numOutside)); + } + } + + /** + * Reducer class for Pi estimation. + * Accumulate points inside/outside results from the mappers. + */ + public static class PiReducer extends MapReduceBase + implements Reducer, Writable> { + + private long numInside = 0; + private long numOutside = 0; + private JobConf conf; //configuration for accessing the file system + + /** Store job configuration. */ + @Override + public void configure(JobConf job) { + conf = job; + } + + /** + * Accumulate number of points inside/outside results from the mappers. + * @param isInside Is the points inside? + * @param values An iterator to a list of point counts + * @param output dummy, not used here. + * @param reporter + */ + public void reduce(BooleanWritable isInside, + Iterator values, + OutputCollector, Writable> output, + Reporter reporter) throws IOException { + if (isInside.get()) { + for(; values.hasNext(); numInside += values.next().get()); + } else { + for(; values.hasNext(); numOutside += values.next().get()); + } + } + + /** + * Reduce task done, write output to a file. + */ + @Override + public void close() throws IOException { + //write output to a file + Path outDir = new Path(TMP_DIR, "out"); + Path outFile = new Path(outDir, "reduce-out"); + FileSystem fileSys = FileSystem.get(conf); + SequenceFile.Writer writer = SequenceFile.createWriter(fileSys, conf, + outFile, LongWritable.class, LongWritable.class, + CompressionType.NONE); + writer.append(new LongWritable(numInside), new LongWritable(numOutside)); + writer.close(); + } + } + + /** + * Run a map/reduce job for estimating Pi. 
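+   *
+   * The resulting estimate mirrors the arithmetic at the end of this method:
+   * with numTotal = numMaps * numPoints samples in the unit square, Pi is
+   * approximated as 4 * numInside / numTotal. A small sketch of that final
+   * step (the counts below are made-up example numbers):
+   * <pre>
+   *   long numInside = 7853987L;
+   *   long numMaps = 10L, numPoints = 1000000L;
+   *   BigDecimal pi = BigDecimal.valueOf(4).setScale(20)
+   *       .multiply(BigDecimal.valueOf(numInside))
+   *       .divide(BigDecimal.valueOf(numMaps))
+   *       .divide(BigDecimal.valueOf(numPoints));  // = 3.1415948
+   * </pre>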
+ * + * @return the estimated value of Pi + */ + public static BigDecimal estimate(int numMaps, long numPoints, JobConf jobConf + ) throws IOException { + //setup job conf + jobConf.setJobName(PiEstimator.class.getSimpleName()); + + jobConf.setInputFormat(SequenceFileInputFormat.class); + + jobConf.setOutputKeyClass(BooleanWritable.class); + jobConf.setOutputValueClass(LongWritable.class); + jobConf.setOutputFormat(SequenceFileOutputFormat.class); + + jobConf.setMapperClass(PiMapper.class); + jobConf.setNumMapTasks(numMaps); + + jobConf.setReducerClass(PiReducer.class); + jobConf.setNumReduceTasks(1); + + // turn off speculative execution, because DFS doesn't handle + // multiple writers to the same file. + jobConf.setSpeculativeExecution(false); + + //setup input/output directories + final Path inDir = new Path(TMP_DIR, "in"); + final Path outDir = new Path(TMP_DIR, "out"); + FileInputFormat.setInputPaths(jobConf, inDir); + FileOutputFormat.setOutputPath(jobConf, outDir); + + final FileSystem fs = FileSystem.get(jobConf); + if (fs.exists(TMP_DIR)) { + throw new IOException("Tmp directory " + fs.makeQualified(TMP_DIR) + + " already exists. Please remove it first."); + } + if (!fs.mkdirs(inDir)) { + throw new IOException("Cannot create input directory " + inDir); + } + + try { + //generate an input file for each map task + for(int i=0; i < numMaps; ++i) { + final Path file = new Path(inDir, "part"+i); + final LongWritable offset = new LongWritable(i * numPoints); + final LongWritable size = new LongWritable(numPoints); + final SequenceFile.Writer writer = SequenceFile.createWriter( + fs, jobConf, file, + LongWritable.class, LongWritable.class, CompressionType.NONE); + try { + writer.append(offset, size); + } finally { + writer.close(); + } + System.out.println("Wrote input for Map #"+i); + } + + //start a map/reduce job + System.out.println("Starting Job"); + final long startTime = System.currentTimeMillis(); + JobClient.runJob(jobConf); + final double duration = (System.currentTimeMillis() - startTime)/1000.0; + System.out.println("Job Finished in " + duration + " seconds"); + + //read outputs + Path inFile = new Path(outDir, "reduce-out"); + LongWritable numInside = new LongWritable(); + LongWritable numOutside = new LongWritable(); + SequenceFile.Reader reader = new SequenceFile.Reader(fs, inFile, jobConf); + try { + reader.next(numInside, numOutside); + } finally { + reader.close(); + } + + //compute estimated value + return BigDecimal.valueOf(4).setScale(20) + .multiply(BigDecimal.valueOf(numInside.get())) + .divide(BigDecimal.valueOf(numMaps)) + .divide(BigDecimal.valueOf(numPoints)); + } finally { + fs.delete(TMP_DIR, true); + } + } + + /** + * Parse arguments and then runs a map/reduce job. + * Print output in standard out. + * + * @return a non-zero if there is an error. Otherwise, return 0. + */ + public int run(String[] args) throws Exception { + if (args.length != 2) { + System.err.println("Usage: "+getClass().getName()+" "); + ToolRunner.printGenericCommandUsage(System.err); + return -1; + } + + final int nMaps = Integer.parseInt(args[0]); + final long nSamples = Long.parseLong(args[1]); + + System.out.println("Number of Maps = " + nMaps); + System.out.println("Samples per Map = " + nSamples); + + final JobConf jobConf = new JobConf(getConf(), getClass()); + System.out.println("Estimated value of Pi is " + + estimate(nMaps, nSamples, jobConf)); + return 0; + } + + /** + * main method for running it as a stand alone command. 
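+   *
+   * A minimal sketch of running the tool programmatically; the argument
+   * values (10 maps, 1,000,000 samples per map) are arbitrary examples:
+   * <pre>
+   *   int exitCode = ToolRunner.run(null, new PiEstimator(),
+   *       new String[] {"10", "1000000"});
+   * </pre>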
+ */ + public static void main(String[] argv) throws Exception { + System.exit(ToolRunner.run(null, new PiEstimator(), argv)); + } +} diff --git a/src/examples/org/apache/hadoop/examples/RandomTextWriter.java b/src/examples/org/apache/hadoop/examples/RandomTextWriter.java new file mode 100644 index 0000000..c668a4e --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/RandomTextWriter.java @@ -0,0 +1,758 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.examples; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; +import java.util.Random; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.ClusterStatus; +import org.apache.hadoop.mapred.FileOutputFormat; +import org.apache.hadoop.mapred.JobClient; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.MapReduceBase; +import org.apache.hadoop.mapred.Mapper; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.OutputFormat; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.mapred.SequenceFileOutputFormat; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; + +/** + * This program uses map/reduce to just run a distributed job where there is + * no interaction between the tasks and each task writes a large unsorted + * random sequence of words. + * In order for this program to generate data for terasort with a 5-10 words + * per key and 20-100 words per value, have the following config: + *

+ * <?xml version="1.0"?> + * <?xml-stylesheet type="text/xsl" href="configuration.xsl"?> + * <configuration> + * <property> + * <name>test.randomtextwrite.min_words_key</name> + * <value>5</value> + * </property> + * <property> + * <name>test.randomtextwrite.max_words_key</name> + * <value>10</value> + * </property> + * <property> + * <name>test.randomtextwrite.min_words_value</name> + * <value>20</value> + * </property> + * <property> + * <name>test.randomtextwrite.max_words_value</name> + * <value>100</value> + * </property> + * <property> + * <name>test.randomtextwrite.total_bytes</name> + * <value>1099511627776</value> + * </property> + * </configuration> + * + * Equivalently, {@link RandomTextWriter} also supports all the above options + * and ones supported by {@link Tool} via the command-line. + * + * To run: bin/hadoop jar hadoop-${version}-examples.jar randomtextwriter + * [-outFormat output format class] output + */ +public class RandomTextWriter extends Configured implements Tool { + + static int printUsage() { + System.out.println("randomtextwriter " + + "[-outFormat ] " + + ""); + ToolRunner.printGenericCommandUsage(System.out); + return -1; + } + + /** + * User counters + */ + static enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN } + + static class Map extends MapReduceBase + implements Mapper { + + private long numBytesToWrite; + private int minWordsInKey; + private int wordsInKeyRange; + private int minWordsInValue; + private int wordsInValueRange; + private Random random = new Random(); + + /** + * Save the configuration value that we need to write the data. + */ + public void configure(JobConf job) { + numBytesToWrite = job.getLong("test.randomtextwrite.bytes_per_map", + 1*1024*1024*1024); + minWordsInKey = + job.getInt("test.randomtextwrite.min_words_key", 5); + wordsInKeyRange = + (job.getInt("test.randomtextwrite.max_words_key", 10) - + minWordsInKey); + minWordsInValue = + job.getInt("test.randomtextwrite.min_words_value", 10); + wordsInValueRange = + (job.getInt("test.randomtextwrite.max_words_value", 100) - + minWordsInValue); + } + + /** + * Given an output filename, write a bunch of random records to it. + */ + public void map(Text key, Text value, + OutputCollector output, + Reporter reporter) throws IOException { + int itemCount = 0; + while (numBytesToWrite > 0) { + // Generate the key/value + int noWordsKey = minWordsInKey + + (wordsInKeyRange != 0 ? random.nextInt(wordsInKeyRange) : 0); + int noWordsValue = minWordsInValue + + (wordsInValueRange != 0 ? random.nextInt(wordsInValueRange) : 0); + Text keyWords = generateSentence(noWordsKey); + Text valueWords = generateSentence(noWordsValue); + + // Write the sentence + output.collect(keyWords, valueWords); + + numBytesToWrite -= (keyWords.getLength() + valueWords.getLength()); + + // Update counters, progress etc. + reporter.incrCounter(Counters.BYTES_WRITTEN, + (keyWords.getLength()+valueWords.getLength())); + reporter.incrCounter(Counters.RECORDS_WRITTEN, 1); + if (++itemCount % 200 == 0) { + reporter.setStatus("wrote record " + itemCount + ". 
" + + numBytesToWrite + " bytes left."); + } + } + reporter.setStatus("done with " + itemCount + " records."); + } + + private Text generateSentence(int noWords) { + StringBuffer sentence = new StringBuffer(); + String space = " "; + for (int i=0; i < noWords; ++i) { + sentence.append(words[random.nextInt(words.length)]); + sentence.append(space); + } + return new Text(sentence.toString()); + } + } + + /** + * This is the main routine for launching a distributed random write job. + * It runs 10 maps/node and each node writes 1 gig of data to a DFS file. + * The reduce doesn't do anything. + * + * @throws IOException + */ + public int run(String[] args) throws Exception { + if (args.length == 0) { + return printUsage(); + } + + JobConf job = new JobConf(getConf()); + + job.setJarByClass(RandomTextWriter.class); + job.setJobName("random-text-writer"); + + job.setOutputKeyClass(Text.class); + job.setOutputValueClass(Text.class); + + job.setInputFormat(RandomWriter.RandomInputFormat.class); + job.setMapperClass(Map.class); + + JobClient client = new JobClient(job); + ClusterStatus cluster = client.getClusterStatus(); + int numMapsPerHost = job.getInt("test.randomtextwrite.maps_per_host", 10); + long numBytesToWritePerMap = job.getLong("test.randomtextwrite.bytes_per_map", + 1*1024*1024*1024); + if (numBytesToWritePerMap == 0) { + System.err.println("Cannot have test.randomtextwrite.bytes_per_map set to 0"); + return -2; + } + long totalBytesToWrite = job.getLong("test.randomtextwrite.total_bytes", + numMapsPerHost*numBytesToWritePerMap*cluster.getTaskTrackers()); + int numMaps = (int) (totalBytesToWrite / numBytesToWritePerMap); + if (numMaps == 0 && totalBytesToWrite > 0) { + numMaps = 1; + job.setLong("test.randomtextwrite.bytes_per_map", totalBytesToWrite); + } + + Class outputFormatClass = + SequenceFileOutputFormat.class; + List otherArgs = new ArrayList(); + for(int i=0; i < args.length; ++i) { + try { + if ("-outFormat".equals(args[i])) { + outputFormatClass = + Class.forName(args[++i]).asSubclass(OutputFormat.class); + } else { + otherArgs.add(args[i]); + } + } catch (ArrayIndexOutOfBoundsException except) { + System.out.println("ERROR: Required parameter missing from " + + args[i-1]); + return printUsage(); // exits + } + } + + job.setOutputFormat(outputFormatClass); + FileOutputFormat.setOutputPath(job, new Path(otherArgs.get(0))); + + job.setNumMapTasks(numMaps); + System.out.println("Running " + numMaps + " maps."); + + // reducer NONE + job.setNumReduceTasks(0); + + Date startTime = new Date(); + System.out.println("Job started: " + startTime); + JobClient.runJob(job); + Date endTime = new Date(); + System.out.println("Job ended: " + endTime); + System.out.println("The job took " + + (endTime.getTime() - startTime.getTime()) /1000 + + " seconds."); + + return 0; + } + + public static void main(String[] args) throws Exception { + int res = ToolRunner.run(new Configuration(), new RandomTextWriter(), args); + System.exit(res); + } + + /** + * A random list of 100 words from /usr/share/dict/words + */ + private static String[] words = { + "diurnalness", "Homoiousian", + "spiranthic", "tetragynian", + "silverhead", "ungreat", + "lithograph", "exploiter", + "physiologian", "by", + "hellbender", "Filipendula", + "undeterring", "antiscolic", + "pentagamist", "hypoid", + "cacuminal", "sertularian", + "schoolmasterism", "nonuple", + "gallybeggar", "phytonic", + "swearingly", "nebular", + "Confervales", "thermochemically", + "characinoid", "cocksuredom", + "fallacious", "feasibleness", + 
"debromination", "playfellowship", + "tramplike", "testa", + "participatingly", "unaccessible", + "bromate", "experientialist", + "roughcast", "docimastical", + "choralcelo", "blightbird", + "peptonate", "sombreroed", + "unschematized", "antiabolitionist", + "besagne", "mastication", + "bromic", "sviatonosite", + "cattimandoo", "metaphrastical", + "endotheliomyoma", "hysterolysis", + "unfulminated", "Hester", + "oblongly", "blurredness", + "authorling", "chasmy", + "Scorpaenidae", "toxihaemia", + "Dictograph", "Quakerishly", + "deaf", "timbermonger", + "strammel", "Thraupidae", + "seditious", "plerome", + "Arneb", "eristically", + "serpentinic", "glaumrie", + "socioromantic", "apocalypst", + "tartrous", "Bassaris", + "angiolymphoma", "horsefly", + "kenno", "astronomize", + "euphemious", "arsenide", + "untongued", "parabolicness", + "uvanite", "helpless", + "gemmeous", "stormy", + "templar", "erythrodextrin", + "comism", "interfraternal", + "preparative", "parastas", + "frontoorbital", "Ophiosaurus", + "diopside", "serosanguineous", + "ununiformly", "karyological", + "collegian", "allotropic", + "depravity", "amylogenesis", + "reformatory", "epidymides", + "pleurotropous", "trillium", + "dastardliness", "coadvice", + "embryotic", "benthonic", + "pomiferous", "figureheadship", + "Megaluridae", "Harpa", + "frenal", "commotion", + "abthainry", "cobeliever", + "manilla", "spiciferous", + "nativeness", "obispo", + "monilioid", "biopsic", + "valvula", "enterostomy", + "planosubulate", "pterostigma", + "lifter", "triradiated", + "venialness", "tum", + "archistome", "tautness", + "unswanlike", "antivenin", + "Lentibulariaceae", "Triphora", + "angiopathy", "anta", + "Dawsonia", "becomma", + "Yannigan", "winterproof", + "antalgol", "harr", + "underogating", "ineunt", + "cornberry", "flippantness", + "scyphostoma", "approbation", + "Ghent", "Macraucheniidae", + "scabbiness", "unanatomized", + "photoelasticity", "eurythermal", + "enation", "prepavement", + "flushgate", "subsequentially", + "Edo", "antihero", + "Isokontae", "unforkedness", + "porriginous", "daytime", + "nonexecutive", "trisilicic", + "morphiomania", "paranephros", + "botchedly", "impugnation", + "Dodecatheon", "obolus", + "unburnt", "provedore", + "Aktistetae", "superindifference", + "Alethea", "Joachimite", + "cyanophilous", "chorograph", + "brooky", "figured", + "periclitation", "quintette", + "hondo", "ornithodelphous", + "unefficient", "pondside", + "bogydom", "laurinoxylon", + "Shiah", "unharmed", + "cartful", "noncrystallized", + "abusiveness", "cromlech", + "japanned", "rizzomed", + "underskin", "adscendent", + "allectory", "gelatinousness", + "volcano", "uncompromisingly", + "cubit", "idiotize", + "unfurbelowed", "undinted", + "magnetooptics", "Savitar", + "diwata", "ramosopalmate", + "Pishquow", "tomorn", + "apopenptic", "Haversian", + "Hysterocarpus", "ten", + "outhue", "Bertat", + "mechanist", "asparaginic", + "velaric", "tonsure", + "bubble", "Pyrales", + "regardful", "glyphography", + "calabazilla", "shellworker", + "stradametrical", "havoc", + "theologicopolitical", "sawdust", + "diatomaceous", "jajman", + "temporomastoid", "Serrifera", + "Ochnaceae", "aspersor", + "trailmaking", "Bishareen", + "digitule", "octogynous", + "epididymitis", "smokefarthings", + "bacillite", "overcrown", + "mangonism", "sirrah", + "undecorated", "psychofugal", + "bismuthiferous", "rechar", + "Lemuridae", "frameable", + "thiodiazole", "Scanic", + "sportswomanship", "interruptedness", + "admissory", "osteopaedion", + "tingly", "tomorrowness", + 
"ethnocracy", "trabecular", + "vitally", "fossilism", + "adz", "metopon", + "prefatorial", "expiscate", + "diathermacy", "chronist", + "nigh", "generalizable", + "hysterogen", "aurothiosulphuric", + "whitlowwort", "downthrust", + "Protestantize", "monander", + "Itea", "chronographic", + "silicize", "Dunlop", + "eer", "componental", + "spot", "pamphlet", + "antineuritic", "paradisean", + "interruptor", "debellator", + "overcultured", "Florissant", + "hyocholic", "pneumatotherapy", + "tailoress", "rave", + "unpeople", "Sebastian", + "thermanesthesia", "Coniferae", + "swacking", "posterishness", + "ethmopalatal", "whittle", + "analgize", "scabbardless", + "naught", "symbiogenetically", + "trip", "parodist", + "columniform", "trunnel", + "yawler", "goodwill", + "pseudohalogen", "swangy", + "cervisial", "mediateness", + "genii", "imprescribable", + "pony", "consumptional", + "carposporangial", "poleax", + "bestill", "subfebrile", + "sapphiric", "arrowworm", + "qualminess", "ultraobscure", + "thorite", "Fouquieria", + "Bermudian", "prescriber", + "elemicin", "warlike", + "semiangle", "rotular", + "misthread", "returnability", + "seraphism", "precostal", + "quarried", "Babylonism", + "sangaree", "seelful", + "placatory", "pachydermous", + "bozal", "galbulus", + "spermaphyte", "cumbrousness", + "pope", "signifier", + "Endomycetaceae", "shallowish", + "sequacity", "periarthritis", + "bathysphere", "pentosuria", + "Dadaism", "spookdom", + "Consolamentum", "afterpressure", + "mutter", "louse", + "ovoviviparous", "corbel", + "metastoma", "biventer", + "Hydrangea", "hogmace", + "seizing", "nonsuppressed", + "oratorize", "uncarefully", + "benzothiofuran", "penult", + "balanocele", "macropterous", + "dishpan", "marten", + "absvolt", "jirble", + "parmelioid", "airfreighter", + "acocotl", "archesporial", + "hypoplastral", "preoral", + "quailberry", "cinque", + "terrestrially", "stroking", + "limpet", "moodishness", + "canicule", "archididascalian", + "pompiloid", "overstaid", + "introducer", "Italical", + "Christianopaganism", "prescriptible", + "subofficer", "danseuse", + "cloy", "saguran", + "frictionlessly", "deindividualization", + "Bulanda", "ventricous", + "subfoliar", "basto", + "scapuloradial", "suspend", + "stiffish", "Sphenodontidae", + "eternal", "verbid", + "mammonish", "upcushion", + "barkometer", "concretion", + "preagitate", "incomprehensible", + "tristich", "visceral", + "hemimelus", "patroller", + "stentorophonic", "pinulus", + "kerykeion", "brutism", + "monstership", "merciful", + "overinstruct", "defensibly", + "bettermost", "splenauxe", + "Mormyrus", "unreprimanded", + "taver", "ell", + "proacquittal", "infestation", + "overwoven", "Lincolnlike", + "chacona", "Tamil", + "classificational", "lebensraum", + "reeveland", "intuition", + "Whilkut", "focaloid", + "Eleusinian", "micromembrane", + "byroad", "nonrepetition", + "bacterioblast", "brag", + "ribaldrous", "phytoma", + "counteralliance", "pelvimetry", + "pelf", "relaster", + "thermoresistant", "aneurism", + "molossic", "euphonym", + "upswell", "ladhood", + "phallaceous", "inertly", + "gunshop", "stereotypography", + "laryngic", "refasten", + "twinling", "oflete", + "hepatorrhaphy", "electrotechnics", + "cockal", "guitarist", + "topsail", "Cimmerianism", + "larklike", "Llandovery", + "pyrocatechol", "immatchable", + "chooser", "metrocratic", + "craglike", "quadrennial", + "nonpoisonous", "undercolored", + "knob", "ultratense", + "balladmonger", "slait", + "sialadenitis", "bucketer", + "magnificently", "unstipulated", + "unscourged", 
"unsupercilious", + "packsack", "pansophism", + "soorkee", "percent", + "subirrigate", "champer", + "metapolitics", "spherulitic", + "involatile", "metaphonical", + "stachyuraceous", "speckedness", + "bespin", "proboscidiform", + "gul", "squit", + "yeelaman", "peristeropode", + "opacousness", "shibuichi", + "retinize", "yote", + "misexposition", "devilwise", + "pumpkinification", "vinny", + "bonze", "glossing", + "decardinalize", "transcortical", + "serphoid", "deepmost", + "guanajuatite", "wemless", + "arval", "lammy", + "Effie", "Saponaria", + "tetrahedral", "prolificy", + "excerpt", "dunkadoo", + "Spencerism", "insatiately", + "Gilaki", "oratorship", + "arduousness", "unbashfulness", + "Pithecolobium", "unisexuality", + "veterinarian", "detractive", + "liquidity", "acidophile", + "proauction", "sural", + "totaquina", "Vichyite", + "uninhabitedness", "allegedly", + "Gothish", "manny", + "Inger", "flutist", + "ticktick", "Ludgatian", + "homotransplant", "orthopedical", + "diminutively", "monogoneutic", + "Kenipsim", "sarcologist", + "drome", "stronghearted", + "Fameuse", "Swaziland", + "alen", "chilblain", + "beatable", "agglomeratic", + "constitutor", "tendomucoid", + "porencephalous", "arteriasis", + "boser", "tantivy", + "rede", "lineamental", + "uncontradictableness", "homeotypical", + "masa", "folious", + "dosseret", "neurodegenerative", + "subtransverse", "Chiasmodontidae", + "palaeotheriodont", "unstressedly", + "chalcites", "piquantness", + "lampyrine", "Aplacentalia", + "projecting", "elastivity", + "isopelletierin", "bladderwort", + "strander", "almud", + "iniquitously", "theologal", + "bugre", "chargeably", + "imperceptivity", "meriquinoidal", + "mesophyte", "divinator", + "perfunctory", "counterappellant", + "synovial", "charioteer", + "crystallographical", "comprovincial", + "infrastapedial", "pleasurehood", + "inventurous", "ultrasystematic", + "subangulated", "supraoesophageal", + "Vaishnavism", "transude", + "chrysochrous", "ungrave", + "reconciliable", "uninterpleaded", + "erlking", "wherefrom", + "aprosopia", "antiadiaphorist", + "metoxazine", "incalculable", + "umbellic", "predebit", + "foursquare", "unimmortal", + "nonmanufacture", "slangy", + "predisputant", "familist", + "preaffiliate", "friarhood", + "corelysis", "zoonitic", + "halloo", "paunchy", + "neuromimesis", "aconitine", + "hackneyed", "unfeeble", + "cubby", "autoschediastical", + "naprapath", "lyrebird", + "inexistency", "leucophoenicite", + "ferrogoslarite", "reperuse", + "uncombable", "tambo", + "propodiale", "diplomatize", + "Russifier", "clanned", + "corona", "michigan", + "nonutilitarian", "transcorporeal", + "bought", "Cercosporella", + "stapedius", "glandularly", + "pictorially", "weism", + "disilane", "rainproof", + "Caphtor", "scrubbed", + "oinomancy", "pseudoxanthine", + "nonlustrous", "redesertion", + "Oryzorictinae", "gala", + "Mycogone", "reappreciate", + "cyanoguanidine", "seeingness", + "breadwinner", "noreast", + "furacious", "epauliere", + "omniscribent", "Passiflorales", + "uninductive", "inductivity", + "Orbitolina", "Semecarpus", + "migrainoid", "steprelationship", + "phlogisticate", "mesymnion", + "sloped", "edificator", + "beneficent", "culm", + "paleornithology", "unurban", + "throbless", "amplexifoliate", + "sesquiquintile", "sapience", + "astucious", "dithery", + "boor", "ambitus", + "scotching", "uloid", + "uncompromisingness", "hoove", + "waird", "marshiness", + "Jerusalem", "mericarp", + "unevoked", "benzoperoxide", + "outguess", "pyxie", + "hymnic", "euphemize", + "mendacity", 
"erythremia", + "rosaniline", "unchatteled", + "lienteria", "Bushongo", + "dialoguer", "unrepealably", + "rivethead", "antideflation", + "vinegarish", "manganosiderite", + "doubtingness", "ovopyriform", + "Cephalodiscus", "Muscicapa", + "Animalivora", "angina", + "planispheric", "ipomoein", + "cuproiodargyrite", "sandbox", + "scrat", "Munnopsidae", + "shola", "pentafid", + "overstudiousness", "times", + "nonprofession", "appetible", + "valvulotomy", "goladar", + "uniarticular", "oxyterpene", + "unlapsing", "omega", + "trophonema", "seminonflammable", + "circumzenithal", "starer", + "depthwise", "liberatress", + "unleavened", "unrevolting", + "groundneedle", "topline", + "wandoo", "umangite", + "ordinant", "unachievable", + "oversand", "snare", + "avengeful", "unexplicit", + "mustafina", "sonable", + "rehabilitative", "eulogization", + "papery", "technopsychology", + "impressor", "cresylite", + "entame", "transudatory", + "scotale", "pachydermatoid", + "imaginary", "yeat", + "slipped", "stewardship", + "adatom", "cockstone", + "skyshine", "heavenful", + "comparability", "exprobratory", + "dermorhynchous", "parquet", + "cretaceous", "vesperal", + "raphis", "undangered", + "Glecoma", "engrain", + "counteractively", "Zuludom", + "orchiocatabasis", "Auriculariales", + "warriorwise", "extraorganismal", + "overbuilt", "alveolite", + "tetchy", "terrificness", + "widdle", "unpremonished", + "rebilling", "sequestrum", + "equiconvex", "heliocentricism", + "catabaptist", "okonite", + "propheticism", "helminthagogic", + "calycular", "giantly", + "wingable", "golem", + "unprovided", "commandingness", + "greave", "haply", + "doina", "depressingly", + "subdentate", "impairment", + "decidable", "neurotrophic", + "unpredict", "bicorporeal", + "pendulant", "flatman", + "intrabred", "toplike", + "Prosobranchiata", "farrantly", + "toxoplasmosis", "gorilloid", + "dipsomaniacal", "aquiline", + "atlantite", "ascitic", + "perculsive", "prospectiveness", + "saponaceous", "centrifugalization", + "dinical", "infravaginal", + "beadroll", "affaite", + "Helvidian", "tickleproof", + "abstractionism", "enhedge", + "outwealth", "overcontribute", + "coldfinch", "gymnastic", + "Pincian", "Munychian", + "codisjunct", "quad", + "coracomandibular", "phoenicochroite", + "amender", "selectivity", + "putative", "semantician", + "lophotrichic", "Spatangoidea", + "saccharogenic", "inferent", + "Triconodonta", "arrendation", + "sheepskin", "taurocolla", + "bunghole", "Machiavel", + "triakistetrahedral", "dehairer", + "prezygapophysial", "cylindric", + "pneumonalgia", "sleigher", + "emir", "Socraticism", + "licitness", "massedly", + "instructiveness", "sturdied", + "redecrease", "starosta", + "evictor", "orgiastic", + "squdge", "meloplasty", + "Tsonecan", "repealableness", + "swoony", "myesthesia", + "molecule", "autobiographist", + "reciprocation", "refective", + "unobservantness", "tricae", + "ungouged", "floatability", + "Mesua", "fetlocked", + "chordacentrum", "sedentariness", + "various", "laubanite", + "nectopod", "zenick", + "sequentially", "analgic", + "biodynamics", "posttraumatic", + "nummi", "pyroacetic", + "bot", "redescend", + "dispermy", "undiffusive", + "circular", "trillion", + "Uraniidae", "ploration", + "discipular", "potentness", + "sud", "Hu", + "Eryon", "plugger", + "subdrainage", "jharal", + "abscission", "supermarket", + "countergabion", "glacierist", + "lithotresis", "minniebush", + "zanyism", "eucalypteol", + "sterilely", "unrealize", + "unpatched", "hypochondriacism", + "critically", "cheesecutter", + }; +} 
diff --git a/src/examples/org/apache/hadoop/examples/RandomWriter.java b/src/examples/org/apache/hadoop/examples/RandomWriter.java new file mode 100644 index 0000000..1127fef --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/RandomWriter.java @@ -0,0 +1,287 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.examples; + +import java.io.IOException; +import java.util.Date; +import java.util.Random; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.BytesWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.mapred.ClusterStatus; +import org.apache.hadoop.mapred.FileOutputFormat; +import org.apache.hadoop.mapred.FileSplit; +import org.apache.hadoop.mapred.InputFormat; +import org.apache.hadoop.mapred.InputSplit; +import org.apache.hadoop.mapred.JobClient; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.MapReduceBase; +import org.apache.hadoop.mapred.Mapper; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.RecordReader; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.mapred.SequenceFileOutputFormat; +import org.apache.hadoop.mapred.lib.IdentityReducer; +import org.apache.hadoop.util.GenericOptionsParser; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; + +/** + * This program uses map/reduce to just run a distributed job where there is + * no interaction between the tasks and each task write a large unsorted + * random binary sequence file of BytesWritable. + * In order for this program to generate data for terasort with 10-byte keys + * and 90-byte values, have the following config: + * + * <?xml version="1.0"?> + * <?xml-stylesheet type="text/xsl" href="configuration.xsl"?> + * <configuration> + * <property> + * <name>test.randomwrite.min_key</name> + * <value>10</value> + * </property> + * <property> + * <name>test.randomwrite.max_key</name> + * <value>10</value> + * </property> + * <property> + * <name>test.randomwrite.min_value</name> + * <value>90</value> + * </property> + * <property> + * <name>test.randomwrite.max_value</name> + * <value>90</value> + * </property> + * <property> + * <name>test.randomwrite.total_bytes</name> + * <value>1099511627776</value> + * </property> + * </configuration> + * + * Equivalently, {@link RandomWriter} also supports all the above options + * and ones supported by {@link GenericOptionsParser} via the command-line. 
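+ *
+ * The same knobs can also be set programmatically on a {@link JobConf}
+ * before the tool runs; a rough sketch (the output directory and total size
+ * below are arbitrary examples):
+ * <pre>
+ *   JobConf conf = new JobConf(RandomWriter.class);
+ *   conf.setInt("test.randomwrite.min_key", 10);
+ *   conf.setInt("test.randomwrite.max_key", 10);
+ *   conf.setInt("test.randomwrite.min_value", 90);
+ *   conf.setInt("test.randomwrite.max_value", 90);
+ *   conf.setLong("test.randomwrite.total_bytes", 1073741824L);  // 1 GB in total
+ *   int exitCode = ToolRunner.run(conf, new RandomWriter(),
+ *       new String[] {"/tmp/random-out"});
+ * </pre>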
+ */ +public class RandomWriter extends Configured implements Tool { + + /** + * User counters + */ + static enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN } + + /** + * A custom input format that creates virtual inputs of a single string + * for each map. + */ + static class RandomInputFormat implements InputFormat { + + /** + * Generate the requested number of file splits, with the filename + * set to the filename of the output file. + */ + public InputSplit[] getSplits(JobConf job, + int numSplits) throws IOException { + InputSplit[] result = new InputSplit[numSplits]; + Path outDir = FileOutputFormat.getOutputPath(job); + for(int i=0; i < result.length; ++i) { + result[i] = new FileSplit(new Path(outDir, "dummy-split-" + i), 0, 1, + (String[])null); + } + return result; + } + + /** + * Return a single record (filename, "") where the filename is taken from + * the file split. + */ + static class RandomRecordReader implements RecordReader { + Path name; + public RandomRecordReader(Path p) { + name = p; + } + public boolean next(Text key, Text value) { + if (name != null) { + key.set(name.getName()); + name = null; + return true; + } + return false; + } + public Text createKey() { + return new Text(); + } + public Text createValue() { + return new Text(); + } + public long getPos() { + return 0; + } + public void close() {} + public float getProgress() { + return 0.0f; + } + } + + public RecordReader getRecordReader(InputSplit split, + JobConf job, + Reporter reporter) throws IOException { + return new RandomRecordReader(((FileSplit) split).getPath()); + } + } + + static class Map extends MapReduceBase + implements Mapper { + + private long numBytesToWrite; + private int minKeySize; + private int keySizeRange; + private int minValueSize; + private int valueSizeRange; + private Random random = new Random(); + private BytesWritable randomKey = new BytesWritable(); + private BytesWritable randomValue = new BytesWritable(); + + private void randomizeBytes(byte[] data, int offset, int length) { + for(int i=offset + length - 1; i >= offset; --i) { + data[i] = (byte) random.nextInt(256); + } + } + + /** + * Given an output filename, write a bunch of random records to it. + */ + public void map(WritableComparable key, + Writable value, + OutputCollector output, + Reporter reporter) throws IOException { + int itemCount = 0; + while (numBytesToWrite > 0) { + int keyLength = minKeySize + + (keySizeRange != 0 ? random.nextInt(keySizeRange) : 0); + randomKey.setSize(keyLength); + randomizeBytes(randomKey.getBytes(), 0, randomKey.getLength()); + int valueLength = minValueSize + + (valueSizeRange != 0 ? random.nextInt(valueSizeRange) : 0); + randomValue.setSize(valueLength); + randomizeBytes(randomValue.getBytes(), 0, randomValue.getLength()); + output.collect(randomKey, randomValue); + numBytesToWrite -= keyLength + valueLength; + reporter.incrCounter(Counters.BYTES_WRITTEN, keyLength + valueLength); + reporter.incrCounter(Counters.RECORDS_WRITTEN, 1); + if (++itemCount % 200 == 0) { + reporter.setStatus("wrote record " + itemCount + ". " + + numBytesToWrite + " bytes left."); + } + } + reporter.setStatus("done with " + itemCount + " records."); + } + + /** + * Save the values out of the configuaration that we need to write + * the data. 
+ */ + @Override + public void configure(JobConf job) { + numBytesToWrite = job.getLong("test.randomwrite.bytes_per_map", + 1*1024*1024*1024); + minKeySize = job.getInt("test.randomwrite.min_key", 10); + keySizeRange = + job.getInt("test.randomwrite.max_key", 1000) - minKeySize; + minValueSize = job.getInt("test.randomwrite.min_value", 0); + valueSizeRange = + job.getInt("test.randomwrite.max_value", 20000) - minValueSize; + } + + } + + /** + * This is the main routine for launching a distributed random write job. + * It runs 10 maps/node and each node writes 1 gig of data to a DFS file. + * The reduce doesn't do anything. + * + * @throws IOException + */ + public int run(String[] args) throws Exception { + if (args.length == 0) { + System.out.println("Usage: writer "); + ToolRunner.printGenericCommandUsage(System.out); + return -1; + } + + Path outDir = new Path(args[0]); + JobConf job = new JobConf(getConf()); + + job.setJarByClass(RandomWriter.class); + job.setJobName("random-writer"); + FileOutputFormat.setOutputPath(job, outDir); + + job.setOutputKeyClass(BytesWritable.class); + job.setOutputValueClass(BytesWritable.class); + + job.setInputFormat(RandomInputFormat.class); + job.setMapperClass(Map.class); + job.setReducerClass(IdentityReducer.class); + job.setOutputFormat(SequenceFileOutputFormat.class); + + JobClient client = new JobClient(job); + ClusterStatus cluster = client.getClusterStatus(); + int numMapsPerHost = job.getInt("test.randomwriter.maps_per_host", 10); + long numBytesToWritePerMap = job.getLong("test.randomwrite.bytes_per_map", + 1*1024*1024*1024); + if (numBytesToWritePerMap == 0) { + System.err.println("Cannot have test.randomwrite.bytes_per_map set to 0"); + return -2; + } + long totalBytesToWrite = job.getLong("test.randomwrite.total_bytes", + numMapsPerHost*numBytesToWritePerMap*cluster.getTaskTrackers()); + int numMaps = (int) (totalBytesToWrite / numBytesToWritePerMap); + if (numMaps == 0 && totalBytesToWrite > 0) { + numMaps = 1; + job.setLong("test.randomwrite.bytes_per_map", totalBytesToWrite); + } + + job.setNumMapTasks(numMaps); + System.out.println("Running " + numMaps + " maps."); + + // reducer NONE + job.setNumReduceTasks(0); + + Date startTime = new Date(); + System.out.println("Job started: " + startTime); + JobClient.runJob(job); + Date endTime = new Date(); + System.out.println("Job ended: " + endTime); + System.out.println("The job took " + + (endTime.getTime() - startTime.getTime()) /1000 + + " seconds."); + + return 0; + } + + public static void main(String[] args) throws Exception { + int res = ToolRunner.run(new Configuration(), new RandomWriter(), args); + System.exit(res); + } + +} diff --git a/src/examples/org/apache/hadoop/examples/SecondarySort.java b/src/examples/org/apache/hadoop/examples/SecondarySort.java new file mode 100644 index 0000000..35a7b30 --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/SecondarySort.java @@ -0,0 +1,239 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.examples; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.StringTokenizer; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.IntWritable; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.RawComparator; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.io.WritableComparator; +import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; +import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; +import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.mapreduce.Mapper; +import org.apache.hadoop.mapreduce.Partitioner; +import org.apache.hadoop.mapreduce.Reducer; +import org.apache.hadoop.util.GenericOptionsParser; + +/** + * This is an example Hadoop Map/Reduce application. + * It reads the text input files that must contain two integers per a line. + * The output is sorted by the first and second number and grouped on the + * first number. + * + * To run: bin/hadoop jar build/hadoop-examples.jar secondarysort + * in-dir out-dir + */ +public class SecondarySort { + + /** + * Define a pair of integers that are writable. + * They are serialized in a byte comparable format. + */ + public static class IntPair + implements WritableComparable { + private int first = 0; + private int second = 0; + + /** + * Set the left and right values. + */ + public void set(int left, int right) { + first = left; + second = right; + } + public int getFirst() { + return first; + } + public int getSecond() { + return second; + } + /** + * Read the two integers. + * Encoded as: MIN_VALUE -> 0, 0 -> -MIN_VALUE, MAX_VALUE-> -1 + */ + @Override + public void readFields(DataInput in) throws IOException { + first = in.readInt() + Integer.MIN_VALUE; + second = in.readInt() + Integer.MIN_VALUE; + } + @Override + public void write(DataOutput out) throws IOException { + out.writeInt(first - Integer.MIN_VALUE); + out.writeInt(second - Integer.MIN_VALUE); + } + @Override + public int hashCode() { + return first * 157 + second; + } + @Override + public boolean equals(Object right) { + if (right instanceof IntPair) { + IntPair r = (IntPair) right; + return r.first == first && r.second == second; + } else { + return false; + } + } + /** A Comparator that compares serialized IntPair. */ + public static class Comparator extends WritableComparator { + public Comparator() { + super(IntPair.class); + } + + public int compare(byte[] b1, int s1, int l1, + byte[] b2, int s2, int l2) { + return compareBytes(b1, s1, l1, b2, s2, l2); + } + } + + static { // register this comparator + WritableComparator.define(IntPair.class, new Comparator()); + } + + @Override + public int compareTo(IntPair o) { + if (first != o.first) { + return first < o.first ? -1 : 1; + } else if (second != o.second) { + return second < o.second ? -1 : 1; + } else { + return 0; + } + } + } + + /** + * Partition based on the first part of the pair. 
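+   *
+   * A tiny worked example (the reducer count is arbitrary): with 10 reduce
+   * tasks, a key whose first element is 42 goes to partition
+   * <pre>
+   *   Math.abs(42 * 127) % 10  // = 5334 % 10 = 4
+   * </pre>
+   * so every (42, *) pair reaches the same reducer, whatever its second
+   * element.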
+ */ + public static class FirstPartitioner extends Partitioner{ + @Override + public int getPartition(IntPair key, IntWritable value, + int numPartitions) { + return Math.abs(key.getFirst() * 127) % numPartitions; + } + } + + /** + * Compare only the first part of the pair, so that reduce is called once + * for each value of the first part. + */ + public static class FirstGroupingComparator + implements RawComparator { + @Override + public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { + return WritableComparator.compareBytes(b1, s1, Integer.SIZE/8, + b2, s2, Integer.SIZE/8); + } + + @Override + public int compare(IntPair o1, IntPair o2) { + int l = o1.getFirst(); + int r = o2.getFirst(); + return l == r ? 0 : (l < r ? -1 : 1); + } + } + + /** + * Read two integers from each line and generate a key, value pair + * as ((left, right), right). + */ + public static class MapClass + extends Mapper { + + private final IntPair key = new IntPair(); + private final IntWritable value = new IntWritable(); + + @Override + public void map(LongWritable inKey, Text inValue, + Context context) throws IOException, InterruptedException { + StringTokenizer itr = new StringTokenizer(inValue.toString()); + int left = 0; + int right = 0; + if (itr.hasMoreTokens()) { + left = Integer.parseInt(itr.nextToken()); + if (itr.hasMoreTokens()) { + right = Integer.parseInt(itr.nextToken()); + } + key.set(left, right); + value.set(right); + context.write(key, value); + } + } + } + + /** + * A reducer class that just emits the sum of the input values. + */ + public static class Reduce + extends Reducer { + private static final Text SEPARATOR = + new Text("------------------------------------------------"); + private final Text first = new Text(); + + @Override + public void reduce(IntPair key, Iterable values, + Context context + ) throws IOException, InterruptedException { + context.write(SEPARATOR, null); + first.set(Integer.toString(key.getFirst())); + for(IntWritable value: values) { + context.write(first, value); + } + } + } + + public static void main(String[] args) throws Exception { + Configuration conf = new Configuration(); + String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs(); + if (otherArgs.length != 2) { + System.err.println("Usage: secondarysrot "); + System.exit(2); + } + Job job = new Job(conf, "secondary sort"); + job.setJarByClass(SecondarySort.class); + job.setMapperClass(MapClass.class); + job.setReducerClass(Reduce.class); + + // group and partition by the first int in the pair + job.setPartitionerClass(FirstPartitioner.class); + job.setGroupingComparatorClass(FirstGroupingComparator.class); + + // the map output is IntPair, IntWritable + job.setMapOutputKeyClass(IntPair.class); + job.setMapOutputValueClass(IntWritable.class); + + // the reduce output is Text, IntWritable + job.setOutputKeyClass(Text.class); + job.setOutputValueClass(IntWritable.class); + + FileInputFormat.addInputPath(job, new Path(otherArgs[0])); + FileOutputFormat.setOutputPath(job, new Path(otherArgs[1])); + System.exit(job.waitForCompletion(true) ? 0 : 1); + } + +} diff --git a/src/examples/org/apache/hadoop/examples/SleepJob.java b/src/examples/org/apache/hadoop/examples/SleepJob.java new file mode 100644 index 0000000..a335eb6 --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/SleepJob.java @@ -0,0 +1,241 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.examples; + +import java.io.IOException; +import java.io.DataInput; +import java.io.DataOutput; +import java.util.Iterator; +import java.util.Random; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.IntWritable; +import org.apache.hadoop.io.NullWritable; +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.mapred.*; +import org.apache.hadoop.mapred.lib.NullOutputFormat; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; + +/** + * Dummy class for testing MR framefork. Sleeps for a defined period + * of time in mapper and reducer. Generates fake input for map / reduce + * jobs. Note that generated number of input pairs is in the order + * of numMappers * mapSleepTime / 100, so the job uses + * some disk space. + */ +public class SleepJob extends Configured implements Tool, + Mapper, + Reducer, + Partitioner { + + private long mapSleepDuration = 100; + private long reduceSleepDuration = 100; + private int mapSleepCount = 1; + private int reduceSleepCount = 1; + private int count = 0; + + public int getPartition(IntWritable k, NullWritable v, int numPartitions) { + return k.get() % numPartitions; + } + + public static class EmptySplit implements InputSplit { + public void write(DataOutput out) throws IOException { } + public void readFields(DataInput in) throws IOException { } + public long getLength() { return 0L; } + public String[] getLocations() { return new String[0]; } + } + + public static class SleepInputFormat extends Configured + implements InputFormat { + public InputSplit[] getSplits(JobConf conf, int numSplits) { + InputSplit[] ret = new InputSplit[numSplits]; + for (int i = 0; i < numSplits; ++i) { + ret[i] = new EmptySplit(); + } + return ret; + } + public RecordReader getRecordReader( + InputSplit ignored, JobConf conf, Reporter reporter) + throws IOException { + final int count = conf.getInt("sleep.job.map.sleep.count", 1); + if (count < 0) throw new IOException("Invalid map count: " + count); + final int redcount = conf.getInt("sleep.job.reduce.sleep.count", 1); + if (redcount < 0) + throw new IOException("Invalid reduce count: " + redcount); + final int emitPerMapTask = (redcount * conf.getNumReduceTasks()); + return new RecordReader() { + private int records = 0; + private int emitCount = 0; + + public boolean next(IntWritable key, IntWritable value) + throws IOException { + key.set(emitCount); + int emit = emitPerMapTask / count; + if ((emitPerMapTask) % count > records) { + ++emit; + } + emitCount += emit; + value.set(emit); + return records++ < count; + } + public IntWritable createKey() { return new IntWritable(); } + public IntWritable createValue() { return new 
IntWritable(); } + public long getPos() throws IOException { return records; } + public void close() throws IOException { } + public float getProgress() throws IOException { + return records / ((float)count); + } + }; + } + } + + public void map(IntWritable key, IntWritable value, + OutputCollector output, Reporter reporter) + throws IOException { + + //it is expected that every map processes mapSleepCount number of records. + try { + reporter.setStatus("Sleeping... (" + + (mapSleepDuration * (mapSleepCount - count)) + ") ms left"); + Thread.sleep(mapSleepDuration); + } + catch (InterruptedException ex) { + throw (IOException)new IOException( + "Interrupted while sleeping").initCause(ex); + } + ++count; + // output reduceSleepCount * numReduce number of random values, so that + // each reducer will get reduceSleepCount number of keys. + int k = key.get(); + for (int i = 0; i < value.get(); ++i) { + output.collect(new IntWritable(k + i), NullWritable.get()); + } + } + + public void reduce(IntWritable key, Iterator values, + OutputCollector output, Reporter reporter) + throws IOException { + try { + reporter.setStatus("Sleeping... (" + + (reduceSleepDuration * (reduceSleepCount - count)) + ") ms left"); + Thread.sleep(reduceSleepDuration); + + } + catch (InterruptedException ex) { + throw (IOException)new IOException( + "Interrupted while sleeping").initCause(ex); + } + count++; + } + + public void configure(JobConf job) { + this.mapSleepCount = + job.getInt("sleep.job.map.sleep.count", mapSleepCount); + this.reduceSleepCount = + job.getInt("sleep.job.reduce.sleep.count", reduceSleepCount); + this.mapSleepDuration = + job.getLong("sleep.job.map.sleep.time" , 100) / mapSleepCount; + this.reduceSleepDuration = + job.getLong("sleep.job.reduce.sleep.time" , 100) / reduceSleepCount; + } + + public void close() throws IOException { + } + + public static void main(String[] args) throws Exception{ + int res = ToolRunner.run(new Configuration(), new SleepJob(), args); + System.exit(res); + } + + public int run(int numMapper, int numReducer, long mapSleepTime, + int mapSleepCount, long reduceSleepTime, + int reduceSleepCount) throws IOException { + JobConf job = setupJobConf(numMapper, numReducer, mapSleepTime, + mapSleepCount, reduceSleepTime, reduceSleepCount); + JobClient.runJob(job); + return 0; + } + + public JobConf setupJobConf(int numMapper, int numReducer, + long mapSleepTime, int mapSleepCount, + long reduceSleepTime, int reduceSleepCount) { + JobConf job = new JobConf(getConf(), SleepJob.class); + job.setNumMapTasks(numMapper); + job.setNumReduceTasks(numReducer); + job.setMapperClass(SleepJob.class); + job.setMapOutputKeyClass(IntWritable.class); + job.setMapOutputValueClass(NullWritable.class); + job.setReducerClass(SleepJob.class); + job.setOutputFormat(NullOutputFormat.class); + job.setInputFormat(SleepInputFormat.class); + job.setPartitionerClass(SleepJob.class); + job.setSpeculativeExecution(false); + job.setJobName("Sleep job"); + FileInputFormat.addInputPath(job, new Path("ignored")); + job.setLong("sleep.job.map.sleep.time", mapSleepTime); + job.setLong("sleep.job.reduce.sleep.time", reduceSleepTime); + job.setInt("sleep.job.map.sleep.count", mapSleepCount); + job.setInt("sleep.job.reduce.sleep.count", reduceSleepCount); + return job; + } + + public int run(String[] args) throws Exception { + + if(args.length < 1) { + System.err.println("SleepJob [-m numMapper] [-r numReducer]" + + " [-mt mapSleepTime (msec)] [-rt reduceSleepTime (msec)]" + + " [-recordt recordSleepTime (msec)]"); 
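+ // Illustrative invocation, running the class directly (at least one option
+ // must be supplied):
+ // bin/hadoop org.apache.hadoop.examples.SleepJob -m 10 -r 5 -mt 1000 -rt 1000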
+ ToolRunner.printGenericCommandUsage(System.err); + return -1; + } + + int numMapper = 1, numReducer = 1; + long mapSleepTime = 100, reduceSleepTime = 100, recSleepTime = 100; + int mapSleepCount = 1, reduceSleepCount = 1; + + for(int i=0; i < args.length; i++ ) { + if(args[i].equals("-m")) { + numMapper = Integer.parseInt(args[++i]); + } + else if(args[i].equals("-r")) { + numReducer = Integer.parseInt(args[++i]); + } + else if(args[i].equals("-mt")) { + mapSleepTime = Long.parseLong(args[++i]); + } + else if(args[i].equals("-rt")) { + reduceSleepTime = Long.parseLong(args[++i]); + } + else if (args[i].equals("-recordt")) { + recSleepTime = Long.parseLong(args[++i]); + } + } + + // sleep for *SleepTime duration in Task by recSleepTime per record + mapSleepCount = (int)Math.ceil(mapSleepTime / ((double)recSleepTime)); + reduceSleepCount = (int)Math.ceil(reduceSleepTime / ((double)recSleepTime)); + + return run(numMapper, numReducer, mapSleepTime, mapSleepCount, + reduceSleepTime, reduceSleepCount); + } + +} diff --git a/src/examples/org/apache/hadoop/examples/Sort.java b/src/examples/org/apache/hadoop/examples/Sort.java new file mode 100644 index 0000000..a028009 --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/Sort.java @@ -0,0 +1,198 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.examples; + +import java.io.IOException; +import java.net.URI; +import java.util.*; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.filecache.DistributedCache; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.BytesWritable; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.mapred.*; +import org.apache.hadoop.mapred.lib.IdentityMapper; +import org.apache.hadoop.mapred.lib.IdentityReducer; +import org.apache.hadoop.mapred.lib.InputSampler; +import org.apache.hadoop.mapred.lib.TotalOrderPartitioner; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; + +/** + * This is the trivial map/reduce program that does absolutely nothing + * other than use the framework to fragment and sort the input values. 
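+ * The mapper and reducer are the identity functions, so the actual sorting is
+ * done by the framework's shuffle and merge on the configured output key class.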
+ * + * To run: bin/hadoop jar build/hadoop-examples.jar sort + * [-m maps] [-r reduces] + * [-inFormat input format class] + * [-outFormat output format class] + * [-outKey output key class] + * [-outValue output value class] + * [-totalOrder pcnt num samples max splits] + * in-dir out-dir + */ +public class Sort extends Configured implements Tool { + private RunningJob jobResult = null; + + static int printUsage() { + System.out.println("sort [-m ] [-r ] " + + "[-inFormat ] " + + "[-outFormat ] " + + "[-outKey ] " + + "[-outValue ] " + + "[-totalOrder ] " + + " "); + ToolRunner.printGenericCommandUsage(System.out); + return -1; + } + + /** + * The main driver for sort program. + * Invoke this method to submit the map/reduce job. + * @throws IOException When there is communication problems with the + * job tracker. + */ + public int run(String[] args) throws Exception { + + JobConf jobConf = new JobConf(getConf(), Sort.class); + jobConf.setJobName("sorter"); + + jobConf.setMapperClass(IdentityMapper.class); + jobConf.setReducerClass(IdentityReducer.class); + + JobClient client = new JobClient(jobConf); + ClusterStatus cluster = client.getClusterStatus(); + int num_reduces = (int) (cluster.getMaxReduceTasks() * 0.9); + String sort_reduces = jobConf.get("test.sort.reduces_per_host"); + if (sort_reduces != null) { + num_reduces = cluster.getTaskTrackers() * + Integer.parseInt(sort_reduces); + } + Class inputFormatClass = + SequenceFileInputFormat.class; + Class outputFormatClass = + SequenceFileOutputFormat.class; + Class outputKeyClass = BytesWritable.class; + Class outputValueClass = BytesWritable.class; + List otherArgs = new ArrayList(); + InputSampler.Sampler sampler = null; + for(int i=0; i < args.length; ++i) { + try { + if ("-m".equals(args[i])) { + jobConf.setNumMapTasks(Integer.parseInt(args[++i])); + } else if ("-r".equals(args[i])) { + num_reduces = Integer.parseInt(args[++i]); + } else if ("-inFormat".equals(args[i])) { + inputFormatClass = + Class.forName(args[++i]).asSubclass(InputFormat.class); + } else if ("-outFormat".equals(args[i])) { + outputFormatClass = + Class.forName(args[++i]).asSubclass(OutputFormat.class); + } else if ("-outKey".equals(args[i])) { + outputKeyClass = + Class.forName(args[++i]).asSubclass(WritableComparable.class); + } else if ("-outValue".equals(args[i])) { + outputValueClass = + Class.forName(args[++i]).asSubclass(Writable.class); + } else if ("-totalOrder".equals(args[i])) { + double pcnt = Double.parseDouble(args[++i]); + int numSamples = Integer.parseInt(args[++i]); + int maxSplits = Integer.parseInt(args[++i]); + if (0 >= maxSplits) maxSplits = Integer.MAX_VALUE; + sampler = + new InputSampler.RandomSampler(pcnt, numSamples, maxSplits); + } else { + otherArgs.add(args[i]); + } + } catch (NumberFormatException except) { + System.out.println("ERROR: Integer expected instead of " + args[i]); + return printUsage(); + } catch (ArrayIndexOutOfBoundsException except) { + System.out.println("ERROR: Required parameter missing from " + + args[i-1]); + return printUsage(); // exits + } + } + + // Set user-supplied (possibly default) job configs + jobConf.setNumReduceTasks(num_reduces); + + jobConf.setInputFormat(inputFormatClass); + jobConf.setOutputFormat(outputFormatClass); + + jobConf.setOutputKeyClass(outputKeyClass); + jobConf.setOutputValueClass(outputValueClass); + + // Make sure there are exactly 2 parameters left. 
+ if (otherArgs.size() != 2) { + System.out.println("ERROR: Wrong number of parameters: " + + otherArgs.size() + " instead of 2."); + return printUsage(); + } + FileInputFormat.setInputPaths(jobConf, otherArgs.get(0)); + FileOutputFormat.setOutputPath(jobConf, new Path(otherArgs.get(1))); + + if (sampler != null) { + System.out.println("Sampling input to effect total-order sort..."); + jobConf.setPartitionerClass(TotalOrderPartitioner.class); + Path inputDir = FileInputFormat.getInputPaths(jobConf)[0]; + inputDir = inputDir.makeQualified(inputDir.getFileSystem(jobConf)); + Path partitionFile = new Path(inputDir, "_sortPartitioning"); + TotalOrderPartitioner.setPartitionFile(jobConf, partitionFile); + InputSampler.writePartitionFile(jobConf, sampler); + URI partitionUri = new URI(partitionFile.toString() + + "#" + "_sortPartitioning"); + DistributedCache.addCacheFile(partitionUri, jobConf); + DistributedCache.createSymlink(jobConf); + } + + System.out.println("Running on " + + cluster.getTaskTrackers() + + " nodes to sort from " + + FileInputFormat.getInputPaths(jobConf)[0] + " into " + + FileOutputFormat.getOutputPath(jobConf) + + " with " + num_reduces + " reduces."); + Date startTime = new Date(); + System.out.println("Job started: " + startTime); + jobResult = JobClient.runJob(jobConf); + Date end_time = new Date(); + System.out.println("Job ended: " + end_time); + System.out.println("The job took " + + (end_time.getTime() - startTime.getTime()) /1000 + " seconds."); + return 0; + } + + + + public static void main(String[] args) throws Exception { + int res = ToolRunner.run(new Configuration(), new Sort(), args); + System.exit(res); + } + + /** + * Get the last job that was run using this instance. + * @return the results of the last job that was run + */ + public RunningJob getResult() { + return jobResult; + } +} diff --git a/src/examples/org/apache/hadoop/examples/WordCount.java b/src/examples/org/apache/hadoop/examples/WordCount.java new file mode 100644 index 0000000..81a6c21 --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/WordCount.java @@ -0,0 +1,69 @@ +package org.apache.hadoop.examples; + +import java.io.IOException; +import java.util.StringTokenizer; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.IntWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.mapreduce.Mapper; +import org.apache.hadoop.mapreduce.Reducer; +import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; +import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; +import org.apache.hadoop.util.GenericOptionsParser; + +public class WordCount { + + public static class TokenizerMapper + extends Mapper{ + + private final static IntWritable one = new IntWritable(1); + private Text word = new Text(); + + public void map(Object key, Text value, Context context + ) throws IOException, InterruptedException { + StringTokenizer itr = new StringTokenizer(value.toString()); + while (itr.hasMoreTokens()) { + word.set(itr.nextToken()); + context.write(word, one); + } + } + } + + public static class IntSumReducer + extends Reducer { + private IntWritable result = new IntWritable(); + + public void reduce(Text key, Iterable values, + Context context + ) throws IOException, InterruptedException { + int sum = 0; + for (IntWritable val : values) { + sum += val.get(); + } + result.set(sum); + context.write(key, result); + } + } + + public static void main(String[] args) throws Exception { 
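+ // Typical invocation (paths are placeholders):
+ // bin/hadoop jar build/hadoop-examples.jar wordcount in-dir out-dir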
+ Configuration conf = new Configuration(); + String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs(); + if (otherArgs.length != 2) { + System.err.println("Usage: wordcount "); + System.exit(2); + } + Job job = new Job(conf, "word count"); + job.setJarByClass(WordCount.class); + job.setMapperClass(TokenizerMapper.class); + job.setCombinerClass(IntSumReducer.class); + job.setReducerClass(IntSumReducer.class); + job.setOutputKeyClass(Text.class); + job.setOutputValueClass(IntWritable.class); + FileInputFormat.addInputPath(job, new Path(otherArgs[0])); + FileOutputFormat.setOutputPath(job, new Path(otherArgs[1])); + System.exit(job.waitForCompletion(true) ? 0 : 1); + } +} diff --git a/src/examples/org/apache/hadoop/examples/dancing/DancingLinks.java b/src/examples/org/apache/hadoop/examples/dancing/DancingLinks.java new file mode 100644 index 0000000..94d2c83 --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/dancing/DancingLinks.java @@ -0,0 +1,438 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.examples.dancing; + +import java.util.*; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +/** + * A generic solver for tile laying problems using Knuth's dancing link + * algorithm. It provides a very fast backtracking data structure for problems + * that can expressed as a sparse boolean matrix where the goal is to select a + * subset of the rows such that each column has exactly 1 true in it. + * + * The application gives each column a name and each row is named after the + * set of columns that it has as true. Solutions are passed back by giving the + * selected rows' names. + * + * The type parameter ColumnName is the class of application's column names. + */ +public class DancingLinks { + private static final Log LOG = + LogFactory.getLog(DancingLinks.class.getName()); + + /** + * A cell in the table with up/down and left/right links that form doubly + * linked lists in both directions. It also includes a link to the column + * head. + */ + private static class Node { + Node left; + Node right; + Node up; + Node down; + ColumnHeader head; + + Node(Node l, Node r, Node u, + Node d, ColumnHeader h) { + left = l; + right = r; + up = u; + down = d; + head = h; + } + + Node() { + this(null, null, null, null, null); + } + } + + /** + * Column headers record the name of the column and the number of rows that + * satisfy this column. The names are provided by the application and can + * be anything. The size is used for the heuristic for picking the next + * column to explore. 
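+ * (findBestColumn() below always branches on the column with the smallest
+ * size, which keeps the branching factor of the search small.)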
+ */ + private static class ColumnHeader extends Node { + ColumnName name; + int size; + + ColumnHeader(ColumnName n, int s) { + name = n; + size = s; + head = this; + } + + ColumnHeader() { + this(null, 0); + } + } + + /** + * The head of the table. Left/Right from the head are the unsatisfied + * ColumnHeader objects. + */ + private ColumnHeader head; + + /** + * The complete list of columns. + */ + private List> columns; + + public DancingLinks() { + head = new ColumnHeader(null, 0); + head.left = head; + head.right = head; + head.up = head; + head.down = head; + columns = new ArrayList>(200); + } + + /** + * Add a column to the table + * @param name The name of the column, which will be returned as part of + * solutions + * @param primary Is the column required for a solution? + */ + public void addColumn(ColumnName name, boolean primary) { + ColumnHeader top = new ColumnHeader(name, 0); + top.up = top; + top.down = top; + if (primary) { + Node tail = head.left; + tail.right = top; + top.left = tail; + top.right = head; + head.left = top; + } else { + top.left = top; + top.right = top; + } + columns.add(top); + } + + /** + * Add a column to the table + * @param name The name of the column, which will be included in the solution + */ + public void addColumn(ColumnName name) { + addColumn(name, true); + } + + /** + * Get the number of columns. + * @return the number of columns + */ + public int getNumberColumns() { + return columns.size(); + } + + /** + * Get the name of a given column as a string + * @param index the index of the column + * @return a string representation of the name + */ + public String getColumnName(int index) { + return columns.get(index).name.toString(); + } + + /** + * Add a row to the table. + * @param values the columns that are satisfied by this row + */ + public void addRow(boolean[] values) { + Node prev = null; + for(int i=0; i < values.length; ++i) { + if (values[i]) { + ColumnHeader top = columns.get(i); + top.size += 1; + Node bottom = top.up; + Node node = new Node(null, null, bottom, + top, top); + bottom.down = node; + top.up = node; + if (prev != null) { + Node front = prev.right; + node.left = prev; + node.right = front; + prev.right = node; + front.left = node; + } else { + node.left = node; + node.right = node; + } + prev = node; + } + } + } + + /** + * Applications should implement this to receive the solutions to their + * problems. + */ + public interface SolutionAcceptor { + /** + * A callback to return a solution to the application. + * @param value a List of List of ColumnNames that were satisfied by each + * selected row + */ + void solution(List> value); + } + + /** + * Find the column with the fewest choices. 
+ * @return The column header + */ + private ColumnHeader findBestColumn() { + int lowSize = Integer.MAX_VALUE; + ColumnHeader result = null; + ColumnHeader current = (ColumnHeader) head.right; + while (current != head) { + if (current.size < lowSize) { + lowSize = current.size; + result = current; + } + current = (ColumnHeader) current.right; + } + return result; + } + + /** + * Hide a column in the table + * @param col the column to hide + */ + private void coverColumn(ColumnHeader col) { + LOG.debug("cover " + col.head.name); + // remove the column + col.right.left = col.left; + col.left.right = col.right; + Node row = col.down; + while (row != col) { + Node node = row.right; + while (node != row) { + node.down.up = node.up; + node.up.down = node.down; + node.head.size -= 1; + node = node.right; + } + row = row.down; + } + } + + /** + * Uncover a column that was hidden. + * @param col the column to unhide + */ + private void uncoverColumn(ColumnHeader col) { + LOG.debug("uncover " + col.head.name); + Node row = col.up; + while (row != col) { + Node node = row.left; + while (node != row) { + node.head.size += 1; + node.down.up = node; + node.up.down = node; + node = node.left; + } + row = row.up; + } + col.right.left = col; + col.left.right = col; + } + + /** + * Get the name of a row by getting the list of column names that it + * satisfies. + * @param row the row to make a name for + * @return the list of column names + */ + private List getRowName(Node row) { + List result = new ArrayList(); + result.add(row.head.name); + Node node = row.right; + while (node != row) { + result.add(node.head.name); + node = node.right; + } + return result; + } + + /** + * Find a solution to the problem. + * @param partial a temporary datastructure to keep the current partial + * answer in + * @param output the acceptor for the results that are found + * @return the number of solutions found + */ + private int search(List> partial, SolutionAcceptor output) { + int results = 0; + if (head.right == head) { + List> result = new ArrayList>(partial.size()); + for(Node row: partial) { + result.add(getRowName(row)); + } + output.solution(result); + results += 1; + } else { + ColumnHeader col = findBestColumn(); + if (col.size > 0) { + coverColumn(col); + Node row = col.down; + while (row != col) { + partial.add(row); + Node node = row.right; + while (node != row) { + coverColumn(node.head); + node = node.right; + } + results += search(partial, output); + partial.remove(partial.size() - 1); + node = row.left; + while (node != row) { + uncoverColumn(node.head); + node = node.left; + } + row = row.down; + } + uncoverColumn(col); + } + } + return results; + } + + /** + * Generate a list of prefixes down to a given depth. Assumes that the + * problem is always deeper than depth. 
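+ * Each entry of a prefix is the index, counting from zero in link order, of
+ * the row chosen under the best column at that level of the search.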
+ * @param depth the depth to explore down + * @param choices an array of length depth to describe a prefix + * @param prefixes a working datastructure + */ + private void searchPrefixes(int depth, int[] choices, + List prefixes) { + if (depth == 0) { + prefixes.add(choices.clone()); + } else { + ColumnHeader col = findBestColumn(); + if (col.size > 0) { + coverColumn(col); + Node row = col.down; + int rowId = 0; + while (row != col) { + Node node = row.right; + while (node != row) { + coverColumn(node.head); + node = node.right; + } + choices[choices.length - depth] = rowId; + searchPrefixes(depth - 1, choices, prefixes); + node = row.left; + while (node != row) { + uncoverColumn(node.head); + node = node.left; + } + row = row.down; + rowId += 1; + } + uncoverColumn(col); + } + } + } + + /** + * Generate a list of row choices to cover the first moves. + * @param depth the length of the prefixes to generate + * @return a list of integer arrays that list the rows to pick in order + */ + public List split(int depth) { + int[] choices = new int[depth]; + List result = new ArrayList(100000); + searchPrefixes(depth, choices, result); + return result; + } + + /** + * Make one move from a prefix + * @param goalRow the row that should be choosen + * @return the row that was found + */ + private Node advance(int goalRow) { + ColumnHeader col = findBestColumn(); + if (col.size > 0) { + coverColumn(col); + Node row = col.down; + int id = 0; + while (row != col) { + if (id == goalRow) { + Node node = row.right; + while (node != row) { + coverColumn(node.head); + node = node.right; + } + return row; + } + id += 1; + row = row.down; + } + } + return null; + } + + /** + * Undo a prefix exploration + * @param row + */ + private void rollback(Node row) { + Node node = row.left; + while (node != row) { + uncoverColumn(node.head); + node = node.left; + } + uncoverColumn(row.head); + } + + /** + * Given a prefix, find solutions under it. + * @param prefix a list of row choices that control which part of the search + * tree to explore + * @param output the output for each solution + * @return the number of solutions + */ + public int solve(int[] prefix, SolutionAcceptor output) { + List> choices = new ArrayList>(); + for(int i=0; i < prefix.length; ++i) { + choices.add(advance(prefix[i])); + } + int result = search(choices, output); + for(int i=prefix.length-1; i >=0; --i) { + rollback(choices.get(i)); + } + return result; + } + + /** + * Solve a complete problem + * @param output the acceptor to receive answers + * @return the number of solutions + */ + public int solve(SolutionAcceptor output) { + return search(new ArrayList>(), output); + } + +} diff --git a/src/examples/org/apache/hadoop/examples/dancing/DistributedPentomino.java b/src/examples/org/apache/hadoop/examples/dancing/DistributedPentomino.java new file mode 100644 index 0000000..cef7063 --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/dancing/DistributedPentomino.java @@ -0,0 +1,207 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.examples.dancing; + +import java.io.*; +import java.util.List; +import java.util.StringTokenizer; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.mapred.*; +import org.apache.hadoop.mapred.lib.IdentityReducer; +import org.apache.hadoop.util.*; + +/** + * Launch a distributed pentomino solver. + * It generates a complete list of prefixes of length N with each unique prefix + * as a separate line. A prefix is a sequence of N integers that denote the + * index of the row that is choosen for each column in order. Note that the + * next column is heuristically choosen by the solver, so it is dependant on + * the previous choice. That file is given as the input to + * map/reduce. The output key/value are the move prefix/solution as Text/Text. + */ +public class DistributedPentomino extends Configured implements Tool { + + /** + * Each map takes a line, which represents a prefix move and finds all of + * the solutions that start with that prefix. The output is the prefix as + * the key and the solution as the value. + */ + public static class PentMap extends MapReduceBase + implements Mapper { + + private int width; + private int height; + private int depth; + private Pentomino pent; + private Text prefixString; + private OutputCollector output; + private Reporter reporter; + + /** + * For each solution, generate the prefix and a string representation + * of the solution. The solution starts with a newline, so that the output + * looks like: + * , + * + * + */ + class SolutionCatcher + implements DancingLinks.SolutionAcceptor { + public void solution(List> answer) { + String board = Pentomino.stringifySolution(width, height, answer); + try { + output.collect(prefixString, new Text("\n" + board)); + reporter.incrCounter(pent.getCategory(answer), 1); + } catch (IOException e) { + System.err.println(StringUtils.stringifyException(e)); + } + } + } + + /** + * Break the prefix string into moves (a sequence of integer row ids that + * will be selected for each column in order). Find all solutions with + * that prefix. 
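+ * For example, an input line such as "0,3,1,2,6" (values invented for
+ * illustration) is parsed into the prefix {0, 3, 1, 2, 6} and passed to
+ * pent.solve(), which emits every solution reachable from those opening moves.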
+ */ + public void map(WritableComparable key, Text value, + OutputCollector output, Reporter reporter + ) throws IOException { + this.output = output; + this.reporter = reporter; + prefixString = value; + StringTokenizer itr = new StringTokenizer(prefixString.toString(), ","); + int[] prefix = new int[depth]; + int idx = 0; + while (itr.hasMoreTokens()) { + String num = itr.nextToken(); + prefix[idx++] = Integer.parseInt(num); + } + pent.solve(prefix); + } + + @Override + public void configure(JobConf conf) { + depth = conf.getInt("pent.depth", -1); + width = conf.getInt("pent.width", -1); + height = conf.getInt("pent.height", -1); + pent = (Pentomino) + ReflectionUtils.newInstance(conf.getClass("pent.class", + OneSidedPentomino.class), + conf); + pent.initialize(width, height); + pent.setPrinter(new SolutionCatcher()); + } + } + + /** + * Create the input file with all of the possible combinations of the + * given depth. + * @param fs the filesystem to write into + * @param dir the directory to write the input file into + * @param pent the puzzle + * @param depth the depth to explore when generating prefixes + */ + private static void createInputDirectory(FileSystem fs, + Path dir, + Pentomino pent, + int depth + ) throws IOException { + fs.mkdirs(dir); + List splits = pent.getSplits(depth); + PrintStream file = + new PrintStream(new BufferedOutputStream + (fs.create(new Path(dir, "part1")), 64*1024)); + for(int[] prefix: splits) { + for(int i=0; i < prefix.length; ++i) { + if (i != 0) { + file.print(','); + } + file.print(prefix[i]); + } + file.print('\n'); + } + file.close(); + } + + /** + * Launch the solver on 9x10 board and the one sided pentominos. + * This takes about 2.5 hours on 20 nodes with 2 cpus/node. + * Splits the job into 2000 maps and 1 reduce. 
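+ * Example invocation (the output path is a placeholder); board size and search
+ * depth can be overridden through the generic options supported by ToolRunner:
+ * bin/hadoop jar build/hadoop-examples.jar pentomino -D pent.width=9
+ * -D pent.height=10 -D pent.depth=5 pent_solutions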
+ */ + public static void main(String[] args) throws Exception { + int res = ToolRunner.run(new Configuration(), new DistributedPentomino(), args); + System.exit(res); + } + + public int run(String[] args) throws Exception { + JobConf conf; + int depth = 5; + int width = 9; + int height = 10; + Class pentClass; + if (args.length == 0) { + System.out.println("pentomino "); + ToolRunner.printGenericCommandUsage(System.out); + return -1; + } + + conf = new JobConf(getConf()); + width = conf.getInt("pent.width", width); + height = conf.getInt("pent.height", height); + depth = conf.getInt("pent.depth", depth); + pentClass = conf.getClass("pent.class", OneSidedPentomino.class, Pentomino.class); + + Path output = new Path(args[0]); + Path input = new Path(output + "_input"); + FileSystem fileSys = FileSystem.get(conf); + try { + FileInputFormat.setInputPaths(conf, input); + FileOutputFormat.setOutputPath(conf, output); + conf.setJarByClass(PentMap.class); + + conf.setJobName("dancingElephant"); + Pentomino pent = ReflectionUtils.newInstance(pentClass, conf); + pent.initialize(width, height); + createInputDirectory(fileSys, input, pent, depth); + + // the keys are the prefix strings + conf.setOutputKeyClass(Text.class); + // the values are puzzle solutions + conf.setOutputValueClass(Text.class); + + conf.setMapperClass(PentMap.class); + conf.setReducerClass(IdentityReducer.class); + + conf.setNumMapTasks(2000); + conf.setNumReduceTasks(1); + + JobClient.runJob(conf); + } finally { + fileSys.delete(input, true); + } + return 0; + } + +} diff --git a/src/examples/org/apache/hadoop/examples/dancing/OneSidedPentomino.java b/src/examples/org/apache/hadoop/examples/dancing/OneSidedPentomino.java new file mode 100644 index 0000000..e555458 --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/dancing/OneSidedPentomino.java @@ -0,0 +1,70 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.examples.dancing; + +/** + * Of the "normal" 12 pentominos, 6 of them have distinct shapes when flipped. + * This class includes both variants of the "flippable" shapes and the + * unflippable shapes for a total of 18 pieces. Clearly, the boards must have + * 18*5=90 boxes to hold all of the solutions. + */ +public class OneSidedPentomino extends Pentomino { + + public OneSidedPentomino() {} + + public OneSidedPentomino(int width, int height) { + super(width, height); + } + + /** + * Define the one sided pieces. The flipped pieces have the same name with + * a capital letter. 
+ */ + protected void initializePieces() { + pieces.add(new Piece("x", " x /xxx/ x ", false, oneRotation)); + pieces.add(new Piece("v", "x /x /xxx", false, fourRotations)); + pieces.add(new Piece("t", "xxx/ x / x ", false, fourRotations)); + pieces.add(new Piece("w", " x/ xx/xx ", false, fourRotations)); + pieces.add(new Piece("u", "x x/xxx", false, fourRotations)); + pieces.add(new Piece("i", "xxxxx", false, twoRotations)); + pieces.add(new Piece("f", " xx/xx / x ", false, fourRotations)); + pieces.add(new Piece("p", "xx/xx/x ", false, fourRotations)); + pieces.add(new Piece("z", "xx / x / xx", false, twoRotations)); + pieces.add(new Piece("n", "xx / xxx", false, fourRotations)); + pieces.add(new Piece("y", " x /xxxx", false, fourRotations)); + pieces.add(new Piece("l", " x/xxxx", false, fourRotations)); + pieces.add(new Piece("F", "xx / xx/ x ", false, fourRotations)); + pieces.add(new Piece("P", "xx/xx/ x", false, fourRotations)); + pieces.add(new Piece("Z", " xx/ x /xx ", false, twoRotations)); + pieces.add(new Piece("N", " xx/xxx ", false, fourRotations)); + pieces.add(new Piece("Y", " x /xxxx", false, fourRotations)); + pieces.add(new Piece("L", "x /xxxx", false, fourRotations)); + } + + /** + * Solve the 3x30 puzzle. + * @param args + */ + public static void main(String[] args) { + Pentomino model = new OneSidedPentomino(3, 30); + int solutions = model.solve(); + System.out.println(solutions + " solutions found."); + } + +} diff --git a/src/examples/org/apache/hadoop/examples/dancing/Pentomino.java b/src/examples/org/apache/hadoop/examples/dancing/Pentomino.java new file mode 100644 index 0000000..03aab68 --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/dancing/Pentomino.java @@ -0,0 +1,450 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.examples.dancing; + +import java.util.*; + +public class Pentomino { + + /** + * This interface just is a marker for what types I expect to get back + * as column names. + */ + protected static interface ColumnName { + // NOTHING + } + + /** + * Maintain information about a puzzle piece. 
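+ * The shape string lists the piece row by row, with rows separated by '/' and
+ * 'x' marking an occupied cell, so " x /xxx/ x " is the plus-shaped x piece;
+ * the rotations array names which of the four rotations yield distinct shapes.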
+ */ + protected static class Piece implements ColumnName { + private String name; + private boolean [][] shape; + private int[] rotations; + private boolean flippable; + + public Piece(String name, String shape, + boolean flippable, int[] rotations) { + this.name = name; + this.rotations = rotations; + this.flippable = flippable; + StringTokenizer parser = new StringTokenizer(shape, "/"); + List lines = new ArrayList(); + while (parser.hasMoreTokens()) { + String token = parser.nextToken(); + boolean[] line = new boolean[token.length()]; + for(int i=0; i < line.length; ++i) { + line[i] = token.charAt(i) == 'x'; + } + lines.add(line); + } + this.shape = new boolean[lines.size()][]; + for(int i=0 ; i < lines.size(); i++) { + this.shape[i] = (boolean[]) lines.get(i); + } + } + + public String getName() { + return name; + } + + public int[] getRotations() { + return rotations; + } + + public boolean getFlippable() { + return flippable; + } + + private int doFlip(boolean flip, int x, int max) { + if (flip) { + return max - x - 1; + } else { + return x; + } + } + + public boolean[][] getShape(boolean flip, int rotate) { + boolean [][] result; + if (rotate % 2 == 0) { + int height = shape.length; + int width = shape[0].length; + result = new boolean[height][]; + boolean flipX = rotate == 2; + boolean flipY = flip ^ (rotate == 2); + for (int y = 0; y < height; ++y) { + result[y] = new boolean[width]; + for (int x=0; x < width; ++x) { + result[y][x] = shape[doFlip(flipY, y, height)] + [doFlip(flipX, x, width)]; + } + } + } else { + int height = shape[0].length; + int width = shape.length; + result = new boolean[height][]; + boolean flipX = rotate == 3; + boolean flipY = flip ^ (rotate == 1); + for (int y = 0; y < height; ++y) { + result[y] = new boolean[width]; + for (int x=0; x < width; ++x) { + result[y][x] = shape[doFlip(flipX, x, width)] + [doFlip(flipY, y, height)]; + } + } + } + return result; + } + } + + /** + * A point in the puzzle board. This represents a placement of a piece into + * a given point on the board. + */ + static class Point implements ColumnName { + int x; + int y; + Point(int x, int y) { + this.x = x; + this.y = y; + } + } + + + /** + * Convert a solution to the puzzle returned by the model into a string + * that represents the placement of the pieces onto the board. + * @param width the width of the puzzle board + * @param height the height of the puzzle board + * @param solution the list of column names that were selected in the model + * @return a string representation of completed puzzle board + */ + public static String stringifySolution(int width, int height, + List> solution) { + String[][] picture = new String[height][width]; + StringBuffer result = new StringBuffer(); + // for each piece placement... 
+ for(List row: solution) { + // go through to find which piece was placed + Piece piece = null; + for(ColumnName item: row) { + if (item instanceof Piece) { + piece = (Piece) item; + break; + } + } + // for each point where the piece was placed, mark it with the piece name + for(ColumnName item: row) { + if (item instanceof Point) { + Point p = (Point) item; + picture[p.y][p.x] = piece.getName(); + } + } + } + // put the string together + for(int y=0; y < picture.length; ++y) { + for (int x=0; x < picture[y].length; ++x) { + result.append(picture[y][x]); + } + result.append("\n"); + } + return result.toString(); + } + + public enum SolutionCategory {UPPER_LEFT, MID_X, MID_Y, CENTER} + + /** + * Find whether the solution has the x in the upper left quadrant, the + * x-midline, the y-midline or in the center. + * @param names the solution to check + * @return the catagory of the solution + */ + public SolutionCategory getCategory(List> names) { + Piece xPiece = null; + // find the "x" piece + for(Piece p: pieces) { + if ("x".equals(p.name)) { + xPiece = p; + break; + } + } + // find the row containing the "x" + for(List row: names) { + if (row.contains(xPiece)) { + // figure out where the "x" is located + int low_x = width; + int high_x = 0; + int low_y = height; + int high_y = 0; + for(ColumnName col: row) { + if (col instanceof Point) { + int x = ((Point) col).x; + int y = ((Point) col).y; + if (x < low_x) { + low_x = x; + } + if (x > high_x) { + high_x = x; + } + if (y < low_y) { + low_y = y; + } + if (y > high_y) { + high_y = y; + } + } + } + boolean mid_x = (low_x + high_x == width - 1); + boolean mid_y = (low_y + high_y == height - 1); + if (mid_x && mid_y) { + return SolutionCategory.CENTER; + } else if (mid_x) { + return SolutionCategory.MID_X; + } else if (mid_y) { + return SolutionCategory.MID_Y; + } + break; + } + } + return SolutionCategory.UPPER_LEFT; + } + + /** + * A solution printer that just writes the solution to stdout. + */ + private static class SolutionPrinter + implements DancingLinks.SolutionAcceptor { + int width; + int height; + + public SolutionPrinter(int width, int height) { + this.width = width; + this.height = height; + } + + public void solution(List> names) { + System.out.println(stringifySolution(width, height, names)); + } + } + + protected int width; + protected int height; + + protected List pieces = new ArrayList(); + + /** + * Is the piece fixed under rotation? + */ + protected static final int [] oneRotation = new int[]{0}; + + /** + * Is the piece identical if rotated 180 degrees? + */ + protected static final int [] twoRotations = new int[]{0,1}; + + /** + * Are all 4 rotations unique? + */ + protected static final int [] fourRotations = new int[]{0,1,2,3}; + + /** + * Fill in the pieces list. 
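+ * The pieces whose mirror image is a distinct shape are created with
+ * flippable set to true, so initialize() also generates rows for their
+ * flipped placements.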
+ */ + protected void initializePieces() { + pieces.add(new Piece("x", " x /xxx/ x ", false, oneRotation)); + pieces.add(new Piece("v", "x /x /xxx", false, fourRotations)); + pieces.add(new Piece("t", "xxx/ x / x ", false, fourRotations)); + pieces.add(new Piece("w", " x/ xx/xx ", false, fourRotations)); + pieces.add(new Piece("u", "x x/xxx", false, fourRotations)); + pieces.add(new Piece("i", "xxxxx", false, twoRotations)); + pieces.add(new Piece("f", " xx/xx / x ", true, fourRotations)); + pieces.add(new Piece("p", "xx/xx/x ", true, fourRotations)); + pieces.add(new Piece("z", "xx / x / xx", true, twoRotations)); + pieces.add(new Piece("n", "xx / xxx", true, fourRotations)); + pieces.add(new Piece("y", " x /xxxx", true, fourRotations)); + pieces.add(new Piece("l", " x/xxxx", true, fourRotations)); + } + + /** + * Is the middle of piece on the upper/left side of the board with + * a given offset and size of the piece? This only checks in one + * dimension. + * @param offset the offset of the piece + * @param shapeSize the size of the piece + * @param board the size of the board + * @return is it in the upper/left? + */ + private static boolean isSide(int offset, int shapeSize, int board) { + return 2*offset + shapeSize <= board; + } + + /** + * For a given piece, generate all of the potential placements and add them + * as rows to the model. + * @param dancer the problem model + * @param piece the piece we are trying to place + * @param width the width of the board + * @param height the height of the board + * @param flip is the piece flipped over? + * @param row a workspace the length of the each row in the table + * @param upperLeft is the piece constrained to the upper left of the board? + * this is used on a single piece to eliminate most of the trivial + * roations of the solution. + */ + private static void generateRows(DancingLinks dancer, + Piece piece, + int width, + int height, + boolean flip, + boolean[] row, + boolean upperLeft) { + // for each rotation + int[] rotations = piece.getRotations(); + for(int rotIndex = 0; rotIndex < rotations.length; ++rotIndex) { + // get the shape + boolean[][] shape = piece.getShape(flip, rotations[rotIndex]); + // find all of the valid offsets + for(int x=0; x < width; ++x) { + for(int y=0; y < height; ++y) { + if (y + shape.length <= height && x + shape[0].length <= width && + (!upperLeft || + (isSide(x, shape[0].length, width) && + isSide(y, shape.length, height)))) { + // clear the columns related to the points on the board + for(int idx=0; idx < width * height; ++idx) { + row[idx] = false; + } + // mark the shape + for(int subY=0; subY < shape.length; ++subY) { + for(int subX=0; subX < shape[0].length; ++subX) { + row[(y + subY) * width + x + subX] = shape[subY][subX]; + } + } + dancer.addRow(row); + } + } + } + } + } + + private DancingLinks dancer = new DancingLinks(); + private DancingLinks.SolutionAcceptor printer; + + { + initializePieces(); + } + + /** + * Create the model for a given pentomino set of pieces and board size. + * @param width the width of the board in squares + * @param height the height of the board in squares + */ + public Pentomino(int width, int height) { + initialize(width, height); + } + + /** + * Create the object without initialization. 
+ */ + public Pentomino() { + } + + void initialize(int width, int height) { + this.width = width; + this.height = height; + for(int y=0; y < height; ++y) { + for(int x=0; x < width; ++x) { + dancer.addColumn(new Point(x,y)); + } + } + int pieceBase = dancer.getNumberColumns(); + for(Piece p: pieces) { + dancer.addColumn(p); + } + boolean[] row = new boolean[dancer.getNumberColumns()]; + for(int idx = 0; idx < pieces.size(); ++idx) { + Piece piece = (Piece) pieces.get(idx); + row[idx + pieceBase] = true; + generateRows(dancer, piece, width, height, false, row, idx == 0); + if (piece.getFlippable()) { + generateRows(dancer, piece, width, height, true, row, idx == 0); + } + row[idx + pieceBase] = false; + } + printer = new SolutionPrinter(width, height); + } + + /** + * Generate a list of prefixes to a given depth + * @param depth the length of each prefix + * @return a list of arrays of ints, which are potential prefixes + */ + public List getSplits(int depth) { + return dancer.split(depth); + } + + /** + * Find all of the solutions that start with the given prefix. The printer + * is given each solution as it is found. + * @param split a list of row indexes that should be choosen for each row + * in order + * @return the number of solutions found + */ + public int solve(int[] split) { + return dancer.solve(split, printer); + } + + /** + * Find all of the solutions to the puzzle. + * @return the number of solutions found + */ + public int solve() { + return dancer.solve(printer); + } + + /** + * Set the printer for the puzzle. + * @param printer A call-back object that is given each solution as it is + * found. + */ + public void setPrinter(DancingLinks.SolutionAcceptor printer) { + this.printer = printer; + } + + /** + * Solve the 6x10 pentomino puzzle. + */ + public static void main(String[] args) { + int width = 6; + int height = 10; + Pentomino model = new Pentomino(width, height); + List splits = model.getSplits(2); + for(Iterator splitItr=splits.iterator(); splitItr.hasNext(); ) { + int[] choices = (int[]) splitItr.next(); + System.out.print("split:"); + for(int i=0; i < choices.length; ++i) { + System.out.print(" " + choices[i]); + } + System.out.println(); + + System.out.println(model.solve(choices) + " solutions found."); + } + } + +} diff --git a/src/examples/org/apache/hadoop/examples/dancing/Sudoku.java b/src/examples/org/apache/hadoop/examples/dancing/Sudoku.java new file mode 100644 index 0000000..502b9f4 --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/dancing/Sudoku.java @@ -0,0 +1,318 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.examples.dancing; + +import java.io.*; +import java.util.*; + +/** + * This class uses the dancing links algorithm from Knuth to solve sudoku + * puzzles. It has solved 42x42 puzzles in 1.02 seconds. + */ +public class Sudoku { + + /** + * The preset values in the board + * board[y][x] is the value at x,y with -1 = any + */ + private int[][] board; + + /** + * The size of the board + */ + private int size; + + /** + * The size of the sub-squares in cells across + */ + private int squareXSize; + + /** + * The size of the sub-squares in celss up and down + */ + private int squareYSize; + + /** + * This interface is a marker class for the columns created for the + * Sudoku solver. + */ + protected static interface ColumnName { + // NOTHING + } + + /** + * A string containing a representation of the solution. + * @param size the size of the board + * @param solution a list of list of column names + * @return a string of the solution matrix + */ + static String stringifySolution(int size, List> solution) { + int[][] picture = new int[size][size]; + StringBuffer result = new StringBuffer(); + // go through the rows selected in the model and build a picture of the + // solution. + for(List row: solution) { + int x = -1; + int y = -1; + int num = -1; + for(ColumnName item: row) { + if (item instanceof ColumnConstraint) { + x = ((ColumnConstraint) item).column; + num = ((ColumnConstraint) item).num; + } else if (item instanceof RowConstraint) { + y = ((RowConstraint) item).row; + } + } + picture[y][x] = num; + } + // build the string + for(int y=0; y < size; ++y) { + for (int x=0; x < size; ++x) { + result.append(picture[y][x]); + result.append(" "); + } + result.append("\n"); + } + return result.toString(); + } + + /** + * An acceptor to get the solutions to the puzzle as they are generated and + * print them to the console. + */ + private static class SolutionPrinter + implements DancingLinks.SolutionAcceptor { + int size; + + public SolutionPrinter(int size) { + this.size = size; + } + + /** + * A debugging aid that just prints the raw information about the + * dancing link columns that were selected for each row. + * @param solution a list of list of column names + */ + void rawWrite(List solution) { + for (Iterator itr=solution.iterator(); itr.hasNext(); ) { + Iterator subitr = ((List) itr.next()).iterator(); + while (subitr.hasNext()) { + System.out.print(subitr.next().toString() + " "); + } + System.out.println(); + } + } + + public void solution(List> names) { + System.out.println(stringifySolution(size, names)); + } + } + + /** + * Set up a puzzle board to the given size. + * Boards may be asymmetric, but the squares will always be divided to be + * more cells wide than they are tall. For example, a 6x6 puzzle will make + * sub-squares that are 3x2 (3 cells wide, 2 cells tall). Clearly that means + * the board is made up of 2x3 sub-squares. 
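+ * (The constructor computes squareYSize = (int) Math.sqrt(size) and
+ * squareXSize = size / squareYSize, which for a 6x6 board gives 2 and 3.)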
+ * @param stream The input stream to read the data from + */ + public Sudoku(InputStream stream) throws IOException { + BufferedReader file = new BufferedReader(new InputStreamReader(stream)); + String line = file.readLine(); + List result = new ArrayList(); + while (line != null) { + StringTokenizer tokenizer = new StringTokenizer(line); + int size = tokenizer.countTokens(); + int[] col = new int[size]; + int y = 0; + while(tokenizer.hasMoreElements()) { + String word = tokenizer.nextToken(); + if ("?".equals(word)) { + col[y] = - 1; + } else { + col[y] = Integer.parseInt(word); + } + y += 1; + } + result.add(col); + line = file.readLine(); + } + size = result.size(); + board = (int[][]) result.toArray(new int [size][]); + squareYSize = (int) Math.sqrt(size); + squareXSize = size / squareYSize; + file.close(); + } + + /** + * A constraint that each number can appear just once in a column. + */ + static private class ColumnConstraint implements ColumnName { + ColumnConstraint(int num, int column) { + this.num = num; + this.column = column; + } + int num; + int column; + public String toString() { + return num + " in column " + column; + } + } + + /** + * A constraint that each number can appear just once in a row. + */ + static private class RowConstraint implements ColumnName { + RowConstraint(int num, int row) { + this.num = num; + this.row = row; + } + int num; + int row; + public String toString() { + return num + " in row " + row; + } + } + + /** + * A constraint that each number can appear just once in a square. + */ + static private class SquareConstraint implements ColumnName { + SquareConstraint(int num, int x, int y) { + this.num = num; + this.x = x; + this.y = y; + } + int num; + int x; + int y; + public String toString() { + return num + " in square " + x + "," + y; + } + } + + /** + * A constraint that each cell can only be used once. + */ + static private class CellConstraint implements ColumnName { + CellConstraint(int x, int y) { + this.x = x; + this.y = y; + } + int x; + int y; + public String toString() { + return "cell " + x + "," + y; + } + } + + /** + * Create a row that places num in cell x, y. 
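+ * For a 9x9 board, placing num = 5 at x = 2, y = 3 sets entries 22 (a 5 in
+ * column 2), 112 (a 5 in row 3), 175 (a 5 in its sub-square) and 264 (cell
+ * 2,3 occupied) of the 324-entry scratch row.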
+ * @param rowValues a scratch pad to mark the bits needed + * @param x the horizontal offset of the cell + * @param y the vertical offset of the cell + * @param num the number to place + * @return a bitvector of the columns selected + */ + private boolean[] generateRow(boolean[] rowValues, int x, int y, int num) { + // clear the scratch array + for(int i=0; i < rowValues.length; ++i) { + rowValues[i] = false; + } + // find the square coordinates + int xBox = (int) x / squareXSize; + int yBox = (int) y / squareYSize; + // mark the column + rowValues[x*size + num - 1] = true; + // mark the row + rowValues[size*size + y*size + num - 1] = true; + // mark the square + rowValues[2*size*size + (xBox*squareXSize + yBox)*size + num - 1] = true; + // mark the cell + rowValues[3*size*size + size*x + y] = true; + return rowValues; + } + + private DancingLinks makeModel() { + DancingLinks model = new DancingLinks(); + // create all of the columns constraints + for(int x=0; x < size; ++x) { + for(int num=1; num <= size; ++num) { + model.addColumn(new ColumnConstraint(num, x)); + } + } + // create all of the row constraints + for(int y=0; y < size; ++y) { + for(int num=1; num <= size; ++num) { + model.addColumn(new RowConstraint(num, y)); + } + } + // create the square constraints + for(int x=0; x < squareYSize; ++x) { + for(int y=0; y < squareXSize; ++y) { + for(int num=1; num <= size; ++num) { + model.addColumn(new SquareConstraint(num, x, y)); + } + } + } + // create the cell constraints + for(int x=0; x < size; ++x) { + for(int y=0; y < size; ++y) { + model.addColumn(new CellConstraint(x, y)); + } + } + boolean[] rowValues = new boolean[size*size*4]; + for(int x=0; x < size; ++x) { + for(int y=0; y < size; ++y) { + if (board[y][x] == -1) { + // try each possible value in the cell + for(int num=1; num <= size; ++num) { + model.addRow(generateRow(rowValues, x, y, num)); + } + } else { + // put the given cell in place + model.addRow(generateRow(rowValues, x, y, board[y][x])); + } + } + } + return model; + } + + public void solve() { + DancingLinks model = makeModel(); + int results = model.solve(new SolutionPrinter(size)); + System.out.println("Found " + results + " solutions"); + } + + /** + * Solves a set of sudoku puzzles. + * @param args a list of puzzle filenames to solve + */ + public static void main(String[] args) throws IOException { + if (args.length == 0) { + System.out.println("Include a puzzle on the command line."); + } + for(int i=0; i < args.length; ++i) { + Sudoku problem = new Sudoku(new FileInputStream(args[i])); + System.out.println("Solving " + args[i]); + problem.solve(); + } + } + +} diff --git a/src/examples/org/apache/hadoop/examples/dancing/package.html b/src/examples/org/apache/hadoop/examples/dancing/package.html new file mode 100644 index 0000000..b7cb472 --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/dancing/package.html @@ -0,0 +1,75 @@ + + + + + + +This package is a distributed implementation of Knuth's dancing links +algorithm that can run under Hadoop. It is a generic model for +problems, such as tile placement, where all of the valid choices can +be represented as a large sparse boolean array where the goal is to +pick a subset of the rows to end up with exactly 1 true value +in each column. + +
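(Illustrative sketch, not part of the original files: assuming only the addColumn/addRow/solve API that the Sudoku solver above calls — the generic signatures are assumed, since the patch text has the type parameters stripped — a tiny three-column exact-cover instance with made-up String column names would be fed to the solver like this.)

    // Toy exact-cover matrix: choose rows so every column holds exactly one true.
    DancingLinks<String> model = new DancingLinks<String>();
    model.addColumn("A");
    model.addColumn("B");
    model.addColumn("C");
    model.addRow(new boolean[]{true,  false, false});   // covers A
    model.addRow(new boolean[]{false, true,  true });   // covers B and C
    model.addRow(new boolean[]{true,  true,  false});   // covers A and B
    model.addRow(new boolean[]{false, false, true });   // covers C
    int found = model.solve(new DancingLinks.SolutionAcceptor<String>() {
      public void solution(java.util.List<java.util.List<String>> names) {
        System.out.println(names);   // e.g. [[A], [B, C]] or [[A, B], [C]]
      }
    });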

+ +The package includes two example applications: a pentomino solver and +a sudoku solver. + +

+ +The pentomino example includes both a "normal" pentomino set and a +one-sided set in which the tiles that differ when flipped are +duplicated. The pentomino solver has a Hadoop driver application to +launch it on a cluster. In Knuth's paper on dancing links, he describes +trying and failing to solve the one-sided pentomino on a 9x10 board. +With the advances in computers and a cluster, it takes a small (12 +node) Hadoop cluster 9 hours to find all of the solutions that Knuth +estimated would have taken him months. + +

+ +The sudoku solver is so fast that I didn't bother making a distributed +version. (All of the puzzles that I've tried, including a 42x42, have +taken around a second to solve.) On the command line, give the solver +a list of puzzle files to solve. Puzzle files have one line per row, +with columns separated by spaces. Each square holds either a number or +'?' to mean unknown. + +
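(For example, a 4x4 puzzle in this format could look like the lines below; this board is invented for illustration, while the 9x9 puzzle1.dta added later in this patch is a real instance of the same layout.)

    1 ? 3 ?
    ? 4 ? 2
    2 ? 4 ?
    ? 3 ? 1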

+ +Both applications have been added to the examples jar, so they can be +run as: + +

+bin/hadoop jar hadoop-*-examples.jar pentomino pent-outdir
+bin/hadoop jar hadoop-*-examples.jar sudoku puzzle.txt
+
+ +

+ +I (Owen) implemented the original version of the distributed pentomino +solver for a Yahoo Hack day, where Yahoos get to work on a project of +their own choosing for a day to make something cool. The following +afternoon, everyone gets to show off their hacks and gets a free +t-shirt. I had a lot of fun doing it. + + + diff --git a/src/examples/org/apache/hadoop/examples/dancing/puzzle1.dta b/src/examples/org/apache/hadoop/examples/dancing/puzzle1.dta new file mode 100644 index 0000000..16aa2d5 --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/dancing/puzzle1.dta @@ -0,0 +1,9 @@ +8 5 ? 3 9 ? ? ? ? +? ? 2 ? ? ? ? ? ? +? ? 6 ? 1 ? ? ? 2 +? ? 4 ? ? 3 ? 5 9 +? ? 8 9 ? 1 4 ? ? +3 2 ? 4 ? ? 8 ? ? +9 ? ? ? 8 ? 5 ? ? +? ? ? ? ? ? 2 ? ? +? ? ? ? 4 5 ? 7 8 diff --git a/src/examples/org/apache/hadoop/examples/package.html b/src/examples/org/apache/hadoop/examples/package.html new file mode 100644 index 0000000..0906086 --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/package.html @@ -0,0 +1,23 @@ + + + + + +Hadoop example code. + + diff --git a/src/examples/org/apache/hadoop/examples/terasort/TeraGen.java b/src/examples/org/apache/hadoop/examples/terasort/TeraGen.java new file mode 100644 index 0000000..8893170 --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/terasort/TeraGen.java @@ -0,0 +1,361 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.examples.terasort; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.NullWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.mapred.FileOutputFormat; +import org.apache.hadoop.mapred.InputFormat; +import org.apache.hadoop.mapred.InputSplit; +import org.apache.hadoop.mapred.JobClient; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.MapReduceBase; +import org.apache.hadoop.mapred.Mapper; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.RecordReader; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; + +/** + * Generate the official terasort input data set. + * The user specifies the number of rows and the output directory and this + * class runs a map/reduce program to generate the data. + * The format of the data is: + *

    + *
  • (10 bytes key) (10 bytes rowid) (78 bytes filler) \r \n + *
  • The keys are random characters from the set ' ' .. '~'. + *
  • The rowid is the right-justified row id as an int. + *
  • The filler consists of 7 runs of 10 characters from 'A' to 'Z'. + *
+ * + *

+ * To run the program: + * bin/hadoop jar hadoop-*-examples.jar teragen 10000000000 in-dir + */ +public class TeraGen extends Configured implements Tool { + + /** + * An input format that assigns ranges of longs to each mapper. + */ + static class RangeInputFormat + implements InputFormat { + + /** + * An input split consisting of a range on numbers. + */ + static class RangeInputSplit implements InputSplit { + long firstRow; + long rowCount; + + public RangeInputSplit() { } + + public RangeInputSplit(long offset, long length) { + firstRow = offset; + rowCount = length; + } + + public long getLength() throws IOException { + return 0; + } + + public String[] getLocations() throws IOException { + return new String[]{}; + } + + public void readFields(DataInput in) throws IOException { + firstRow = WritableUtils.readVLong(in); + rowCount = WritableUtils.readVLong(in); + } + + public void write(DataOutput out) throws IOException { + WritableUtils.writeVLong(out, firstRow); + WritableUtils.writeVLong(out, rowCount); + } + } + + /** + * A record reader that will generate a range of numbers. + */ + static class RangeRecordReader + implements RecordReader { + long startRow; + long finishedRows; + long totalRows; + + public RangeRecordReader(RangeInputSplit split) { + startRow = split.firstRow; + finishedRows = 0; + totalRows = split.rowCount; + } + + public void close() throws IOException { + // NOTHING + } + + public LongWritable createKey() { + return new LongWritable(); + } + + public NullWritable createValue() { + return NullWritable.get(); + } + + public long getPos() throws IOException { + return finishedRows; + } + + public float getProgress() throws IOException { + return finishedRows / (float) totalRows; + } + + public boolean next(LongWritable key, + NullWritable value) { + if (finishedRows < totalRows) { + key.set(startRow + finishedRows); + finishedRows += 1; + return true; + } else { + return false; + } + } + + } + + public RecordReader + getRecordReader(InputSplit split, JobConf job, + Reporter reporter) throws IOException { + return new RangeRecordReader((RangeInputSplit) split); + } + + /** + * Create the desired number of splits, dividing the number of rows + * between the mappers. + */ + public InputSplit[] getSplits(JobConf job, + int numSplits) { + long totalRows = getNumberOfRows(job); + long rowsPerSplit = totalRows / numSplits; + System.out.println("Generating " + totalRows + " using " + numSplits + + " maps with step of " + rowsPerSplit); + InputSplit[] splits = new InputSplit[numSplits]; + long currentRow = 0; + for(int split=0; split < numSplits-1; ++split) { + splits[split] = new RangeInputSplit(currentRow, rowsPerSplit); + currentRow += rowsPerSplit; + } + splits[numSplits-1] = new RangeInputSplit(currentRow, + totalRows - currentRow); + return splits; + } + + } + + static long getNumberOfRows(JobConf job) { + return job.getLong("terasort.num-rows", 0); + } + + static void setNumberOfRows(JobConf job, long numRows) { + job.setLong("terasort.num-rows", numRows); + } + + static class RandomGenerator { + private long seed = 0; + private static final long mask32 = (1l<<32) - 1; + /** + * The number of iterations separating the precomputed seeds. + */ + private static final int seedSkip = 128 * 1024 * 1024; + /** + * The precomputed seed values after every seedSkip iterations. + * There should be enough values so that a 2**32 iterations are + * covered. 
+ */ + private static final long[] seeds = new long[]{0L, + 4160749568L, + 4026531840L, + 3892314112L, + 3758096384L, + 3623878656L, + 3489660928L, + 3355443200L, + 3221225472L, + 3087007744L, + 2952790016L, + 2818572288L, + 2684354560L, + 2550136832L, + 2415919104L, + 2281701376L, + 2147483648L, + 2013265920L, + 1879048192L, + 1744830464L, + 1610612736L, + 1476395008L, + 1342177280L, + 1207959552L, + 1073741824L, + 939524096L, + 805306368L, + 671088640L, + 536870912L, + 402653184L, + 268435456L, + 134217728L, + }; + + /** + * Start the random number generator on the given iteration. + * @param initalIteration the iteration number to start on + */ + RandomGenerator(long initalIteration) { + int baseIndex = (int) ((initalIteration & mask32) / seedSkip); + seed = seeds[baseIndex]; + for(int i=0; i < initalIteration % seedSkip; ++i) { + next(); + } + } + + RandomGenerator() { + this(0); + } + + long next() { + seed = (seed * 3141592621l + 663896637) & mask32; + return seed; + } + } + + /** + * The Mapper class that given a row number, will generate the appropriate + * output line. + */ + public static class SortGenMapper extends MapReduceBase + implements Mapper { + + private Text key = new Text(); + private Text value = new Text(); + private RandomGenerator rand; + private byte[] keyBytes = new byte[12]; + private byte[] spaces = " ".getBytes(); + private byte[][] filler = new byte[26][]; + { + for(int i=0; i < 26; ++i) { + filler[i] = new byte[10]; + for(int j=0; j<10; ++j) { + filler[i][j] = (byte) ('A' + i); + } + } + } + + /** + * Add a random key to the text + * @param rowId + */ + private void addKey() { + for(int i=0; i<3; i++) { + long temp = rand.next() / 52; + keyBytes[3 + 4*i] = (byte) (' ' + (temp % 95)); + temp /= 95; + keyBytes[2 + 4*i] = (byte) (' ' + (temp % 95)); + temp /= 95; + keyBytes[1 + 4*i] = (byte) (' ' + (temp % 95)); + temp /= 95; + keyBytes[4*i] = (byte) (' ' + (temp % 95)); + } + key.set(keyBytes, 0, 10); + } + + /** + * Add the rowid to the row. + * @param rowId + */ + private void addRowId(long rowId) { + byte[] rowid = Integer.toString((int) rowId).getBytes(); + int padSpace = 10 - rowid.length; + if (padSpace > 0) { + value.append(spaces, 0, 10 - rowid.length); + } + value.append(rowid, 0, Math.min(rowid.length, 10)); + } + + /** + * Add the required filler bytes. Each row consists of 7 blocks of + * 10 characters and 1 block of 8 characters. 
+ * @param rowId the current row number + */ + private void addFiller(long rowId) { + int base = (int) ((rowId * 8) % 26); + for(int i=0; i<7; ++i) { + value.append(filler[(base+i) % 26], 0, 10); + } + value.append(filler[(base+7) % 26], 0, 8); + } + + public void map(LongWritable row, NullWritable ignored, + OutputCollector output, + Reporter reporter) throws IOException { + long rowId = row.get(); + if (rand == null) { + // we use 3 random numbers per a row + rand = new RandomGenerator(rowId*3); + } + addKey(); + value.clear(); + addRowId(rowId); + addFiller(rowId); + output.collect(key, value); + } + + } + + /** + * @param args the cli arguments + */ + public int run(String[] args) throws IOException { + JobConf job = (JobConf) getConf(); + setNumberOfRows(job, Long.parseLong(args[0])); + FileOutputFormat.setOutputPath(job, new Path(args[1])); + job.setJobName("TeraGen"); + job.setJarByClass(TeraGen.class); + job.setMapperClass(SortGenMapper.class); + job.setNumReduceTasks(0); + job.setOutputKeyClass(Text.class); + job.setOutputValueClass(Text.class); + job.setInputFormat(RangeInputFormat.class); + job.setOutputFormat(TeraOutputFormat.class); + JobClient.runJob(job); + return 0; + } + + public static void main(String[] args) throws Exception { + int res = ToolRunner.run(new JobConf(), new TeraGen(), args); + System.exit(res); + } + +} diff --git a/src/examples/org/apache/hadoop/examples/terasort/TeraInputFormat.java b/src/examples/org/apache/hadoop/examples/terasort/TeraInputFormat.java new file mode 100644 index 0000000..5778c98 --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/terasort/TeraInputFormat.java @@ -0,0 +1,212 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.examples.terasort; + +import java.io.IOException; +import java.util.ArrayList; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.NullWritable; +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.FileInputFormat; +import org.apache.hadoop.mapred.FileSplit; +import org.apache.hadoop.mapred.InputSplit; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.LineRecordReader; +import org.apache.hadoop.mapred.RecordReader; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.util.IndexedSortable; +import org.apache.hadoop.util.QuickSort; + +/** + * An input format that reads the first 10 characters of each line as the key + * and the rest of the line as the value. Both key and value are represented + * as Text. 
+ */ +public class TeraInputFormat extends FileInputFormat { + + static final String PARTITION_FILENAME = "_partition.lst"; + static final String SAMPLE_SIZE = "terasort.partitions.sample"; + private static JobConf lastConf = null; + private static InputSplit[] lastResult = null; + + static class TextSampler implements IndexedSortable { + private ArrayList records = new ArrayList(); + + public int compare(int i, int j) { + Text left = records.get(i); + Text right = records.get(j); + return left.compareTo(right); + } + + public void swap(int i, int j) { + Text left = records.get(i); + Text right = records.get(j); + records.set(j, left); + records.set(i, right); + } + + public void addKey(Text key) { + records.add(new Text(key)); + } + + /** + * Find the split points for a given sample. The sample keys are sorted + * and down sampled to find even split points for the partitions. The + * returned keys should be the start of their respective partitions. + * @param numPartitions the desired number of partitions + * @return an array of size numPartitions - 1 that holds the split points + */ + Text[] createPartitions(int numPartitions) { + int numRecords = records.size(); + System.out.println("Making " + numPartitions + " from " + numRecords + + " records"); + if (numPartitions > numRecords) { + throw new IllegalArgumentException + ("Requested more partitions than input keys (" + numPartitions + + " > " + numRecords + ")"); + } + new QuickSort().sort(this, 0, records.size()); + float stepSize = numRecords / (float) numPartitions; + System.out.println("Step size is " + stepSize); + Text[] result = new Text[numPartitions-1]; + for(int i=1; i < numPartitions; ++i) { + result[i-1] = records.get(Math.round(stepSize * i)); + } + return result; + } + } + + /** + * Use the input splits to take samples of the input and generate sample + * keys. By default reads 100,000 keys from 10 locations in the input, sorts + * them and picks N-1 keys to generate N equally sized partitions. 
+ * @param conf the job to sample + * @param partFile where to write the output file to + * @throws IOException if something goes wrong + */ + public static void writePartitionFile(JobConf conf, + Path partFile) throws IOException { + TeraInputFormat inFormat = new TeraInputFormat(); + TextSampler sampler = new TextSampler(); + Text key = new Text(); + Text value = new Text(); + int partitions = conf.getNumReduceTasks(); + long sampleSize = conf.getLong(SAMPLE_SIZE, 100000); + InputSplit[] splits = inFormat.getSplits(conf, conf.getNumMapTasks()); + int samples = Math.min(10, splits.length); + long recordsPerSample = sampleSize / samples; + int sampleStep = splits.length / samples; + long records = 0; + // take N samples from different parts of the input + for(int i=0; i < samples; ++i) { + RecordReader reader = + inFormat.getRecordReader(splits[sampleStep * i], conf, null); + while (reader.next(key, value)) { + sampler.addKey(key); + records += 1; + if ((i+1) * recordsPerSample <= records) { + break; + } + } + } + FileSystem outFs = partFile.getFileSystem(conf); + if (outFs.exists(partFile)) { + outFs.delete(partFile, false); + } + SequenceFile.Writer writer = + SequenceFile.createWriter(outFs, conf, partFile, Text.class, + NullWritable.class); + NullWritable nullValue = NullWritable.get(); + for(Text split : sampler.createPartitions(partitions)) { + writer.append(split, nullValue); + } + writer.close(); + } + + static class TeraRecordReader implements RecordReader { + private LineRecordReader in; + private LongWritable junk = new LongWritable(); + private Text line = new Text(); + private static int KEY_LENGTH = 10; + + public TeraRecordReader(Configuration job, + FileSplit split) throws IOException { + in = new LineRecordReader(job, split); + } + + public void close() throws IOException { + in.close(); + } + + public Text createKey() { + return new Text(); + } + + public Text createValue() { + return new Text(); + } + + public long getPos() throws IOException { + return in.getPos(); + } + + public float getProgress() throws IOException { + return in.getProgress(); + } + + public boolean next(Text key, Text value) throws IOException { + if (in.next(junk, line)) { + if (line.getLength() < KEY_LENGTH) { + key.set(line); + value.clear(); + } else { + byte[] bytes = line.getBytes(); + key.set(bytes, 0, KEY_LENGTH); + value.set(bytes, KEY_LENGTH, line.getLength() - KEY_LENGTH); + } + return true; + } else { + return false; + } + } + } + + @Override + public RecordReader + getRecordReader(InputSplit split, + JobConf job, + Reporter reporter) throws IOException { + return new TeraRecordReader(job, (FileSplit) split); + } + + @Override + public InputSplit[] getSplits(JobConf conf, int splits) throws IOException { + if (conf == lastConf) { + return lastResult; + } + lastConf = conf; + lastResult = super.getSplits(conf, splits); + return lastResult; + } +} diff --git a/src/examples/org/apache/hadoop/examples/terasort/TeraOutputFormat.java b/src/examples/org/apache/hadoop/examples/terasort/TeraOutputFormat.java new file mode 100644 index 0000000..baa3fd7 --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/terasort/TeraOutputFormat.java @@ -0,0 +1,88 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.examples.terasort; + +import java.io.DataOutputStream; +import java.io.IOException; + +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.RecordWriter; +import org.apache.hadoop.mapred.TextOutputFormat; +import org.apache.hadoop.util.Progressable; + +/** + * A streamlined text output format that writes key, value, and "\r\n". + */ +public class TeraOutputFormat extends TextOutputFormat { + static final String FINAL_SYNC_ATTRIBUTE = "terasort.final.sync"; + + /** + * Set the requirement for a final sync before the stream is closed. + */ + public static void setFinalSync(JobConf conf, boolean newValue) { + conf.setBoolean(FINAL_SYNC_ATTRIBUTE, newValue); + } + + /** + * Does the user want a final sync at close? + */ + public static boolean getFinalSync(JobConf conf) { + return conf.getBoolean(FINAL_SYNC_ATTRIBUTE, false); + } + + static class TeraRecordWriter extends LineRecordWriter { + private static final byte[] newLine = "\r\n".getBytes(); + private boolean finalSync = false; + + public TeraRecordWriter(DataOutputStream out, + JobConf conf) { + super(out); + finalSync = getFinalSync(conf); + } + + public synchronized void write(Text key, + Text value) throws IOException { + out.write(key.getBytes(), 0, key.getLength()); + out.write(value.getBytes(), 0, value.getLength()); + out.write(newLine, 0, newLine.length); + } + + public void close() throws IOException { + if (finalSync) { + ((FSDataOutputStream) out).sync(); + } + super.close(null); + } + } + + public RecordWriter getRecordWriter(FileSystem ignored, + JobConf job, + String name, + Progressable progress + ) throws IOException { + Path dir = getWorkOutputPath(job); + FileSystem fs = dir.getFileSystem(job); + FSDataOutputStream fileOut = fs.create(new Path(dir, name), progress); + return new TeraRecordWriter(fileOut, job); + } +} diff --git a/src/examples/org/apache/hadoop/examples/terasort/TeraSort.java b/src/examples/org/apache/hadoop/examples/terasort/TeraSort.java new file mode 100644 index 0000000..c185cc4 --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/terasort/TeraSort.java @@ -0,0 +1,261 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.examples.terasort; + +import java.io.IOException; +import java.io.PrintStream; +import java.net.URI; +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.filecache.DistributedCache; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.NullWritable; +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.FileOutputFormat; +import org.apache.hadoop.mapred.JobClient; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.Partitioner; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; + +/** + * Generates the sampled split points, launches the job, and waits for it to + * finish. + *

+ * To run the program: + * bin/hadoop jar hadoop-*-examples.jar terasort in-dir out-dir + */ +public class TeraSort extends Configured implements Tool { + private static final Log LOG = LogFactory.getLog(TeraSort.class); + + /** + * A partitioner that splits text keys into roughly equal partitions + * in a global sorted order. + */ + static class TotalOrderPartitioner implements Partitioner{ + private TrieNode trie; + private Text[] splitPoints; + + /** + * A generic trie node + */ + static abstract class TrieNode { + private int level; + TrieNode(int level) { + this.level = level; + } + abstract int findPartition(Text key); + abstract void print(PrintStream strm) throws IOException; + int getLevel() { + return level; + } + } + + /** + * An inner trie node that contains 256 children based on the next + * character. + */ + static class InnerTrieNode extends TrieNode { + private TrieNode[] child = new TrieNode[256]; + + InnerTrieNode(int level) { + super(level); + } + int findPartition(Text key) { + int level = getLevel(); + if (key.getLength() <= level) { + return child[0].findPartition(key); + } + return child[key.getBytes()[level]].findPartition(key); + } + void setChild(int idx, TrieNode child) { + this.child[idx] = child; + } + void print(PrintStream strm) throws IOException { + for(int ch=0; ch < 255; ++ch) { + for(int i = 0; i < 2*getLevel(); ++i) { + strm.print(' '); + } + strm.print(ch); + strm.println(" ->"); + if (child[ch] != null) { + child[ch].print(strm); + } + } + } + } + + /** + * A leaf trie node that does string compares to figure out where the given + * key belongs between lower..upper. + */ + static class LeafTrieNode extends TrieNode { + int lower; + int upper; + Text[] splitPoints; + LeafTrieNode(int level, Text[] splitPoints, int lower, int upper) { + super(level); + this.splitPoints = splitPoints; + this.lower = lower; + this.upper = upper; + } + int findPartition(Text key) { + for(int i=lower; i= 0) { + return i; + } + } + return upper; + } + void print(PrintStream strm) throws IOException { + for(int i = 0; i < 2*getLevel(); ++i) { + strm.print(' '); + } + strm.print(lower); + strm.print(", "); + strm.println(upper); + } + } + + + /** + * Read the cut points from the given sequence file. + * @param fs the file system + * @param p the path to read + * @param job the job config + * @return the strings to split the partitions on + * @throws IOException + */ + private static Text[] readPartitions(FileSystem fs, Path p, + JobConf job) throws IOException { + SequenceFile.Reader reader = new SequenceFile.Reader(fs, p, job); + List parts = new ArrayList(); + Text key = new Text(); + NullWritable value = NullWritable.get(); + while (reader.next(key, value)) { + parts.add(key); + key = new Text(); + } + reader.close(); + return parts.toArray(new Text[parts.size()]); + } + + /** + * Given a sorted set of cut points, build a trie that will find the correct + * partition quickly. 
+ * @param splits the list of cut points + * @param lower the lower bound of partitions 0..numPartitions-1 + * @param upper the upper bound of partitions 0..numPartitions-1 + * @param prefix the prefix that we have already checked against + * @param maxDepth the maximum depth we will build a trie for + * @return the trie node that will divide the splits correctly + */ + private static TrieNode buildTrie(Text[] splits, int lower, int upper, + Text prefix, int maxDepth) { + int depth = prefix.getLength(); + if (depth >= maxDepth || lower == upper) { + return new LeafTrieNode(depth, splits, lower, upper); + } + InnerTrieNode result = new InnerTrieNode(depth); + Text trial = new Text(prefix); + // append an extra byte on to the prefix + trial.append(new byte[1], 0, 1); + int currentBound = lower; + for(int ch = 0; ch < 255; ++ch) { + trial.getBytes()[depth] = (byte) (ch + 1); + lower = currentBound; + while (currentBound < upper) { + if (splits[currentBound].compareTo(trial) >= 0) { + break; + } + currentBound += 1; + } + trial.getBytes()[depth] = (byte) ch; + result.child[ch] = buildTrie(splits, lower, currentBound, trial, + maxDepth); + } + // pick up the rest + trial.getBytes()[depth] = 127; + result.child[255] = buildTrie(splits, currentBound, upper, trial, + maxDepth); + return result; + } + + public void configure(JobConf job) { + try { + FileSystem fs = FileSystem.getLocal(job); + Path partFile = new Path(TeraInputFormat.PARTITION_FILENAME); + splitPoints = readPartitions(fs, partFile, job); + trie = buildTrie(splitPoints, 0, splitPoints.length, new Text(), 2); + } catch (IOException ie) { + throw new IllegalArgumentException("can't read paritions file", ie); + } + } + + public TotalOrderPartitioner() { + } + + public int getPartition(Text key, Text value, int numPartitions) { + return trie.findPartition(key); + } + + } + + public int run(String[] args) throws Exception { + LOG.info("starting"); + JobConf job = (JobConf) getConf(); + Path inputDir = new Path(args[0]); + inputDir = inputDir.makeQualified(inputDir.getFileSystem(job)); + Path partitionFile = new Path(inputDir, TeraInputFormat.PARTITION_FILENAME); + URI partitionUri = new URI(partitionFile.toString() + + "#" + TeraInputFormat.PARTITION_FILENAME); + TeraInputFormat.setInputPaths(job, new Path(args[0])); + FileOutputFormat.setOutputPath(job, new Path(args[1])); + job.setJobName("TeraSort"); + job.setJarByClass(TeraSort.class); + job.setOutputKeyClass(Text.class); + job.setOutputValueClass(Text.class); + job.setInputFormat(TeraInputFormat.class); + job.setOutputFormat(TeraOutputFormat.class); + job.setPartitionerClass(TotalOrderPartitioner.class); + TeraInputFormat.writePartitionFile(job, partitionFile); + DistributedCache.addCacheFile(partitionUri, job); + DistributedCache.createSymlink(job); + job.setInt("dfs.replication", 1); + TeraOutputFormat.setFinalSync(job, true); + JobClient.runJob(job); + LOG.info("done"); + return 0; + } + + /** + * @param args + */ + public static void main(String[] args) throws Exception { + int res = ToolRunner.run(new JobConf(), new TeraSort(), args); + System.exit(res); + } + +} diff --git a/src/examples/org/apache/hadoop/examples/terasort/TeraValidate.java b/src/examples/org/apache/hadoop/examples/terasort/TeraValidate.java new file mode 100644 index 0000000..dac3fc1 --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/terasort/TeraValidate.java @@ -0,0 +1,157 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.examples.terasort; + +import java.io.IOException; +import java.util.Iterator; + +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.FileOutputFormat; +import org.apache.hadoop.mapred.FileSplit; +import org.apache.hadoop.mapred.JobClient; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.MapReduceBase; +import org.apache.hadoop.mapred.Mapper; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.Reducer; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; + +/** + * Generate 1 mapper per a file that checks to make sure the keys + * are sorted within each file. The mapper also generates + * "$file:begin", first key and "$file:end", last key. The reduce verifies that + * all of the start/end items are in order. + * Any output from the reduce is problem report. + *

+ * To run the program: + * bin/hadoop jar hadoop-*-examples.jar teravalidate out-dir report-dir + *

+ * If there is any output, something is wrong and the output of the reduce + * will have the problem report. + */ +public class TeraValidate extends Configured implements Tool { + private static final Text error = new Text("error"); + + static class ValidateMapper extends MapReduceBase + implements Mapper { + private Text lastKey; + private OutputCollector output; + private String filename; + + /** + * Get the final part of the input name + * @param split the input split + * @return the "part-00000" for the input + */ + private String getFilename(FileSplit split) { + return split.getPath().getName(); + } + + public void map(Text key, Text value, OutputCollector output, + Reporter reporter) throws IOException { + if (lastKey == null) { + filename = getFilename((FileSplit) reporter.getInputSplit()); + output.collect(new Text(filename + ":begin"), key); + lastKey = new Text(); + this.output = output; + } else { + if (key.compareTo(lastKey) < 0) { + output.collect(error, new Text("misorder in " + filename + + " last: '" + lastKey + + "' current: '" + key + "'")); + } + } + lastKey.set(key); + } + + public void close() throws IOException { + if (lastKey != null) { + output.collect(new Text(filename + ":end"), lastKey); + } + } + } + + /** + * Check the boundaries between the output files by making sure that the + * boundary keys are always increasing. + * Also passes any error reports along intact. + */ + static class ValidateReducer extends MapReduceBase + implements Reducer { + private boolean firstKey = true; + private Text lastKey = new Text(); + private Text lastValue = new Text(); + public void reduce(Text key, Iterator values, + OutputCollector output, + Reporter reporter) throws IOException { + if (error.equals(key)) { + while(values.hasNext()) { + output.collect(key, values.next()); + } + } else { + Text value = values.next(); + if (firstKey) { + firstKey = false; + } else { + if (value.compareTo(lastValue) < 0) { + output.collect(error, + new Text("misordered keys last: " + + lastKey + " '" + lastValue + + "' current: " + key + " '" + value + "'")); + } + } + lastKey.set(key); + lastValue.set(value); + } + } + + } + + public int run(String[] args) throws Exception { + JobConf job = (JobConf) getConf(); + TeraInputFormat.setInputPaths(job, new Path(args[0])); + FileOutputFormat.setOutputPath(job, new Path(args[1])); + job.setJobName("TeraValidate"); + job.setJarByClass(TeraValidate.class); + job.setMapperClass(ValidateMapper.class); + job.setReducerClass(ValidateReducer.class); + job.setOutputKeyClass(Text.class); + job.setOutputValueClass(Text.class); + // force a single reducer + job.setNumReduceTasks(1); + // force a single split + job.setLong("mapred.min.split.size", Long.MAX_VALUE); + job.setInputFormat(TeraInputFormat.class); + JobClient.runJob(job); + return 0; + } + + /** + * @param args + */ + public static void main(String[] args) throws Exception { + int res = ToolRunner.run(new JobConf(), new TeraValidate(), args); + System.exit(res); + } + +} diff --git a/src/examples/org/apache/hadoop/examples/terasort/job_history_summary.py b/src/examples/org/apache/hadoop/examples/terasort/job_history_summary.py new file mode 100644 index 0000000..70725f8 --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/terasort/job_history_summary.py @@ -0,0 +1,100 @@ +#!/usr/bin/env python +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re +import sys + +pat = re.compile('(?P[^=]+)="(?P[^"]*)" *') +counterPat = re.compile('(?P[^:]+):(?P[^,]*),?') + +def parse(tail): + result = {} + for n,v in re.findall(pat, tail): + result[n] = v + return result + +mapStartTime = {} +mapEndTime = {} +reduceStartTime = {} +reduceShuffleTime = {} +reduceSortTime = {} +reduceEndTime = {} +reduceBytes = {} + +for line in sys.stdin: + words = line.split(" ",1) + event = words[0] + attrs = parse(words[1]) + if event == 'MapAttempt': + if attrs.has_key("START_TIME"): + mapStartTime[attrs["TASKID"]] = int(attrs["START_TIME"])/1000 + elif attrs.has_key("FINISH_TIME"): + mapEndTime[attrs["TASKID"]] = int(attrs["FINISH_TIME"])/1000 + elif event == 'ReduceAttempt': + if attrs.has_key("START_TIME"): + reduceStartTime[attrs["TASKID"]] = int(attrs["START_TIME"]) / 1000 + elif attrs.has_key("FINISH_TIME"): + reduceShuffleTime[attrs["TASKID"]] = int(attrs["SHUFFLE_FINISHED"])/1000 + reduceSortTime[attrs["TASKID"]] = int(attrs["SORT_FINISHED"])/1000 + reduceEndTime[attrs["TASKID"]] = int(attrs["FINISH_TIME"])/1000 + elif event == 'Task': + if attrs["TASK_TYPE"] == "REDUCE" and attrs.has_key("COUNTERS"): + for n,v in re.findall(counterPat, attrs["COUNTERS"]): + if n == "File Systems.HDFS bytes written": + reduceBytes[attrs["TASKID"]] = int(v) + +runningMaps = {} +shufflingReduces = {} +sortingReduces = {} +runningReduces = {} +startTime = min(reduce(min, mapStartTime.values()), + reduce(min, reduceStartTime.values())) +endTime = max(reduce(max, mapEndTime.values()), + reduce(max, reduceEndTime.values())) + +reduces = reduceBytes.keys() +reduces.sort() + +print "Name reduce-output-bytes shuffle-finish reduce-finish" +for r in reduces: + print r, reduceBytes[r], reduceShuffleTime[r] - startTime, + print reduceEndTime[r] - startTime + +print + +for t in range(startTime, endTime): + runningMaps[t] = 0 + shufflingReduces[t] = 0 + sortingReduces[t] = 0 + runningReduces[t] = 0 + +for map in mapStartTime.keys(): + for t in range(mapStartTime[map], mapEndTime[map]): + runningMaps[t] += 1 +for reduce in reduceStartTime.keys(): + for t in range(reduceStartTime[reduce], reduceShuffleTime[reduce]): + shufflingReduces[t] += 1 + for t in range(reduceShuffleTime[reduce], reduceSortTime[reduce]): + sortingReduces[t] += 1 + for t in range(reduceSortTime[reduce], reduceEndTime[reduce]): + runningReduces[t] += 1 + +print "time maps shuffle merge reduce" +for t in range(startTime, endTime): + print t - startTime, runningMaps[t], shufflingReduces[t], sortingReduces[t], + print runningReduces[t] diff --git a/src/examples/org/apache/hadoop/examples/terasort/package.html b/src/examples/org/apache/hadoop/examples/terasort/package.html new file mode 100644 index 0000000..b7b36ec --- /dev/null +++ b/src/examples/org/apache/hadoop/examples/terasort/package.html @@ -0,0 +1,113 @@ + + + + + +This 
package consists of 3 map/reduce applications for Hadoop to +compete in the annual terabyte sort +competition. + +

    +
  • TeraGen is a map/reduce program to generate the data. +
  • TeraSort samples the input data and uses map/reduce to + sort the data into a total order. +
  • TeraValidate is a map/reduce program that validates that the + output is sorted (see the end-to-end example after this list). +
+ +
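(Putting the three programs together, using the commands documented with each class and the placeholder directory names from those javadocs, a full run looks like this.)

    bin/hadoop jar hadoop-*-examples.jar teragen 10000000000 in-dir
    bin/hadoop jar hadoop-*-examples.jar terasort in-dir out-dir
    bin/hadoop jar hadoop-*-examples.jar teravalidate out-dir report-dir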

+ +TeraGen generates output data that is byte-for-byte +equivalent to the C version, including the newlines and specific +keys. It divides the desired number of rows by the desired number of +tasks and assigns ranges of rows to each map. The map jumps the random +number generator to the correct value for the first row and generates +the following rows. + +
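(The row division is plain integer arithmetic; the following is a condensed sketch of TeraGen's RangeInputFormat.getSplits as it appears earlier in this patch, with the surrounding class omitted.)

    // Give each of the first numSplits-1 maps an equal range of rows;
    // the last map takes whatever remainder is left.
    long rowsPerSplit = totalRows / numSplits;
    long currentRow = 0;
    for (int split = 0; split < numSplits - 1; ++split) {
      splits[split] = new RangeInputSplit(currentRow, rowsPerSplit);
      currentRow += rowsPerSplit;
    }
    splits[numSplits - 1] = new RangeInputSplit(currentRow, totalRows - currentRow);
    // Each map then jumps the generator with new RandomGenerator(firstRow * 3),
    // since three random values are consumed per row.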

+ +TeraSort is a standard map/reduce sort, except for a custom +partitioner that uses a sorted list of N-1 sampled keys that define +the key range for each reduce. In particular, all keys such that +sample[i-1] <= key < sample[i] are sent to reduce +i. This guarantees that the outputs of reduce i are all +less than the outputs of reduce i+1. To speed up the +partitioning, the partitioner builds a two-level trie that quickly +indexes into the list of sample keys based on the first two bytes of +the key. TeraSort generates the sample keys by sampling the input +before the job is submitted and writing the list of keys into HDFS. +The input and output formats, which are used by all 3 applications, +read and write the text files in the right format. The output of the +reduce has replication set to 1, instead of the default 3, because the +contest does not require that the output data be replicated onto +multiple nodes. + +
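(Ignoring the trie, the partition lookup amounts to finding the first sample key that is strictly greater than the record key; the helper below is a simplified sketch, not the code in TeraSort.java.)

    // splitPoints holds the N-1 sampled keys in sorted order.
    int getPartition(Text key, Text[] splitPoints) {
      for (int i = 0; i < splitPoints.length; ++i) {
        if (key.compareTo(splitPoints[i]) < 0) {
          return i;                    // sample[i-1] <= key < sample[i]
        }
      }
      return splitPoints.length;       // key >= last sample: last reduce
    }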

+ +TeraValidate ensures that the output is globally sorted. It +creates one map per file in the output directory, and each map ensures that +each key is greater than or equal to the previous one. The map also generates +records with the first and last keys of the file, and the reduce +ensures that the first key of file i is greater than the last key of +file i-1. Any problems are reported as output of the reduce with the +keys that are out of order. + +
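(Condensed from the ValidateMapper shown earlier in this patch, the per-file check is essentially the following; close() later emits the file's last key so the single reducer can compare the boundaries of adjacent files.)

    if (lastKey == null) {                    // first record of this file
      output.collect(new Text(filename + ":begin"), key);
      lastKey = new Text();
    } else if (key.compareTo(lastKey) < 0) {  // key sorts before its predecessor
      output.collect(error, new Text("misorder in " + filename));
    }
    lastKey.set(key);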

+ +In May 2008, Owen O'Malley ran this code on a 910-node cluster and +sorted the 10 billion records (1 TB) in 209 seconds (3.48 minutes) to +win the annual general-purpose (Daytona) +terabyte sort +benchmark. + +

+ +The cluster statistics were: +

    +
  • 910 nodes +
  • 4 dual-core Xeons @ 2.0 GHz per node +
  • 4 SATA disks per node +
  • 8 GB RAM per node +
  • 1 gigabit ethernet on each node +
  • 40 nodes per rack +
  • 8 gigabit ethernet uplinks from each rack to the core +
  • Red Hat Enterprise Linux Server Release 5.1 (kernel 2.6.18) +
  • Sun Java JDK 1.6.0_05-b13 +
+ +

+ +The test was on Hadoop trunk (pre-0.18) patched with HADOOP-3443 +and HADOOP-3446, +which were required to remove intermediate writes to disk. +TeraGen used +1800 tasks to generate a total of 10 billion rows in HDFS, with a +block size of 1024 MB. +TeraSort was configured with 1800 maps and 1800 reduces, and +io.sort.mb, +io.sort.factor, fs.inmemory.size.mb, and task heap size +sufficient that transient data was never spilled to disk, other at the +end of the map. The sampler looked at 100,000 keys to determine the +reduce boundaries, which lead to imperfect balancing with reduce +outputs ranging from 337 MB to 872 MB. + + + diff --git a/src/examples/pipes/.autom4te.cfg b/src/examples/pipes/.autom4te.cfg new file mode 100644 index 0000000..d21d1c9 --- /dev/null +++ b/src/examples/pipes/.autom4te.cfg @@ -0,0 +1,42 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +# autom4te configuration for hadoop utils library +# + +begin-language: "Autoheader-preselections" +args: --no-cache +end-language: "Autoheader-preselections" + +begin-language: "Automake-preselections" +args: --no-cache +end-language: "Automake-preselections" + +begin-language: "Autoreconf-preselections" +args: --no-cache +end-language: "Autoreconf-preselections" + +begin-language: "Autoconf-without-aclocal-m4" +args: --no-cache +end-language: "Autoconf-without-aclocal-m4" + +begin-language: "Autoconf" +args: --no-cache +end-language: "Autoconf" + diff --git a/src/examples/pipes/Makefile.am b/src/examples/pipes/Makefile.am new file mode 100644 index 0000000..731ab1e --- /dev/null +++ b/src/examples/pipes/Makefile.am @@ -0,0 +1,36 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ACLOCAL_AMFLAGS = -I ../../c++/utils/m4 +AM_CXXFLAGS=-Wall -I$(HADOOP_UTILS_PREFIX)/include \ + -I$(HADOOP_PIPES_PREFIX)/include +LDADD=-L$(HADOOP_UTILS_PREFIX)/lib -L$(HADOOP_PIPES_PREFIX)/lib \ + -lhadooppipes -lhadooputils + +bin_PROGRAMS= wordcount-simple wordcount-part wordcount-nopipe pipes-sort + +# Define the sources for each program +wordcount_simple_SOURCES = \ + impl/wordcount-simple.cc + +wordcount_part_SOURCES = \ + impl/wordcount-part.cc + +wordcount_nopipe_SOURCES = \ + impl/wordcount-nopipe.cc + +pipes_sort_SOURCES = \ + impl/sort.cc + diff --git a/src/examples/pipes/Makefile.in b/src/examples/pipes/Makefile.in new file mode 100644 index 0000000..f377fdb --- /dev/null +++ b/src/examples/pipes/Makefile.in @@ -0,0 +1,535 @@ +# Makefile.in generated by automake 1.9.5 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005 Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +SOURCES = $(pipes_sort_SOURCES) $(wordcount_nopipe_SOURCES) $(wordcount_part_SOURCES) $(wordcount_simple_SOURCES) + +srcdir = @srcdir@ +top_srcdir = @top_srcdir@ +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +top_builddir = . +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +INSTALL = @INSTALL@ +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +bin_PROGRAMS = wordcount-simple$(EXEEXT) wordcount-part$(EXEEXT) \ + wordcount-nopipe$(EXEEXT) pipes-sort$(EXEEXT) +DIST_COMMON = config.guess config.sub $(srcdir)/Makefile.in \ + $(srcdir)/Makefile.am $(top_srcdir)/configure \ + $(am__configure_deps) $(top_srcdir)/impl/config.h.in depcomp \ + ltmain.sh config.guess config.sub +subdir = . 
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = \ + $(top_srcdir)/../../c++/utils/m4/hadoop_utils.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ + configure.lineno configure.status.lineno +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/impl/config.h +CONFIG_CLEAN_FILES = +am__installdirs = "$(DESTDIR)$(bindir)" +binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) +PROGRAMS = $(bin_PROGRAMS) +am__dirstamp = $(am__leading_dot)dirstamp +am_pipes_sort_OBJECTS = impl/sort.$(OBJEXT) +pipes_sort_OBJECTS = $(am_pipes_sort_OBJECTS) +pipes_sort_LDADD = $(LDADD) +pipes_sort_DEPENDENCIES = +am_wordcount_nopipe_OBJECTS = impl/wordcount-nopipe.$(OBJEXT) +wordcount_nopipe_OBJECTS = $(am_wordcount_nopipe_OBJECTS) +wordcount_nopipe_LDADD = $(LDADD) +wordcount_nopipe_DEPENDENCIES = +am_wordcount_part_OBJECTS = impl/wordcount-part.$(OBJEXT) +wordcount_part_OBJECTS = $(am_wordcount_part_OBJECTS) +wordcount_part_LDADD = $(LDADD) +wordcount_part_DEPENDENCIES = +am_wordcount_simple_OBJECTS = impl/wordcount-simple.$(OBJEXT) +wordcount_simple_OBJECTS = $(am_wordcount_simple_OBJECTS) +wordcount_simple_LDADD = $(LDADD) +wordcount_simple_DEPENDENCIES = +DEFAULT_INCLUDES = -I. -I$(srcdir) -I$(top_builddir)/impl +depcomp = $(SHELL) $(top_srcdir)/depcomp +am__depfiles_maybe = depfiles +CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) +LTCXXCOMPILE = $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) \ + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CXXFLAGS) $(CXXFLAGS) +CXXLD = $(CXX) +CXXLINK = $(LIBTOOL) --tag=CXX --mode=link $(CXXLD) $(AM_CXXFLAGS) \ + $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ +SOURCES = $(pipes_sort_SOURCES) $(wordcount_nopipe_SOURCES) \ + $(wordcount_part_SOURCES) $(wordcount_simple_SOURCES) +ETAGS = etags +CTAGS = ctags +ACLOCAL = @ACLOCAL@ +AMDEP_FALSE = @AMDEP_FALSE@ +AMDEP_TRUE = @AMDEP_TRUE@ +AMTAR = @AMTAR@ +AR = @AR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO = @ECHO@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +F77 = @F77@ +FFLAGS = @FFLAGS@ +HADOOP_PIPES_PREFIX = @HADOOP_PIPES_PREFIX@ +HADOOP_UTILS_PREFIX = @HADOOP_UTILS_PREFIX@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAKEINFO = @MAKEINFO@ +OBJEXT = @OBJEXT@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +RANLIB = @RANLIB@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +VERSION = @VERSION@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_F77 = @ac_ct_F77@ +ac_ct_RANLIB = @ac_ct_RANLIB@ +ac_ct_STRIP = @ac_ct_STRIP@ +am__fastdepCC_FALSE = @am__fastdepCC_FALSE@ +am__fastdepCC_TRUE = @am__fastdepCC_TRUE@ 
+am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@ +am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +datadir = @datadir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +ACLOCAL_AMFLAGS = -I ../../c++/utils/m4 +AM_CXXFLAGS = -Wall -I$(HADOOP_UTILS_PREFIX)/include \ + -I$(HADOOP_PIPES_PREFIX)/include + +LDADD = -L$(HADOOP_UTILS_PREFIX)/lib -L$(HADOOP_PIPES_PREFIX)/lib \ + -lhadooppipes -lhadooputils + + +# Define the sources for each program +wordcount_simple_SOURCES = \ + impl/wordcount-simple.cc + +wordcount_part_SOURCES = \ + impl/wordcount-part.cc + +wordcount_nopipe_SOURCES = \ + impl/wordcount-nopipe.cc + +pipes_sort_SOURCES = \ + impl/sort.cc + +all: all-am + +.SUFFIXES: +.SUFFIXES: .cc .lo .o .obj +am--refresh: + @: +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + echo ' cd $(srcdir) && $(AUTOMAKE) --foreign '; \ + cd $(srcdir) && $(AUTOMAKE) --foreign \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --foreign Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + echo ' $(SHELL) ./config.status'; \ + $(SHELL) ./config.status;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + $(SHELL) ./config.status --recheck + +$(top_srcdir)/configure: $(am__configure_deps) + cd $(srcdir) && $(AUTOCONF) +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + cd $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) + +impl/config.h: impl/stamp-h1 + @if test ! 
-f $@; then \ + rm -f impl/stamp-h1; \ + $(MAKE) impl/stamp-h1; \ + else :; fi + +impl/stamp-h1: $(top_srcdir)/impl/config.h.in $(top_builddir)/config.status + @rm -f impl/stamp-h1 + cd $(top_builddir) && $(SHELL) ./config.status impl/config.h +$(top_srcdir)/impl/config.h.in: $(am__configure_deps) + cd $(top_srcdir) && $(AUTOHEADER) + rm -f impl/stamp-h1 + touch $@ + +distclean-hdr: + -rm -f impl/config.h impl/stamp-h1 +install-binPROGRAMS: $(bin_PROGRAMS) + @$(NORMAL_INSTALL) + test -z "$(bindir)" || $(mkdir_p) "$(DESTDIR)$(bindir)" + @list='$(bin_PROGRAMS)'; for p in $$list; do \ + p1=`echo $$p|sed 's/$(EXEEXT)$$//'`; \ + if test -f $$p \ + || test -f $$p1 \ + ; then \ + f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ + else :; fi; \ + done + +uninstall-binPROGRAMS: + @$(NORMAL_UNINSTALL) + @list='$(bin_PROGRAMS)'; for p in $$list; do \ + f=`echo "$$p" | sed 's,^.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/'`; \ + echo " rm -f '$(DESTDIR)$(bindir)/$$f'"; \ + rm -f "$(DESTDIR)$(bindir)/$$f"; \ + done + +clean-binPROGRAMS: + @list='$(bin_PROGRAMS)'; for p in $$list; do \ + f=`echo $$p|sed 's/$(EXEEXT)$$//'`; \ + echo " rm -f $$p $$f"; \ + rm -f $$p $$f ; \ + done +impl/$(am__dirstamp): + @$(mkdir_p) impl + @: > impl/$(am__dirstamp) +impl/$(DEPDIR)/$(am__dirstamp): + @$(mkdir_p) impl/$(DEPDIR) + @: > impl/$(DEPDIR)/$(am__dirstamp) +impl/sort.$(OBJEXT): impl/$(am__dirstamp) \ + impl/$(DEPDIR)/$(am__dirstamp) +pipes-sort$(EXEEXT): $(pipes_sort_OBJECTS) $(pipes_sort_DEPENDENCIES) + @rm -f pipes-sort$(EXEEXT) + $(CXXLINK) $(pipes_sort_LDFLAGS) $(pipes_sort_OBJECTS) $(pipes_sort_LDADD) $(LIBS) +impl/wordcount-nopipe.$(OBJEXT): impl/$(am__dirstamp) \ + impl/$(DEPDIR)/$(am__dirstamp) +wordcount-nopipe$(EXEEXT): $(wordcount_nopipe_OBJECTS) $(wordcount_nopipe_DEPENDENCIES) + @rm -f wordcount-nopipe$(EXEEXT) + $(CXXLINK) $(wordcount_nopipe_LDFLAGS) $(wordcount_nopipe_OBJECTS) $(wordcount_nopipe_LDADD) $(LIBS) +impl/wordcount-part.$(OBJEXT): impl/$(am__dirstamp) \ + impl/$(DEPDIR)/$(am__dirstamp) +wordcount-part$(EXEEXT): $(wordcount_part_OBJECTS) $(wordcount_part_DEPENDENCIES) + @rm -f wordcount-part$(EXEEXT) + $(CXXLINK) $(wordcount_part_LDFLAGS) $(wordcount_part_OBJECTS) $(wordcount_part_LDADD) $(LIBS) +impl/wordcount-simple.$(OBJEXT): impl/$(am__dirstamp) \ + impl/$(DEPDIR)/$(am__dirstamp) +wordcount-simple$(EXEEXT): $(wordcount_simple_OBJECTS) $(wordcount_simple_DEPENDENCIES) + @rm -f wordcount-simple$(EXEEXT) + $(CXXLINK) $(wordcount_simple_LDFLAGS) $(wordcount_simple_OBJECTS) $(wordcount_simple_LDADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + -rm -f impl/sort.$(OBJEXT) + -rm -f impl/wordcount-nopipe.$(OBJEXT) + -rm -f impl/wordcount-part.$(OBJEXT) + -rm -f impl/wordcount-simple.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@impl/$(DEPDIR)/sort.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@impl/$(DEPDIR)/wordcount-nopipe.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@impl/$(DEPDIR)/wordcount-part.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@impl/$(DEPDIR)/wordcount-simple.Po@am__quote@ + +.cc.o: +@am__fastdepCXX_TRUE@ depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`; \ +@am__fastdepCXX_TRUE@ if $(CXXCOMPILE) -MT $@ -MD -MP -MF "$$depbase.Tpo" -c -o $@ $<; \ 
+@am__fastdepCXX_TRUE@ then mv -f "$$depbase.Tpo" "$$depbase.Po"; else rm -f "$$depbase.Tpo"; exit 1; fi +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< + +.cc.obj: +@am__fastdepCXX_TRUE@ depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`; \ +@am__fastdepCXX_TRUE@ if $(CXXCOMPILE) -MT $@ -MD -MP -MF "$$depbase.Tpo" -c -o $@ `$(CYGPATH_W) '$<'`; \ +@am__fastdepCXX_TRUE@ then mv -f "$$depbase.Tpo" "$$depbase.Po"; else rm -f "$$depbase.Tpo"; exit 1; fi +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` + +.cc.lo: +@am__fastdepCXX_TRUE@ depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`; \ +@am__fastdepCXX_TRUE@ if $(LTCXXCOMPILE) -MT $@ -MD -MP -MF "$$depbase.Tpo" -c -o $@ $<; \ +@am__fastdepCXX_TRUE@ then mv -f "$$depbase.Tpo" "$$depbase.Plo"; else rm -f "$$depbase.Tpo"; exit 1; fi +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +distclean-libtool: + -rm -f libtool +uninstall-info-am: + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$tags $$unique; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + test -z "$(CTAGS_ARGS)$$tags$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$tags $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && cd $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) $$here + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags +check-am: all-am +check: check-am +all-am: Makefile $(PROGRAMS) +installdirs: + for dir in "$(DESTDIR)$(bindir)"; do \ + test -z "$$dir" || $(mkdir_p) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) 
INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -rm -f impl/$(DEPDIR)/$(am__dirstamp) + -rm -f impl/$(am__dirstamp) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." +clean: clean-am + +clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -f $(am__CONFIG_DISTCLEAN_FILES) + -rm -rf impl/$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-hdr distclean-libtool distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +info: info-am + +info-am: + +install-data-am: + +install-exec-am: install-binPROGRAMS + +install-info: install-info-am + +install-man: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f $(am__CONFIG_DISTCLEAN_FILES) + -rm -rf $(top_srcdir)/autom4te.cache + -rm -rf impl/$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-binPROGRAMS uninstall-info-am + +.PHONY: CTAGS GTAGS all all-am am--refresh check check-am clean \ + clean-binPROGRAMS clean-generic clean-libtool ctags distclean \ + distclean-compile distclean-generic distclean-hdr \ + distclean-libtool distclean-tags dvi dvi-am html html-am info \ + info-am install install-am install-binPROGRAMS install-data \ + install-data-am install-exec install-exec-am install-info \ + install-info-am install-man install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-compile \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags uninstall uninstall-am uninstall-binPROGRAMS \ + uninstall-info-am + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/src/examples/pipes/README.txt b/src/examples/pipes/README.txt new file mode 100644 index 0000000..4685304 --- /dev/null +++ b/src/examples/pipes/README.txt @@ -0,0 +1,16 @@ +To run the examples, first compile them: + +% ant -Dcompile.c++=yes examples + +and then copy the binaries to dfs: + +% bin/hadoop fs -put build/c++-examples/Linux-i386-32/bin /examples/bin + +create an input directory with text files: + +% bin/hadoop fs -put my-data in-dir + +and run the word count example: + +% bin/hadoop pipes -conf src/examples/pipes/conf/word.xml \ + -input in-dir -output out-dir diff --git a/src/examples/pipes/aclocal.m4 b/src/examples/pipes/aclocal.m4 new file mode 100644 index 0000000..ee69384 --- /dev/null +++ b/src/examples/pipes/aclocal.m4 @@ -0,0 +1,7011 @@ +# generated automatically by aclocal 1.9.5 -*- Autoconf -*- + +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, +# 2005 Free Software Foundation, Inc. +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. 
+ +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +# libtool.m4 - Configure libtool for the host system. -*-Autoconf-*- + +# serial 47 AC_PROG_LIBTOOL + + +# AC_PROVIDE_IFELSE(MACRO-NAME, IF-PROVIDED, IF-NOT-PROVIDED) +# ----------------------------------------------------------- +# If this macro is not defined by Autoconf, define it here. +m4_ifdef([AC_PROVIDE_IFELSE], + [], + [m4_define([AC_PROVIDE_IFELSE], + [m4_ifdef([AC_PROVIDE_$1], + [$2], [$3])])]) + + +# AC_PROG_LIBTOOL +# --------------- +AC_DEFUN([AC_PROG_LIBTOOL], +[AC_REQUIRE([_AC_PROG_LIBTOOL])dnl +dnl If AC_PROG_CXX has already been expanded, run AC_LIBTOOL_CXX +dnl immediately, otherwise, hook it in at the end of AC_PROG_CXX. + AC_PROVIDE_IFELSE([AC_PROG_CXX], + [AC_LIBTOOL_CXX], + [define([AC_PROG_CXX], defn([AC_PROG_CXX])[AC_LIBTOOL_CXX + ])]) +dnl And a similar setup for Fortran 77 support + AC_PROVIDE_IFELSE([AC_PROG_F77], + [AC_LIBTOOL_F77], + [define([AC_PROG_F77], defn([AC_PROG_F77])[AC_LIBTOOL_F77 +])]) + +dnl Quote A][M_PROG_GCJ so that aclocal doesn't bring it in needlessly. +dnl If either AC_PROG_GCJ or A][M_PROG_GCJ have already been expanded, run +dnl AC_LIBTOOL_GCJ immediately, otherwise, hook it in at the end of both. + AC_PROVIDE_IFELSE([AC_PROG_GCJ], + [AC_LIBTOOL_GCJ], + [AC_PROVIDE_IFELSE([A][M_PROG_GCJ], + [AC_LIBTOOL_GCJ], + [AC_PROVIDE_IFELSE([LT_AC_PROG_GCJ], + [AC_LIBTOOL_GCJ], + [ifdef([AC_PROG_GCJ], + [define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[AC_LIBTOOL_GCJ])]) + ifdef([A][M_PROG_GCJ], + [define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[AC_LIBTOOL_GCJ])]) + ifdef([LT_AC_PROG_GCJ], + [define([LT_AC_PROG_GCJ], + defn([LT_AC_PROG_GCJ])[AC_LIBTOOL_GCJ])])])]) +])])# AC_PROG_LIBTOOL + + +# _AC_PROG_LIBTOOL +# ---------------- +AC_DEFUN([_AC_PROG_LIBTOOL], +[AC_REQUIRE([AC_LIBTOOL_SETUP])dnl +AC_BEFORE([$0],[AC_LIBTOOL_CXX])dnl +AC_BEFORE([$0],[AC_LIBTOOL_F77])dnl +AC_BEFORE([$0],[AC_LIBTOOL_GCJ])dnl + +# This can be used to rebuild libtool when needed +LIBTOOL_DEPS="$ac_aux_dir/ltmain.sh" + +# Always use our own libtool. +LIBTOOL='$(SHELL) $(top_builddir)/libtool' +AC_SUBST(LIBTOOL)dnl + +# Prevent multiple expansion +define([AC_PROG_LIBTOOL], []) +])# _AC_PROG_LIBTOOL + + +# AC_LIBTOOL_SETUP +# ---------------- +AC_DEFUN([AC_LIBTOOL_SETUP], +[AC_PREREQ(2.50)dnl +AC_REQUIRE([AC_ENABLE_SHARED])dnl +AC_REQUIRE([AC_ENABLE_STATIC])dnl +AC_REQUIRE([AC_ENABLE_FAST_INSTALL])dnl +AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_CANONICAL_BUILD])dnl +AC_REQUIRE([AC_PROG_CC])dnl +AC_REQUIRE([AC_PROG_LD])dnl +AC_REQUIRE([AC_PROG_LD_RELOAD_FLAG])dnl +AC_REQUIRE([AC_PROG_NM])dnl + +AC_REQUIRE([AC_PROG_LN_S])dnl +AC_REQUIRE([AC_DEPLIBS_CHECK_METHOD])dnl +# Autoconf 2.13's AC_OBJEXT and AC_EXEEXT macros only works for C compilers! +AC_REQUIRE([AC_OBJEXT])dnl +AC_REQUIRE([AC_EXEEXT])dnl +dnl + +AC_LIBTOOL_SYS_MAX_CMD_LEN +AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE +AC_LIBTOOL_OBJDIR + +AC_REQUIRE([_LT_AC_SYS_COMPILER])dnl +_LT_AC_PROG_ECHO_BACKSLASH + +case $host_os in +aix3*) + # AIX sometimes has problems with the GCC collect2 program. For some + # reason, if we set the COLLECT_NAMES environment variable, the problems + # vanish in a puff of smoke. + if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES + fi + ;; +esac + +# Sed substitution that helps us do robust quoting. 
It backslashifies +# metacharacters that are still active within double-quoted strings. +Xsed='sed -e 1s/^X//' +[sed_quote_subst='s/\([\\"\\`$\\\\]\)/\\\1/g'] + +# Same as above, but do not quote variable references. +[double_quote_subst='s/\([\\"\\`\\\\]\)/\\\1/g'] + +# Sed substitution to delay expansion of an escaped shell variable in a +# double_quote_subst'ed string. +delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' + +# Sed substitution to avoid accidental globbing in evaled expressions +no_glob_subst='s/\*/\\\*/g' + +# Constants: +rm="rm -f" + +# Global variables: +default_ofile=libtool +can_build_shared=yes + +# All known linkers require a `.a' archive for static linking (except M$VC, +# which needs '.lib'). +libext=a +ltmain="$ac_aux_dir/ltmain.sh" +ofile="$default_ofile" +with_gnu_ld="$lt_cv_prog_gnu_ld" + +AC_CHECK_TOOL(AR, ar, false) +AC_CHECK_TOOL(RANLIB, ranlib, :) +AC_CHECK_TOOL(STRIP, strip, :) + +old_CC="$CC" +old_CFLAGS="$CFLAGS" + +# Set sane defaults for various variables +test -z "$AR" && AR=ar +test -z "$AR_FLAGS" && AR_FLAGS=cru +test -z "$AS" && AS=as +test -z "$CC" && CC=cc +test -z "$LTCC" && LTCC=$CC +test -z "$DLLTOOL" && DLLTOOL=dlltool +test -z "$LD" && LD=ld +test -z "$LN_S" && LN_S="ln -s" +test -z "$MAGIC_CMD" && MAGIC_CMD=file +test -z "$NM" && NM=nm +test -z "$SED" && SED=sed +test -z "$OBJDUMP" && OBJDUMP=objdump +test -z "$RANLIB" && RANLIB=: +test -z "$STRIP" && STRIP=: +test -z "$ac_objext" && ac_objext=o + +# Determine commands to create old-style static archives. +old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs$old_deplibs' +old_postinstall_cmds='chmod 644 $oldlib' +old_postuninstall_cmds= + +if test -n "$RANLIB"; then + case $host_os in + openbsd*) + old_postinstall_cmds="\$RANLIB -t \$oldlib~$old_postinstall_cmds" + ;; + *) + old_postinstall_cmds="\$RANLIB \$oldlib~$old_postinstall_cmds" + ;; + esac + old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib" +fi + +_LT_CC_BASENAME([$compiler]) + +# Only perform the check for file, if the check method requires it +case $deplibs_check_method in +file_magic*) + if test "$file_magic_cmd" = '$MAGIC_CMD'; then + AC_PATH_MAGIC + fi + ;; +esac + +AC_PROVIDE_IFELSE([AC_LIBTOOL_DLOPEN], enable_dlopen=yes, enable_dlopen=no) +AC_PROVIDE_IFELSE([AC_LIBTOOL_WIN32_DLL], +enable_win32_dll=yes, enable_win32_dll=no) + +AC_ARG_ENABLE([libtool-lock], + [AC_HELP_STRING([--disable-libtool-lock], + [avoid locking (might break parallel builds)])]) +test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes + +AC_ARG_WITH([pic], + [AC_HELP_STRING([--with-pic], + [try to use only PIC/non-PIC objects @<:@default=use both@:>@])], + [pic_mode="$withval"], + [pic_mode=default]) +test -z "$pic_mode" && pic_mode=default + +# Use C for the default configuration in the libtool script +tagname= +AC_LIBTOOL_LANG_C_CONFIG +_LT_AC_TAGCONFIG +])# AC_LIBTOOL_SETUP + + +# _LT_AC_SYS_COMPILER +# ------------------- +AC_DEFUN([_LT_AC_SYS_COMPILER], +[AC_REQUIRE([AC_PROG_CC])dnl + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# Allow CC to be a program name with arguments. +compiler=$CC +])# _LT_AC_SYS_COMPILER + + +# _LT_CC_BASENAME(CC) +# ------------------- +# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. 
+AC_DEFUN([_LT_CC_BASENAME], +[for cc_temp in $1""; do + case $cc_temp in + compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;; + distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` +]) + + +# _LT_COMPILER_BOILERPLATE +# ------------------------ +# Check for compiler boilerplate output or warnings with +# the simple compiler test code. +AC_DEFUN([_LT_COMPILER_BOILERPLATE], +[ac_outfile=conftest.$ac_objext +printf "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$rm conftest* +])# _LT_COMPILER_BOILERPLATE + + +# _LT_LINKER_BOILERPLATE +# ---------------------- +# Check for linker boilerplate output or warnings with +# the simple link test code. +AC_DEFUN([_LT_LINKER_BOILERPLATE], +[ac_outfile=conftest.$ac_objext +printf "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$rm conftest* +])# _LT_LINKER_BOILERPLATE + + +# _LT_AC_SYS_LIBPATH_AIX +# ---------------------- +# Links a minimal program and checks the executable +# for the system default hardcoded library path. In most cases, +# this is /usr/lib:/lib, but when the MPI compilers are used +# the location of the communication and MPI libs are included too. +# If we don't find anything, use the default library path according +# to the aix ld manual. +AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX], +[AC_LINK_IFELSE(AC_LANG_PROGRAM,[ +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. +if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi],[]) +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi +])# _LT_AC_SYS_LIBPATH_AIX + + +# _LT_AC_SHELL_INIT(ARG) +# ---------------------- +AC_DEFUN([_LT_AC_SHELL_INIT], +[ifdef([AC_DIVERSION_NOTICE], + [AC_DIVERT_PUSH(AC_DIVERSION_NOTICE)], + [AC_DIVERT_PUSH(NOTICE)]) +$1 +AC_DIVERT_POP +])# _LT_AC_SHELL_INIT + + +# _LT_AC_PROG_ECHO_BACKSLASH +# -------------------------- +# Add some code to the start of the generated configure script which +# will find an echo command which doesn't interpret backslashes. +AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH], +[_LT_AC_SHELL_INIT([ +# Check that we are running under the correct shell. +SHELL=${CONFIG_SHELL-/bin/sh} + +case X$ECHO in +X*--fallback-echo) + # Remove one level of quotation (which was required for Make). + ECHO=`echo "$ECHO" | sed 's,\\\\\[$]\\[$]0,'[$]0','` + ;; +esac + +echo=${ECHO-echo} +if test "X[$]1" = X--no-reexec; then + # Discard the --no-reexec flag, and continue. + shift +elif test "X[$]1" = X--fallback-echo; then + # Avoid inline document here, it may be left over + : +elif test "X`($echo '\t') 2>/dev/null`" = 'X\t' ; then + # Yippee, $echo works! + : +else + # Restart under the correct shell. 
+ exec $SHELL "[$]0" --no-reexec ${1+"[$]@"} +fi + +if test "X[$]1" = X--fallback-echo; then + # used as fallback echo + shift + cat </dev/null 2>&1 && unset CDPATH + +if test -z "$ECHO"; then +if test "X${echo_test_string+set}" != Xset; then +# find a string as large as possible, as long as the shell can cope with it + for cmd in 'sed 50q "[$]0"' 'sed 20q "[$]0"' 'sed 10q "[$]0"' 'sed 2q "[$]0"' 'echo test'; do + # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ... + if (echo_test_string="`eval $cmd`") 2>/dev/null && + echo_test_string="`eval $cmd`" && + (test "X$echo_test_string" = "X$echo_test_string") 2>/dev/null + then + break + fi + done +fi + +if test "X`($echo '\t') 2>/dev/null`" = 'X\t' && + echo_testing_string=`($echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + : +else + # The Solaris, AIX, and Digital Unix default echo programs unquote + # backslashes. This makes it impossible to quote backslashes using + # echo "$something" | sed 's/\\/\\\\/g' + # + # So, first we look for a working echo in the user's PATH. + + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for dir in $PATH /usr/ucb; do + IFS="$lt_save_ifs" + if (test -f $dir/echo || test -f $dir/echo$ac_exeext) && + test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' && + echo_testing_string=`($dir/echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + echo="$dir/echo" + break + fi + done + IFS="$lt_save_ifs" + + if test "X$echo" = Xecho; then + # We didn't find a better echo, so look for alternatives. + if test "X`(print -r '\t') 2>/dev/null`" = 'X\t' && + echo_testing_string=`(print -r "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + # This shell has a builtin print -r that does the trick. + echo='print -r' + elif (test -f /bin/ksh || test -f /bin/ksh$ac_exeext) && + test "X$CONFIG_SHELL" != X/bin/ksh; then + # If we have ksh, try running configure again with it. + ORIGINAL_CONFIG_SHELL=${CONFIG_SHELL-/bin/sh} + export ORIGINAL_CONFIG_SHELL + CONFIG_SHELL=/bin/ksh + export CONFIG_SHELL + exec $CONFIG_SHELL "[$]0" --no-reexec ${1+"[$]@"} + else + # Try using printf. + echo='printf %s\n' + if test "X`($echo '\t') 2>/dev/null`" = 'X\t' && + echo_testing_string=`($echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + # Cool, printf works + : + elif echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` && + test "X$echo_testing_string" = 'X\t' && + echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + CONFIG_SHELL=$ORIGINAL_CONFIG_SHELL + export CONFIG_SHELL + SHELL="$CONFIG_SHELL" + export SHELL + echo="$CONFIG_SHELL [$]0 --fallback-echo" + elif echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` && + test "X$echo_testing_string" = 'X\t' && + echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + echo="$CONFIG_SHELL [$]0 --fallback-echo" + else + # maybe with a smaller string... 
+ prev=: + + for cmd in 'echo test' 'sed 2q "[$]0"' 'sed 10q "[$]0"' 'sed 20q "[$]0"' 'sed 50q "[$]0"'; do + if (test "X$echo_test_string" = "X`eval $cmd`") 2>/dev/null + then + break + fi + prev="$cmd" + done + + if test "$prev" != 'sed 50q "[$]0"'; then + echo_test_string=`eval $prev` + export echo_test_string + exec ${ORIGINAL_CONFIG_SHELL-${CONFIG_SHELL-/bin/sh}} "[$]0" ${1+"[$]@"} + else + # Oops. We lost completely, so just stick with echo. + echo=echo + fi + fi + fi + fi +fi +fi + +# Copy echo and quote the copy suitably for passing to libtool from +# the Makefile, instead of quoting the original, which is used later. +ECHO=$echo +if test "X$ECHO" = "X$CONFIG_SHELL [$]0 --fallback-echo"; then + ECHO="$CONFIG_SHELL \\\$\[$]0 --fallback-echo" +fi + +AC_SUBST(ECHO) +])])# _LT_AC_PROG_ECHO_BACKSLASH + + +# _LT_AC_LOCK +# ----------- +AC_DEFUN([_LT_AC_LOCK], +[AC_ARG_ENABLE([libtool-lock], + [AC_HELP_STRING([--disable-libtool-lock], + [avoid locking (might break parallel builds)])]) +test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes + +# Some flags need to be propagated to the compiler or linker for good +# libtool support. +case $host in +ia64-*-hpux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.$ac_objext` in + *ELF-32*) + HPUX_IA64_MODE="32" + ;; + *ELF-64*) + HPUX_IA64_MODE="64" + ;; + esac + fi + rm -rf conftest* + ;; +*-*-irix6*) + # Find out which ABI we are using. + echo '[#]line __oline__ "configure"' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + if test "$lt_cv_prog_gnu_ld" = yes; then + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -melf32bsmip" + ;; + *N32*) + LD="${LD-ld} -melf32bmipn32" + ;; + *64-bit*) + LD="${LD-ld} -melf64bmip" + ;; + esac + else + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -32" + ;; + *N32*) + LD="${LD-ld} -n32" + ;; + *64-bit*) + LD="${LD-ld} -64" + ;; + esac + fi + fi + rm -rf conftest* + ;; + +x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*|s390*-*linux*|sparc*-*linux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case "`/usr/bin/file conftest.o`" in + *32-bit*) + case $host in + x86_64-*linux*) + LD="${LD-ld} -m elf_i386" + ;; + ppc64-*linux*|powerpc64-*linux*) + LD="${LD-ld} -m elf32ppclinux" + ;; + s390x-*linux*) + LD="${LD-ld} -m elf_s390" + ;; + sparc64-*linux*) + LD="${LD-ld} -m elf32_sparc" + ;; + esac + ;; + *64-bit*) + case $host in + x86_64-*linux*) + LD="${LD-ld} -m elf_x86_64" + ;; + ppc*-*linux*|powerpc*-*linux*) + LD="${LD-ld} -m elf64ppc" + ;; + s390*-*linux*) + LD="${LD-ld} -m elf64_s390" + ;; + sparc*-*linux*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; + +*-*-sco3.2v5*) + # On SCO OpenServer 5, we need -belf to get full-featured binaries. 
+ SAVE_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -belf" + AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf, + [AC_LANG_PUSH(C) + AC_TRY_LINK([],[],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no]) + AC_LANG_POP]) + if test x"$lt_cv_cc_needs_belf" != x"yes"; then + # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf + CFLAGS="$SAVE_CFLAGS" + fi + ;; +AC_PROVIDE_IFELSE([AC_LIBTOOL_WIN32_DLL], +[*-*-cygwin* | *-*-mingw* | *-*-pw32*) + AC_CHECK_TOOL(DLLTOOL, dlltool, false) + AC_CHECK_TOOL(AS, as, false) + AC_CHECK_TOOL(OBJDUMP, objdump, false) + ;; + ]) +esac + +need_locks="$enable_libtool_lock" + +])# _LT_AC_LOCK + + +# AC_LIBTOOL_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, +# [OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE]) +# ---------------------------------------------------------------- +# Check whether the given compiler option works +AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], +[AC_REQUIRE([LT_AC_PROG_SED]) +AC_CACHE_CHECK([$1], [$2], + [$2=no + ifelse([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4]) + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$3" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \ + -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:__oline__: $lt_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&AS_MESSAGE_LOG_FD + echo "$as_me:__oline__: \$? = $ac_status" >&AS_MESSAGE_LOG_FD + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $echo "X$_lt_compiler_boilerplate" | $Xsed >conftest.exp + $SED '/^$/d' conftest.err >conftest.er2 + if test ! -s conftest.err || diff conftest.exp conftest.er2 >/dev/null; then + $2=yes + fi + fi + $rm conftest* +]) + +if test x"[$]$2" = xyes; then + ifelse([$5], , :, [$5]) +else + ifelse([$6], , :, [$6]) +fi +])# AC_LIBTOOL_COMPILER_OPTION + + +# AC_LIBTOOL_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, +# [ACTION-SUCCESS], [ACTION-FAILURE]) +# ------------------------------------------------------------ +# Check whether the given compiler option works +AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], +[AC_CACHE_CHECK([$1], [$2], + [$2=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $3" + printf "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. 
+ cat conftest.err 1>&AS_MESSAGE_LOG_FD + $echo "X$_lt_linker_boilerplate" | $Xsed > conftest.exp + $SED '/^$/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + $2=yes + fi + else + $2=yes + fi + fi + $rm conftest* + LDFLAGS="$save_LDFLAGS" +]) + +if test x"[$]$2" = xyes; then + ifelse([$4], , :, [$4]) +else + ifelse([$5], , :, [$5]) +fi +])# AC_LIBTOOL_LINKER_OPTION + + +# AC_LIBTOOL_SYS_MAX_CMD_LEN +# -------------------------- +AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], +[# find the maximum length of command line arguments +AC_MSG_CHECKING([the maximum length of command line arguments]) +AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl + i=0 + teststring="ABCD" + + case $build_os in + msdosdjgpp*) + # On DJGPP, this test can blow up pretty badly due to problems in libc + # (any single argument exceeding 2000 bytes causes a buffer overrun + # during glob expansion). Even if it were fixed, the result of this + # check would be larger than it should be. + lt_cv_sys_max_cmd_len=12288; # 12K is about right + ;; + + gnu*) + # Under GNU Hurd, this test is not required because there is + # no limit to the length of command line arguments. + # Libtool will interpret -1 as no limit whatsoever + lt_cv_sys_max_cmd_len=-1; + ;; + + cygwin* | mingw*) + # On Win9x/ME, this test blows up -- it succeeds, but takes + # about 5 minutes as the teststring grows exponentially. + # Worse, since 9x/ME are not pre-emptively multitasking, + # you end up with a "frozen" computer, even though with patience + # the test eventually succeeds (with a max line length of 256k). + # Instead, let's just punt: use the minimum linelength reported by + # all of the supported platforms: 8192 (on NT/2K/XP). + lt_cv_sys_max_cmd_len=8192; + ;; + + amigaos*) + # On AmigaOS with pdksh, this test takes hours, literally. + # So we just punt and use a minimum line length of 8192. + lt_cv_sys_max_cmd_len=8192; + ;; + + netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) + # This has been around since 386BSD, at least. Likely further. + if test -x /sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` + elif test -x /usr/sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` + else + lt_cv_sys_max_cmd_len=65536 # usable default for *BSD + fi + # And add a safety zone + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + ;; + osf*) + # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure + # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not + # nice to cause kernel panics so lets avoid the loop below. + # First set a reasonable default. + lt_cv_sys_max_cmd_len=16384 + # + if test -x /sbin/sysconfig; then + case `/sbin/sysconfig -q proc exec_disable_arg_limit` in + *1*) lt_cv_sys_max_cmd_len=-1 ;; + esac + fi + ;; + *) + # If test is not a shell built-in, we'll probably end up computing a + # maximum length that is only half of the actual maximum length, but + # we can't tell. 
+ SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} + while (test "X"`$SHELL [$]0 --fallback-echo "X$teststring" 2>/dev/null` \ + = "XX$teststring") >/dev/null 2>&1 && + new_result=`expr "X$teststring" : ".*" 2>&1` && + lt_cv_sys_max_cmd_len=$new_result && + test $i != 17 # 1/2 MB should be enough + do + i=`expr $i + 1` + teststring=$teststring$teststring + done + teststring= + # Add a significant safety factor because C++ compilers can tack on massive + # amounts of additional arguments before passing them to the linker. + # It appears as though 1/2 is a usable value. + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` + ;; + esac +]) +if test -n $lt_cv_sys_max_cmd_len ; then + AC_MSG_RESULT($lt_cv_sys_max_cmd_len) +else + AC_MSG_RESULT(none) +fi +])# AC_LIBTOOL_SYS_MAX_CMD_LEN + + +# _LT_AC_CHECK_DLFCN +# -------------------- +AC_DEFUN([_LT_AC_CHECK_DLFCN], +[AC_CHECK_HEADERS(dlfcn.h)dnl +])# _LT_AC_CHECK_DLFCN + + +# _LT_AC_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE, +# ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING) +# ------------------------------------------------------------------ +AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF], +[AC_REQUIRE([_LT_AC_CHECK_DLFCN])dnl +if test "$cross_compiling" = yes; then : + [$4] +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext < +#endif + +#include + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. */ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +#ifdef __cplusplus +extern "C" void exit (int); +#endif + +void fnord() { int i=42;} +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + /* dlclose (self); */ + } + + exit (status); +}] +EOF + if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) 2>/dev/null + lt_status=$? 
+ case x$lt_status in + x$lt_dlno_uscore) $1 ;; + x$lt_dlneed_uscore) $2 ;; + x$lt_unknown|x*) $3 ;; + esac + else : + # compilation failed + $3 + fi +fi +rm -fr conftest* +])# _LT_AC_TRY_DLOPEN_SELF + + +# AC_LIBTOOL_DLOPEN_SELF +# ------------------- +AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], +[AC_REQUIRE([_LT_AC_CHECK_DLFCN])dnl +if test "x$enable_dlopen" != xyes; then + enable_dlopen=unknown + enable_dlopen_self=unknown + enable_dlopen_self_static=unknown +else + lt_cv_dlopen=no + lt_cv_dlopen_libs= + + case $host_os in + beos*) + lt_cv_dlopen="load_add_on" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ;; + + mingw* | pw32*) + lt_cv_dlopen="LoadLibrary" + lt_cv_dlopen_libs= + ;; + + cygwin*) + lt_cv_dlopen="dlopen" + lt_cv_dlopen_libs= + ;; + + darwin*) + # if libdl is installed we need to link against it + AC_CHECK_LIB([dl], [dlopen], + [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"],[ + lt_cv_dlopen="dyld" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ]) + ;; + + *) + AC_CHECK_FUNC([shl_load], + [lt_cv_dlopen="shl_load"], + [AC_CHECK_LIB([dld], [shl_load], + [lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-dld"], + [AC_CHECK_FUNC([dlopen], + [lt_cv_dlopen="dlopen"], + [AC_CHECK_LIB([dl], [dlopen], + [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"], + [AC_CHECK_LIB([svld], [dlopen], + [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"], + [AC_CHECK_LIB([dld], [dld_link], + [lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-dld"]) + ]) + ]) + ]) + ]) + ]) + ;; + esac + + if test "x$lt_cv_dlopen" != xno; then + enable_dlopen=yes + else + enable_dlopen=no + fi + + case $lt_cv_dlopen in + dlopen) + save_CPPFLAGS="$CPPFLAGS" + test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" + + save_LDFLAGS="$LDFLAGS" + eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" + + save_LIBS="$LIBS" + LIBS="$lt_cv_dlopen_libs $LIBS" + + AC_CACHE_CHECK([whether a program can dlopen itself], + lt_cv_dlopen_self, [dnl + _LT_AC_TRY_DLOPEN_SELF( + lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes, + lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross) + ]) + + if test "x$lt_cv_dlopen_self" = xyes; then + LDFLAGS="$LDFLAGS $link_static_flag" + AC_CACHE_CHECK([whether a statically linked program can dlopen itself], + lt_cv_dlopen_self_static, [dnl + _LT_AC_TRY_DLOPEN_SELF( + lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes, + lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross) + ]) + fi + + CPPFLAGS="$save_CPPFLAGS" + LDFLAGS="$save_LDFLAGS" + LIBS="$save_LIBS" + ;; + esac + + case $lt_cv_dlopen_self in + yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; + *) enable_dlopen_self=unknown ;; + esac + + case $lt_cv_dlopen_self_static in + yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; + *) enable_dlopen_self_static=unknown ;; + esac +fi +])# AC_LIBTOOL_DLOPEN_SELF + + +# AC_LIBTOOL_PROG_CC_C_O([TAGNAME]) +# --------------------------------- +# Check to see if options -c and -o are simultaneously supported by compiler +AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O], +[AC_REQUIRE([_LT_AC_SYS_COMPILER])dnl +AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext], + [_LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1)], + [_LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no + $rm -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. 
+ # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \ + -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:__oline__: $lt_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&AS_MESSAGE_LOG_FD + echo "$as_me:__oline__: \$? = $ac_status" >&AS_MESSAGE_LOG_FD + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $echo "X$_lt_compiler_boilerplate" | $Xsed > out/conftest.exp + $SED '/^$/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.err || diff out/conftest.exp out/conftest.er2 >/dev/null; then + _LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes + fi + fi + chmod u+w . + $rm conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $rm out/ii_files/* && rmdir out/ii_files + $rm out/* && rmdir out + cd .. + rmdir conftest + $rm conftest* +]) +])# AC_LIBTOOL_PROG_CC_C_O + + +# AC_LIBTOOL_SYS_HARD_LINK_LOCKS([TAGNAME]) +# ----------------------------------------- +# Check to see if we can do hard links to lock some files if needed +AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], +[AC_REQUIRE([_LT_AC_LOCK])dnl + +hard_links="nottested" +if test "$_LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1)" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + AC_MSG_CHECKING([if we can lock with hard links]) + hard_links=yes + $rm conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + AC_MSG_RESULT([$hard_links]) + if test "$hard_links" = no; then + AC_MSG_WARN([`$CC' does not support `-c -o', so `make -j' may be unsafe]) + need_locks=warn + fi +else + need_locks=no +fi +])# AC_LIBTOOL_SYS_HARD_LINK_LOCKS + + +# AC_LIBTOOL_OBJDIR +# ----------------- +AC_DEFUN([AC_LIBTOOL_OBJDIR], +[AC_CACHE_CHECK([for objdir], [lt_cv_objdir], +[rm -f .libs 2>/dev/null +mkdir .libs 2>/dev/null +if test -d .libs; then + lt_cv_objdir=.libs +else + # MS-DOS does not allow filenames that begin with a dot. + lt_cv_objdir=_libs +fi +rmdir .libs 2>/dev/null]) +objdir=$lt_cv_objdir +])# AC_LIBTOOL_OBJDIR + + +# AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH([TAGNAME]) +# ---------------------------------------------- +# Check hardcoding attributes. +AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], +[AC_MSG_CHECKING([how to hardcode library paths into programs]) +_LT_AC_TAGVAR(hardcode_action, $1)= +if test -n "$_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)" || \ + test -n "$_LT_AC_TAGVAR(runpath_var, $1)" || \ + test "X$_LT_AC_TAGVAR(hardcode_automatic, $1)" = "Xyes" ; then + + # We can hardcode non-existant directories. + if test "$_LT_AC_TAGVAR(hardcode_direct, $1)" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, $1)" != no && + test "$_LT_AC_TAGVAR(hardcode_minus_L, $1)" != no; then + # Linking always hardcodes the temporary library directory. 
+ _LT_AC_TAGVAR(hardcode_action, $1)=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + _LT_AC_TAGVAR(hardcode_action, $1)=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + _LT_AC_TAGVAR(hardcode_action, $1)=unsupported +fi +AC_MSG_RESULT([$_LT_AC_TAGVAR(hardcode_action, $1)]) + +if test "$_LT_AC_TAGVAR(hardcode_action, $1)" = relink; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi +])# AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH + + +# AC_LIBTOOL_SYS_LIB_STRIP +# ------------------------ +AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP], +[striplib= +old_striplib= +AC_MSG_CHECKING([whether stripping libraries is possible]) +if test -n "$STRIP" && $STRIP -V 2>&1 | grep "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + AC_MSG_RESULT([yes]) +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP" ; then + striplib="$STRIP -x" + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) +fi + ;; + *) + AC_MSG_RESULT([no]) + ;; + esac +fi +])# AC_LIBTOOL_SYS_LIB_STRIP + + +# AC_LIBTOOL_SYS_DYNAMIC_LINKER +# ----------------------------- +# PORTME Fill in your ld.so characteristics +AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER], +[AC_MSG_CHECKING([dynamic linker characteristics]) +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';' >/dev/null ; then + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. 
+ soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix4* | aix5*) + version_type=linux + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[[01]] | aix4.[[01]].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | grep yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. + finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[[45]]*) + version_type=linux + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$host_os in + yes,cygwin* | yes,mingw* | yes,pw32*) + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. 
$dir/'\''\${base_file}'\''i;echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $rm \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" + ;; + mingw*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | [grep ';[c-zC-Z]:/' >/dev/null]; then + # It is most probably a Windows format PATH printed by + # mingw gcc, but we are running on Cygwin. Gcc prints its search + # path with ; separators, and with drive letters. We can handle the + # drive letters (cygwin fileutils understands them), so leave them, + # especially as we might pass files found there to a mingw objdump, + # which wouldn't understand a cygwinified path. Ahh. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + ;; + esac + ;; + + *) + library_names_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext} $libname.lib' + ;; + esac + dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${versuffix}$shared_ext ${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='$(test .$module = .yes && echo .so || echo .dylib)' + # Apple's gcc prints 'gcc -print-search-dirs' doesn't operate the same. 
+ if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"` + else + sys_lib_search_path_spec='/lib /usr/lib /usr/local/lib' + fi + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd1*) + dynamic_linker=no + ;; + +kfreebsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. + objformat=`test -x /usr/bin/objformat && /usr/bin/objformat || echo aout` + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[[01]]* | freebsdelf3.[[01]]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + *) # from 3.2 on + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + esac + ;; + +gnu*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case "$host_cpu" in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555. + postinstall_cmds='chmod 555 $lib' + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be Linux ELF. +linux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. 
+ hardcode_into_libs=yes + + # find out which ABI we are using + libsuff= + case "$host_cpu" in + x86_64*|s390x*|powerpc64*) + echo '[#]line __oline__ "configure"' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.$ac_objext` in + *64-bit*) + libsuff=64 + sys_lib_search_path_spec="/lib${libsuff} /usr/lib${libsuff} /usr/local/lib${libsuff}" + ;; + esac + fi + rm -rf conftest* + ;; + esac + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:,\t]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib${libsuff} /usr/lib${libsuff} $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. + dynamic_linker='GNU/Linux ld.so' + ;; + +knetbsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +nto-qnx*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +openbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[[89]] | openbsd2.[[89]].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + 
library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +sco3.2v5*) + version_type=osf + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + ;; + +solaris*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + export_dynamic_flag_spec='${wl}-Blargedynsym' + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +uts4*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +AC_MSG_RESULT([$dynamic_linker]) +test "$dynamic_linker" = no && can_build_shared=no +])# AC_LIBTOOL_SYS_DYNAMIC_LINKER + + +# _LT_AC_TAGCONFIG +# ---------------- +AC_DEFUN([_LT_AC_TAGCONFIG], +[AC_ARG_WITH([tags], + [AC_HELP_STRING([--with-tags@<:@=TAGS@:>@], + [include additional configurations @<:@automatic@:>@])], + [tagnames="$withval"]) + +if test -f "$ltmain" && test -n "$tagnames"; then + if test ! 
-f "${ofile}"; then + AC_MSG_WARN([output file `$ofile' does not exist]) + fi + + if test -z "$LTCC"; then + eval "`$SHELL ${ofile} --config | grep '^LTCC='`" + if test -z "$LTCC"; then + AC_MSG_WARN([output file `$ofile' does not look like a libtool script]) + else + AC_MSG_WARN([using `LTCC=$LTCC', extracted from `$ofile']) + fi + fi + + # Extract list of available tagged configurations in $ofile. + # Note that this assumes the entire list is on one line. + available_tags=`grep "^available_tags=" "${ofile}" | $SED -e 's/available_tags=\(.*$\)/\1/' -e 's/\"//g'` + + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for tagname in $tagnames; do + IFS="$lt_save_ifs" + # Check whether tagname contains only valid characters + case `$echo "X$tagname" | $Xsed -e 's:[[-_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890,/]]::g'` in + "") ;; + *) AC_MSG_ERROR([invalid tag name: $tagname]) + ;; + esac + + if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$" < "${ofile}" > /dev/null + then + AC_MSG_ERROR([tag name \"$tagname\" already exists]) + fi + + # Update the list of available tags. + if test -n "$tagname"; then + echo appending configuration tag \"$tagname\" to $ofile + + case $tagname in + CXX) + if test -n "$CXX" && ( test "X$CXX" != "Xno" && + ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || + (test "X$CXX" != "Xg++"))) ; then + AC_LIBTOOL_LANG_CXX_CONFIG + else + tagname="" + fi + ;; + + F77) + if test -n "$F77" && test "X$F77" != "Xno"; then + AC_LIBTOOL_LANG_F77_CONFIG + else + tagname="" + fi + ;; + + GCJ) + if test -n "$GCJ" && test "X$GCJ" != "Xno"; then + AC_LIBTOOL_LANG_GCJ_CONFIG + else + tagname="" + fi + ;; + + RC) + AC_LIBTOOL_LANG_RC_CONFIG + ;; + + *) + AC_MSG_ERROR([Unsupported tag name: $tagname]) + ;; + esac + + # Append the new tag name to the list of available tags. + if test -n "$tagname" ; then + available_tags="$available_tags $tagname" + fi + fi + done + IFS="$lt_save_ifs" + + # Now substitute the updated list of available tags. + if eval "sed -e 's/^available_tags=.*\$/available_tags=\"$available_tags\"/' \"$ofile\" > \"${ofile}T\""; then + mv "${ofile}T" "$ofile" + chmod +x "$ofile" + else + rm -f "${ofile}T" + AC_MSG_ERROR([unable to update list of available tagged configurations.]) + fi +fi +])# _LT_AC_TAGCONFIG + + +# AC_LIBTOOL_DLOPEN +# ----------------- +# enable checks for dlopen support +AC_DEFUN([AC_LIBTOOL_DLOPEN], + [AC_BEFORE([$0],[AC_LIBTOOL_SETUP]) +])# AC_LIBTOOL_DLOPEN + + +# AC_LIBTOOL_WIN32_DLL +# -------------------- +# declare package support for building win32 dll's +AC_DEFUN([AC_LIBTOOL_WIN32_DLL], +[AC_BEFORE([$0], [AC_LIBTOOL_SETUP]) +])# AC_LIBTOOL_WIN32_DLL + + +# AC_ENABLE_SHARED([DEFAULT]) +# --------------------------- +# implement the --enable-shared flag +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. +AC_DEFUN([AC_ENABLE_SHARED], +[define([AC_ENABLE_SHARED_DEFAULT], ifelse($1, no, no, yes))dnl +AC_ARG_ENABLE([shared], + [AC_HELP_STRING([--enable-shared@<:@=PKGS@:>@], + [build shared libraries @<:@default=]AC_ENABLE_SHARED_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_shared=yes ;; + no) enable_shared=no ;; + *) + enable_shared=no + # Look at the argument we got. We use all the common list separators. 
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_shared=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [enable_shared=]AC_ENABLE_SHARED_DEFAULT) +])# AC_ENABLE_SHARED + + +# AC_DISABLE_SHARED +# ----------------- +#- set the default shared flag to --disable-shared +AC_DEFUN([AC_DISABLE_SHARED], +[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl +AC_ENABLE_SHARED(no) +])# AC_DISABLE_SHARED + + +# AC_ENABLE_STATIC([DEFAULT]) +# --------------------------- +# implement the --enable-static flag +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. +AC_DEFUN([AC_ENABLE_STATIC], +[define([AC_ENABLE_STATIC_DEFAULT], ifelse($1, no, no, yes))dnl +AC_ARG_ENABLE([static], + [AC_HELP_STRING([--enable-static@<:@=PKGS@:>@], + [build static libraries @<:@default=]AC_ENABLE_STATIC_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_static=yes ;; + no) enable_static=no ;; + *) + enable_static=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_static=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [enable_static=]AC_ENABLE_STATIC_DEFAULT) +])# AC_ENABLE_STATIC + + +# AC_DISABLE_STATIC +# ----------------- +# set the default static flag to --disable-static +AC_DEFUN([AC_DISABLE_STATIC], +[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl +AC_ENABLE_STATIC(no) +])# AC_DISABLE_STATIC + + +# AC_ENABLE_FAST_INSTALL([DEFAULT]) +# --------------------------------- +# implement the --enable-fast-install flag +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. +AC_DEFUN([AC_ENABLE_FAST_INSTALL], +[define([AC_ENABLE_FAST_INSTALL_DEFAULT], ifelse($1, no, no, yes))dnl +AC_ARG_ENABLE([fast-install], + [AC_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@], + [optimize for fast installation @<:@default=]AC_ENABLE_FAST_INSTALL_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_fast_install=yes ;; + no) enable_fast_install=no ;; + *) + enable_fast_install=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_fast_install=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [enable_fast_install=]AC_ENABLE_FAST_INSTALL_DEFAULT) +])# AC_ENABLE_FAST_INSTALL + + +# AC_DISABLE_FAST_INSTALL +# ----------------------- +# set the default to --disable-fast-install +AC_DEFUN([AC_DISABLE_FAST_INSTALL], +[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl +AC_ENABLE_FAST_INSTALL(no) +])# AC_DISABLE_FAST_INSTALL + + +# AC_LIBTOOL_PICMODE([MODE]) +# -------------------------- +# implement the --with-pic flag +# MODE is either `yes' or `no'. If omitted, it defaults to `both'. +AC_DEFUN([AC_LIBTOOL_PICMODE], +[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl +pic_mode=ifelse($#,1,$1,default) +])# AC_LIBTOOL_PICMODE + + +# AC_PROG_EGREP +# ------------- +# This is predefined starting with Autoconf 2.54, so this conditional +# definition can be removed once we require Autoconf 2.54 or later. 
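+#
+# Usage sketch for the AC_ENABLE_*/AC_DISABLE_* switches and AC_LIBTOOL_PICMODE
+# defined above (illustrative only; it assumes a configure.ac that also calls
+# AC_PROG_LIBTOOL, and `pkgA'/`pkgB' below are placeholder package names):
+#
+#   AC_DISABLE_SHARED             # default becomes --disable-shared
+#   AC_ENABLE_STATIC              # static libraries stay enabled by default
+#   AC_DISABLE_FAST_INSTALL
+#   AC_LIBTOOL_PICMODE([yes])     # prefer PIC objects
+#   AC_PROG_LIBTOOL
+#
+# Users may still override these defaults at configure time, e.g.:
+#
+#   ./configure --enable-shared=pkgA,pkgB --disable-static
+#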
+m4_ifndef([AC_PROG_EGREP], [AC_DEFUN([AC_PROG_EGREP], +[AC_CACHE_CHECK([for egrep], [ac_cv_prog_egrep], + [if echo a | (grep -E '(a|b)') >/dev/null 2>&1 + then ac_cv_prog_egrep='grep -E' + else ac_cv_prog_egrep='egrep' + fi]) + EGREP=$ac_cv_prog_egrep + AC_SUBST([EGREP]) +])]) + + +# AC_PATH_TOOL_PREFIX +# ------------------- +# find a file program which can recognise shared library +AC_DEFUN([AC_PATH_TOOL_PREFIX], +[AC_REQUIRE([AC_PROG_EGREP])dnl +AC_MSG_CHECKING([for $1]) +AC_CACHE_VAL(lt_cv_path_MAGIC_CMD, +[case $MAGIC_CMD in +[[\\/*] | ?:[\\/]*]) + lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD="$MAGIC_CMD" + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR +dnl $ac_dummy forces splitting on constant user-supplied paths. +dnl POSIX.2 word splitting is done only on the output of word expansions, +dnl not every word. This closes a longstanding sh security hole. + ac_dummy="ifelse([$2], , $PATH, [$2])" + for ac_dir in $ac_dummy; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f $ac_dir/$1; then + lt_cv_path_MAGIC_CMD="$ac_dir/$1" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex="`expr \"$deplibs_check_method\" : \"file_magic \(.*\)\"`" + MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <<EOF 1>&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +EOF + fi ;; + esac + fi + break + fi + done + IFS="$lt_save_ifs" + MAGIC_CMD="$lt_save_MAGIC_CMD" + ;; +esac]) +MAGIC_CMD="$lt_cv_path_MAGIC_CMD" +if test -n "$MAGIC_CMD"; then + AC_MSG_RESULT($MAGIC_CMD) +else + AC_MSG_RESULT(no) +fi +])# AC_PATH_TOOL_PREFIX + + +# AC_PATH_MAGIC +# ------------- +# find a file program which can recognise a shared library +AC_DEFUN([AC_PATH_MAGIC], +[AC_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH) +if test -z "$lt_cv_path_MAGIC_CMD"; then + if test -n "$ac_tool_prefix"; then + AC_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH) + else + MAGIC_CMD=: + fi +fi +])# AC_PATH_MAGIC + + +# AC_PROG_LD +# ---------- +# find the pathname to the GNU or non-GNU linker +AC_DEFUN([AC_PROG_LD], +[AC_ARG_WITH([gnu-ld], + [AC_HELP_STRING([--with-gnu-ld], + [assume the C compiler uses GNU ld @<:@default=no@:>@])], + [test "$withval" = no || with_gnu_ld=yes], + [with_gnu_ld=no]) +AC_REQUIRE([LT_AC_PROG_SED])dnl +AC_REQUIRE([AC_PROG_CC])dnl +AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_CANONICAL_BUILD])dnl +ac_prog=ld +if test "$GCC" = yes; then + # Check if gcc -print-prog-name=ld gives a path. + AC_MSG_CHECKING([for ld used by $CC]) + case $host in + *-*-mingw*) + # gcc leaves a trailing carriage return which upsets mingw + ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; + esac + case $ac_prog in + # Accept absolute paths.
+ [[\\/]]* | ?:[[\\/]]*) + re_direlt='/[[^/]][[^/]]*/\.\./' + # Canonicalize the pathname of ld + ac_prog=`echo $ac_prog| $SED 's%\\\\%/%g'` + while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do + ac_prog=`echo $ac_prog| $SED "s%$re_direlt%/%"` + done + test -z "$LD" && LD="$ac_prog" + ;; + "") + # If it fails, then pretend we aren't using GCC. + ac_prog=ld + ;; + *) + # If it is relative, then search for the first ld in PATH. + with_gnu_ld=unknown + ;; + esac +elif test "$with_gnu_ld" = yes; then + AC_MSG_CHECKING([for GNU ld]) +else + AC_MSG_CHECKING([for non-GNU ld]) +fi +AC_CACHE_VAL(lt_cv_path_LD, +[if test -z "$LD"; then + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then + lt_cv_path_LD="$ac_dir/$ac_prog" + # Check to see if the program is GNU ld. I'd rather use --version, + # but apparently some GNU ld's only accept -v. + # Break only if it was the GNU/non-GNU ld that we prefer. + case `"$lt_cv_path_LD" -v 2>&1 &1 /dev/null; then + case $host_cpu in + i*86 ) + # Not sure whether the presence of OpenBSD here was a mistake. + # Let's accept both of them until this is cleared up. + lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` + ;; + esac + else + lt_cv_deplibs_check_method=pass_all + fi + ;; + +gnu*) + lt_cv_deplibs_check_method=pass_all + ;; + +hpux10.20* | hpux11*) + lt_cv_file_magic_cmd=/usr/bin/file + case "$host_cpu" in + ia64*) + lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64' + lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so + ;; + hppa*64*) + [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - PA-RISC [0-9].[0-9]'] + lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl + ;; + *) + lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]].[[0-9]]) shared library' + lt_cv_file_magic_test_file=/usr/lib/libc.sl + ;; + esac + ;; + +irix5* | irix6* | nonstopux*) + case $LD in + *-32|*"-32 ") libmagic=32-bit;; + *-n32|*"-n32 ") libmagic=N32;; + *-64|*"-64 ") libmagic=64-bit;; + *) libmagic=never-match;; + esac + lt_cv_deplibs_check_method=pass_all + ;; + +# This must be Linux ELF. 
+linux*) + lt_cv_deplibs_check_method=pass_all + ;; + +netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ > /dev/null; then + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$' + fi + ;; + +newos6*) + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=/usr/lib/libnls.so + ;; + +nto-qnx*) + lt_cv_deplibs_check_method=unknown + ;; + +openbsd*) + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' + fi + ;; + +osf3* | osf4* | osf5*) + lt_cv_deplibs_check_method=pass_all + ;; + +sco3.2v5*) + lt_cv_deplibs_check_method=pass_all + ;; + +solaris*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + case $host_vendor in + motorola) + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]' + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` + ;; + ncr) + lt_cv_deplibs_check_method=pass_all + ;; + sequent) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )' + ;; + sni) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib" + lt_cv_file_magic_test_file=/lib/libc.so + ;; + siemens) + lt_cv_deplibs_check_method=pass_all + ;; + esac + ;; + +sysv5OpenUNIX8* | sysv5UnixWare7* | sysv5uw[[78]]* | unixware7* | sysv4*uw2*) + lt_cv_deplibs_check_method=pass_all + ;; +esac +]) +file_magic_cmd=$lt_cv_file_magic_cmd +deplibs_check_method=$lt_cv_deplibs_check_method +test -z "$deplibs_check_method" && deplibs_check_method=unknown +])# AC_DEPLIBS_CHECK_METHOD + + +# AC_PROG_NM +# ---------- +# find the pathname to a BSD-compatible name lister +AC_DEFUN([AC_PROG_NM], +[AC_CACHE_CHECK([for BSD-compatible nm], lt_cv_path_NM, +[if test -n "$NM"; then + # Let the user override the test. + lt_cv_path_NM="$NM" +else + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH /usr/ccs/bin /usr/ucb /bin; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + tmp_nm="$ac_dir/${ac_tool_prefix}nm" + if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then + # Check to see if the nm accepts a BSD-compat flag. 
+ # Adding the `sed 1q' prevents false positives on HP-UX, which says: + # nm: unknown option "B" ignored + # Tru64's nm complains that /dev/null is an invalid object file + case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in + */dev/null* | *'Invalid file or object type'*) + lt_cv_path_NM="$tmp_nm -B" + break + ;; + *) + case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in + */dev/null*) + lt_cv_path_NM="$tmp_nm -p" + break + ;; + *) + lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but + continue # so that we can try to find one that supports BSD flags + ;; + esac + esac + fi + done + IFS="$lt_save_ifs" + test -z "$lt_cv_path_NM" && lt_cv_path_NM=nm +fi]) +NM="$lt_cv_path_NM" +])# AC_PROG_NM + + +# AC_CHECK_LIBM +# ------------- +# check for math library +AC_DEFUN([AC_CHECK_LIBM], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +LIBM= +case $host in +*-*-beos* | *-*-cygwin* | *-*-pw32* | *-*-darwin*) + # These system don't have libm, or don't need it + ;; +*-ncr-sysv4.3*) + AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM="-lmw") + AC_CHECK_LIB(m, cos, LIBM="$LIBM -lm") + ;; +*) + AC_CHECK_LIB(m, cos, LIBM="-lm") + ;; +esac +])# AC_CHECK_LIBM + + +# AC_LIBLTDL_CONVENIENCE([DIRECTORY]) +# ----------------------------------- +# sets LIBLTDL to the link flags for the libltdl convenience library and +# LTDLINCL to the include flags for the libltdl header and adds +# --enable-ltdl-convenience to the configure arguments. Note that LIBLTDL +# and LTDLINCL are not AC_SUBSTed, nor is AC_CONFIG_SUBDIRS called. If +# DIRECTORY is not provided, it is assumed to be `libltdl'. LIBLTDL will +# be prefixed with '${top_builddir}/' and LTDLINCL will be prefixed with +# '${top_srcdir}/' (note the single quotes!). If your package is not +# flat and you're not using automake, define top_builddir and +# top_srcdir appropriately in the Makefiles. +AC_DEFUN([AC_LIBLTDL_CONVENIENCE], +[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl + case $enable_ltdl_convenience in + no) AC_MSG_ERROR([this package needs a convenience libltdl]) ;; + "") enable_ltdl_convenience=yes + ac_configure_args="$ac_configure_args --enable-ltdl-convenience" ;; + esac + LIBLTDL='${top_builddir}/'ifelse($#,1,[$1],['libltdl'])/libltdlc.la + LTDLINCL='-I${top_srcdir}/'ifelse($#,1,[$1],['libltdl']) + # For backwards non-gettext consistent compatibility... + INCLTDL="$LTDLINCL" +])# AC_LIBLTDL_CONVENIENCE + + +# AC_LIBLTDL_INSTALLABLE([DIRECTORY]) +# ----------------------------------- +# sets LIBLTDL to the link flags for the libltdl installable library and +# LTDLINCL to the include flags for the libltdl header and adds +# --enable-ltdl-install to the configure arguments. Note that LIBLTDL +# and LTDLINCL are not AC_SUBSTed, nor is AC_CONFIG_SUBDIRS called. If +# DIRECTORY is not provided and an installed libltdl is not found, it is +# assumed to be `libltdl'. LIBLTDL will be prefixed with '${top_builddir}/' +# and LTDLINCL will be prefixed with '${top_srcdir}/' (note the single +# quotes!). If your package is not flat and you're not using automake, +# define top_builddir and top_srcdir appropriately in the Makefiles. +# In the future, this macro may have to be called after AC_PROG_LIBTOOL. 
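+#
+# A minimal configure.ac sketch for the two libltdl modes documented above
+# (illustrative only; it assumes a package that ships a `libltdl' subdirectory
+# and calls AC_PROG_LIBTOOL; the AC_SUBST and AC_CONFIG_SUBDIRS calls are the
+# caller's responsibility, as noted above, and `myprog' is a placeholder):
+#
+#   AC_LIBLTDL_CONVENIENCE          # or AC_LIBLTDL_INSTALLABLE
+#   AC_PROG_LIBTOOL
+#   AC_SUBST([LIBLTDL])
+#   AC_SUBST([LTDLINCL])
+#   AC_CONFIG_SUBDIRS([libltdl])
+#
+# and in the Makefile.am that links against it:
+#
+#   AM_CPPFLAGS  = $(LTDLINCL)
+#   myprog_LDADD = $(LIBLTDL)
+#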
+AC_DEFUN([AC_LIBLTDL_INSTALLABLE], +[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl + AC_CHECK_LIB(ltdl, lt_dlinit, + [test x"$enable_ltdl_install" != xyes && enable_ltdl_install=no], + [if test x"$enable_ltdl_install" = xno; then + AC_MSG_WARN([libltdl not installed, but installation disabled]) + else + enable_ltdl_install=yes + fi + ]) + if test x"$enable_ltdl_install" = x"yes"; then + ac_configure_args="$ac_configure_args --enable-ltdl-install" + LIBLTDL='${top_builddir}/'ifelse($#,1,[$1],['libltdl'])/libltdl.la + LTDLINCL='-I${top_srcdir}/'ifelse($#,1,[$1],['libltdl']) + else + ac_configure_args="$ac_configure_args --enable-ltdl-install=no" + LIBLTDL="-lltdl" + LTDLINCL= + fi + # For backwards non-gettext consistent compatibility... + INCLTDL="$LTDLINCL" +])# AC_LIBLTDL_INSTALLABLE + + +# AC_LIBTOOL_CXX +# -------------- +# enable support for C++ libraries +AC_DEFUN([AC_LIBTOOL_CXX], +[AC_REQUIRE([_LT_AC_LANG_CXX]) +])# AC_LIBTOOL_CXX + + +# _LT_AC_LANG_CXX +# --------------- +AC_DEFUN([_LT_AC_LANG_CXX], +[AC_REQUIRE([AC_PROG_CXX]) +AC_REQUIRE([_LT_AC_PROG_CXXCPP]) +_LT_AC_SHELL_INIT([tagnames=${tagnames+${tagnames},}CXX]) +])# _LT_AC_LANG_CXX + +# _LT_AC_PROG_CXXCPP +# --------------- +AC_DEFUN([_LT_AC_PROG_CXXCPP], +[ +AC_REQUIRE([AC_PROG_CXX]) +if test -n "$CXX" && ( test "X$CXX" != "Xno" && + ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || + (test "X$CXX" != "Xg++"))) ; then + AC_PROG_CXXCPP +fi +])# _LT_AC_PROG_CXXCPP + +# AC_LIBTOOL_F77 +# -------------- +# enable support for Fortran 77 libraries +AC_DEFUN([AC_LIBTOOL_F77], +[AC_REQUIRE([_LT_AC_LANG_F77]) +])# AC_LIBTOOL_F77 + + +# _LT_AC_LANG_F77 +# --------------- +AC_DEFUN([_LT_AC_LANG_F77], +[AC_REQUIRE([AC_PROG_F77]) +_LT_AC_SHELL_INIT([tagnames=${tagnames+${tagnames},}F77]) +])# _LT_AC_LANG_F77 + + +# AC_LIBTOOL_GCJ +# -------------- +# enable support for GCJ libraries +AC_DEFUN([AC_LIBTOOL_GCJ], +[AC_REQUIRE([_LT_AC_LANG_GCJ]) +])# AC_LIBTOOL_GCJ + + +# _LT_AC_LANG_GCJ +# --------------- +AC_DEFUN([_LT_AC_LANG_GCJ], +[AC_PROVIDE_IFELSE([AC_PROG_GCJ],[], + [AC_PROVIDE_IFELSE([A][M_PROG_GCJ],[], + [AC_PROVIDE_IFELSE([LT_AC_PROG_GCJ],[], + [ifdef([AC_PROG_GCJ],[AC_REQUIRE([AC_PROG_GCJ])], + [ifdef([A][M_PROG_GCJ],[AC_REQUIRE([A][M_PROG_GCJ])], + [AC_REQUIRE([A][C_PROG_GCJ_OR_A][M_PROG_GCJ])])])])])]) +_LT_AC_SHELL_INIT([tagnames=${tagnames+${tagnames},}GCJ]) +])# _LT_AC_LANG_GCJ + + +# AC_LIBTOOL_RC +# -------------- +# enable support for Windows resource files +AC_DEFUN([AC_LIBTOOL_RC], +[AC_REQUIRE([LT_AC_PROG_RC]) +_LT_AC_SHELL_INIT([tagnames=${tagnames+${tagnames},}RC]) +])# AC_LIBTOOL_RC + + +# AC_LIBTOOL_LANG_C_CONFIG +# ------------------------ +# Ensure that the configuration vars for the C compiler are +# suitably defined. Those variables are subsequently used by +# AC_LIBTOOL_CONFIG to write the compiler configuration to `libtool'. +AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG], [_LT_AC_LANG_C_CONFIG]) +AC_DEFUN([_LT_AC_LANG_C_CONFIG], +[lt_save_CC="$CC" +AC_LANG_PUSH(C) + +# Source file extension for C test sources. +ac_ext=c + +# Object file extension for compiled C test sources. +objext=o +_LT_AC_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;\n" + +# Code to be used in simple link tests +lt_simple_link_test_code='int main(){return(0);}\n' + +_LT_AC_SYS_COMPILER + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +# +# Check for any special shared library compilation flags. 
+# +_LT_AC_TAGVAR(lt_prog_cc_shlib, $1)= +if test "$GCC" = no; then + case $host_os in + sco3.2v5*) + _LT_AC_TAGVAR(lt_prog_cc_shlib, $1)='-belf' + ;; + esac +fi +if test -n "$_LT_AC_TAGVAR(lt_prog_cc_shlib, $1)"; then + AC_MSG_WARN([`$CC' requires `$_LT_AC_TAGVAR(lt_prog_cc_shlib, $1)' to build shared libraries]) + if echo "$old_CC $old_CFLAGS " | grep "[[ ]]$_LT_AC_TAGVAR(lt_prog_cc_shlib, $1)[[ ]]" >/dev/null; then : + else + AC_MSG_WARN([add `$_LT_AC_TAGVAR(lt_prog_cc_shlib, $1)' to the CC or CFLAGS env variable and reconfigure]) + _LT_AC_TAGVAR(lt_cv_prog_cc_can_build_shared, $1)=no + fi +fi + + +# +# Check to make sure the static flag actually works. +# +AC_LIBTOOL_LINKER_OPTION([if $compiler static flag $_LT_AC_TAGVAR(lt_prog_compiler_static, $1) works], + _LT_AC_TAGVAR(lt_prog_compiler_static_works, $1), + $_LT_AC_TAGVAR(lt_prog_compiler_static, $1), + [], + [_LT_AC_TAGVAR(lt_prog_compiler_static, $1)=]) + + +AC_LIBTOOL_PROG_COMPILER_NO_RTTI($1) +AC_LIBTOOL_PROG_COMPILER_PIC($1) +AC_LIBTOOL_PROG_CC_C_O($1) +AC_LIBTOOL_SYS_HARD_LINK_LOCKS($1) +AC_LIBTOOL_PROG_LD_SHLIBS($1) +AC_LIBTOOL_SYS_DYNAMIC_LINKER($1) +AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH($1) +AC_LIBTOOL_SYS_LIB_STRIP +AC_LIBTOOL_DLOPEN_SELF($1) + +# Report which librarie types wil actually be built +AC_MSG_CHECKING([if libtool supports shared libraries]) +AC_MSG_RESULT([$can_build_shared]) + +AC_MSG_CHECKING([whether to build shared libraries]) +test "$can_build_shared" = "no" && enable_shared=no + +# On AIX, shared libraries and static libraries use the same namespace, and +# are all built from PIC. +case "$host_os" in +aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + +aix4* | aix5*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; +esac +AC_MSG_RESULT([$enable_shared]) + +AC_MSG_CHECKING([whether to build static libraries]) +# Make sure either enable_shared or enable_static is yes. +test "$enable_shared" = yes || enable_static=yes +AC_MSG_RESULT([$enable_static]) + +AC_LIBTOOL_CONFIG($1) + +AC_LANG_POP +CC="$lt_save_CC" +])# AC_LIBTOOL_LANG_C_CONFIG + + +# AC_LIBTOOL_LANG_CXX_CONFIG +# -------------------------- +# Ensure that the configuration vars for the C compiler are +# suitably defined. Those variables are subsequently used by +# AC_LIBTOOL_CONFIG to write the compiler configuration to `libtool'. 
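+#
+# Sketch of how this tagged configuration is normally reached (illustrative;
+# assumes a configure.ac that already uses libtool, and `foo.cpp' is a
+# placeholder source file):
+#
+#   AC_PROG_CXX
+#   AC_LIBTOOL_CXX            # registers the CXX tag (see _LT_AC_LANG_CXX above)
+#   AC_PROG_LIBTOOL
+#
+# The generated `libtool' script can then be told to use this tag explicitly:
+#
+#   ./libtool --tag=CXX --mode=compile $CXX $CXXFLAGS -c foo.cpp
+#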
+AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG], [_LT_AC_LANG_CXX_CONFIG(CXX)]) +AC_DEFUN([_LT_AC_LANG_CXX_CONFIG], +[AC_LANG_PUSH(C++) +AC_REQUIRE([AC_PROG_CXX]) +AC_REQUIRE([_LT_AC_PROG_CXXCPP]) + +_LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no +_LT_AC_TAGVAR(allow_undefined_flag, $1)= +_LT_AC_TAGVAR(always_export_symbols, $1)=no +_LT_AC_TAGVAR(archive_expsym_cmds, $1)= +_LT_AC_TAGVAR(export_dynamic_flag_spec, $1)= +_LT_AC_TAGVAR(hardcode_direct, $1)=no +_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)= +_LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= +_LT_AC_TAGVAR(hardcode_libdir_separator, $1)= +_LT_AC_TAGVAR(hardcode_minus_L, $1)=no +_LT_AC_TAGVAR(hardcode_automatic, $1)=no +_LT_AC_TAGVAR(module_cmds, $1)= +_LT_AC_TAGVAR(module_expsym_cmds, $1)= +_LT_AC_TAGVAR(link_all_deplibs, $1)=unknown +_LT_AC_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_AC_TAGVAR(no_undefined_flag, $1)= +_LT_AC_TAGVAR(whole_archive_flag_spec, $1)= +_LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=no + +# Dependencies to place before and after the object being linked: +_LT_AC_TAGVAR(predep_objects, $1)= +_LT_AC_TAGVAR(postdep_objects, $1)= +_LT_AC_TAGVAR(predeps, $1)= +_LT_AC_TAGVAR(postdeps, $1)= +_LT_AC_TAGVAR(compiler_lib_search_path, $1)= + +# Source file extension for C++ test sources. +ac_ext=cc + +# Object file extension for compiled C++ test sources. +objext=o +_LT_AC_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;\n" + +# Code to be used in simple link tests +lt_simple_link_test_code='int main(int, char *[]) { return(0); }\n' + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. +_LT_AC_SYS_COMPILER + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +# Allow CC to be a program name with arguments. +lt_save_CC=$CC +lt_save_LD=$LD +lt_save_GCC=$GCC +GCC=$GXX +lt_save_with_gnu_ld=$with_gnu_ld +lt_save_path_LD=$lt_cv_path_LD +if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then + lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx +else + unset lt_cv_prog_gnu_ld +fi +if test -n "${lt_cv_path_LDCXX+set}"; then + lt_cv_path_LD=$lt_cv_path_LDCXX +else + unset lt_cv_path_LD +fi +test -z "${LDCXX+set}" || LD=$LDCXX +CC=${CXX-"c++"} +compiler=$CC +_LT_AC_TAGVAR(compiler, $1)=$CC +_LT_CC_BASENAME([$compiler]) + +# We don't want -fno-exception wen compiling C++ code, so set the +# no_builtin_flag separately +if test "$GXX" = yes; then + _LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' +else + _LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= +fi + +if test "$GXX" = yes; then + # Set up default GNU C++ configuration + + AC_PROG_LD + + # Check if GNU C++ uses GNU ld as the underlying linker, since the + # archiving commands below assume that GNU ld is being used. + if test "$with_gnu_ld" = yes; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir' + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + + # If archive_cmds runs LD, not CC, wlarc should be empty + # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to + # investigate it a little bit more. 
(MM) + wlarc='${wl}' + + # ancient GNU ld didn't support --whole-archive et. al. + if eval "`$CC -print-prog-name=ld` --help 2>&1" | \ + grep 'no-whole-archive' > /dev/null; then + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)= + fi + else + with_gnu_ld=no + wlarc= + + # A generic and very simple default shared library creation + # command for GNU C++ for the case where it uses the native + # linker, instead of GNU ld. If possible, this setting should + # overridden to take advantage of the native linker features on + # the platform it is being used on. + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + fi + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' + +else + GXX=no + with_gnu_ld=no + wlarc= +fi + +# PORTME: fill in a description of your system's C++ link characteristics +AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) +_LT_AC_TAGVAR(ld_shlibs, $1)=yes +case $host_os in + aix3*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + aix4* | aix5*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[[23]]|aix4.[[23]].*|aix5*) + for ld_flag in $LDFLAGS; do + case $ld_flag in + *-brtl*) + aix_use_runtimelinking=yes + break + ;; + esac + done + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + _LT_AC_TAGVAR(archive_cmds, $1)='' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + + if test "$GXX" = yes; then + case $host_os in aix4.[[012]]|aix4.[[012]].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && \ + strings "$collect2name" | grep resolve_lib_name >/dev/null + then + # We have reworked collect2 + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + else + # We have old collect2 + _LT_AC_TAGVAR(hardcode_direct, $1)=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. 
Setting hardcode_minus_L + # to unsupported forces relinking + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)= + fi + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + _LT_AC_TAGVAR(always_export_symbols, $1)=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + _LT_AC_TAGVAR(allow_undefined_flag, $1)='-berok' + # Determine the default libpath from the value encoded in an empty executable. + _LT_AC_SYS_LIBPATH_AIX + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + + _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$no_entry_flag \${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' + _LT_AC_TAGVAR(allow_undefined_flag, $1)="-z nodefs" + _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$no_entry_flag \${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an empty executable. + _LT_AC_SYS_LIBPATH_AIX + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + _LT_AC_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' + # -bexpall does not export symbols beginning with underscore (_) + _LT_AC_TAGVAR(always_export_symbols, $1)=yes + # Exported symbols can be pulled into shared objects from archives + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)=' ' + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=yes + # This is similar to how AIX traditionally builds it's shared libraries. + _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${wl}-bE:$export_symbols ${wl}-bnoentry${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + chorus*) + case $cc_basename in + *) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + + cygwin* | mingw* | pw32*) + # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, + # as there is no search path for DLLs. 
+ _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_AC_TAGVAR(always_export_symbols, $1)=no + _LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + + if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--image-base=0x10000000 ${wl}--out-implib,$lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--image-base=0x10000000 ${wl}--out-implib,$lib' + else + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + darwin* | rhapsody*) + case "$host_os" in + rhapsody* | darwin1.[[012]]) + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}suppress' + ;; + *) # Darwin 1.3 on + if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + else + case ${MACOSX_DEPLOYMENT_TARGET} in + 10.[[012]]) + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + ;; + 10.*) + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}dynamic_lookup' + ;; + esac + fi + ;; + esac + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_AC_TAGVAR(hardcode_direct, $1)=no + _LT_AC_TAGVAR(hardcode_automatic, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=unsupported + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='' + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + + if test "$GXX" = yes ; then + lt_int_apple_cc_single_mod=no + output_verbose_link_cmd='echo' + if $CC -dumpspecs 2>&1 | $EGREP 'single_module' >/dev/null ; then + lt_int_apple_cc_single_mod=yes + fi + if test "X$lt_int_apple_cc_single_mod" = Xyes ; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + else + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -r -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + fi + _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's + if test "X$lt_int_apple_cc_single_mod" = Xyes ; then + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -r -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o 
$deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + fi + _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + case $cc_basename in + xlc*) + output_verbose_link_cmd='echo' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj ${wl}-single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring' + _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj ${wl}-single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + ;; + *) + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac + fi + ;; + + dgux*) + case $cc_basename in + ec++*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + ghcx*) + # Green Hills C++ Compiler + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + freebsd[[12]]*) + # C++ shared libraries reported to be fairly broken before switch to ELF + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + freebsd-elf*) + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + ;; + freebsd* | kfreebsd*-gnu | dragonfly*) + # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF + # conventions + _LT_AC_TAGVAR(ld_shlibs, $1)=yes + ;; + gnu*) + ;; + hpux9*) + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, + # but as the default + # location of the library. + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + aCC*) + _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | grep "[[-]]L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + *) + if test "$GXX" = yes; then + _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/$soname~$CC -shared -nostdlib -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + hpux10*|hpux11*) + if test $with_gnu_ld = no; then + case "$host_cpu" in + hppa*64*) + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='+b $libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + ;; + ia64*) + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + ;; + *) + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + ;; + esac + fi + case "$host_cpu" in + hppa*64*) + _LT_AC_TAGVAR(hardcode_direct, $1)=no + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + ia64*) + _LT_AC_TAGVAR(hardcode_direct, $1)=no + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, + # but as the default + # location of the library. + ;; + *) + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, + # but as the default + # location of the library. + ;; + esac + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + aCC*) + case "$host_cpu" in + hppa*64*|ia64*) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -b +h $soname -o $lib $linker_flags $libobjs $deplibs' + ;; + *) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | grep "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + *) + if test "$GXX" = yes; then + if test $with_gnu_ld = no; then + case "$host_cpu" in + ia64*|hppa*64*) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -b +h $soname -o $lib $linker_flags $libobjs $deplibs' + ;; + *) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + fi + else + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + irix5* | irix6*) + case $cc_basename in + CC*) + # SGI C++ + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + + # Archives containing C++ object files must be created using + # "CC -ar", where "CC" is the IRIX C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. + _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs' + ;; + *) + if test "$GXX" = yes; then + if test "$with_gnu_ld" = no; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` -o $lib' + fi + fi + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + ;; + esac + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + ;; + linux*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + _LT_AC_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | grep "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath,$libdir' + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + + # Archives containing C++ object files must be created using + # "CC -Bstatic", where "CC" is the KAI C++ compiler. + _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' + ;; + icpc*) + # Intel C++ + with_gnu_ld=yes + # version 8.0 and above of icpc choke on multiply defined symbols + # if we add $predep_objects and $postdep_objects, however 7.1 and + # earlier do not add the objects themselves. + case `$CC -V 2>&1` in + *"Version 7."*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + *) # Version 8.0 or newer + tmp_idyn= + case $host_cpu in + ia64*) tmp_idyn=' -i_dynamic';; + esac + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + esac + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + ;; + pgCC*) + # Portland Group C++ compiler + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir' + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='' + ;; + cxx*) + # Compaq C++ + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols' + + runpath_var=LD_RUN_PATH + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + esac + ;; + lynxos*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + m88k*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + mvs*) + case $cc_basename in + cxx*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' + wlarc= + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + fi + # Workaround some broken pre-1.5 toolchains + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' + ;; + openbsd2*) + # C++ shared libraries are fairly broken + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + openbsd*) + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + fi + output_verbose_link_cmd='echo' + ;; + osf3*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + _LT_AC_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + # Archives containing C++ object files must be created using + # "CC -Bstatic", where "CC" is the KAI C++ compiler. 
+ _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' + + ;; + RCC*) + # Rational C++ 2.4.1 + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + cxx*) + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && echo ${wl}-set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld" | grep -v "ld:"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + *) + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' + + else + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + osf4* | osf5*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + _LT_AC_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + # Archives containing C++ object files must be created using + # the KAI C++ compiler. 
+ _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' + ;; + RCC*) + # Rational C++ 2.4.1 + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + cxx*) + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ + echo "-hidden">> $lib.exp~ + $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname -Wl,-input -Wl,$lib.exp `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~ + $rm $lib.exp' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld" | grep -v "ld:"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + *) + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' + + else + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + psos*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + sco*) + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + lcc*) + # Lucid + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + solaris*) + case $cc_basename in + CC*) + # Sun C++ 4.2, 5.x and Centerline C++ + _LT_AC_TAGVAR(no_undefined_flag, $1)=' -zdefs' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -nolib -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -G${allow_undefined_flag} -nolib ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + # The C++ compiler is used as linker so we must use $wl + # flag to pass the commands to the underlying system + # linker. We must also pass each convience library through + # to the system linker between allextract/defaultextract. + # The C++ compiler will combine linker options so we + # cannot just pass the convience library names through + # without $wl. + # Supported since Solaris 2.6 (maybe 2.5.1?) + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}-z ${wl}defaultextract' + ;; + esac + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -G $CFLAGS -v conftest.$objext 2>&1 | grep "\-[[LR]]"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. + _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' + ;; + gcx*) + # Green Hills C++ Compiler + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + + # The C++ compiler must be used to create the archive. 
+ _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs' + ;; + *) + # GNU C++ compiler with Solaris linker + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + _LT_AC_TAGVAR(no_undefined_flag, $1)=' ${wl}-z ${wl}defs' + if $CC --version | grep -v '^2\.7' > /dev/null; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -shared -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd="$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep \"\-L\"" + else + # g++ 2.7 appears to require `-G' NOT `-shared' on this + # platform. + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd="$CC -G $CFLAGS -v conftest.$objext 2>&1 | grep \"\-L\"" + fi + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $wl$libdir' + fi + ;; + esac + ;; + sysv5OpenUNIX8* | sysv5UnixWare7* | sysv5uw[[78]]* | unixware7*) + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + ;; + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + vxworks*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; +esac +AC_MSG_RESULT([$_LT_AC_TAGVAR(ld_shlibs, $1)]) +test "$_LT_AC_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no + +_LT_AC_TAGVAR(GCC, $1)="$GXX" +_LT_AC_TAGVAR(LD, $1)="$LD" + +AC_LIBTOOL_POSTDEP_PREDEP($1) +AC_LIBTOOL_PROG_COMPILER_PIC($1) +AC_LIBTOOL_PROG_CC_C_O($1) +AC_LIBTOOL_SYS_HARD_LINK_LOCKS($1) +AC_LIBTOOL_PROG_LD_SHLIBS($1) +AC_LIBTOOL_SYS_DYNAMIC_LINKER($1) +AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH($1) +AC_LIBTOOL_SYS_LIB_STRIP +AC_LIBTOOL_DLOPEN_SELF($1) + +AC_LIBTOOL_CONFIG($1) + +AC_LANG_POP +CC=$lt_save_CC +LDCXX=$LD +LD=$lt_save_LD +GCC=$lt_save_GCC +with_gnu_ldcxx=$with_gnu_ld +with_gnu_ld=$lt_save_with_gnu_ld +lt_cv_path_LDCXX=$lt_cv_path_LD +lt_cv_path_LD=$lt_save_path_LD +lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld +lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld +])# AC_LIBTOOL_LANG_CXX_CONFIG + +# AC_LIBTOOL_POSTDEP_PREDEP([TAGNAME]) +# ------------------------ +# Figure out "hidden" library dependencies from verbose +# compiler output when linking a shared library. +# Parse the compiler output and extract the necessary +# objects, libraries and library flags. 
+AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP],[ +dnl we can't use the lt_simple_compile_test_code here, +dnl because it contains code intended for an executable, +dnl not a library. It's possible we should let each +dnl tag define a new lt_????_link_test_code variable, +dnl but it's only used here... +ifelse([$1],[],[cat > conftest.$ac_ext < conftest.$ac_ext < conftest.$ac_ext < conftest.$ac_ext <> "$cfgfile" +ifelse([$1], [], +[#! $SHELL + +# `$echo "$cfgfile" | sed 's%^.*/%%'` - Provide generalized library-building support services. +# Generated automatically by $PROGRAM (GNU $PACKAGE $VERSION$TIMESTAMP) +# NOTE: Changes made to this file will be lost: look at ltmain.sh. +# +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001 +# Free Software Foundation, Inc. +# +# This file is part of GNU Libtool: +# Originally by Gordon Matzigkeit , 1996 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# A sed program that does not truncate output. +SED=$lt_SED + +# Sed that helps us avoid accidentally triggering echo(1) options like -n. +Xsed="$SED -e 1s/^X//" + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +# The names of the tagged configurations supported by this script. +available_tags= + +# ### BEGIN LIBTOOL CONFIG], +[# ### BEGIN LIBTOOL TAG CONFIG: $tagname]) + +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$_LT_AC_TAGVAR(archive_cmds_need_lc, $1) + +# Whether or not to disallow shared libs when runtime libs are static +allow_libtool_libs_with_static_runtimes=$_LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1) + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# An echo program that does not interpret backslashes. +echo=$lt_echo + +# The archiver. +AR=$lt_AR +AR_FLAGS=$lt_AR_FLAGS + +# A C compiler. +LTCC=$lt_LTCC + +# A language-specific compiler. +CC=$lt_[]_LT_AC_TAGVAR(compiler, $1) + +# Is the compiler the GNU C compiler? +with_gcc=$_LT_AC_TAGVAR(GCC, $1) + +# An ERE matcher. +EGREP=$lt_EGREP + +# The linker used to build libraries. 
+LD=$lt_[]_LT_AC_TAGVAR(LD, $1) + +# Whether we need hard or soft links. +LN_S=$lt_LN_S + +# A BSD-compatible nm program. +NM=$lt_NM + +# A symbol stripping program +STRIP=$lt_STRIP + +# Used to examine libraries when file_magic_cmd begins "file" +MAGIC_CMD=$MAGIC_CMD + +# Used on cygwin: DLL creation program. +DLLTOOL="$DLLTOOL" + +# Used on cygwin: object dumper. +OBJDUMP="$OBJDUMP" + +# Used on cygwin: assembler. +AS="$AS" + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# How to pass a linker flag through the compiler. +wl=$lt_[]_LT_AC_TAGVAR(lt_prog_compiler_wl, $1) + +# Object file suffix (normally "o"). +objext="$ac_objext" + +# Old archive suffix (normally "a"). +libext="$libext" + +# Shared library suffix (normally ".so"). +shrext_cmds='$shrext_cmds' + +# Executable file suffix (normally ""). +exeext="$exeext" + +# Additional compiler flags for building library objects. +pic_flag=$lt_[]_LT_AC_TAGVAR(lt_prog_compiler_pic, $1) +pic_mode=$pic_mode + +# What is the maximum length of a command? +max_cmd_len=$lt_cv_sys_max_cmd_len + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_[]_LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1) + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Do we need the lib prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_[]_LT_AC_TAGVAR(lt_prog_compiler_static, $1) + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_[]_LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_[]_LT_AC_TAGVAR(export_dynamic_flag_spec, $1) + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_[]_LT_AC_TAGVAR(whole_archive_flag_spec, $1) + +# Compiler flag to generate thread-safe objects. +thread_safe_flag_spec=$lt_[]_LT_AC_TAGVAR(thread_safe_flag_spec, $1) + +# Library versioning type. +version_type=$version_type + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME. +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Commands used to build and install an old-style archive. +RANLIB=$lt_RANLIB +old_archive_cmds=$lt_[]_LT_AC_TAGVAR(old_archive_cmds, $1) +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_[]_LT_AC_TAGVAR(old_archive_from_new_cmds, $1) + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_[]_LT_AC_TAGVAR(old_archive_from_expsyms_cmds, $1) + +# Commands used to build and install a shared archive. 
+archive_cmds=$lt_[]_LT_AC_TAGVAR(archive_cmds, $1) +archive_expsym_cmds=$lt_[]_LT_AC_TAGVAR(archive_expsym_cmds, $1) +postinstall_cmds=$lt_postinstall_cmds +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to build a loadable module (assumed same as above if empty) +module_cmds=$lt_[]_LT_AC_TAGVAR(module_cmds, $1) +module_expsym_cmds=$lt_[]_LT_AC_TAGVAR(module_expsym_cmds, $1) + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + +# Dependencies to place before the objects being linked to create a +# shared library. +predep_objects=$lt_[]_LT_AC_TAGVAR(predep_objects, $1) + +# Dependencies to place after the objects being linked to create a +# shared library. +postdep_objects=$lt_[]_LT_AC_TAGVAR(postdep_objects, $1) + +# Dependencies to place before the objects being linked to create a +# shared library. +predeps=$lt_[]_LT_AC_TAGVAR(predeps, $1) + +# Dependencies to place after the objects being linked to create a +# shared library. +postdeps=$lt_[]_LT_AC_TAGVAR(postdeps, $1) + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_[]_LT_AC_TAGVAR(compiler_lib_search_path, $1) + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method == file_magic. +file_magic_cmd=$lt_file_magic_cmd + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_[]_LT_AC_TAGVAR(allow_undefined_flag, $1) + +# Flag that forces no undefined symbols. +no_undefined_flag=$lt_[]_LT_AC_TAGVAR(no_undefined_flag, $1) + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# Same as above, but a single script fragment to be evaled but not shown. +finish_eval=$lt_finish_eval + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm in a C name address pair +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# This is the shared library runtime path variable. +runpath_var=$runpath_var + +# This is the shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# How to hardcode a shared library path into an executable. +hardcode_action=$_LT_AC_TAGVAR(hardcode_action, $1) + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist. +hardcode_libdir_flag_spec=$lt_[]_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1) + +# If ld is used when linking, flag to hardcode \$libdir into +# a binary during linking. This must work even if \$libdir does +# not exist. +hardcode_libdir_flag_spec_ld=$lt_[]_LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1) + +# Whether we need a single -rpath flag with a separated argument. +hardcode_libdir_separator=$lt_[]_LT_AC_TAGVAR(hardcode_libdir_separator, $1) + +# Set to yes if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the +# resulting binary. 
+hardcode_direct=$_LT_AC_TAGVAR(hardcode_direct, $1) + +# Set to yes if using the -LDIR flag during linking hardcodes DIR into the +# resulting binary. +hardcode_minus_L=$_LT_AC_TAGVAR(hardcode_minus_L, $1) + +# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into +# the resulting binary. +hardcode_shlibpath_var=$_LT_AC_TAGVAR(hardcode_shlibpath_var, $1) + +# Set to yes if building a shared library automatically hardcodes DIR into the library +# and all subsequent libraries and executables linked against it. +hardcode_automatic=$_LT_AC_TAGVAR(hardcode_automatic, $1) + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at relink time. +variables_saved_for_relink="$variables_saved_for_relink" + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$_LT_AC_TAGVAR(link_all_deplibs, $1) + +# Compile-time system search path for libraries +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec + +# Run-time system search path for libraries +sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec + +# Fix the shell variable \$srcfile for the compiler. +fix_srcfile_path="$_LT_AC_TAGVAR(fix_srcfile_path, $1)" + +# Set to yes if exported symbols are required. +always_export_symbols=$_LT_AC_TAGVAR(always_export_symbols, $1) + +# The commands to list exported symbols. +export_symbols_cmds=$lt_[]_LT_AC_TAGVAR(export_symbols_cmds, $1) + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_[]_LT_AC_TAGVAR(exclude_expsyms, $1) + +# Symbols that must always be exported. +include_expsyms=$lt_[]_LT_AC_TAGVAR(include_expsyms, $1) + +ifelse([$1],[], +[# ### END LIBTOOL CONFIG], +[# ### END LIBTOOL TAG CONFIG: $tagname]) + +__EOF__ + +ifelse([$1],[], [ + case $host_os in + aix3*) + cat <<\EOF >> "$cfgfile" + +# AIX sometimes has problems with the GCC collect2 program. For some +# reason, if we set the COLLECT_NAMES environment variable, the problems +# vanish in a puff of smoke. +if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES +fi +EOF + ;; + esac + + # We use sed instead of cat because bash on DJGPP gets confused if + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? + sed '$q' "$ltmain" >> "$cfgfile" || (rm -f "$cfgfile"; exit 1) + + mv -f "$cfgfile" "$ofile" || \ + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" +]) +else + # If there is no Makefile yet, we rely on a make rule to execute + # `config.status --recheck' to rerun these tests and create the + # libtool script then. 
+ ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` + if test -f "$ltmain_in"; then + test -f Makefile && make "$ltmain" + fi +fi +])# AC_LIBTOOL_CONFIG + + +# AC_LIBTOOL_PROG_COMPILER_NO_RTTI([TAGNAME]) +# ------------------------------------------- +AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], +[AC_REQUIRE([_LT_AC_SYS_COMPILER])dnl + +_LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= + +if test "$GCC" = yes; then + _LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' + + AC_LIBTOOL_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions], + lt_cv_prog_compiler_rtti_exceptions, + [-fno-rtti -fno-exceptions], [], + [_LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"]) +fi +])# AC_LIBTOOL_PROG_COMPILER_NO_RTTI + + +# AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE +# --------------------------------- +AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], +[AC_REQUIRE([AC_CANONICAL_HOST]) +AC_REQUIRE([AC_PROG_NM]) +AC_REQUIRE([AC_OBJEXT]) +# Check for command to grab the raw symbol name followed by C symbol from nm. +AC_MSG_CHECKING([command to parse $NM output from $compiler object]) +AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe], +[ +# These are sane defaults that work on at least a few old systems. +# [They come from Ultrix. What could be older than Ultrix?!! ;)] + +# Character class describing NM global symbol codes. +symcode='[[BCDEGRST]]' + +# Regexp to match symbols that can be accessed directly from C. +sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)' + +# Transform an extracted symbol line into a proper C declaration +lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^. .* \(.*\)$/extern int \1;/p'" + +# Transform an extracted symbol line into symbol name and symbol address +lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'" + +# Define system-specific variables. +case $host_os in +aix*) + symcode='[[BCDT]]' + ;; +cygwin* | mingw* | pw32*) + symcode='[[ABCDGISTW]]' + ;; +hpux*) # Its linker distinguishes data from code symbols + if test "$host_cpu" = ia64; then + symcode='[[ABCDEGRST]]' + fi + lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'" + ;; +linux*) + if test "$host_cpu" = ia64; then + symcode='[[ABCDGIRSTW]]' + lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'" + fi + ;; +irix* | nonstopux*) + symcode='[[BCDEGRST]]' + ;; +osf*) + symcode='[[BCDEGQRST]]' + ;; +solaris* | sysv5*) + symcode='[[BDRT]]' + ;; +sysv4) + symcode='[[DFNSTU]]' + ;; +esac + +# Handle CRLF in mingw tool chain +opt_cr= +case $build_os in +mingw*) + opt_cr=`echo 'x\{0,1\}' | tr x '\015'` # option cr in regexp + ;; +esac + +# If we're using GNU nm, then use its standard symbol codes. +case `$NM -V 2>&1` in +*GNU* | *'with BFD'*) + symcode='[[ABCDGIRSTW]]' ;; +esac + +# Try without a prefix undercore, then with it. 
+for ac_symprfx in "" "_"; do + + # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. + symxfrm="\\1 $ac_symprfx\\2 \\2" + + # Write the raw and C identifiers. + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + + # Check to see that the pipe works correctly. + pipe_works=no + + rm -f conftest* + cat > conftest.$ac_ext < $nlist) && test -s "$nlist"; then + # Try sorting and uniquifying the output. + if sort "$nlist" | uniq > "$nlist"T; then + mv -f "$nlist"T "$nlist" + else + rm -f "$nlist"T + fi + + # Make sure that we snagged all the symbols we need. + if grep ' nm_test_var$' "$nlist" >/dev/null; then + if grep ' nm_test_func$' "$nlist" >/dev/null; then + cat < conftest.$ac_ext +#ifdef __cplusplus +extern "C" { +#endif + +EOF + # Now generate the symbol file. + eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | grep -v main >> conftest.$ac_ext' + + cat <> conftest.$ac_ext +#if defined (__STDC__) && __STDC__ +# define lt_ptr_t void * +#else +# define lt_ptr_t char * +# define const +#endif + +/* The mapping between symbol names and symbols. */ +const struct { + const char *name; + lt_ptr_t address; +} +lt_preloaded_symbols[[]] = +{ +EOF + $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (lt_ptr_t) \&\2},/" < "$nlist" | grep -v main >> conftest.$ac_ext + cat <<\EOF >> conftest.$ac_ext + {0, (lt_ptr_t) 0} +}; + +#ifdef __cplusplus +} +#endif +EOF + # Now try linking the two files. + mv conftest.$ac_objext conftstm.$ac_objext + lt_save_LIBS="$LIBS" + lt_save_CFLAGS="$CFLAGS" + LIBS="conftstm.$ac_objext" + CFLAGS="$CFLAGS$_LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)" + if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext}; then + pipe_works=yes + fi + LIBS="$lt_save_LIBS" + CFLAGS="$lt_save_CFLAGS" + else + echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD + fi + else + echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD + fi + else + echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD + fi + else + echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD + cat conftest.$ac_ext >&5 + fi + rm -f conftest* conftst* + + # Do not use the global_symbol_pipe unless it works. + if test "$pipe_works" = yes; then + break + else + lt_cv_sys_global_symbol_pipe= + fi +done +]) +if test -z "$lt_cv_sys_global_symbol_pipe"; then + lt_cv_sys_global_symbol_to_cdecl= +fi +if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then + AC_MSG_RESULT(failed) +else + AC_MSG_RESULT(ok) +fi +]) # AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE + + +# AC_LIBTOOL_PROG_COMPILER_PIC([TAGNAME]) +# --------------------------------------- +AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC], +[_LT_AC_TAGVAR(lt_prog_compiler_wl, $1)= +_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= +_LT_AC_TAGVAR(lt_prog_compiler_static, $1)= + +AC_MSG_CHECKING([for $compiler option to produce PIC]) + ifelse([$1],[CXX],[ + # C++ specific cases for pic, static, wl, etc. + if test "$GXX" = yes; then + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; + amigaos*) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. 
+ _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' + ;; + beos* | cygwin* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + mingw* | os2* | pw32*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT' + ;; + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' + ;; + *djgpp*) + # DJGPP does not support shared libraries at all + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= + ;; + sysv4*MP*) + if test -d /usr/nec; then + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic + fi + ;; + hpux*) + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case "$host_cpu" in + hppa*64*|ia64*) + ;; + *) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + ;; + *) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + else + case $host_os in + aix4* | aix5*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + else + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' + fi + ;; + chorus*) + case $cc_basename in + cxch68*) + # Green Hills C++ Compiler + # _LT_AC_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" + ;; + esac + ;; + darwin*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + case $cc_basename in + xlc*) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-qnocommon' + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + ;; + esac + ;; + dgux*) + case $cc_basename in + ec++*) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + ;; + ghcx*) + # Green Hills C++ Compiler + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + *) + ;; + esac + ;; + freebsd* | kfreebsd*-gnu | dragonfly*) + # FreeBSD uses GNU C++ + ;; + hpux9* | hpux10* | hpux11*) + case $cc_basename in + CC*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)="${ac_cv_prog_cc_wl}-a ${ac_cv_prog_cc_wl}archive" + if test "$host_cpu" != ia64; then + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + fi + ;; + aCC*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)="${ac_cv_prog_cc_wl}-a ${ac_cv_prog_cc_wl}archive" + case "$host_cpu" in + hppa*64*|ia64*) + # +Z the default + ;; + *) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + ;; + esac + ;; + *) + ;; + esac + ;; + irix5* | irix6* | nonstopux*) + case $cc_basename in + CC*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + # CC pic flag -KPIC is the default. + ;; + *) + ;; + esac + ;; + linux*) + case $cc_basename in + KCC*) + # KAI C++ Compiler + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + icpc* | ecpc*) + # Intel C++ + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + pgCC*) + # Portland Group C++ compiler. 
+ _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + cxx*) + # Compaq C++ + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + *) + ;; + esac + ;; + lynxos*) + ;; + m88k*) + ;; + mvs*) + case $cc_basename in + cxx*) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall' + ;; + *) + ;; + esac + ;; + netbsd*) + ;; + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' + ;; + RCC*) + # Rational C++ 2.4.1 + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + cxx*) + # Digital/Compaq C++ + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + *) + ;; + esac + ;; + psos*) + ;; + sco*) + case $cc_basename in + CC*) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + *) + ;; + esac + ;; + solaris*) + case $cc_basename in + CC*) + # Sun C++ 4.2, 5.x and Centerline C++ + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + ;; + gcx*) + # Green Hills C++ Compiler + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + ;; + *) + ;; + esac + ;; + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + lcc*) + # Lucid + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + *) + ;; + esac + ;; + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + ;; + *) + ;; + esac + ;; + unixware*) + ;; + vxworks*) + ;; + *) + _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + esac + fi +], +[ + if test "$GCC" = yes; then + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; + + amigaos*) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' + ;; + + beos* | cygwin* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + + mingw* | pw32* | os2*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT' + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' + ;; + + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. 
+ _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + enable_shared=no + ;; + + sysv4*MP*) + if test -d /usr/nec; then + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic + fi + ;; + + hpux*) + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case "$host_cpu" in + hppa*64*|ia64*) + # +Z the default + ;; + *) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + ;; + + *) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. + case $host_os in + aix*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + else + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' + fi + ;; + darwin*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + case $cc_basename in + xlc*) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-qnocommon' + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + ;; + esac + ;; + + mingw* | pw32* | os2*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT' + ;; + + hpux9* | hpux10* | hpux11*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case "$host_cpu" in + hppa*64*|ia64*) + # +Z the default + ;; + *) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' + ;; + + irix5* | irix6* | nonstopux*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # PIC (with -KPIC) is the default. + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + + newsos6) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + linux*) + case $cc_basename in + icc* | ecc*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + pgcc* | pgf77* | pgf90*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + ccc*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # All Alpha code is PIC. + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + esac + ;; + + osf3* | osf4* | osf5*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # All OSF/1 code is PIC. 
+ _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + + sco3.2v5*) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-Kpic' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-dn' + ;; + + solaris*) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + case $cc_basename in + f77* | f90* | f95*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';; + *) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';; + esac + ;; + + sunos4*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + sysv4*MP*) + if test -d /usr/nec ;then + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; + + unicos*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + + uts4*) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + *) + _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + esac + fi +]) +AC_MSG_RESULT([$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)]) + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)"; then + AC_LIBTOOL_COMPILER_OPTION([if $compiler PIC flag $_LT_AC_TAGVAR(lt_prog_compiler_pic, $1) works], + _LT_AC_TAGVAR(lt_prog_compiler_pic_works, $1), + [$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)ifelse([$1],[],[ -DPIC],[ifelse([$1],[CXX],[ -DPIC],[])])], [], + [case $_LT_AC_TAGVAR(lt_prog_compiler_pic, $1) in + "" | " "*) ;; + *) _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)" ;; + esac], + [_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no]) +fi +case "$host_os" in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= + ;; + *) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)ifelse([$1],[],[ -DPIC],[ifelse([$1],[CXX],[ -DPIC],[])])" + ;; +esac +]) + + +# AC_LIBTOOL_PROG_LD_SHLIBS([TAGNAME]) +# ------------------------------------ +# See if the linker supports building shared libraries. +AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS], +[AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) +ifelse([$1],[CXX],[ + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + case $host_os in + aix4* | aix5*) + # If we're using GNU nm, then we don't want the "-C" option. 
+ # -C means demangle to AIX nm, but means don't demangle with GNU nm + if $NM -V 2>&1 | grep 'GNU' > /dev/null; then + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\[$]2 == "T") || (\[$]2 == "D") || (\[$]2 == "B")) && ([substr](\[$]3,1,1) != ".")) { print \[$]3 } }'\'' | sort -u > $export_symbols' + else + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\[$]2 == "T") || (\[$]2 == "D") || (\[$]2 == "B")) && ([substr](\[$]3,1,1) != ".")) { print \[$]3 } }'\'' | sort -u > $export_symbols' + fi + ;; + pw32*) + _LT_AC_TAGVAR(export_symbols_cmds, $1)="$ltdll_cmds" + ;; + cygwin* | mingw*) + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]] /s/.* \([[^ ]]*\)/\1 DATA/;/^.* __nm__/s/^.* __nm__\([[^ ]]*\) [[^ ]]*/\1 DATA/;/^I /d;/^[[AITW]] /s/.* //'\'' | sort | uniq > $export_symbols' + ;; + *) + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + ;; + esac +],[ + runpath_var= + _LT_AC_TAGVAR(allow_undefined_flag, $1)= + _LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=no + _LT_AC_TAGVAR(archive_cmds, $1)= + _LT_AC_TAGVAR(archive_expsym_cmds, $1)= + _LT_AC_TAGVAR(old_archive_From_new_cmds, $1)= + _LT_AC_TAGVAR(old_archive_from_expsyms_cmds, $1)= + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)= + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)= + _LT_AC_TAGVAR(thread_safe_flag_spec, $1)= + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)= + _LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)= + _LT_AC_TAGVAR(hardcode_direct, $1)=no + _LT_AC_TAGVAR(hardcode_minus_L, $1)=no + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=unsupported + _LT_AC_TAGVAR(link_all_deplibs, $1)=unknown + _LT_AC_TAGVAR(hardcode_automatic, $1)=no + _LT_AC_TAGVAR(module_cmds, $1)= + _LT_AC_TAGVAR(module_expsym_cmds, $1)= + _LT_AC_TAGVAR(always_export_symbols, $1)=no + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + _LT_AC_TAGVAR(include_expsyms, $1)= + # exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ` (' and `)$', so one must not match beginning or + # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', + # as well as any symbol that contains `d'. + _LT_AC_TAGVAR(exclude_expsyms, $1)="_GLOBAL_OFFSET_TABLE_" + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + extract_expsyms_cmds= + # Just being paranoid about ensuring that cc_basename is set. + _LT_CC_BASENAME([$compiler]) + case $host_os in + cygwin* | mingw* | pw32*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. 
+ if test "$GCC" != yes; then + with_gnu_ld=no + fi + ;; + openbsd*) + with_gnu_ld=no + ;; + esac + + _LT_AC_TAGVAR(ld_shlibs, $1)=yes + if test "$with_gnu_ld" = yes; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='${wl}' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir' + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. + if $LD --help 2>&1 | grep 'no-whole-archive' > /dev/null; then + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)= + fi + supports_anon_versioning=no + case `$LD -v 2>/dev/null` in + *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + + # See if GNU ld supports shared libraries. + case $host_os in + aix3* | aix4* | aix5*) + # On AIX/PPC, the GNU linker is very broken + if test "$host_cpu" != ia64; then + _LT_AC_TAGVAR(ld_shlibs, $1)=no + cat <&2 + +*** Warning: the GNU linker, at least up to release 2.9.1, is reported +*** to be unable to reliably create shared libraries on AIX. +*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to modify your PATH +*** so that a non-GNU linker is found, and then restart. + +EOF + fi + ;; + + amigaos*) + _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + + # Samuel A. Falvo II reports + # that the semantics of dynamic libraries on AmigaOS, at least up + # to version 4, is to share data among multiple programs linked + # with the same dynamic library. Since this doesn't match the + # behavior of shared libraries on other platforms, we can't use + # them. + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + + beos*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + cygwin* | mingw* | pw32*) + # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, + # as there is no search path for DLLs. 
+ _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_AC_TAGVAR(always_export_symbols, $1)=no + _LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]] /s/.* \([[^ ]]*\)/\1 DATA/'\'' | $SED -e '\''/^[[AITW]] /s/.* //'\'' | sort | uniq > $export_symbols' + + if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--image-base=0x10000000 ${wl}--out-implib,$lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--image-base=0x10000000 ${wl}--out-implib,$lib' + else + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + linux*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + tmp_addflag= + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)= + ;; + pgf77* | pgf90* ) # Portland Group f77 and f90 compilers + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)= + tmp_addflag=' -fpic -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + esac + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + + if test $supports_anon_versioning = yes; then + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + $echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + else + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + + solaris* | sysv5*) + if $LD -v 2>&1 | grep 'BFD 2\.8' > /dev/null; then + _LT_AC_TAGVAR(ld_shlibs, $1)=no + cat <&2 + +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. 
+ +EOF + elif $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + sunos4*) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + *) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + + if test "$_LT_AC_TAGVAR(ld_shlibs, $1)" = no; then + runpath_var= + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)= + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)= + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_AC_TAGVAR(always_export_symbols, $1)=yes + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + if test "$GCC" = yes && test -z "$link_static_flag"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + _LT_AC_TAGVAR(hardcode_direct, $1)=unsupported + fi + ;; + + aix4* | aix5*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to AIX nm, but means don't demangle with GNU nm + if $NM -V 2>&1 | grep 'GNU' > /dev/null; then + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\[$]2 == "T") || (\[$]2 == "D") || (\[$]2 == "B")) && ([substr](\[$]3,1,1) != ".")) { print \[$]3 } }'\'' | sort -u > $export_symbols' + else + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\[$]2 == "T") || (\[$]2 == "D") || (\[$]2 == "B")) && ([substr](\[$]3,1,1) != ".")) { print \[$]3 } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[[23]]|aix4.[[23]].*|aix5*) + for ld_flag in $LDFLAGS; do + if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then + aix_use_runtimelinking=yes + break + fi + done + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. 
If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + _LT_AC_TAGVAR(archive_cmds, $1)='' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + + if test "$GCC" = yes; then + case $host_os in aix4.[[012]]|aix4.[[012]].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && \ + strings "$collect2name" | grep resolve_lib_name >/dev/null + then + # We have reworked collect2 + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + else + # We have old collect2 + _LT_AC_TAGVAR(hardcode_direct, $1)=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)= + fi + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + _LT_AC_TAGVAR(always_export_symbols, $1)=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + _LT_AC_TAGVAR(allow_undefined_flag, $1)='-berok' + # Determine the default libpath from the value encoded in an empty executable. + _LT_AC_SYS_LIBPATH_AIX + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$no_entry_flag \${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' + _LT_AC_TAGVAR(allow_undefined_flag, $1)="-z nodefs" + _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$no_entry_flag \${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an empty executable. + _LT_AC_SYS_LIBPATH_AIX + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. 
+ _LT_AC_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' + # -bexpall does not export symbols beginning with underscore (_) + _LT_AC_TAGVAR(always_export_symbols, $1)=yes + # Exported symbols can be pulled into shared objects from archives + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)=' ' + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=yes + # This is similar to how AIX traditionally builds it's shared libraries. + _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${wl}-bE:$export_symbols ${wl}-bnoentry${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + amigaos*) + _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + # see comment about different semantics on the GNU ld section + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + + bsdi[[45]]*) + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic + ;; + + cygwin* | mingw* | pw32*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' + _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | $SED -e '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + _LT_AC_TAGVAR(old_archive_From_new_cmds, $1)='true' + # FIXME: Should let the user specify the lib program. 
+ _LT_AC_TAGVAR(old_archive_cmds, $1)='lib /OUT:$oldlib$oldobjs$old_deplibs' + _LT_AC_TAGVAR(fix_srcfile_path, $1)='`cygpath -w "$srcfile"`' + _LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + ;; + + darwin* | rhapsody*) + case "$host_os" in + rhapsody* | darwin1.[[012]]) + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}suppress' + ;; + *) # Darwin 1.3 on + if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + else + case ${MACOSX_DEPLOYMENT_TARGET} in + 10.[[012]]) + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + ;; + 10.*) + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}dynamic_lookup' + ;; + esac + fi + ;; + esac + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_AC_TAGVAR(hardcode_direct, $1)=no + _LT_AC_TAGVAR(hardcode_automatic, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=unsupported + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='' + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + if test "$GCC" = yes ; then + output_verbose_link_cmd='echo' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + case $cc_basename in + xlc*) + output_verbose_link_cmd='echo' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring' + _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + ;; + *) + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac + fi + ;; + + dgux*) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + freebsd1*) + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + 
+ # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). + freebsd2.2*) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + # Unfortunately, older versions of FreeBSD 2 do not have this feature. + freebsd2*) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. + freebsd* | kfreebsd*-gnu | dragonfly*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + hpux9*) + if test "$GCC" = yes; then + _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + ;; + + hpux10* | hpux11*) + if test "$GCC" = yes -a "$with_gnu_ld" = no; then + case "$host_cpu" in + hppa*64*|ia64*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case "$host_cpu" in + hppa*64*|ia64*) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -b +h $soname -o $lib $libobjs $deplibs $linker_flags' + ;; + *) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + ;; + esac + fi + if test "$with_gnu_ld" = no; then + case "$host_cpu" in + hppa*64*) + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='+b $libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_AC_TAGVAR(hardcode_direct, $1)=no + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + ia64*) + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(hardcode_direct, $1)=no + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + ;; + *) + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + ;; + esac + fi + ;; + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='-rpath $libdir' + fi + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + newsos6) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + openbsd*) + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + else + case $host_os in + openbsd[[01]].* | openbsd2.[[0-7]] | openbsd2.[[0-7]].*) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + ;; + *) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + ;; + esac + fi + ;; + + os2*) + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_AC_TAGVAR(archive_cmds, $1)='$echo "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$echo "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$echo DATA >> $output_objdir/$libname.def~$echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~$echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs 
$compiler_flags $output_objdir/$libname.def' + _LT_AC_TAGVAR(old_archive_From_new_cmds, $1)='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' + ;; + + osf3*) + if test "$GCC" = yes; then + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + fi + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + ;; + + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + else + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; echo "-hidden">> $lib.exp~ + $LD -shared${allow_undefined_flag} -input $lib.exp $linker_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~$rm $lib.exp' + + # Both c and cxx compiler support -rpath directly + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + fi + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + ;; + + sco3.2v5*) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + ;; + + solaris*) + _LT_AC_TAGVAR(no_undefined_flag, $1)=' -z text' + if test "$GCC" = yes; then + wlarc='${wl}' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -shared ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$rm $lib.exp' + else + wlarc='' + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp' + fi + 
_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + # The compiler driver will combine linker options so we + # cannot just pass the convience library names through + # without $wl, iff we do not link with $LD. + # Luckily, gcc supports the same syntax we need for Sun Studio. + # Supported since Solaris 2.6 (maybe 2.5.1?) + case $wlarc in + '') + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' ;; + *) + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}-z ${wl}defaultextract' ;; + esac ;; + esac + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + ;; + + sunos4*) + if test "x$host_vendor" = xsequent; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + sysv4) + case $host_vendor in + sni) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs' + _LT_AC_TAGVAR(hardcode_direct, $1)=no + ;; + motorola) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_direct, $1)=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + sysv4.3*) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + _LT_AC_TAGVAR(ld_shlibs, $1)=yes + fi + ;; + + sysv4.2uw2*) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_minus_L, $1)=no + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + hardcode_runpath_var=yes + runpath_var=LD_RUN_PATH + ;; + + sysv5OpenUNIX8* | sysv5UnixWare7* | sysv5uw[[78]]* | unixware7*) + _LT_AC_TAGVAR(no_undefined_flag, $1)='${wl}-z ${wl}text' + if test "$GCC" = yes; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + runpath_var='LD_RUN_PATH' + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + sysv5*) + _LT_AC_TAGVAR(no_undefined_flag, $1)=' -z text' + # $CC -shared 
without GNU ld will not create a library from C++ + # object files and a static libstdc++, better avoid it by now + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)= + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var='LD_RUN_PATH' + ;; + + uts4*) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + *) + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac + fi +]) +AC_MSG_RESULT([$_LT_AC_TAGVAR(ld_shlibs, $1)]) +test "$_LT_AC_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +# +# Do we need to explicitly link libc? +# +case "x$_LT_AC_TAGVAR(archive_cmds_need_lc, $1)" in +x|xyes) + # Assume -lc should be added + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $_LT_AC_TAGVAR(archive_cmds, $1) in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + AC_MSG_CHECKING([whether -lc should be explicitly linked in]) + $rm conftest* + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + if AC_TRY_EVAL(ac_compile) 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$_LT_AC_TAGVAR(lt_prog_compiler_wl, $1) + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$_LT_AC_TAGVAR(allow_undefined_flag, $1) + _LT_AC_TAGVAR(allow_undefined_flag, $1)= + if AC_TRY_EVAL(_LT_AC_TAGVAR(archive_cmds, $1) 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1) + then + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + else + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=yes + fi + _LT_AC_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $rm conftest* + AC_MSG_RESULT([$_LT_AC_TAGVAR(archive_cmds_need_lc, $1)]) + ;; + esac + fi + ;; +esac +])# AC_LIBTOOL_PROG_LD_SHLIBS + + +# _LT_AC_FILE_LTDLL_C +# ------------------- +# Be careful that the start marker always follows a newline. 
+AC_DEFUN([_LT_AC_FILE_LTDLL_C], [ +# /* ltdll.c starts here */ +# #define WIN32_LEAN_AND_MEAN +# #include +# #undef WIN32_LEAN_AND_MEAN +# #include +# +# #ifndef __CYGWIN__ +# # ifdef __CYGWIN32__ +# # define __CYGWIN__ __CYGWIN32__ +# # endif +# #endif +# +# #ifdef __cplusplus +# extern "C" { +# #endif +# BOOL APIENTRY DllMain (HINSTANCE hInst, DWORD reason, LPVOID reserved); +# #ifdef __cplusplus +# } +# #endif +# +# #ifdef __CYGWIN__ +# #include +# DECLARE_CYGWIN_DLL( DllMain ); +# #endif +# HINSTANCE __hDllInstance_base; +# +# BOOL APIENTRY +# DllMain (HINSTANCE hInst, DWORD reason, LPVOID reserved) +# { +# __hDllInstance_base = hInst; +# return TRUE; +# } +# /* ltdll.c ends here */ +])# _LT_AC_FILE_LTDLL_C + + +# _LT_AC_TAGVAR(VARNAME, [TAGNAME]) +# --------------------------------- +AC_DEFUN([_LT_AC_TAGVAR], [ifelse([$2], [], [$1], [$1_$2])]) + + +# old names +AC_DEFUN([AM_PROG_LIBTOOL], [AC_PROG_LIBTOOL]) +AC_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)]) +AC_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)]) +AC_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)]) +AC_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)]) +AC_DEFUN([AM_PROG_LD], [AC_PROG_LD]) +AC_DEFUN([AM_PROG_NM], [AC_PROG_NM]) + +# This is just to silence aclocal about the macro not being used +ifelse([AC_DISABLE_FAST_INSTALL]) + +AC_DEFUN([LT_AC_PROG_GCJ], +[AC_CHECK_TOOL(GCJ, gcj, no) + test "x${GCJFLAGS+set}" = xset || GCJFLAGS="-g -O2" + AC_SUBST(GCJFLAGS) +]) + +AC_DEFUN([LT_AC_PROG_RC], +[AC_CHECK_TOOL(RC, windres, no) +]) + +# NOTE: This macro has been submitted for inclusion into # +# GNU Autoconf as AC_PROG_SED. When it is available in # +# a released version of Autoconf we should remove this # +# macro and use it instead. # +# LT_AC_PROG_SED +# -------------- +# Check for a fully-functional sed program, that truncates +# as few characters as possible. Prefer GNU sed if found. +AC_DEFUN([LT_AC_PROG_SED], +[AC_MSG_CHECKING([for a sed that does not truncate output]) +AC_CACHE_VAL(lt_cv_path_SED, +[# Loop through the user's path and test for sed and gsed. +# Then use that list of sed's as ones to test for truncation. +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for lt_ac_prog in sed gsed; do + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then + lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext" + fi + done + done +done +lt_ac_max=0 +lt_ac_count=0 +# Add /usr/xpg4/bin/sed as it is typically found on Solaris +# along with /bin/sed that truncates output. +for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do + test ! -f $lt_ac_sed && continue + cat /dev/null > conftest.in + lt_ac_count=0 + echo $ECHO_N "0123456789$ECHO_C" >conftest.in + # Check for GNU sed and select it if it is found. 
+ if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then + lt_cv_path_SED=$lt_ac_sed + break + fi + while true; do + cat conftest.in conftest.in >conftest.tmp + mv conftest.tmp conftest.in + cp conftest.in conftest.nl + echo >>conftest.nl + $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break + cmp -s conftest.out conftest.nl || break + # 10000 chars as input seems more than enough + test $lt_ac_count -gt 10 && break + lt_ac_count=`expr $lt_ac_count + 1` + if test $lt_ac_count -gt $lt_ac_max; then + lt_ac_max=$lt_ac_count + lt_cv_path_SED=$lt_ac_sed + fi + done +done +]) +SED=$lt_cv_path_SED +AC_MSG_RESULT([$SED]) +]) + +# Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_AUTOMAKE_VERSION(VERSION) +# ---------------------------- +# Automake X.Y traces this macro to ensure aclocal.m4 has been +# generated from the m4 files accompanying Automake X.Y. +AC_DEFUN([AM_AUTOMAKE_VERSION], [am__api_version="1.9"]) + +# AM_SET_CURRENT_AUTOMAKE_VERSION +# ------------------------------- +# Call AM_AUTOMAKE_VERSION so it can be traced. +# This function is AC_REQUIREd by AC_INIT_AUTOMAKE. +AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], + [AM_AUTOMAKE_VERSION([1.9.5])]) + +# AM_AUX_DIR_EXPAND -*- Autoconf -*- + +# Copyright (C) 2001, 2003, 2005 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets +# $ac_aux_dir to `$srcdir/foo'. In other projects, it is set to +# `$srcdir', `$srcdir/..', or `$srcdir/../..'. +# +# Of course, Automake must honor this variable whenever it calls a +# tool from the auxiliary directory. The problem is that $srcdir (and +# therefore $ac_aux_dir as well) can be either absolute or relative, +# depending on how configure is run. This is pretty annoying, since +# it makes $ac_aux_dir quite unusable in subdirectories: in the top +# source directory, any form will work fine, but in subdirectories a +# relative path needs to be adjusted first. +# +# $ac_aux_dir/missing +# fails when called from a subdirectory if $ac_aux_dir is relative +# $top_srcdir/$ac_aux_dir/missing +# fails if $ac_aux_dir is absolute, +# fails when called from a subdirectory in a VPATH build with +# a relative $ac_aux_dir +# +# The reason of the latter failure is that $top_srcdir and $ac_aux_dir +# are both prefixed by $srcdir. In an in-source build this is usually +# harmless because $srcdir is `.', but things will broke when you +# start a VPATH build or use an absolute $srcdir. +# +# So we could use something similar to $top_srcdir/$ac_aux_dir/missing, +# iff we strip the leading $srcdir from $ac_aux_dir. That would be: +# am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"` +# and then we would define $MISSING as +# MISSING="\${SHELL} $am_aux_dir/missing" +# This will work as long as MISSING is not called from configure, because +# unfortunately $(top_srcdir) has no meaning in configure. +# However there are other variables, like CC, which are often used in +# configure, and could therefore not use this "fixed" $ac_aux_dir. +# +# Another solution, used here, is to always expand $ac_aux_dir to an +# absolute PATH. 
The drawback is that using absolute paths prevent a +# configured tree to be moved without reconfiguration. + +AC_DEFUN([AM_AUX_DIR_EXPAND], +[dnl Rely on autoconf to set up CDPATH properly. +AC_PREREQ([2.50])dnl +# expand $ac_aux_dir to an absolute path +am_aux_dir=`cd $ac_aux_dir && pwd` +]) + +# AM_CONDITIONAL -*- Autoconf -*- + +# Copyright (C) 1997, 2000, 2001, 2003, 2004, 2005 +# Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 7 + +# AM_CONDITIONAL(NAME, SHELL-CONDITION) +# ------------------------------------- +# Define a conditional. +AC_DEFUN([AM_CONDITIONAL], +[AC_PREREQ(2.52)dnl + ifelse([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])], + [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl +AC_SUBST([$1_TRUE]) +AC_SUBST([$1_FALSE]) +if $2; then + $1_TRUE= + $1_FALSE='#' +else + $1_TRUE='#' + $1_FALSE= +fi +AC_CONFIG_COMMANDS_PRE( +[if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then + AC_MSG_ERROR([[conditional "$1" was never defined. +Usually this means the macro was only invoked conditionally.]]) +fi])]) + + +# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005 +# Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 8 + +# There are a few dirty hacks below to avoid letting `AC_PROG_CC' be +# written in clear, in which case automake, when reading aclocal.m4, +# will think it sees a *use*, and therefore will trigger all it's +# C support machinery. Also note that it means that autoscan, seeing +# CC etc. in the Makefile, will ask for an AC_PROG_CC use... + + +# _AM_DEPENDENCIES(NAME) +# ---------------------- +# See how the compiler implements dependency checking. +# NAME is "CC", "CXX", "GCJ", or "OBJC". +# We try a few techniques and use that to set a single cache variable. +# +# We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was +# modified to invoke _AM_DEPENDENCIES(CC); we would have a circular +# dependency, and given that the user is not expected to run this macro, +# just rely on AC_PROG_CC. +AC_DEFUN([_AM_DEPENDENCIES], +[AC_REQUIRE([AM_SET_DEPDIR])dnl +AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl +AC_REQUIRE([AM_MAKE_INCLUDE])dnl +AC_REQUIRE([AM_DEP_TRACK])dnl + +ifelse([$1], CC, [depcc="$CC" am_compiler_list=], + [$1], CXX, [depcc="$CXX" am_compiler_list=], + [$1], OBJC, [depcc="$OBJC" am_compiler_list='gcc3 gcc'], + [$1], GCJ, [depcc="$GCJ" am_compiler_list='gcc3 gcc'], + [depcc="$$1" am_compiler_list=]) + +AC_CACHE_CHECK([dependency style of $depcc], + [am_cv_$1_dependencies_compiler_type], +[if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. For + # instance it was reported that on HP-UX the gcc test will end up + # making a dummy file named `D' -- because `-MD' means `put the output + # in D'. + mkdir conftest.dir + # Copy depcomp to subdir because otherwise we won't find it if we're + # using a relative directory. + cp "$am_depcomp" conftest.dir + cd conftest.dir + # We will build objects and dependencies in a subdirectory because + # it helps to detect inapplicable dependency modes. 
For instance + # both Tru64's cc and ICC support -MD to output dependencies as a + # side effect of compilation, but ICC will put the dependencies in + # the current directory while Tru64 will put them in the object + # directory. + mkdir sub + + am_cv_$1_dependencies_compiler_type=none + if test "$am_compiler_list" = ""; then + am_compiler_list=`sed -n ['s/^#*\([a-zA-Z0-9]*\))$/\1/p'] < ./depcomp` + fi + for depmode in $am_compiler_list; do + # Setup a source with many dependencies, because some compilers + # like to wrap large dependency lists on column 80 (with \), and + # we should not choose a depcomp mode which is confused by this. + # + # We need to recreate these files for each test, as the compiler may + # overwrite some of them when testing with obscure command lines. + # This happens at least with the AIX C compiler. + : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c + # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with + # Solaris 8's {/usr,}/bin/sh. + touch sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + + case $depmode in + nosideeffect) + # after this tag, mechanisms are not by side-effect, so they'll + # only be used when explicitly requested + if test "x$enable_dependency_tracking" = xyes; then + continue + else + break + fi + ;; + none) break ;; + esac + # We check with `-c' and `-o' for the sake of the "dashmstdout" + # mode. It turns out that the SunPro C++ compiler does not properly + # handle `-M -o', and we need to detect this. + if depmode=$depmode \ + source=sub/conftest.c object=sub/conftest.${OBJEXT-o} \ + depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ + $SHELL ./depcomp $depcc -c -o sub/conftest.${OBJEXT-o} sub/conftest.c \ + >/dev/null 2>conftest.err && + grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && + grep sub/conftest.${OBJEXT-o} sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings + # or remarks (even with -Werror). So we grep stderr for any message + # that says an option was ignored or not supported. + # When given -MP, icc 7.0 and 7.1 complain thusly: + # icc: Command line warning: ignoring option '-M'; no argument required + # The diagnosis changed in icc 8.0: + # icc: Command line remark: option '-MP' not supported + if (grep 'ignoring option' conftest.err || + grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_$1_dependencies_compiler_type=$depmode + break + fi + fi + done + + cd .. + rm -rf conftest.dir +else + am_cv_$1_dependencies_compiler_type=none +fi +]) +AC_SUBST([$1DEPMODE], [depmode=$am_cv_$1_dependencies_compiler_type]) +AM_CONDITIONAL([am__fastdep$1], [ + test "x$enable_dependency_tracking" != xno \ + && test "$am_cv_$1_dependencies_compiler_type" = gcc3]) +]) + + +# AM_SET_DEPDIR +# ------------- +# Choose a directory name for dependency files. 
+# This macro is AC_REQUIREd in _AM_DEPENDENCIES +AC_DEFUN([AM_SET_DEPDIR], +[AC_REQUIRE([AM_SET_LEADING_DOT])dnl +AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl +]) + + +# AM_DEP_TRACK +# ------------ +AC_DEFUN([AM_DEP_TRACK], +[AC_ARG_ENABLE(dependency-tracking, +[ --disable-dependency-tracking speeds up one-time build + --enable-dependency-tracking do not reject slow dependency extractors]) +if test "x$enable_dependency_tracking" != xno; then + am_depcomp="$ac_aux_dir/depcomp" + AMDEPBACKSLASH='\' +fi +AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno]) +AC_SUBST([AMDEPBACKSLASH]) +]) + +# Generate code to set up dependency tracking. -*- Autoconf -*- + +# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005 +# Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +#serial 3 + +# _AM_OUTPUT_DEPENDENCY_COMMANDS +# ------------------------------ +AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS], +[for mf in $CONFIG_FILES; do + # Strip MF so we end up with the name of the file. + mf=`echo "$mf" | sed -e 's/:.*$//'` + # Check whether this is an Automake generated Makefile or not. + # We used to match only the files named `Makefile.in', but + # some people rename them; so instead we look at the file content. + # Grep'ing the first line is not enough: some people post-process + # each Makefile.in and add a new line on top of each file to say so. + # So let's grep whole file. + if grep '^#.*generated by automake' $mf > /dev/null 2>&1; then + dirpart=`AS_DIRNAME("$mf")` + else + continue + fi + # Extract the definition of DEPDIR, am__include, and am__quote + # from the Makefile without running `make'. + DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` + test -z "$DEPDIR" && continue + am__include=`sed -n 's/^am__include = //p' < "$mf"` + test -z "am__include" && continue + am__quote=`sed -n 's/^am__quote = //p' < "$mf"` + # When using ansi2knr, U may be empty or an underscore; expand it + U=`sed -n 's/^U = //p' < "$mf"` + # Find all dependency output files, they are included files with + # $(DEPDIR) in their names. We invoke sed twice because it is the + # simplest approach to changing $(DEPDIR) to its actual value in the + # expansion. + for file in `sed -n " + s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ + sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do + # Make sure the directory exists. + test -f "$dirpart/$file" && continue + fdir=`AS_DIRNAME(["$file"])` + AS_MKDIR_P([$dirpart/$fdir]) + # echo "creating $dirpart/$file" + echo '# dummy' > "$dirpart/$file" + done +done +])# _AM_OUTPUT_DEPENDENCY_COMMANDS + + +# AM_OUTPUT_DEPENDENCY_COMMANDS +# ----------------------------- +# This macro should only be invoked once -- use via AC_REQUIRE. +# +# This code is only required when automatic dependency tracking +# is enabled. FIXME. This creates each `.P' file that we will +# need in order to bootstrap the dependency handling code. +AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS], +[AC_CONFIG_COMMANDS([depfiles], + [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS], + [AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"]) +]) + +# Do all the work for Automake. -*- Autoconf -*- + +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005 +# Free Software Foundation, Inc. 
+# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 12 + +# This macro actually does too much. Some checks are only needed if +# your package does certain things. But this isn't really a big deal. + +# AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE]) +# AM_INIT_AUTOMAKE([OPTIONS]) +# ----------------------------------------------- +# The call with PACKAGE and VERSION arguments is the old style +# call (pre autoconf-2.50), which is being phased out. PACKAGE +# and VERSION should now be passed to AC_INIT and removed from +# the call to AM_INIT_AUTOMAKE. +# We support both call styles for the transition. After +# the next Automake release, Autoconf can make the AC_INIT +# arguments mandatory, and then we can depend on a new Autoconf +# release and drop the old call support. +AC_DEFUN([AM_INIT_AUTOMAKE], +[AC_PREREQ([2.58])dnl +dnl Autoconf wants to disallow AM_ names. We explicitly allow +dnl the ones we care about. +m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl +AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl +AC_REQUIRE([AC_PROG_INSTALL])dnl +# test to see if srcdir already configured +if test "`cd $srcdir && pwd`" != "`pwd`" && + test -f $srcdir/config.status; then + AC_MSG_ERROR([source directory already configured; run "make distclean" there first]) +fi + +# test whether we have cygpath +if test -z "$CYGPATH_W"; then + if (cygpath --version) >/dev/null 2>/dev/null; then + CYGPATH_W='cygpath -w' + else + CYGPATH_W=echo + fi +fi +AC_SUBST([CYGPATH_W]) + +# Define the identity of the package. +dnl Distinguish between old-style and new-style calls. +m4_ifval([$2], +[m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl + AC_SUBST([PACKAGE], [$1])dnl + AC_SUBST([VERSION], [$2])], +[_AM_SET_OPTIONS([$1])dnl + AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl + AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl + +_AM_IF_OPTION([no-define],, +[AC_DEFINE_UNQUOTED(PACKAGE, "$PACKAGE", [Name of package]) + AC_DEFINE_UNQUOTED(VERSION, "$VERSION", [Version number of package])])dnl + +# Some tools Automake needs. +AC_REQUIRE([AM_SANITY_CHECK])dnl +AC_REQUIRE([AC_ARG_PROGRAM])dnl +AM_MISSING_PROG(ACLOCAL, aclocal-${am__api_version}) +AM_MISSING_PROG(AUTOCONF, autoconf) +AM_MISSING_PROG(AUTOMAKE, automake-${am__api_version}) +AM_MISSING_PROG(AUTOHEADER, autoheader) +AM_MISSING_PROG(MAKEINFO, makeinfo) +AM_PROG_INSTALL_SH +AM_PROG_INSTALL_STRIP +AC_REQUIRE([AM_PROG_MKDIR_P])dnl +# We need awk for the "check" target. The system "awk" is bad on +# some platforms. +AC_REQUIRE([AC_PROG_AWK])dnl +AC_REQUIRE([AC_PROG_MAKE_SET])dnl +AC_REQUIRE([AM_SET_LEADING_DOT])dnl +_AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])], + [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])], + [_AM_PROG_TAR([v7])])]) +_AM_IF_OPTION([no-dependencies],, +[AC_PROVIDE_IFELSE([AC_PROG_CC], + [_AM_DEPENDENCIES(CC)], + [define([AC_PROG_CC], + defn([AC_PROG_CC])[_AM_DEPENDENCIES(CC)])])dnl +AC_PROVIDE_IFELSE([AC_PROG_CXX], + [_AM_DEPENDENCIES(CXX)], + [define([AC_PROG_CXX], + defn([AC_PROG_CXX])[_AM_DEPENDENCIES(CXX)])])dnl +]) +]) + + +# When config.status generates a header, we must update the stamp-h file. +# This file resides in the same directory as the config header +# that is generated. The stamp files are numbered to have different names. 
+ +# Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the +# loop where config.status creates the headers, so we can generate +# our stamp files there. +AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK], +[# Compute $1's index in $config_headers. +_am_stamp_count=1 +for _am_header in $config_headers :; do + case $_am_header in + $1 | $1:* ) + break ;; + * ) + _am_stamp_count=`expr $_am_stamp_count + 1` ;; + esac +done +echo "timestamp for $1" >`AS_DIRNAME([$1])`/stamp-h[]$_am_stamp_count]) + +# Copyright (C) 2001, 2003, 2005 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_PROG_INSTALL_SH +# ------------------ +# Define $install_sh. +AC_DEFUN([AM_PROG_INSTALL_SH], +[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl +install_sh=${install_sh-"$am_aux_dir/install-sh"} +AC_SUBST(install_sh)]) + +# Copyright (C) 2003, 2005 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 2 + +# Check whether the underlying file-system supports filenames +# with a leading dot. For instance MS-DOS doesn't. +AC_DEFUN([AM_SET_LEADING_DOT], +[rm -rf .tst 2>/dev/null +mkdir .tst 2>/dev/null +if test -d .tst; then + am__leading_dot=. +else + am__leading_dot=_ +fi +rmdir .tst 2>/dev/null +AC_SUBST([am__leading_dot])]) + +# Check to see how 'make' treats includes. -*- Autoconf -*- + +# Copyright (C) 2001, 2002, 2003, 2005 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 3 + +# AM_MAKE_INCLUDE() +# ----------------- +# Check to see how make treats includes. +AC_DEFUN([AM_MAKE_INCLUDE], +[am_make=${MAKE-make} +cat > confinc << 'END' +am__doit: + @echo done +.PHONY: am__doit +END +# If we don't find an include directive, just comment out the code. +AC_MSG_CHECKING([for style of include used by $am_make]) +am__include="#" +am__quote= +_am_result=none +# First try GNU make style include. +echo "include confinc" > confmf +# We grep out `Entering directory' and `Leaving directory' +# messages which can occur if `w' ends up in MAKEFLAGS. +# In particular we don't look at `^make:' because GNU make might +# be invoked under some other name (usually "gmake"), in which +# case it prints its new name instead of `make'. +if test "`$am_make -s -f confmf 2> /dev/null | grep -v 'ing directory'`" = "done"; then + am__include=include + am__quote= + _am_result=GNU +fi +# Now try BSD make style include. +if test "$am__include" = "#"; then + echo '.include "confinc"' > confmf + if test "`$am_make -s -f confmf 2> /dev/null`" = "done"; then + am__include=.include + am__quote="\"" + _am_result=BSD + fi +fi +AC_SUBST([am__include]) +AC_SUBST([am__quote]) +AC_MSG_RESULT([$_am_result]) +rm -f confinc confmf +]) + +# Fake the existence of programs that GNU maintainers use. -*- Autoconf -*- + +# Copyright (C) 1997, 1999, 2000, 2001, 2003, 2005 +# Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. 
+ +# serial 4 + +# AM_MISSING_PROG(NAME, PROGRAM) +# ------------------------------ +AC_DEFUN([AM_MISSING_PROG], +[AC_REQUIRE([AM_MISSING_HAS_RUN]) +$1=${$1-"${am_missing_run}$2"} +AC_SUBST($1)]) + + +# AM_MISSING_HAS_RUN +# ------------------ +# Define MISSING if not defined so far and test if it supports --run. +# If it does, set am_missing_run to use it, otherwise, to nothing. +AC_DEFUN([AM_MISSING_HAS_RUN], +[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl +test x"${MISSING+set}" = xset || MISSING="\${SHELL} $am_aux_dir/missing" +# Use eval to expand $SHELL +if eval "$MISSING --run true"; then + am_missing_run="$MISSING --run " +else + am_missing_run= + AC_MSG_WARN([`missing' script is too old or missing]) +fi +]) + +# Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_PROG_MKDIR_P +# --------------- +# Check whether `mkdir -p' is supported, fallback to mkinstalldirs otherwise. +# +# Automake 1.8 used `mkdir -m 0755 -p --' to ensure that directories +# created by `make install' are always world readable, even if the +# installer happens to have an overly restrictive umask (e.g. 077). +# This was a mistake. There are at least two reasons why we must not +# use `-m 0755': +# - it causes special bits like SGID to be ignored, +# - it may be too restrictive (some setups expect 775 directories). +# +# Do not use -m 0755 and let people choose whatever they expect by +# setting umask. +# +# We cannot accept any implementation of `mkdir' that recognizes `-p'. +# Some implementations (such as Solaris 8's) are not thread-safe: if a +# parallel make tries to run `mkdir -p a/b' and `mkdir -p a/c' +# concurrently, both version can detect that a/ is missing, but only +# one can create it and the other will error out. Consequently we +# restrict ourselves to GNU make (using the --version option ensures +# this.) +AC_DEFUN([AM_PROG_MKDIR_P], +[if mkdir -p --version . >/dev/null 2>&1 && test ! -d ./--version; then + # We used to keeping the `.' as first argument, in order to + # allow $(mkdir_p) to be used without argument. As in + # $(mkdir_p) $(somedir) + # where $(somedir) is conditionally defined. However this is wrong + # for two reasons: + # 1. if the package is installed by a user who cannot write `.' + # make install will fail, + # 2. the above comment should most certainly read + # $(mkdir_p) $(DESTDIR)$(somedir) + # so it does not work when $(somedir) is undefined and + # $(DESTDIR) is not. + # To support the latter case, we have to write + # test -z "$(somedir)" || $(mkdir_p) $(DESTDIR)$(somedir), + # so the `.' trick is pointless. + mkdir_p='mkdir -p --' +else + # On NextStep and OpenStep, the `mkdir' command does not + # recognize any option. It will interpret all options as + # directories to create, and then abort because `.' already + # exists. + for d in ./-p ./--version; + do + test -d $d && rmdir $d + done + # $(mkinstalldirs) is defined by Automake if mkinstalldirs exists. + if test -f "$ac_aux_dir/mkinstalldirs"; then + mkdir_p='$(mkinstalldirs)' + else + mkdir_p='$(install_sh) -d' + fi +fi +AC_SUBST([mkdir_p])]) + +# Helper functions for option handling. -*- Autoconf -*- + +# Copyright (C) 2001, 2002, 2003, 2005 Free Software Foundation, Inc. 
+# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 3 + +# _AM_MANGLE_OPTION(NAME) +# ----------------------- +AC_DEFUN([_AM_MANGLE_OPTION], +[[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])]) + +# _AM_SET_OPTION(NAME) +# ------------------------------ +# Set option NAME. Presently that only means defining a flag for this option. +AC_DEFUN([_AM_SET_OPTION], +[m4_define(_AM_MANGLE_OPTION([$1]), 1)]) + +# _AM_SET_OPTIONS(OPTIONS) +# ---------------------------------- +# OPTIONS is a space-separated list of Automake options. +AC_DEFUN([_AM_SET_OPTIONS], +[AC_FOREACH([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])]) + +# _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET]) +# ------------------------------------------- +# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. +AC_DEFUN([_AM_IF_OPTION], +[m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])]) + +# Check to make sure that the build environment is sane. -*- Autoconf -*- + +# Copyright (C) 1996, 1997, 2000, 2001, 2003, 2005 +# Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 4 + +# AM_SANITY_CHECK +# --------------- +AC_DEFUN([AM_SANITY_CHECK], +[AC_MSG_CHECKING([whether build environment is sane]) +# Just in case +sleep 1 +echo timestamp > conftest.file +# Do `set' in a subshell so we don't clobber the current shell's +# arguments. Must try -L first in case configure is actually a +# symlink; some systems play weird games with the mod time of symlinks +# (eg FreeBSD returns the mod time of the symlink's containing +# directory). +if ( + set X `ls -Lt $srcdir/configure conftest.file 2> /dev/null` + if test "$[*]" = "X"; then + # -L didn't work. + set X `ls -t $srcdir/configure conftest.file` + fi + rm -f conftest.file + if test "$[*]" != "X $srcdir/configure conftest.file" \ + && test "$[*]" != "X conftest.file $srcdir/configure"; then + + # If neither matched, then we have a broken ls. This can happen + # if, for instance, CONFIG_SHELL is bash and it inherits a + # broken ls alias from the environment. This has actually + # happened. Such a system could not be considered "sane". + AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken +alias in your environment]) + fi + + test "$[2]" = conftest.file + ) +then + # Ok. + : +else + AC_MSG_ERROR([newly created file is older than distributed files! +Check your system clock]) +fi +AC_MSG_RESULT(yes)]) + +# Copyright (C) 2001, 2003, 2005 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_PROG_INSTALL_STRIP +# --------------------- +# One issue with vendor `install' (even GNU) is that you can't +# specify the program used to strip binaries. This is especially +# annoying in cross-compiling environments, where the build's strip +# is unlikely to handle the host's binaries. +# Fortunately install-sh will honor a STRIPPROG variable, so we +# always use install-sh in `make install-strip', and initialize +# STRIPPROG with the value of the STRIP variable (set by the user). 
+AC_DEFUN([AM_PROG_INSTALL_STRIP], +[AC_REQUIRE([AM_PROG_INSTALL_SH])dnl +# Installed binaries are usually stripped using `strip' when the user +# run `make install-strip'. However `strip' might not be the right +# tool to use in cross-compilation environments, therefore Automake +# will honor the `STRIP' environment variable to overrule this program. +dnl Don't test for $cross_compiling = yes, because it might be `maybe'. +if test "$cross_compiling" != no; then + AC_CHECK_TOOL([STRIP], [strip], :) +fi +INSTALL_STRIP_PROGRAM="\${SHELL} \$(install_sh) -c -s" +AC_SUBST([INSTALL_STRIP_PROGRAM])]) + +# Check how to create a tarball. -*- Autoconf -*- + +# Copyright (C) 2004, 2005 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 2 + +# _AM_PROG_TAR(FORMAT) +# -------------------- +# Check how to create a tarball in format FORMAT. +# FORMAT should be one of `v7', `ustar', or `pax'. +# +# Substitute a variable $(am__tar) that is a command +# writing to stdout a FORMAT-tarball containing the directory +# $tardir. +# tardir=directory && $(am__tar) > result.tar +# +# Substitute a variable $(am__untar) that extract such +# a tarball read from stdin. +# $(am__untar) < result.tar +AC_DEFUN([_AM_PROG_TAR], +[# Always define AMTAR for backward compatibility. +AM_MISSING_PROG([AMTAR], [tar]) +m4_if([$1], [v7], + [am__tar='${AMTAR} chof - "$$tardir"'; am__untar='${AMTAR} xf -'], + [m4_case([$1], [ustar],, [pax],, + [m4_fatal([Unknown tar format])]) +AC_MSG_CHECKING([how to create a $1 tar archive]) +# Loop over all known methods to create a tar archive until one works. +_am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none' +_am_tools=${am_cv_prog_tar_$1-$_am_tools} +# Do not fold the above two line into one, because Tru64 sh and +# Solaris sh will not grok spaces in the rhs of `-'. +for _am_tool in $_am_tools +do + case $_am_tool in + gnutar) + for _am_tar in tar gnutar gtar; + do + AM_RUN_LOG([$_am_tar --version]) && break + done + am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"' + am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"' + am__untar="$_am_tar -xf -" + ;; + plaintar) + # Must skip GNU tar: if it does not support --format= it doesn't create + # ustar tarball either. + (tar --version) >/dev/null 2>&1 && continue + am__tar='tar chf - "$$tardir"' + am__tar_='tar chf - "$tardir"' + am__untar='tar xf -' + ;; + pax) + am__tar='pax -L -x $1 -w "$$tardir"' + am__tar_='pax -L -x $1 -w "$tardir"' + am__untar='pax -r' + ;; + cpio) + am__tar='find "$$tardir" -print | cpio -o -H $1 -L' + am__tar_='find "$tardir" -print | cpio -o -H $1 -L' + am__untar='cpio -i -H $1 -d' + ;; + none) + am__tar=false + am__tar_=false + am__untar=false + ;; + esac + + # If the value was cached, stop now. We just wanted to have am__tar + # and am__untar set. 
+  test -n "${am_cv_prog_tar_$1}" && break
+
+  # tar/untar a dummy directory, and stop if the command works
+  rm -rf conftest.dir
+  mkdir conftest.dir
+  echo GrepMe > conftest.dir/file
+  AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar])
+  rm -rf conftest.dir
+  if test -s conftest.tar; then
+    AM_RUN_LOG([$am__untar <conftest.tar])
+    grep GrepMe conftest.dir/file >/dev/null 2>&1 && break
+  fi
+done
+rm -rf conftest.dir
+
+AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool])
+AC_MSG_RESULT([$am_cv_prog_tar_$1])])
+AC_SUBST([am__tar])
+AC_SUBST([am__untar])
+]) # _AM_PROG_TAR
+
+m4_include([../../c++/utils/m4/hadoop_utils.m4])
diff --git a/src/examples/pipes/conf/word-part.xml b/src/examples/pipes/conf/word-part.xml
new file mode 100644
index 0000000..c85b946
--- /dev/null
+++ b/src/examples/pipes/conf/word-part.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0"?>
+<configuration>
+
+  <property>
+    <name>mapred.reduce.tasks</name>
+    <value>2</value>
+  </property>
+
+  <property>
+    <name>hadoop.pipes.executable</name>
+    <value>hdfs:/examples/bin/wordcount-part</value>
+  </property>
+
+  <property>
+    <name>hadoop.pipes.java.recordreader</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>hadoop.pipes.java.recordwriter</name>
+    <value>true</value>
+  </property>
+
+</configuration>
diff --git a/src/examples/pipes/conf/word.xml b/src/examples/pipes/conf/word.xml
new file mode 100644
index 0000000..5fc4394
--- /dev/null
+++ b/src/examples/pipes/conf/word.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0"?>
+<configuration>
+
+  <property>
+    <name>mapred.reduce.tasks</name>
+    <value>2</value>
+  </property>
+
+  <property>
+    <name>hadoop.pipes.executable</name>
+    <value>/examples/bin/wordcount-simple#wordcount-simple</value>
+    <description>Executable path is given as "path#executable-name"
+      so that the executable will have a symlink in working directory.
+      This can be used for gdb debugging etc.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.pipes.java.recordreader</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>hadoop.pipes.java.recordwriter</name>
+    <value>true</value>
+  </property>
+
+</configuration>
diff --git a/src/examples/pipes/config.guess b/src/examples/pipes/config.guess
new file mode 100644
index 0000000..a6d8a94
--- /dev/null
+++ b/src/examples/pipes/config.guess
@@ -0,0 +1,1449 @@
+#! /bin/sh
+# Attempt to guess a canonical system name.
+#   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+#   2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+
+timestamp='2004-06-24'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# Originally written by Per Bothner.
+# Please send patches to <config-patches@gnu.org>.  Submit a context
+# diff and a properly formatted ChangeLog entry.
+#
+# This script attempts to guess a canonical system name similar to
+# config.sub.  If it succeeds, it prints the system name on stdout, and
+# exits with 0.  Otherwise, it exits with 1.
+#
+# The plan is that this can be called by configure scripts if you
+# don't specify an explicit build system type.
+ +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] + +Output the configuration name of the system \`$me' is run on. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.guess ($timestamp) + +Originally written by Per Bothner. +Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 +Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit 0 ;; + --version | -v ) + echo "$version" ; exit 0 ;; + --help | --h* | -h ) + echo "$usage"; exit 0 ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" >&2 + exit 1 ;; + * ) + break ;; + esac +done + +if test $# != 0; then + echo "$me: too many arguments$help" >&2 + exit 1 +fi + +trap 'exit 1' 1 2 15 + +# CC_FOR_BUILD -- compiler used by this script. Note that the use of a +# compiler to aid in system detection is discouraged as it requires +# temporary files to be created and, as you can see below, it is a +# headache to deal with in a portable fashion. + +# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still +# use `HOST_CC' if defined, but it is deprecated. + +# Portable tmp directory creation inspired by the Autoconf team. + +set_cc_for_build=' +trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; +trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; +: ${TMPDIR=/tmp} ; + { tmp=`(umask 077 && mktemp -d -q "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || + { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || + { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || + { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; +dummy=$tmp/dummy ; +tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; +case $CC_FOR_BUILD,$HOST_CC,$CC in + ,,) echo "int x;" > $dummy.c ; + for c in cc gcc c89 c99 ; do + if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then + CC_FOR_BUILD="$c"; break ; + fi ; + done ; + if test x"$CC_FOR_BUILD" = x ; then + CC_FOR_BUILD=no_compiler_found ; + fi + ;; + ,,*) CC_FOR_BUILD=$CC ;; + ,*,*) CC_FOR_BUILD=$HOST_CC ;; +esac ;' + +# This is needed to find uname on a Pyramid OSx when run in the BSD universe. +# (ghazi@noc.rutgers.edu 1994-08-24) +if (test -f /.attbin/uname) >/dev/null 2>&1 ; then + PATH=$PATH:/.attbin ; export PATH +fi + +UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown +UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown +UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown +UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown + +# Note: order is significant - the case branches are not exclusive. + +case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in + *:NetBSD:*:*) + # NetBSD (nbsd) targets should (where applicable) match one or + # more of the tupples: *-*-netbsdelf*, *-*-netbsdaout*, + # *-*-netbsdecoff* and *-*-netbsd*. 
For targets that recently + # switched to ELF, *-*-netbsd* would select the old + # object file format. This provides both forward + # compatibility and a consistent mechanism for selecting the + # object file format. + # + # Note: NetBSD doesn't particularly care about the vendor + # portion of the name. We always set it to "unknown". + sysctl="sysctl -n hw.machine_arch" + UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \ + /usr/sbin/$sysctl 2>/dev/null || echo unknown)` + case "${UNAME_MACHINE_ARCH}" in + armeb) machine=armeb-unknown ;; + arm*) machine=arm-unknown ;; + sh3el) machine=shl-unknown ;; + sh3eb) machine=sh-unknown ;; + *) machine=${UNAME_MACHINE_ARCH}-unknown ;; + esac + # The Operating System including object format, if it has switched + # to ELF recently, or will in the future. + case "${UNAME_MACHINE_ARCH}" in + arm*|i386|m68k|ns32k|sh3*|sparc|vax) + eval $set_cc_for_build + if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep __ELF__ >/dev/null + then + # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). + # Return netbsd for either. FIX? + os=netbsd + else + os=netbsdelf + fi + ;; + *) + os=netbsd + ;; + esac + # The OS release + # Debian GNU/NetBSD machines have a different userland, and + # thus, need a distinct triplet. However, they do not need + # kernel version information, so it can be replaced with a + # suitable tag, in the style of linux-gnu. + case "${UNAME_VERSION}" in + Debian*) + release='-gnu' + ;; + *) + release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'` + ;; + esac + # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: + # contains redundant information, the shorter form: + # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. + echo "${machine}-${os}${release}" + exit 0 ;; + amd64:OpenBSD:*:*) + echo x86_64-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + amiga:OpenBSD:*:*) + echo m68k-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + arc:OpenBSD:*:*) + echo mipsel-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + cats:OpenBSD:*:*) + echo arm-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + hp300:OpenBSD:*:*) + echo m68k-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + luna88k:OpenBSD:*:*) + echo m88k-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + mac68k:OpenBSD:*:*) + echo m68k-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + macppc:OpenBSD:*:*) + echo powerpc-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + mvme68k:OpenBSD:*:*) + echo m68k-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + mvme88k:OpenBSD:*:*) + echo m88k-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + mvmeppc:OpenBSD:*:*) + echo powerpc-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + pmax:OpenBSD:*:*) + echo mipsel-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + sgi:OpenBSD:*:*) + echo mipseb-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + sun3:OpenBSD:*:*) + echo m68k-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + wgrisc:OpenBSD:*:*) + echo mipsel-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + *:OpenBSD:*:*) + echo ${UNAME_MACHINE}-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + *:ekkoBSD:*:*) + echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} + exit 0 ;; + macppc:MirBSD:*:*) + echo powerppc-unknown-mirbsd${UNAME_RELEASE} + exit 0 ;; + *:MirBSD:*:*) + echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} + exit 0 ;; + alpha:OSF1:*:*) + case $UNAME_RELEASE in + *4.0) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` + ;; + *5.*) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` + ;; + esac + # According to Compaq, /usr/sbin/psrinfo has been available on + # OSF/1 and Tru64 
systems produced since 1995. I hope that + # covers most systems running today. This code pipes the CPU + # types through head -n 1, so we only detect the type of CPU 0. + ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` + case "$ALPHA_CPU_TYPE" in + "EV4 (21064)") + UNAME_MACHINE="alpha" ;; + "EV4.5 (21064)") + UNAME_MACHINE="alpha" ;; + "LCA4 (21066/21068)") + UNAME_MACHINE="alpha" ;; + "EV5 (21164)") + UNAME_MACHINE="alphaev5" ;; + "EV5.6 (21164A)") + UNAME_MACHINE="alphaev56" ;; + "EV5.6 (21164PC)") + UNAME_MACHINE="alphapca56" ;; + "EV5.7 (21164PC)") + UNAME_MACHINE="alphapca57" ;; + "EV6 (21264)") + UNAME_MACHINE="alphaev6" ;; + "EV6.7 (21264A)") + UNAME_MACHINE="alphaev67" ;; + "EV6.8CB (21264C)") + UNAME_MACHINE="alphaev68" ;; + "EV6.8AL (21264B)") + UNAME_MACHINE="alphaev68" ;; + "EV6.8CX (21264D)") + UNAME_MACHINE="alphaev68" ;; + "EV6.9A (21264/EV69A)") + UNAME_MACHINE="alphaev69" ;; + "EV7 (21364)") + UNAME_MACHINE="alphaev7" ;; + "EV7.9 (21364A)") + UNAME_MACHINE="alphaev79" ;; + esac + # A Pn.n version is a patched version. + # A Vn.n version is a released version. + # A Tn.n version is a released field test version. + # A Xn.n version is an unreleased experimental baselevel. + # 1.2 uses "1.2" for uname -r. + echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` + exit 0 ;; + Alpha\ *:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # Should we change UNAME_MACHINE based on the output of uname instead + # of the specific Alpha model? + echo alpha-pc-interix + exit 0 ;; + 21064:Windows_NT:50:3) + echo alpha-dec-winnt3.5 + exit 0 ;; + Amiga*:UNIX_System_V:4.0:*) + echo m68k-unknown-sysv4 + exit 0;; + *:[Aa]miga[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-amigaos + exit 0 ;; + *:[Mm]orph[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-morphos + exit 0 ;; + *:OS/390:*:*) + echo i370-ibm-openedition + exit 0 ;; + *:OS400:*:*) + echo powerpc-ibm-os400 + exit 0 ;; + arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) + echo arm-acorn-riscix${UNAME_RELEASE} + exit 0;; + SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) + echo hppa1.1-hitachi-hiuxmpp + exit 0;; + Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) + # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. + if test "`(/bin/universe) 2>/dev/null`" = att ; then + echo pyramid-pyramid-sysv3 + else + echo pyramid-pyramid-bsd + fi + exit 0 ;; + NILE*:*:*:dcosx) + echo pyramid-pyramid-svr4 + exit 0 ;; + DRS?6000:unix:4.0:6*) + echo sparc-icl-nx6 + exit 0 ;; + DRS?6000:UNIX_SV:4.2*:7*) + case `/usr/bin/uname -p` in + sparc) echo sparc-icl-nx7 && exit 0 ;; + esac ;; + sun4H:SunOS:5.*:*) + echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit 0 ;; + sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) + echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit 0 ;; + i86pc:SunOS:5.*:*) + echo i386-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit 0 ;; + sun4*:SunOS:6*:*) + # According to config.sub, this is the proper way to canonicalize + # SunOS6. Hard to guess exactly what SunOS6 will be like, but + # it's likely to be more like Solaris than SunOS4. + echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit 0 ;; + sun4*:SunOS:*:*) + case "`/usr/bin/arch -k`" in + Series*|S4*) + UNAME_RELEASE=`uname -v` + ;; + esac + # Japanese Language versions have a version number like `4.1.3-JL'. 
+ echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` + exit 0 ;; + sun3*:SunOS:*:*) + echo m68k-sun-sunos${UNAME_RELEASE} + exit 0 ;; + sun*:*:4.2BSD:*) + UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` + test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3 + case "`/bin/arch`" in + sun3) + echo m68k-sun-sunos${UNAME_RELEASE} + ;; + sun4) + echo sparc-sun-sunos${UNAME_RELEASE} + ;; + esac + exit 0 ;; + aushp:SunOS:*:*) + echo sparc-auspex-sunos${UNAME_RELEASE} + exit 0 ;; + # The situation for MiNT is a little confusing. The machine name + # can be virtually everything (everything which is not + # "atarist" or "atariste" at least should have a processor + # > m68000). The system name ranges from "MiNT" over "FreeMiNT" + # to the lowercase version "mint" (or "freemint"). Finally + # the system name "TOS" denotes a system which is actually not + # MiNT. But MiNT is downward compatible to TOS, so this should + # be no problem. + atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit 0 ;; + atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit 0 ;; + *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit 0 ;; + milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) + echo m68k-milan-mint${UNAME_RELEASE} + exit 0 ;; + hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) + echo m68k-hades-mint${UNAME_RELEASE} + exit 0 ;; + *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) + echo m68k-unknown-mint${UNAME_RELEASE} + exit 0 ;; + m68k:machten:*:*) + echo m68k-apple-machten${UNAME_RELEASE} + exit 0 ;; + powerpc:machten:*:*) + echo powerpc-apple-machten${UNAME_RELEASE} + exit 0 ;; + RISC*:Mach:*:*) + echo mips-dec-mach_bsd4.3 + exit 0 ;; + RISC*:ULTRIX:*:*) + echo mips-dec-ultrix${UNAME_RELEASE} + exit 0 ;; + VAX*:ULTRIX*:*:*) + echo vax-dec-ultrix${UNAME_RELEASE} + exit 0 ;; + 2020:CLIX:*:* | 2430:CLIX:*:*) + echo clipper-intergraph-clix${UNAME_RELEASE} + exit 0 ;; + mips:*:*:UMIPS | mips:*:*:RISCos) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c +#ifdef __cplusplus +#include /* for printf() prototype */ + int main (int argc, char *argv[]) { +#else + int main (argc, argv) int argc; char *argv[]; { +#endif + #if defined (host_mips) && defined (MIPSEB) + #if defined (SYSTYPE_SYSV) + printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_SVR4) + printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) + printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); + #endif + #endif + exit (-1); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c \ + && $dummy `echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` \ + && exit 0 + echo mips-mips-riscos${UNAME_RELEASE} + exit 0 ;; + Motorola:PowerMAX_OS:*:*) + echo powerpc-motorola-powermax + exit 0 ;; + Motorola:*:4.3:PL8-*) + echo powerpc-harris-powermax + exit 0 ;; + Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) + echo powerpc-harris-powermax + exit 0 ;; + Night_Hawk:Power_UNIX:*:*) + echo powerpc-harris-powerunix + exit 0 ;; + m88k:CX/UX:7*:*) + echo m88k-harris-cxux7 + exit 0 ;; + m88k:*:4*:R4*) + echo m88k-motorola-sysv4 + exit 0 ;; + m88k:*:3*:R3*) + echo m88k-motorola-sysv3 + exit 0 ;; + AViiON:dgux:*:*) + # DG/UX returns AViiON for all architectures + UNAME_PROCESSOR=`/usr/bin/uname -p` + if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] + then 
+ if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ + [ ${TARGET_BINARY_INTERFACE}x = x ] + then + echo m88k-dg-dgux${UNAME_RELEASE} + else + echo m88k-dg-dguxbcs${UNAME_RELEASE} + fi + else + echo i586-dg-dgux${UNAME_RELEASE} + fi + exit 0 ;; + M88*:DolphinOS:*:*) # DolphinOS (SVR3) + echo m88k-dolphin-sysv3 + exit 0 ;; + M88*:*:R3*:*) + # Delta 88k system running SVR3 + echo m88k-motorola-sysv3 + exit 0 ;; + XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) + echo m88k-tektronix-sysv3 + exit 0 ;; + Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) + echo m68k-tektronix-bsd + exit 0 ;; + *:IRIX*:*:*) + echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` + exit 0 ;; + ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. + echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id + exit 0 ;; # Note that: echo "'`uname -s`'" gives 'AIX ' + i*86:AIX:*:*) + echo i386-ibm-aix + exit 0 ;; + ia64:AIX:*:*) + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} + exit 0 ;; + *:AIX:2:3) + if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + + main() + { + if (!__power_pc()) + exit(1); + puts("powerpc-ibm-aix3.2.5"); + exit(0); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && $dummy && exit 0 + echo rs6000-ibm-aix3.2.5 + elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then + echo rs6000-ibm-aix3.2.4 + else + echo rs6000-ibm-aix3.2 + fi + exit 0 ;; + *:AIX:*:[45]) + IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` + if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then + IBM_ARCH=rs6000 + else + IBM_ARCH=powerpc + fi + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${IBM_ARCH}-ibm-aix${IBM_REV} + exit 0 ;; + *:AIX:*:*) + echo rs6000-ibm-aix + exit 0 ;; + ibmrt:4.4BSD:*|romp-ibm:BSD:*) + echo romp-ibm-bsd4.4 + exit 0 ;; + ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and + echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to + exit 0 ;; # report: romp-ibm BSD 4.3 + *:BOSX:*:*) + echo rs6000-bull-bosx + exit 0 ;; + DPX/2?00:B.O.S.:*:*) + echo m68k-bull-sysv3 + exit 0 ;; + 9000/[34]??:4.3bsd:1.*:*) + echo m68k-hp-bsd + exit 0 ;; + hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) + echo m68k-hp-bsd4.4 + exit 0 ;; + 9000/[34678]??:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + case "${UNAME_MACHINE}" in + 9000/31? ) HP_ARCH=m68000 ;; + 9000/[34]?? 
) HP_ARCH=m68k ;;
+            9000/[678][0-9][0-9])
+                if [ -x /usr/bin/getconf ]; then
+                    sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
+                    sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
+                    case "${sc_cpu_version}" in
+                      523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
+                      528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
+                      532)                      # CPU_PA_RISC2_0
+                        case "${sc_kernel_bits}" in
+                          32) HP_ARCH="hppa2.0n" ;;
+                          64) HP_ARCH="hppa2.0w" ;;
+                          '') HP_ARCH="hppa2.0" ;;   # HP-UX 10.20
+                        esac ;;
+                    esac
+                fi
+                if [ "${HP_ARCH}" = "" ]; then
+                    eval $set_cc_for_build
+                    sed 's/^ //' << EOF >$dummy.c
+
+              #define _HPUX_SOURCE
+              #include <stdlib.h>
+              #include <unistd.h>
+
+              int main ()
+              {
+              #if defined(_SC_KERNEL_BITS)
+                  long bits = sysconf(_SC_KERNEL_BITS);
+              #endif
+                  long cpu  = sysconf (_SC_CPU_VERSION);
+
+                  switch (cpu)
+                  {
+                  case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
+                  case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
+                  case CPU_PA_RISC2_0:
+              #if defined(_SC_KERNEL_BITS)
+                      switch (bits)
+                      {
+                      case 64: puts ("hppa2.0w"); break;
+                      case 32: puts ("hppa2.0n"); break;
+                      default: puts ("hppa2.0"); break;
+                      } break;
+              #else  /* !defined(_SC_KERNEL_BITS) */
+                      puts ("hppa2.0"); break;
+              #endif
+                  default: puts ("hppa1.0"); break;
+                  }
+                  exit (0);
+              }
+EOF
+                    (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
+                    test -z "$HP_ARCH" && HP_ARCH=hppa
+                fi ;;
+        esac
+        if [ ${HP_ARCH} = "hppa2.0w" ]
+        then
+            # avoid double evaluation of $set_cc_for_build
+            test -n "$CC_FOR_BUILD" || eval $set_cc_for_build
+            if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E -) | grep __LP64__ >/dev/null
+            then
+                HP_ARCH="hppa2.0w"
+            else
+                HP_ARCH="hppa64"
+            fi
+        fi
+        echo ${HP_ARCH}-hp-hpux${HPUX_REV}
+        exit 0 ;;
+    ia64:HP-UX:*:*)
+        HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+        echo ia64-hp-hpux${HPUX_REV}
+        exit 0 ;;
+    3050*:HI-UX:*:*)
+        eval $set_cc_for_build
+        sed 's/^ //' << EOF >$dummy.c
+        #include <unistd.h>
+        int
+        main ()
+        {
+          long cpu = sysconf (_SC_CPU_VERSION);
+          /* The order matters, because CPU_IS_HP_MC68K erroneously returns
+             true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct
+             results, however.
*/ + if (CPU_IS_PA_RISC (cpu)) + { + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; + case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; + default: puts ("hppa-hitachi-hiuxwe2"); break; + } + } + else if (CPU_IS_HP_MC68K (cpu)) + puts ("m68k-hitachi-hiuxwe2"); + else puts ("unknown-hitachi-hiuxwe2"); + exit (0); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && $dummy && exit 0 + echo unknown-hitachi-hiuxwe2 + exit 0 ;; + 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) + echo hppa1.1-hp-bsd + exit 0 ;; + 9000/8??:4.3bsd:*:*) + echo hppa1.0-hp-bsd + exit 0 ;; + *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) + echo hppa1.0-hp-mpeix + exit 0 ;; + hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) + echo hppa1.1-hp-osf + exit 0 ;; + hp8??:OSF1:*:*) + echo hppa1.0-hp-osf + exit 0 ;; + i*86:OSF1:*:*) + if [ -x /usr/sbin/sysversion ] ; then + echo ${UNAME_MACHINE}-unknown-osf1mk + else + echo ${UNAME_MACHINE}-unknown-osf1 + fi + exit 0 ;; + parisc*:Lites*:*:*) + echo hppa1.1-hp-lites + exit 0 ;; + C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) + echo c1-convex-bsd + exit 0 ;; + C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit 0 ;; + C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) + echo c34-convex-bsd + exit 0 ;; + C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) + echo c38-convex-bsd + exit 0 ;; + C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) + echo c4-convex-bsd + exit 0 ;; + CRAY*Y-MP:*:*:*) + echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit 0 ;; + CRAY*[A-Z]90:*:*:*) + echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ + | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ + -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ + -e 's/\.[^.]*$/.X/' + exit 0 ;; + CRAY*TS:*:*:*) + echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit 0 ;; + CRAY*T3E:*:*:*) + echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit 0 ;; + CRAY*SV1:*:*:*) + echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit 0 ;; + *:UNICOS/mp:*:*) + echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit 0 ;; + F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) + FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` + FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` + echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit 0 ;; + 5000:UNIX_System_V:4.*:*) + FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` + echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit 0 ;; + i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) + echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} + exit 0 ;; + sparc*:BSD/OS:*:*) + echo sparc-unknown-bsdi${UNAME_RELEASE} + exit 0 ;; + *:BSD/OS:*:*) + echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} + exit 0 ;; + *:FreeBSD:*:*) + echo ${UNAME_MACHINE}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + exit 0 ;; + i*:CYGWIN*:*) + echo ${UNAME_MACHINE}-pc-cygwin + exit 0 ;; + i*:MINGW*:*) + echo ${UNAME_MACHINE}-pc-mingw32 + exit 0 ;; + i*:PW*:*) + echo ${UNAME_MACHINE}-pc-pw32 + exit 0 ;; + x86:Interix*:[34]*) + echo 
i586-pc-interix${UNAME_RELEASE}|sed -e 's/\..*//' + exit 0 ;; + [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) + echo i${UNAME_MACHINE}-pc-mks + exit 0 ;; + i*:Windows_NT*:* | Pentium*:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we + # UNAME_MACHINE based on the output of uname instead of i386? + echo i586-pc-interix + exit 0 ;; + i*:UWIN*:*) + echo ${UNAME_MACHINE}-pc-uwin + exit 0 ;; + p*:CYGWIN*:*) + echo powerpcle-unknown-cygwin + exit 0 ;; + prep*:SunOS:5.*:*) + echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit 0 ;; + *:GNU:*:*) + # the GNU system + echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` + exit 0 ;; + *:GNU/*:*:*) + # other systems with GNU libc and userland + echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu + exit 0 ;; + i*86:Minix:*:*) + echo ${UNAME_MACHINE}-pc-minix + exit 0 ;; + arm*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit 0 ;; + cris:Linux:*:*) + echo cris-axis-linux-gnu + exit 0 ;; + ia64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit 0 ;; + m32r*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit 0 ;; + m68*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit 0 ;; + mips:Linux:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #undef CPU + #undef mips + #undef mipsel + #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) + CPU=mipsel + #else + #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) + CPU=mips + #else + CPU= + #endif + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^CPU=` + test x"${CPU}" != x && echo "${CPU}-unknown-linux-gnu" && exit 0 + ;; + mips64:Linux:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #undef CPU + #undef mips64 + #undef mips64el + #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) + CPU=mips64el + #else + #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) + CPU=mips64 + #else + CPU= + #endif + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^CPU=` + test x"${CPU}" != x && echo "${CPU}-unknown-linux-gnu" && exit 0 + ;; + ppc:Linux:*:*) + echo powerpc-unknown-linux-gnu + exit 0 ;; + ppc64:Linux:*:*) + echo powerpc64-unknown-linux-gnu + exit 0 ;; + alpha:Linux:*:*) + case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in + EV5) UNAME_MACHINE=alphaev5 ;; + EV56) UNAME_MACHINE=alphaev56 ;; + PCA56) UNAME_MACHINE=alphapca56 ;; + PCA57) UNAME_MACHINE=alphapca56 ;; + EV6) UNAME_MACHINE=alphaev6 ;; + EV67) UNAME_MACHINE=alphaev67 ;; + EV68*) UNAME_MACHINE=alphaev68 ;; + esac + objdump --private-headers /bin/sh | grep ld.so.1 >/dev/null + if test "$?" 
= 0 ; then LIBC="libc1" ; else LIBC="" ; fi + echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC} + exit 0 ;; + parisc:Linux:*:* | hppa:Linux:*:*) + # Look for CPU level + case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in + PA7*) echo hppa1.1-unknown-linux-gnu ;; + PA8*) echo hppa2.0-unknown-linux-gnu ;; + *) echo hppa-unknown-linux-gnu ;; + esac + exit 0 ;; + parisc64:Linux:*:* | hppa64:Linux:*:*) + echo hppa64-unknown-linux-gnu + exit 0 ;; + s390:Linux:*:* | s390x:Linux:*:*) + echo ${UNAME_MACHINE}-ibm-linux + exit 0 ;; + sh64*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit 0 ;; + sh*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit 0 ;; + sparc:Linux:*:* | sparc64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit 0 ;; + x86_64:Linux:*:*) + echo x86_64-unknown-linux-gnu + exit 0 ;; + i*86:Linux:*:*) + # The BFD linker knows what the default object file format is, so + # first see if it will tell us. cd to the root directory to prevent + # problems with other programs or directories called `ld' in the path. + # Set LC_ALL=C to ensure ld outputs messages in English. + ld_supported_targets=`cd /; LC_ALL=C ld --help 2>&1 \ + | sed -ne '/supported targets:/!d + s/[ ][ ]*/ /g + s/.*supported targets: *// + s/ .*// + p'` + case "$ld_supported_targets" in + elf32-i386) + TENTATIVE="${UNAME_MACHINE}-pc-linux-gnu" + ;; + a.out-i386-linux) + echo "${UNAME_MACHINE}-pc-linux-gnuaout" + exit 0 ;; + coff-i386) + echo "${UNAME_MACHINE}-pc-linux-gnucoff" + exit 0 ;; + "") + # Either a pre-BFD a.out linker (linux-gnuoldld) or + # one that does not give us useful --help. + echo "${UNAME_MACHINE}-pc-linux-gnuoldld" + exit 0 ;; + esac + # Determine whether the default compiler is a.out or elf + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + #ifdef __ELF__ + # ifdef __GLIBC__ + # if __GLIBC__ >= 2 + LIBC=gnu + # else + LIBC=gnulibc1 + # endif + # else + LIBC=gnulibc1 + # endif + #else + #ifdef __INTEL_COMPILER + LIBC=gnu + #else + LIBC=gnuaout + #endif + #endif + #ifdef __dietlibc__ + LIBC=dietlibc + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^LIBC=` + test x"${LIBC}" != x && echo "${UNAME_MACHINE}-pc-linux-${LIBC}" && exit 0 + test x"${TENTATIVE}" != x && echo "${TENTATIVE}" && exit 0 + ;; + i*86:DYNIX/ptx:4*:*) + # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. + # earlier versions are messed up and put the nodename in both + # sysname and nodename. + echo i386-sequent-sysv4 + exit 0 ;; + i*86:UNIX_SV:4.2MP:2.*) + # Unixware is an offshoot of SVR4, but it has its own version + # number series starting with 2... + # I am not positive that other SVR4 systems won't match this, + # I just have to hope. -- rms. + # Use sysv4.2uw... so that sysv4* matches it. + echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} + exit 0 ;; + i*86:OS/2:*:*) + # If we were able to find `uname', then EMX Unix compatibility + # is probably installed. 
+        echo ${UNAME_MACHINE}-pc-os2-emx
+        exit 0 ;;
+    i*86:XTS-300:*:STOP)
+        echo ${UNAME_MACHINE}-unknown-stop
+        exit 0 ;;
+    i*86:atheos:*:*)
+        echo ${UNAME_MACHINE}-unknown-atheos
+        exit 0 ;;
+    i*86:syllable:*:*)
+        echo ${UNAME_MACHINE}-pc-syllable
+        exit 0 ;;
+    i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.0*:*)
+        echo i386-unknown-lynxos${UNAME_RELEASE}
+        exit 0 ;;
+    i*86:*DOS:*:*)
+        echo ${UNAME_MACHINE}-pc-msdosdjgpp
+        exit 0 ;;
+    i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
+        UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
+        if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
+                echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
+        else
+                echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
+        fi
+        exit 0 ;;
+    i*86:*:5:[78]*)
+        case `/bin/uname -X | grep "^Machine"` in
+            *486*)           UNAME_MACHINE=i486 ;;
+            *Pentium)        UNAME_MACHINE=i586 ;;
+            *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
+        esac
+        echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
+        exit 0 ;;
+    i*86:*:3.2:*)
+        if test -f /usr/options/cb.name; then
+                UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
+                echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
+        elif /bin/uname -X 2>/dev/null >/dev/null ; then
+                UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
+                (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
+                (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
+                        && UNAME_MACHINE=i586
+                (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
+                        && UNAME_MACHINE=i686
+                (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
+                        && UNAME_MACHINE=i686
+                echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
+        else
+                echo ${UNAME_MACHINE}-pc-sysv32
+        fi
+        exit 0 ;;
+    pc:*:*:*)
+        # Left here for compatibility:
+        # uname -m prints for DJGPP always 'pc', but it prints nothing about
+        # the processor, so we play safe by assuming i386.
+        echo i386-pc-msdosdjgpp
+        exit 0 ;;
+    Intel:Mach:3*:*)
+        echo i386-pc-mach3
+        exit 0 ;;
+    paragon:*:*:*)
+        echo i860-intel-osf1
+        exit 0 ;;
+    i860:*:4.*:*) # i860-SVR4
+        if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
+          echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
+        else # Add other i860-SVR4 vendors below as they are discovered.
+ echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 + fi + exit 0 ;; + mini*:CTIX:SYS*5:*) + # "miniframe" + echo m68010-convergent-sysv + exit 0 ;; + mc68k:UNIX:SYSTEM5:3.51m) + echo m68k-convergent-sysv + exit 0 ;; + M680?0:D-NIX:5.3:*) + echo m68k-diab-dnix + exit 0 ;; + M68*:*:R3V[5678]*:*) + test -r /sysV68 && echo 'm68k-motorola-sysv' && exit 0 ;; + 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0) + OS_REL='' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && echo i486-ncr-sysv4.3${OS_REL} && exit 0 + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && echo i586-ncr-sysv4.3${OS_REL} && exit 0 ;; + 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && echo i486-ncr-sysv4 && exit 0 ;; + m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) + echo m68k-unknown-lynxos${UNAME_RELEASE} + exit 0 ;; + mc68030:UNIX_System_V:4.*:*) + echo m68k-atari-sysv4 + exit 0 ;; + TSUNAMI:LynxOS:2.*:*) + echo sparc-unknown-lynxos${UNAME_RELEASE} + exit 0 ;; + rs6000:LynxOS:2.*:*) + echo rs6000-unknown-lynxos${UNAME_RELEASE} + exit 0 ;; + PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.0*:*) + echo powerpc-unknown-lynxos${UNAME_RELEASE} + exit 0 ;; + SM[BE]S:UNIX_SV:*:*) + echo mips-dde-sysv${UNAME_RELEASE} + exit 0 ;; + RM*:ReliantUNIX-*:*:*) + echo mips-sni-sysv4 + exit 0 ;; + RM*:SINIX-*:*:*) + echo mips-sni-sysv4 + exit 0 ;; + *:SINIX-*:*:*) + if uname -p 2>/dev/null >/dev/null ; then + UNAME_MACHINE=`(uname -p) 2>/dev/null` + echo ${UNAME_MACHINE}-sni-sysv4 + else + echo ns32k-sni-sysv + fi + exit 0 ;; + PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort + # says + echo i586-unisys-sysv4 + exit 0 ;; + *:UNIX_System_V:4*:FTX*) + # From Gerald Hewes . + # How about differentiating between stratus architectures? -djm + echo hppa1.1-stratus-sysv4 + exit 0 ;; + *:*:*:FTX*) + # From seanf@swdc.stratus.com. + echo i860-stratus-sysv4 + exit 0 ;; + *:VOS:*:*) + # From Paul.Green@stratus.com. + echo hppa1.1-stratus-vos + exit 0 ;; + mc68*:A/UX:*:*) + echo m68k-apple-aux${UNAME_RELEASE} + exit 0 ;; + news*:NEWS-OS:6*:*) + echo mips-sony-newsos6 + exit 0 ;; + R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) + if [ -d /usr/nec ]; then + echo mips-nec-sysv${UNAME_RELEASE} + else + echo mips-unknown-sysv${UNAME_RELEASE} + fi + exit 0 ;; + BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. + echo powerpc-be-beos + exit 0 ;; + BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. + echo powerpc-apple-beos + exit 0 ;; + BePC:BeOS:*:*) # BeOS running on Intel PC compatible. 
+        echo i586-pc-beos
+        exit 0 ;;
+    SX-4:SUPER-UX:*:*)
+        echo sx4-nec-superux${UNAME_RELEASE}
+        exit 0 ;;
+    SX-5:SUPER-UX:*:*)
+        echo sx5-nec-superux${UNAME_RELEASE}
+        exit 0 ;;
+    SX-6:SUPER-UX:*:*)
+        echo sx6-nec-superux${UNAME_RELEASE}
+        exit 0 ;;
+    Power*:Rhapsody:*:*)
+        echo powerpc-apple-rhapsody${UNAME_RELEASE}
+        exit 0 ;;
+    *:Rhapsody:*:*)
+        echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
+        exit 0 ;;
+    *:Darwin:*:*)
+        case `uname -p` in
+            *86) UNAME_PROCESSOR=i686 ;;
+            powerpc) UNAME_PROCESSOR=powerpc ;;
+        esac
+        echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
+        exit 0 ;;
+    *:procnto*:*:* | *:QNX:[0123456789]*:*)
+        UNAME_PROCESSOR=`uname -p`
+        if test "$UNAME_PROCESSOR" = "x86"; then
+                UNAME_PROCESSOR=i386
+                UNAME_MACHINE=pc
+        fi
+        echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
+        exit 0 ;;
+    *:QNX:*:4*)
+        echo i386-pc-qnx
+        exit 0 ;;
+    NSR-?:NONSTOP_KERNEL:*:*)
+        echo nsr-tandem-nsk${UNAME_RELEASE}
+        exit 0 ;;
+    *:NonStop-UX:*:*)
+        echo mips-compaq-nonstopux
+        exit 0 ;;
+    BS2000:POSIX*:*:*)
+        echo bs2000-siemens-sysv
+        exit 0 ;;
+    DS/*:UNIX_System_V:*:*)
+        echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
+        exit 0 ;;
+    *:Plan9:*:*)
+        # "uname -m" is not consistent, so use $cputype instead. 386
+        # is converted to i386 for consistency with other x86
+        # operating systems.
+        if test "$cputype" = "386"; then
+            UNAME_MACHINE=i386
+        else
+            UNAME_MACHINE="$cputype"
+        fi
+        echo ${UNAME_MACHINE}-unknown-plan9
+        exit 0 ;;
+    *:TOPS-10:*:*)
+        echo pdp10-unknown-tops10
+        exit 0 ;;
+    *:TENEX:*:*)
+        echo pdp10-unknown-tenex
+        exit 0 ;;
+    KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
+        echo pdp10-dec-tops20
+        exit 0 ;;
+    XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
+        echo pdp10-xkl-tops20
+        exit 0 ;;
+    *:TOPS-20:*:*)
+        echo pdp10-unknown-tops20
+        exit 0 ;;
+    *:ITS:*:*)
+        echo pdp10-unknown-its
+        exit 0 ;;
+    SEI:*:*:SEIUX)
+        echo mips-sei-seiux${UNAME_RELEASE}
+        exit 0 ;;
+    *:DragonFly:*:*)
+        echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
+        exit 0 ;;
+    *:*VMS:*:*)
+        UNAME_MACHINE=`(uname -p) 2>/dev/null`
+        case "${UNAME_MACHINE}" in
+            A*) echo alpha-dec-vms && exit 0 ;;
+            I*) echo ia64-dec-vms && exit 0 ;;
+            V*) echo vax-dec-vms && exit 0 ;;
+        esac
+esac
+
+#echo '(No uname command or uname output not recognized.)' 1>&2
+#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2
+
+eval $set_cc_for_build
+cat >$dummy.c <<EOF
+#ifdef _SEQUENT_
+# include <sys/types.h>
+# include <sys/utsname.h>
+#endif
+main ()
+{
+#if defined (sony)
+#if defined (MIPSEB)
+  /* BFD wants "bsd" instead of "newsos".  Perhaps BFD should be changed,
+     I don't know....
*/ + printf ("mips-sony-bsd\n"); exit (0); +#else +#include + printf ("m68k-sony-newsos%s\n", +#ifdef NEWSOS4 + "4" +#else + "" +#endif + ); exit (0); +#endif +#endif + +#if defined (__arm) && defined (__acorn) && defined (__unix) + printf ("arm-acorn-riscix"); exit (0); +#endif + +#if defined (hp300) && !defined (hpux) + printf ("m68k-hp-bsd\n"); exit (0); +#endif + +#if defined (NeXT) +#if !defined (__ARCHITECTURE__) +#define __ARCHITECTURE__ "m68k" +#endif + int version; + version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`; + if (version < 4) + printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version); + else + printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version); + exit (0); +#endif + +#if defined (MULTIMAX) || defined (n16) +#if defined (UMAXV) + printf ("ns32k-encore-sysv\n"); exit (0); +#else +#if defined (CMU) + printf ("ns32k-encore-mach\n"); exit (0); +#else + printf ("ns32k-encore-bsd\n"); exit (0); +#endif +#endif +#endif + +#if defined (__386BSD__) + printf ("i386-pc-bsd\n"); exit (0); +#endif + +#if defined (sequent) +#if defined (i386) + printf ("i386-sequent-dynix\n"); exit (0); +#endif +#if defined (ns32000) + printf ("ns32k-sequent-dynix\n"); exit (0); +#endif +#endif + +#if defined (_SEQUENT_) + struct utsname un; + + uname(&un); + + if (strncmp(un.version, "V2", 2) == 0) { + printf ("i386-sequent-ptx2\n"); exit (0); + } + if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */ + printf ("i386-sequent-ptx1\n"); exit (0); + } + printf ("i386-sequent-ptx\n"); exit (0); + +#endif + +#if defined (vax) +# if !defined (ultrix) +# include +# if defined (BSD) +# if BSD == 43 + printf ("vax-dec-bsd4.3\n"); exit (0); +# else +# if BSD == 199006 + printf ("vax-dec-bsd4.3reno\n"); exit (0); +# else + printf ("vax-dec-bsd\n"); exit (0); +# endif +# endif +# else + printf ("vax-dec-bsd\n"); exit (0); +# endif +# else + printf ("vax-dec-ultrix\n"); exit (0); +# endif +#endif + +#if defined (alliant) && defined (i860) + printf ("i860-alliant-bsd\n"); exit (0); +#endif + + exit (1); +} +EOF + +$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && $dummy && exit 0 + +# Apollos put the system type in the environment. + +test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit 0; } + +# Convex versions that predate uname can use getsysinfo(1) + +if [ -x /usr/convex/getsysinfo ] +then + case `getsysinfo -f cpu_type` in + c1*) + echo c1-convex-bsd + exit 0 ;; + c2*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit 0 ;; + c34*) + echo c34-convex-bsd + exit 0 ;; + c38*) + echo c38-convex-bsd + exit 0 ;; + c4*) + echo c4-convex-bsd + exit 0 ;; + esac +fi + +cat >&2 < in order to provide the needed +information to handle your system. 
+ +config.guess timestamp = $timestamp + +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null` + +hostinfo = `(hostinfo) 2>/dev/null` +/bin/universe = `(/bin/universe) 2>/dev/null` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` +/bin/arch = `(/bin/arch) 2>/dev/null` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` + +UNAME_MACHINE = ${UNAME_MACHINE} +UNAME_RELEASE = ${UNAME_RELEASE} +UNAME_SYSTEM = ${UNAME_SYSTEM} +UNAME_VERSION = ${UNAME_VERSION} +EOF + +exit 1 + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/src/examples/pipes/config.sub b/src/examples/pipes/config.sub new file mode 100644 index 0000000..ac6de98 --- /dev/null +++ b/src/examples/pipes/config.sub @@ -0,0 +1,1552 @@ +#! /bin/sh +# Configuration validation subroutine script. +# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, +# 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. + +timestamp='2004-06-24' + +# This file is (in principle) common to ALL GNU software. +# The presence of a machine in this file suggests that SOME GNU software +# can handle that machine. It does not imply ALL GNU software can. +# +# This file is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, +# Boston, MA 02111-1307, USA. + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# Please send patches to . Submit a context +# diff and a properly formatted ChangeLog entry. +# +# Configuration subroutine to validate and canonicalize a configuration type. +# Supply the specified configuration type as an argument. +# If it is invalid, we print an error message on stderr and exit with code 1. +# Otherwise, we print the canonical config type on stdout and succeed. + +# This file is supposed to be the same for all GNU packages +# and recognize all the CPU types, system types and aliases +# that are meaningful with *any* GNU software. +# Each package is responsible for reporting which valid configurations +# it does not support. The user should be able to distinguish +# a failure to support a valid configuration from a meaningless +# configuration. 
+ +# The goal of this file is to map all the various variations of a given +# machine specification into a single specification in the form: +# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM +# or in some cases, the newer four-part form: +# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM +# It is wrong to echo any other type of specification. + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] CPU-MFR-OPSYS + $0 [OPTION] ALIAS + +Canonicalize a configuration name. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.sub ($timestamp) + +Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 +Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit 0 ;; + --version | -v ) + echo "$version" ; exit 0 ;; + --help | --h* | -h ) + echo "$usage"; exit 0 ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" + exit 1 ;; + + *local*) + # First pass through any local machine types. + echo $1 + exit 0;; + + * ) + break ;; + esac +done + +case $# in + 0) echo "$me: missing argument$help" >&2 + exit 1;; + 1) ;; + *) echo "$me: too many arguments$help" >&2 + exit 1;; +esac + +# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). +# Here we must recognize all the valid KERNEL-OS combinations. +maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` +case $maybe_os in + nto-qnx* | linux-gnu* | linux-dietlibc | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | \ + kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* | storm-chaos* | os2-emx* | rtmk-nova*) + os=-$maybe_os + basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` + ;; + *) + basic_machine=`echo $1 | sed 's/-[^-]*$//'` + if [ $basic_machine != $1 ] + then os=`echo $1 | sed 's/.*-/-/'` + else os=; fi + ;; +esac + +### Let's recognize common machines as not being operating systems so +### that things like config.sub decstation-3100 work. We also +### recognize some manufacturers as not being operating systems, so we +### can provide default operating systems below. +case $os in + -sun*os*) + # Prevent following clause from handling this invalid input. 
+ ;; + -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ + -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ + -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ + -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ + -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ + -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ + -apple | -axis | -knuth | -cray) + os= + basic_machine=$1 + ;; + -sim | -cisco | -oki | -wec | -winbond) + os= + basic_machine=$1 + ;; + -scout) + ;; + -wrs) + os=-vxworks + basic_machine=$1 + ;; + -chorusos*) + os=-chorusos + basic_machine=$1 + ;; + -chorusrdb) + os=-chorusrdb + basic_machine=$1 + ;; + -hiux*) + os=-hiuxwe2 + ;; + -sco5) + os=-sco3.2v5 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco4) + os=-sco3.2v4 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2.[4-9]*) + os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2v[4-9]*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco*) + os=-sco3.2v2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -udk*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -isc) + os=-isc2.2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -clix*) + basic_machine=clipper-intergraph + ;; + -isc*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -lynx*) + os=-lynxos + ;; + -ptx*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` + ;; + -windowsnt*) + os=`echo $os | sed -e 's/windowsnt/winnt/'` + ;; + -psos*) + os=-psos + ;; + -mint | -mint[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; +esac + +# Decode aliases for certain CPU-COMPANY combinations. +case $basic_machine in + # Recognize the basic CPU types without company name. + # Some are omitted here because they have special meanings below. 
+ 1750a | 580 \ + | a29k \ + | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ + | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ + | am33_2.0 \ + | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr \ + | c4x | clipper \ + | d10v | d30v | dlx | dsp16xx \ + | fr30 | frv \ + | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ + | i370 | i860 | i960 | ia64 \ + | ip2k | iq2000 \ + | m32r | m32rle | m68000 | m68k | m88k | mcore \ + | mips | mipsbe | mipseb | mipsel | mipsle \ + | mips16 \ + | mips64 | mips64el \ + | mips64vr | mips64vrel \ + | mips64orion | mips64orionel \ + | mips64vr4100 | mips64vr4100el \ + | mips64vr4300 | mips64vr4300el \ + | mips64vr5000 | mips64vr5000el \ + | mipsisa32 | mipsisa32el \ + | mipsisa32r2 | mipsisa32r2el \ + | mipsisa64 | mipsisa64el \ + | mipsisa64r2 | mipsisa64r2el \ + | mipsisa64sb1 | mipsisa64sb1el \ + | mipsisa64sr71k | mipsisa64sr71kel \ + | mipstx39 | mipstx39el \ + | mn10200 | mn10300 \ + | msp430 \ + | ns16k | ns32k \ + | openrisc | or32 \ + | pdp10 | pdp11 | pj | pjl \ + | powerpc | powerpc64 | powerpc64le | powerpcle | ppcbe \ + | pyramid \ + | sh | sh[1234] | sh[23]e | sh[34]eb | shbe | shle | sh[1234]le | sh3ele \ + | sh64 | sh64le \ + | sparc | sparc64 | sparc86x | sparclet | sparclite | sparcv8 | sparcv9 | sparcv9b \ + | strongarm \ + | tahoe | thumb | tic4x | tic80 | tron \ + | v850 | v850e \ + | we32k \ + | x86 | xscale | xstormy16 | xtensa \ + | z8k) + basic_machine=$basic_machine-unknown + ;; + m6811 | m68hc11 | m6812 | m68hc12) + # Motorola 68HC11/12. + basic_machine=$basic_machine-unknown + os=-none + ;; + m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) + ;; + + # We use `pc' rather than `unknown' + # because (1) that's what they normally are, and + # (2) the word "unknown" tends to confuse beginning users. + i*86 | x86_64) + basic_machine=$basic_machine-pc + ;; + # Object if more than one company name word. + *-*-*) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; + # Recognize the basic CPU types with company name. 
+ 580-* \ + | a29k-* \ + | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ + | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ + | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \ + | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ + | avr-* \ + | bs2000-* \ + | c[123]* | c30-* | [cjt]90-* | c4x-* | c54x-* | c55x-* | c6x-* \ + | clipper-* | craynv-* | cydra-* \ + | d10v-* | d30v-* | dlx-* \ + | elxsi-* \ + | f30[01]-* | f700-* | fr30-* | frv-* | fx80-* \ + | h8300-* | h8500-* \ + | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ + | i*86-* | i860-* | i960-* | ia64-* \ + | ip2k-* | iq2000-* \ + | m32r-* | m32rle-* \ + | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ + | m88110-* | m88k-* | mcore-* \ + | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ + | mips16-* \ + | mips64-* | mips64el-* \ + | mips64vr-* | mips64vrel-* \ + | mips64orion-* | mips64orionel-* \ + | mips64vr4100-* | mips64vr4100el-* \ + | mips64vr4300-* | mips64vr4300el-* \ + | mips64vr5000-* | mips64vr5000el-* \ + | mipsisa32-* | mipsisa32el-* \ + | mipsisa32r2-* | mipsisa32r2el-* \ + | mipsisa64-* | mipsisa64el-* \ + | mipsisa64r2-* | mipsisa64r2el-* \ + | mipsisa64sb1-* | mipsisa64sb1el-* \ + | mipsisa64sr71k-* | mipsisa64sr71kel-* \ + | mipstx39-* | mipstx39el-* \ + | mmix-* \ + | msp430-* \ + | none-* | np1-* | ns16k-* | ns32k-* \ + | orion-* \ + | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ + | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \ + | pyramid-* \ + | romp-* | rs6000-* \ + | sh-* | sh[1234]-* | sh[23]e-* | sh[34]eb-* | shbe-* \ + | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ + | sparc-* | sparc64-* | sparc86x-* | sparclet-* | sparclite-* \ + | sparcv8-* | sparcv9-* | sparcv9b-* | strongarm-* | sv1-* | sx?-* \ + | tahoe-* | thumb-* \ + | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ + | tron-* \ + | v850-* | v850e-* | vax-* \ + | we32k-* \ + | x86-* | x86_64-* | xps100-* | xscale-* | xstormy16-* \ + | xtensa-* \ + | ymp-* \ + | z8k-*) + ;; + # Recognize the various machine names and aliases which stand + # for a CPU type and a company and sometimes even an OS. 
+ 386bsd) + basic_machine=i386-unknown + os=-bsd + ;; + 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) + basic_machine=m68000-att + ;; + 3b*) + basic_machine=we32k-att + ;; + a29khif) + basic_machine=a29k-amd + os=-udi + ;; + abacus) + basic_machine=abacus-unknown + ;; + adobe68k) + basic_machine=m68010-adobe + os=-scout + ;; + alliant | fx80) + basic_machine=fx80-alliant + ;; + altos | altos3068) + basic_machine=m68k-altos + ;; + am29k) + basic_machine=a29k-none + os=-bsd + ;; + amd64) + basic_machine=x86_64-pc + ;; + amd64-*) + basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + amdahl) + basic_machine=580-amdahl + os=-sysv + ;; + amiga | amiga-*) + basic_machine=m68k-unknown + ;; + amigaos | amigados) + basic_machine=m68k-unknown + os=-amigaos + ;; + amigaunix | amix) + basic_machine=m68k-unknown + os=-sysv4 + ;; + apollo68) + basic_machine=m68k-apollo + os=-sysv + ;; + apollo68bsd) + basic_machine=m68k-apollo + os=-bsd + ;; + aux) + basic_machine=m68k-apple + os=-aux + ;; + balance) + basic_machine=ns32k-sequent + os=-dynix + ;; + c90) + basic_machine=c90-cray + os=-unicos + ;; + convex-c1) + basic_machine=c1-convex + os=-bsd + ;; + convex-c2) + basic_machine=c2-convex + os=-bsd + ;; + convex-c32) + basic_machine=c32-convex + os=-bsd + ;; + convex-c34) + basic_machine=c34-convex + os=-bsd + ;; + convex-c38) + basic_machine=c38-convex + os=-bsd + ;; + cray | j90) + basic_machine=j90-cray + os=-unicos + ;; + craynv) + basic_machine=craynv-cray + os=-unicosmp + ;; + cr16c) + basic_machine=cr16c-unknown + os=-elf + ;; + crds | unos) + basic_machine=m68k-crds + ;; + cris | cris-* | etrax*) + basic_machine=cris-axis + ;; + crx) + basic_machine=crx-unknown + os=-elf + ;; + da30 | da30-*) + basic_machine=m68k-da30 + ;; + decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) + basic_machine=mips-dec + ;; + decsystem10* | dec10*) + basic_machine=pdp10-dec + os=-tops10 + ;; + decsystem20* | dec20*) + basic_machine=pdp10-dec + os=-tops20 + ;; + delta | 3300 | motorola-3300 | motorola-delta \ + | 3300-motorola | delta-motorola) + basic_machine=m68k-motorola + ;; + delta88) + basic_machine=m88k-motorola + os=-sysv3 + ;; + dpx20 | dpx20-*) + basic_machine=rs6000-bull + os=-bosx + ;; + dpx2* | dpx2*-bull) + basic_machine=m68k-bull + os=-sysv3 + ;; + ebmon29k) + basic_machine=a29k-amd + os=-ebmon + ;; + elxsi) + basic_machine=elxsi-elxsi + os=-bsd + ;; + encore | umax | mmax) + basic_machine=ns32k-encore + ;; + es1800 | OSE68k | ose68k | ose | OSE) + basic_machine=m68k-ericsson + os=-ose + ;; + fx2800) + basic_machine=i860-alliant + ;; + genix) + basic_machine=ns32k-ns + ;; + gmicro) + basic_machine=tron-gmicro + os=-sysv + ;; + go32) + basic_machine=i386-pc + os=-go32 + ;; + h3050r* | hiux*) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + h8300hms) + basic_machine=h8300-hitachi + os=-hms + ;; + h8300xray) + basic_machine=h8300-hitachi + os=-xray + ;; + h8500hms) + basic_machine=h8500-hitachi + os=-hms + ;; + harris) + basic_machine=m88k-harris + os=-sysv3 + ;; + hp300-*) + basic_machine=m68k-hp + ;; + hp300bsd) + basic_machine=m68k-hp + os=-bsd + ;; + hp300hpux) + basic_machine=m68k-hp + os=-hpux + ;; + hp3k9[0-9][0-9] | hp9[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k2[0-9][0-9] | hp9k31[0-9]) + basic_machine=m68000-hp + ;; + hp9k3[2-9][0-9]) + basic_machine=m68k-hp + ;; + hp9k6[0-9][0-9] | hp6[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k7[0-79][0-9] | hp7[0-79][0-9]) + basic_machine=hppa1.1-hp + ;; + hp9k78[0-9] | hp78[0-9]) + # 
FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][13679] | hp8[0-9][13679]) + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][0-9] | hp8[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hppa-next) + os=-nextstep3 + ;; + hppaosf) + basic_machine=hppa1.1-hp + os=-osf + ;; + hppro) + basic_machine=hppa1.1-hp + os=-proelf + ;; + i370-ibm* | ibm*) + basic_machine=i370-ibm + ;; +# I'm not sure what "Sysv32" means. Should this be sysv3.2? + i*86v32) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv32 + ;; + i*86v4*) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv4 + ;; + i*86v) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv + ;; + i*86sol2) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-solaris2 + ;; + i386mach) + basic_machine=i386-mach + os=-mach + ;; + i386-vsta | vsta) + basic_machine=i386-unknown + os=-vsta + ;; + iris | iris4d) + basic_machine=mips-sgi + case $os in + -irix*) + ;; + *) + os=-irix4 + ;; + esac + ;; + isi68 | isi) + basic_machine=m68k-isi + os=-sysv + ;; + m88k-omron*) + basic_machine=m88k-omron + ;; + magnum | m3230) + basic_machine=mips-mips + os=-sysv + ;; + merlin) + basic_machine=ns32k-utek + os=-sysv + ;; + mingw32) + basic_machine=i386-pc + os=-mingw32 + ;; + miniframe) + basic_machine=m68000-convergent + ;; + *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; + mips3*-*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` + ;; + mips3*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown + ;; + monitor) + basic_machine=m68k-rom68k + os=-coff + ;; + morphos) + basic_machine=powerpc-unknown + os=-morphos + ;; + msdos) + basic_machine=i386-pc + os=-msdos + ;; + mvs) + basic_machine=i370-ibm + os=-mvs + ;; + ncr3000) + basic_machine=i486-ncr + os=-sysv4 + ;; + netbsd386) + basic_machine=i386-unknown + os=-netbsd + ;; + netwinder) + basic_machine=armv4l-rebel + os=-linux + ;; + news | news700 | news800 | news900) + basic_machine=m68k-sony + os=-newsos + ;; + news1000) + basic_machine=m68030-sony + os=-newsos + ;; + news-3600 | risc-news) + basic_machine=mips-sony + os=-newsos + ;; + necv70) + basic_machine=v70-nec + os=-sysv + ;; + next | m*-next ) + basic_machine=m68k-next + case $os in + -nextstep* ) + ;; + -ns2*) + os=-nextstep2 + ;; + *) + os=-nextstep3 + ;; + esac + ;; + nh3000) + basic_machine=m68k-harris + os=-cxux + ;; + nh[45]000) + basic_machine=m88k-harris + os=-cxux + ;; + nindy960) + basic_machine=i960-intel + os=-nindy + ;; + mon960) + basic_machine=i960-intel + os=-mon960 + ;; + nonstopux) + basic_machine=mips-compaq + os=-nonstopux + ;; + np1) + basic_machine=np1-gould + ;; + nsr-tandem) + basic_machine=nsr-tandem + ;; + op50n-* | op60c-*) + basic_machine=hppa1.1-oki + os=-proelf + ;; + or32 | or32-*) + basic_machine=or32-unknown + os=-coff + ;; + os400) + basic_machine=powerpc-ibm + os=-os400 + ;; + OSE68000 | ose68000) + basic_machine=m68000-ericsson + os=-ose + ;; + os68k) + basic_machine=m68k-none + os=-os68k + ;; + pa-hitachi) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + paragon) + basic_machine=i860-intel + os=-osf + ;; + pbd) + basic_machine=sparc-tti + ;; + pbb) + basic_machine=m68k-tti + ;; + pc532 | pc532-*) + basic_machine=ns32k-pc532 + ;; + pentium | p5 | k5 | k6 | nexgen | viac3) + basic_machine=i586-pc + ;; + pentiumpro | p6 | 6x86 | athlon | athlon_*) + 
basic_machine=i686-pc + ;; + pentiumii | pentium2 | pentiumiii | pentium3) + basic_machine=i686-pc + ;; + pentium4) + basic_machine=i786-pc + ;; + pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) + basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumpro-* | p6-* | 6x86-* | athlon-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium4-*) + basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pn) + basic_machine=pn-gould + ;; + power) basic_machine=power-ibm + ;; + ppc) basic_machine=powerpc-unknown + ;; + ppc-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppcle | powerpclittle | ppc-le | powerpc-little) + basic_machine=powerpcle-unknown + ;; + ppcle-* | powerpclittle-*) + basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64) basic_machine=powerpc64-unknown + ;; + ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64le | powerpc64little | ppc64-le | powerpc64-little) + basic_machine=powerpc64le-unknown + ;; + ppc64le-* | powerpc64little-*) + basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ps2) + basic_machine=i386-ibm + ;; + pw32) + basic_machine=i586-unknown + os=-pw32 + ;; + rom68k) + basic_machine=m68k-rom68k + os=-coff + ;; + rm[46]00) + basic_machine=mips-siemens + ;; + rtpc | rtpc-*) + basic_machine=romp-ibm + ;; + s390 | s390-*) + basic_machine=s390-ibm + ;; + s390x | s390x-*) + basic_machine=s390x-ibm + ;; + sa29200) + basic_machine=a29k-amd + os=-udi + ;; + sb1) + basic_machine=mipsisa64sb1-unknown + ;; + sb1el) + basic_machine=mipsisa64sb1el-unknown + ;; + sei) + basic_machine=mips-sei + os=-seiux + ;; + sequent) + basic_machine=i386-sequent + ;; + sh) + basic_machine=sh-hitachi + os=-hms + ;; + sh64) + basic_machine=sh64-unknown + ;; + sparclite-wrs | simso-wrs) + basic_machine=sparclite-wrs + os=-vxworks + ;; + sps7) + basic_machine=m68k-bull + os=-sysv2 + ;; + spur) + basic_machine=spur-unknown + ;; + st2000) + basic_machine=m68k-tandem + ;; + stratus) + basic_machine=i860-stratus + os=-sysv4 + ;; + sun2) + basic_machine=m68000-sun + ;; + sun2os3) + basic_machine=m68000-sun + os=-sunos3 + ;; + sun2os4) + basic_machine=m68000-sun + os=-sunos4 + ;; + sun3os3) + basic_machine=m68k-sun + os=-sunos3 + ;; + sun3os4) + basic_machine=m68k-sun + os=-sunos4 + ;; + sun4os3) + basic_machine=sparc-sun + os=-sunos3 + ;; + sun4os4) + basic_machine=sparc-sun + os=-sunos4 + ;; + sun4sol2) + basic_machine=sparc-sun + os=-solaris2 + ;; + sun3 | sun3-*) + basic_machine=m68k-sun + ;; + sun4) + basic_machine=sparc-sun + ;; + sun386 | sun386i | roadrunner) + basic_machine=i386-sun + ;; + sv1) + basic_machine=sv1-cray + os=-unicos + ;; + symmetry) + basic_machine=i386-sequent + os=-dynix + ;; + t3e) + basic_machine=alphaev5-cray + os=-unicos + ;; + t90) + basic_machine=t90-cray + os=-unicos + ;; + tic54x | c54x*) + basic_machine=tic54x-unknown + os=-coff + ;; + tic55x | c55x*) + basic_machine=tic55x-unknown + os=-coff + ;; + tic6x | c6x*) + basic_machine=tic6x-unknown + os=-coff + ;; + tx39) + basic_machine=mipstx39-unknown + ;; + tx39el) + basic_machine=mipstx39el-unknown + ;; + toad1) + basic_machine=pdp10-xkl + os=-tops20 + ;; + tower | tower-32) + basic_machine=m68k-ncr + ;; + tpf) + basic_machine=s390x-ibm + os=-tpf + ;; + udi29k) + basic_machine=a29k-amd + os=-udi + ;; + ultra3) + 
basic_machine=a29k-nyu + os=-sym1 + ;; + v810 | necv810) + basic_machine=v810-nec + os=-none + ;; + vaxv) + basic_machine=vax-dec + os=-sysv + ;; + vms) + basic_machine=vax-dec + os=-vms + ;; + vpp*|vx|vx-*) + basic_machine=f301-fujitsu + ;; + vxworks960) + basic_machine=i960-wrs + os=-vxworks + ;; + vxworks68) + basic_machine=m68k-wrs + os=-vxworks + ;; + vxworks29k) + basic_machine=a29k-wrs + os=-vxworks + ;; + w65*) + basic_machine=w65-wdc + os=-none + ;; + w89k-*) + basic_machine=hppa1.1-winbond + os=-proelf + ;; + xps | xps100) + basic_machine=xps100-honeywell + ;; + ymp) + basic_machine=ymp-cray + os=-unicos + ;; + z8k-*-coff) + basic_machine=z8k-unknown + os=-sim + ;; + none) + basic_machine=none-none + os=-none + ;; + +# Here we handle the default manufacturer of certain CPU types. It is in +# some cases the only manufacturer, in others, it is the most popular. + w89k) + basic_machine=hppa1.1-winbond + ;; + op50n) + basic_machine=hppa1.1-oki + ;; + op60c) + basic_machine=hppa1.1-oki + ;; + romp) + basic_machine=romp-ibm + ;; + mmix) + basic_machine=mmix-knuth + ;; + rs6000) + basic_machine=rs6000-ibm + ;; + vax) + basic_machine=vax-dec + ;; + pdp10) + # there are many clones, so DEC is not a safe bet + basic_machine=pdp10-unknown + ;; + pdp11) + basic_machine=pdp11-dec + ;; + we32k) + basic_machine=we32k-att + ;; + sh3 | sh4 | sh[34]eb | sh[1234]le | sh[23]ele) + basic_machine=sh-unknown + ;; + sh64) + basic_machine=sh64-unknown + ;; + sparc | sparcv8 | sparcv9 | sparcv9b) + basic_machine=sparc-sun + ;; + cydra) + basic_machine=cydra-cydrome + ;; + orion) + basic_machine=orion-highlevel + ;; + orion105) + basic_machine=clipper-highlevel + ;; + mac | mpw | mac-mpw) + basic_machine=m68k-apple + ;; + pmac | pmac-mpw) + basic_machine=powerpc-apple + ;; + *-unknown) + # Make sure to match an already-canonicalized machine name. + ;; + *) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; +esac + +# Here we canonicalize certain aliases for manufacturers. +case $basic_machine in + *-digital*) + basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` + ;; + *-commodore*) + basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` + ;; + *) + ;; +esac + +# Decode manufacturer-specific aliases for certain operating systems. + +if [ x"$os" != x"" ] +then +case $os in + # First match some system type aliases + # that might get confused with valid system types. + # -solaris* is a basic system type, with this one exception. + -solaris1 | -solaris1.*) + os=`echo $os | sed -e 's|solaris1|sunos4|'` + ;; + -solaris) + os=-solaris2 + ;; + -svr4*) + os=-sysv4 + ;; + -unixware*) + os=-sysv4.2uw + ;; + -gnu/linux*) + os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` + ;; + # First accept the basic system types. + # The portable systems comes first. + # Each alternative MUST END IN A *, to match a version number. + # -sysv* is not here because it comes later, after sysvr4. 
+ -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ + | -*vms* | -sco* | -esix* | -isc* | -aix* | -sunos | -sunos[34]*\ + | -hpux* | -unos* | -osf* | -luna* | -dgux* | -solaris* | -sym* \ + | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ + | -aos* \ + | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ + | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ + | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* | -openbsd* \ + | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ + | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ + | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ + | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ + | -chorusos* | -chorusrdb* \ + | -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ + | -mingw32* | -linux-gnu* | -linux-uclibc* | -uxpv* | -beos* | -mpeix* | -udk* \ + | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ + | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ + | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ + | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ + | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ + | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly*) + # Remember, each alternative MUST END IN *, to match a version number. + ;; + -qnx*) + case $basic_machine in + x86-* | i*86-*) + ;; + *) + os=-nto$os + ;; + esac + ;; + -nto-qnx*) + ;; + -nto*) + os=`echo $os | sed -e 's|nto|nto-qnx|'` + ;; + -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ + | -windows* | -osx | -abug | -netware* | -os9* | -beos* \ + | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) + ;; + -mac*) + os=`echo $os | sed -e 's|mac|macos|'` + ;; + -linux-dietlibc) + os=-linux-dietlibc + ;; + -linux*) + os=`echo $os | sed -e 's|linux|linux-gnu|'` + ;; + -sunos5*) + os=`echo $os | sed -e 's|sunos5|solaris2|'` + ;; + -sunos6*) + os=`echo $os | sed -e 's|sunos6|solaris3|'` + ;; + -opened*) + os=-openedition + ;; + -os400*) + os=-os400 + ;; + -wince*) + os=-wince + ;; + -osfrose*) + os=-osfrose + ;; + -osf*) + os=-osf + ;; + -utek*) + os=-bsd + ;; + -dynix*) + os=-bsd + ;; + -acis*) + os=-aos + ;; + -atheos*) + os=-atheos + ;; + -syllable*) + os=-syllable + ;; + -386bsd) + os=-bsd + ;; + -ctix* | -uts*) + os=-sysv + ;; + -nova*) + os=-rtmk-nova + ;; + -ns2 ) + os=-nextstep2 + ;; + -nsk*) + os=-nsk + ;; + # Preserve the version number of sinix5. + -sinix5.*) + os=`echo $os | sed -e 's|sinix|sysv|'` + ;; + -sinix*) + os=-sysv4 + ;; + -tpf*) + os=-tpf + ;; + -triton*) + os=-sysv3 + ;; + -oss*) + os=-sysv3 + ;; + -svr4) + os=-sysv4 + ;; + -svr3) + os=-sysv3 + ;; + -sysvr4) + os=-sysv4 + ;; + # This must come after -sysvr4. + -sysv*) + ;; + -ose*) + os=-ose + ;; + -es1800*) + os=-ose + ;; + -xenix) + os=-xenix + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + os=-mint + ;; + -aros*) + os=-aros + ;; + -kaos*) + os=-kaos + ;; + -none) + ;; + *) + # Get rid of the `-' at the beginning of $os. + os=`echo $os | sed 's/[^-]*-//'` + echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 + exit 1 + ;; +esac +else + +# Here we handle the default operating systems that come with various machines. +# The value should be what the vendor currently ships out the door with their +# machine or put another way, the most popular os provided with the machine. 
+ +# Note that if you're going to try to match "-MANUFACTURER" here (say, +# "-sun"), then you have to tell the case statement up towards the top +# that MANUFACTURER isn't an operating system. Otherwise, code above +# will signal an error saying that MANUFACTURER isn't an operating +# system, and we'll never get to this point. + +case $basic_machine in + *-acorn) + os=-riscix1.2 + ;; + arm*-rebel) + os=-linux + ;; + arm*-semi) + os=-aout + ;; + c4x-* | tic4x-*) + os=-coff + ;; + # This must come before the *-dec entry. + pdp10-*) + os=-tops20 + ;; + pdp11-*) + os=-none + ;; + *-dec | vax-*) + os=-ultrix4.2 + ;; + m68*-apollo) + os=-domain + ;; + i386-sun) + os=-sunos4.0.2 + ;; + m68000-sun) + os=-sunos3 + # This also exists in the configure program, but was not the + # default. + # os=-sunos4 + ;; + m68*-cisco) + os=-aout + ;; + mips*-cisco) + os=-elf + ;; + mips*-*) + os=-elf + ;; + or32-*) + os=-coff + ;; + *-tti) # must be before sparc entry or we get the wrong os. + os=-sysv3 + ;; + sparc-* | *-sun) + os=-sunos4.1.1 + ;; + *-be) + os=-beos + ;; + *-ibm) + os=-aix + ;; + *-knuth) + os=-mmixware + ;; + *-wec) + os=-proelf + ;; + *-winbond) + os=-proelf + ;; + *-oki) + os=-proelf + ;; + *-hp) + os=-hpux + ;; + *-hitachi) + os=-hiux + ;; + i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) + os=-sysv + ;; + *-cbm) + os=-amigaos + ;; + *-dg) + os=-dgux + ;; + *-dolphin) + os=-sysv3 + ;; + m68k-ccur) + os=-rtu + ;; + m88k-omron*) + os=-luna + ;; + *-next ) + os=-nextstep + ;; + *-sequent) + os=-ptx + ;; + *-crds) + os=-unos + ;; + *-ns) + os=-genix + ;; + i370-*) + os=-mvs + ;; + *-next) + os=-nextstep3 + ;; + *-gould) + os=-sysv + ;; + *-highlevel) + os=-bsd + ;; + *-encore) + os=-bsd + ;; + *-sgi) + os=-irix + ;; + *-siemens) + os=-sysv4 + ;; + *-masscomp) + os=-rtu + ;; + f30[01]-fujitsu | f700-fujitsu) + os=-uxpv + ;; + *-rom68k) + os=-coff + ;; + *-*bug) + os=-coff + ;; + *-apple) + os=-macos + ;; + *-atari*) + os=-mint + ;; + *) + os=-none + ;; +esac +fi + +# Here we handle the case where we know the os, and the CPU type, but not the +# manufacturer. We pick the logical manufacturer. +vendor=unknown +case $basic_machine in + *-unknown) + case $os in + -riscix*) + vendor=acorn + ;; + -sunos*) + vendor=sun + ;; + -aix*) + vendor=ibm + ;; + -beos*) + vendor=be + ;; + -hpux*) + vendor=hp + ;; + -mpeix*) + vendor=hp + ;; + -hiux*) + vendor=hitachi + ;; + -unos*) + vendor=crds + ;; + -dgux*) + vendor=dg + ;; + -luna*) + vendor=omron + ;; + -genix*) + vendor=ns + ;; + -mvs* | -opened*) + vendor=ibm + ;; + -os400*) + vendor=ibm + ;; + -ptx*) + vendor=sequent + ;; + -tpf*) + vendor=ibm + ;; + -vxsim* | -vxworks* | -windiss*) + vendor=wrs + ;; + -aux*) + vendor=apple + ;; + -hms*) + vendor=hitachi + ;; + -mpw* | -macos*) + vendor=apple + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + vendor=atari + ;; + -vos*) + vendor=stratus + ;; + esac + basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` + ;; +esac + +echo $basic_machine$os +exit 0 + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/src/examples/pipes/configure b/src/examples/pipes/configure new file mode 100755 index 0000000..842dafc --- /dev/null +++ b/src/examples/pipes/configure @@ -0,0 +1,22882 @@ +#! /bin/sh +# Guess values for system-dependent variables and create Makefiles. +# Generated by GNU Autoconf 2.59 for hadoop-pipes-examples 0.13.0. +# +# Report bugs to . 
+# +# Copyright (C) 2003 Free Software Foundation, Inc. +# This configure script is free software; the Free Software Foundation +# gives unlimited permission to copy, distribute and modify it. +## --------------------- ## +## M4sh Initialization. ## +## --------------------- ## + +# Be Bourne compatible +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' +elif test -n "${BASH_VERSION+set}" && (set -o posix) >/dev/null 2>&1; then + set -o posix +fi +DUALCASE=1; export DUALCASE # for MKS sh + +# Support unset when possible. +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + as_unset=unset +else + as_unset=false +fi + + +# Work around bugs in pre-3.0 UWIN ksh. +$as_unset ENV MAIL MAILPATH +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +for as_var in \ + LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \ + LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \ + LC_TELEPHONE LC_TIME +do + if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then + eval $as_var=C; export $as_var + else + $as_unset $as_var + fi +done + +# Required to use basename. +if expr a : '\(a\)' >/dev/null 2>&1; then + as_expr=expr +else + as_expr=false +fi + +if (basename /) >/dev/null 2>&1 && test "X`basename / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + + +# Name of the executable. +as_me=`$as_basename "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)$' \| \ + . : '\(.\)' 2>/dev/null || +echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/; q; } + /^X\/\(\/\/\)$/{ s//\1/; q; } + /^X\/\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + + +# PATH needs CR, and LINENO needs CR and PATH. +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + echo "#! /bin/sh" >conf$$.sh + echo "exit 0" >>conf$$.sh + chmod +x conf$$.sh + if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then + PATH_SEPARATOR=';' + else + PATH_SEPARATOR=: + fi + rm -f conf$$.sh +fi + + + as_lineno_1=$LINENO + as_lineno_2=$LINENO + as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null` + test "x$as_lineno_1" != "x$as_lineno_2" && + test "x$as_lineno_3" = "x$as_lineno_2" || { + # Find who we are. Look in the path if we contain no path at all + # relative or not. + case $0 in + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break +done + + ;; + esac + # We did not find ourselves, most probably we were run as `sh COMMAND' + # in which case we are not to be found in the path. + if test "x$as_myself" = x; then + as_myself=$0 + fi + if test ! -f "$as_myself"; then + { echo "$as_me: error: cannot find myself; rerun with an absolute path" >&2 + { (exit 1); exit 1; }; } + fi + case $CONFIG_SHELL in + '') + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for as_base in sh bash ksh sh5; do + case $as_dir in + /*) + if ("$as_dir/$as_base" -c ' + as_lineno_1=$LINENO + as_lineno_2=$LINENO + as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null` + test "x$as_lineno_1" != "x$as_lineno_2" && + test "x$as_lineno_3" = "x$as_lineno_2" ') 2>/dev/null; then + $as_unset BASH_ENV || test "${BASH_ENV+set}" != set || { BASH_ENV=; export BASH_ENV; } + $as_unset ENV || test "${ENV+set}" != set || { ENV=; export ENV; } + CONFIG_SHELL=$as_dir/$as_base + export CONFIG_SHELL + exec "$CONFIG_SHELL" "$0" ${1+"$@"} + fi;; + esac + done +done +;; + esac + + # Create $as_me.lineno as a copy of $as_myself, but with $LINENO + # uniformly replaced by the line number. The first 'sed' inserts a + # line-number line before each line; the second 'sed' does the real + # work. The second script uses 'N' to pair each line-number line + # with the numbered line, and appends trailing '-' during + # substitution so that $LINENO is not a special case at line end. + # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the + # second 'sed' script. Blame Lee E. McMahon for sed's syntax. :-) + sed '=' <$as_myself | + sed ' + N + s,$,-, + : loop + s,^\(['$as_cr_digits']*\)\(.*\)[$]LINENO\([^'$as_cr_alnum'_]\),\1\2\1\3, + t loop + s,-$,, + s,^['$as_cr_digits']*\n,, + ' >$as_me.lineno && + chmod +x $as_me.lineno || + { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2 + { (exit 1); exit 1; }; } + + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensible to this). + . ./$as_me.lineno + # Exit status is that of the last command. + exit +} + + +case `echo "testing\c"; echo 1,2,3`,`echo -n testing; echo 1,2,3` in + *c*,-n*) ECHO_N= ECHO_C=' +' ECHO_T=' ' ;; + *c*,* ) ECHO_N=-n ECHO_C= ECHO_T= ;; + *) ECHO_N= ECHO_C='\c' ECHO_T= ;; +esac + +if expr a : '\(a\)' >/dev/null 2>&1; then + as_expr=expr +else + as_expr=false +fi + +rm -f conf$$ conf$$.exe conf$$.file +echo >conf$$.file +if ln -s conf$$.file conf$$ 2>/dev/null; then + # We could just check for DJGPP; but this test a) works b) is more generic + # and c) will remain valid once DJGPP supports symlinks (DJGPP 2.04). + if test -f conf$$.exe; then + # Don't use ln at all; we don't have any links + as_ln_s='cp -p' + else + as_ln_s='ln -s' + fi +elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln +else + as_ln_s='cp -p' +fi +rm -f conf$$ conf$$.exe conf$$.file + +if mkdir -p . 2>/dev/null; then + as_mkdir_p=: +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +as_executable_p="test -f" + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +# IFS +# We need space, tab and new line, in precisely that order. +as_nl=' +' +IFS=" $as_nl" + +# CDPATH. +$as_unset CDPATH + + + +# Check that we are running under the correct shell. +SHELL=${CONFIG_SHELL-/bin/sh} + +case X$ECHO in +X*--fallback-echo) + # Remove one level of quotation (which was required for Make). + ECHO=`echo "$ECHO" | sed 's,\\\\\$\\$0,'$0','` + ;; +esac + +echo=${ECHO-echo} +if test "X$1" = X--no-reexec; then + # Discard the --no-reexec flag, and continue. 
+ shift +elif test "X$1" = X--fallback-echo; then + # Avoid inline document here, it may be left over + : +elif test "X`($echo '\t') 2>/dev/null`" = 'X\t' ; then + # Yippee, $echo works! + : +else + # Restart under the correct shell. + exec $SHELL "$0" --no-reexec ${1+"$@"} +fi + +if test "X$1" = X--fallback-echo; then + # used as fallback echo + shift + cat </dev/null 2>&1 && unset CDPATH + +if test -z "$ECHO"; then +if test "X${echo_test_string+set}" != Xset; then +# find a string as large as possible, as long as the shell can cope with it + for cmd in 'sed 50q "$0"' 'sed 20q "$0"' 'sed 10q "$0"' 'sed 2q "$0"' 'echo test'; do + # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ... + if (echo_test_string="`eval $cmd`") 2>/dev/null && + echo_test_string="`eval $cmd`" && + (test "X$echo_test_string" = "X$echo_test_string") 2>/dev/null + then + break + fi + done +fi + +if test "X`($echo '\t') 2>/dev/null`" = 'X\t' && + echo_testing_string=`($echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + : +else + # The Solaris, AIX, and Digital Unix default echo programs unquote + # backslashes. This makes it impossible to quote backslashes using + # echo "$something" | sed 's/\\/\\\\/g' + # + # So, first we look for a working echo in the user's PATH. + + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for dir in $PATH /usr/ucb; do + IFS="$lt_save_ifs" + if (test -f $dir/echo || test -f $dir/echo$ac_exeext) && + test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' && + echo_testing_string=`($dir/echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + echo="$dir/echo" + break + fi + done + IFS="$lt_save_ifs" + + if test "X$echo" = Xecho; then + # We didn't find a better echo, so look for alternatives. + if test "X`(print -r '\t') 2>/dev/null`" = 'X\t' && + echo_testing_string=`(print -r "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + # This shell has a builtin print -r that does the trick. + echo='print -r' + elif (test -f /bin/ksh || test -f /bin/ksh$ac_exeext) && + test "X$CONFIG_SHELL" != X/bin/ksh; then + # If we have ksh, try running configure again with it. + ORIGINAL_CONFIG_SHELL=${CONFIG_SHELL-/bin/sh} + export ORIGINAL_CONFIG_SHELL + CONFIG_SHELL=/bin/ksh + export CONFIG_SHELL + exec $CONFIG_SHELL "$0" --no-reexec ${1+"$@"} + else + # Try using printf. + echo='printf %s\n' + if test "X`($echo '\t') 2>/dev/null`" = 'X\t' && + echo_testing_string=`($echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + # Cool, printf works + : + elif echo_testing_string=`($ORIGINAL_CONFIG_SHELL "$0" --fallback-echo '\t') 2>/dev/null` && + test "X$echo_testing_string" = 'X\t' && + echo_testing_string=`($ORIGINAL_CONFIG_SHELL "$0" --fallback-echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + CONFIG_SHELL=$ORIGINAL_CONFIG_SHELL + export CONFIG_SHELL + SHELL="$CONFIG_SHELL" + export SHELL + echo="$CONFIG_SHELL $0 --fallback-echo" + elif echo_testing_string=`($CONFIG_SHELL "$0" --fallback-echo '\t') 2>/dev/null` && + test "X$echo_testing_string" = 'X\t' && + echo_testing_string=`($CONFIG_SHELL "$0" --fallback-echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + echo="$CONFIG_SHELL $0 --fallback-echo" + else + # maybe with a smaller string... 
+ prev=: + + for cmd in 'echo test' 'sed 2q "$0"' 'sed 10q "$0"' 'sed 20q "$0"' 'sed 50q "$0"'; do + if (test "X$echo_test_string" = "X`eval $cmd`") 2>/dev/null + then + break + fi + prev="$cmd" + done + + if test "$prev" != 'sed 50q "$0"'; then + echo_test_string=`eval $prev` + export echo_test_string + exec ${ORIGINAL_CONFIG_SHELL-${CONFIG_SHELL-/bin/sh}} "$0" ${1+"$@"} + else + # Oops. We lost completely, so just stick with echo. + echo=echo + fi + fi + fi + fi +fi +fi + +# Copy echo and quote the copy suitably for passing to libtool from +# the Makefile, instead of quoting the original, which is used later. +ECHO=$echo +if test "X$ECHO" = "X$CONFIG_SHELL $0 --fallback-echo"; then + ECHO="$CONFIG_SHELL \\\$\$0 --fallback-echo" +fi + + + + +tagnames=${tagnames+${tagnames},}CXX + +tagnames=${tagnames+${tagnames},}F77 + +# Name of the host. +# hostname on some systems (SVR3.2, Linux) returns a bogus exit status, +# so uname gets run too. +ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` + +exec 6>&1 + +# +# Initializations. +# +ac_default_prefix=/usr/local +ac_config_libobj_dir=. +cross_compiling=no +subdirs= +MFLAGS= +MAKEFLAGS= +SHELL=${CONFIG_SHELL-/bin/sh} + +# Maximum number of lines to put in a shell here document. +# This variable seems obsolete. It should probably be removed, and +# only ac_max_sed_lines should be used. +: ${ac_max_here_lines=38} + +# Identity of this package. +PACKAGE_NAME='hadoop-pipes-examples' +PACKAGE_TARNAME='hadoop-pipes-examples' +PACKAGE_VERSION='0.13.0' +PACKAGE_STRING='hadoop-pipes-examples 0.13.0' +PACKAGE_BUGREPORT='omalley@apache.org' + +ac_unique_file="impl/wordcount-simple.cc" +ac_default_prefix=`pwd`/../install +# Factoring default headers for most tests. +ac_includes_default="\ +#include +#if HAVE_SYS_TYPES_H +# include +#endif +#if HAVE_SYS_STAT_H +# include +#endif +#if STDC_HEADERS +# include +# include +#else +# if HAVE_STDLIB_H +# include +# endif +#endif +#if HAVE_STRING_H +# if !STDC_HEADERS && HAVE_MEMORY_H +# include +# endif +# include +#endif +#if HAVE_STRINGS_H +# include +#endif +#if HAVE_INTTYPES_H +# include +#else +# if HAVE_STDINT_H +# include +# endif +#endif +#if HAVE_UNISTD_H +# include +#endif" + +ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS INSTALL_PROGRAM INSTALL_SCRIPT INSTALL_DATA CYGPATH_W PACKAGE VERSION ACLOCAL AUTOCONF AUTOMAKE AUTOHEADER MAKEINFO install_sh STRIP ac_ct_STRIP INSTALL_STRIP_PROGRAM mkdir_p AWK SET_MAKE am__leading_dot AMTAR am__tar am__untar CC CFLAGS LDFLAGS CPPFLAGS ac_ct_CC EXEEXT OBJEXT DEPDIR am__include am__quote AMDEP_TRUE AMDEP_FALSE AMDEPBACKSLASH CCDEPMODE am__fastdepCC_TRUE am__fastdepCC_FALSE HADOOP_UTILS_PREFIX CPP EGREP HADOOP_PIPES_PREFIX CXX CXXFLAGS ac_ct_CXX CXXDEPMODE am__fastdepCXX_TRUE am__fastdepCXX_FALSE build build_cpu build_vendor build_os host host_cpu host_vendor host_os LN_S ECHO AR ac_ct_AR RANLIB ac_ct_RANLIB CXXCPP F77 FFLAGS ac_ct_F77 LIBTOOL LIBOBJS LTLIBOBJS' +ac_subst_files='' + +# Initialize some variables set by options. +ac_init_help= +ac_init_version=false +# The variables have the same names as the options, with +# dashes changed to underlines. 
+cache_file=/dev/null +exec_prefix=NONE +no_create= +no_recursion= +prefix=NONE +program_prefix=NONE +program_suffix=NONE +program_transform_name=s,x,x, +silent= +site= +srcdir= +verbose= +x_includes=NONE +x_libraries=NONE + +# Installation directory options. +# These are left unexpanded so users can "make install exec_prefix=/foo" +# and all the variables that are supposed to be based on exec_prefix +# by default will actually change. +# Use braces instead of parens because sh, perl, etc. also accept them. +bindir='${exec_prefix}/bin' +sbindir='${exec_prefix}/sbin' +libexecdir='${exec_prefix}/libexec' +datadir='${prefix}/share' +sysconfdir='${prefix}/etc' +sharedstatedir='${prefix}/com' +localstatedir='${prefix}/var' +libdir='${exec_prefix}/lib' +includedir='${prefix}/include' +oldincludedir='/usr/include' +infodir='${prefix}/info' +mandir='${prefix}/man' + +ac_prev= +for ac_option +do + # If the previous option needs an argument, assign it. + if test -n "$ac_prev"; then + eval "$ac_prev=\$ac_option" + ac_prev= + continue + fi + + ac_optarg=`expr "x$ac_option" : 'x[^=]*=\(.*\)'` + + # Accept the important Cygnus configure options, so we can diagnose typos. + + case $ac_option in + + -bindir | --bindir | --bindi | --bind | --bin | --bi) + ac_prev=bindir ;; + -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) + bindir=$ac_optarg ;; + + -build | --build | --buil | --bui | --bu) + ac_prev=build_alias ;; + -build=* | --build=* | --buil=* | --bui=* | --bu=*) + build_alias=$ac_optarg ;; + + -cache-file | --cache-file | --cache-fil | --cache-fi \ + | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) + ac_prev=cache_file ;; + -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ + | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) + cache_file=$ac_optarg ;; + + --config-cache | -C) + cache_file=config.cache ;; + + -datadir | --datadir | --datadi | --datad | --data | --dat | --da) + ac_prev=datadir ;; + -datadir=* | --datadir=* | --datadi=* | --datad=* | --data=* | --dat=* \ + | --da=*) + datadir=$ac_optarg ;; + + -disable-* | --disable-*) + ac_feature=`expr "x$ac_option" : 'x-*disable-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_feature" : ".*[^-_$as_cr_alnum]" >/dev/null && + { echo "$as_me: error: invalid feature name: $ac_feature" >&2 + { (exit 1); exit 1; }; } + ac_feature=`echo $ac_feature | sed 's/-/_/g'` + eval "enable_$ac_feature=no" ;; + + -enable-* | --enable-*) + ac_feature=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_feature" : ".*[^-_$as_cr_alnum]" >/dev/null && + { echo "$as_me: error: invalid feature name: $ac_feature" >&2 + { (exit 1); exit 1; }; } + ac_feature=`echo $ac_feature | sed 's/-/_/g'` + case $ac_option in + *=*) ac_optarg=`echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"`;; + *) ac_optarg=yes ;; + esac + eval "enable_$ac_feature='$ac_optarg'" ;; + + -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ + | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ + | --exec | --exe | --ex) + ac_prev=exec_prefix ;; + -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ + | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ + | --exec=* | --exe=* | --ex=*) + exec_prefix=$ac_optarg ;; + + -gas | --gas | --ga | --g) + # Obsolete; use --with-gas. 
+ with_gas=yes ;; + + -help | --help | --hel | --he | -h) + ac_init_help=long ;; + -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) + ac_init_help=recursive ;; + -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) + ac_init_help=short ;; + + -host | --host | --hos | --ho) + ac_prev=host_alias ;; + -host=* | --host=* | --hos=* | --ho=*) + host_alias=$ac_optarg ;; + + -includedir | --includedir | --includedi | --included | --include \ + | --includ | --inclu | --incl | --inc) + ac_prev=includedir ;; + -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ + | --includ=* | --inclu=* | --incl=* | --inc=*) + includedir=$ac_optarg ;; + + -infodir | --infodir | --infodi | --infod | --info | --inf) + ac_prev=infodir ;; + -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) + infodir=$ac_optarg ;; + + -libdir | --libdir | --libdi | --libd) + ac_prev=libdir ;; + -libdir=* | --libdir=* | --libdi=* | --libd=*) + libdir=$ac_optarg ;; + + -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ + | --libexe | --libex | --libe) + ac_prev=libexecdir ;; + -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ + | --libexe=* | --libex=* | --libe=*) + libexecdir=$ac_optarg ;; + + -localstatedir | --localstatedir | --localstatedi | --localstated \ + | --localstate | --localstat | --localsta | --localst \ + | --locals | --local | --loca | --loc | --lo) + ac_prev=localstatedir ;; + -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ + | --localstate=* | --localstat=* | --localsta=* | --localst=* \ + | --locals=* | --local=* | --loca=* | --loc=* | --lo=*) + localstatedir=$ac_optarg ;; + + -mandir | --mandir | --mandi | --mand | --man | --ma | --m) + ac_prev=mandir ;; + -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) + mandir=$ac_optarg ;; + + -nfp | --nfp | --nf) + # Obsolete; use --without-fp. 
+ with_fp=no ;; + + -no-create | --no-create | --no-creat | --no-crea | --no-cre \ + | --no-cr | --no-c | -n) + no_create=yes ;; + + -no-recursion | --no-recursion | --no-recursio | --no-recursi \ + | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) + no_recursion=yes ;; + + -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ + | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ + | --oldin | --oldi | --old | --ol | --o) + ac_prev=oldincludedir ;; + -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ + | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ + | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) + oldincludedir=$ac_optarg ;; + + -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) + ac_prev=prefix ;; + -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) + prefix=$ac_optarg ;; + + -program-prefix | --program-prefix | --program-prefi | --program-pref \ + | --program-pre | --program-pr | --program-p) + ac_prev=program_prefix ;; + -program-prefix=* | --program-prefix=* | --program-prefi=* \ + | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) + program_prefix=$ac_optarg ;; + + -program-suffix | --program-suffix | --program-suffi | --program-suff \ + | --program-suf | --program-su | --program-s) + ac_prev=program_suffix ;; + -program-suffix=* | --program-suffix=* | --program-suffi=* \ + | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) + program_suffix=$ac_optarg ;; + + -program-transform-name | --program-transform-name \ + | --program-transform-nam | --program-transform-na \ + | --program-transform-n | --program-transform- \ + | --program-transform | --program-transfor \ + | --program-transfo | --program-transf \ + | --program-trans | --program-tran \ + | --progr-tra | --program-tr | --program-t) + ac_prev=program_transform_name ;; + -program-transform-name=* | --program-transform-name=* \ + | --program-transform-nam=* | --program-transform-na=* \ + | --program-transform-n=* | --program-transform-=* \ + | --program-transform=* | --program-transfor=* \ + | --program-transfo=* | --program-transf=* \ + | --program-trans=* | --program-tran=* \ + | --progr-tra=* | --program-tr=* | --program-t=*) + program_transform_name=$ac_optarg ;; + + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + silent=yes ;; + + -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) + ac_prev=sbindir ;; + -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ + | --sbi=* | --sb=*) + sbindir=$ac_optarg ;; + + -sharedstatedir | --sharedstatedir | --sharedstatedi \ + | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ + | --sharedst | --shareds | --shared | --share | --shar \ + | --sha | --sh) + ac_prev=sharedstatedir ;; + -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ + | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ + | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ + | --sha=* | --sh=*) + sharedstatedir=$ac_optarg ;; + + -site | --site | --sit) + ac_prev=site ;; + -site=* | --site=* | --sit=*) + site=$ac_optarg ;; + + -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) + ac_prev=srcdir ;; + -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) + srcdir=$ac_optarg ;; + + -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ + | --syscon | --sysco | --sysc | --sys | --sy) + 
ac_prev=sysconfdir ;; + -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ + | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) + sysconfdir=$ac_optarg ;; + + -target | --target | --targe | --targ | --tar | --ta | --t) + ac_prev=target_alias ;; + -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) + target_alias=$ac_optarg ;; + + -v | -verbose | --verbose | --verbos | --verbo | --verb) + verbose=yes ;; + + -version | --version | --versio | --versi | --vers | -V) + ac_init_version=: ;; + + -with-* | --with-*) + ac_package=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_package" : ".*[^-_$as_cr_alnum]" >/dev/null && + { echo "$as_me: error: invalid package name: $ac_package" >&2 + { (exit 1); exit 1; }; } + ac_package=`echo $ac_package| sed 's/-/_/g'` + case $ac_option in + *=*) ac_optarg=`echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"`;; + *) ac_optarg=yes ;; + esac + eval "with_$ac_package='$ac_optarg'" ;; + + -without-* | --without-*) + ac_package=`expr "x$ac_option" : 'x-*without-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_package" : ".*[^-_$as_cr_alnum]" >/dev/null && + { echo "$as_me: error: invalid package name: $ac_package" >&2 + { (exit 1); exit 1; }; } + ac_package=`echo $ac_package | sed 's/-/_/g'` + eval "with_$ac_package=no" ;; + + --x) + # Obsolete; use --with-x. + with_x=yes ;; + + -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ + | --x-incl | --x-inc | --x-in | --x-i) + ac_prev=x_includes ;; + -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ + | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) + x_includes=$ac_optarg ;; + + -x-libraries | --x-libraries | --x-librarie | --x-librari \ + | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) + ac_prev=x_libraries ;; + -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ + | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) + x_libraries=$ac_optarg ;; + + -*) { echo "$as_me: error: unrecognized option: $ac_option +Try \`$0 --help' for more information." >&2 + { (exit 1); exit 1; }; } + ;; + + *=*) + ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` + # Reject names that are not valid shell variable names. + expr "x$ac_envvar" : ".*[^_$as_cr_alnum]" >/dev/null && + { echo "$as_me: error: invalid variable name: $ac_envvar" >&2 + { (exit 1); exit 1; }; } + ac_optarg=`echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` + eval "$ac_envvar='$ac_optarg'" + export $ac_envvar ;; + + *) + # FIXME: should be removed in autoconf 3.0. + echo "$as_me: WARNING: you should use --build, --host, --target" >&2 + expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && + echo "$as_me: WARNING: invalid host type: $ac_option" >&2 + : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option} + ;; + + esac +done + +if test -n "$ac_prev"; then + ac_option=--`echo $ac_prev | sed 's/_/-/g'` + { echo "$as_me: error: missing argument to $ac_option" >&2 + { (exit 1); exit 1; }; } +fi + +# Be sure to have absolute paths. +for ac_var in exec_prefix prefix +do + eval ac_val=$`echo $ac_var` + case $ac_val in + [\\/$]* | ?:[\\/]* | NONE | '' ) ;; + *) { echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2 + { (exit 1); exit 1; }; };; + esac +done + +# Be sure to have absolute paths. 
+for ac_var in bindir sbindir libexecdir datadir sysconfdir sharedstatedir \ + localstatedir libdir includedir oldincludedir infodir mandir +do + eval ac_val=$`echo $ac_var` + case $ac_val in + [\\/$]* | ?:[\\/]* ) ;; + *) { echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2 + { (exit 1); exit 1; }; };; + esac +done + +# There might be people who depend on the old broken behavior: `$host' +# used to hold the argument of --host etc. +# FIXME: To remove some day. +build=$build_alias +host=$host_alias +target=$target_alias + +# FIXME: To remove some day. +if test "x$host_alias" != x; then + if test "x$build_alias" = x; then + cross_compiling=maybe + echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host. + If a cross compiler is detected then cross compile mode will be used." >&2 + elif test "x$build_alias" != "x$host_alias"; then + cross_compiling=yes + fi +fi + +ac_tool_prefix= +test -n "$host_alias" && ac_tool_prefix=$host_alias- + +test "$silent" = yes && exec 6>/dev/null + + +# Find the source files, if location was not specified. +if test -z "$srcdir"; then + ac_srcdir_defaulted=yes + # Try the directory containing this script, then its parent. + ac_confdir=`(dirname "$0") 2>/dev/null || +$as_expr X"$0" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$0" : 'X\(//\)[^/]' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || +echo X"$0" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + srcdir=$ac_confdir + if test ! -r $srcdir/$ac_unique_file; then + srcdir=.. + fi +else + ac_srcdir_defaulted=no +fi +if test ! -r $srcdir/$ac_unique_file; then + if test "$ac_srcdir_defaulted" = yes; then + { echo "$as_me: error: cannot find sources ($ac_unique_file) in $ac_confdir or .." 
>&2 + { (exit 1); exit 1; }; } + else + { echo "$as_me: error: cannot find sources ($ac_unique_file) in $srcdir" >&2 + { (exit 1); exit 1; }; } + fi +fi +(cd $srcdir && test -r ./$ac_unique_file) 2>/dev/null || + { echo "$as_me: error: sources are in $srcdir, but \`cd $srcdir' does not work" >&2 + { (exit 1); exit 1; }; } +srcdir=`echo "$srcdir" | sed 's%\([^\\/]\)[\\/]*$%\1%'` +ac_env_build_alias_set=${build_alias+set} +ac_env_build_alias_value=$build_alias +ac_cv_env_build_alias_set=${build_alias+set} +ac_cv_env_build_alias_value=$build_alias +ac_env_host_alias_set=${host_alias+set} +ac_env_host_alias_value=$host_alias +ac_cv_env_host_alias_set=${host_alias+set} +ac_cv_env_host_alias_value=$host_alias +ac_env_target_alias_set=${target_alias+set} +ac_env_target_alias_value=$target_alias +ac_cv_env_target_alias_set=${target_alias+set} +ac_cv_env_target_alias_value=$target_alias +ac_env_CC_set=${CC+set} +ac_env_CC_value=$CC +ac_cv_env_CC_set=${CC+set} +ac_cv_env_CC_value=$CC +ac_env_CFLAGS_set=${CFLAGS+set} +ac_env_CFLAGS_value=$CFLAGS +ac_cv_env_CFLAGS_set=${CFLAGS+set} +ac_cv_env_CFLAGS_value=$CFLAGS +ac_env_LDFLAGS_set=${LDFLAGS+set} +ac_env_LDFLAGS_value=$LDFLAGS +ac_cv_env_LDFLAGS_set=${LDFLAGS+set} +ac_cv_env_LDFLAGS_value=$LDFLAGS +ac_env_CPPFLAGS_set=${CPPFLAGS+set} +ac_env_CPPFLAGS_value=$CPPFLAGS +ac_cv_env_CPPFLAGS_set=${CPPFLAGS+set} +ac_cv_env_CPPFLAGS_value=$CPPFLAGS +ac_env_CPP_set=${CPP+set} +ac_env_CPP_value=$CPP +ac_cv_env_CPP_set=${CPP+set} +ac_cv_env_CPP_value=$CPP +ac_env_CXX_set=${CXX+set} +ac_env_CXX_value=$CXX +ac_cv_env_CXX_set=${CXX+set} +ac_cv_env_CXX_value=$CXX +ac_env_CXXFLAGS_set=${CXXFLAGS+set} +ac_env_CXXFLAGS_value=$CXXFLAGS +ac_cv_env_CXXFLAGS_set=${CXXFLAGS+set} +ac_cv_env_CXXFLAGS_value=$CXXFLAGS +ac_env_CXXCPP_set=${CXXCPP+set} +ac_env_CXXCPP_value=$CXXCPP +ac_cv_env_CXXCPP_set=${CXXCPP+set} +ac_cv_env_CXXCPP_value=$CXXCPP +ac_env_F77_set=${F77+set} +ac_env_F77_value=$F77 +ac_cv_env_F77_set=${F77+set} +ac_cv_env_F77_value=$F77 +ac_env_FFLAGS_set=${FFLAGS+set} +ac_env_FFLAGS_value=$FFLAGS +ac_cv_env_FFLAGS_set=${FFLAGS+set} +ac_cv_env_FFLAGS_value=$FFLAGS + +# +# Report the --help message. +# +if test "$ac_init_help" = "long"; then + # Omit some internal or obsolete options to make the list less imposing. + # This message is too long to be a string in the A/UX 3.1 sh. + cat <<_ACEOF +\`configure' configures hadoop-pipes-examples 0.13.0 to adapt to many kinds of systems. + +Usage: $0 [OPTION]... [VAR=VALUE]... + +To assign environment variables (e.g., CC, CFLAGS...), specify them as +VAR=VALUE. See below for descriptions of some of the useful variables. + +Defaults for the options are specified in brackets. + +Configuration: + -h, --help display this help and exit + --help=short display options specific to this package + --help=recursive display the short help of all the included packages + -V, --version display version information and exit + -q, --quiet, --silent do not print \`checking...' 
messages + --cache-file=FILE cache test results in FILE [disabled] + -C, --config-cache alias for \`--cache-file=config.cache' + -n, --no-create do not create output files + --srcdir=DIR find the sources in DIR [configure dir or \`..'] + +_ACEOF + + cat <<_ACEOF +Installation directories: + --prefix=PREFIX install architecture-independent files in PREFIX + [$ac_default_prefix] + --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX + [PREFIX] + +By default, \`make install' will install all the files in +\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify +an installation prefix other than \`$ac_default_prefix' using \`--prefix', +for instance \`--prefix=\$HOME'. + +For better control, use the options below. + +Fine tuning of the installation directories: + --bindir=DIR user executables [EPREFIX/bin] + --sbindir=DIR system admin executables [EPREFIX/sbin] + --libexecdir=DIR program executables [EPREFIX/libexec] + --datadir=DIR read-only architecture-independent data [PREFIX/share] + --sysconfdir=DIR read-only single-machine data [PREFIX/etc] + --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] + --localstatedir=DIR modifiable single-machine data [PREFIX/var] + --libdir=DIR object code libraries [EPREFIX/lib] + --includedir=DIR C header files [PREFIX/include] + --oldincludedir=DIR C header files for non-gcc [/usr/include] + --infodir=DIR info documentation [PREFIX/info] + --mandir=DIR man documentation [PREFIX/man] +_ACEOF + + cat <<\_ACEOF + +Program names: + --program-prefix=PREFIX prepend PREFIX to installed program names + --program-suffix=SUFFIX append SUFFIX to installed program names + --program-transform-name=PROGRAM run sed PROGRAM on installed program names + +System types: + --build=BUILD configure for building on BUILD [guessed] + --host=HOST cross-compile to build programs to run on HOST [BUILD] +_ACEOF +fi + +if test -n "$ac_init_help"; then + case $ac_init_help in + short | recursive ) echo "Configuration of hadoop-pipes-examples 0.13.0:";; + esac + cat <<\_ACEOF + +Optional Features: + --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) + --enable-FEATURE[=ARG] include FEATURE [ARG=yes] + --disable-largefile omit support for large files + --disable-dependency-tracking speeds up one-time build + --enable-dependency-tracking do not reject slow dependency extractors + --enable-shared[=PKGS] + build shared libraries [default=yes] + --enable-static[=PKGS] + build static libraries [default=yes] + --enable-fast-install[=PKGS] + optimize for fast installation [default=yes] + --disable-libtool-lock avoid locking (might break parallel builds) + +Optional Packages: + --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] + --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) + --with-hadoop-utils=

+ directory to get hadoop_utils from + --with-hadoop-pipes= + directory to get hadoop pipes from + --with-gnu-ld assume the C compiler uses GNU ld [default=no] + --with-pic try to use only PIC/non-PIC objects [default=use + both] + --with-tags[=TAGS] + include additional configurations [automatic] + +Some influential environment variables: + CC C compiler command + CFLAGS C compiler flags + LDFLAGS linker flags, e.g. -L if you have libraries in a + nonstandard directory + CPPFLAGS C/C++ preprocessor flags, e.g. -I if you have + headers in a nonstandard directory + CPP C preprocessor + CXX C++ compiler command + CXXFLAGS C++ compiler flags + CXXCPP C++ preprocessor + F77 Fortran 77 compiler command + FFLAGS Fortran 77 compiler flags + +Use these variables to override the choices made by `configure' or to help +it to find libraries and programs with nonstandard names/locations. + +Report bugs to . +_ACEOF +fi + +if test "$ac_init_help" = "recursive"; then + # If there are subdirs, report their specific --help. + ac_popdir=`pwd` + for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue + test -d $ac_dir || continue + ac_builddir=. + +if test "$ac_dir" != .; then + ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'` + # A "../" for each directory in $ac_dir_suffix. + ac_top_builddir=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,../,g'` +else + ac_dir_suffix= ac_top_builddir= +fi + +case $srcdir in + .) # No --srcdir option. We are building in place. + ac_srcdir=. + if test -z "$ac_top_builddir"; then + ac_top_srcdir=. + else + ac_top_srcdir=`echo $ac_top_builddir | sed 's,/$,,'` + fi ;; + [\\/]* | ?:[\\/]* ) # Absolute path. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir ;; + *) # Relative path. + ac_srcdir=$ac_top_builddir$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_builddir$srcdir ;; +esac + +# Do not use `cd foo && pwd` to compute absolute paths, because +# the directories may not exist. +case `pwd` in +.) ac_abs_builddir="$ac_dir";; +*) + case "$ac_dir" in + .) ac_abs_builddir=`pwd`;; + [\\/]* | ?:[\\/]* ) ac_abs_builddir="$ac_dir";; + *) ac_abs_builddir=`pwd`/"$ac_dir";; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_top_builddir=${ac_top_builddir}.;; +*) + case ${ac_top_builddir}. in + .) ac_abs_top_builddir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_top_builddir=${ac_top_builddir}.;; + *) ac_abs_top_builddir=$ac_abs_builddir/${ac_top_builddir}.;; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_srcdir=$ac_srcdir;; +*) + case $ac_srcdir in + .) ac_abs_srcdir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_srcdir=$ac_srcdir;; + *) ac_abs_srcdir=$ac_abs_builddir/$ac_srcdir;; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_top_srcdir=$ac_top_srcdir;; +*) + case $ac_top_srcdir in + .) ac_abs_top_srcdir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_top_srcdir=$ac_top_srcdir;; + *) ac_abs_top_srcdir=$ac_abs_builddir/$ac_top_srcdir;; + esac;; +esac + + cd $ac_dir + # Check for guested configure; otherwise get Cygnus style configure. 
+ if test -f $ac_srcdir/configure.gnu; then + echo + $SHELL $ac_srcdir/configure.gnu --help=recursive + elif test -f $ac_srcdir/configure; then + echo + $SHELL $ac_srcdir/configure --help=recursive + elif test -f $ac_srcdir/configure.ac || + test -f $ac_srcdir/configure.in; then + echo + $ac_configure --help + else + echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 + fi + cd $ac_popdir + done +fi + +test -n "$ac_init_help" && exit 0 +if $ac_init_version; then + cat <<\_ACEOF +hadoop-pipes-examples configure 0.13.0 +generated by GNU Autoconf 2.59 + +Copyright (C) 2003 Free Software Foundation, Inc. +This configure script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it. +_ACEOF + exit 0 +fi +exec 5>config.log +cat >&5 <<_ACEOF +This file contains any messages produced by compilers while +running configure, to aid debugging if configure makes a mistake. + +It was created by hadoop-pipes-examples $as_me 0.13.0, which was +generated by GNU Autoconf 2.59. Invocation command line was + + $ $0 $@ + +_ACEOF +{ +cat <<_ASUNAME +## --------- ## +## Platform. ## +## --------- ## + +hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` + +/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` +hostinfo = `(hostinfo) 2>/dev/null || echo unknown` +/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` +/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` + +_ASUNAME + +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + echo "PATH: $as_dir" +done + +} >&5 + +cat >&5 <<_ACEOF + + +## ----------- ## +## Core tests. ## +## ----------- ## + +_ACEOF + + +# Keep a trace of the command line. +# Strip out --no-create and --no-recursion so they do not pile up. +# Strip out --silent because we don't want to record it for future runs. +# Also quote any args containing shell meta-characters. +# Make two passes to allow for proper duplicate-argument suppression. +ac_configure_args= +ac_configure_args0= +ac_configure_args1= +ac_sep= +ac_must_keep_next=false +for ac_pass in 1 2 +do + for ac_arg + do + case $ac_arg in + -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + continue ;; + *" "*|*" "*|*[\[\]\~\#\$\^\&\*\(\)\{\}\\\|\;\<\>\?\"\']*) + ac_arg=`echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + case $ac_pass in + 1) ac_configure_args0="$ac_configure_args0 '$ac_arg'" ;; + 2) + ac_configure_args1="$ac_configure_args1 '$ac_arg'" + if test $ac_must_keep_next = true; then + ac_must_keep_next=false # Got value, back to normal. 
+ else + case $ac_arg in + *=* | --config-cache | -C | -disable-* | --disable-* \ + | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ + | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ + | -with-* | --with-* | -without-* | --without-* | --x) + case "$ac_configure_args0 " in + "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; + esac + ;; + -* ) ac_must_keep_next=true ;; + esac + fi + ac_configure_args="$ac_configure_args$ac_sep'$ac_arg'" + # Get rid of the leading space. + ac_sep=" " + ;; + esac + done +done +$as_unset ac_configure_args0 || test "${ac_configure_args0+set}" != set || { ac_configure_args0=; export ac_configure_args0; } +$as_unset ac_configure_args1 || test "${ac_configure_args1+set}" != set || { ac_configure_args1=; export ac_configure_args1; } + +# When interrupted or exit'd, cleanup temporary files, and complete +# config.log. We remove comments because anyway the quotes in there +# would cause problems or look ugly. +# WARNING: Be sure not to use single quotes in there, as some shells, +# such as our DU 5.0 friend, will then `close' the trap. +trap 'exit_status=$? + # Save into config.log some information that might help in debugging. + { + echo + + cat <<\_ASBOX +## ---------------- ## +## Cache variables. ## +## ---------------- ## +_ASBOX + echo + # The following way of writing the cache mishandles newlines in values, +{ + (set) 2>&1 | + case `(ac_space='"'"' '"'"'; set | grep ac_space) 2>&1` in + *ac_space=\ *) + sed -n \ + "s/'"'"'/'"'"'\\\\'"'"''"'"'/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='"'"'\\2'"'"'/p" + ;; + *) + sed -n \ + "s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1=\\2/p" + ;; + esac; +} + echo + + cat <<\_ASBOX +## ----------------- ## +## Output variables. ## +## ----------------- ## +_ASBOX + echo + for ac_var in $ac_subst_vars + do + eval ac_val=$`echo $ac_var` + echo "$ac_var='"'"'$ac_val'"'"'" + done | sort + echo + + if test -n "$ac_subst_files"; then + cat <<\_ASBOX +## ------------- ## +## Output files. ## +## ------------- ## +_ASBOX + echo + for ac_var in $ac_subst_files + do + eval ac_val=$`echo $ac_var` + echo "$ac_var='"'"'$ac_val'"'"'" + done | sort + echo + fi + + if test -s confdefs.h; then + cat <<\_ASBOX +## ----------- ## +## confdefs.h. ## +## ----------- ## +_ASBOX + echo + sed "/^$/d" confdefs.h | sort + echo + fi + test "$ac_signal" != 0 && + echo "$as_me: caught signal $ac_signal" + echo "$as_me: exit $exit_status" + } >&5 + rm -f core *.core && + rm -rf conftest* confdefs* conf$$* $ac_clean_files && + exit $exit_status + ' 0 +for ac_signal in 1 2 13 15; do + trap 'ac_signal='$ac_signal'; { (exit 1); exit 1; }' $ac_signal +done +ac_signal=0 + +# confdefs.h avoids OS command line length limits that DEFS can exceed. +rm -rf conftest* confdefs.h +# AIX cpp loses on an empty file, so make sure it contains at least a newline. +echo >confdefs.h + +# Predefined preprocessor variables. + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_NAME "$PACKAGE_NAME" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_TARNAME "$PACKAGE_TARNAME" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_VERSION "$PACKAGE_VERSION" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_STRING "$PACKAGE_STRING" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" +_ACEOF + + +# Let the site file select an alternate cache file if it wants to. +# Prefer explicitly selected file to automatically selected ones. 
+if test -z "$CONFIG_SITE"; then + if test "x$prefix" != xNONE; then + CONFIG_SITE="$prefix/share/config.site $prefix/etc/config.site" + else + CONFIG_SITE="$ac_default_prefix/share/config.site $ac_default_prefix/etc/config.site" + fi +fi +for ac_site_file in $CONFIG_SITE; do + if test -r "$ac_site_file"; then + { echo "$as_me:$LINENO: loading site script $ac_site_file" >&5 +echo "$as_me: loading site script $ac_site_file" >&6;} + sed 's/^/| /' "$ac_site_file" >&5 + . "$ac_site_file" + fi +done + +if test -r "$cache_file"; then + # Some versions of bash will fail to source /dev/null (special + # files actually), so we avoid doing that. + if test -f "$cache_file"; then + { echo "$as_me:$LINENO: loading cache $cache_file" >&5 +echo "$as_me: loading cache $cache_file" >&6;} + case $cache_file in + [\\/]* | ?:[\\/]* ) . $cache_file;; + *) . ./$cache_file;; + esac + fi +else + { echo "$as_me:$LINENO: creating cache $cache_file" >&5 +echo "$as_me: creating cache $cache_file" >&6;} + >$cache_file +fi + +# Check that the precious variables saved in the cache have kept the same +# value. +ac_cache_corrupted=false +for ac_var in `(set) 2>&1 | + sed -n 's/^ac_env_\([a-zA-Z_0-9]*\)_set=.*/\1/p'`; do + eval ac_old_set=\$ac_cv_env_${ac_var}_set + eval ac_new_set=\$ac_env_${ac_var}_set + eval ac_old_val="\$ac_cv_env_${ac_var}_value" + eval ac_new_val="\$ac_env_${ac_var}_value" + case $ac_old_set,$ac_new_set in + set,) + { echo "$as_me:$LINENO: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 +echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,set) + { echo "$as_me:$LINENO: error: \`$ac_var' was not set in the previous run" >&5 +echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,);; + *) + if test "x$ac_old_val" != "x$ac_new_val"; then + { echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5 +echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} + { echo "$as_me:$LINENO: former value: $ac_old_val" >&5 +echo "$as_me: former value: $ac_old_val" >&2;} + { echo "$as_me:$LINENO: current value: $ac_new_val" >&5 +echo "$as_me: current value: $ac_new_val" >&2;} + ac_cache_corrupted=: + fi;; + esac + # Pass precious variables to config.status. + if test "$ac_new_set" = set; then + case $ac_new_val in + *" "*|*" "*|*[\[\]\~\#\$\^\&\*\(\)\{\}\\\|\;\<\>\?\"\']*) + ac_arg=$ac_var=`echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; + *) ac_arg=$ac_var=$ac_new_val ;; + esac + case " $ac_configure_args " in + *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. + *) ac_configure_args="$ac_configure_args '$ac_arg'" ;; + esac + fi +done +if $ac_cache_corrupted; then + { echo "$as_me:$LINENO: error: changes in the environment can compromise the build" >&5 +echo "$as_me: error: changes in the environment can compromise the build" >&2;} + { { echo "$as_me:$LINENO: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&5 +echo "$as_me: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&2;} + { (exit 1); exit 1; }; } +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + + + + + + + + + + + + + + + + + + + + + + + + + + +am__api_version="1.9" +ac_aux_dir= +for ac_dir in $srcdir $srcdir/.. 
$srcdir/../..; do + if test -f $ac_dir/install-sh; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install-sh -c" + break + elif test -f $ac_dir/install.sh; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install.sh -c" + break + elif test -f $ac_dir/shtool; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/shtool install -c" + break + fi +done +if test -z "$ac_aux_dir"; then + { { echo "$as_me:$LINENO: error: cannot find install-sh or install.sh in $srcdir $srcdir/.. $srcdir/../.." >&5 +echo "$as_me: error: cannot find install-sh or install.sh in $srcdir $srcdir/.. $srcdir/../.." >&2;} + { (exit 1); exit 1; }; } +fi +ac_config_guess="$SHELL $ac_aux_dir/config.guess" +ac_config_sub="$SHELL $ac_aux_dir/config.sub" +ac_configure="$SHELL $ac_aux_dir/configure" # This should be Cygnus configure. + +# Find a good install program. We prefer a C program (faster), +# so one script is as good as another. But avoid the broken or +# incompatible versions: +# SysV /etc/install, /usr/sbin/install +# SunOS /usr/etc/install +# IRIX /sbin/install +# AIX /bin/install +# AmigaOS /C/install, which installs bootblocks on floppy discs +# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag +# AFS /usr/afsws/bin/install, which mishandles nonexistent args +# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" +# OS/2's system install, which has a completely different semantic +# ./install, which can be erroneously created by make from ./install.sh. +echo "$as_me:$LINENO: checking for a BSD-compatible install" >&5 +echo $ECHO_N "checking for a BSD-compatible install... $ECHO_C" >&6 +if test -z "$INSTALL"; then +if test "${ac_cv_path_install+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + # Account for people who put trailing slashes in PATH elements. +case $as_dir/ in + ./ | .// | /cC/* | \ + /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ + ?:\\/os2\\/install\\/* | ?:\\/OS2\\/INSTALL\\/* | \ + /usr/ucb/* ) ;; + *) + # OSF1 and SCO ODT 3.0 have their own names for install. + # Don't use installbsd from OSF since it installs stuff as root + # by default. + for ac_prog in ginstall scoinst install; do + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then + if test $ac_prog = install && + grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # AIX install. It has an incompatible calling convention. + : + elif test $ac_prog = install && + grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # program-specific install script used by HP pwplus--don't use. + : + else + ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" + break 3 + fi + fi + done + done + ;; +esac +done + + +fi + if test "${ac_cv_path_install+set}" = set; then + INSTALL=$ac_cv_path_install + else + # As a last resort, use the slow shell script. We don't cache a + # path for INSTALL within a source directory, because that will + # break other packages using the cache if that directory is + # removed, or if the path is relative. + INSTALL=$ac_install_sh + fi +fi +echo "$as_me:$LINENO: result: $INSTALL" >&5 +echo "${ECHO_T}$INSTALL" >&6 + +# Use test -z because SunOS4 sh mishandles braces in ${var-val}. +# It thinks the first close brace ends the variable substitution. 
+test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' + +test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' + +test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' + +echo "$as_me:$LINENO: checking whether build environment is sane" >&5 +echo $ECHO_N "checking whether build environment is sane... $ECHO_C" >&6 +# Just in case +sleep 1 +echo timestamp > conftest.file +# Do `set' in a subshell so we don't clobber the current shell's +# arguments. Must try -L first in case configure is actually a +# symlink; some systems play weird games with the mod time of symlinks +# (eg FreeBSD returns the mod time of the symlink's containing +# directory). +if ( + set X `ls -Lt $srcdir/configure conftest.file 2> /dev/null` + if test "$*" = "X"; then + # -L didn't work. + set X `ls -t $srcdir/configure conftest.file` + fi + rm -f conftest.file + if test "$*" != "X $srcdir/configure conftest.file" \ + && test "$*" != "X conftest.file $srcdir/configure"; then + + # If neither matched, then we have a broken ls. This can happen + # if, for instance, CONFIG_SHELL is bash and it inherits a + # broken ls alias from the environment. This has actually + # happened. Such a system could not be considered "sane". + { { echo "$as_me:$LINENO: error: ls -t appears to fail. Make sure there is not a broken +alias in your environment" >&5 +echo "$as_me: error: ls -t appears to fail. Make sure there is not a broken +alias in your environment" >&2;} + { (exit 1); exit 1; }; } + fi + + test "$2" = conftest.file + ) +then + # Ok. + : +else + { { echo "$as_me:$LINENO: error: newly created file is older than distributed files! +Check your system clock" >&5 +echo "$as_me: error: newly created file is older than distributed files! +Check your system clock" >&2;} + { (exit 1); exit 1; }; } +fi +echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 +test "$program_prefix" != NONE && + program_transform_name="s,^,$program_prefix,;$program_transform_name" +# Use a double $ so make ignores it. +test "$program_suffix" != NONE && + program_transform_name="s,\$,$program_suffix,;$program_transform_name" +# Double any \ or $. echo might interpret backslashes. +# By default was `s,x,x', remove it if useless. +cat <<\_ACEOF >conftest.sed +s/[\\$]/&&/g;s/;s,x,x,$// +_ACEOF +program_transform_name=`echo $program_transform_name | sed -f conftest.sed` +rm conftest.sed + +# expand $ac_aux_dir to an absolute path +am_aux_dir=`cd $ac_aux_dir && pwd` + +test x"${MISSING+set}" = xset || MISSING="\${SHELL} $am_aux_dir/missing" +# Use eval to expand $SHELL +if eval "$MISSING --run true"; then + am_missing_run="$MISSING --run " +else + am_missing_run= + { echo "$as_me:$LINENO: WARNING: \`missing' script is too old or missing" >&5 +echo "$as_me: WARNING: \`missing' script is too old or missing" >&2;} +fi + +if mkdir -p --version . >/dev/null 2>&1 && test ! -d ./--version; then + # We used to keeping the `.' as first argument, in order to + # allow $(mkdir_p) to be used without argument. As in + # $(mkdir_p) $(somedir) + # where $(somedir) is conditionally defined. However this is wrong + # for two reasons: + # 1. if the package is installed by a user who cannot write `.' + # make install will fail, + # 2. the above comment should most certainly read + # $(mkdir_p) $(DESTDIR)$(somedir) + # so it does not work when $(somedir) is undefined and + # $(DESTDIR) is not. + # To support the latter case, we have to write + # test -z "$(somedir)" || $(mkdir_p) $(DESTDIR)$(somedir), + # so the `.' trick is pointless. 
+ mkdir_p='mkdir -p --' +else + # On NextStep and OpenStep, the `mkdir' command does not + # recognize any option. It will interpret all options as + # directories to create, and then abort because `.' already + # exists. + for d in ./-p ./--version; + do + test -d $d && rmdir $d + done + # $(mkinstalldirs) is defined by Automake if mkinstalldirs exists. + if test -f "$ac_aux_dir/mkinstalldirs"; then + mkdir_p='$(mkinstalldirs)' + else + mkdir_p='$(install_sh) -d' + fi +fi + +for ac_prog in gawk mawk nawk awk +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_AWK+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$AWK"; then + ac_cv_prog_AWK="$AWK" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_AWK="$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +AWK=$ac_cv_prog_AWK +if test -n "$AWK"; then + echo "$as_me:$LINENO: result: $AWK" >&5 +echo "${ECHO_T}$AWK" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + test -n "$AWK" && break +done + +echo "$as_me:$LINENO: checking whether ${MAKE-make} sets \$(MAKE)" >&5 +echo $ECHO_N "checking whether ${MAKE-make} sets \$(MAKE)... $ECHO_C" >&6 +set dummy ${MAKE-make}; ac_make=`echo "$2" | sed 'y,:./+-,___p_,'` +if eval "test \"\${ac_cv_prog_make_${ac_make}_set+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.make <<\_ACEOF +all: + @echo 'ac_maketemp="$(MAKE)"' +_ACEOF +# GNU make sometimes prints "make[1]: Entering...", which would confuse us. +eval `${MAKE-make} -f conftest.make 2>/dev/null | grep temp=` +if test -n "$ac_maketemp"; then + eval ac_cv_prog_make_${ac_make}_set=yes +else + eval ac_cv_prog_make_${ac_make}_set=no +fi +rm -f conftest.make +fi +if eval "test \"`echo '$ac_cv_prog_make_'${ac_make}_set`\" = yes"; then + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 + SET_MAKE= +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 + SET_MAKE="MAKE=${MAKE-make}" +fi + +rm -rf .tst 2>/dev/null +mkdir .tst 2>/dev/null +if test -d .tst; then + am__leading_dot=. +else + am__leading_dot=_ +fi +rmdir .tst 2>/dev/null + +# test to see if srcdir already configured +if test "`cd $srcdir && pwd`" != "`pwd`" && + test -f $srcdir/config.status; then + { { echo "$as_me:$LINENO: error: source directory already configured; run \"make distclean\" there first" >&5 +echo "$as_me: error: source directory already configured; run \"make distclean\" there first" >&2;} + { (exit 1); exit 1; }; } +fi + +# test whether we have cygpath +if test -z "$CYGPATH_W"; then + if (cygpath --version) >/dev/null 2>/dev/null; then + CYGPATH_W='cygpath -w' + else + CYGPATH_W=echo + fi +fi + + +# Define the identity of the package. + PACKAGE='hadoop-pipes-examples' + VERSION='0.13.0' + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE "$PACKAGE" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define VERSION "$VERSION" +_ACEOF + +# Some tools Automake needs. 
+ +ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"} + + +AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"} + + +AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"} + + +AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"} + + +MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"} + +install_sh=${install_sh-"$am_aux_dir/install-sh"} + +# Installed binaries are usually stripped using `strip' when the user +# run `make install-strip'. However `strip' might not be the right +# tool to use in cross-compilation environments, therefore Automake +# will honor the `STRIP' environment variable to overrule this program. +if test "$cross_compiling" != no; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. +set dummy ${ac_tool_prefix}strip; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_STRIP+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$STRIP"; then + ac_cv_prog_STRIP="$STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_STRIP="${ac_tool_prefix}strip" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +STRIP=$ac_cv_prog_STRIP +if test -n "$STRIP"; then + echo "$as_me:$LINENO: result: $STRIP" >&5 +echo "${ECHO_T}$STRIP" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + +fi +if test -z "$ac_cv_prog_STRIP"; then + ac_ct_STRIP=$STRIP + # Extract the first word of "strip", so it can be a program name with args. +set dummy strip; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_ac_ct_STRIP+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_STRIP"; then + ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_STRIP="strip" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + + test -z "$ac_cv_prog_ac_ct_STRIP" && ac_cv_prog_ac_ct_STRIP=":" +fi +fi +ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP +if test -n "$ac_ct_STRIP"; then + echo "$as_me:$LINENO: result: $ac_ct_STRIP" >&5 +echo "${ECHO_T}$ac_ct_STRIP" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + STRIP=$ac_ct_STRIP +else + STRIP="$ac_cv_prog_STRIP" +fi + +fi +INSTALL_STRIP_PROGRAM="\${SHELL} \$(install_sh) -c -s" + +# We need awk for the "check" target. The system "awk" is bad on +# some platforms. +# Always define AMTAR for backward compatibility. 
+ +AMTAR=${AMTAR-"${am_missing_run}tar"} + +am__tar='${AMTAR} chof - "$$tardir"'; am__untar='${AMTAR} xf -' + + + + + + + + ac_config_headers="$ac_config_headers impl/config.h" + + ac_config_files="$ac_config_files Makefile" + + + + + +cat >>confdefs.h <<\_ACEOF +#define _GNU_SOURCE 1 +_ACEOF + + +DEPDIR="${am__leading_dot}deps" + + ac_config_commands="$ac_config_commands depfiles" + + +am_make=${MAKE-make} +cat > confinc << 'END' +am__doit: + @echo done +.PHONY: am__doit +END +# If we don't find an include directive, just comment out the code. +echo "$as_me:$LINENO: checking for style of include used by $am_make" >&5 +echo $ECHO_N "checking for style of include used by $am_make... $ECHO_C" >&6 +am__include="#" +am__quote= +_am_result=none +# First try GNU make style include. +echo "include confinc" > confmf +# We grep out `Entering directory' and `Leaving directory' +# messages which can occur if `w' ends up in MAKEFLAGS. +# In particular we don't look at `^make:' because GNU make might +# be invoked under some other name (usually "gmake"), in which +# case it prints its new name instead of `make'. +if test "`$am_make -s -f confmf 2> /dev/null | grep -v 'ing directory'`" = "done"; then + am__include=include + am__quote= + _am_result=GNU +fi +# Now try BSD make style include. +if test "$am__include" = "#"; then + echo '.include "confinc"' > confmf + if test "`$am_make -s -f confmf 2> /dev/null`" = "done"; then + am__include=.include + am__quote="\"" + _am_result=BSD + fi +fi + + +echo "$as_me:$LINENO: result: $_am_result" >&5 +echo "${ECHO_T}$_am_result" >&6 +rm -f confinc confmf + +# Check whether --enable-dependency-tracking or --disable-dependency-tracking was given. +if test "${enable_dependency_tracking+set}" = set; then + enableval="$enable_dependency_tracking" + +fi; +if test "x$enable_dependency_tracking" != xno; then + am_depcomp="$ac_aux_dir/depcomp" + AMDEPBACKSLASH='\' +fi + + +if test "x$enable_dependency_tracking" != xno; then + AMDEP_TRUE= + AMDEP_FALSE='#' +else + AMDEP_TRUE='#' + AMDEP_FALSE= +fi + + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. +set dummy ${ac_tool_prefix}gcc; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}gcc" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + echo "$as_me:$LINENO: result: $CC" >&5 +echo "${ECHO_T}$CC" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + +fi +if test -z "$ac_cv_prog_CC"; then + ac_ct_CC=$CC + # Extract the first word of "gcc", so it can be a program name with args. +set dummy gcc; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... 
$ECHO_C" >&6 +if test "${ac_cv_prog_ac_ct_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="gcc" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + echo "$as_me:$LINENO: result: $ac_ct_CC" >&5 +echo "${ECHO_T}$ac_ct_CC" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + CC=$ac_ct_CC +else + CC="$ac_cv_prog_CC" +fi + +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. +set dummy ${ac_tool_prefix}cc; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}cc" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + echo "$as_me:$LINENO: result: $CC" >&5 +echo "${ECHO_T}$CC" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + +fi +if test -z "$ac_cv_prog_CC"; then + ac_ct_CC=$CC + # Extract the first word of "cc", so it can be a program name with args. +set dummy cc; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_ac_ct_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="cc" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + echo "$as_me:$LINENO: result: $ac_ct_CC" >&5 +echo "${ECHO_T}$ac_ct_CC" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + CC=$ac_ct_CC +else + CC="$ac_cv_prog_CC" +fi + +fi +if test -z "$CC"; then + # Extract the first word of "cc", so it can be a program name with args. +set dummy cc; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else + ac_prog_rejected=no +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then + ac_prog_rejected=yes + continue + fi + ac_cv_prog_CC="cc" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +if test $ac_prog_rejected = yes; then + # We found a bogon in the path, so make sure we never use it. + set dummy $ac_cv_prog_CC + shift + if test $# != 0; then + # We chose a different compiler from the bogus one. + # However, it has the same basename, so the bogon will be chosen + # first if we set CC to just the basename; use the full file name. + shift + ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" + fi +fi +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + echo "$as_me:$LINENO: result: $CC" >&5 +echo "${ECHO_T}$CC" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + +fi +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + for ac_prog in cl + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="$ac_tool_prefix$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + echo "$as_me:$LINENO: result: $CC" >&5 +echo "${ECHO_T}$CC" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + test -n "$CC" && break + done +fi +if test -z "$CC"; then + ac_ct_CC=$CC + for ac_prog in cl +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_ac_ct_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + echo "$as_me:$LINENO: result: $ac_ct_CC" >&5 +echo "${ECHO_T}$ac_ct_CC" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + test -n "$ac_ct_CC" && break +done + + CC=$ac_ct_CC +fi + +fi + + +test -z "$CC" && { { echo "$as_me:$LINENO: error: no acceptable C compiler found in \$PATH +See \`config.log' for more details." >&5 +echo "$as_me: error: no acceptable C compiler found in \$PATH +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } + +# Provide some information about the compiler. 
+echo "$as_me:$LINENO:" \ + "checking for C compiler version" >&5 +ac_compiler=`set X $ac_compile; echo $2` +{ (eval echo "$as_me:$LINENO: \"$ac_compiler --version &5\"") >&5 + (eval $ac_compiler --version &5) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +{ (eval echo "$as_me:$LINENO: \"$ac_compiler -v &5\"") >&5 + (eval $ac_compiler -v &5) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +{ (eval echo "$as_me:$LINENO: \"$ac_compiler -V &5\"") >&5 + (eval $ac_compiler -V &5) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files a.out a.exe b.out" +# Try to create an executable without -o first, disregard a.out. +# It will help us diagnose broken compilers, and finding out an intuition +# of exeext. +echo "$as_me:$LINENO: checking for C compiler default output file name" >&5 +echo $ECHO_N "checking for C compiler default output file name... $ECHO_C" >&6 +ac_link_default=`echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` +if { (eval echo "$as_me:$LINENO: \"$ac_link_default\"") >&5 + (eval $ac_link_default) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + # Find the output, starting from the most likely. This scheme is +# not robust to junk in `.', hence go to wildcards (a.*) only as a last +# resort. + +# Be careful to initialize this variable, since it used to be cached. +# Otherwise an old cache value of `no' led to `EXEEXT = no' in a Makefile. +ac_cv_exeext= +# b.out is created by i960 compilers. +for ac_file in a_out.exe a.exe conftest.exe a.out conftest a.* conftest.* b.out +do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.o | *.obj ) + ;; + conftest.$ac_ext ) + # This is the source file. + ;; + [ab].out ) + # We found the default executable, but exeext='' is most + # certainly right. + break;; + *.* ) + ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + # FIXME: I believe we export ac_cv_exeext for Libtool, + # but it would be cool to find out if it's true. Does anybody + # maintain Libtool? --akim. + export ac_cv_exeext + break;; + * ) + break;; + esac +done +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { echo "$as_me:$LINENO: error: C compiler cannot create executables +See \`config.log' for more details." >&5 +echo "$as_me: error: C compiler cannot create executables +See \`config.log' for more details." >&2;} + { (exit 77); exit 77; }; } +fi + +ac_exeext=$ac_cv_exeext +echo "$as_me:$LINENO: result: $ac_file" >&5 +echo "${ECHO_T}$ac_file" >&6 + +# Check the compiler produces executables we can run. If not, either +# the compiler is broken, or we cross compile. +echo "$as_me:$LINENO: checking whether the C compiler works" >&5 +echo $ECHO_N "checking whether the C compiler works... $ECHO_C" >&6 +# FIXME: These cross compiler hacks should be removed for Autoconf 3.0 +# If not cross compiling, check that we can run a simple program. +if test "$cross_compiling" != yes; then + if { ac_try='./$ac_file' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + cross_compiling=no + else + if test "$cross_compiling" = maybe; then + cross_compiling=yes + else + { { echo "$as_me:$LINENO: error: cannot run C compiled programs. +If you meant to cross compile, use \`--host'. +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot run C compiled programs. +If you meant to cross compile, use \`--host'. +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } + fi + fi +fi +echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 + +rm -f a.out a.exe conftest$ac_cv_exeext b.out +ac_clean_files=$ac_clean_files_save +# Check the compiler produces executables we can run. If not, either +# the compiler is broken, or we cross compile. +echo "$as_me:$LINENO: checking whether we are cross compiling" >&5 +echo $ECHO_N "checking whether we are cross compiling... $ECHO_C" >&6 +echo "$as_me:$LINENO: result: $cross_compiling" >&5 +echo "${ECHO_T}$cross_compiling" >&6 + +echo "$as_me:$LINENO: checking for suffix of executables" >&5 +echo $ECHO_N "checking for suffix of executables... $ECHO_C" >&6 +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + # If both `conftest.exe' and `conftest' are `present' (well, observable) +# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will +# work properly (i.e., refer to `conftest.exe'), while it won't with +# `rm'. +for ac_file in conftest.exe conftest conftest.*; do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.o | *.obj ) ;; + *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + export ac_cv_exeext + break;; + * ) break;; + esac +done +else + { { echo "$as_me:$LINENO: error: cannot compute suffix of executables: cannot compile and link +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute suffix of executables: cannot compile and link +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } +fi + +rm -f conftest$ac_cv_exeext +echo "$as_me:$LINENO: result: $ac_cv_exeext" >&5 +echo "${ECHO_T}$ac_cv_exeext" >&6 + +rm -f conftest.$ac_ext +EXEEXT=$ac_cv_exeext +ac_exeext=$EXEEXT +echo "$as_me:$LINENO: checking for suffix of object files" >&5 +echo $ECHO_N "checking for suffix of object files... $ECHO_C" >&6 +if test "${ac_cv_objext+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.o conftest.obj +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + for ac_file in `(ls conftest.o conftest.obj; ls conftest.*) 2>/dev/null`; do + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg ) ;; + *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` + break;; + esac +done +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { echo "$as_me:$LINENO: error: cannot compute suffix of object files: cannot compile +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute suffix of object files: cannot compile +See \`config.log' for more details." 
>&2;} + { (exit 1); exit 1; }; } +fi + +rm -f conftest.$ac_cv_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_objext" >&5 +echo "${ECHO_T}$ac_cv_objext" >&6 +OBJEXT=$ac_cv_objext +ac_objext=$OBJEXT +echo "$as_me:$LINENO: checking whether we are using the GNU C compiler" >&5 +echo $ECHO_N "checking whether we are using the GNU C compiler... $ECHO_C" >&6 +if test "${ac_cv_c_compiler_gnu+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_compiler_gnu=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_compiler_gnu=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_c_compiler_gnu=$ac_compiler_gnu + +fi +echo "$as_me:$LINENO: result: $ac_cv_c_compiler_gnu" >&5 +echo "${ECHO_T}$ac_cv_c_compiler_gnu" >&6 +GCC=`test $ac_compiler_gnu = yes && echo yes` +ac_test_CFLAGS=${CFLAGS+set} +ac_save_CFLAGS=$CFLAGS +CFLAGS="-g" +echo "$as_me:$LINENO: checking whether $CC accepts -g" >&5 +echo $ECHO_N "checking whether $CC accepts -g... $ECHO_C" >&6 +if test "${ac_cv_prog_cc_g+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_prog_cc_g=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_prog_cc_g=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_prog_cc_g" >&5 +echo "${ECHO_T}$ac_cv_prog_cc_g" >&6 +if test "$ac_test_CFLAGS" = set; then + CFLAGS=$ac_save_CFLAGS +elif test $ac_cv_prog_cc_g = yes; then + if test "$GCC" = yes; then + CFLAGS="-g -O2" + else + CFLAGS="-g" + fi +else + if test "$GCC" = yes; then + CFLAGS="-O2" + else + CFLAGS= + fi +fi +echo "$as_me:$LINENO: checking for $CC option to accept ANSI C" >&5 +echo $ECHO_N "checking for $CC option to accept ANSI C... $ECHO_C" >&6 +if test "${ac_cv_prog_cc_stdc+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_cv_prog_cc_stdc=no +ac_save_CC=$CC +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +#include +#include +#include +/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ +struct buf { int x; }; +FILE * (*rcsopen) (struct buf *, struct stat *, int); +static char *e (p, i) + char **p; + int i; +{ + return p[i]; +} +static char *f (char * (*g) (char **, int), char **p, ...) +{ + char *s; + va_list v; + va_start (v,p); + s = g (p, va_arg (v,int)); + va_end (v); + return s; +} + +/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has + function prototypes and stuff, but not '\xHH' hex character constants. + These don't provoke an error unfortunately, instead are silently treated + as 'x'. The following induces an error, until -std1 is added to get + proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an + array size at least. It's necessary to write '\x00'==0 to get something + that's true only with -std1. */ +int osf4_cc_array ['\x00' == 0 ? 1 : -1]; + +int test (int i, double x); +struct s1 {int (*f) (int a);}; +struct s2 {int (*f) (double a);}; +int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); +int argc; +char **argv; +int +main () +{ +return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; + ; + return 0; +} +_ACEOF +# Don't try gcc -ansi; that turns off useful extensions and +# breaks some systems' header files. +# AIX -qlanglvl=ansi +# Ultrix and OSF/1 -std1 +# HP-UX 10.20 and later -Ae +# HP-UX older versions -Aa -D_HPUX_SOURCE +# SVR4 -Xc -D__EXTENSIONS__ +for ac_arg in "" -qlanglvl=ansi -std1 -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" +do + CC="$ac_save_CC $ac_arg" + rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_prog_cc_stdc=$ac_arg +break +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext +done +rm -f conftest.$ac_ext conftest.$ac_objext +CC=$ac_save_CC + +fi + +case "x$ac_cv_prog_cc_stdc" in + x|xno) + echo "$as_me:$LINENO: result: none needed" >&5 +echo "${ECHO_T}none needed" >&6 ;; + *) + echo "$as_me:$LINENO: result: $ac_cv_prog_cc_stdc" >&5 +echo "${ECHO_T}$ac_cv_prog_cc_stdc" >&6 + CC="$CC $ac_cv_prog_cc_stdc" ;; +esac + +# Some people use a C++ compiler to compile C. Since we use `exit', +# in C++ we need to declare it. In case someone uses the same compiler +# for both compiling C and C++ we need to have the C++ compiler decide +# the declaration of exit, since it's the most demanding environment. +cat >conftest.$ac_ext <<_ACEOF +#ifndef __cplusplus + choke me +#endif +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + for ac_declaration in \ + '' \ + 'extern "C" void std::exit (int) throw (); using std::exit;' \ + 'extern "C" void std::exit (int); using std::exit;' \ + 'extern "C" void exit (int) throw ();' \ + 'extern "C" void exit (int);' \ + 'void exit (int);' +do + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_declaration +#include +int +main () +{ +exit (42); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + : +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +continue +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_declaration +int +main () +{ +exit (42); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + break +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +done +rm -f conftest* +if test -n "$ac_declaration"; then + echo '#ifdef __cplusplus' >>confdefs.h + echo $ac_declaration >>confdefs.h + echo '#endif' >>confdefs.h +fi + +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +depcc="$CC" am_compiler_list= + +echo "$as_me:$LINENO: checking dependency style of $depcc" >&5 +echo $ECHO_N "checking dependency style of $depcc... $ECHO_C" >&6 +if test "${am_cv_CC_dependencies_compiler_type+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. For + # instance it was reported that on HP-UX the gcc test will end up + # making a dummy file named `D' -- because `-MD' means `put the output + # in D'. + mkdir conftest.dir + # Copy depcomp to subdir because otherwise we won't find it if we're + # using a relative directory. + cp "$am_depcomp" conftest.dir + cd conftest.dir + # We will build objects and dependencies in a subdirectory because + # it helps to detect inapplicable dependency modes. For instance + # both Tru64's cc and ICC support -MD to output dependencies as a + # side effect of compilation, but ICC will put the dependencies in + # the current directory while Tru64 will put them in the object + # directory. + mkdir sub + + am_cv_CC_dependencies_compiler_type=none + if test "$am_compiler_list" = ""; then + am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` + fi + for depmode in $am_compiler_list; do + # Setup a source with many dependencies, because some compilers + # like to wrap large dependency lists on column 80 (with \), and + # we should not choose a depcomp mode which is confused by this. + # + # We need to recreate these files for each test, as the compiler may + # overwrite some of them when testing with obscure command lines. + # This happens at least with the AIX C compiler. + : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c + # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with + # Solaris 8's {/usr,}/bin/sh. 
+ touch sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + + case $depmode in + nosideeffect) + # after this tag, mechanisms are not by side-effect, so they'll + # only be used when explicitly requested + if test "x$enable_dependency_tracking" = xyes; then + continue + else + break + fi + ;; + none) break ;; + esac + # We check with `-c' and `-o' for the sake of the "dashmstdout" + # mode. It turns out that the SunPro C++ compiler does not properly + # handle `-M -o', and we need to detect this. + if depmode=$depmode \ + source=sub/conftest.c object=sub/conftest.${OBJEXT-o} \ + depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ + $SHELL ./depcomp $depcc -c -o sub/conftest.${OBJEXT-o} sub/conftest.c \ + >/dev/null 2>conftest.err && + grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && + grep sub/conftest.${OBJEXT-o} sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings + # or remarks (even with -Werror). So we grep stderr for any message + # that says an option was ignored or not supported. + # When given -MP, icc 7.0 and 7.1 complain thusly: + # icc: Command line warning: ignoring option '-M'; no argument required + # The diagnosis changed in icc 8.0: + # icc: Command line remark: option '-MP' not supported + if (grep 'ignoring option' conftest.err || + grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_CC_dependencies_compiler_type=$depmode + break + fi + fi + done + + cd .. + rm -rf conftest.dir +else + am_cv_CC_dependencies_compiler_type=none +fi + +fi +echo "$as_me:$LINENO: result: $am_cv_CC_dependencies_compiler_type" >&5 +echo "${ECHO_T}$am_cv_CC_dependencies_compiler_type" >&6 +CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type + + + +if + test "x$enable_dependency_tracking" != xno \ + && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then + am__fastdepCC_TRUE= + am__fastdepCC_FALSE='#' +else + am__fastdepCC_TRUE='#' + am__fastdepCC_FALSE= +fi + + + +# Check whether --enable-largefile or --disable-largefile was given. +if test "${enable_largefile+set}" = set; then + enableval="$enable_largefile" + +fi; +if test "$enable_largefile" != no; then + + echo "$as_me:$LINENO: checking for special C compiler options needed for large files" >&5 +echo $ECHO_N "checking for special C compiler options needed for large files... $ECHO_C" >&6 +if test "${ac_cv_sys_largefile_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_cv_sys_largefile_CC=no + if test "$GCC" != yes; then + ac_save_CC=$CC + while :; do + # IRIX 6.2 and later do not support large files by default, + # so use the C compiler's -n32 option if that helps. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include + /* Check that off_t can represent 2**63 - 1 correctly. + We can't simply define LARGE_OFF_T to be 9223372036854775807, + since some C++ compilers masquerading as C compilers + incorrectly reject 9223372036854775807. */ +#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) + int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 + && LARGE_OFF_T % 2147483647 == 1) + ? 1 : -1]; +int +main () +{ + + ; + return 0; +} +_ACEOF + rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? 
+ grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + break +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext + CC="$CC -n32" + rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_sys_largefile_CC=' -n32'; break +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext + break + done + CC=$ac_save_CC + rm -f conftest.$ac_ext + fi +fi +echo "$as_me:$LINENO: result: $ac_cv_sys_largefile_CC" >&5 +echo "${ECHO_T}$ac_cv_sys_largefile_CC" >&6 + if test "$ac_cv_sys_largefile_CC" != no; then + CC=$CC$ac_cv_sys_largefile_CC + fi + + echo "$as_me:$LINENO: checking for _FILE_OFFSET_BITS value needed for large files" >&5 +echo $ECHO_N "checking for _FILE_OFFSET_BITS value needed for large files... $ECHO_C" >&6 +if test "${ac_cv_sys_file_offset_bits+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + while :; do + ac_cv_sys_file_offset_bits=no + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include + /* Check that off_t can represent 2**63 - 1 correctly. + We can't simply define LARGE_OFF_T to be 9223372036854775807, + since some C++ compilers masquerading as C compilers + incorrectly reject 9223372036854775807. */ +#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) + int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 + && LARGE_OFF_T % 2147483647 == 1) + ? 1 : -1]; +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + break +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#define _FILE_OFFSET_BITS 64 +#include + /* Check that off_t can represent 2**63 - 1 correctly. + We can't simply define LARGE_OFF_T to be 9223372036854775807, + since some C++ compilers masquerading as C compilers + incorrectly reject 9223372036854775807. */ +#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) + int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 + && LARGE_OFF_T % 2147483647 == 1) + ? 1 : -1]; +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_sys_file_offset_bits=64; break +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext + break +done +fi +echo "$as_me:$LINENO: result: $ac_cv_sys_file_offset_bits" >&5 +echo "${ECHO_T}$ac_cv_sys_file_offset_bits" >&6 +if test "$ac_cv_sys_file_offset_bits" != no; then + +cat >>confdefs.h <<_ACEOF +#define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits +_ACEOF + +fi +rm -f conftest* + echo "$as_me:$LINENO: checking for _LARGE_FILES value needed for large files" >&5 +echo $ECHO_N "checking for _LARGE_FILES value needed for large files... $ECHO_C" >&6 +if test "${ac_cv_sys_large_files+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + while :; do + ac_cv_sys_large_files=no + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include + /* Check that off_t can represent 2**63 - 1 correctly. + We can't simply define LARGE_OFF_T to be 9223372036854775807, + since some C++ compilers masquerading as C compilers + incorrectly reject 9223372036854775807. */ +#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) + int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 + && LARGE_OFF_T % 2147483647 == 1) + ? 1 : -1]; +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + break +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#define _LARGE_FILES 1 +#include + /* Check that off_t can represent 2**63 - 1 correctly. + We can't simply define LARGE_OFF_T to be 9223372036854775807, + since some C++ compilers masquerading as C compilers + incorrectly reject 9223372036854775807. */ +#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) + int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 + && LARGE_OFF_T % 2147483647 == 1) + ? 1 : -1]; +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_sys_large_files=1; break +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext + break +done +fi +echo "$as_me:$LINENO: result: $ac_cv_sys_large_files" >&5 +echo "${ECHO_T}$ac_cv_sys_large_files" >&6 +if test "$ac_cv_sys_large_files" != no; then + +cat >>confdefs.h <<_ACEOF +#define _LARGE_FILES $ac_cv_sys_large_files +_ACEOF + +fi +rm -f conftest* +fi + + + + + + + + +# Check whether --with-hadoop-utils or --without-hadoop-utils was given. +if test "${with_hadoop_utils+set}" = set; then + withval="$with_hadoop_utils" + HADOOP_UTILS_PREFIX="$withval" +else + HADOOP_UTILS_PREFIX="\${prefix}" +fi; + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +echo "$as_me:$LINENO: checking how to run the C preprocessor" >&5 +echo $ECHO_N "checking how to run the C preprocessor... $ECHO_C" >&6 +# On Suns, sometimes $CPP names a directory. +if test -n "$CPP" && test -d "$CPP"; then + CPP= +fi +if test -z "$CPP"; then + if test "${ac_cv_prog_CPP+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + # Double quotes because CPP needs to be expanded + for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" + do + ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. 
+ cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_c_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_c_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + : +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.$ac_ext + + # OK, works on sane cases. Now check whether non-existent headers + # can be detected and how. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_c_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_c_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + # Broken: success on invalid input. +continue +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.err conftest.$ac_ext +if $ac_preproc_ok; then + break +fi + + done + ac_cv_prog_CPP=$CPP + +fi + CPP=$ac_cv_prog_CPP +else + ac_cv_prog_CPP=$CPP +fi +echo "$as_me:$LINENO: result: $CPP" >&5 +echo "${ECHO_T}$CPP" >&6 +ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_c_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_c_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + : +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.$ac_ext + + # OK, works on sane cases. 
Now check whether non-existent headers + # can be detected and how. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_c_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_c_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + # Broken: success on invalid input. +continue +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.err conftest.$ac_ext +if $ac_preproc_ok; then + : +else + { { echo "$as_me:$LINENO: error: C preprocessor \"$CPP\" fails sanity check +See \`config.log' for more details." >&5 +echo "$as_me: error: C preprocessor \"$CPP\" fails sanity check +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +echo "$as_me:$LINENO: checking for egrep" >&5 +echo $ECHO_N "checking for egrep... $ECHO_C" >&6 +if test "${ac_cv_prog_egrep+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if echo a | (grep -E '(a|b)') >/dev/null 2>&1 + then ac_cv_prog_egrep='grep -E' + else ac_cv_prog_egrep='egrep' + fi +fi +echo "$as_me:$LINENO: result: $ac_cv_prog_egrep" >&5 +echo "${ECHO_T}$ac_cv_prog_egrep" >&6 + EGREP=$ac_cv_prog_egrep + + +echo "$as_me:$LINENO: checking for ANSI C header files" >&5 +echo $ECHO_N "checking for ANSI C header files... $ECHO_C" >&6 +if test "${ac_cv_header_stdc+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +#include +#include +#include + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_header_stdc=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_header_stdc=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext + +if test $ac_cv_header_stdc = yes; then + # SunOS 4.x string.h does not declare mem*, contrary to ANSI. 
+ cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "memchr" >/dev/null 2>&1; then + : +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "free" >/dev/null 2>&1; then + : +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. + if test "$cross_compiling" = yes; then + : +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +#if ((' ' & 0x0FF) == 0x020) +# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') +# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) +#else +# define ISLOWER(c) \ + (('a' <= (c) && (c) <= 'i') \ + || ('j' <= (c) && (c) <= 'r') \ + || ('s' <= (c) && (c) <= 'z')) +# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) +#endif + +#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) +int +main () +{ + int i; + for (i = 0; i < 256; i++) + if (XOR (islower (i), ISLOWER (i)) + || toupper (i) != TOUPPER (i)) + exit(2); + exit (0); +} +_ACEOF +rm -f conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { ac_try='./conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + : +else + echo "$as_me: program exited with status $ac_status" >&5 +echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +( exit $ac_status ) +ac_cv_header_stdc=no +fi +rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +fi +fi +fi +echo "$as_me:$LINENO: result: $ac_cv_header_stdc" >&5 +echo "${ECHO_T}$ac_cv_header_stdc" >&6 +if test $ac_cv_header_stdc = yes; then + +cat >>confdefs.h <<\_ACEOF +#define STDC_HEADERS 1 +_ACEOF + +fi + +# On IRIX 5.3, sys/types and inttypes.h are conflicting. + + + + + + + + + +for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ + inttypes.h stdint.h unistd.h +do +as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh` +echo "$as_me:$LINENO: checking for $ac_header" >&5 +echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6 +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default + +#include <$ac_header> +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! 
-s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + eval "$as_ac_Header=yes" +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +eval "$as_ac_Header=no" +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 +if test `eval echo '${'$as_ac_Header'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + + + + +for ac_header in pthread.h +do +as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh` +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo "$as_me:$LINENO: checking for $ac_header" >&5 +echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6 +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 +else + # Is the header compilable? +echo "$as_me:$LINENO: checking $ac_header usability" >&5 +echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +#include <$ac_header> +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_header_compiler=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_header_compiler=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 +echo "${ECHO_T}$ac_header_compiler" >&6 + +# Is the header present? +echo "$as_me:$LINENO: checking $ac_header presence" >&5 +echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include <$ac_header> +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_c_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_c_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + ac_header_preproc=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_header_preproc=no +fi +rm -f conftest.err conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 +echo "${ECHO_T}$ac_header_preproc" >&6 + +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in + yes:no: ) + { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 +echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 +echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} + ac_header_preproc=yes + ;; + no:yes:* ) + { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 +echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 +echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 +echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 +echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 +echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 +echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} + ( + cat <<\_ASBOX +## --------------------------------- ## +## Report this to omalley@apache.org ## +## --------------------------------- ## +_ASBOX + ) | + sed "s/^/$as_me: WARNING: /" >&2 + ;; +esac +echo "$as_me:$LINENO: checking for $ac_header" >&5 +echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6 +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + eval "$as_ac_Header=\$ac_header_preproc" +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 + +fi +if test `eval echo '${'$as_ac_Header'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +else + { { echo "$as_me:$LINENO: error: Please check if you have installed the pthread library" >&5 +echo "$as_me: error: Please check if you have installed the pthread library" >&2;} + { (exit 1); exit 1; }; } +fi + +done + + +echo "$as_me:$LINENO: checking for pthread_create in -lpthread" >&5 +echo $ECHO_N "checking for pthread_create in -lpthread... $ECHO_C" >&6 +if test "${ac_cv_lib_pthread_pthread_create+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lpthread $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char pthread_create (); +int +main () +{ +pthread_create (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_pthread_pthread_create=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_pthread_pthread_create=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_pthread_pthread_create" >&5 +echo "${ECHO_T}$ac_cv_lib_pthread_pthread_create" >&6 +if test $ac_cv_lib_pthread_pthread_create = yes; then + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBPTHREAD 1 +_ACEOF + + LIBS="-lpthread $LIBS" + +else + { { echo "$as_me:$LINENO: error: Cannot find libpthread.so" >&5 +echo "$as_me: error: Cannot find libpthread.so" >&2;} + { (exit please check); exit please check; }; } +fi + + + + + + +# Check whether --with-hadoop-pipes or --without-hadoop-pipes was given. +if test "${with_hadoop_pipes+set}" = set; then + withval="$with_hadoop_pipes" + HADOOP_PIPES_PREFIX="$withval" +else + HADOOP_PIPES_PREFIX="\${prefix}" +fi; + + + +# Checks for programs. +ac_ext=cc +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +if test -n "$ac_tool_prefix"; then + for ac_prog in $CCC g++ c++ gpp aCC CC cxx cc++ cl FCC KCC RCC xlC_r xlC + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_CXX+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$CXX"; then + ac_cv_prog_CXX="$CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +CXX=$ac_cv_prog_CXX +if test -n "$CXX"; then + echo "$as_me:$LINENO: result: $CXX" >&5 +echo "${ECHO_T}$CXX" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + test -n "$CXX" && break + done +fi +if test -z "$CXX"; then + ac_ct_CXX=$CXX + for ac_prog in $CCC g++ c++ gpp aCC CC cxx cc++ cl FCC KCC RCC xlC_r xlC +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_ac_ct_CXX+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_CXX"; then + ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CXX="$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +ac_ct_CXX=$ac_cv_prog_ac_ct_CXX +if test -n "$ac_ct_CXX"; then + echo "$as_me:$LINENO: result: $ac_ct_CXX" >&5 +echo "${ECHO_T}$ac_ct_CXX" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + test -n "$ac_ct_CXX" && break +done +test -n "$ac_ct_CXX" || ac_ct_CXX="g++" + + CXX=$ac_ct_CXX +fi + + +# Provide some information about the compiler. +echo "$as_me:$LINENO:" \ + "checking for C++ compiler version" >&5 +ac_compiler=`set X $ac_compile; echo $2` +{ (eval echo "$as_me:$LINENO: \"$ac_compiler --version &5\"") >&5 + (eval $ac_compiler --version &5) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +{ (eval echo "$as_me:$LINENO: \"$ac_compiler -v &5\"") >&5 + (eval $ac_compiler -v &5) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +{ (eval echo "$as_me:$LINENO: \"$ac_compiler -V &5\"") >&5 + (eval $ac_compiler -V &5) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + +echo "$as_me:$LINENO: checking whether we are using the GNU C++ compiler" >&5 +echo $ECHO_N "checking whether we are using the GNU C++ compiler... $ECHO_C" >&6 +if test "${ac_cv_cxx_compiler_gnu+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_compiler_gnu=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_compiler_gnu=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_cxx_compiler_gnu=$ac_compiler_gnu + +fi +echo "$as_me:$LINENO: result: $ac_cv_cxx_compiler_gnu" >&5 +echo "${ECHO_T}$ac_cv_cxx_compiler_gnu" >&6 +GXX=`test $ac_compiler_gnu = yes && echo yes` +ac_test_CXXFLAGS=${CXXFLAGS+set} +ac_save_CXXFLAGS=$CXXFLAGS +CXXFLAGS="-g" +echo "$as_me:$LINENO: checking whether $CXX accepts -g" >&5 +echo $ECHO_N "checking whether $CXX accepts -g... $ECHO_C" >&6 +if test "${ac_cv_prog_cxx_g+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_prog_cxx_g=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_prog_cxx_g=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_prog_cxx_g" >&5 +echo "${ECHO_T}$ac_cv_prog_cxx_g" >&6 +if test "$ac_test_CXXFLAGS" = set; then + CXXFLAGS=$ac_save_CXXFLAGS +elif test $ac_cv_prog_cxx_g = yes; then + if test "$GXX" = yes; then + CXXFLAGS="-g -O2" + else + CXXFLAGS="-g" + fi +else + if test "$GXX" = yes; then + CXXFLAGS="-O2" + else + CXXFLAGS= + fi +fi +for ac_declaration in \ + '' \ + 'extern "C" void std::exit (int) throw (); using std::exit;' \ + 'extern "C" void std::exit (int); using std::exit;' \ + 'extern "C" void exit (int) throw ();' \ + 'extern "C" void exit (int);' \ + 'void exit (int);' +do + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_declaration +#include +int +main () +{ +exit (42); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + : +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +continue +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_declaration +int +main () +{ +exit (42); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + break +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +done +rm -f conftest* +if test -n "$ac_declaration"; then + echo '#ifdef __cplusplus' >>confdefs.h + echo $ac_declaration >>confdefs.h + echo '#endif' >>confdefs.h +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +depcc="$CXX" am_compiler_list= + +echo "$as_me:$LINENO: checking dependency style of $depcc" >&5 +echo $ECHO_N "checking dependency style of $depcc... $ECHO_C" >&6 +if test "${am_cv_CXX_dependencies_compiler_type+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. For + # instance it was reported that on HP-UX the gcc test will end up + # making a dummy file named `D' -- because `-MD' means `put the output + # in D'. + mkdir conftest.dir + # Copy depcomp to subdir because otherwise we won't find it if we're + # using a relative directory. + cp "$am_depcomp" conftest.dir + cd conftest.dir + # We will build objects and dependencies in a subdirectory because + # it helps to detect inapplicable dependency modes. For instance + # both Tru64's cc and ICC support -MD to output dependencies as a + # side effect of compilation, but ICC will put the dependencies in + # the current directory while Tru64 will put them in the object + # directory. + mkdir sub + + am_cv_CXX_dependencies_compiler_type=none + if test "$am_compiler_list" = ""; then + am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` + fi + for depmode in $am_compiler_list; do + # Setup a source with many dependencies, because some compilers + # like to wrap large dependency lists on column 80 (with \), and + # we should not choose a depcomp mode which is confused by this. 
+ # + # We need to recreate these files for each test, as the compiler may + # overwrite some of them when testing with obscure command lines. + # This happens at least with the AIX C compiler. + : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c + # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with + # Solaris 8's {/usr,}/bin/sh. + touch sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + + case $depmode in + nosideeffect) + # after this tag, mechanisms are not by side-effect, so they'll + # only be used when explicitly requested + if test "x$enable_dependency_tracking" = xyes; then + continue + else + break + fi + ;; + none) break ;; + esac + # We check with `-c' and `-o' for the sake of the "dashmstdout" + # mode. It turns out that the SunPro C++ compiler does not properly + # handle `-M -o', and we need to detect this. + if depmode=$depmode \ + source=sub/conftest.c object=sub/conftest.${OBJEXT-o} \ + depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ + $SHELL ./depcomp $depcc -c -o sub/conftest.${OBJEXT-o} sub/conftest.c \ + >/dev/null 2>conftest.err && + grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && + grep sub/conftest.${OBJEXT-o} sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings + # or remarks (even with -Werror). So we grep stderr for any message + # that says an option was ignored or not supported. + # When given -MP, icc 7.0 and 7.1 complain thusly: + # icc: Command line warning: ignoring option '-M'; no argument required + # The diagnosis changed in icc 8.0: + # icc: Command line remark: option '-MP' not supported + if (grep 'ignoring option' conftest.err || + grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_CXX_dependencies_compiler_type=$depmode + break + fi + fi + done + + cd .. + rm -rf conftest.dir +else + am_cv_CXX_dependencies_compiler_type=none +fi + +fi +echo "$as_me:$LINENO: result: $am_cv_CXX_dependencies_compiler_type" >&5 +echo "${ECHO_T}$am_cv_CXX_dependencies_compiler_type" >&6 +CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type + + + +if + test "x$enable_dependency_tracking" != xno \ + && test "$am_cv_CXX_dependencies_compiler_type" = gcc3; then + am__fastdepCXX_TRUE= + am__fastdepCXX_FALSE='#' +else + am__fastdepCXX_TRUE='#' + am__fastdepCXX_FALSE= +fi + + +# Find a good install program. We prefer a C program (faster), +# so one script is as good as another. But avoid the broken or +# incompatible versions: +# SysV /etc/install, /usr/sbin/install +# SunOS /usr/etc/install +# IRIX /sbin/install +# AIX /bin/install +# AmigaOS /C/install, which installs bootblocks on floppy discs +# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag +# AFS /usr/afsws/bin/install, which mishandles nonexistent args +# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" +# OS/2's system install, which has a completely different semantic +# ./install, which can be erroneously created by make from ./install.sh. +echo "$as_me:$LINENO: checking for a BSD-compatible install" >&5 +echo $ECHO_N "checking for a BSD-compatible install... $ECHO_C" >&6 +if test -z "$INSTALL"; then +if test "${ac_cv_path_install+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ # Account for people who put trailing slashes in PATH elements. +case $as_dir/ in + ./ | .// | /cC/* | \ + /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ + ?:\\/os2\\/install\\/* | ?:\\/OS2\\/INSTALL\\/* | \ + /usr/ucb/* ) ;; + *) + # OSF1 and SCO ODT 3.0 have their own names for install. + # Don't use installbsd from OSF since it installs stuff as root + # by default. + for ac_prog in ginstall scoinst install; do + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then + if test $ac_prog = install && + grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # AIX install. It has an incompatible calling convention. + : + elif test $ac_prog = install && + grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # program-specific install script used by HP pwplus--don't use. + : + else + ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" + break 3 + fi + fi + done + done + ;; +esac +done + + +fi + if test "${ac_cv_path_install+set}" = set; then + INSTALL=$ac_cv_path_install + else + # As a last resort, use the slow shell script. We don't cache a + # path for INSTALL within a source directory, because that will + # break other packages using the cache if that directory is + # removed, or if the path is relative. + INSTALL=$ac_install_sh + fi +fi +echo "$as_me:$LINENO: result: $INSTALL" >&5 +echo "${ECHO_T}$INSTALL" >&6 + +# Use test -z because SunOS4 sh mishandles braces in ${var-val}. +# It thinks the first close brace ends the variable substitution. +test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' + +test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' + +test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' + +# Check whether --enable-shared or --disable-shared was given. +if test "${enable_shared+set}" = set; then + enableval="$enable_shared" + p=${PACKAGE-default} + case $enableval in + yes) enable_shared=yes ;; + no) enable_shared=no ;; + *) + enable_shared=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_shared=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac +else + enable_shared=yes +fi; + +# Check whether --enable-static or --disable-static was given. +if test "${enable_static+set}" = set; then + enableval="$enable_static" + p=${PACKAGE-default} + case $enableval in + yes) enable_static=yes ;; + no) enable_static=no ;; + *) + enable_static=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_static=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac +else + enable_static=yes +fi; + +# Check whether --enable-fast-install or --disable-fast-install was given. +if test "${enable_fast_install+set}" = set; then + enableval="$enable_fast_install" + p=${PACKAGE-default} + case $enableval in + yes) enable_fast_install=yes ;; + no) enable_fast_install=no ;; + *) + enable_fast_install=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_fast_install=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac +else + enable_fast_install=yes +fi; + +# Make sure we can run config.sub. 
+$ac_config_sub sun4 >/dev/null 2>&1 || + { { echo "$as_me:$LINENO: error: cannot run $ac_config_sub" >&5 +echo "$as_me: error: cannot run $ac_config_sub" >&2;} + { (exit 1); exit 1; }; } + +echo "$as_me:$LINENO: checking build system type" >&5 +echo $ECHO_N "checking build system type... $ECHO_C" >&6 +if test "${ac_cv_build+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_cv_build_alias=$build_alias +test -z "$ac_cv_build_alias" && + ac_cv_build_alias=`$ac_config_guess` +test -z "$ac_cv_build_alias" && + { { echo "$as_me:$LINENO: error: cannot guess build type; you must specify one" >&5 +echo "$as_me: error: cannot guess build type; you must specify one" >&2;} + { (exit 1); exit 1; }; } +ac_cv_build=`$ac_config_sub $ac_cv_build_alias` || + { { echo "$as_me:$LINENO: error: $ac_config_sub $ac_cv_build_alias failed" >&5 +echo "$as_me: error: $ac_config_sub $ac_cv_build_alias failed" >&2;} + { (exit 1); exit 1; }; } + +fi +echo "$as_me:$LINENO: result: $ac_cv_build" >&5 +echo "${ECHO_T}$ac_cv_build" >&6 +build=$ac_cv_build +build_cpu=`echo $ac_cv_build | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'` +build_vendor=`echo $ac_cv_build | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'` +build_os=`echo $ac_cv_build | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'` + + +echo "$as_me:$LINENO: checking host system type" >&5 +echo $ECHO_N "checking host system type... $ECHO_C" >&6 +if test "${ac_cv_host+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_cv_host_alias=$host_alias +test -z "$ac_cv_host_alias" && + ac_cv_host_alias=$ac_cv_build_alias +ac_cv_host=`$ac_config_sub $ac_cv_host_alias` || + { { echo "$as_me:$LINENO: error: $ac_config_sub $ac_cv_host_alias failed" >&5 +echo "$as_me: error: $ac_config_sub $ac_cv_host_alias failed" >&2;} + { (exit 1); exit 1; }; } + +fi +echo "$as_me:$LINENO: result: $ac_cv_host" >&5 +echo "${ECHO_T}$ac_cv_host" >&6 +host=$ac_cv_host +host_cpu=`echo $ac_cv_host | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'` +host_vendor=`echo $ac_cv_host | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'` +host_os=`echo $ac_cv_host | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'` + + +echo "$as_me:$LINENO: checking for a sed that does not truncate output" >&5 +echo $ECHO_N "checking for a sed that does not truncate output... $ECHO_C" >&6 +if test "${lt_cv_path_SED+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + # Loop through the user's path and test for sed and gsed. +# Then use that list of sed's as ones to test for truncation. +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for lt_ac_prog in sed gsed; do + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then + lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext" + fi + done + done +done +lt_ac_max=0 +lt_ac_count=0 +# Add /usr/xpg4/bin/sed as it is typically found on Solaris +# along with /bin/sed that truncates output. +for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do + test ! -f $lt_ac_sed && continue + cat /dev/null > conftest.in + lt_ac_count=0 + echo $ECHO_N "0123456789$ECHO_C" >conftest.in + # Check for GNU sed and select it if it is found. 
+ if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then + lt_cv_path_SED=$lt_ac_sed + break + fi + while true; do + cat conftest.in conftest.in >conftest.tmp + mv conftest.tmp conftest.in + cp conftest.in conftest.nl + echo >>conftest.nl + $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break + cmp -s conftest.out conftest.nl || break + # 10000 chars as input seems more than enough + test $lt_ac_count -gt 10 && break + lt_ac_count=`expr $lt_ac_count + 1` + if test $lt_ac_count -gt $lt_ac_max; then + lt_ac_max=$lt_ac_count + lt_cv_path_SED=$lt_ac_sed + fi + done +done + +fi + +SED=$lt_cv_path_SED +echo "$as_me:$LINENO: result: $SED" >&5 +echo "${ECHO_T}$SED" >&6 + + +# Check whether --with-gnu-ld or --without-gnu-ld was given. +if test "${with_gnu_ld+set}" = set; then + withval="$with_gnu_ld" + test "$withval" = no || with_gnu_ld=yes +else + with_gnu_ld=no +fi; +ac_prog=ld +if test "$GCC" = yes; then + # Check if gcc -print-prog-name=ld gives a path. + echo "$as_me:$LINENO: checking for ld used by $CC" >&5 +echo $ECHO_N "checking for ld used by $CC... $ECHO_C" >&6 + case $host in + *-*-mingw*) + # gcc leaves a trailing carriage return which upsets mingw + ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; + esac + case $ac_prog in + # Accept absolute paths. + [\\/]* | ?:[\\/]*) + re_direlt='/[^/][^/]*/\.\./' + # Canonicalize the pathname of ld + ac_prog=`echo $ac_prog| $SED 's%\\\\%/%g'` + while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do + ac_prog=`echo $ac_prog| $SED "s%$re_direlt%/%"` + done + test -z "$LD" && LD="$ac_prog" + ;; + "") + # If it fails, then pretend we aren't using GCC. + ac_prog=ld + ;; + *) + # If it is relative, then search for the first ld in PATH. + with_gnu_ld=unknown + ;; + esac +elif test "$with_gnu_ld" = yes; then + echo "$as_me:$LINENO: checking for GNU ld" >&5 +echo $ECHO_N "checking for GNU ld... $ECHO_C" >&6 +else + echo "$as_me:$LINENO: checking for non-GNU ld" >&5 +echo $ECHO_N "checking for non-GNU ld... $ECHO_C" >&6 +fi +if test "${lt_cv_path_LD+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -z "$LD"; then + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then + lt_cv_path_LD="$ac_dir/$ac_prog" + # Check to see if the program is GNU ld. I'd rather use --version, + # but apparently some GNU ld's only accept -v. + # Break only if it was the GNU/non-GNU ld that we prefer. + case `"$lt_cv_path_LD" -v 2>&1 &5 +echo "${ECHO_T}$LD" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi +test -z "$LD" && { { echo "$as_me:$LINENO: error: no acceptable ld found in \$PATH" >&5 +echo "$as_me: error: no acceptable ld found in \$PATH" >&2;} + { (exit 1); exit 1; }; } +echo "$as_me:$LINENO: checking if the linker ($LD) is GNU ld" >&5 +echo $ECHO_N "checking if the linker ($LD) is GNU ld... $ECHO_C" >&6 +if test "${lt_cv_prog_gnu_ld+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + # I'd rather use --version here, but apparently some GNU ld's only accept -v. +case `$LD -v 2>&1 &5 +echo "${ECHO_T}$lt_cv_prog_gnu_ld" >&6 +with_gnu_ld=$lt_cv_prog_gnu_ld + + +echo "$as_me:$LINENO: checking for $LD option to reload object files" >&5 +echo $ECHO_N "checking for $LD option to reload object files... 
$ECHO_C" >&6 +if test "${lt_cv_ld_reload_flag+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_ld_reload_flag='-r' +fi +echo "$as_me:$LINENO: result: $lt_cv_ld_reload_flag" >&5 +echo "${ECHO_T}$lt_cv_ld_reload_flag" >&6 +reload_flag=$lt_cv_ld_reload_flag +case $reload_flag in +"" | " "*) ;; +*) reload_flag=" $reload_flag" ;; +esac +reload_cmds='$LD$reload_flag -o $output$reload_objs' +case $host_os in + darwin*) + if test "$GCC" = yes; then + reload_cmds='$CC -nostdlib ${wl}-r -o $output$reload_objs' + else + reload_cmds='$LD$reload_flag -o $output$reload_objs' + fi + ;; +esac + +echo "$as_me:$LINENO: checking for BSD-compatible nm" >&5 +echo $ECHO_N "checking for BSD-compatible nm... $ECHO_C" >&6 +if test "${lt_cv_path_NM+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$NM"; then + # Let the user override the test. + lt_cv_path_NM="$NM" +else + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH /usr/ccs/bin /usr/ucb /bin; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + tmp_nm="$ac_dir/${ac_tool_prefix}nm" + if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then + # Check to see if the nm accepts a BSD-compat flag. + # Adding the `sed 1q' prevents false positives on HP-UX, which says: + # nm: unknown option "B" ignored + # Tru64's nm complains that /dev/null is an invalid object file + case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in + */dev/null* | *'Invalid file or object type'*) + lt_cv_path_NM="$tmp_nm -B" + break + ;; + *) + case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in + */dev/null*) + lt_cv_path_NM="$tmp_nm -p" + break + ;; + *) + lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but + continue # so that we can try to find one that supports BSD flags + ;; + esac + esac + fi + done + IFS="$lt_save_ifs" + test -z "$lt_cv_path_NM" && lt_cv_path_NM=nm +fi +fi +echo "$as_me:$LINENO: result: $lt_cv_path_NM" >&5 +echo "${ECHO_T}$lt_cv_path_NM" >&6 +NM="$lt_cv_path_NM" + +echo "$as_me:$LINENO: checking whether ln -s works" >&5 +echo $ECHO_N "checking whether ln -s works... $ECHO_C" >&6 +LN_S=$as_ln_s +if test "$LN_S" = "ln -s"; then + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 +else + echo "$as_me:$LINENO: result: no, using $LN_S" >&5 +echo "${ECHO_T}no, using $LN_S" >&6 +fi + +echo "$as_me:$LINENO: checking how to recognise dependent libraries" >&5 +echo $ECHO_N "checking how to recognise dependent libraries... $ECHO_C" >&6 +if test "${lt_cv_deplibs_check_method+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_file_magic_cmd='$MAGIC_CMD' +lt_cv_file_magic_test_file= +lt_cv_deplibs_check_method='unknown' +# Need to set the preceding variable on all platforms that support +# interlibrary dependencies. +# 'none' -- dependencies not supported. +# `unknown' -- same as none, but documents that we really don't know. +# 'pass_all' -- all dependencies passed with no checks. +# 'test_compile' -- check by making test program. +# 'file_magic [[regex]]' -- check by looking for files in library path +# which responds to the $file_magic_cmd with a given extended regex. +# If you have `file' or equivalent on your system and you're not sure +# whether `pass_all' will *always* work, you probably want this one. 
+ +case $host_os in +aix4* | aix5*) + lt_cv_deplibs_check_method=pass_all + ;; + +beos*) + lt_cv_deplibs_check_method=pass_all + ;; + +bsdi[45]*) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)' + lt_cv_file_magic_cmd='/usr/bin/file -L' + lt_cv_file_magic_test_file=/shlib/libc.so + ;; + +cygwin*) + # func_win32_libid is a shell function defined in ltmain.sh + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + ;; + +mingw* | pw32*) + # Base MSYS/MinGW do not provide the 'file' command needed by + # func_win32_libid shell function, so use a weaker test based on 'objdump'. + lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' + lt_cv_file_magic_cmd='$OBJDUMP -f' + ;; + +darwin* | rhapsody*) + lt_cv_deplibs_check_method=pass_all + ;; + +freebsd* | kfreebsd*-gnu | dragonfly*) + if echo __ELF__ | $CC -E - | grep __ELF__ > /dev/null; then + case $host_cpu in + i*86 ) + # Not sure whether the presence of OpenBSD here was a mistake. + # Let's accept both of them until this is cleared up. + lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` + ;; + esac + else + lt_cv_deplibs_check_method=pass_all + fi + ;; + +gnu*) + lt_cv_deplibs_check_method=pass_all + ;; + +hpux10.20* | hpux11*) + lt_cv_file_magic_cmd=/usr/bin/file + case "$host_cpu" in + ia64*) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64' + lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so + ;; + hppa*64*) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - PA-RISC [0-9].[0-9]' + lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl + ;; + *) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9].[0-9]) shared library' + lt_cv_file_magic_test_file=/usr/lib/libc.sl + ;; + esac + ;; + +irix5* | irix6* | nonstopux*) + case $LD in + *-32|*"-32 ") libmagic=32-bit;; + *-n32|*"-n32 ") libmagic=N32;; + *-64|*"-64 ") libmagic=64-bit;; + *) libmagic=never-match;; + esac + lt_cv_deplibs_check_method=pass_all + ;; + +# This must be Linux ELF. 
+linux*) + lt_cv_deplibs_check_method=pass_all + ;; + +netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ > /dev/null; then + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$' + fi + ;; + +newos6*) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=/usr/lib/libnls.so + ;; + +nto-qnx*) + lt_cv_deplibs_check_method=unknown + ;; + +openbsd*) + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' + fi + ;; + +osf3* | osf4* | osf5*) + lt_cv_deplibs_check_method=pass_all + ;; + +sco3.2v5*) + lt_cv_deplibs_check_method=pass_all + ;; + +solaris*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + case $host_vendor in + motorola) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]' + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` + ;; + ncr) + lt_cv_deplibs_check_method=pass_all + ;; + sequent) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' + ;; + sni) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib" + lt_cv_file_magic_test_file=/lib/libc.so + ;; + siemens) + lt_cv_deplibs_check_method=pass_all + ;; + esac + ;; + +sysv5OpenUNIX8* | sysv5UnixWare7* | sysv5uw[78]* | unixware7* | sysv4*uw2*) + lt_cv_deplibs_check_method=pass_all + ;; +esac + +fi +echo "$as_me:$LINENO: result: $lt_cv_deplibs_check_method" >&5 +echo "${ECHO_T}$lt_cv_deplibs_check_method" >&6 +file_magic_cmd=$lt_cv_file_magic_cmd +deplibs_check_method=$lt_cv_deplibs_check_method +test -z "$deplibs_check_method" && deplibs_check_method=unknown + + + + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# Allow CC to be a program name with arguments. +compiler=$CC + +# Check whether --enable-libtool-lock or --disable-libtool-lock was given. +if test "${enable_libtool_lock+set}" = set; then + enableval="$enable_libtool_lock" + +fi; +test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes + +# Some flags need to be propagated to the compiler or linker for good +# libtool support. +case $host in +ia64-*-hpux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + case `/usr/bin/file conftest.$ac_objext` in + *ELF-32*) + HPUX_IA64_MODE="32" + ;; + *ELF-64*) + HPUX_IA64_MODE="64" + ;; + esac + fi + rm -rf conftest* + ;; +*-*-irix6*) + # Find out which ABI we are using. + echo '#line 5326 "configure"' > conftest.$ac_ext + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; then + if test "$lt_cv_prog_gnu_ld" = yes; then + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -melf32bsmip" + ;; + *N32*) + LD="${LD-ld} -melf32bmipn32" + ;; + *64-bit*) + LD="${LD-ld} -melf64bmip" + ;; + esac + else + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -32" + ;; + *N32*) + LD="${LD-ld} -n32" + ;; + *64-bit*) + LD="${LD-ld} -64" + ;; + esac + fi + fi + rm -rf conftest* + ;; + +x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*|s390*-*linux*|sparc*-*linux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + case "`/usr/bin/file conftest.o`" in + *32-bit*) + case $host in + x86_64-*linux*) + LD="${LD-ld} -m elf_i386" + ;; + ppc64-*linux*|powerpc64-*linux*) + LD="${LD-ld} -m elf32ppclinux" + ;; + s390x-*linux*) + LD="${LD-ld} -m elf_s390" + ;; + sparc64-*linux*) + LD="${LD-ld} -m elf32_sparc" + ;; + esac + ;; + *64-bit*) + case $host in + x86_64-*linux*) + LD="${LD-ld} -m elf_x86_64" + ;; + ppc*-*linux*|powerpc*-*linux*) + LD="${LD-ld} -m elf64ppc" + ;; + s390*-*linux*) + LD="${LD-ld} -m elf64_s390" + ;; + sparc*-*linux*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; + +*-*-sco3.2v5*) + # On SCO OpenServer 5, we need -belf to get full-featured binaries. + SAVE_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -belf" + echo "$as_me:$LINENO: checking whether the C compiler needs -belf" >&5 +echo $ECHO_N "checking whether the C compiler needs -belf... $ECHO_C" >&6 +if test "${lt_cv_cc_needs_belf+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + lt_cv_cc_needs_belf=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +lt_cv_cc_needs_belf=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +fi +echo "$as_me:$LINENO: result: $lt_cv_cc_needs_belf" >&5 +echo "${ECHO_T}$lt_cv_cc_needs_belf" >&6 + if test x"$lt_cv_cc_needs_belf" != x"yes"; then + # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf + CFLAGS="$SAVE_CFLAGS" + fi + ;; + +esac + +need_locks="$enable_libtool_lock" + + + +for ac_header in dlfcn.h +do +as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh` +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo "$as_me:$LINENO: checking for $ac_header" >&5 +echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6 +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 +else + # Is the header compilable? +echo "$as_me:$LINENO: checking $ac_header usability" >&5 +echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +#include <$ac_header> +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_header_compiler=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_header_compiler=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 +echo "${ECHO_T}$ac_header_compiler" >&6 + +# Is the header present? +echo "$as_me:$LINENO: checking $ac_header presence" >&5 +echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include <$ac_header> +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_c_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_c_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + ac_header_preproc=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_header_preproc=no +fi +rm -f conftest.err conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 +echo "${ECHO_T}$ac_header_preproc" >&6 + +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in + yes:no: ) + { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 +echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 +echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} + ac_header_preproc=yes + ;; + no:yes:* ) + { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 +echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 +echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 +echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 +echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 +echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 +echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} + ( + cat <<\_ASBOX +## --------------------------------- ## +## Report this to omalley@apache.org ## +## --------------------------------- ## +_ASBOX + ) | + sed "s/^/$as_me: WARNING: /" >&2 + ;; +esac +echo "$as_me:$LINENO: checking for $ac_header" >&5 +echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6 +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + eval "$as_ac_Header=\$ac_header_preproc" +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 + +fi +if test `eval echo '${'$as_ac_Header'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + + + +if test -n "$CXX" && ( test "X$CXX" != "Xno" && + ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || + (test "X$CXX" != "Xg++"))) ; then + ac_ext=cc +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +echo "$as_me:$LINENO: checking how to run the C++ preprocessor" >&5 +echo $ECHO_N "checking how to run the C++ preprocessor... 
$ECHO_C" >&6 +if test -z "$CXXCPP"; then + if test "${ac_cv_prog_CXXCPP+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + # Double quotes because CXXCPP needs to be expanded + for CXXCPP in "$CXX -E" "/lib/cpp" + do + ac_preproc_ok=false +for ac_cxx_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_cxx_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_cxx_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + : +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.$ac_ext + + # OK, works on sane cases. Now check whether non-existent headers + # can be detected and how. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_cxx_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_cxx_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + # Broken: success on invalid input. +continue +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.err conftest.$ac_ext +if $ac_preproc_ok; then + break +fi + + done + ac_cv_prog_CXXCPP=$CXXCPP + +fi + CXXCPP=$ac_cv_prog_CXXCPP +else + ac_cv_prog_CXXCPP=$CXXCPP +fi +echo "$as_me:$LINENO: result: $CXXCPP" >&5 +echo "${ECHO_T}$CXXCPP" >&6 +ac_preproc_ok=false +for ac_cxx_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_cxx_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_cxx_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + : +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.$ac_ext + + # OK, works on sane cases. Now check whether non-existent headers + # can be detected and how. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_cxx_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_cxx_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + # Broken: success on invalid input. +continue +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.err conftest.$ac_ext +if $ac_preproc_ok; then + : +else + { { echo "$as_me:$LINENO: error: C++ preprocessor \"$CXXCPP\" fails sanity check +See \`config.log' for more details." >&5 +echo "$as_me: error: C++ preprocessor \"$CXXCPP\" fails sanity check +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } +fi + +ac_ext=cc +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +fi + + +ac_ext=f +ac_compile='$F77 -c $FFLAGS conftest.$ac_ext >&5' +ac_link='$F77 -o conftest$ac_exeext $FFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_f77_compiler_gnu +if test -n "$ac_tool_prefix"; then + for ac_prog in g77 f77 xlf frt pgf77 fort77 fl32 af77 f90 xlf90 pgf90 epcf90 f95 fort xlf95 ifc efc pgf95 lf95 gfortran + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_F77+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$F77"; then + ac_cv_prog_F77="$F77" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_F77="$ac_tool_prefix$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +F77=$ac_cv_prog_F77 +if test -n "$F77"; then + echo "$as_me:$LINENO: result: $F77" >&5 +echo "${ECHO_T}$F77" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + test -n "$F77" && break + done +fi +if test -z "$F77"; then + ac_ct_F77=$F77 + for ac_prog in g77 f77 xlf frt pgf77 fort77 fl32 af77 f90 xlf90 pgf90 epcf90 f95 fort xlf95 ifc efc pgf95 lf95 gfortran +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_ac_ct_F77+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_F77"; then + ac_cv_prog_ac_ct_F77="$ac_ct_F77" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_F77="$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +ac_ct_F77=$ac_cv_prog_ac_ct_F77 +if test -n "$ac_ct_F77"; then + echo "$as_me:$LINENO: result: $ac_ct_F77" >&5 +echo "${ECHO_T}$ac_ct_F77" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + test -n "$ac_ct_F77" && break +done + + F77=$ac_ct_F77 +fi + + +# Provide some information about the compiler. +echo "$as_me:5968:" \ + "checking for Fortran 77 compiler version" >&5 +ac_compiler=`set X $ac_compile; echo $2` +{ (eval echo "$as_me:$LINENO: \"$ac_compiler --version &5\"") >&5 + (eval $ac_compiler --version &5) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +{ (eval echo "$as_me:$LINENO: \"$ac_compiler -v &5\"") >&5 + (eval $ac_compiler -v &5) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +{ (eval echo "$as_me:$LINENO: \"$ac_compiler -V &5\"") >&5 + (eval $ac_compiler -V &5) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +rm -f a.out + +# If we don't use `.F' as extension, the preprocessor is not run on the +# input file. (Note that this only needs to work for GNU compilers.) +ac_save_ext=$ac_ext +ac_ext=F +echo "$as_me:$LINENO: checking whether we are using the GNU Fortran 77 compiler" >&5 +echo $ECHO_N "checking whether we are using the GNU Fortran 77 compiler... $ECHO_C" >&6 +if test "${ac_cv_f77_compiler_gnu+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF + program main +#ifndef __GNUC__ + choke me +#endif + + end +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_f77_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_compiler_gnu=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_compiler_gnu=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_f77_compiler_gnu=$ac_compiler_gnu + +fi +echo "$as_me:$LINENO: result: $ac_cv_f77_compiler_gnu" >&5 +echo "${ECHO_T}$ac_cv_f77_compiler_gnu" >&6 +ac_ext=$ac_save_ext +ac_test_FFLAGS=${FFLAGS+set} +ac_save_FFLAGS=$FFLAGS +FFLAGS= +echo "$as_me:$LINENO: checking whether $F77 accepts -g" >&5 +echo $ECHO_N "checking whether $F77 accepts -g... $ECHO_C" >&6 +if test "${ac_cv_prog_f77_g+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + FFLAGS=-g +cat >conftest.$ac_ext <<_ACEOF + program main + + end +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_f77_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_prog_f77_g=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_prog_f77_g=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +echo "$as_me:$LINENO: result: $ac_cv_prog_f77_g" >&5 +echo "${ECHO_T}$ac_cv_prog_f77_g" >&6 +if test "$ac_test_FFLAGS" = set; then + FFLAGS=$ac_save_FFLAGS +elif test $ac_cv_prog_f77_g = yes; then + if test "x$ac_cv_f77_compiler_gnu" = xyes; then + FFLAGS="-g -O2" + else + FFLAGS="-g" + fi +else + if test "x$ac_cv_f77_compiler_gnu" = xyes; then + FFLAGS="-O2" + else + FFLAGS= + fi +fi + +G77=`test $ac_compiler_gnu = yes && echo yes` +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + +# Autoconf 2.13's AC_OBJEXT and AC_EXEEXT macros only works for C compilers! + +# find the maximum length of command line arguments +echo "$as_me:$LINENO: checking the maximum length of command line arguments" >&5 +echo $ECHO_N "checking the maximum length of command line arguments... $ECHO_C" >&6 +if test "${lt_cv_sys_max_cmd_len+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + i=0 + teststring="ABCD" + + case $build_os in + msdosdjgpp*) + # On DJGPP, this test can blow up pretty badly due to problems in libc + # (any single argument exceeding 2000 bytes causes a buffer overrun + # during glob expansion). Even if it were fixed, the result of this + # check would be larger than it should be. + lt_cv_sys_max_cmd_len=12288; # 12K is about right + ;; + + gnu*) + # Under GNU Hurd, this test is not required because there is + # no limit to the length of command line arguments. 
+ # Libtool will interpret -1 as no limit whatsoever + lt_cv_sys_max_cmd_len=-1; + ;; + + cygwin* | mingw*) + # On Win9x/ME, this test blows up -- it succeeds, but takes + # about 5 minutes as the teststring grows exponentially. + # Worse, since 9x/ME are not pre-emptively multitasking, + # you end up with a "frozen" computer, even though with patience + # the test eventually succeeds (with a max line length of 256k). + # Instead, let's just punt: use the minimum linelength reported by + # all of the supported platforms: 8192 (on NT/2K/XP). + lt_cv_sys_max_cmd_len=8192; + ;; + + amigaos*) + # On AmigaOS with pdksh, this test takes hours, literally. + # So we just punt and use a minimum line length of 8192. + lt_cv_sys_max_cmd_len=8192; + ;; + + netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) + # This has been around since 386BSD, at least. Likely further. + if test -x /sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` + elif test -x /usr/sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` + else + lt_cv_sys_max_cmd_len=65536 # usable default for *BSD + fi + # And add a safety zone + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + ;; + osf*) + # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure + # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not + # nice to cause kernel panics so lets avoid the loop below. + # First set a reasonable default. + lt_cv_sys_max_cmd_len=16384 + # + if test -x /sbin/sysconfig; then + case `/sbin/sysconfig -q proc exec_disable_arg_limit` in + *1*) lt_cv_sys_max_cmd_len=-1 ;; + esac + fi + ;; + *) + # If test is not a shell built-in, we'll probably end up computing a + # maximum length that is only half of the actual maximum length, but + # we can't tell. + SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} + while (test "X"`$SHELL $0 --fallback-echo "X$teststring" 2>/dev/null` \ + = "XX$teststring") >/dev/null 2>&1 && + new_result=`expr "X$teststring" : ".*" 2>&1` && + lt_cv_sys_max_cmd_len=$new_result && + test $i != 17 # 1/2 MB should be enough + do + i=`expr $i + 1` + teststring=$teststring$teststring + done + teststring= + # Add a significant safety factor because C++ compilers can tack on massive + # amounts of additional arguments before passing them to the linker. + # It appears as though 1/2 is a usable value. + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` + ;; + esac + +fi + +if test -n $lt_cv_sys_max_cmd_len ; then + echo "$as_me:$LINENO: result: $lt_cv_sys_max_cmd_len" >&5 +echo "${ECHO_T}$lt_cv_sys_max_cmd_len" >&6 +else + echo "$as_me:$LINENO: result: none" >&5 +echo "${ECHO_T}none" >&6 +fi + + + + +# Check for command to grab the raw symbol name followed by C symbol from nm. +echo "$as_me:$LINENO: checking command to parse $NM output from $compiler object" >&5 +echo $ECHO_N "checking command to parse $NM output from $compiler object... $ECHO_C" >&6 +if test "${lt_cv_sys_global_symbol_pipe+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + +# These are sane defaults that work on at least a few old systems. +# [They come from Ultrix. What could be older than Ultrix?!! ;)] + +# Character class describing NM global symbol codes. +symcode='[BCDEGRST]' + +# Regexp to match symbols that can be accessed directly from C. +sympat='\([_A-Za-z][_A-Za-z0-9]*\)' + +# Transform an extracted symbol line into a proper C declaration +lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^. 
.* \(.*\)$/extern int \1;/p'" + +# Transform an extracted symbol line into symbol name and symbol address +lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode \([^ ]*\) \([^ ]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'" + +# Define system-specific variables. +case $host_os in +aix*) + symcode='[BCDT]' + ;; +cygwin* | mingw* | pw32*) + symcode='[ABCDGISTW]' + ;; +hpux*) # Its linker distinguishes data from code symbols + if test "$host_cpu" = ia64; then + symcode='[ABCDEGRST]' + fi + lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'" + ;; +linux*) + if test "$host_cpu" = ia64; then + symcode='[ABCDGIRSTW]' + lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'" + fi + ;; +irix* | nonstopux*) + symcode='[BCDEGRST]' + ;; +osf*) + symcode='[BCDEGQRST]' + ;; +solaris* | sysv5*) + symcode='[BDRT]' + ;; +sysv4) + symcode='[DFNSTU]' + ;; +esac + +# Handle CRLF in mingw tool chain +opt_cr= +case $build_os in +mingw*) + opt_cr=`echo 'x\{0,1\}' | tr x '\015'` # option cr in regexp + ;; +esac + +# If we're using GNU nm, then use its standard symbol codes. +case `$NM -V 2>&1` in +*GNU* | *'with BFD'*) + symcode='[ABCDGIRSTW]' ;; +esac + +# Try without a prefix undercore, then with it. +for ac_symprfx in "" "_"; do + + # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. + symxfrm="\\1 $ac_symprfx\\2 \\2" + + # Write the raw and C identifiers. + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + + # Check to see that the pipe works correctly. + pipe_works=no + + rm -f conftest* + cat > conftest.$ac_ext <&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + # Now try to grab the symbols. + nlist=conftest.nm + if { (eval echo "$as_me:$LINENO: \"$NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist\"") >&5 + (eval $NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && test -s "$nlist"; then + # Try sorting and uniquifying the output. + if sort "$nlist" | uniq > "$nlist"T; then + mv -f "$nlist"T "$nlist" + else + rm -f "$nlist"T + fi + + # Make sure that we snagged all the symbols we need. + if grep ' nm_test_var$' "$nlist" >/dev/null; then + if grep ' nm_test_func$' "$nlist" >/dev/null; then + cat < conftest.$ac_ext +#ifdef __cplusplus +extern "C" { +#endif + +EOF + # Now generate the symbol file. + eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | grep -v main >> conftest.$ac_ext' + + cat <> conftest.$ac_ext +#if defined (__STDC__) && __STDC__ +# define lt_ptr_t void * +#else +# define lt_ptr_t char * +# define const +#endif + +/* The mapping between symbol names and symbols. 
*/ +const struct { + const char *name; + lt_ptr_t address; +} +lt_preloaded_symbols[] = +{ +EOF + $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (lt_ptr_t) \&\2},/" < "$nlist" | grep -v main >> conftest.$ac_ext + cat <<\EOF >> conftest.$ac_ext + {0, (lt_ptr_t) 0} +}; + +#ifdef __cplusplus +} +#endif +EOF + # Now try linking the two files. + mv conftest.$ac_objext conftstm.$ac_objext + lt_save_LIBS="$LIBS" + lt_save_CFLAGS="$CFLAGS" + LIBS="conftstm.$ac_objext" + CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" + if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && test -s conftest${ac_exeext}; then + pipe_works=yes + fi + LIBS="$lt_save_LIBS" + CFLAGS="$lt_save_CFLAGS" + else + echo "cannot find nm_test_func in $nlist" >&5 + fi + else + echo "cannot find nm_test_var in $nlist" >&5 + fi + else + echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5 + fi + else + echo "$progname: failed program was:" >&5 + cat conftest.$ac_ext >&5 + fi + rm -f conftest* conftst* + + # Do not use the global_symbol_pipe unless it works. + if test "$pipe_works" = yes; then + break + else + lt_cv_sys_global_symbol_pipe= + fi +done + +fi + +if test -z "$lt_cv_sys_global_symbol_pipe"; then + lt_cv_sys_global_symbol_to_cdecl= +fi +if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then + echo "$as_me:$LINENO: result: failed" >&5 +echo "${ECHO_T}failed" >&6 +else + echo "$as_me:$LINENO: result: ok" >&5 +echo "${ECHO_T}ok" >&6 +fi + +echo "$as_me:$LINENO: checking for objdir" >&5 +echo $ECHO_N "checking for objdir... $ECHO_C" >&6 +if test "${lt_cv_objdir+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + rm -f .libs 2>/dev/null +mkdir .libs 2>/dev/null +if test -d .libs; then + lt_cv_objdir=.libs +else + # MS-DOS does not allow filenames that begin with a dot. + lt_cv_objdir=_libs +fi +rmdir .libs 2>/dev/null +fi +echo "$as_me:$LINENO: result: $lt_cv_objdir" >&5 +echo "${ECHO_T}$lt_cv_objdir" >&6 +objdir=$lt_cv_objdir + + + + + +case $host_os in +aix3*) + # AIX sometimes has problems with the GCC collect2 program. For some + # reason, if we set the COLLECT_NAMES environment variable, the problems + # vanish in a puff of smoke. + if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES + fi + ;; +esac + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +Xsed='sed -e 1s/^X//' +sed_quote_subst='s/\([\\"\\`$\\\\]\)/\\\1/g' + +# Same as above, but do not quote variable references. +double_quote_subst='s/\([\\"\\`\\\\]\)/\\\1/g' + +# Sed substitution to delay expansion of an escaped shell variable in a +# double_quote_subst'ed string. +delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' + +# Sed substitution to avoid accidental globbing in evaled expressions +no_glob_subst='s/\*/\\\*/g' + +# Constants: +rm="rm -f" + +# Global variables: +default_ofile=libtool +can_build_shared=yes + +# All known linkers require a `.a' archive for static linking (except M$VC, +# which needs '.lib'). +libext=a +ltmain="$ac_aux_dir/ltmain.sh" +ofile="$default_ofile" +with_gnu_ld="$lt_cv_prog_gnu_ld" + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. +set dummy ${ac_tool_prefix}ar; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... 
$ECHO_C" >&6 +if test "${ac_cv_prog_AR+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$AR"; then + ac_cv_prog_AR="$AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_AR="${ac_tool_prefix}ar" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +AR=$ac_cv_prog_AR +if test -n "$AR"; then + echo "$as_me:$LINENO: result: $AR" >&5 +echo "${ECHO_T}$AR" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + +fi +if test -z "$ac_cv_prog_AR"; then + ac_ct_AR=$AR + # Extract the first word of "ar", so it can be a program name with args. +set dummy ar; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_ac_ct_AR+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_AR"; then + ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_AR="ar" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + + test -z "$ac_cv_prog_ac_ct_AR" && ac_cv_prog_ac_ct_AR="false" +fi +fi +ac_ct_AR=$ac_cv_prog_ac_ct_AR +if test -n "$ac_ct_AR"; then + echo "$as_me:$LINENO: result: $ac_ct_AR" >&5 +echo "${ECHO_T}$ac_ct_AR" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + AR=$ac_ct_AR +else + AR="$ac_cv_prog_AR" +fi + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. +set dummy ${ac_tool_prefix}ranlib; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_RANLIB+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$RANLIB"; then + ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +RANLIB=$ac_cv_prog_RANLIB +if test -n "$RANLIB"; then + echo "$as_me:$LINENO: result: $RANLIB" >&5 +echo "${ECHO_T}$RANLIB" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + +fi +if test -z "$ac_cv_prog_RANLIB"; then + ac_ct_RANLIB=$RANLIB + # Extract the first word of "ranlib", so it can be a program name with args. +set dummy ranlib; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_ac_ct_RANLIB+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_RANLIB"; then + ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_RANLIB="ranlib" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + + test -z "$ac_cv_prog_ac_ct_RANLIB" && ac_cv_prog_ac_ct_RANLIB=":" +fi +fi +ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB +if test -n "$ac_ct_RANLIB"; then + echo "$as_me:$LINENO: result: $ac_ct_RANLIB" >&5 +echo "${ECHO_T}$ac_ct_RANLIB" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + RANLIB=$ac_ct_RANLIB +else + RANLIB="$ac_cv_prog_RANLIB" +fi + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. +set dummy ${ac_tool_prefix}strip; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_STRIP+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$STRIP"; then + ac_cv_prog_STRIP="$STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_STRIP="${ac_tool_prefix}strip" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +STRIP=$ac_cv_prog_STRIP +if test -n "$STRIP"; then + echo "$as_me:$LINENO: result: $STRIP" >&5 +echo "${ECHO_T}$STRIP" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + +fi +if test -z "$ac_cv_prog_STRIP"; then + ac_ct_STRIP=$STRIP + # Extract the first word of "strip", so it can be a program name with args. +set dummy strip; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_ac_ct_STRIP+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_STRIP"; then + ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_STRIP="strip" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + + test -z "$ac_cv_prog_ac_ct_STRIP" && ac_cv_prog_ac_ct_STRIP=":" +fi +fi +ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP +if test -n "$ac_ct_STRIP"; then + echo "$as_me:$LINENO: result: $ac_ct_STRIP" >&5 +echo "${ECHO_T}$ac_ct_STRIP" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + STRIP=$ac_ct_STRIP +else + STRIP="$ac_cv_prog_STRIP" +fi + + +old_CC="$CC" +old_CFLAGS="$CFLAGS" + +# Set sane defaults for various variables +test -z "$AR" && AR=ar +test -z "$AR_FLAGS" && AR_FLAGS=cru +test -z "$AS" && AS=as +test -z "$CC" && CC=cc +test -z "$LTCC" && LTCC=$CC +test -z "$DLLTOOL" && DLLTOOL=dlltool +test -z "$LD" && LD=ld +test -z "$LN_S" && LN_S="ln -s" +test -z "$MAGIC_CMD" && MAGIC_CMD=file +test -z "$NM" && NM=nm +test -z "$SED" && SED=sed +test -z "$OBJDUMP" && OBJDUMP=objdump +test -z "$RANLIB" && RANLIB=: +test -z "$STRIP" && STRIP=: +test -z "$ac_objext" && ac_objext=o + +# Determine commands to create old-style static archives. 
+old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs$old_deplibs' +old_postinstall_cmds='chmod 644 $oldlib' +old_postuninstall_cmds= + +if test -n "$RANLIB"; then + case $host_os in + openbsd*) + old_postinstall_cmds="\$RANLIB -t \$oldlib~$old_postinstall_cmds" + ;; + *) + old_postinstall_cmds="\$RANLIB \$oldlib~$old_postinstall_cmds" + ;; + esac + old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib" +fi + +for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` + + +# Only perform the check for file, if the check method requires it +case $deplibs_check_method in +file_magic*) + if test "$file_magic_cmd" = '$MAGIC_CMD'; then + echo "$as_me:$LINENO: checking for ${ac_tool_prefix}file" >&5 +echo $ECHO_N "checking for ${ac_tool_prefix}file... $ECHO_C" >&6 +if test "${lt_cv_path_MAGIC_CMD+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + case $MAGIC_CMD in +[\\/*] | ?:[\\/]*) + lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD="$MAGIC_CMD" + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" + for ac_dir in $ac_dummy; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f $ac_dir/${ac_tool_prefix}file; then + lt_cv_path_MAGIC_CMD="$ac_dir/${ac_tool_prefix}file" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex="`expr \"$deplibs_check_method\" : \"file_magic \(.*\)\"`" + MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +EOF + fi ;; + esac + fi + break + fi + done + IFS="$lt_save_ifs" + MAGIC_CMD="$lt_save_MAGIC_CMD" + ;; +esac +fi + +MAGIC_CMD="$lt_cv_path_MAGIC_CMD" +if test -n "$MAGIC_CMD"; then + echo "$as_me:$LINENO: result: $MAGIC_CMD" >&5 +echo "${ECHO_T}$MAGIC_CMD" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + +if test -z "$lt_cv_path_MAGIC_CMD"; then + if test -n "$ac_tool_prefix"; then + echo "$as_me:$LINENO: checking for file" >&5 +echo $ECHO_N "checking for file... $ECHO_C" >&6 +if test "${lt_cv_path_MAGIC_CMD+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + case $MAGIC_CMD in +[\\/*] | ?:[\\/]*) + lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD="$MAGIC_CMD" + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" + for ac_dir in $ac_dummy; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. 
+ if test -f $ac_dir/file; then + lt_cv_path_MAGIC_CMD="$ac_dir/file" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex="`expr \"$deplibs_check_method\" : \"file_magic \(.*\)\"`" + MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +EOF + fi ;; + esac + fi + break + fi + done + IFS="$lt_save_ifs" + MAGIC_CMD="$lt_save_MAGIC_CMD" + ;; +esac +fi + +MAGIC_CMD="$lt_cv_path_MAGIC_CMD" +if test -n "$MAGIC_CMD"; then + echo "$as_me:$LINENO: result: $MAGIC_CMD" >&5 +echo "${ECHO_T}$MAGIC_CMD" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + else + MAGIC_CMD=: + fi +fi + + fi + ;; +esac + +enable_dlopen=no +enable_win32_dll=no + +# Check whether --enable-libtool-lock or --disable-libtool-lock was given. +if test "${enable_libtool_lock+set}" = set; then + enableval="$enable_libtool_lock" + +fi; +test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes + + +# Check whether --with-pic or --without-pic was given. +if test "${with_pic+set}" = set; then + withval="$with_pic" + pic_mode="$withval" +else + pic_mode=default +fi; +test -z "$pic_mode" && pic_mode=default + +# Use C for the default configuration in the libtool script +tagname= +lt_save_CC="$CC" +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +# Source file extension for C test sources. +ac_ext=c + +# Object file extension for compiled C test sources. +objext=o +objext=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;\n" + +# Code to be used in simple link tests +lt_simple_link_test_code='int main(){return(0);}\n' + + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# Allow CC to be a program name with arguments. +compiler=$CC + + +# save warnings/boilerplate of simple test code +ac_outfile=conftest.$ac_objext +printf "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$rm conftest* + +ac_outfile=conftest.$ac_objext +printf "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$rm conftest* + + +# +# Check for any special shared library compilation flags. 
+# +lt_prog_cc_shlib= +if test "$GCC" = no; then + case $host_os in + sco3.2v5*) + lt_prog_cc_shlib='-belf' + ;; + esac +fi +if test -n "$lt_prog_cc_shlib"; then + { echo "$as_me:$LINENO: WARNING: \`$CC' requires \`$lt_prog_cc_shlib' to build shared libraries" >&5 +echo "$as_me: WARNING: \`$CC' requires \`$lt_prog_cc_shlib' to build shared libraries" >&2;} + if echo "$old_CC $old_CFLAGS " | grep "[ ]$lt_prog_cc_shlib[ ]" >/dev/null; then : + else + { echo "$as_me:$LINENO: WARNING: add \`$lt_prog_cc_shlib' to the CC or CFLAGS env variable and reconfigure" >&5 +echo "$as_me: WARNING: add \`$lt_prog_cc_shlib' to the CC or CFLAGS env variable and reconfigure" >&2;} + lt_cv_prog_cc_can_build_shared=no + fi +fi + + +# +# Check to make sure the static flag actually works. +# +echo "$as_me:$LINENO: checking if $compiler static flag $lt_prog_compiler_static works" >&5 +echo $ECHO_N "checking if $compiler static flag $lt_prog_compiler_static works... $ECHO_C" >&6 +if test "${lt_prog_compiler_static_works+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_prog_compiler_static_works=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $lt_prog_compiler_static" + printf "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. + cat conftest.err 1>&5 + $echo "X$_lt_linker_boilerplate" | $Xsed > conftest.exp + $SED '/^$/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_prog_compiler_static_works=yes + fi + else + lt_prog_compiler_static_works=yes + fi + fi + $rm conftest* + LDFLAGS="$save_LDFLAGS" + +fi +echo "$as_me:$LINENO: result: $lt_prog_compiler_static_works" >&5 +echo "${ECHO_T}$lt_prog_compiler_static_works" >&6 + +if test x"$lt_prog_compiler_static_works" = xyes; then + : +else + lt_prog_compiler_static= +fi + + + + +lt_prog_compiler_no_builtin_flag= + +if test "$GCC" = yes; then + lt_prog_compiler_no_builtin_flag=' -fno-builtin' + + +echo "$as_me:$LINENO: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 +echo $ECHO_N "checking if $compiler supports -fno-rtti -fno-exceptions... $ECHO_C" >&6 +if test "${lt_cv_prog_compiler_rtti_exceptions+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_prog_compiler_rtti_exceptions=no + ac_outfile=conftest.$ac_objext + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="-fno-rtti -fno-exceptions" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:7066: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:7070: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. 
+ $echo "X$_lt_compiler_boilerplate" | $Xsed >conftest.exp + $SED '/^$/d' conftest.err >conftest.er2 + if test ! -s conftest.err || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_rtti_exceptions=yes + fi + fi + $rm conftest* + +fi +echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 +echo "${ECHO_T}$lt_cv_prog_compiler_rtti_exceptions" >&6 + +if test x"$lt_cv_prog_compiler_rtti_exceptions" = xyes; then + lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions" +else + : +fi + +fi + +lt_prog_compiler_wl= +lt_prog_compiler_pic= +lt_prog_compiler_static= + +echo "$as_me:$LINENO: checking for $compiler option to produce PIC" >&5 +echo $ECHO_N "checking for $compiler option to produce PIC... $ECHO_C" >&6 + + if test "$GCC" = yes; then + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_static='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static='-Bstatic' + fi + ;; + + amigaos*) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4' + ;; + + beos* | cygwin* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + + mingw* | pw32* | os2*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic='-DDLL_EXPORT' + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic='-fno-common' + ;; + + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. + lt_prog_compiler_can_build_shared=no + enable_shared=no + ;; + + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic=-Kconform_pic + fi + ;; + + hpux*) + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case "$host_cpu" in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic='-fPIC' + ;; + esac + ;; + + *) + lt_prog_compiler_pic='-fPIC' + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. + case $host_os in + aix*) + lt_prog_compiler_wl='-Wl,' + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static='-Bstatic' + else + lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp' + fi + ;; + darwin*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + case $cc_basename in + xlc*) + lt_prog_compiler_pic='-qnocommon' + lt_prog_compiler_wl='-Wl,' + ;; + esac + ;; + + mingw* | pw32* | os2*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic='-DDLL_EXPORT' + ;; + + hpux9* | hpux10* | hpux11*) + lt_prog_compiler_wl='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case "$host_cpu" in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? 
+ lt_prog_compiler_static='${wl}-a ${wl}archive' + ;; + + irix5* | irix6* | nonstopux*) + lt_prog_compiler_wl='-Wl,' + # PIC (with -KPIC) is the default. + lt_prog_compiler_static='-non_shared' + ;; + + newsos6) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + linux*) + case $cc_basename in + icc* | ecc*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-static' + ;; + pgcc* | pgf77* | pgf90*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fpic' + lt_prog_compiler_static='-static' + ;; + ccc*) + lt_prog_compiler_wl='-Wl,' + # All Alpha code is PIC. + lt_prog_compiler_static='-non_shared' + ;; + esac + ;; + + osf3* | osf4* | osf5*) + lt_prog_compiler_wl='-Wl,' + # All OSF/1 code is PIC. + lt_prog_compiler_static='-non_shared' + ;; + + sco3.2v5*) + lt_prog_compiler_pic='-Kpic' + lt_prog_compiler_static='-dn' + ;; + + solaris*) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + case $cc_basename in + f77* | f90* | f95*) + lt_prog_compiler_wl='-Qoption ld ';; + *) + lt_prog_compiler_wl='-Wl,';; + esac + ;; + + sunos4*) + lt_prog_compiler_wl='-Qoption ld ' + lt_prog_compiler_pic='-PIC' + lt_prog_compiler_static='-Bstatic' + ;; + + sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + sysv4*MP*) + if test -d /usr/nec ;then + lt_prog_compiler_pic='-Kconform_pic' + lt_prog_compiler_static='-Bstatic' + fi + ;; + + unicos*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_can_build_shared=no + ;; + + uts4*) + lt_prog_compiler_pic='-pic' + lt_prog_compiler_static='-Bstatic' + ;; + + *) + lt_prog_compiler_can_build_shared=no + ;; + esac + fi + +echo "$as_me:$LINENO: result: $lt_prog_compiler_pic" >&5 +echo "${ECHO_T}$lt_prog_compiler_pic" >&6 + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$lt_prog_compiler_pic"; then + +echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 +echo $ECHO_N "checking if $compiler PIC flag $lt_prog_compiler_pic works... $ECHO_C" >&6 +if test "${lt_prog_compiler_pic_works+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_prog_compiler_pic_works=no + ac_outfile=conftest.$ac_objext + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic -DPIC" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:7328: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:7332: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $echo "X$_lt_compiler_boilerplate" | $Xsed >conftest.exp + $SED '/^$/d' conftest.err >conftest.er2 + if test ! 
-s conftest.err || diff conftest.exp conftest.er2 >/dev/null; then + lt_prog_compiler_pic_works=yes + fi + fi + $rm conftest* + +fi +echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_works" >&5 +echo "${ECHO_T}$lt_prog_compiler_pic_works" >&6 + +if test x"$lt_prog_compiler_pic_works" = xyes; then + case $lt_prog_compiler_pic in + "" | " "*) ;; + *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; + esac +else + lt_prog_compiler_pic= + lt_prog_compiler_can_build_shared=no +fi + +fi +case "$host_os" in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic= + ;; + *) + lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" + ;; +esac + +echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5 +echo $ECHO_N "checking if $compiler supports -c -o file.$ac_objext... $ECHO_C" >&6 +if test "${lt_cv_prog_compiler_c_o+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_prog_compiler_c_o=no + $rm -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:7390: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:7394: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $echo "X$_lt_compiler_boilerplate" | $Xsed > out/conftest.exp + $SED '/^$/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.err || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o=yes + fi + fi + chmod u+w . + $rm conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $rm out/ii_files/* && rmdir out/ii_files + $rm out/* && rmdir out + cd .. + rmdir conftest + $rm conftest* + +fi +echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_c_o" >&5 +echo "${ECHO_T}$lt_cv_prog_compiler_c_o" >&6 + + +hard_links="nottested" +if test "$lt_cv_prog_compiler_c_o" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + echo "$as_me:$LINENO: checking if we can lock with hard links" >&5 +echo $ECHO_N "checking if we can lock with hard links... 
$ECHO_C" >&6 + hard_links=yes + $rm conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + echo "$as_me:$LINENO: result: $hard_links" >&5 +echo "${ECHO_T}$hard_links" >&6 + if test "$hard_links" = no; then + { echo "$as_me:$LINENO: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 +echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi + +echo "$as_me:$LINENO: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +echo $ECHO_N "checking whether the $compiler linker ($LD) supports shared libraries... $ECHO_C" >&6 + + runpath_var= + allow_undefined_flag= + enable_shared_with_static_runtimes=no + archive_cmds= + archive_expsym_cmds= + old_archive_From_new_cmds= + old_archive_from_expsyms_cmds= + export_dynamic_flag_spec= + whole_archive_flag_spec= + thread_safe_flag_spec= + hardcode_libdir_flag_spec= + hardcode_libdir_flag_spec_ld= + hardcode_libdir_separator= + hardcode_direct=no + hardcode_minus_L=no + hardcode_shlibpath_var=unsupported + link_all_deplibs=unknown + hardcode_automatic=no + module_cmds= + module_expsym_cmds= + always_export_symbols=no + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + include_expsyms= + # exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ` (' and `)$', so one must not match beginning or + # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', + # as well as any symbol that contains `d'. + exclude_expsyms="_GLOBAL_OFFSET_TABLE_" + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + extract_expsyms_cmds= + # Just being paranoid about ensuring that cc_basename is set. + for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` + + case $host_os in + cygwin* | mingw* | pw32*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + if test "$GCC" != yes; then + with_gnu_ld=no + fi + ;; + openbsd*) + with_gnu_ld=no + ;; + esac + + ld_shlibs=yes + if test "$with_gnu_ld" = yes; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='${wl}' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec='${wl}--rpath ${wl}$libdir' + export_dynamic_flag_spec='${wl}--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. 
+ if $LD --help 2>&1 | grep 'no-whole-archive' > /dev/null; then + whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + whole_archive_flag_spec= + fi + supports_anon_versioning=no + case `$LD -v 2>/dev/null` in + *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + + # See if GNU ld supports shared libraries. + case $host_os in + aix3* | aix4* | aix5*) + # On AIX/PPC, the GNU linker is very broken + if test "$host_cpu" != ia64; then + ld_shlibs=no + cat <&2 + +*** Warning: the GNU linker, at least up to release 2.9.1, is reported +*** to be unable to reliably create shared libraries on AIX. +*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to modify your PATH +*** so that a non-GNU linker is found, and then restart. + +EOF + fi + ;; + + amigaos*) + archive_cmds='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + + # Samuel A. Falvo II reports + # that the semantics of dynamic libraries on AmigaOS, at least up + # to version 4, is to share data among multiple programs linked + # with the same dynamic library. Since this doesn't match the + # behavior of shared libraries on other platforms, we can't use + # them. + ld_shlibs=no + ;; + + beos*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + allow_undefined_flag=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + ld_shlibs=no + fi + ;; + + cygwin* | mingw* | pw32*) + # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless, + # as there is no search path for DLLs. + hardcode_libdir_flag_spec='-L$libdir' + allow_undefined_flag=unsupported + always_export_symbols=no + enable_shared_with_static_runtimes=yes + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS] /s/.* \([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW] /s/.* //'\'' | sort | uniq > $export_symbols' + + if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--image-base=0x10000000 ${wl}--out-implib,$lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... 
+ archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--image-base=0x10000000 ${wl}--out-implib,$lib' + else + ld_shlibs=no + fi + ;; + + linux*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + tmp_addflag= + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + whole_archive_flag_spec= + ;; + pgf77* | pgf90* ) # Portland Group f77 and f90 compilers + whole_archive_flag_spec= + tmp_addflag=' -fpic -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + esac + archive_cmds='$CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + + if test $supports_anon_versioning = yes; then + archive_expsym_cmds='$echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + $echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + else + ld_shlibs=no + fi + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + + solaris* | sysv5*) + if $LD -v 2>&1 | grep 'BFD 2\.8' > /dev/null; then + ld_shlibs=no + cat <&2 + +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. 
+ +EOF + elif $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + + sunos4*) + archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + *) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + esac + + if test "$ld_shlibs" = no; then + runpath_var= + hardcode_libdir_flag_spec= + export_dynamic_flag_spec= + whole_archive_flag_spec= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + allow_undefined_flag=unsupported + always_export_symbols=yes + archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. + hardcode_minus_L=yes + if test "$GCC" = yes && test -z "$link_static_flag"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + hardcode_direct=unsupported + fi + ;; + + aix4* | aix5*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to AIX nm, but means don't demangle with GNU nm + if $NM -V 2>&1 | grep 'GNU' > /dev/null; then + export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[23]|aix4.[23].*|aix5*) + for ld_flag in $LDFLAGS; do + if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then + aix_use_runtimelinking=yes + break + fi + done + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. 
+ + archive_cmds='' + hardcode_direct=yes + hardcode_libdir_separator=':' + link_all_deplibs=yes + + if test "$GCC" = yes; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && \ + strings "$collect2name" | grep resolve_lib_name >/dev/null + then + # We have reworked collect2 + hardcode_direct=yes + else + # We have old collect2 + hardcode_direct=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L=yes + hardcode_libdir_flag_spec='-L$libdir' + hardcode_libdir_separator= + fi + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + always_export_symbols=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + allow_undefined_flag='-berok' + # Determine the default libpath from the value encoded in an empty executable. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. 
+if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + archive_expsym_cmds="\$CC"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$no_entry_flag \${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib' + allow_undefined_flag="-z nodefs" + archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$no_entry_flag \${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an empty executable. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. +if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + no_undefined_flag=' ${wl}-bernotok' + allow_undefined_flag=' ${wl}-berok' + # -bexpall does not export symbols beginning with underscore (_) + always_export_symbols=yes + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec=' ' + archive_cmds_need_lc=yes + # This is similar to how AIX traditionally builds it's shared libraries. 
+ archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${wl}-bE:$export_symbols ${wl}-bnoentry${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + amigaos*) + archive_cmds='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + # see comment about different semantics on the GNU ld section + ld_shlibs=no + ;; + + bsdi[45]*) + export_dynamic_flag_spec=-rdynamic + ;; + + cygwin* | mingw* | pw32*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + hardcode_libdir_flag_spec=' ' + allow_undefined_flag=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. + archive_cmds='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | $SED -e '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + old_archive_From_new_cmds='true' + # FIXME: Should let the user specify the lib program. + old_archive_cmds='lib /OUT:$oldlib$oldobjs$old_deplibs' + fix_srcfile_path='`cygpath -w "$srcfile"`' + enable_shared_with_static_runtimes=yes + ;; + + darwin* | rhapsody*) + case "$host_os" in + rhapsody* | darwin1.[012]) + allow_undefined_flag='${wl}-undefined ${wl}suppress' + ;; + *) # Darwin 1.3 on + if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then + allow_undefined_flag='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + else + case ${MACOSX_DEPLOYMENT_TARGET} in + 10.[012]) + allow_undefined_flag='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + ;; + 10.*) + allow_undefined_flag='${wl}-undefined ${wl}dynamic_lookup' + ;; + esac + fi + ;; + esac + archive_cmds_need_lc=no + hardcode_direct=no + hardcode_automatic=yes + hardcode_shlibpath_var=unsupported + whole_archive_flag_spec='' + link_all_deplibs=yes + if test "$GCC" = yes ; then + output_verbose_link_cmd='echo' + archive_cmds='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + module_cmds='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's + archive_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + case $cc_basename in + xlc*) + output_verbose_link_cmd='echo' + archive_cmds='$CC -qmkshrobj $allow_undefined_flag -o $lib 
$libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring' + module_cmds='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's + archive_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + ;; + *) + ld_shlibs=no + ;; + esac + fi + ;; + + dgux*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_shlibpath_var=no + ;; + + freebsd1*) + ld_shlibs=no + ;; + + # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). + freebsd2.2*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + # Unfortunately, older versions of FreeBSD 2 do not have this feature. + freebsd2*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes + hardcode_minus_L=yes + hardcode_shlibpath_var=no + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. + freebsd* | kfreebsd*-gnu | dragonfly*) + archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + hpux9*) + if test "$GCC" = yes; then + archive_cmds='$rm $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + archive_cmds='$rm $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ hardcode_minus_L=yes + export_dynamic_flag_spec='${wl}-E' + ;; + + hpux10* | hpux11*) + if test "$GCC" = yes -a "$with_gnu_ld" = no; then + case "$host_cpu" in + hppa*64*|ia64*) + archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case "$host_cpu" in + hppa*64*|ia64*) + archive_cmds='$LD -b +h $soname -o $lib $libobjs $deplibs $linker_flags' + ;; + *) + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + ;; + esac + fi + if test "$with_gnu_ld" = no; then + case "$host_cpu" in + hppa*64*) + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_flag_spec_ld='+b $libdir' + hardcode_libdir_separator=: + hardcode_direct=no + hardcode_shlibpath_var=no + ;; + ia64*) + hardcode_libdir_flag_spec='-L$libdir' + hardcode_direct=no + hardcode_shlibpath_var=no + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + ;; + *) + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + export_dynamic_flag_spec='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + ;; + esac + fi + ;; + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec_ld='-rpath $libdir' + fi + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + link_all_deplibs=yes + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + newsos6) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_shlibpath_var=no + ;; + + openbsd*) + hardcode_direct=yes + hardcode_shlibpath_var=no + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + export_dynamic_flag_spec='${wl}-E' + else + case $host_os in + openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-R$libdir' + ;; + *) + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + ;; + esac + fi + ;; + + os2*) + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + 
allow_undefined_flag=unsupported + archive_cmds='$echo "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$echo "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$echo DATA >> $output_objdir/$libname.def~$echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~$echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' + old_archive_From_new_cmds='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' + ;; + + osf3*) + if test "$GCC" = yes; then + allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + allow_undefined_flag=' -expect_unresolved \*' + archive_cmds='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + fi + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + ;; + + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + else + allow_undefined_flag=' -expect_unresolved \*' + archive_cmds='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; echo "-hidden">> $lib.exp~ + $LD -shared${allow_undefined_flag} -input $lib.exp $linker_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~$rm $lib.exp' + + # Both c and cxx compiler support -rpath directly + hardcode_libdir_flag_spec='-rpath $libdir' + fi + hardcode_libdir_separator=: + ;; + + sco3.2v5*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var=no + export_dynamic_flag_spec='${wl}-Bexport' + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + ;; + + solaris*) + no_undefined_flag=' -z text' + if test "$GCC" = yes; then + wlarc='${wl}' + archive_cmds='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -shared ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$rm $lib.exp' + else + wlarc='' + archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp' + fi + 
hardcode_libdir_flag_spec='-R$libdir' + hardcode_shlibpath_var=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The compiler driver will combine linker options so we + # cannot just pass the convience library names through + # without $wl, iff we do not link with $LD. + # Luckily, gcc supports the same syntax we need for Sun Studio. + # Supported since Solaris 2.6 (maybe 2.5.1?) + case $wlarc in + '') + whole_archive_flag_spec='-z allextract$convenience -z defaultextract' ;; + *) + whole_archive_flag_spec='${wl}-z ${wl}allextract`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}-z ${wl}defaultextract' ;; + esac ;; + esac + link_all_deplibs=yes + ;; + + sunos4*) + if test "x$host_vendor" = xsequent; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. + archive_cmds='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + hardcode_libdir_flag_spec='-L$libdir' + hardcode_direct=yes + hardcode_minus_L=yes + hardcode_shlibpath_var=no + ;; + + sysv4) + case $host_vendor in + sni) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. + archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags' + reload_cmds='$CC -r -o $output$reload_objs' + hardcode_direct=no + ;; + motorola) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + hardcode_shlibpath_var=no + ;; + + sysv4.3*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var=no + export_dynamic_flag_spec='-Bexport' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + ld_shlibs=yes + fi + ;; + + sysv4.2uw2*) + archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes + hardcode_minus_L=no + hardcode_shlibpath_var=no + hardcode_runpath_var=yes + runpath_var=LD_RUN_PATH + ;; + + sysv5OpenUNIX8* | sysv5UnixWare7* | sysv5uw[78]* | unixware7*) + no_undefined_flag='${wl}-z ${wl}text' + if test "$GCC" = yes; then + archive_cmds='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$CC -G ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + runpath_var='LD_RUN_PATH' + hardcode_shlibpath_var=no + ;; + + sysv5*) + no_undefined_flag=' -z text' + # $CC -shared without GNU ld will not create a library from C++ + # object files and a static libstdc++, better avoid it by now + archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp' + hardcode_libdir_flag_spec= + hardcode_shlibpath_var=no + runpath_var='LD_RUN_PATH' + ;; + + uts4*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + 
hardcode_libdir_flag_spec='-L$libdir' + hardcode_shlibpath_var=no + ;; + + *) + ld_shlibs=no + ;; + esac + fi + +echo "$as_me:$LINENO: result: $ld_shlibs" >&5 +echo "${ECHO_T}$ld_shlibs" >&6 +test "$ld_shlibs" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +# +# Do we need to explicitly link libc? +# +case "x$archive_cmds_need_lc" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $archive_cmds in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + echo "$as_me:$LINENO: checking whether -lc should be explicitly linked in" >&5 +echo $ECHO_N "checking whether -lc should be explicitly linked in... $ECHO_C" >&6 + $rm conftest* + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag + allow_undefined_flag= + if { (eval echo "$as_me:$LINENO: \"$archive_cmds 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1\"") >&5 + (eval $archive_cmds 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + then + archive_cmds_need_lc=no + else + archive_cmds_need_lc=yes + fi + allow_undefined_flag=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $rm conftest* + echo "$as_me:$LINENO: result: $archive_cmds_need_lc" >&5 +echo "${ECHO_T}$archive_cmds_need_lc" >&6 + ;; + esac + fi + ;; +esac + +echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5 +echo $ECHO_N "checking dynamic linker characteristics... $ECHO_C" >&6 +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';' >/dev/null ; then + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. 
+ sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix4* | aix5*) + version_type=linux + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | grep yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. 
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[45]*) + version_type=linux + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$host_os in + yes,cygwin* | yes,mingw* | yes,pw32*) + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i;echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $rm \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" + ;; + mingw*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';[c-zC-Z]:/' >/dev/null; then + # It is most probably a Windows format PATH printed by + # mingw gcc, but we are running on Cygwin. Gcc prints its search + # path with ; separators, and with drive letters. We can handle the + # drive letters (cygwin fileutils understands them), so leave them, + # especially as we might pass files found there to a mingw objdump, + # which wouldn't understand a cygwinified path. Ahh. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac + ;; + + *) + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' + ;; + esac + dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${versuffix}$shared_ext ${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='$(test .$module = .yes && echo .so || echo .dylib)' + # Apple's gcc prints 'gcc -print-search-dirs' doesn't operate the same. + if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"` + else + sys_lib_search_path_spec='/lib /usr/lib /usr/local/lib' + fi + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd1*) + dynamic_linker=no + ;; + +kfreebsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. + objformat=`test -x /usr/bin/objformat && /usr/bin/objformat || echo aout` + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[01]* | freebsdelf3.[01]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + *) # from 3.2 on + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + esac + ;; + +gnu*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case "$host_cpu" in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555. + postinstall_cmds='chmod 555 $lib' + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be Linux ELF. +linux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. 
+ hardcode_into_libs=yes + + # find out which ABI we are using + libsuff= + case "$host_cpu" in + x86_64*|s390x*|powerpc64*) + echo '#line 8775 "configure"' > conftest.$ac_ext + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + case `/usr/bin/file conftest.$ac_objext` in + *64-bit*) + libsuff=64 + sys_lib_search_path_spec="/lib${libsuff} /usr/lib${libsuff} /usr/local/lib${libsuff}" + ;; + esac + fi + rm -rf conftest* + ;; + esac + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:,\t]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib${libsuff} /usr/lib${libsuff} $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. + dynamic_linker='GNU/Linux ld.so' + ;; + +knetbsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +nto-qnx*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +openbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[89] | openbsd2.[89].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + 
shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +sco3.2v5*) + version_type=osf + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + ;; + +solaris*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + export_dynamic_flag_spec='${wl}-Blargedynsym' + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +uts4*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +echo "$as_me:$LINENO: result: $dynamic_linker" >&5 +echo "${ECHO_T}$dynamic_linker" >&6 +test "$dynamic_linker" = no && can_build_shared=no + +echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 +echo $ECHO_N "checking how to hardcode library paths into programs... $ECHO_C" >&6 +hardcode_action= +if test -n "$hardcode_libdir_flag_spec" || \ + test -n "$runpath_var" || \ + test "X$hardcode_automatic" = "Xyes" ; then + + # We can hardcode non-existant directories. 
+ if test "$hardcode_direct" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, )" != no && + test "$hardcode_minus_L" != no; then + # Linking always hardcodes the temporary library directory. + hardcode_action=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + hardcode_action=unsupported +fi +echo "$as_me:$LINENO: result: $hardcode_action" >&5 +echo "${ECHO_T}$hardcode_action" >&6 + +if test "$hardcode_action" = relink; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi + +striplib= +old_striplib= +echo "$as_me:$LINENO: checking whether stripping libraries is possible" >&5 +echo $ECHO_N "checking whether stripping libraries is possible... $ECHO_C" >&6 +if test -n "$STRIP" && $STRIP -V 2>&1 | grep "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP" ; then + striplib="$STRIP -x" + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 + else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + ;; + *) + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 + ;; + esac +fi + +if test "x$enable_dlopen" != xyes; then + enable_dlopen=unknown + enable_dlopen_self=unknown + enable_dlopen_self_static=unknown +else + lt_cv_dlopen=no + lt_cv_dlopen_libs= + + case $host_os in + beos*) + lt_cv_dlopen="load_add_on" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ;; + + mingw* | pw32*) + lt_cv_dlopen="LoadLibrary" + lt_cv_dlopen_libs= + ;; + + cygwin*) + lt_cv_dlopen="dlopen" + lt_cv_dlopen_libs= + ;; + + darwin*) + # if libdl is installed we need to link against it + echo "$as_me:$LINENO: checking for dlopen in -ldl" >&5 +echo $ECHO_N "checking for dlopen in -ldl... $ECHO_C" >&6 +if test "${ac_cv_lib_dl_dlopen+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char dlopen (); +int +main () +{ +dlopen (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? 
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_dl_dlopen=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_dl_dlopen=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5 +echo "${ECHO_T}$ac_cv_lib_dl_dlopen" >&6 +if test $ac_cv_lib_dl_dlopen = yes; then + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" +else + + lt_cv_dlopen="dyld" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + +fi + + ;; + + *) + echo "$as_me:$LINENO: checking for shl_load" >&5 +echo $ECHO_N "checking for shl_load... $ECHO_C" >&6 +if test "${ac_cv_func_shl_load+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +/* Define shl_load to an innocuous variant, in case declares shl_load. + For example, HP-UX 11i declares gettimeofday. */ +#define shl_load innocuous_shl_load + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char shl_load (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef shl_load + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +{ +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char shl_load (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined (__stub_shl_load) || defined (__stub___shl_load) +choke me +#else +char (*f) () = shl_load; +#endif +#ifdef __cplusplus +} +#endif + +int +main () +{ +return f != shl_load; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_func_shl_load=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_func_shl_load=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_func_shl_load" >&5 +echo "${ECHO_T}$ac_cv_func_shl_load" >&6 +if test $ac_cv_func_shl_load = yes; then + lt_cv_dlopen="shl_load" +else + echo "$as_me:$LINENO: checking for shl_load in -ldld" >&5 +echo $ECHO_N "checking for shl_load in -ldld... 
$ECHO_C" >&6 +if test "${ac_cv_lib_dld_shl_load+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char shl_load (); +int +main () +{ +shl_load (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_dld_shl_load=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_dld_shl_load=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_dld_shl_load" >&5 +echo "${ECHO_T}$ac_cv_lib_dld_shl_load" >&6 +if test $ac_cv_lib_dld_shl_load = yes; then + lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-dld" +else + echo "$as_me:$LINENO: checking for dlopen" >&5 +echo $ECHO_N "checking for dlopen... $ECHO_C" >&6 +if test "${ac_cv_func_dlopen+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +/* Define dlopen to an innocuous variant, in case declares dlopen. + For example, HP-UX 11i declares gettimeofday. */ +#define dlopen innocuous_dlopen + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char dlopen (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef dlopen + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +{ +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char dlopen (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined (__stub_dlopen) || defined (__stub___dlopen) +choke me +#else +char (*f) () = dlopen; +#endif +#ifdef __cplusplus +} +#endif + +int +main () +{ +return f != dlopen; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_func_dlopen=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_func_dlopen=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_func_dlopen" >&5 +echo "${ECHO_T}$ac_cv_func_dlopen" >&6 +if test $ac_cv_func_dlopen = yes; then + lt_cv_dlopen="dlopen" +else + echo "$as_me:$LINENO: checking for dlopen in -ldl" >&5 +echo $ECHO_N "checking for dlopen in -ldl... $ECHO_C" >&6 +if test "${ac_cv_lib_dl_dlopen+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char dlopen (); +int +main () +{ +dlopen (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_dl_dlopen=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_dl_dlopen=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5 +echo "${ECHO_T}$ac_cv_lib_dl_dlopen" >&6 +if test $ac_cv_lib_dl_dlopen = yes; then + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" +else + echo "$as_me:$LINENO: checking for dlopen in -lsvld" >&5 +echo $ECHO_N "checking for dlopen in -lsvld... $ECHO_C" >&6 +if test "${ac_cv_lib_svld_dlopen+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsvld $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. 
*/ +char dlopen (); +int +main () +{ +dlopen (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_svld_dlopen=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_svld_dlopen=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_svld_dlopen" >&5 +echo "${ECHO_T}$ac_cv_lib_svld_dlopen" >&6 +if test $ac_cv_lib_svld_dlopen = yes; then + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld" +else + echo "$as_me:$LINENO: checking for dld_link in -ldld" >&5 +echo $ECHO_N "checking for dld_link in -ldld... $ECHO_C" >&6 +if test "${ac_cv_lib_dld_dld_link+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char dld_link (); +int +main () +{ +dld_link (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_dld_dld_link=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_dld_dld_link=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_dld_dld_link" >&5 +echo "${ECHO_T}$ac_cv_lib_dld_dld_link" >&6 +if test $ac_cv_lib_dld_dld_link = yes; then + lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-dld" +fi + + +fi + + +fi + + +fi + + +fi + + +fi + + ;; + esac + + if test "x$lt_cv_dlopen" != xno; then + enable_dlopen=yes + else + enable_dlopen=no + fi + + case $lt_cv_dlopen in + dlopen) + save_CPPFLAGS="$CPPFLAGS" + test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" + + save_LDFLAGS="$LDFLAGS" + eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" + + save_LIBS="$LIBS" + LIBS="$lt_cv_dlopen_libs $LIBS" + + echo "$as_me:$LINENO: checking whether a program can dlopen itself" >&5 +echo $ECHO_N "checking whether a program can dlopen itself... $ECHO_C" >&6 +if test "${lt_cv_dlopen_self+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test "$cross_compiling" = yes; then : + lt_cv_dlopen_self=cross +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext < +#endif + +#include + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. */ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +#ifdef __cplusplus +extern "C" void exit (int); +#endif + +void fnord() { int i=42;} +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + /* dlclose (self); */ + } + + exit (status); +} +EOF + if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) 2>/dev/null + lt_status=$? + case x$lt_status in + x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;; + x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;; + x$lt_unknown|x*) lt_cv_dlopen_self=no ;; + esac + else : + # compilation failed + lt_cv_dlopen_self=no + fi +fi +rm -fr conftest* + + +fi +echo "$as_me:$LINENO: result: $lt_cv_dlopen_self" >&5 +echo "${ECHO_T}$lt_cv_dlopen_self" >&6 + + if test "x$lt_cv_dlopen_self" = xyes; then + LDFLAGS="$LDFLAGS $link_static_flag" + echo "$as_me:$LINENO: checking whether a statically linked program can dlopen itself" >&5 +echo $ECHO_N "checking whether a statically linked program can dlopen itself... 
$ECHO_C" >&6 +if test "${lt_cv_dlopen_self_static+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test "$cross_compiling" = yes; then : + lt_cv_dlopen_self_static=cross +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext < +#endif + +#include + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. */ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +#ifdef __cplusplus +extern "C" void exit (int); +#endif + +void fnord() { int i=42;} +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + /* dlclose (self); */ + } + + exit (status); +} +EOF + if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) 2>/dev/null + lt_status=$? + case x$lt_status in + x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_unknown|x*) lt_cv_dlopen_self_static=no ;; + esac + else : + # compilation failed + lt_cv_dlopen_self_static=no + fi +fi +rm -fr conftest* + + +fi +echo "$as_me:$LINENO: result: $lt_cv_dlopen_self_static" >&5 +echo "${ECHO_T}$lt_cv_dlopen_self_static" >&6 + fi + + CPPFLAGS="$save_CPPFLAGS" + LDFLAGS="$save_LDFLAGS" + LIBS="$save_LIBS" + ;; + esac + + case $lt_cv_dlopen_self in + yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; + *) enable_dlopen_self=unknown ;; + esac + + case $lt_cv_dlopen_self_static in + yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; + *) enable_dlopen_self_static=unknown ;; + esac +fi + + +# Report which librarie types wil actually be built +echo "$as_me:$LINENO: checking if libtool supports shared libraries" >&5 +echo $ECHO_N "checking if libtool supports shared libraries... $ECHO_C" >&6 +echo "$as_me:$LINENO: result: $can_build_shared" >&5 +echo "${ECHO_T}$can_build_shared" >&6 + +echo "$as_me:$LINENO: checking whether to build shared libraries" >&5 +echo $ECHO_N "checking whether to build shared libraries... $ECHO_C" >&6 +test "$can_build_shared" = "no" && enable_shared=no + +# On AIX, shared libraries and static libraries use the same namespace, and +# are all built from PIC. +case "$host_os" in +aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + +aix4* | aix5*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; +esac +echo "$as_me:$LINENO: result: $enable_shared" >&5 +echo "${ECHO_T}$enable_shared" >&6 + +echo "$as_me:$LINENO: checking whether to build static libraries" >&5 +echo $ECHO_N "checking whether to build static libraries... 
$ECHO_C" >&6 +# Make sure either enable_shared or enable_static is yes. +test "$enable_shared" = yes || enable_static=yes +echo "$as_me:$LINENO: result: $enable_static" >&5 +echo "${ECHO_T}$enable_static" >&6 + +# The else clause should only fire when bootstrapping the +# libtool distribution, otherwise you forgot to ship ltmain.sh +# with your package, and you will get complaints that there are +# no rules to generate ltmain.sh. +if test -f "$ltmain"; then + # See if we are running on zsh, and set the options which allow our commands through + # without removal of \ escapes. + if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + # Now quote all the things that may contain metacharacters while being + # careful not to overquote the AC_SUBSTed values. We take copies of the + # variables and quote the copies for generation of the libtool script. + for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC NM \ + SED SHELL STRIP \ + libname_spec library_names_spec soname_spec extract_expsyms_cmds \ + old_striplib striplib file_magic_cmd finish_cmds finish_eval \ + deplibs_check_method reload_flag reload_cmds need_locks \ + lt_cv_sys_global_symbol_pipe lt_cv_sys_global_symbol_to_cdecl \ + lt_cv_sys_global_symbol_to_c_name_address \ + sys_lib_search_path_spec sys_lib_dlsearch_path_spec \ + old_postinstall_cmds old_postuninstall_cmds \ + compiler \ + CC \ + LD \ + lt_prog_compiler_wl \ + lt_prog_compiler_pic \ + lt_prog_compiler_static \ + lt_prog_compiler_no_builtin_flag \ + export_dynamic_flag_spec \ + thread_safe_flag_spec \ + whole_archive_flag_spec \ + enable_shared_with_static_runtimes \ + old_archive_cmds \ + old_archive_from_new_cmds \ + predep_objects \ + postdep_objects \ + predeps \ + postdeps \ + compiler_lib_search_path \ + archive_cmds \ + archive_expsym_cmds \ + postinstall_cmds \ + postuninstall_cmds \ + old_archive_from_expsyms_cmds \ + allow_undefined_flag \ + no_undefined_flag \ + export_symbols_cmds \ + hardcode_libdir_flag_spec \ + hardcode_libdir_flag_spec_ld \ + hardcode_libdir_separator \ + hardcode_automatic \ + module_cmds \ + module_expsym_cmds \ + lt_cv_prog_compiler_c_o \ + exclude_expsyms \ + include_expsyms; do + + case $var in + old_archive_cmds | \ + old_archive_from_new_cmds | \ + archive_cmds | \ + archive_expsym_cmds | \ + module_cmds | \ + module_expsym_cmds | \ + old_archive_from_expsyms_cmds | \ + export_symbols_cmds | \ + extract_expsyms_cmds | reload_cmds | finish_cmds | \ + postinstall_cmds | postuninstall_cmds | \ + old_postinstall_cmds | old_postuninstall_cmds | \ + sys_lib_search_path_spec | sys_lib_dlsearch_path_spec) + # Double-quote double-evaled strings. + eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\"" + ;; + *) + eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\"" + ;; + esac + done + + case $lt_echo in + *'\$0 --fallback-echo"') + lt_echo=`$echo "X$lt_echo" | $Xsed -e 's/\\\\\\\$0 --fallback-echo"$/$0 --fallback-echo"/'` + ;; + esac + +cfgfile="${ofile}T" + trap "$rm \"$cfgfile\"; exit 1" 1 2 15 + $rm -f "$cfgfile" + { echo "$as_me:$LINENO: creating $ofile" >&5 +echo "$as_me: creating $ofile" >&6;} + + cat <<__EOF__ >> "$cfgfile" +#! $SHELL + +# `$echo "$cfgfile" | sed 's%^.*/%%'` - Provide generalized library-building support services. +# Generated automatically by $PROGRAM (GNU $PACKAGE $VERSION$TIMESTAMP) +# NOTE: Changes made to this file will be lost: look at ltmain.sh. 
+# +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001 +# Free Software Foundation, Inc. +# +# This file is part of GNU Libtool: +# Originally by Gordon Matzigkeit , 1996 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# A sed program that does not truncate output. +SED=$lt_SED + +# Sed that helps us avoid accidentally triggering echo(1) options like -n. +Xsed="$SED -e 1s/^X//" + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +# The names of the tagged configurations supported by this script. +available_tags= + +# ### BEGIN LIBTOOL CONFIG + +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc + +# Whether or not to disallow shared libs when runtime libs are static +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# An echo program that does not interpret backslashes. +echo=$lt_echo + +# The archiver. +AR=$lt_AR +AR_FLAGS=$lt_AR_FLAGS + +# A C compiler. +LTCC=$lt_LTCC + +# A language-specific compiler. +CC=$lt_compiler + +# Is the compiler the GNU C compiler? +with_gcc=$GCC + +# An ERE matcher. +EGREP=$lt_EGREP + +# The linker used to build libraries. +LD=$lt_LD + +# Whether we need hard or soft links. +LN_S=$lt_LN_S + +# A BSD-compatible nm program. +NM=$lt_NM + +# A symbol stripping program +STRIP=$lt_STRIP + +# Used to examine libraries when file_magic_cmd begins "file" +MAGIC_CMD=$MAGIC_CMD + +# Used on cygwin: DLL creation program. +DLLTOOL="$DLLTOOL" + +# Used on cygwin: object dumper. +OBJDUMP="$OBJDUMP" + +# Used on cygwin: assembler. +AS="$AS" + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl + +# Object file suffix (normally "o"). +objext="$ac_objext" + +# Old archive suffix (normally "a"). 
+libext="$libext" + +# Shared library suffix (normally ".so"). +shrext_cmds='$shrext_cmds' + +# Executable file suffix (normally ""). +exeext="$exeext" + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic +pic_mode=$pic_mode + +# What is the maximum length of a command? +max_cmd_len=$lt_cv_sys_max_cmd_len + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Do we need the lib prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec + +# Compiler flag to generate thread-safe objects. +thread_safe_flag_spec=$lt_thread_safe_flag_spec + +# Library versioning type. +version_type=$version_type + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME. +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Commands used to build and install an old-style archive. +RANLIB=$lt_RANLIB +old_archive_cmds=$lt_old_archive_cmds +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds + +# Commands used to build and install a shared archive. +archive_cmds=$lt_archive_cmds +archive_expsym_cmds=$lt_archive_expsym_cmds +postinstall_cmds=$lt_postinstall_cmds +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to build a loadable module (assumed same as above if empty) +module_cmds=$lt_module_cmds +module_expsym_cmds=$lt_module_expsym_cmds + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + +# Dependencies to place before the objects being linked to create a +# shared library. +predep_objects=$lt_predep_objects + +# Dependencies to place after the objects being linked to create a +# shared library. +postdep_objects=$lt_postdep_objects + +# Dependencies to place before the objects being linked to create a +# shared library. +predeps=$lt_predeps + +# Dependencies to place after the objects being linked to create a +# shared library. +postdeps=$lt_postdeps + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_compiler_lib_search_path + +# Method to check whether dependent libraries are shared objects. 
+deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method == file_magic. +file_magic_cmd=$lt_file_magic_cmd + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag + +# Flag that forces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# Same as above, but a single script fragment to be evaled but not shown. +finish_eval=$lt_finish_eval + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm in a C name address pair +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# This is the shared library runtime path variable. +runpath_var=$runpath_var + +# This is the shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist. +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec + +# If ld is used when linking, flag to hardcode \$libdir into +# a binary during linking. This must work even if \$libdir does +# not exist. +hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld + +# Whether we need a single -rpath flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator + +# Set to yes if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the +# resulting binary. +hardcode_direct=$hardcode_direct + +# Set to yes if using the -LDIR flag during linking hardcodes DIR into the +# resulting binary. +hardcode_minus_L=$hardcode_minus_L + +# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into +# the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var + +# Set to yes if building a shared library automatically hardcodes DIR into the library +# and all subsequent libraries and executables linked against it. +hardcode_automatic=$hardcode_automatic + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at relink time. +variables_saved_for_relink="$variables_saved_for_relink" + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs + +# Compile-time system search path for libraries +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec + +# Run-time system search path for libraries +sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec + +# Fix the shell variable \$srcfile for the compiler. +fix_srcfile_path="$fix_srcfile_path" + +# Set to yes if exported symbols are required. +always_export_symbols=$always_export_symbols + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Symbols that should not be listed in the preloaded symbols. 
+exclude_expsyms=$lt_exclude_expsyms + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms + +# ### END LIBTOOL CONFIG + +__EOF__ + + + case $host_os in + aix3*) + cat <<\EOF >> "$cfgfile" + +# AIX sometimes has problems with the GCC collect2 program. For some +# reason, if we set the COLLECT_NAMES environment variable, the problems +# vanish in a puff of smoke. +if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES +fi +EOF + ;; + esac + + # We use sed instead of cat because bash on DJGPP gets confused if + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? + sed '$q' "$ltmain" >> "$cfgfile" || (rm -f "$cfgfile"; exit 1) + + mv -f "$cfgfile" "$ofile" || \ + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" + +else + # If there is no Makefile yet, we rely on a make rule to execute + # `config.status --recheck' to rerun these tests and create the + # libtool script then. + ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` + if test -f "$ltmain_in"; then + test -f Makefile && make "$ltmain" + fi +fi + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +CC="$lt_save_CC" + + +# Check whether --with-tags or --without-tags was given. +if test "${with_tags+set}" = set; then + withval="$with_tags" + tagnames="$withval" +fi; + +if test -f "$ltmain" && test -n "$tagnames"; then + if test ! -f "${ofile}"; then + { echo "$as_me:$LINENO: WARNING: output file \`$ofile' does not exist" >&5 +echo "$as_me: WARNING: output file \`$ofile' does not exist" >&2;} + fi + + if test -z "$LTCC"; then + eval "`$SHELL ${ofile} --config | grep '^LTCC='`" + if test -z "$LTCC"; then + { echo "$as_me:$LINENO: WARNING: output file \`$ofile' does not look like a libtool script" >&5 +echo "$as_me: WARNING: output file \`$ofile' does not look like a libtool script" >&2;} + else + { echo "$as_me:$LINENO: WARNING: using \`LTCC=$LTCC', extracted from \`$ofile'" >&5 +echo "$as_me: WARNING: using \`LTCC=$LTCC', extracted from \`$ofile'" >&2;} + fi + fi + + # Extract list of available tagged configurations in $ofile. + # Note that this assumes the entire list is on one line. + available_tags=`grep "^available_tags=" "${ofile}" | $SED -e 's/available_tags=\(.*$\)/\1/' -e 's/\"//g'` + + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for tagname in $tagnames; do + IFS="$lt_save_ifs" + # Check whether tagname contains only valid characters + case `$echo "X$tagname" | $Xsed -e 's:[-_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890,/]::g'` in + "") ;; + *) { { echo "$as_me:$LINENO: error: invalid tag name: $tagname" >&5 +echo "$as_me: error: invalid tag name: $tagname" >&2;} + { (exit 1); exit 1; }; } + ;; + esac + + if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$" < "${ofile}" > /dev/null + then + { { echo "$as_me:$LINENO: error: tag name \"$tagname\" already exists" >&5 +echo "$as_me: error: tag name \"$tagname\" already exists" >&2;} + { (exit 1); exit 1; }; } + fi + + # Update the list of available tags. 
+ if test -n "$tagname"; then + echo appending configuration tag \"$tagname\" to $ofile + + case $tagname in + CXX) + if test -n "$CXX" && ( test "X$CXX" != "Xno" && + ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || + (test "X$CXX" != "Xg++"))) ; then + ac_ext=cc +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + + + +archive_cmds_need_lc_CXX=no +allow_undefined_flag_CXX= +always_export_symbols_CXX=no +archive_expsym_cmds_CXX= +export_dynamic_flag_spec_CXX= +hardcode_direct_CXX=no +hardcode_libdir_flag_spec_CXX= +hardcode_libdir_flag_spec_ld_CXX= +hardcode_libdir_separator_CXX= +hardcode_minus_L_CXX=no +hardcode_automatic_CXX=no +module_cmds_CXX= +module_expsym_cmds_CXX= +link_all_deplibs_CXX=unknown +old_archive_cmds_CXX=$old_archive_cmds +no_undefined_flag_CXX= +whole_archive_flag_spec_CXX= +enable_shared_with_static_runtimes_CXX=no + +# Dependencies to place before and after the object being linked: +predep_objects_CXX= +postdep_objects_CXX= +predeps_CXX= +postdeps_CXX= +compiler_lib_search_path_CXX= + +# Source file extension for C++ test sources. +ac_ext=cc + +# Object file extension for compiled C++ test sources. +objext=o +objext_CXX=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;\n" + +# Code to be used in simple link tests +lt_simple_link_test_code='int main(int, char *) { return(0); }\n' + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# Allow CC to be a program name with arguments. +compiler=$CC + + +# save warnings/boilerplate of simple test code +ac_outfile=conftest.$ac_objext +printf "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$rm conftest* + +ac_outfile=conftest.$ac_objext +printf "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$rm conftest* + + +# Allow CC to be a program name with arguments. +lt_save_CC=$CC +lt_save_LD=$LD +lt_save_GCC=$GCC +GCC=$GXX +lt_save_with_gnu_ld=$with_gnu_ld +lt_save_path_LD=$lt_cv_path_LD +if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then + lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx +else + unset lt_cv_prog_gnu_ld +fi +if test -n "${lt_cv_path_LDCXX+set}"; then + lt_cv_path_LD=$lt_cv_path_LDCXX +else + unset lt_cv_path_LD +fi +test -z "${LDCXX+set}" || LD=$LDCXX +CC=${CXX-"c++"} +compiler=$CC +compiler_CXX=$CC +for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` + + +# We don't want -fno-exception wen compiling C++ code, so set the +# no_builtin_flag separately +if test "$GXX" = yes; then + lt_prog_compiler_no_builtin_flag_CXX=' -fno-builtin' +else + lt_prog_compiler_no_builtin_flag_CXX= +fi + +if test "$GXX" = yes; then + # Set up default GNU C++ configuration + + +# Check whether --with-gnu-ld or --without-gnu-ld was given. 
+if test "${with_gnu_ld+set}" = set; then + withval="$with_gnu_ld" + test "$withval" = no || with_gnu_ld=yes +else + with_gnu_ld=no +fi; +ac_prog=ld +if test "$GCC" = yes; then + # Check if gcc -print-prog-name=ld gives a path. + echo "$as_me:$LINENO: checking for ld used by $CC" >&5 +echo $ECHO_N "checking for ld used by $CC... $ECHO_C" >&6 + case $host in + *-*-mingw*) + # gcc leaves a trailing carriage return which upsets mingw + ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; + esac + case $ac_prog in + # Accept absolute paths. + [\\/]* | ?:[\\/]*) + re_direlt='/[^/][^/]*/\.\./' + # Canonicalize the pathname of ld + ac_prog=`echo $ac_prog| $SED 's%\\\\%/%g'` + while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do + ac_prog=`echo $ac_prog| $SED "s%$re_direlt%/%"` + done + test -z "$LD" && LD="$ac_prog" + ;; + "") + # If it fails, then pretend we aren't using GCC. + ac_prog=ld + ;; + *) + # If it is relative, then search for the first ld in PATH. + with_gnu_ld=unknown + ;; + esac +elif test "$with_gnu_ld" = yes; then + echo "$as_me:$LINENO: checking for GNU ld" >&5 +echo $ECHO_N "checking for GNU ld... $ECHO_C" >&6 +else + echo "$as_me:$LINENO: checking for non-GNU ld" >&5 +echo $ECHO_N "checking for non-GNU ld... $ECHO_C" >&6 +fi +if test "${lt_cv_path_LD+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -z "$LD"; then + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then + lt_cv_path_LD="$ac_dir/$ac_prog" + # Check to see if the program is GNU ld. I'd rather use --version, + # but apparently some GNU ld's only accept -v. + # Break only if it was the GNU/non-GNU ld that we prefer. + case `"$lt_cv_path_LD" -v 2>&1 &5 +echo "${ECHO_T}$LD" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi +test -z "$LD" && { { echo "$as_me:$LINENO: error: no acceptable ld found in \$PATH" >&5 +echo "$as_me: error: no acceptable ld found in \$PATH" >&2;} + { (exit 1); exit 1; }; } +echo "$as_me:$LINENO: checking if the linker ($LD) is GNU ld" >&5 +echo $ECHO_N "checking if the linker ($LD) is GNU ld... $ECHO_C" >&6 +if test "${lt_cv_prog_gnu_ld+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + # I'd rather use --version here, but apparently some GNU ld's only accept -v. +case `$LD -v 2>&1 &5 +echo "${ECHO_T}$lt_cv_prog_gnu_ld" >&6 +with_gnu_ld=$lt_cv_prog_gnu_ld + + + + # Check if GNU C++ uses GNU ld as the underlying linker, since the + # archiving commands below assume that GNU ld is being used. + if test "$with_gnu_ld" = yes; then + archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + + hardcode_libdir_flag_spec_CXX='${wl}--rpath ${wl}$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + + # If archive_cmds runs LD, not CC, wlarc should be empty + # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to + # investigate it a little bit more. (MM) + wlarc='${wl}' + + # ancient GNU ld didn't support --whole-archive et. al. 
+ if eval "`$CC -print-prog-name=ld` --help 2>&1" | \ + grep 'no-whole-archive' > /dev/null; then + whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + whole_archive_flag_spec_CXX= + fi + else + with_gnu_ld=no + wlarc= + + # A generic and very simple default shared library creation + # command for GNU C++ for the case where it uses the native + # linker, instead of GNU ld. If possible, this setting should + # overridden to take advantage of the native linker features on + # the platform it is being used on. + archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + fi + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' + +else + GXX=no + with_gnu_ld=no + wlarc= +fi + +# PORTME: fill in a description of your system's C++ link characteristics +echo "$as_me:$LINENO: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +echo $ECHO_N "checking whether the $compiler linker ($LD) supports shared libraries... $ECHO_C" >&6 +ld_shlibs_CXX=yes +case $host_os in + aix3*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + aix4* | aix5*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[23]|aix4.[23].*|aix5*) + for ld_flag in $LDFLAGS; do + case $ld_flag in + *-brtl*) + aix_use_runtimelinking=yes + break + ;; + esac + done + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + archive_cmds_CXX='' + hardcode_direct_CXX=yes + hardcode_libdir_separator_CXX=':' + link_all_deplibs_CXX=yes + + if test "$GXX" = yes; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && \ + strings "$collect2name" | grep resolve_lib_name >/dev/null + then + # We have reworked collect2 + hardcode_direct_CXX=yes + else + # We have old collect2 + hardcode_direct_CXX=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L_CXX=yes + hardcode_libdir_flag_spec_CXX='-L$libdir' + hardcode_libdir_separator_CXX= + fi + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. 
The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + always_export_symbols_CXX=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + allow_undefined_flag_CXX='-berok' + # Determine the default libpath from the value encoded in an empty executable. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. +if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" + + archive_expsym_cmds_CXX="\$CC"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$no_entry_flag \${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + hardcode_libdir_flag_spec_CXX='${wl}-R $libdir:/usr/lib:/lib' + allow_undefined_flag_CXX="-z nodefs" + archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$no_entry_flag \${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an empty executable. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! 
-s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. +if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + no_undefined_flag_CXX=' ${wl}-bernotok' + allow_undefined_flag_CXX=' ${wl}-berok' + # -bexpall does not export symbols beginning with underscore (_) + always_export_symbols_CXX=yes + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec_CXX=' ' + archive_cmds_need_lc_CXX=yes + # This is similar to how AIX traditionally builds it's shared libraries. + archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${wl}-bE:$export_symbols ${wl}-bnoentry${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + chorus*) + case $cc_basename in + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + + cygwin* | mingw* | pw32*) + # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless, + # as there is no search path for DLLs. + hardcode_libdir_flag_spec_CXX='-L$libdir' + allow_undefined_flag_CXX=unsupported + always_export_symbols_CXX=no + enable_shared_with_static_runtimes_CXX=yes + + if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then + archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--image-base=0x10000000 ${wl}--out-implib,$lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... 
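The AIX branches above obtain the default library path by linking an empty program and reading its loader section back with dump. A sketch of that step on its own (AIX only; probe.c is a hypothetical file containing just int main(void){return 0;}):

    # AIX only: read the default libpath the linker recorded in an executable.
    cc -o conftest_probe probe.c
    aix_libpath=`dump -H conftest_probe 2>/dev/null | sed -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; }
    }'`
    # 64-bit objects need -X64; otherwise fall back to the historical default.
    test -z "$aix_libpath" && aix_libpath=`dump -HX64 conftest_probe 2>/dev/null | sed -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; }
    }'`
    test -z "$aix_libpath" && aix_libpath=/usr/lib:/lib
    echo "default libpath: $aix_libpath"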
+ archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--image-base=0x10000000 ${wl}--out-implib,$lib' + else + ld_shlibs_CXX=no + fi + ;; + darwin* | rhapsody*) + case "$host_os" in + rhapsody* | darwin1.[012]) + allow_undefined_flag_CXX='${wl}-undefined ${wl}suppress' + ;; + *) # Darwin 1.3 on + if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then + allow_undefined_flag_CXX='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + else + case ${MACOSX_DEPLOYMENT_TARGET} in + 10.[012]) + allow_undefined_flag_CXX='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + ;; + 10.*) + allow_undefined_flag_CXX='${wl}-undefined ${wl}dynamic_lookup' + ;; + esac + fi + ;; + esac + archive_cmds_need_lc_CXX=no + hardcode_direct_CXX=no + hardcode_automatic_CXX=yes + hardcode_shlibpath_var_CXX=unsupported + whole_archive_flag_spec_CXX='' + link_all_deplibs_CXX=yes + + if test "$GXX" = yes ; then + lt_int_apple_cc_single_mod=no + output_verbose_link_cmd='echo' + if $CC -dumpspecs 2>&1 | $EGREP 'single_module' >/dev/null ; then + lt_int_apple_cc_single_mod=yes + fi + if test "X$lt_int_apple_cc_single_mod" = Xyes ; then + archive_cmds_CXX='$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + else + archive_cmds_CXX='$CC -r -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + fi + module_cmds_CXX='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's + if test "X$lt_int_apple_cc_single_mod" = Xyes ; then + archive_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + archive_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -r -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + fi + module_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + case $cc_basename in + xlc*) + output_verbose_link_cmd='echo' + archive_cmds_CXX='$CC -qmkshrobj ${wl}-single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring' + module_cmds_CXX='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's + 
archive_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj ${wl}-single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + ;; + *) + ld_shlibs_CXX=no + ;; + esac + fi + ;; + + dgux*) + case $cc_basename in + ec++*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + ghcx*) + # Green Hills C++ Compiler + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + freebsd[12]*) + # C++ shared libraries reported to be fairly broken before switch to ELF + ld_shlibs_CXX=no + ;; + freebsd-elf*) + archive_cmds_need_lc_CXX=no + ;; + freebsd* | kfreebsd*-gnu | dragonfly*) + # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF + # conventions + ld_shlibs_CXX=yes + ;; + gnu*) + ;; + hpux9*) + hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_CXX=: + export_dynamic_flag_spec_CXX='${wl}-E' + hardcode_direct_CXX=yes + hardcode_minus_L_CXX=yes # Not in the search PATH, + # but as the default + # location of the library. + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + aCC*) + archive_cmds_CXX='$rm $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
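The Darwin commands above rewrite the export list because Mach-O prepends an underscore to C symbol names. The same sed invocation, run on a small example list (the two symbol names are only placeholders):

    # Build a throwaway export list: a comment plus two symbol names.
    printf '%s\n' '# public entry points' hdfsConnect hdfsOpenFile > exported.sym
    # Strip comments and leading blanks, then prefix each remaining name with "_".
    sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < exported.sym
    # Prints a blank line (the stripped comment), then _hdfsConnect and
    # _hdfsOpenFile -- the file that nmedit is later pointed at with -s.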
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | grep "[-]L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + *) + if test "$GXX" = yes; then + archive_cmds_CXX='$rm $output_objdir/$soname~$CC -shared -nostdlib -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + hpux10*|hpux11*) + if test $with_gnu_ld = no; then + case "$host_cpu" in + hppa*64*) + hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir' + hardcode_libdir_flag_spec_ld_CXX='+b $libdir' + hardcode_libdir_separator_CXX=: + ;; + ia64*) + hardcode_libdir_flag_spec_CXX='-L$libdir' + ;; + *) + hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_CXX=: + export_dynamic_flag_spec_CXX='${wl}-E' + ;; + esac + fi + case "$host_cpu" in + hppa*64*) + hardcode_direct_CXX=no + hardcode_shlibpath_var_CXX=no + ;; + ia64*) + hardcode_direct_CXX=no + hardcode_shlibpath_var_CXX=no + hardcode_minus_L_CXX=yes # Not in the search PATH, + # but as the default + # location of the library. + ;; + *) + hardcode_direct_CXX=yes + hardcode_minus_L_CXX=yes # Not in the search PATH, + # but as the default + # location of the library. + ;; + esac + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + aCC*) + case "$host_cpu" in + hppa*64*|ia64*) + archive_cmds_CXX='$LD -b +h $soname -o $lib $linker_flags $libobjs $deplibs' + ;; + *) + archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | grep "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + *) + if test "$GXX" = yes; then + if test $with_gnu_ld = no; then + case "$host_cpu" in + ia64*|hppa*64*) + archive_cmds_CXX='$LD -b +h $soname -o $lib $linker_flags $libobjs $deplibs' + ;; + *) + archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + fi + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + irix5* | irix6*) + case $cc_basename in + CC*) + # SGI C++ + archive_cmds_CXX='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + + # Archives containing C++ object files must be created using + # "CC -ar", where "CC" is the IRIX C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. 
+ old_archive_cmds_CXX='$CC -ar -WR,-u -o $oldlib $oldobjs' + ;; + *) + if test "$GXX" = yes; then + if test "$with_gnu_ld" = no; then + archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` -o $lib' + fi + fi + link_all_deplibs_CXX=yes + ;; + esac + hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_CXX=: + ;; + linux*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + archive_expsym_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | grep "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + + hardcode_libdir_flag_spec_CXX='${wl}--rpath,$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + + # Archives containing C++ object files must be created using + # "CC -Bstatic", where "CC" is the KAI C++ compiler. + old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' + ;; + icpc*) + # Intel C++ + with_gnu_ld=yes + # version 8.0 and above of icpc choke on multiply defined symbols + # if we add $predep_objects and $postdep_objects, however 7.1 and + # earlier do not add the objects themselves. 
+ case `$CC -V 2>&1` in + *"Version 7."*) + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + *) # Version 8.0 or newer + tmp_idyn= + case $host_cpu in + ia64*) tmp_idyn=' -i_dynamic';; + esac + archive_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + esac + archive_cmds_need_lc_CXX=no + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + whole_archive_flag_spec_CXX='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + ;; + pgCC*) + # Portland Group C++ compiler + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' + + hardcode_libdir_flag_spec_CXX='${wl}--rpath ${wl}$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + whole_archive_flag_spec_CXX='' + ;; + cxx*) + # Compaq C++ + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols' + + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec_CXX='-rpath $libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
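The icpc comment above describes a version-dependent choice; the sketch below shows that banner check on its own, assuming an Intel C++ driver named icpc is on PATH (the variable is illustrative):

    CXX=${CXX:-icpc}
    case `$CXX -V 2>&1` in
      *"Version 7."*)
        # 7.x does not pull in its own support objects, so pass them explicitly.
        extra_objs='$predep_objects $postdep_objects' ;;
      *)
        # 8.0 and newer add their own objects; repeating them would cause
        # multiply-defined symbols.
        extra_objs= ;;
    esac
    echo "extra objects on the link line: $extra_objs"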
+ output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + esac + ;; + lynxos*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + m88k*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + mvs*) + case $cc_basename in + cxx*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + archive_cmds_CXX='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' + wlarc= + hardcode_libdir_flag_spec_CXX='-R$libdir' + hardcode_direct_CXX=yes + hardcode_shlibpath_var_CXX=no + fi + # Workaround some broken pre-1.5 toolchains + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' + ;; + openbsd2*) + # C++ shared libraries are fairly broken + ld_shlibs_CXX=no + ;; + openbsd*) + hardcode_direct_CXX=yes + hardcode_shlibpath_var_CXX=no + archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' + export_dynamic_flag_spec_CXX='${wl}-E' + whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + fi + output_verbose_link_cmd='echo' + ;; + osf3*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + hardcode_libdir_separator_CXX=: + + # Archives containing C++ object files must be created using + # "CC -Bstatic", where "CC" is the KAI C++ compiler. + old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' + + ;; + RCC*) + # Rational C++ 2.4.1 + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + cxx*) + allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && echo ${wl}-set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + + hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld" | grep -v "ld:"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + *) + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds_CXX='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + + hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' + + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + osf4* | osf5*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + hardcode_libdir_separator_CXX=: + + # Archives containing C++ object files must be created using + # the KAI C++ compiler. + old_archive_cmds_CXX='$CC -o $oldlib $oldobjs' + ;; + RCC*) + # Rational C++ 2.4.1 + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + cxx*) + allow_undefined_flag_CXX=' -expect_unresolved \*' + archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds_CXX='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ + echo "-hidden">> $lib.exp~ + $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname -Wl,-input -Wl,$lib.exp `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~ + $rm $lib.exp' + + hardcode_libdir_flag_spec_CXX='-rpath $libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld" | grep -v "ld:"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + *) + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds_CXX='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + + hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' + + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + psos*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + sco*) + archive_cmds_need_lc_CXX=no + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + lcc*) + # Lucid + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + solaris*) + case $cc_basename in + CC*) + # Sun C++ 4.2, 5.x and Centerline C++ + no_undefined_flag_CXX=' -zdefs' + archive_cmds_CXX='$CC -G${allow_undefined_flag} -nolib -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + archive_expsym_cmds_CXX='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -G${allow_undefined_flag} -nolib ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' + + hardcode_libdir_flag_spec_CXX='-R$libdir' + hardcode_shlibpath_var_CXX=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The C++ compiler is used as linker so we must use $wl + # flag to pass the commands to the underlying system + # linker. We must also pass each convience library through + # to the system linker between allextract/defaultextract. + # The C++ compiler will combine linker options so we + # cannot just pass the convience library names through + # without $wl. + # Supported since Solaris 2.6 (maybe 2.5.1?) + whole_archive_flag_spec_CXX='${wl}-z ${wl}allextract`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}-z ${wl}defaultextract' + ;; + esac + link_all_deplibs_CXX=yes + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`$CC -G $CFLAGS -v conftest.$objext 2>&1 | grep "\-[LR]"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. + old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' + ;; + gcx*) + # Green Hills C++ Compiler + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + + # The C++ compiler must be used to create the archive. + old_archive_cmds_CXX='$CC $LDFLAGS -archive -o $oldlib $oldobjs' + ;; + *) + # GNU C++ compiler with Solaris linker + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + no_undefined_flag_CXX=' ${wl}-z ${wl}defs' + if $CC --version | grep -v '^2\.7' > /dev/null; then + archive_cmds_CXX='$CC -shared -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + archive_expsym_cmds_CXX='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -shared -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd="$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep \"\-L\"" + else + # g++ 2.7 appears to require `-G' NOT `-shared' on this + # platform. + archive_cmds_CXX='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + archive_expsym_cmds_CXX='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd="$CC -G $CFLAGS -v conftest.$objext 2>&1 | grep \"\-L\"" + fi + + hardcode_libdir_flag_spec_CXX='${wl}-R $wl$libdir' + fi + ;; + esac + ;; + sysv5OpenUNIX8* | sysv5UnixWare7* | sysv5uw[78]* | unixware7*) + archive_cmds_need_lc_CXX=no + ;; + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + vxworks*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; +esac +echo "$as_me:$LINENO: result: $ld_shlibs_CXX" >&5 +echo "${ECHO_T}$ld_shlibs_CXX" >&6 +test "$ld_shlibs_CXX" = no && can_build_shared=no + +GCC_CXX="$GXX" +LD_CXX="$LD" + + +cat > conftest.$ac_ext <&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + # Parse the compiler output and extract the necessary + # objects, libraries and library flags. + + # Sentinel used to keep track of whether or not we are before + # the conftest object file. 
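On Solaris the export list becomes a linker map file rather than a set of flags; the archive_expsym_cmds above assemble it with echo and sed. The same transformation on a stand-alone example (symbol names are placeholders):

    # Turn a plain list of symbol names into a Solaris linker map file.
    printf '%s\n' hdfsConnect hdfsOpenFile > exported.sym
    echo "{ global:"                    >  conftest.exp
    sed -e "s/\(.*\)/\1;/" exported.sym >> conftest.exp
    echo "local: *; };"                 >> conftest.exp
    cat conftest.exp
    # conftest.exp is then handed to the Solaris linker via -M so that only
    # the listed symbols are exported.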
+ pre_test_object_deps_done=no + + # The `*' in the case matches for architectures that use `case' in + # $output_verbose_cmd can trigger glob expansion during the loop + # eval without this substitution. + output_verbose_link_cmd="`$echo \"X$output_verbose_link_cmd\" | $Xsed -e \"$no_glob_subst\"`" + + for p in `eval $output_verbose_link_cmd`; do + case $p in + + -L* | -R* | -l*) + # Some compilers place space between "-{L,R}" and the path. + # Remove the space. + if test $p = "-L" \ + || test $p = "-R"; then + prev=$p + continue + else + prev= + fi + + if test "$pre_test_object_deps_done" = no; then + case $p in + -L* | -R*) + # Internal compiler library paths should come after those + # provided the user. The postdeps already come after the + # user supplied libs so there is no need to process them. + if test -z "$compiler_lib_search_path_CXX"; then + compiler_lib_search_path_CXX="${prev}${p}" + else + compiler_lib_search_path_CXX="${compiler_lib_search_path_CXX} ${prev}${p}" + fi + ;; + # The "-l" case would never come before the object being + # linked, so don't bother handling this case. + esac + else + if test -z "$postdeps_CXX"; then + postdeps_CXX="${prev}${p}" + else + postdeps_CXX="${postdeps_CXX} ${prev}${p}" + fi + fi + ;; + + *.$objext) + # This assumes that the test object file only shows up + # once in the compiler output. + if test "$p" = "conftest.$objext"; then + pre_test_object_deps_done=yes + continue + fi + + if test "$pre_test_object_deps_done" = no; then + if test -z "$predep_objects_CXX"; then + predep_objects_CXX="$p" + else + predep_objects_CXX="$predep_objects_CXX $p" + fi + else + if test -z "$postdep_objects_CXX"; then + postdep_objects_CXX="$p" + else + postdep_objects_CXX="$postdep_objects_CXX $p" + fi + fi + ;; + + *) ;; # Ignore the rest. + + esac + done + + # Clean up. + rm -f a.out a.exe +else + echo "libtool.m4: error: problem compiling CXX test program" +fi + +$rm -f confest.$objext + +case " $postdeps_CXX " in +*" -lc "*) archive_cmds_need_lc_CXX=no ;; +esac + +lt_prog_compiler_wl_CXX= +lt_prog_compiler_pic_CXX= +lt_prog_compiler_static_CXX= + +echo "$as_me:$LINENO: checking for $compiler option to produce PIC" >&5 +echo $ECHO_N "checking for $compiler option to produce PIC... $ECHO_C" >&6 + + # C++ specific cases for pic, static, wl, etc. + if test "$GXX" = yes; then + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_CXX='-Bstatic' + fi + ;; + amigaos*) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + lt_prog_compiler_pic_CXX='-m68020 -resident32 -malways-restore-a4' + ;; + beos* | cygwin* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + mingw* | os2* | pw32*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). 
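The loop above is what consumes the verbose link output: it keeps the -L/-R/-l tokens the C++ driver adds implicitly (libtool's predeps/postdeps). A reduced sketch of the idea, assuming a GNU-style g++ driver:

    CXX=${CXX:-g++}
    echo 'int main() { return 0; }' > conftest.cpp
    $CXX -c conftest.cpp -o conftest.o
    # Re-link verbosely and keep only the search-path and library tokens.
    for tok in `$CXX -shared -v conftest.o -o conftest.so 2>&1`; do
      case $tok in
        -L* | -R* | -l*) echo "implicit link flag: $tok" ;;
      esac
    done
    rm -f conftest.cpp conftest.o conftest.so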
+ lt_prog_compiler_pic_CXX='-DDLL_EXPORT' + ;; + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic_CXX='-fno-common' + ;; + *djgpp*) + # DJGPP does not support shared libraries at all + lt_prog_compiler_pic_CXX= + ;; + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic_CXX=-Kconform_pic + fi + ;; + hpux*) + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case "$host_cpu" in + hppa*64*|ia64*) + ;; + *) + lt_prog_compiler_pic_CXX='-fPIC' + ;; + esac + ;; + *) + lt_prog_compiler_pic_CXX='-fPIC' + ;; + esac + else + case $host_os in + aix4* | aix5*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_CXX='-Bstatic' + else + lt_prog_compiler_static_CXX='-bnso -bI:/lib/syscalls.exp' + fi + ;; + chorus*) + case $cc_basename in + cxch68*) + # Green Hills C++ Compiler + # _LT_AC_TAGVAR(lt_prog_compiler_static, CXX)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" + ;; + esac + ;; + darwin*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + case $cc_basename in + xlc*) + lt_prog_compiler_pic_CXX='-qnocommon' + lt_prog_compiler_wl_CXX='-Wl,' + ;; + esac + ;; + dgux*) + case $cc_basename in + ec++*) + lt_prog_compiler_pic_CXX='-KPIC' + ;; + ghcx*) + # Green Hills C++ Compiler + lt_prog_compiler_pic_CXX='-pic' + ;; + *) + ;; + esac + ;; + freebsd* | kfreebsd*-gnu | dragonfly*) + # FreeBSD uses GNU C++ + ;; + hpux9* | hpux10* | hpux11*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX="${ac_cv_prog_cc_wl}-a ${ac_cv_prog_cc_wl}archive" + if test "$host_cpu" != ia64; then + lt_prog_compiler_pic_CXX='+Z' + fi + ;; + aCC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX="${ac_cv_prog_cc_wl}-a ${ac_cv_prog_cc_wl}archive" + case "$host_cpu" in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic_CXX='+Z' + ;; + esac + ;; + *) + ;; + esac + ;; + irix5* | irix6* | nonstopux*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='-non_shared' + # CC pic flag -KPIC is the default. + ;; + *) + ;; + esac + ;; + linux*) + case $cc_basename in + KCC*) + # KAI C++ Compiler + lt_prog_compiler_wl_CXX='--backend -Wl,' + lt_prog_compiler_pic_CXX='-fPIC' + ;; + icpc* | ecpc*) + # Intel C++ + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-static' + ;; + pgCC*) + # Portland Group C++ compiler. + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-fpic' + lt_prog_compiler_static_CXX='-static' + ;; + cxx*) + # Compaq C++ + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + lt_prog_compiler_pic_CXX= + lt_prog_compiler_static_CXX='-non_shared' + ;; + *) + ;; + esac + ;; + lynxos*) + ;; + m88k*) + ;; + mvs*) + case $cc_basename in + cxx*) + lt_prog_compiler_pic_CXX='-W c,exportall' + ;; + *) + ;; + esac + ;; + netbsd*) + ;; + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + lt_prog_compiler_wl_CXX='--backend -Wl,' + ;; + RCC*) + # Rational C++ 2.4.1 + lt_prog_compiler_pic_CXX='-pic' + ;; + cxx*) + # Digital/Compaq C++ + lt_prog_compiler_wl_CXX='-Wl,' + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. 
+ lt_prog_compiler_pic_CXX= + lt_prog_compiler_static_CXX='-non_shared' + ;; + *) + ;; + esac + ;; + psos*) + ;; + sco*) + case $cc_basename in + CC*) + lt_prog_compiler_pic_CXX='-fPIC' + ;; + *) + ;; + esac + ;; + solaris*) + case $cc_basename in + CC*) + # Sun C++ 4.2, 5.x and Centerline C++ + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-Bstatic' + lt_prog_compiler_wl_CXX='-Qoption ld ' + ;; + gcx*) + # Green Hills C++ Compiler + lt_prog_compiler_pic_CXX='-PIC' + ;; + *) + ;; + esac + ;; + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + lt_prog_compiler_pic_CXX='-pic' + lt_prog_compiler_static_CXX='-Bstatic' + ;; + lcc*) + # Lucid + lt_prog_compiler_pic_CXX='-pic' + ;; + *) + ;; + esac + ;; + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + lt_prog_compiler_pic_CXX='-KPIC' + ;; + *) + ;; + esac + ;; + unixware*) + ;; + vxworks*) + ;; + *) + lt_prog_compiler_can_build_shared_CXX=no + ;; + esac + fi + +echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_CXX" >&5 +echo "${ECHO_T}$lt_prog_compiler_pic_CXX" >&6 + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$lt_prog_compiler_pic_CXX"; then + +echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5 +echo $ECHO_N "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... $ECHO_C" >&6 +if test "${lt_prog_compiler_pic_works_CXX+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_prog_compiler_pic_works_CXX=no + ac_outfile=conftest.$ac_objext + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic_CXX -DPIC" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:11992: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:11996: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $echo "X$_lt_compiler_boilerplate" | $Xsed >conftest.exp + $SED '/^$/d' conftest.err >conftest.er2 + if test ! -s conftest.err || diff conftest.exp conftest.er2 >/dev/null; then + lt_prog_compiler_pic_works_CXX=yes + fi + fi + $rm conftest* + +fi +echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_works_CXX" >&5 +echo "${ECHO_T}$lt_prog_compiler_pic_works_CXX" >&6 + +if test x"$lt_prog_compiler_pic_works_CXX" = xyes; then + case $lt_prog_compiler_pic_CXX in + "" | " "*) ;; + *) lt_prog_compiler_pic_CXX=" $lt_prog_compiler_pic_CXX" ;; + esac +else + lt_prog_compiler_pic_CXX= + lt_prog_compiler_can_build_shared_CXX=no +fi + +fi +case "$host_os" in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic_CXX= + ;; + *) + lt_prog_compiler_pic_CXX="$lt_prog_compiler_pic_CXX -DPIC" + ;; +esac + +echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5 +echo $ECHO_N "checking if $compiler supports -c -o file.$ac_objext... 
$ECHO_C" >&6 +if test "${lt_cv_prog_compiler_c_o_CXX+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_prog_compiler_c_o_CXX=no + $rm -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:12054: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:12058: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $echo "X$_lt_compiler_boilerplate" | $Xsed > out/conftest.exp + $SED '/^$/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.err || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o_CXX=yes + fi + fi + chmod u+w . + $rm conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $rm out/ii_files/* && rmdir out/ii_files + $rm out/* && rmdir out + cd .. + rmdir conftest + $rm conftest* + +fi +echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_c_o_CXX" >&5 +echo "${ECHO_T}$lt_cv_prog_compiler_c_o_CXX" >&6 + + +hard_links="nottested" +if test "$lt_cv_prog_compiler_c_o_CXX" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + echo "$as_me:$LINENO: checking if we can lock with hard links" >&5 +echo $ECHO_N "checking if we can lock with hard links... $ECHO_C" >&6 + hard_links=yes + $rm conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + echo "$as_me:$LINENO: result: $hard_links" >&5 +echo "${ECHO_T}$hard_links" >&6 + if test "$hard_links" = no; then + { echo "$as_me:$LINENO: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 +echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi + +echo "$as_me:$LINENO: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +echo $ECHO_N "checking whether the $compiler linker ($LD) supports shared libraries... $ECHO_C" >&6 + + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + case $host_os in + aix4* | aix5*) + # If we're using GNU nm, then we don't want the "-C" option. 
+ # -C means demangle to AIX nm, but means don't demangle with GNU nm + if $NM -V 2>&1 | grep 'GNU' > /dev/null; then + export_symbols_cmds_CXX='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds_CXX='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' + fi + ;; + pw32*) + export_symbols_cmds_CXX="$ltdll_cmds" + ;; + cygwin* | mingw*) + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS] /s/.* \([^ ]*\)/\1 DATA/;/^.* __nm__/s/^.* __nm__\([^ ]*\) [^ ]*/\1 DATA/;/^I /d;/^[AITW] /s/.* //'\'' | sort | uniq > $export_symbols' + ;; + *) + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + ;; + esac + +echo "$as_me:$LINENO: result: $ld_shlibs_CXX" >&5 +echo "${ECHO_T}$ld_shlibs_CXX" >&6 +test "$ld_shlibs_CXX" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +# +# Do we need to explicitly link libc? +# +case "x$archive_cmds_need_lc_CXX" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc_CXX=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $archive_cmds_CXX in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + echo "$as_me:$LINENO: checking whether -lc should be explicitly linked in" >&5 +echo $ECHO_N "checking whether -lc should be explicitly linked in... $ECHO_C" >&6 + $rm conftest* + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl_CXX + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag_CXX + allow_undefined_flag_CXX= + if { (eval echo "$as_me:$LINENO: \"$archive_cmds_CXX 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1\"") >&5 + (eval $archive_cmds_CXX 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + then + archive_cmds_need_lc_CXX=no + else + archive_cmds_need_lc_CXX=yes + fi + allow_undefined_flag_CXX=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $rm conftest* + echo "$as_me:$LINENO: result: $archive_cmds_need_lc_CXX" >&5 +echo "${ECHO_T}$archive_cmds_need_lc_CXX" >&6 + ;; + esac + fi + ;; +esac + +echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5 +echo $ECHO_N "checking dynamic linker characteristics... 
$ECHO_C" >&6 +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';' >/dev/null ; then + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix4* | aix5*) + version_type=linux + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | grep yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. 
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[45]*) + version_type=linux + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$host_os in + yes,cygwin* | yes,mingw* | yes,pw32*) + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i;echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $rm \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" + ;; + mingw*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';[c-zC-Z]:/' >/dev/null; then + # It is most probably a Windows format PATH printed by + # mingw gcc, but we are running on Cygwin. Gcc prints its search + # path with ; separators, and with drive letters. We can handle the + # drive letters (cygwin fileutils understands them), so leave them, + # especially as we might pass files found there to a mingw objdump, + # which wouldn't understand a cygwinified path. Ahh. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac + ;; + + *) + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' + ;; + esac + dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${versuffix}$shared_ext ${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='$(test .$module = .yes && echo .so || echo .dylib)' + # Apple's gcc prints 'gcc -print-search-dirs' doesn't operate the same. + if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"` + else + sys_lib_search_path_spec='/lib /usr/lib /usr/local/lib' + fi + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd1*) + dynamic_linker=no + ;; + +kfreebsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. + objformat=`test -x /usr/bin/objformat && /usr/bin/objformat || echo aout` + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[01]* | freebsdelf3.[01]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + *) # from 3.2 on + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + esac + ;; + +gnu*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case "$host_cpu" in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555. + postinstall_cmds='chmod 555 $lib' + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be Linux ELF. +linux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. 
+ hardcode_into_libs=yes + + # find out which ABI we are using + libsuff= + case "$host_cpu" in + x86_64*|s390x*|powerpc64*) + echo '#line 12569 "configure"' > conftest.$ac_ext + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + case `/usr/bin/file conftest.$ac_objext` in + *64-bit*) + libsuff=64 + sys_lib_search_path_spec="/lib${libsuff} /usr/lib${libsuff} /usr/local/lib${libsuff}" + ;; + esac + fi + rm -rf conftest* + ;; + esac + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:,\t]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib${libsuff} /usr/lib${libsuff} $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. + dynamic_linker='GNU/Linux ld.so' + ;; + +knetbsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +nto-qnx*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +openbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[89] | openbsd2.[89].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + 
shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +sco3.2v5*) + version_type=osf + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + ;; + +solaris*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + export_dynamic_flag_spec='${wl}-Blargedynsym' + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +uts4*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +echo "$as_me:$LINENO: result: $dynamic_linker" >&5 +echo "${ECHO_T}$dynamic_linker" >&6 +test "$dynamic_linker" = no && can_build_shared=no + +echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 +echo $ECHO_N "checking how to hardcode library paths into programs... $ECHO_C" >&6 +hardcode_action_CXX= +if test -n "$hardcode_libdir_flag_spec_CXX" || \ + test -n "$runpath_var_CXX" || \ + test "X$hardcode_automatic_CXX" = "Xyes" ; then + + # We can hardcode non-existant directories. 
+ if test "$hardcode_direct_CXX" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, CXX)" != no && + test "$hardcode_minus_L_CXX" != no; then + # Linking always hardcodes the temporary library directory. + hardcode_action_CXX=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action_CXX=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + hardcode_action_CXX=unsupported +fi +echo "$as_me:$LINENO: result: $hardcode_action_CXX" >&5 +echo "${ECHO_T}$hardcode_action_CXX" >&6 + +if test "$hardcode_action_CXX" = relink; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi + +striplib= +old_striplib= +echo "$as_me:$LINENO: checking whether stripping libraries is possible" >&5 +echo $ECHO_N "checking whether stripping libraries is possible... $ECHO_C" >&6 +if test -n "$STRIP" && $STRIP -V 2>&1 | grep "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP" ; then + striplib="$STRIP -x" + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 + else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + ;; + *) + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 + ;; + esac +fi + +if test "x$enable_dlopen" != xyes; then + enable_dlopen=unknown + enable_dlopen_self=unknown + enable_dlopen_self_static=unknown +else + lt_cv_dlopen=no + lt_cv_dlopen_libs= + + case $host_os in + beos*) + lt_cv_dlopen="load_add_on" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ;; + + mingw* | pw32*) + lt_cv_dlopen="LoadLibrary" + lt_cv_dlopen_libs= + ;; + + cygwin*) + lt_cv_dlopen="dlopen" + lt_cv_dlopen_libs= + ;; + + darwin*) + # if libdl is installed we need to link against it + echo "$as_me:$LINENO: checking for dlopen in -ldl" >&5 +echo $ECHO_N "checking for dlopen in -ldl... $ECHO_C" >&6 +if test "${ac_cv_lib_dl_dlopen+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char dlopen (); +int +main () +{ +dlopen (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! 
-s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_dl_dlopen=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_dl_dlopen=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5 +echo "${ECHO_T}$ac_cv_lib_dl_dlopen" >&6 +if test $ac_cv_lib_dl_dlopen = yes; then + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" +else + + lt_cv_dlopen="dyld" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + +fi + + ;; + + *) + echo "$as_me:$LINENO: checking for shl_load" >&5 +echo $ECHO_N "checking for shl_load... $ECHO_C" >&6 +if test "${ac_cv_func_shl_load+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +/* Define shl_load to an innocuous variant, in case declares shl_load. + For example, HP-UX 11i declares gettimeofday. */ +#define shl_load innocuous_shl_load + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char shl_load (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef shl_load + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +{ +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char shl_load (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined (__stub_shl_load) || defined (__stub___shl_load) +choke me +#else +char (*f) () = shl_load; +#endif +#ifdef __cplusplus +} +#endif + +int +main () +{ +return f != shl_load; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_func_shl_load=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_func_shl_load=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_func_shl_load" >&5 +echo "${ECHO_T}$ac_cv_func_shl_load" >&6 +if test $ac_cv_func_shl_load = yes; then + lt_cv_dlopen="shl_load" +else + echo "$as_me:$LINENO: checking for shl_load in -ldld" >&5 +echo $ECHO_N "checking for shl_load in -ldld... $ECHO_C" >&6 +if test "${ac_cv_lib_dld_shl_load+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char shl_load (); +int +main () +{ +shl_load (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_dld_shl_load=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_dld_shl_load=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_dld_shl_load" >&5 +echo "${ECHO_T}$ac_cv_lib_dld_shl_load" >&6 +if test $ac_cv_lib_dld_shl_load = yes; then + lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-dld" +else + echo "$as_me:$LINENO: checking for dlopen" >&5 +echo $ECHO_N "checking for dlopen... $ECHO_C" >&6 +if test "${ac_cv_func_dlopen+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +/* Define dlopen to an innocuous variant, in case declares dlopen. + For example, HP-UX 11i declares gettimeofday. */ +#define dlopen innocuous_dlopen + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char dlopen (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef dlopen + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +{ +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char dlopen (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. 
Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined (__stub_dlopen) || defined (__stub___dlopen) +choke me +#else +char (*f) () = dlopen; +#endif +#ifdef __cplusplus +} +#endif + +int +main () +{ +return f != dlopen; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_func_dlopen=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_func_dlopen=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_func_dlopen" >&5 +echo "${ECHO_T}$ac_cv_func_dlopen" >&6 +if test $ac_cv_func_dlopen = yes; then + lt_cv_dlopen="dlopen" +else + echo "$as_me:$LINENO: checking for dlopen in -ldl" >&5 +echo $ECHO_N "checking for dlopen in -ldl... $ECHO_C" >&6 +if test "${ac_cv_lib_dl_dlopen+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char dlopen (); +int +main () +{ +dlopen (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_dl_dlopen=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_dl_dlopen=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5 +echo "${ECHO_T}$ac_cv_lib_dl_dlopen" >&6 +if test $ac_cv_lib_dl_dlopen = yes; then + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" +else + echo "$as_me:$LINENO: checking for dlopen in -lsvld" >&5 +echo $ECHO_N "checking for dlopen in -lsvld... 
$ECHO_C" >&6 +if test "${ac_cv_lib_svld_dlopen+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsvld $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char dlopen (); +int +main () +{ +dlopen (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_svld_dlopen=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_svld_dlopen=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_svld_dlopen" >&5 +echo "${ECHO_T}$ac_cv_lib_svld_dlopen" >&6 +if test $ac_cv_lib_svld_dlopen = yes; then + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld" +else + echo "$as_me:$LINENO: checking for dld_link in -ldld" >&5 +echo $ECHO_N "checking for dld_link in -ldld... $ECHO_C" >&6 +if test "${ac_cv_lib_dld_dld_link+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char dld_link (); +int +main () +{ +dld_link (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_dld_dld_link=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_dld_dld_link=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_dld_dld_link" >&5 +echo "${ECHO_T}$ac_cv_lib_dld_dld_link" >&6 +if test $ac_cv_lib_dld_dld_link = yes; then + lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-dld" +fi + + +fi + + +fi + + +fi + + +fi + + +fi + + ;; + esac + + if test "x$lt_cv_dlopen" != xno; then + enable_dlopen=yes + else + enable_dlopen=no + fi + + case $lt_cv_dlopen in + dlopen) + save_CPPFLAGS="$CPPFLAGS" + test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" + + save_LDFLAGS="$LDFLAGS" + eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" + + save_LIBS="$LIBS" + LIBS="$lt_cv_dlopen_libs $LIBS" + + echo "$as_me:$LINENO: checking whether a program can dlopen itself" >&5 +echo $ECHO_N "checking whether a program can dlopen itself... $ECHO_C" >&6 +if test "${lt_cv_dlopen_self+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test "$cross_compiling" = yes; then : + lt_cv_dlopen_self=cross +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext < +#endif + +#include + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. */ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +#ifdef __cplusplus +extern "C" void exit (int); +#endif + +void fnord() { int i=42;} +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + /* dlclose (self); */ + } + + exit (status); +} +EOF + if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) 2>/dev/null + lt_status=$? + case x$lt_status in + x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;; + x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;; + x$lt_unknown|x*) lt_cv_dlopen_self=no ;; + esac + else : + # compilation failed + lt_cv_dlopen_self=no + fi +fi +rm -fr conftest* + + +fi +echo "$as_me:$LINENO: result: $lt_cv_dlopen_self" >&5 +echo "${ECHO_T}$lt_cv_dlopen_self" >&6 + + if test "x$lt_cv_dlopen_self" = xyes; then + LDFLAGS="$LDFLAGS $link_static_flag" + echo "$as_me:$LINENO: checking whether a statically linked program can dlopen itself" >&5 +echo $ECHO_N "checking whether a statically linked program can dlopen itself... 
$ECHO_C" >&6 +if test "${lt_cv_dlopen_self_static+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test "$cross_compiling" = yes; then : + lt_cv_dlopen_self_static=cross +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext < +#endif + +#include + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. */ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +#ifdef __cplusplus +extern "C" void exit (int); +#endif + +void fnord() { int i=42;} +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + /* dlclose (self); */ + } + + exit (status); +} +EOF + if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) 2>/dev/null + lt_status=$? + case x$lt_status in + x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_unknown|x*) lt_cv_dlopen_self_static=no ;; + esac + else : + # compilation failed + lt_cv_dlopen_self_static=no + fi +fi +rm -fr conftest* + + +fi +echo "$as_me:$LINENO: result: $lt_cv_dlopen_self_static" >&5 +echo "${ECHO_T}$lt_cv_dlopen_self_static" >&6 + fi + + CPPFLAGS="$save_CPPFLAGS" + LDFLAGS="$save_LDFLAGS" + LIBS="$save_LIBS" + ;; + esac + + case $lt_cv_dlopen_self in + yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; + *) enable_dlopen_self=unknown ;; + esac + + case $lt_cv_dlopen_self_static in + yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; + *) enable_dlopen_self_static=unknown ;; + esac +fi + + +# The else clause should only fire when bootstrapping the +# libtool distribution, otherwise you forgot to ship ltmain.sh +# with your package, and you will get complaints that there are +# no rules to generate ltmain.sh. +if test -f "$ltmain"; then + # See if we are running on zsh, and set the options which allow our commands through + # without removal of \ escapes. + if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + # Now quote all the things that may contain metacharacters while being + # careful not to overquote the AC_SUBSTed values. We take copies of the + # variables and quote the copies for generation of the libtool script. 
+ for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC NM \ + SED SHELL STRIP \ + libname_spec library_names_spec soname_spec extract_expsyms_cmds \ + old_striplib striplib file_magic_cmd finish_cmds finish_eval \ + deplibs_check_method reload_flag reload_cmds need_locks \ + lt_cv_sys_global_symbol_pipe lt_cv_sys_global_symbol_to_cdecl \ + lt_cv_sys_global_symbol_to_c_name_address \ + sys_lib_search_path_spec sys_lib_dlsearch_path_spec \ + old_postinstall_cmds old_postuninstall_cmds \ + compiler_CXX \ + CC_CXX \ + LD_CXX \ + lt_prog_compiler_wl_CXX \ + lt_prog_compiler_pic_CXX \ + lt_prog_compiler_static_CXX \ + lt_prog_compiler_no_builtin_flag_CXX \ + export_dynamic_flag_spec_CXX \ + thread_safe_flag_spec_CXX \ + whole_archive_flag_spec_CXX \ + enable_shared_with_static_runtimes_CXX \ + old_archive_cmds_CXX \ + old_archive_from_new_cmds_CXX \ + predep_objects_CXX \ + postdep_objects_CXX \ + predeps_CXX \ + postdeps_CXX \ + compiler_lib_search_path_CXX \ + archive_cmds_CXX \ + archive_expsym_cmds_CXX \ + postinstall_cmds_CXX \ + postuninstall_cmds_CXX \ + old_archive_from_expsyms_cmds_CXX \ + allow_undefined_flag_CXX \ + no_undefined_flag_CXX \ + export_symbols_cmds_CXX \ + hardcode_libdir_flag_spec_CXX \ + hardcode_libdir_flag_spec_ld_CXX \ + hardcode_libdir_separator_CXX \ + hardcode_automatic_CXX \ + module_cmds_CXX \ + module_expsym_cmds_CXX \ + lt_cv_prog_compiler_c_o_CXX \ + exclude_expsyms_CXX \ + include_expsyms_CXX; do + + case $var in + old_archive_cmds_CXX | \ + old_archive_from_new_cmds_CXX | \ + archive_cmds_CXX | \ + archive_expsym_cmds_CXX | \ + module_cmds_CXX | \ + module_expsym_cmds_CXX | \ + old_archive_from_expsyms_cmds_CXX | \ + export_symbols_cmds_CXX | \ + extract_expsyms_cmds | reload_cmds | finish_cmds | \ + postinstall_cmds | postuninstall_cmds | \ + old_postinstall_cmds | old_postuninstall_cmds | \ + sys_lib_search_path_spec | sys_lib_dlsearch_path_spec) + # Double-quote double-evaled strings. + eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\"" + ;; + *) + eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\"" + ;; + esac + done + + case $lt_echo in + *'\$0 --fallback-echo"') + lt_echo=`$echo "X$lt_echo" | $Xsed -e 's/\\\\\\\$0 --fallback-echo"$/$0 --fallback-echo"/'` + ;; + esac + +cfgfile="$ofile" + + cat <<__EOF__ >> "$cfgfile" +# ### BEGIN LIBTOOL TAG CONFIG: $tagname + +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc_CXX + +# Whether or not to disallow shared libs when runtime libs are static +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_CXX + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# An echo program that does not interpret backslashes. +echo=$lt_echo + +# The archiver. +AR=$lt_AR +AR_FLAGS=$lt_AR_FLAGS + +# A C compiler. +LTCC=$lt_LTCC + +# A language-specific compiler. +CC=$lt_compiler_CXX + +# Is the compiler the GNU C compiler? 
+with_gcc=$GCC_CXX + +# An ERE matcher. +EGREP=$lt_EGREP + +# The linker used to build libraries. +LD=$lt_LD_CXX + +# Whether we need hard or soft links. +LN_S=$lt_LN_S + +# A BSD-compatible nm program. +NM=$lt_NM + +# A symbol stripping program +STRIP=$lt_STRIP + +# Used to examine libraries when file_magic_cmd begins "file" +MAGIC_CMD=$MAGIC_CMD + +# Used on cygwin: DLL creation program. +DLLTOOL="$DLLTOOL" + +# Used on cygwin: object dumper. +OBJDUMP="$OBJDUMP" + +# Used on cygwin: assembler. +AS="$AS" + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl_CXX + +# Object file suffix (normally "o"). +objext="$ac_objext" + +# Old archive suffix (normally "a"). +libext="$libext" + +# Shared library suffix (normally ".so"). +shrext_cmds='$shrext_cmds' + +# Executable file suffix (normally ""). +exeext="$exeext" + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic_CXX +pic_mode=$pic_mode + +# What is the maximum length of a command? +max_cmd_len=$lt_cv_sys_max_cmd_len + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o_CXX + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Do we need the lib prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static_CXX + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_CXX + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec_CXX + +# Compiler flag to generate thread-safe objects. +thread_safe_flag_spec=$lt_thread_safe_flag_spec_CXX + +# Library versioning type. +version_type=$version_type + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME. +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Commands used to build and install an old-style archive. +RANLIB=$lt_RANLIB +old_archive_cmds=$lt_old_archive_cmds_CXX +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_CXX + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_CXX + +# Commands used to build and install a shared archive. 
+archive_cmds=$lt_archive_cmds_CXX +archive_expsym_cmds=$lt_archive_expsym_cmds_CXX +postinstall_cmds=$lt_postinstall_cmds +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to build a loadable module (assumed same as above if empty) +module_cmds=$lt_module_cmds_CXX +module_expsym_cmds=$lt_module_expsym_cmds_CXX + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + +# Dependencies to place before the objects being linked to create a +# shared library. +predep_objects=$lt_predep_objects_CXX + +# Dependencies to place after the objects being linked to create a +# shared library. +postdep_objects=$lt_postdep_objects_CXX + +# Dependencies to place before the objects being linked to create a +# shared library. +predeps=$lt_predeps_CXX + +# Dependencies to place after the objects being linked to create a +# shared library. +postdeps=$lt_postdeps_CXX + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_compiler_lib_search_path_CXX + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method == file_magic. +file_magic_cmd=$lt_file_magic_cmd + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag_CXX + +# Flag that forces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag_CXX + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# Same as above, but a single script fragment to be evaled but not shown. +finish_eval=$lt_finish_eval + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm in a C name address pair +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# This is the shared library runtime path variable. +runpath_var=$runpath_var + +# This is the shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action_CXX + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist. +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_CXX + +# If ld is used when linking, flag to hardcode \$libdir into +# a binary during linking. This must work even if \$libdir does +# not exist. +hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld_CXX + +# Whether we need a single -rpath flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator_CXX + +# Set to yes if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the +# resulting binary. +hardcode_direct=$hardcode_direct_CXX + +# Set to yes if using the -LDIR flag during linking hardcodes DIR into the +# resulting binary. +hardcode_minus_L=$hardcode_minus_L_CXX + +# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into +# the resulting binary. 
+hardcode_shlibpath_var=$hardcode_shlibpath_var_CXX + +# Set to yes if building a shared library automatically hardcodes DIR into the library +# and all subsequent libraries and executables linked against it. +hardcode_automatic=$hardcode_automatic_CXX + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at relink time. +variables_saved_for_relink="$variables_saved_for_relink" + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs_CXX + +# Compile-time system search path for libraries +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec + +# Run-time system search path for libraries +sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec + +# Fix the shell variable \$srcfile for the compiler. +fix_srcfile_path="$fix_srcfile_path_CXX" + +# Set to yes if exported symbols are required. +always_export_symbols=$always_export_symbols_CXX + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds_CXX + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms_CXX + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms_CXX + +# ### END LIBTOOL TAG CONFIG: $tagname + +__EOF__ + + +else + # If there is no Makefile yet, we rely on a make rule to execute + # `config.status --recheck' to rerun these tests and create the + # libtool script then. + ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` + if test -f "$ltmain_in"; then + test -f Makefile && make "$ltmain" + fi +fi + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +CC=$lt_save_CC +LDCXX=$LD +LD=$lt_save_LD +GCC=$lt_save_GCC +with_gnu_ldcxx=$with_gnu_ld +with_gnu_ld=$lt_save_with_gnu_ld +lt_cv_path_LDCXX=$lt_cv_path_LD +lt_cv_path_LD=$lt_save_path_LD +lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld +lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld + + else + tagname="" + fi + ;; + + F77) + if test -n "$F77" && test "X$F77" != "Xno"; then + +ac_ext=f +ac_compile='$F77 -c $FFLAGS conftest.$ac_ext >&5' +ac_link='$F77 -o conftest$ac_exeext $FFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_f77_compiler_gnu + + +archive_cmds_need_lc_F77=no +allow_undefined_flag_F77= +always_export_symbols_F77=no +archive_expsym_cmds_F77= +export_dynamic_flag_spec_F77= +hardcode_direct_F77=no +hardcode_libdir_flag_spec_F77= +hardcode_libdir_flag_spec_ld_F77= +hardcode_libdir_separator_F77= +hardcode_minus_L_F77=no +hardcode_automatic_F77=no +module_cmds_F77= +module_expsym_cmds_F77= +link_all_deplibs_F77=unknown +old_archive_cmds_F77=$old_archive_cmds +no_undefined_flag_F77= +whole_archive_flag_spec_F77= +enable_shared_with_static_runtimes_F77=no + +# Source file extension for f77 test sources. +ac_ext=f + +# Object file extension for compiled f77 test sources. +objext=o +objext_F77=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code=" subroutine t\n return\n end\n" + +# Code to be used in simple link tests +lt_simple_link_test_code=" program t\n end\n" + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# Allow CC to be a program name with arguments. 
+compiler=$CC + + +# save warnings/boilerplate of simple test code +ac_outfile=conftest.$ac_objext +printf "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$rm conftest* + +ac_outfile=conftest.$ac_objext +printf "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$rm conftest* + + +# Allow CC to be a program name with arguments. +lt_save_CC="$CC" +CC=${F77-"f77"} +compiler=$CC +compiler_F77=$CC +for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` + + +echo "$as_me:$LINENO: checking if libtool supports shared libraries" >&5 +echo $ECHO_N "checking if libtool supports shared libraries... $ECHO_C" >&6 +echo "$as_me:$LINENO: result: $can_build_shared" >&5 +echo "${ECHO_T}$can_build_shared" >&6 + +echo "$as_me:$LINENO: checking whether to build shared libraries" >&5 +echo $ECHO_N "checking whether to build shared libraries... $ECHO_C" >&6 +test "$can_build_shared" = "no" && enable_shared=no + +# On AIX, shared libraries and static libraries use the same namespace, and +# are all built from PIC. +case "$host_os" in +aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; +aix4* | aix5*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; +esac +echo "$as_me:$LINENO: result: $enable_shared" >&5 +echo "${ECHO_T}$enable_shared" >&6 + +echo "$as_me:$LINENO: checking whether to build static libraries" >&5 +echo $ECHO_N "checking whether to build static libraries... $ECHO_C" >&6 +# Make sure either enable_shared or enable_static is yes. +test "$enable_shared" = yes || enable_static=yes +echo "$as_me:$LINENO: result: $enable_static" >&5 +echo "${ECHO_T}$enable_static" >&6 + +test "$ld_shlibs_F77" = no && can_build_shared=no + +GCC_F77="$G77" +LD_F77="$LD" + +lt_prog_compiler_wl_F77= +lt_prog_compiler_pic_F77= +lt_prog_compiler_static_F77= + +echo "$as_me:$LINENO: checking for $compiler option to produce PIC" >&5 +echo $ECHO_N "checking for $compiler option to produce PIC... $ECHO_C" >&6 + + if test "$GCC" = yes; then + lt_prog_compiler_wl_F77='-Wl,' + lt_prog_compiler_static_F77='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_F77='-Bstatic' + fi + ;; + + amigaos*) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + lt_prog_compiler_pic_F77='-m68020 -resident32 -malways-restore-a4' + ;; + + beos* | cygwin* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + + mingw* | pw32* | os2*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). 
+ lt_prog_compiler_pic_F77='-DDLL_EXPORT' + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic_F77='-fno-common' + ;; + + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. + lt_prog_compiler_can_build_shared_F77=no + enable_shared=no + ;; + + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic_F77=-Kconform_pic + fi + ;; + + hpux*) + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case "$host_cpu" in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic_F77='-fPIC' + ;; + esac + ;; + + *) + lt_prog_compiler_pic_F77='-fPIC' + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. + case $host_os in + aix*) + lt_prog_compiler_wl_F77='-Wl,' + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_F77='-Bstatic' + else + lt_prog_compiler_static_F77='-bnso -bI:/lib/syscalls.exp' + fi + ;; + darwin*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + case $cc_basename in + xlc*) + lt_prog_compiler_pic_F77='-qnocommon' + lt_prog_compiler_wl_F77='-Wl,' + ;; + esac + ;; + + mingw* | pw32* | os2*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic_F77='-DDLL_EXPORT' + ;; + + hpux9* | hpux10* | hpux11*) + lt_prog_compiler_wl_F77='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case "$host_cpu" in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic_F77='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? + lt_prog_compiler_static_F77='${wl}-a ${wl}archive' + ;; + + irix5* | irix6* | nonstopux*) + lt_prog_compiler_wl_F77='-Wl,' + # PIC (with -KPIC) is the default. + lt_prog_compiler_static_F77='-non_shared' + ;; + + newsos6) + lt_prog_compiler_pic_F77='-KPIC' + lt_prog_compiler_static_F77='-Bstatic' + ;; + + linux*) + case $cc_basename in + icc* | ecc*) + lt_prog_compiler_wl_F77='-Wl,' + lt_prog_compiler_pic_F77='-KPIC' + lt_prog_compiler_static_F77='-static' + ;; + pgcc* | pgf77* | pgf90*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + lt_prog_compiler_wl_F77='-Wl,' + lt_prog_compiler_pic_F77='-fpic' + lt_prog_compiler_static_F77='-static' + ;; + ccc*) + lt_prog_compiler_wl_F77='-Wl,' + # All Alpha code is PIC. + lt_prog_compiler_static_F77='-non_shared' + ;; + esac + ;; + + osf3* | osf4* | osf5*) + lt_prog_compiler_wl_F77='-Wl,' + # All OSF/1 code is PIC. 
+ lt_prog_compiler_static_F77='-non_shared' + ;; + + sco3.2v5*) + lt_prog_compiler_pic_F77='-Kpic' + lt_prog_compiler_static_F77='-dn' + ;; + + solaris*) + lt_prog_compiler_pic_F77='-KPIC' + lt_prog_compiler_static_F77='-Bstatic' + case $cc_basename in + f77* | f90* | f95*) + lt_prog_compiler_wl_F77='-Qoption ld ';; + *) + lt_prog_compiler_wl_F77='-Wl,';; + esac + ;; + + sunos4*) + lt_prog_compiler_wl_F77='-Qoption ld ' + lt_prog_compiler_pic_F77='-PIC' + lt_prog_compiler_static_F77='-Bstatic' + ;; + + sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + lt_prog_compiler_wl_F77='-Wl,' + lt_prog_compiler_pic_F77='-KPIC' + lt_prog_compiler_static_F77='-Bstatic' + ;; + + sysv4*MP*) + if test -d /usr/nec ;then + lt_prog_compiler_pic_F77='-Kconform_pic' + lt_prog_compiler_static_F77='-Bstatic' + fi + ;; + + unicos*) + lt_prog_compiler_wl_F77='-Wl,' + lt_prog_compiler_can_build_shared_F77=no + ;; + + uts4*) + lt_prog_compiler_pic_F77='-pic' + lt_prog_compiler_static_F77='-Bstatic' + ;; + + *) + lt_prog_compiler_can_build_shared_F77=no + ;; + esac + fi + +echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_F77" >&5 +echo "${ECHO_T}$lt_prog_compiler_pic_F77" >&6 + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$lt_prog_compiler_pic_F77"; then + +echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic_F77 works" >&5 +echo $ECHO_N "checking if $compiler PIC flag $lt_prog_compiler_pic_F77 works... $ECHO_C" >&6 +if test "${lt_prog_compiler_pic_works_F77+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_prog_compiler_pic_works_F77=no + ac_outfile=conftest.$ac_objext + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic_F77" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:14423: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:14427: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $echo "X$_lt_compiler_boilerplate" | $Xsed >conftest.exp + $SED '/^$/d' conftest.err >conftest.er2 + if test ! 
-s conftest.err || diff conftest.exp conftest.er2 >/dev/null; then + lt_prog_compiler_pic_works_F77=yes + fi + fi + $rm conftest* + +fi +echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_works_F77" >&5 +echo "${ECHO_T}$lt_prog_compiler_pic_works_F77" >&6 + +if test x"$lt_prog_compiler_pic_works_F77" = xyes; then + case $lt_prog_compiler_pic_F77 in + "" | " "*) ;; + *) lt_prog_compiler_pic_F77=" $lt_prog_compiler_pic_F77" ;; + esac +else + lt_prog_compiler_pic_F77= + lt_prog_compiler_can_build_shared_F77=no +fi + +fi +case "$host_os" in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic_F77= + ;; + *) + lt_prog_compiler_pic_F77="$lt_prog_compiler_pic_F77" + ;; +esac + +echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5 +echo $ECHO_N "checking if $compiler supports -c -o file.$ac_objext... $ECHO_C" >&6 +if test "${lt_cv_prog_compiler_c_o_F77+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_prog_compiler_c_o_F77=no + $rm -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:14485: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:14489: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $echo "X$_lt_compiler_boilerplate" | $Xsed > out/conftest.exp + $SED '/^$/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.err || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o_F77=yes + fi + fi + chmod u+w . + $rm conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $rm out/ii_files/* && rmdir out/ii_files + $rm out/* && rmdir out + cd .. + rmdir conftest + $rm conftest* + +fi +echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_c_o_F77" >&5 +echo "${ECHO_T}$lt_cv_prog_compiler_c_o_F77" >&6 + + +hard_links="nottested" +if test "$lt_cv_prog_compiler_c_o_F77" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + echo "$as_me:$LINENO: checking if we can lock with hard links" >&5 +echo $ECHO_N "checking if we can lock with hard links... 
$ECHO_C" >&6 + hard_links=yes + $rm conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + echo "$as_me:$LINENO: result: $hard_links" >&5 +echo "${ECHO_T}$hard_links" >&6 + if test "$hard_links" = no; then + { echo "$as_me:$LINENO: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 +echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi + +echo "$as_me:$LINENO: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +echo $ECHO_N "checking whether the $compiler linker ($LD) supports shared libraries... $ECHO_C" >&6 + + runpath_var= + allow_undefined_flag_F77= + enable_shared_with_static_runtimes_F77=no + archive_cmds_F77= + archive_expsym_cmds_F77= + old_archive_From_new_cmds_F77= + old_archive_from_expsyms_cmds_F77= + export_dynamic_flag_spec_F77= + whole_archive_flag_spec_F77= + thread_safe_flag_spec_F77= + hardcode_libdir_flag_spec_F77= + hardcode_libdir_flag_spec_ld_F77= + hardcode_libdir_separator_F77= + hardcode_direct_F77=no + hardcode_minus_L_F77=no + hardcode_shlibpath_var_F77=unsupported + link_all_deplibs_F77=unknown + hardcode_automatic_F77=no + module_cmds_F77= + module_expsym_cmds_F77= + always_export_symbols_F77=no + export_symbols_cmds_F77='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + include_expsyms_F77= + # exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ` (' and `)$', so one must not match beginning or + # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', + # as well as any symbol that contains `d'. + exclude_expsyms_F77="_GLOBAL_OFFSET_TABLE_" + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + extract_expsyms_cmds= + # Just being paranoid about ensuring that cc_basename is set. + for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` + + case $host_os in + cygwin* | mingw* | pw32*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + if test "$GCC" != yes; then + with_gnu_ld=no + fi + ;; + openbsd*) + with_gnu_ld=no + ;; + esac + + ld_shlibs_F77=yes + if test "$with_gnu_ld" = yes; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='${wl}' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec_F77='${wl}--rpath ${wl}$libdir' + export_dynamic_flag_spec_F77='${wl}--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. 
+ if $LD --help 2>&1 | grep 'no-whole-archive' > /dev/null; then + whole_archive_flag_spec_F77="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + whole_archive_flag_spec_F77= + fi + supports_anon_versioning=no + case `$LD -v 2>/dev/null` in + *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + + # See if GNU ld supports shared libraries. + case $host_os in + aix3* | aix4* | aix5*) + # On AIX/PPC, the GNU linker is very broken + if test "$host_cpu" != ia64; then + ld_shlibs_F77=no + cat <<EOF 1>&2 + +*** Warning: the GNU linker, at least up to release 2.9.1, is reported +*** to be unable to reliably create shared libraries on AIX. +*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to modify your PATH +*** so that a non-GNU linker is found, and then restart. + +EOF + fi + ;; + + amigaos*) + archive_cmds_F77='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec_F77='-L$libdir' + hardcode_minus_L_F77=yes + + # Samuel A. Falvo II reports + # that the semantics of dynamic libraries on AmigaOS, at least up + # to version 4, is to share data among multiple programs linked + # with the same dynamic library. Since this doesn't match the + # behavior of shared libraries on other platforms, we can't use + # them. + ld_shlibs_F77=no + ;; + + beos*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + allow_undefined_flag_F77=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + archive_cmds_F77='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + ld_shlibs_F77=no + fi + ;; + + cygwin* | mingw* | pw32*) + # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, F77) is actually meaningless, + # as there is no search path for DLLs. + hardcode_libdir_flag_spec_F77='-L$libdir' + allow_undefined_flag_F77=unsupported + always_export_symbols_F77=no + enable_shared_with_static_runtimes_F77=yes + export_symbols_cmds_F77='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS] /s/.* \([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW] /s/.* //'\'' | sort | uniq > $export_symbols' + + if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then + archive_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--image-base=0x10000000 ${wl}--out-implib,$lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... 
+ archive_expsym_cmds_F77='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--image-base=0x10000000 ${wl}--out-implib,$lib' + else + ld_shlibs_F77=no + fi + ;; + + linux*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + tmp_addflag= + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + whole_archive_flag_spec_F77= + ;; + pgf77* | pgf90* ) # Portland Group f77 and f90 compilers + whole_archive_flag_spec_F77= + tmp_addflag=' -fpic -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + esac + archive_cmds_F77='$CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + + if test $supports_anon_versioning = yes; then + archive_expsym_cmds_F77='$echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + $echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + else + ld_shlibs_F77=no + fi + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + archive_cmds_F77='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + archive_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + + solaris* | sysv5*) + if $LD -v 2>&1 | grep 'BFD 2\.8' > /dev/null; then + ld_shlibs_F77=no + cat <<EOF 1>&2 + +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. 
+ +EOF + elif $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + archive_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs_F77=no + fi + ;; + + sunos4*) + archive_cmds_F77='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + hardcode_direct_F77=yes + hardcode_shlibpath_var_F77=no + ;; + + *) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + archive_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs_F77=no + fi + ;; + esac + + if test "$ld_shlibs_F77" = no; then + runpath_var= + hardcode_libdir_flag_spec_F77= + export_dynamic_flag_spec_F77= + whole_archive_flag_spec_F77= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + allow_undefined_flag_F77=unsupported + always_export_symbols_F77=yes + archive_expsym_cmds_F77='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. + hardcode_minus_L_F77=yes + if test "$GCC" = yes && test -z "$link_static_flag"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + hardcode_direct_F77=unsupported + fi + ;; + + aix4* | aix5*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to AIX nm, but means don't demangle with GNU nm + if $NM -V 2>&1 | grep 'GNU' > /dev/null; then + export_symbols_cmds_F77='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds_F77='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[23]|aix4.[23].*|aix5*) + for ld_flag in $LDFLAGS; do + if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then + aix_use_runtimelinking=yes + break + fi + done + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. 
+ + archive_cmds_F77='' + hardcode_direct_F77=yes + hardcode_libdir_separator_F77=':' + link_all_deplibs_F77=yes + + if test "$GCC" = yes; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && \ + strings "$collect2name" | grep resolve_lib_name >/dev/null + then + # We have reworked collect2 + hardcode_direct_F77=yes + else + # We have old collect2 + hardcode_direct_F77=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L_F77=yes + hardcode_libdir_flag_spec_F77='-L$libdir' + hardcode_libdir_separator_F77= + fi + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + always_export_symbols_F77=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + allow_undefined_flag_F77='-berok' + # Determine the default libpath from the value encoded in an empty executable. + cat >conftest.$ac_ext <<_ACEOF + program main + + end +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_f77_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. 
+if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hardcode_libdir_flag_spec_F77='${wl}-blibpath:$libdir:'"$aix_libpath" + archive_expsym_cmds_F77="\$CC"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$no_entry_flag \${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + hardcode_libdir_flag_spec_F77='${wl}-R $libdir:/usr/lib:/lib' + allow_undefined_flag_F77="-z nodefs" + archive_expsym_cmds_F77="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$no_entry_flag \${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an empty executable. + cat >conftest.$ac_ext <<_ACEOF + program main + + end +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_f77_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. +if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hardcode_libdir_flag_spec_F77='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + no_undefined_flag_F77=' ${wl}-bernotok' + allow_undefined_flag_F77=' ${wl}-berok' + # -bexpall does not export symbols beginning with underscore (_) + always_export_symbols_F77=yes + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec_F77=' ' + archive_cmds_need_lc_F77=yes + # This is similar to how AIX traditionally builds it's shared libraries. 
+ archive_expsym_cmds_F77="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${wl}-bE:$export_symbols ${wl}-bnoentry${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + amigaos*) + archive_cmds_F77='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec_F77='-L$libdir' + hardcode_minus_L_F77=yes + # see comment about different semantics on the GNU ld section + ld_shlibs_F77=no + ;; + + bsdi[45]*) + export_dynamic_flag_spec_F77=-rdynamic + ;; + + cygwin* | mingw* | pw32*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + hardcode_libdir_flag_spec_F77=' ' + allow_undefined_flag_F77=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. + archive_cmds_F77='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | $SED -e '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + old_archive_From_new_cmds_F77='true' + # FIXME: Should let the user specify the lib program. + old_archive_cmds_F77='lib /OUT:$oldlib$oldobjs$old_deplibs' + fix_srcfile_path_F77='`cygpath -w "$srcfile"`' + enable_shared_with_static_runtimes_F77=yes + ;; + + darwin* | rhapsody*) + case "$host_os" in + rhapsody* | darwin1.[012]) + allow_undefined_flag_F77='${wl}-undefined ${wl}suppress' + ;; + *) # Darwin 1.3 on + if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then + allow_undefined_flag_F77='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + else + case ${MACOSX_DEPLOYMENT_TARGET} in + 10.[012]) + allow_undefined_flag_F77='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + ;; + 10.*) + allow_undefined_flag_F77='${wl}-undefined ${wl}dynamic_lookup' + ;; + esac + fi + ;; + esac + archive_cmds_need_lc_F77=no + hardcode_direct_F77=no + hardcode_automatic_F77=yes + hardcode_shlibpath_var_F77=unsupported + whole_archive_flag_spec_F77='' + link_all_deplibs_F77=yes + if test "$GCC" = yes ; then + output_verbose_link_cmd='echo' + archive_cmds_F77='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + module_cmds_F77='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's + archive_expsym_cmds_F77='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds_F77='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + case $cc_basename 
in + xlc*) + output_verbose_link_cmd='echo' + archive_cmds_F77='$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring' + module_cmds_F77='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's + archive_expsym_cmds_F77='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds_F77='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + ;; + *) + ld_shlibs_F77=no + ;; + esac + fi + ;; + + dgux*) + archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec_F77='-L$libdir' + hardcode_shlibpath_var_F77=no + ;; + + freebsd1*) + ld_shlibs_F77=no + ;; + + # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). + freebsd2.2*) + archive_cmds_F77='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + hardcode_libdir_flag_spec_F77='-R$libdir' + hardcode_direct_F77=yes + hardcode_shlibpath_var_F77=no + ;; + + # Unfortunately, older versions of FreeBSD 2 do not have this feature. + freebsd2*) + archive_cmds_F77='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct_F77=yes + hardcode_minus_L_F77=yes + hardcode_shlibpath_var_F77=no + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. + freebsd* | kfreebsd*-gnu | dragonfly*) + archive_cmds_F77='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec_F77='-R$libdir' + hardcode_direct_F77=yes + hardcode_shlibpath_var_F77=no + ;; + + hpux9*) + if test "$GCC" = yes; then + archive_cmds_F77='$rm $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + archive_cmds_F77='$rm $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi + hardcode_libdir_flag_spec_F77='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_F77=: + hardcode_direct_F77=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ hardcode_minus_L_F77=yes + export_dynamic_flag_spec_F77='${wl}-E' + ;; + + hpux10* | hpux11*) + if test "$GCC" = yes -a "$with_gnu_ld" = no; then + case "$host_cpu" in + hppa*64*|ia64*) + archive_cmds_F77='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds_F77='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case "$host_cpu" in + hppa*64*|ia64*) + archive_cmds_F77='$LD -b +h $soname -o $lib $libobjs $deplibs $linker_flags' + ;; + *) + archive_cmds_F77='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + ;; + esac + fi + if test "$with_gnu_ld" = no; then + case "$host_cpu" in + hppa*64*) + hardcode_libdir_flag_spec_F77='${wl}+b ${wl}$libdir' + hardcode_libdir_flag_spec_ld_F77='+b $libdir' + hardcode_libdir_separator_F77=: + hardcode_direct_F77=no + hardcode_shlibpath_var_F77=no + ;; + ia64*) + hardcode_libdir_flag_spec_F77='-L$libdir' + hardcode_direct_F77=no + hardcode_shlibpath_var_F77=no + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L_F77=yes + ;; + *) + hardcode_libdir_flag_spec_F77='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_F77=: + hardcode_direct_F77=yes + export_dynamic_flag_spec_F77='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L_F77=yes + ;; + esac + fi + ;; + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then + archive_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + archive_cmds_F77='$LD -shared $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec_ld_F77='-rpath $libdir' + fi + hardcode_libdir_flag_spec_F77='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_F77=: + link_all_deplibs_F77=yes + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + archive_cmds_F77='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + archive_cmds_F77='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + hardcode_libdir_flag_spec_F77='-R$libdir' + hardcode_direct_F77=yes + hardcode_shlibpath_var_F77=no + ;; + + newsos6) + archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct_F77=yes + hardcode_libdir_flag_spec_F77='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_F77=: + hardcode_shlibpath_var_F77=no + ;; + + openbsd*) + hardcode_direct_F77=yes + hardcode_shlibpath_var_F77=no + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + archive_cmds_F77='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_F77='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' + hardcode_libdir_flag_spec_F77='${wl}-rpath,$libdir' + export_dynamic_flag_spec_F77='${wl}-E' + else + case $host_os in + openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) + archive_cmds_F77='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec_F77='-R$libdir' + ;; + *) + archive_cmds_F77='$CC -shared $pic_flag -o $lib $libobjs 
$deplibs $compiler_flags' + hardcode_libdir_flag_spec_F77='${wl}-rpath,$libdir' + ;; + esac + fi + ;; + + os2*) + hardcode_libdir_flag_spec_F77='-L$libdir' + hardcode_minus_L_F77=yes + allow_undefined_flag_F77=unsupported + archive_cmds_F77='$echo "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$echo "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$echo DATA >> $output_objdir/$libname.def~$echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~$echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' + old_archive_From_new_cmds_F77='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' + ;; + + osf3*) + if test "$GCC" = yes; then + allow_undefined_flag_F77=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds_F77='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + allow_undefined_flag_F77=' -expect_unresolved \*' + archive_cmds_F77='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + fi + hardcode_libdir_flag_spec_F77='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_F77=: + ;; + + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + allow_undefined_flag_F77=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds_F77='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec_F77='${wl}-rpath ${wl}$libdir' + else + allow_undefined_flag_F77=' -expect_unresolved \*' + archive_cmds_F77='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds_F77='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; echo "-hidden">> $lib.exp~ + $LD -shared${allow_undefined_flag} -input $lib.exp $linker_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~$rm $lib.exp' + + # Both c and cxx compiler support -rpath directly + hardcode_libdir_flag_spec_F77='-rpath $libdir' + fi + hardcode_libdir_separator_F77=: + ;; + + sco3.2v5*) + archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var_F77=no + export_dynamic_flag_spec_F77='${wl}-Bexport' + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + ;; + + solaris*) + no_undefined_flag_F77=' -z text' + if test "$GCC" = yes; then + wlarc='${wl}' + archive_cmds_F77='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_F77='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -shared ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$rm $lib.exp' + else + wlarc='' + archive_cmds_F77='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + 
archive_expsym_cmds_F77='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp' + fi + hardcode_libdir_flag_spec_F77='-R$libdir' + hardcode_shlibpath_var_F77=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The compiler driver will combine linker options so we + # cannot just pass the convience library names through + # without $wl, iff we do not link with $LD. + # Luckily, gcc supports the same syntax we need for Sun Studio. + # Supported since Solaris 2.6 (maybe 2.5.1?) + case $wlarc in + '') + whole_archive_flag_spec_F77='-z allextract$convenience -z defaultextract' ;; + *) + whole_archive_flag_spec_F77='${wl}-z ${wl}allextract`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}-z ${wl}defaultextract' ;; + esac ;; + esac + link_all_deplibs_F77=yes + ;; + + sunos4*) + if test "x$host_vendor" = xsequent; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. + archive_cmds_F77='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds_F77='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + hardcode_libdir_flag_spec_F77='-L$libdir' + hardcode_direct_F77=yes + hardcode_minus_L_F77=yes + hardcode_shlibpath_var_F77=no + ;; + + sysv4) + case $host_vendor in + sni) + archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct_F77=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. 
+ archive_cmds_F77='$LD -G -o $lib $libobjs $deplibs $linker_flags' + reload_cmds_F77='$CC -r -o $output$reload_objs' + hardcode_direct_F77=no + ;; + motorola) + archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct_F77=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + hardcode_shlibpath_var_F77=no + ;; + + sysv4.3*) + archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var_F77=no + export_dynamic_flag_spec_F77='-Bexport' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var_F77=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + ld_shlibs_F77=yes + fi + ;; + + sysv4.2uw2*) + archive_cmds_F77='$LD -G -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct_F77=yes + hardcode_minus_L_F77=no + hardcode_shlibpath_var_F77=no + hardcode_runpath_var=yes + runpath_var=LD_RUN_PATH + ;; + + sysv5OpenUNIX8* | sysv5UnixWare7* | sysv5uw[78]* | unixware7*) + no_undefined_flag_F77='${wl}-z ${wl}text' + if test "$GCC" = yes; then + archive_cmds_F77='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds_F77='$CC -G ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + runpath_var='LD_RUN_PATH' + hardcode_shlibpath_var_F77=no + ;; + + sysv5*) + no_undefined_flag_F77=' -z text' + # $CC -shared without GNU ld will not create a library from C++ + # object files and a static libstdc++, better avoid it by now + archive_cmds_F77='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + archive_expsym_cmds_F77='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp' + hardcode_libdir_flag_spec_F77= + hardcode_shlibpath_var_F77=no + runpath_var='LD_RUN_PATH' + ;; + + uts4*) + archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec_F77='-L$libdir' + hardcode_shlibpath_var_F77=no + ;; + + *) + ld_shlibs_F77=no + ;; + esac + fi + +echo "$as_me:$LINENO: result: $ld_shlibs_F77" >&5 +echo "${ECHO_T}$ld_shlibs_F77" >&6 +test "$ld_shlibs_F77" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +# +# Do we need to explicitly link libc? +# +case "x$archive_cmds_need_lc_F77" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc_F77=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $archive_cmds_F77 in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + echo "$as_me:$LINENO: checking whether -lc should be explicitly linked in" >&5 +echo $ECHO_N "checking whether -lc should be explicitly linked in... $ECHO_C" >&6 + $rm conftest* + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl_F77 + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag_F77 + allow_undefined_flag_F77= + if { (eval echo "$as_me:$LINENO: \"$archive_cmds_F77 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1\"") >&5 + (eval $archive_cmds_F77 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + then + archive_cmds_need_lc_F77=no + else + archive_cmds_need_lc_F77=yes + fi + allow_undefined_flag_F77=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $rm conftest* + echo "$as_me:$LINENO: result: $archive_cmds_need_lc_F77" >&5 +echo "${ECHO_T}$archive_cmds_need_lc_F77" >&6 + ;; + esac + fi + ;; +esac + +echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5 +echo $ECHO_N "checking dynamic linker characteristics... $ECHO_C" >&6 +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';' >/dev/null ; then + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix4* | aix5*) + version_type=linux + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. 
+ case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | grep yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. + finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[45]*) + version_type=linux + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$host_os in + yes,cygwin* | yes,mingw* | yes,pw32*) + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i;echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $rm \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" + ;; + mingw*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';[c-zC-Z]:/' >/dev/null; then + # It is most probably a Windows format PATH printed by + # mingw gcc, but we are running on Cygwin. Gcc prints its search + # path with ; separators, and with drive letters. We can handle the + # drive letters (cygwin fileutils understands them), so leave them, + # especially as we might pass files found there to a mingw objdump, + # which wouldn't understand a cygwinified path. Ahh. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac + ;; + + *) + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' + ;; + esac + dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${versuffix}$shared_ext ${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='$(test .$module = .yes && echo .so || echo .dylib)' + # Apple's gcc prints 'gcc -print-search-dirs' doesn't operate the same. + if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"` + else + sys_lib_search_path_spec='/lib /usr/lib /usr/local/lib' + fi + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd1*) + dynamic_linker=no + ;; + +kfreebsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. 
+ objformat=`test -x /usr/bin/objformat && /usr/bin/objformat || echo aout` + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[01]* | freebsdelf3.[01]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + *) # from 3.2 on + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + esac + ;; + +gnu*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case "$host_cpu" in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555. 
+ postinstall_cmds='chmod 555 $lib' + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be Linux ELF. +linux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # find out which ABI we are using + libsuff= + case "$host_cpu" in + x86_64*|s390x*|powerpc64*) + echo '#line 15850 "configure"' > conftest.$ac_ext + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + case `/usr/bin/file conftest.$ac_objext` in + *64-bit*) + libsuff=64 + sys_lib_search_path_spec="/lib${libsuff} /usr/lib${libsuff} /usr/local/lib${libsuff}" + ;; + esac + fi + rm -rf conftest* + ;; + esac + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:,\t]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib${libsuff} /usr/lib${libsuff} $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. 
+ dynamic_linker='GNU/Linux ld.so' + ;; + +knetbsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +nto-qnx*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +openbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[89] | openbsd2.[89].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +sco3.2v5*) + version_type=osf + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + ;; + +solaris*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + 
postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + export_dynamic_flag_spec='${wl}-Blargedynsym' + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +uts4*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +echo "$as_me:$LINENO: result: $dynamic_linker" >&5 +echo "${ECHO_T}$dynamic_linker" >&6 +test "$dynamic_linker" = no && can_build_shared=no + +echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 +echo $ECHO_N "checking how to hardcode library paths into programs... $ECHO_C" >&6 +hardcode_action_F77= +if test -n "$hardcode_libdir_flag_spec_F77" || \ + test -n "$runpath_var_F77" || \ + test "X$hardcode_automatic_F77" = "Xyes" ; then + + # We can hardcode non-existant directories. + if test "$hardcode_direct_F77" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, F77)" != no && + test "$hardcode_minus_L_F77" != no; then + # Linking always hardcodes the temporary library directory. + hardcode_action_F77=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action_F77=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + hardcode_action_F77=unsupported +fi +echo "$as_me:$LINENO: result: $hardcode_action_F77" >&5 +echo "${ECHO_T}$hardcode_action_F77" >&6 + +if test "$hardcode_action_F77" = relink; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi + +striplib= +old_striplib= +echo "$as_me:$LINENO: checking whether stripping libraries is possible" >&5 +echo $ECHO_N "checking whether stripping libraries is possible... 
$ECHO_C" >&6 +if test -n "$STRIP" && $STRIP -V 2>&1 | grep "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP" ; then + striplib="$STRIP -x" + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 + else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + ;; + *) + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 + ;; + esac +fi + + + +# The else clause should only fire when bootstrapping the +# libtool distribution, otherwise you forgot to ship ltmain.sh +# with your package, and you will get complaints that there are +# no rules to generate ltmain.sh. +if test -f "$ltmain"; then + # See if we are running on zsh, and set the options which allow our commands through + # without removal of \ escapes. + if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + # Now quote all the things that may contain metacharacters while being + # careful not to overquote the AC_SUBSTed values. We take copies of the + # variables and quote the copies for generation of the libtool script. + for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC NM \ + SED SHELL STRIP \ + libname_spec library_names_spec soname_spec extract_expsyms_cmds \ + old_striplib striplib file_magic_cmd finish_cmds finish_eval \ + deplibs_check_method reload_flag reload_cmds need_locks \ + lt_cv_sys_global_symbol_pipe lt_cv_sys_global_symbol_to_cdecl \ + lt_cv_sys_global_symbol_to_c_name_address \ + sys_lib_search_path_spec sys_lib_dlsearch_path_spec \ + old_postinstall_cmds old_postuninstall_cmds \ + compiler_F77 \ + CC_F77 \ + LD_F77 \ + lt_prog_compiler_wl_F77 \ + lt_prog_compiler_pic_F77 \ + lt_prog_compiler_static_F77 \ + lt_prog_compiler_no_builtin_flag_F77 \ + export_dynamic_flag_spec_F77 \ + thread_safe_flag_spec_F77 \ + whole_archive_flag_spec_F77 \ + enable_shared_with_static_runtimes_F77 \ + old_archive_cmds_F77 \ + old_archive_from_new_cmds_F77 \ + predep_objects_F77 \ + postdep_objects_F77 \ + predeps_F77 \ + postdeps_F77 \ + compiler_lib_search_path_F77 \ + archive_cmds_F77 \ + archive_expsym_cmds_F77 \ + postinstall_cmds_F77 \ + postuninstall_cmds_F77 \ + old_archive_from_expsyms_cmds_F77 \ + allow_undefined_flag_F77 \ + no_undefined_flag_F77 \ + export_symbols_cmds_F77 \ + hardcode_libdir_flag_spec_F77 \ + hardcode_libdir_flag_spec_ld_F77 \ + hardcode_libdir_separator_F77 \ + hardcode_automatic_F77 \ + module_cmds_F77 \ + module_expsym_cmds_F77 \ + lt_cv_prog_compiler_c_o_F77 \ + exclude_expsyms_F77 \ + include_expsyms_F77; do + + case $var in + old_archive_cmds_F77 | \ + old_archive_from_new_cmds_F77 | \ + archive_cmds_F77 | \ + archive_expsym_cmds_F77 | \ + module_cmds_F77 | \ + module_expsym_cmds_F77 | \ + old_archive_from_expsyms_cmds_F77 | \ + export_symbols_cmds_F77 | \ + extract_expsyms_cmds | reload_cmds | finish_cmds | \ + postinstall_cmds | postuninstall_cmds | \ + old_postinstall_cmds | old_postuninstall_cmds | \ + sys_lib_search_path_spec | sys_lib_dlsearch_path_spec) + # Double-quote double-evaled strings. 
+ eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\"" + ;; + *) + eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\"" + ;; + esac + done + + case $lt_echo in + *'\$0 --fallback-echo"') + lt_echo=`$echo "X$lt_echo" | $Xsed -e 's/\\\\\\\$0 --fallback-echo"$/$0 --fallback-echo"/'` + ;; + esac + +cfgfile="$ofile" + + cat <<__EOF__ >> "$cfgfile" +# ### BEGIN LIBTOOL TAG CONFIG: $tagname + +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc_F77 + +# Whether or not to disallow shared libs when runtime libs are static +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_F77 + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# An echo program that does not interpret backslashes. +echo=$lt_echo + +# The archiver. +AR=$lt_AR +AR_FLAGS=$lt_AR_FLAGS + +# A C compiler. +LTCC=$lt_LTCC + +# A language-specific compiler. +CC=$lt_compiler_F77 + +# Is the compiler the GNU C compiler? +with_gcc=$GCC_F77 + +# An ERE matcher. +EGREP=$lt_EGREP + +# The linker used to build libraries. +LD=$lt_LD_F77 + +# Whether we need hard or soft links. +LN_S=$lt_LN_S + +# A BSD-compatible nm program. +NM=$lt_NM + +# A symbol stripping program +STRIP=$lt_STRIP + +# Used to examine libraries when file_magic_cmd begins "file" +MAGIC_CMD=$MAGIC_CMD + +# Used on cygwin: DLL creation program. +DLLTOOL="$DLLTOOL" + +# Used on cygwin: object dumper. +OBJDUMP="$OBJDUMP" + +# Used on cygwin: assembler. +AS="$AS" + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl_F77 + +# Object file suffix (normally "o"). +objext="$ac_objext" + +# Old archive suffix (normally "a"). +libext="$libext" + +# Shared library suffix (normally ".so"). +shrext_cmds='$shrext_cmds' + +# Executable file suffix (normally ""). +exeext="$exeext" + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic_F77 +pic_mode=$pic_mode + +# What is the maximum length of a command? +max_cmd_len=$lt_cv_sys_max_cmd_len + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o_F77 + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Do we need the lib prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static_F77 + +# Compiler flag to turn off builtin functions. 
+no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_F77 + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_F77 + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec_F77 + +# Compiler flag to generate thread-safe objects. +thread_safe_flag_spec=$lt_thread_safe_flag_spec_F77 + +# Library versioning type. +version_type=$version_type + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME. +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Commands used to build and install an old-style archive. +RANLIB=$lt_RANLIB +old_archive_cmds=$lt_old_archive_cmds_F77 +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_F77 + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_F77 + +# Commands used to build and install a shared archive. +archive_cmds=$lt_archive_cmds_F77 +archive_expsym_cmds=$lt_archive_expsym_cmds_F77 +postinstall_cmds=$lt_postinstall_cmds +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to build a loadable module (assumed same as above if empty) +module_cmds=$lt_module_cmds_F77 +module_expsym_cmds=$lt_module_expsym_cmds_F77 + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + +# Dependencies to place before the objects being linked to create a +# shared library. +predep_objects=$lt_predep_objects_F77 + +# Dependencies to place after the objects being linked to create a +# shared library. +postdep_objects=$lt_postdep_objects_F77 + +# Dependencies to place before the objects being linked to create a +# shared library. +predeps=$lt_predeps_F77 + +# Dependencies to place after the objects being linked to create a +# shared library. +postdeps=$lt_postdeps_F77 + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_compiler_lib_search_path_F77 + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method == file_magic. +file_magic_cmd=$lt_file_magic_cmd + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag_F77 + +# Flag that forces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag_F77 + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# Same as above, but a single script fragment to be evaled but not shown. +finish_eval=$lt_finish_eval + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm in a C name address pair +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# This is the shared library runtime path variable. +runpath_var=$runpath_var + +# This is the shared library path variable. 
+shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action_F77 + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist. +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_F77 + +# If ld is used when linking, flag to hardcode \$libdir into +# a binary during linking. This must work even if \$libdir does +# not exist. +hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld_F77 + +# Whether we need a single -rpath flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator_F77 + +# Set to yes if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the +# resulting binary. +hardcode_direct=$hardcode_direct_F77 + +# Set to yes if using the -LDIR flag during linking hardcodes DIR into the +# resulting binary. +hardcode_minus_L=$hardcode_minus_L_F77 + +# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into +# the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var_F77 + +# Set to yes if building a shared library automatically hardcodes DIR into the library +# and all subsequent libraries and executables linked against it. +hardcode_automatic=$hardcode_automatic_F77 + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at relink time. +variables_saved_for_relink="$variables_saved_for_relink" + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs_F77 + +# Compile-time system search path for libraries +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec + +# Run-time system search path for libraries +sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec + +# Fix the shell variable \$srcfile for the compiler. +fix_srcfile_path="$fix_srcfile_path_F77" + +# Set to yes if exported symbols are required. +always_export_symbols=$always_export_symbols_F77 + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds_F77 + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms_F77 + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms_F77 + +# ### END LIBTOOL TAG CONFIG: $tagname + +__EOF__ + + +else + # If there is no Makefile yet, we rely on a make rule to execute + # `config.status --recheck' to rerun these tests and create the + # libtool script then. + ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` + if test -f "$ltmain_in"; then + test -f Makefile && make "$ltmain" + fi +fi + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +CC="$lt_save_CC" + + else + tagname="" + fi + ;; + + GCJ) + if test -n "$GCJ" && test "X$GCJ" != "Xno"; then + + + +# Source file extension for Java test sources. +ac_ext=java + +# Object file extension for compiled Java test sources. 
+objext=o +objext_GCJ=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="class foo {}\n" + +# Code to be used in simple link tests +lt_simple_link_test_code='public class conftest { public static void main(String[] argv) {}; }\n' + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# Allow CC to be a program name with arguments. +compiler=$CC + + +# save warnings/boilerplate of simple test code +ac_outfile=conftest.$ac_objext +printf "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$rm conftest* + +ac_outfile=conftest.$ac_objext +printf "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$rm conftest* + + +# Allow CC to be a program name with arguments. +lt_save_CC="$CC" +CC=${GCJ-"gcj"} +compiler=$CC +compiler_GCJ=$CC +for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` + + +# GCJ did not exist at the time GCC didn't implicitly link libc in. +archive_cmds_need_lc_GCJ=no + +old_archive_cmds_GCJ=$old_archive_cmds + + +lt_prog_compiler_no_builtin_flag_GCJ= + +if test "$GCC" = yes; then + lt_prog_compiler_no_builtin_flag_GCJ=' -fno-builtin' + + +echo "$as_me:$LINENO: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 +echo $ECHO_N "checking if $compiler supports -fno-rtti -fno-exceptions... $ECHO_C" >&6 +if test "${lt_cv_prog_compiler_rtti_exceptions+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_prog_compiler_rtti_exceptions=no + ac_outfile=conftest.$ac_objext + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="-fno-rtti -fno-exceptions" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:16623: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:16627: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $echo "X$_lt_compiler_boilerplate" | $Xsed >conftest.exp + $SED '/^$/d' conftest.err >conftest.er2 + if test ! 
-s conftest.err || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_rtti_exceptions=yes + fi + fi + $rm conftest* + +fi +echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 +echo "${ECHO_T}$lt_cv_prog_compiler_rtti_exceptions" >&6 + +if test x"$lt_cv_prog_compiler_rtti_exceptions" = xyes; then + lt_prog_compiler_no_builtin_flag_GCJ="$lt_prog_compiler_no_builtin_flag_GCJ -fno-rtti -fno-exceptions" +else + : +fi + +fi + +lt_prog_compiler_wl_GCJ= +lt_prog_compiler_pic_GCJ= +lt_prog_compiler_static_GCJ= + +echo "$as_me:$LINENO: checking for $compiler option to produce PIC" >&5 +echo $ECHO_N "checking for $compiler option to produce PIC... $ECHO_C" >&6 + + if test "$GCC" = yes; then + lt_prog_compiler_wl_GCJ='-Wl,' + lt_prog_compiler_static_GCJ='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_GCJ='-Bstatic' + fi + ;; + + amigaos*) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + lt_prog_compiler_pic_GCJ='-m68020 -resident32 -malways-restore-a4' + ;; + + beos* | cygwin* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + + mingw* | pw32* | os2*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic_GCJ='-DDLL_EXPORT' + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic_GCJ='-fno-common' + ;; + + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. + lt_prog_compiler_can_build_shared_GCJ=no + enable_shared=no + ;; + + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic_GCJ=-Kconform_pic + fi + ;; + + hpux*) + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case "$host_cpu" in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic_GCJ='-fPIC' + ;; + esac + ;; + + *) + lt_prog_compiler_pic_GCJ='-fPIC' + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. + case $host_os in + aix*) + lt_prog_compiler_wl_GCJ='-Wl,' + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_GCJ='-Bstatic' + else + lt_prog_compiler_static_GCJ='-bnso -bI:/lib/syscalls.exp' + fi + ;; + darwin*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + case $cc_basename in + xlc*) + lt_prog_compiler_pic_GCJ='-qnocommon' + lt_prog_compiler_wl_GCJ='-Wl,' + ;; + esac + ;; + + mingw* | pw32* | os2*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic_GCJ='-DDLL_EXPORT' + ;; + + hpux9* | hpux10* | hpux11*) + lt_prog_compiler_wl_GCJ='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case "$host_cpu" in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic_GCJ='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? 
+ lt_prog_compiler_static_GCJ='${wl}-a ${wl}archive' + ;; + + irix5* | irix6* | nonstopux*) + lt_prog_compiler_wl_GCJ='-Wl,' + # PIC (with -KPIC) is the default. + lt_prog_compiler_static_GCJ='-non_shared' + ;; + + newsos6) + lt_prog_compiler_pic_GCJ='-KPIC' + lt_prog_compiler_static_GCJ='-Bstatic' + ;; + + linux*) + case $cc_basename in + icc* | ecc*) + lt_prog_compiler_wl_GCJ='-Wl,' + lt_prog_compiler_pic_GCJ='-KPIC' + lt_prog_compiler_static_GCJ='-static' + ;; + pgcc* | pgf77* | pgf90*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + lt_prog_compiler_wl_GCJ='-Wl,' + lt_prog_compiler_pic_GCJ='-fpic' + lt_prog_compiler_static_GCJ='-static' + ;; + ccc*) + lt_prog_compiler_wl_GCJ='-Wl,' + # All Alpha code is PIC. + lt_prog_compiler_static_GCJ='-non_shared' + ;; + esac + ;; + + osf3* | osf4* | osf5*) + lt_prog_compiler_wl_GCJ='-Wl,' + # All OSF/1 code is PIC. + lt_prog_compiler_static_GCJ='-non_shared' + ;; + + sco3.2v5*) + lt_prog_compiler_pic_GCJ='-Kpic' + lt_prog_compiler_static_GCJ='-dn' + ;; + + solaris*) + lt_prog_compiler_pic_GCJ='-KPIC' + lt_prog_compiler_static_GCJ='-Bstatic' + case $cc_basename in + f77* | f90* | f95*) + lt_prog_compiler_wl_GCJ='-Qoption ld ';; + *) + lt_prog_compiler_wl_GCJ='-Wl,';; + esac + ;; + + sunos4*) + lt_prog_compiler_wl_GCJ='-Qoption ld ' + lt_prog_compiler_pic_GCJ='-PIC' + lt_prog_compiler_static_GCJ='-Bstatic' + ;; + + sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + lt_prog_compiler_wl_GCJ='-Wl,' + lt_prog_compiler_pic_GCJ='-KPIC' + lt_prog_compiler_static_GCJ='-Bstatic' + ;; + + sysv4*MP*) + if test -d /usr/nec ;then + lt_prog_compiler_pic_GCJ='-Kconform_pic' + lt_prog_compiler_static_GCJ='-Bstatic' + fi + ;; + + unicos*) + lt_prog_compiler_wl_GCJ='-Wl,' + lt_prog_compiler_can_build_shared_GCJ=no + ;; + + uts4*) + lt_prog_compiler_pic_GCJ='-pic' + lt_prog_compiler_static_GCJ='-Bstatic' + ;; + + *) + lt_prog_compiler_can_build_shared_GCJ=no + ;; + esac + fi + +echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_GCJ" >&5 +echo "${ECHO_T}$lt_prog_compiler_pic_GCJ" >&6 + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$lt_prog_compiler_pic_GCJ"; then + +echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic_GCJ works" >&5 +echo $ECHO_N "checking if $compiler PIC flag $lt_prog_compiler_pic_GCJ works... $ECHO_C" >&6 +if test "${lt_prog_compiler_pic_works_GCJ+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_prog_compiler_pic_works_GCJ=no + ac_outfile=conftest.$ac_objext + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic_GCJ" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:16885: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:16889: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. 
+ $echo "X$_lt_compiler_boilerplate" | $Xsed >conftest.exp + $SED '/^$/d' conftest.err >conftest.er2 + if test ! -s conftest.err || diff conftest.exp conftest.er2 >/dev/null; then + lt_prog_compiler_pic_works_GCJ=yes + fi + fi + $rm conftest* + +fi +echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_works_GCJ" >&5 +echo "${ECHO_T}$lt_prog_compiler_pic_works_GCJ" >&6 + +if test x"$lt_prog_compiler_pic_works_GCJ" = xyes; then + case $lt_prog_compiler_pic_GCJ in + "" | " "*) ;; + *) lt_prog_compiler_pic_GCJ=" $lt_prog_compiler_pic_GCJ" ;; + esac +else + lt_prog_compiler_pic_GCJ= + lt_prog_compiler_can_build_shared_GCJ=no +fi + +fi +case "$host_os" in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic_GCJ= + ;; + *) + lt_prog_compiler_pic_GCJ="$lt_prog_compiler_pic_GCJ" + ;; +esac + +echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5 +echo $ECHO_N "checking if $compiler supports -c -o file.$ac_objext... $ECHO_C" >&6 +if test "${lt_cv_prog_compiler_c_o_GCJ+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_prog_compiler_c_o_GCJ=no + $rm -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:16947: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:16951: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $echo "X$_lt_compiler_boilerplate" | $Xsed > out/conftest.exp + $SED '/^$/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.err || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o_GCJ=yes + fi + fi + chmod u+w . + $rm conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $rm out/ii_files/* && rmdir out/ii_files + $rm out/* && rmdir out + cd .. + rmdir conftest + $rm conftest* + +fi +echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_c_o_GCJ" >&5 +echo "${ECHO_T}$lt_cv_prog_compiler_c_o_GCJ" >&6 + + +hard_links="nottested" +if test "$lt_cv_prog_compiler_c_o_GCJ" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + echo "$as_me:$LINENO: checking if we can lock with hard links" >&5 +echo $ECHO_N "checking if we can lock with hard links... 
$ECHO_C" >&6 + hard_links=yes + $rm conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + echo "$as_me:$LINENO: result: $hard_links" >&5 +echo "${ECHO_T}$hard_links" >&6 + if test "$hard_links" = no; then + { echo "$as_me:$LINENO: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 +echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi + +echo "$as_me:$LINENO: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +echo $ECHO_N "checking whether the $compiler linker ($LD) supports shared libraries... $ECHO_C" >&6 + + runpath_var= + allow_undefined_flag_GCJ= + enable_shared_with_static_runtimes_GCJ=no + archive_cmds_GCJ= + archive_expsym_cmds_GCJ= + old_archive_From_new_cmds_GCJ= + old_archive_from_expsyms_cmds_GCJ= + export_dynamic_flag_spec_GCJ= + whole_archive_flag_spec_GCJ= + thread_safe_flag_spec_GCJ= + hardcode_libdir_flag_spec_GCJ= + hardcode_libdir_flag_spec_ld_GCJ= + hardcode_libdir_separator_GCJ= + hardcode_direct_GCJ=no + hardcode_minus_L_GCJ=no + hardcode_shlibpath_var_GCJ=unsupported + link_all_deplibs_GCJ=unknown + hardcode_automatic_GCJ=no + module_cmds_GCJ= + module_expsym_cmds_GCJ= + always_export_symbols_GCJ=no + export_symbols_cmds_GCJ='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + include_expsyms_GCJ= + # exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ` (' and `)$', so one must not match beginning or + # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', + # as well as any symbol that contains `d'. + exclude_expsyms_GCJ="_GLOBAL_OFFSET_TABLE_" + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + extract_expsyms_cmds= + # Just being paranoid about ensuring that cc_basename is set. + for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` + + case $host_os in + cygwin* | mingw* | pw32*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + if test "$GCC" != yes; then + with_gnu_ld=no + fi + ;; + openbsd*) + with_gnu_ld=no + ;; + esac + + ld_shlibs_GCJ=yes + if test "$with_gnu_ld" = yes; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='${wl}' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec_GCJ='${wl}--rpath ${wl}$libdir' + export_dynamic_flag_spec_GCJ='${wl}--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. 
+ if $LD --help 2>&1 | grep 'no-whole-archive' > /dev/null; then + whole_archive_flag_spec_GCJ="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + whole_archive_flag_spec_GCJ= + fi + supports_anon_versioning=no + case `$LD -v 2>/dev/null` in + *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + + # See if GNU ld supports shared libraries. + case $host_os in + aix3* | aix4* | aix5*) + # On AIX/PPC, the GNU linker is very broken + if test "$host_cpu" != ia64; then + ld_shlibs_GCJ=no + cat <<EOF 1>&2 + +*** Warning: the GNU linker, at least up to release 2.9.1, is reported +*** to be unable to reliably create shared libraries on AIX. +*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to modify your PATH +*** so that a non-GNU linker is found, and then restart. + +EOF + fi + ;; + + amigaos*) + archive_cmds_GCJ='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec_GCJ='-L$libdir' + hardcode_minus_L_GCJ=yes + + # Samuel A. Falvo II reports + # that the semantics of dynamic libraries on AmigaOS, at least up + # to version 4, is to share data among multiple programs linked + # with the same dynamic library. Since this doesn't match the + # behavior of shared libraries on other platforms, we can't use + # them. + ld_shlibs_GCJ=no + ;; + + beos*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + allow_undefined_flag_GCJ=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + archive_cmds_GCJ='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + ld_shlibs_GCJ=no + fi + ;; + + cygwin* | mingw* | pw32*) + # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, GCJ) is actually meaningless, + # as there is no search path for DLLs. + hardcode_libdir_flag_spec_GCJ='-L$libdir' + allow_undefined_flag_GCJ=unsupported + always_export_symbols_GCJ=no + enable_shared_with_static_runtimes_GCJ=yes + export_symbols_cmds_GCJ='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS] /s/.* \([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW] /s/.* //'\'' | sort | uniq > $export_symbols' + + if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then + archive_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--image-base=0x10000000 ${wl}--out-implib,$lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend...
+ archive_expsym_cmds_GCJ='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--image-base=0x10000000 ${wl}--out-implib,$lib' + else + ld_shlibs_GCJ=no + fi + ;; + + linux*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + tmp_addflag= + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + whole_archive_flag_spec_GCJ= + ;; + pgf77* | pgf90* ) # Portland Group f77 and f90 compilers + whole_archive_flag_spec_GCJ= + tmp_addflag=' -fpic -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + esac + archive_cmds_GCJ='$CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + + if test $supports_anon_versioning = yes; then + archive_expsym_cmds_GCJ='$echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + $echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + else + ld_shlibs_GCJ=no + fi + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + archive_cmds_GCJ='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + archive_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + + solaris* | sysv5*) + if $LD -v 2>&1 | grep 'BFD 2\.8' > /dev/null; then + ld_shlibs_GCJ=no + cat <<EOF 1>&2 + +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart.
+ +EOF + elif $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + archive_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs_GCJ=no + fi + ;; + + sunos4*) + archive_cmds_GCJ='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + hardcode_direct_GCJ=yes + hardcode_shlibpath_var_GCJ=no + ;; + + *) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + archive_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs_GCJ=no + fi + ;; + esac + + if test "$ld_shlibs_GCJ" = no; then + runpath_var= + hardcode_libdir_flag_spec_GCJ= + export_dynamic_flag_spec_GCJ= + whole_archive_flag_spec_GCJ= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + allow_undefined_flag_GCJ=unsupported + always_export_symbols_GCJ=yes + archive_expsym_cmds_GCJ='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. + hardcode_minus_L_GCJ=yes + if test "$GCC" = yes && test -z "$link_static_flag"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + hardcode_direct_GCJ=unsupported + fi + ;; + + aix4* | aix5*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to AIX nm, but means don't demangle with GNU nm + if $NM -V 2>&1 | grep 'GNU' > /dev/null; then + export_symbols_cmds_GCJ='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds_GCJ='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[23]|aix4.[23].*|aix5*) + for ld_flag in $LDFLAGS; do + if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then + aix_use_runtimelinking=yes + break + fi + done + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. 
+ + archive_cmds_GCJ='' + hardcode_direct_GCJ=yes + hardcode_libdir_separator_GCJ=':' + link_all_deplibs_GCJ=yes + + if test "$GCC" = yes; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && \ + strings "$collect2name" | grep resolve_lib_name >/dev/null + then + # We have reworked collect2 + hardcode_direct_GCJ=yes + else + # We have old collect2 + hardcode_direct_GCJ=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L_GCJ=yes + hardcode_libdir_flag_spec_GCJ='-L$libdir' + hardcode_libdir_separator_GCJ= + fi + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + always_export_symbols_GCJ=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + allow_undefined_flag_GCJ='-berok' + # Determine the default libpath from the value encoded in an empty executable. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. 
+if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hardcode_libdir_flag_spec_GCJ='${wl}-blibpath:$libdir:'"$aix_libpath" + archive_expsym_cmds_GCJ="\$CC"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$no_entry_flag \${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + hardcode_libdir_flag_spec_GCJ='${wl}-R $libdir:/usr/lib:/lib' + allow_undefined_flag_GCJ="-z nodefs" + archive_expsym_cmds_GCJ="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$no_entry_flag \${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an empty executable. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. +if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hardcode_libdir_flag_spec_GCJ='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + no_undefined_flag_GCJ=' ${wl}-bernotok' + allow_undefined_flag_GCJ=' ${wl}-berok' + # -bexpall does not export symbols beginning with underscore (_) + always_export_symbols_GCJ=yes + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec_GCJ=' ' + archive_cmds_need_lc_GCJ=yes + # This is similar to how AIX traditionally builds it's shared libraries. 
+ archive_expsym_cmds_GCJ="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${wl}-bE:$export_symbols ${wl}-bnoentry${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + amigaos*) + archive_cmds_GCJ='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec_GCJ='-L$libdir' + hardcode_minus_L_GCJ=yes + # see comment about different semantics on the GNU ld section + ld_shlibs_GCJ=no + ;; + + bsdi[45]*) + export_dynamic_flag_spec_GCJ=-rdynamic + ;; + + cygwin* | mingw* | pw32*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + hardcode_libdir_flag_spec_GCJ=' ' + allow_undefined_flag_GCJ=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. + archive_cmds_GCJ='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | $SED -e '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + old_archive_From_new_cmds_GCJ='true' + # FIXME: Should let the user specify the lib program. + old_archive_cmds_GCJ='lib /OUT:$oldlib$oldobjs$old_deplibs' + fix_srcfile_path_GCJ='`cygpath -w "$srcfile"`' + enable_shared_with_static_runtimes_GCJ=yes + ;; + + darwin* | rhapsody*) + case "$host_os" in + rhapsody* | darwin1.[012]) + allow_undefined_flag_GCJ='${wl}-undefined ${wl}suppress' + ;; + *) # Darwin 1.3 on + if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then + allow_undefined_flag_GCJ='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + else + case ${MACOSX_DEPLOYMENT_TARGET} in + 10.[012]) + allow_undefined_flag_GCJ='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + ;; + 10.*) + allow_undefined_flag_GCJ='${wl}-undefined ${wl}dynamic_lookup' + ;; + esac + fi + ;; + esac + archive_cmds_need_lc_GCJ=no + hardcode_direct_GCJ=no + hardcode_automatic_GCJ=yes + hardcode_shlibpath_var_GCJ=unsupported + whole_archive_flag_spec_GCJ='' + link_all_deplibs_GCJ=yes + if test "$GCC" = yes ; then + output_verbose_link_cmd='echo' + archive_cmds_GCJ='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + module_cmds_GCJ='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's + archive_expsym_cmds_GCJ='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds_GCJ='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + case $cc_basename 
in + xlc*) + output_verbose_link_cmd='echo' + archive_cmds_GCJ='$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring' + module_cmds_GCJ='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's + archive_expsym_cmds_GCJ='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds_GCJ='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + ;; + *) + ld_shlibs_GCJ=no + ;; + esac + fi + ;; + + dgux*) + archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec_GCJ='-L$libdir' + hardcode_shlibpath_var_GCJ=no + ;; + + freebsd1*) + ld_shlibs_GCJ=no + ;; + + # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). + freebsd2.2*) + archive_cmds_GCJ='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + hardcode_libdir_flag_spec_GCJ='-R$libdir' + hardcode_direct_GCJ=yes + hardcode_shlibpath_var_GCJ=no + ;; + + # Unfortunately, older versions of FreeBSD 2 do not have this feature. + freebsd2*) + archive_cmds_GCJ='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct_GCJ=yes + hardcode_minus_L_GCJ=yes + hardcode_shlibpath_var_GCJ=no + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. + freebsd* | kfreebsd*-gnu | dragonfly*) + archive_cmds_GCJ='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec_GCJ='-R$libdir' + hardcode_direct_GCJ=yes + hardcode_shlibpath_var_GCJ=no + ;; + + hpux9*) + if test "$GCC" = yes; then + archive_cmds_GCJ='$rm $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + archive_cmds_GCJ='$rm $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi + hardcode_libdir_flag_spec_GCJ='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_GCJ=: + hardcode_direct_GCJ=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ hardcode_minus_L_GCJ=yes + export_dynamic_flag_spec_GCJ='${wl}-E' + ;; + + hpux10* | hpux11*) + if test "$GCC" = yes -a "$with_gnu_ld" = no; then + case "$host_cpu" in + hppa*64*|ia64*) + archive_cmds_GCJ='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds_GCJ='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case "$host_cpu" in + hppa*64*|ia64*) + archive_cmds_GCJ='$LD -b +h $soname -o $lib $libobjs $deplibs $linker_flags' + ;; + *) + archive_cmds_GCJ='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + ;; + esac + fi + if test "$with_gnu_ld" = no; then + case "$host_cpu" in + hppa*64*) + hardcode_libdir_flag_spec_GCJ='${wl}+b ${wl}$libdir' + hardcode_libdir_flag_spec_ld_GCJ='+b $libdir' + hardcode_libdir_separator_GCJ=: + hardcode_direct_GCJ=no + hardcode_shlibpath_var_GCJ=no + ;; + ia64*) + hardcode_libdir_flag_spec_GCJ='-L$libdir' + hardcode_direct_GCJ=no + hardcode_shlibpath_var_GCJ=no + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L_GCJ=yes + ;; + *) + hardcode_libdir_flag_spec_GCJ='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_GCJ=: + hardcode_direct_GCJ=yes + export_dynamic_flag_spec_GCJ='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L_GCJ=yes + ;; + esac + fi + ;; + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then + archive_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + archive_cmds_GCJ='$LD -shared $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec_ld_GCJ='-rpath $libdir' + fi + hardcode_libdir_flag_spec_GCJ='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_GCJ=: + link_all_deplibs_GCJ=yes + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + archive_cmds_GCJ='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + archive_cmds_GCJ='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + hardcode_libdir_flag_spec_GCJ='-R$libdir' + hardcode_direct_GCJ=yes + hardcode_shlibpath_var_GCJ=no + ;; + + newsos6) + archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct_GCJ=yes + hardcode_libdir_flag_spec_GCJ='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_GCJ=: + hardcode_shlibpath_var_GCJ=no + ;; + + openbsd*) + hardcode_direct_GCJ=yes + hardcode_shlibpath_var_GCJ=no + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + archive_cmds_GCJ='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_GCJ='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' + hardcode_libdir_flag_spec_GCJ='${wl}-rpath,$libdir' + export_dynamic_flag_spec_GCJ='${wl}-E' + else + case $host_os in + openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) + archive_cmds_GCJ='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec_GCJ='-R$libdir' + ;; + *) + archive_cmds_GCJ='$CC -shared $pic_flag -o $lib $libobjs 
$deplibs $compiler_flags' + hardcode_libdir_flag_spec_GCJ='${wl}-rpath,$libdir' + ;; + esac + fi + ;; + + os2*) + hardcode_libdir_flag_spec_GCJ='-L$libdir' + hardcode_minus_L_GCJ=yes + allow_undefined_flag_GCJ=unsupported + archive_cmds_GCJ='$echo "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$echo "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$echo DATA >> $output_objdir/$libname.def~$echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~$echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' + old_archive_From_new_cmds_GCJ='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' + ;; + + osf3*) + if test "$GCC" = yes; then + allow_undefined_flag_GCJ=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds_GCJ='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + allow_undefined_flag_GCJ=' -expect_unresolved \*' + archive_cmds_GCJ='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + fi + hardcode_libdir_flag_spec_GCJ='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_GCJ=: + ;; + + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + allow_undefined_flag_GCJ=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds_GCJ='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec_GCJ='${wl}-rpath ${wl}$libdir' + else + allow_undefined_flag_GCJ=' -expect_unresolved \*' + archive_cmds_GCJ='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds_GCJ='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; echo "-hidden">> $lib.exp~ + $LD -shared${allow_undefined_flag} -input $lib.exp $linker_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~$rm $lib.exp' + + # Both c and cxx compiler support -rpath directly + hardcode_libdir_flag_spec_GCJ='-rpath $libdir' + fi + hardcode_libdir_separator_GCJ=: + ;; + + sco3.2v5*) + archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var_GCJ=no + export_dynamic_flag_spec_GCJ='${wl}-Bexport' + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + ;; + + solaris*) + no_undefined_flag_GCJ=' -z text' + if test "$GCC" = yes; then + wlarc='${wl}' + archive_cmds_GCJ='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_GCJ='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -shared ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$rm $lib.exp' + else + wlarc='' + archive_cmds_GCJ='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + 
archive_expsym_cmds_GCJ='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp' + fi + hardcode_libdir_flag_spec_GCJ='-R$libdir' + hardcode_shlibpath_var_GCJ=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The compiler driver will combine linker options so we + # cannot just pass the convience library names through + # without $wl, iff we do not link with $LD. + # Luckily, gcc supports the same syntax we need for Sun Studio. + # Supported since Solaris 2.6 (maybe 2.5.1?) + case $wlarc in + '') + whole_archive_flag_spec_GCJ='-z allextract$convenience -z defaultextract' ;; + *) + whole_archive_flag_spec_GCJ='${wl}-z ${wl}allextract`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}-z ${wl}defaultextract' ;; + esac ;; + esac + link_all_deplibs_GCJ=yes + ;; + + sunos4*) + if test "x$host_vendor" = xsequent; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. + archive_cmds_GCJ='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds_GCJ='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + hardcode_libdir_flag_spec_GCJ='-L$libdir' + hardcode_direct_GCJ=yes + hardcode_minus_L_GCJ=yes + hardcode_shlibpath_var_GCJ=no + ;; + + sysv4) + case $host_vendor in + sni) + archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct_GCJ=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. 
+ archive_cmds_GCJ='$LD -G -o $lib $libobjs $deplibs $linker_flags' + reload_cmds_GCJ='$CC -r -o $output$reload_objs' + hardcode_direct_GCJ=no + ;; + motorola) + archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct_GCJ=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + hardcode_shlibpath_var_GCJ=no + ;; + + sysv4.3*) + archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var_GCJ=no + export_dynamic_flag_spec_GCJ='-Bexport' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var_GCJ=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + ld_shlibs_GCJ=yes + fi + ;; + + sysv4.2uw2*) + archive_cmds_GCJ='$LD -G -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct_GCJ=yes + hardcode_minus_L_GCJ=no + hardcode_shlibpath_var_GCJ=no + hardcode_runpath_var=yes + runpath_var=LD_RUN_PATH + ;; + + sysv5OpenUNIX8* | sysv5UnixWare7* | sysv5uw[78]* | unixware7*) + no_undefined_flag_GCJ='${wl}-z ${wl}text' + if test "$GCC" = yes; then + archive_cmds_GCJ='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds_GCJ='$CC -G ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + runpath_var='LD_RUN_PATH' + hardcode_shlibpath_var_GCJ=no + ;; + + sysv5*) + no_undefined_flag_GCJ=' -z text' + # $CC -shared without GNU ld will not create a library from C++ + # object files and a static libstdc++, better avoid it by now + archive_cmds_GCJ='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + archive_expsym_cmds_GCJ='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp' + hardcode_libdir_flag_spec_GCJ= + hardcode_shlibpath_var_GCJ=no + runpath_var='LD_RUN_PATH' + ;; + + uts4*) + archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec_GCJ='-L$libdir' + hardcode_shlibpath_var_GCJ=no + ;; + + *) + ld_shlibs_GCJ=no + ;; + esac + fi + +echo "$as_me:$LINENO: result: $ld_shlibs_GCJ" >&5 +echo "${ECHO_T}$ld_shlibs_GCJ" >&6 +test "$ld_shlibs_GCJ" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +# +# Do we need to explicitly link libc? +# +case "x$archive_cmds_need_lc_GCJ" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc_GCJ=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $archive_cmds_GCJ in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + echo "$as_me:$LINENO: checking whether -lc should be explicitly linked in" >&5 +echo $ECHO_N "checking whether -lc should be explicitly linked in... $ECHO_C" >&6 + $rm conftest* + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl_GCJ + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag_GCJ + allow_undefined_flag_GCJ= + if { (eval echo "$as_me:$LINENO: \"$archive_cmds_GCJ 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1\"") >&5 + (eval $archive_cmds_GCJ 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + then + archive_cmds_need_lc_GCJ=no + else + archive_cmds_need_lc_GCJ=yes + fi + allow_undefined_flag_GCJ=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $rm conftest* + echo "$as_me:$LINENO: result: $archive_cmds_need_lc_GCJ" >&5 +echo "${ECHO_T}$archive_cmds_need_lc_GCJ" >&6 + ;; + esac + fi + ;; +esac + +echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5 +echo $ECHO_N "checking dynamic linker characteristics... $ECHO_C" >&6 +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';' >/dev/null ; then + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix4* | aix5*) + version_type=linux + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. 
+ case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | grep yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. + finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[45]*) + version_type=linux + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$host_os in + yes,cygwin* | yes,mingw* | yes,pw32*) + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i;echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $rm \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" + ;; + mingw*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';[c-zC-Z]:/' >/dev/null; then + # It is most probably a Windows format PATH printed by + # mingw gcc, but we are running on Cygwin. Gcc prints its search + # path with ; separators, and with drive letters. We can handle the + # drive letters (cygwin fileutils understands them), so leave them, + # especially as we might pass files found there to a mingw objdump, + # which wouldn't understand a cygwinified path. Ahh. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac + ;; + + *) + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' + ;; + esac + dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${versuffix}$shared_ext ${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='$(test .$module = .yes && echo .so || echo .dylib)' + # Apple's gcc prints 'gcc -print-search-dirs' doesn't operate the same. + if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"` + else + sys_lib_search_path_spec='/lib /usr/lib /usr/local/lib' + fi + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd1*) + dynamic_linker=no + ;; + +kfreebsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. 
+ objformat=`test -x /usr/bin/objformat && /usr/bin/objformat || echo aout` + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[01]* | freebsdelf3.[01]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + *) # from 3.2 on + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + esac + ;; + +gnu*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case "$host_cpu" in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555. 
+ postinstall_cmds='chmod 555 $lib' + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be Linux ELF. +linux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # find out which ABI we are using + libsuff= + case "$host_cpu" in + x86_64*|s390x*|powerpc64*) + echo '#line 18332 "configure"' > conftest.$ac_ext + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + case `/usr/bin/file conftest.$ac_objext` in + *64-bit*) + libsuff=64 + sys_lib_search_path_spec="/lib${libsuff} /usr/lib${libsuff} /usr/local/lib${libsuff}" + ;; + esac + fi + rm -rf conftest* + ;; + esac + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:,\t]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib${libsuff} /usr/lib${libsuff} $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. 
+ dynamic_linker='GNU/Linux ld.so' + ;; + +knetbsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +nto-qnx*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +openbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[89] | openbsd2.[89].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +sco3.2v5*) + version_type=osf + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + ;; + +solaris*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + 
postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + export_dynamic_flag_spec='${wl}-Blargedynsym' + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +uts4*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +echo "$as_me:$LINENO: result: $dynamic_linker" >&5 +echo "${ECHO_T}$dynamic_linker" >&6 +test "$dynamic_linker" = no && can_build_shared=no + +echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 +echo $ECHO_N "checking how to hardcode library paths into programs... $ECHO_C" >&6 +hardcode_action_GCJ= +if test -n "$hardcode_libdir_flag_spec_GCJ" || \ + test -n "$runpath_var_GCJ" || \ + test "X$hardcode_automatic_GCJ" = "Xyes" ; then + + # We can hardcode non-existant directories. + if test "$hardcode_direct_GCJ" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, GCJ)" != no && + test "$hardcode_minus_L_GCJ" != no; then + # Linking always hardcodes the temporary library directory. + hardcode_action_GCJ=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action_GCJ=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + hardcode_action_GCJ=unsupported +fi +echo "$as_me:$LINENO: result: $hardcode_action_GCJ" >&5 +echo "${ECHO_T}$hardcode_action_GCJ" >&6 + +if test "$hardcode_action_GCJ" = relink; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi + +striplib= +old_striplib= +echo "$as_me:$LINENO: checking whether stripping libraries is possible" >&5 +echo $ECHO_N "checking whether stripping libraries is possible... 
$ECHO_C" >&6 +if test -n "$STRIP" && $STRIP -V 2>&1 | grep "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP" ; then + striplib="$STRIP -x" + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 + else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + ;; + *) + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 + ;; + esac +fi + +if test "x$enable_dlopen" != xyes; then + enable_dlopen=unknown + enable_dlopen_self=unknown + enable_dlopen_self_static=unknown +else + lt_cv_dlopen=no + lt_cv_dlopen_libs= + + case $host_os in + beos*) + lt_cv_dlopen="load_add_on" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ;; + + mingw* | pw32*) + lt_cv_dlopen="LoadLibrary" + lt_cv_dlopen_libs= + ;; + + cygwin*) + lt_cv_dlopen="dlopen" + lt_cv_dlopen_libs= + ;; + + darwin*) + # if libdl is installed we need to link against it + echo "$as_me:$LINENO: checking for dlopen in -ldl" >&5 +echo $ECHO_N "checking for dlopen in -ldl... $ECHO_C" >&6 +if test "${ac_cv_lib_dl_dlopen+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char dlopen (); +int +main () +{ +dlopen (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_dl_dlopen=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_dl_dlopen=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5 +echo "${ECHO_T}$ac_cv_lib_dl_dlopen" >&6 +if test $ac_cv_lib_dl_dlopen = yes; then + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" +else + + lt_cv_dlopen="dyld" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + +fi + + ;; + + *) + echo "$as_me:$LINENO: checking for shl_load" >&5 +echo $ECHO_N "checking for shl_load... $ECHO_C" >&6 +if test "${ac_cv_func_shl_load+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ +/* Define shl_load to an innocuous variant, in case declares shl_load. + For example, HP-UX 11i declares gettimeofday. */ +#define shl_load innocuous_shl_load + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char shl_load (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef shl_load + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +{ +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char shl_load (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined (__stub_shl_load) || defined (__stub___shl_load) +choke me +#else +char (*f) () = shl_load; +#endif +#ifdef __cplusplus +} +#endif + +int +main () +{ +return f != shl_load; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_func_shl_load=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_func_shl_load=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_func_shl_load" >&5 +echo "${ECHO_T}$ac_cv_func_shl_load" >&6 +if test $ac_cv_func_shl_load = yes; then + lt_cv_dlopen="shl_load" +else + echo "$as_me:$LINENO: checking for shl_load in -ldld" >&5 +echo $ECHO_N "checking for shl_load in -ldld... $ECHO_C" >&6 +if test "${ac_cv_lib_dld_shl_load+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char shl_load (); +int +main () +{ +shl_load (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_dld_shl_load=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_dld_shl_load=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_dld_shl_load" >&5 +echo "${ECHO_T}$ac_cv_lib_dld_shl_load" >&6 +if test $ac_cv_lib_dld_shl_load = yes; then + lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-dld" +else + echo "$as_me:$LINENO: checking for dlopen" >&5 +echo $ECHO_N "checking for dlopen... $ECHO_C" >&6 +if test "${ac_cv_func_dlopen+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +/* Define dlopen to an innocuous variant, in case declares dlopen. + For example, HP-UX 11i declares gettimeofday. */ +#define dlopen innocuous_dlopen + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char dlopen (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef dlopen + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +{ +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char dlopen (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined (__stub_dlopen) || defined (__stub___dlopen) +choke me +#else +char (*f) () = dlopen; +#endif +#ifdef __cplusplus +} +#endif + +int +main () +{ +return f != dlopen; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_func_dlopen=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_func_dlopen=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_func_dlopen" >&5 +echo "${ECHO_T}$ac_cv_func_dlopen" >&6 +if test $ac_cv_func_dlopen = yes; then + lt_cv_dlopen="dlopen" +else + echo "$as_me:$LINENO: checking for dlopen in -ldl" >&5 +echo $ECHO_N "checking for dlopen in -ldl... 
$ECHO_C" >&6 +if test "${ac_cv_lib_dl_dlopen+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char dlopen (); +int +main () +{ +dlopen (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_dl_dlopen=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_dl_dlopen=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5 +echo "${ECHO_T}$ac_cv_lib_dl_dlopen" >&6 +if test $ac_cv_lib_dl_dlopen = yes; then + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" +else + echo "$as_me:$LINENO: checking for dlopen in -lsvld" >&5 +echo $ECHO_N "checking for dlopen in -lsvld... $ECHO_C" >&6 +if test "${ac_cv_lib_svld_dlopen+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsvld $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char dlopen (); +int +main () +{ +dlopen (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_svld_dlopen=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_svld_dlopen=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_svld_dlopen" >&5 +echo "${ECHO_T}$ac_cv_lib_svld_dlopen" >&6 +if test $ac_cv_lib_svld_dlopen = yes; then + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld" +else + echo "$as_me:$LINENO: checking for dld_link in -ldld" >&5 +echo $ECHO_N "checking for dld_link in -ldld... $ECHO_C" >&6 +if test "${ac_cv_lib_dld_dld_link+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char dld_link (); +int +main () +{ +dld_link (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_dld_dld_link=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_dld_dld_link=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_dld_dld_link" >&5 +echo "${ECHO_T}$ac_cv_lib_dld_dld_link" >&6 +if test $ac_cv_lib_dld_dld_link = yes; then + lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-dld" +fi + + +fi + + +fi + + +fi + + +fi + + +fi + + ;; + esac + + if test "x$lt_cv_dlopen" != xno; then + enable_dlopen=yes + else + enable_dlopen=no + fi + + case $lt_cv_dlopen in + dlopen) + save_CPPFLAGS="$CPPFLAGS" + test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" + + save_LDFLAGS="$LDFLAGS" + eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" + + save_LIBS="$LIBS" + LIBS="$lt_cv_dlopen_libs $LIBS" + + echo "$as_me:$LINENO: checking whether a program can dlopen itself" >&5 +echo $ECHO_N "checking whether a program can dlopen itself... 
$ECHO_C" >&6 +if test "${lt_cv_dlopen_self+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test "$cross_compiling" = yes; then : + lt_cv_dlopen_self=cross +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext < +#endif + +#include + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. */ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +#ifdef __cplusplus +extern "C" void exit (int); +#endif + +void fnord() { int i=42;} +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + /* dlclose (self); */ + } + + exit (status); +} +EOF + if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) 2>/dev/null + lt_status=$? + case x$lt_status in + x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;; + x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;; + x$lt_unknown|x*) lt_cv_dlopen_self=no ;; + esac + else : + # compilation failed + lt_cv_dlopen_self=no + fi +fi +rm -fr conftest* + + +fi +echo "$as_me:$LINENO: result: $lt_cv_dlopen_self" >&5 +echo "${ECHO_T}$lt_cv_dlopen_self" >&6 + + if test "x$lt_cv_dlopen_self" = xyes; then + LDFLAGS="$LDFLAGS $link_static_flag" + echo "$as_me:$LINENO: checking whether a statically linked program can dlopen itself" >&5 +echo $ECHO_N "checking whether a statically linked program can dlopen itself... $ECHO_C" >&6 +if test "${lt_cv_dlopen_self_static+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test "$cross_compiling" = yes; then : + lt_cv_dlopen_self_static=cross +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext < +#endif + +#include + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. 
*/ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +#ifdef __cplusplus +extern "C" void exit (int); +#endif + +void fnord() { int i=42;} +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + /* dlclose (self); */ + } + + exit (status); +} +EOF + if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) 2>/dev/null + lt_status=$? + case x$lt_status in + x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_unknown|x*) lt_cv_dlopen_self_static=no ;; + esac + else : + # compilation failed + lt_cv_dlopen_self_static=no + fi +fi +rm -fr conftest* + + +fi +echo "$as_me:$LINENO: result: $lt_cv_dlopen_self_static" >&5 +echo "${ECHO_T}$lt_cv_dlopen_self_static" >&6 + fi + + CPPFLAGS="$save_CPPFLAGS" + LDFLAGS="$save_LDFLAGS" + LIBS="$save_LIBS" + ;; + esac + + case $lt_cv_dlopen_self in + yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; + *) enable_dlopen_self=unknown ;; + esac + + case $lt_cv_dlopen_self_static in + yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; + *) enable_dlopen_self_static=unknown ;; + esac +fi + + +# The else clause should only fire when bootstrapping the +# libtool distribution, otherwise you forgot to ship ltmain.sh +# with your package, and you will get complaints that there are +# no rules to generate ltmain.sh. +if test -f "$ltmain"; then + # See if we are running on zsh, and set the options which allow our commands through + # without removal of \ escapes. + if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + # Now quote all the things that may contain metacharacters while being + # careful not to overquote the AC_SUBSTed values. We take copies of the + # variables and quote the copies for generation of the libtool script. 
+ for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC NM \ + SED SHELL STRIP \ + libname_spec library_names_spec soname_spec extract_expsyms_cmds \ + old_striplib striplib file_magic_cmd finish_cmds finish_eval \ + deplibs_check_method reload_flag reload_cmds need_locks \ + lt_cv_sys_global_symbol_pipe lt_cv_sys_global_symbol_to_cdecl \ + lt_cv_sys_global_symbol_to_c_name_address \ + sys_lib_search_path_spec sys_lib_dlsearch_path_spec \ + old_postinstall_cmds old_postuninstall_cmds \ + compiler_GCJ \ + CC_GCJ \ + LD_GCJ \ + lt_prog_compiler_wl_GCJ \ + lt_prog_compiler_pic_GCJ \ + lt_prog_compiler_static_GCJ \ + lt_prog_compiler_no_builtin_flag_GCJ \ + export_dynamic_flag_spec_GCJ \ + thread_safe_flag_spec_GCJ \ + whole_archive_flag_spec_GCJ \ + enable_shared_with_static_runtimes_GCJ \ + old_archive_cmds_GCJ \ + old_archive_from_new_cmds_GCJ \ + predep_objects_GCJ \ + postdep_objects_GCJ \ + predeps_GCJ \ + postdeps_GCJ \ + compiler_lib_search_path_GCJ \ + archive_cmds_GCJ \ + archive_expsym_cmds_GCJ \ + postinstall_cmds_GCJ \ + postuninstall_cmds_GCJ \ + old_archive_from_expsyms_cmds_GCJ \ + allow_undefined_flag_GCJ \ + no_undefined_flag_GCJ \ + export_symbols_cmds_GCJ \ + hardcode_libdir_flag_spec_GCJ \ + hardcode_libdir_flag_spec_ld_GCJ \ + hardcode_libdir_separator_GCJ \ + hardcode_automatic_GCJ \ + module_cmds_GCJ \ + module_expsym_cmds_GCJ \ + lt_cv_prog_compiler_c_o_GCJ \ + exclude_expsyms_GCJ \ + include_expsyms_GCJ; do + + case $var in + old_archive_cmds_GCJ | \ + old_archive_from_new_cmds_GCJ | \ + archive_cmds_GCJ | \ + archive_expsym_cmds_GCJ | \ + module_cmds_GCJ | \ + module_expsym_cmds_GCJ | \ + old_archive_from_expsyms_cmds_GCJ | \ + export_symbols_cmds_GCJ | \ + extract_expsyms_cmds | reload_cmds | finish_cmds | \ + postinstall_cmds | postuninstall_cmds | \ + old_postinstall_cmds | old_postuninstall_cmds | \ + sys_lib_search_path_spec | sys_lib_dlsearch_path_spec) + # Double-quote double-evaled strings. + eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\"" + ;; + *) + eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\"" + ;; + esac + done + + case $lt_echo in + *'\$0 --fallback-echo"') + lt_echo=`$echo "X$lt_echo" | $Xsed -e 's/\\\\\\\$0 --fallback-echo"$/$0 --fallback-echo"/'` + ;; + esac + +cfgfile="$ofile" + + cat <<__EOF__ >> "$cfgfile" +# ### BEGIN LIBTOOL TAG CONFIG: $tagname + +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc_GCJ + +# Whether or not to disallow shared libs when runtime libs are static +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_GCJ + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# An echo program that does not interpret backslashes. +echo=$lt_echo + +# The archiver. +AR=$lt_AR +AR_FLAGS=$lt_AR_FLAGS + +# A C compiler. +LTCC=$lt_LTCC + +# A language-specific compiler. +CC=$lt_compiler_GCJ + +# Is the compiler the GNU C compiler? 
+with_gcc=$GCC_GCJ + +# An ERE matcher. +EGREP=$lt_EGREP + +# The linker used to build libraries. +LD=$lt_LD_GCJ + +# Whether we need hard or soft links. +LN_S=$lt_LN_S + +# A BSD-compatible nm program. +NM=$lt_NM + +# A symbol stripping program +STRIP=$lt_STRIP + +# Used to examine libraries when file_magic_cmd begins "file" +MAGIC_CMD=$MAGIC_CMD + +# Used on cygwin: DLL creation program. +DLLTOOL="$DLLTOOL" + +# Used on cygwin: object dumper. +OBJDUMP="$OBJDUMP" + +# Used on cygwin: assembler. +AS="$AS" + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl_GCJ + +# Object file suffix (normally "o"). +objext="$ac_objext" + +# Old archive suffix (normally "a"). +libext="$libext" + +# Shared library suffix (normally ".so"). +shrext_cmds='$shrext_cmds' + +# Executable file suffix (normally ""). +exeext="$exeext" + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic_GCJ +pic_mode=$pic_mode + +# What is the maximum length of a command? +max_cmd_len=$lt_cv_sys_max_cmd_len + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o_GCJ + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Do we need the lib prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static_GCJ + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_GCJ + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_GCJ + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec_GCJ + +# Compiler flag to generate thread-safe objects. +thread_safe_flag_spec=$lt_thread_safe_flag_spec_GCJ + +# Library versioning type. +version_type=$version_type + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME. +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Commands used to build and install an old-style archive. +RANLIB=$lt_RANLIB +old_archive_cmds=$lt_old_archive_cmds_GCJ +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_GCJ + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_GCJ + +# Commands used to build and install a shared archive. 
+archive_cmds=$lt_archive_cmds_GCJ +archive_expsym_cmds=$lt_archive_expsym_cmds_GCJ +postinstall_cmds=$lt_postinstall_cmds +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to build a loadable module (assumed same as above if empty) +module_cmds=$lt_module_cmds_GCJ +module_expsym_cmds=$lt_module_expsym_cmds_GCJ + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + +# Dependencies to place before the objects being linked to create a +# shared library. +predep_objects=$lt_predep_objects_GCJ + +# Dependencies to place after the objects being linked to create a +# shared library. +postdep_objects=$lt_postdep_objects_GCJ + +# Dependencies to place before the objects being linked to create a +# shared library. +predeps=$lt_predeps_GCJ + +# Dependencies to place after the objects being linked to create a +# shared library. +postdeps=$lt_postdeps_GCJ + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_compiler_lib_search_path_GCJ + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method == file_magic. +file_magic_cmd=$lt_file_magic_cmd + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag_GCJ + +# Flag that forces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag_GCJ + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# Same as above, but a single script fragment to be evaled but not shown. +finish_eval=$lt_finish_eval + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm in a C name address pair +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# This is the shared library runtime path variable. +runpath_var=$runpath_var + +# This is the shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action_GCJ + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist. +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_GCJ + +# If ld is used when linking, flag to hardcode \$libdir into +# a binary during linking. This must work even if \$libdir does +# not exist. +hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld_GCJ + +# Whether we need a single -rpath flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator_GCJ + +# Set to yes if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the +# resulting binary. +hardcode_direct=$hardcode_direct_GCJ + +# Set to yes if using the -LDIR flag during linking hardcodes DIR into the +# resulting binary. +hardcode_minus_L=$hardcode_minus_L_GCJ + +# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into +# the resulting binary. 
+hardcode_shlibpath_var=$hardcode_shlibpath_var_GCJ + +# Set to yes if building a shared library automatically hardcodes DIR into the library +# and all subsequent libraries and executables linked against it. +hardcode_automatic=$hardcode_automatic_GCJ + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at relink time. +variables_saved_for_relink="$variables_saved_for_relink" + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs_GCJ + +# Compile-time system search path for libraries +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec + +# Run-time system search path for libraries +sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec + +# Fix the shell variable \$srcfile for the compiler. +fix_srcfile_path="$fix_srcfile_path_GCJ" + +# Set to yes if exported symbols are required. +always_export_symbols=$always_export_symbols_GCJ + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds_GCJ + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms_GCJ + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms_GCJ + +# ### END LIBTOOL TAG CONFIG: $tagname + +__EOF__ + + +else + # If there is no Makefile yet, we rely on a make rule to execute + # `config.status --recheck' to rerun these tests and create the + # libtool script then. + ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` + if test -f "$ltmain_in"; then + test -f Makefile && make "$ltmain" + fi +fi + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +CC="$lt_save_CC" + + else + tagname="" + fi + ;; + + RC) + + + +# Source file extension for RC test sources. +ac_ext=rc + +# Object file extension for compiled RC test sources. +objext=o +objext_RC=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }\n' + +# Code to be used in simple link tests +lt_simple_link_test_code="$lt_simple_compile_test_code" + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# Allow CC to be a program name with arguments. +compiler=$CC + + +# save warnings/boilerplate of simple test code +ac_outfile=conftest.$ac_objext +printf "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$rm conftest* + +ac_outfile=conftest.$ac_objext +printf "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$rm conftest* + + +# Allow CC to be a program name with arguments. 
+lt_save_CC="$CC" +CC=${RC-"windres"} +compiler=$CC +compiler_RC=$CC +for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` + +lt_cv_prog_compiler_c_o_RC=yes + +# The else clause should only fire when bootstrapping the +# libtool distribution, otherwise you forgot to ship ltmain.sh +# with your package, and you will get complaints that there are +# no rules to generate ltmain.sh. +if test -f "$ltmain"; then + # See if we are running on zsh, and set the options which allow our commands through + # without removal of \ escapes. + if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + # Now quote all the things that may contain metacharacters while being + # careful not to overquote the AC_SUBSTed values. We take copies of the + # variables and quote the copies for generation of the libtool script. + for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC NM \ + SED SHELL STRIP \ + libname_spec library_names_spec soname_spec extract_expsyms_cmds \ + old_striplib striplib file_magic_cmd finish_cmds finish_eval \ + deplibs_check_method reload_flag reload_cmds need_locks \ + lt_cv_sys_global_symbol_pipe lt_cv_sys_global_symbol_to_cdecl \ + lt_cv_sys_global_symbol_to_c_name_address \ + sys_lib_search_path_spec sys_lib_dlsearch_path_spec \ + old_postinstall_cmds old_postuninstall_cmds \ + compiler_RC \ + CC_RC \ + LD_RC \ + lt_prog_compiler_wl_RC \ + lt_prog_compiler_pic_RC \ + lt_prog_compiler_static_RC \ + lt_prog_compiler_no_builtin_flag_RC \ + export_dynamic_flag_spec_RC \ + thread_safe_flag_spec_RC \ + whole_archive_flag_spec_RC \ + enable_shared_with_static_runtimes_RC \ + old_archive_cmds_RC \ + old_archive_from_new_cmds_RC \ + predep_objects_RC \ + postdep_objects_RC \ + predeps_RC \ + postdeps_RC \ + compiler_lib_search_path_RC \ + archive_cmds_RC \ + archive_expsym_cmds_RC \ + postinstall_cmds_RC \ + postuninstall_cmds_RC \ + old_archive_from_expsyms_cmds_RC \ + allow_undefined_flag_RC \ + no_undefined_flag_RC \ + export_symbols_cmds_RC \ + hardcode_libdir_flag_spec_RC \ + hardcode_libdir_flag_spec_ld_RC \ + hardcode_libdir_separator_RC \ + hardcode_automatic_RC \ + module_cmds_RC \ + module_expsym_cmds_RC \ + lt_cv_prog_compiler_c_o_RC \ + exclude_expsyms_RC \ + include_expsyms_RC; do + + case $var in + old_archive_cmds_RC | \ + old_archive_from_new_cmds_RC | \ + archive_cmds_RC | \ + archive_expsym_cmds_RC | \ + module_cmds_RC | \ + module_expsym_cmds_RC | \ + old_archive_from_expsyms_cmds_RC | \ + export_symbols_cmds_RC | \ + extract_expsyms_cmds | reload_cmds | finish_cmds | \ + postinstall_cmds | postuninstall_cmds | \ + old_postinstall_cmds | old_postuninstall_cmds | \ + sys_lib_search_path_spec | sys_lib_dlsearch_path_spec) + # Double-quote double-evaled strings. 
+ eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\"" + ;; + *) + eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\"" + ;; + esac + done + + case $lt_echo in + *'\$0 --fallback-echo"') + lt_echo=`$echo "X$lt_echo" | $Xsed -e 's/\\\\\\\$0 --fallback-echo"$/$0 --fallback-echo"/'` + ;; + esac + +cfgfile="$ofile" + + cat <<__EOF__ >> "$cfgfile" +# ### BEGIN LIBTOOL TAG CONFIG: $tagname + +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc_RC + +# Whether or not to disallow shared libs when runtime libs are static +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_RC + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# An echo program that does not interpret backslashes. +echo=$lt_echo + +# The archiver. +AR=$lt_AR +AR_FLAGS=$lt_AR_FLAGS + +# A C compiler. +LTCC=$lt_LTCC + +# A language-specific compiler. +CC=$lt_compiler_RC + +# Is the compiler the GNU C compiler? +with_gcc=$GCC_RC + +# An ERE matcher. +EGREP=$lt_EGREP + +# The linker used to build libraries. +LD=$lt_LD_RC + +# Whether we need hard or soft links. +LN_S=$lt_LN_S + +# A BSD-compatible nm program. +NM=$lt_NM + +# A symbol stripping program +STRIP=$lt_STRIP + +# Used to examine libraries when file_magic_cmd begins "file" +MAGIC_CMD=$MAGIC_CMD + +# Used on cygwin: DLL creation program. +DLLTOOL="$DLLTOOL" + +# Used on cygwin: object dumper. +OBJDUMP="$OBJDUMP" + +# Used on cygwin: assembler. +AS="$AS" + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl_RC + +# Object file suffix (normally "o"). +objext="$ac_objext" + +# Old archive suffix (normally "a"). +libext="$libext" + +# Shared library suffix (normally ".so"). +shrext_cmds='$shrext_cmds' + +# Executable file suffix (normally ""). +exeext="$exeext" + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic_RC +pic_mode=$pic_mode + +# What is the maximum length of a command? +max_cmd_len=$lt_cv_sys_max_cmd_len + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o_RC + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Do we need the lib prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static_RC + +# Compiler flag to turn off builtin functions. 
+no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_RC + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_RC + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec_RC + +# Compiler flag to generate thread-safe objects. +thread_safe_flag_spec=$lt_thread_safe_flag_spec_RC + +# Library versioning type. +version_type=$version_type + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME. +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Commands used to build and install an old-style archive. +RANLIB=$lt_RANLIB +old_archive_cmds=$lt_old_archive_cmds_RC +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_RC + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_RC + +# Commands used to build and install a shared archive. +archive_cmds=$lt_archive_cmds_RC +archive_expsym_cmds=$lt_archive_expsym_cmds_RC +postinstall_cmds=$lt_postinstall_cmds +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to build a loadable module (assumed same as above if empty) +module_cmds=$lt_module_cmds_RC +module_expsym_cmds=$lt_module_expsym_cmds_RC + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + +# Dependencies to place before the objects being linked to create a +# shared library. +predep_objects=$lt_predep_objects_RC + +# Dependencies to place after the objects being linked to create a +# shared library. +postdep_objects=$lt_postdep_objects_RC + +# Dependencies to place before the objects being linked to create a +# shared library. +predeps=$lt_predeps_RC + +# Dependencies to place after the objects being linked to create a +# shared library. +postdeps=$lt_postdeps_RC + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_compiler_lib_search_path_RC + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method == file_magic. +file_magic_cmd=$lt_file_magic_cmd + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag_RC + +# Flag that forces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag_RC + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# Same as above, but a single script fragment to be evaled but not shown. +finish_eval=$lt_finish_eval + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm in a C name address pair +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# This is the shared library runtime path variable. +runpath_var=$runpath_var + +# This is the shared library path variable. 
+shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action_RC + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist. +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_RC + +# If ld is used when linking, flag to hardcode \$libdir into +# a binary during linking. This must work even if \$libdir does +# not exist. +hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld_RC + +# Whether we need a single -rpath flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator_RC + +# Set to yes if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the +# resulting binary. +hardcode_direct=$hardcode_direct_RC + +# Set to yes if using the -LDIR flag during linking hardcodes DIR into the +# resulting binary. +hardcode_minus_L=$hardcode_minus_L_RC + +# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into +# the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var_RC + +# Set to yes if building a shared library automatically hardcodes DIR into the library +# and all subsequent libraries and executables linked against it. +hardcode_automatic=$hardcode_automatic_RC + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at relink time. +variables_saved_for_relink="$variables_saved_for_relink" + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs_RC + +# Compile-time system search path for libraries +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec + +# Run-time system search path for libraries +sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec + +# Fix the shell variable \$srcfile for the compiler. +fix_srcfile_path="$fix_srcfile_path_RC" + +# Set to yes if exported symbols are required. +always_export_symbols=$always_export_symbols_RC + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds_RC + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms_RC + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms_RC + +# ### END LIBTOOL TAG CONFIG: $tagname + +__EOF__ + + +else + # If there is no Makefile yet, we rely on a make rule to execute + # `config.status --recheck' to rerun these tests and create the + # libtool script then. + ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` + if test -f "$ltmain_in"; then + test -f Makefile && make "$ltmain" + fi +fi + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +CC="$lt_save_CC" + + ;; + + *) + { { echo "$as_me:$LINENO: error: Unsupported tag name: $tagname" >&5 +echo "$as_me: error: Unsupported tag name: $tagname" >&2;} + { (exit 1); exit 1; }; } + ;; + esac + + # Append the new tag name to the list of available tags. 
+ if test -n "$tagname" ; then + available_tags="$available_tags $tagname" + fi + fi + done + IFS="$lt_save_ifs" + + # Now substitute the updated list of available tags. + if eval "sed -e 's/^available_tags=.*\$/available_tags=\"$available_tags\"/' \"$ofile\" > \"${ofile}T\""; then + mv "${ofile}T" "$ofile" + chmod +x "$ofile" + else + rm -f "${ofile}T" + { { echo "$as_me:$LINENO: error: unable to update list of available tagged configurations." >&5 +echo "$as_me: error: unable to update list of available tagged configurations." >&2;} + { (exit 1); exit 1; }; } + fi +fi + + + +# This can be used to rebuild libtool when needed +LIBTOOL_DEPS="$ac_aux_dir/ltmain.sh" + +# Always use our own libtool. +LIBTOOL='$(SHELL) $(top_builddir)/libtool' + +# Prevent multiple expansion + + + + + + + + + + + + + + + + + + + + + +# Checks for libraries. + +# Checks for header files. +ac_ext=cc +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + + +for ac_header in unistd.h +do +as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh` +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo "$as_me:$LINENO: checking for $ac_header" >&5 +echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6 +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 +else + # Is the header compilable? +echo "$as_me:$LINENO: checking $ac_header usability" >&5 +echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +#include <$ac_header> +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_header_compiler=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_header_compiler=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 +echo "${ECHO_T}$ac_header_compiler" >&6 + +# Is the header present? +echo "$as_me:$LINENO: checking $ac_header presence" >&5 +echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include <$ac_header> +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_cxx_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_cxx_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + ac_header_preproc=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_header_preproc=no +fi +rm -f conftest.err conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 +echo "${ECHO_T}$ac_header_preproc" >&6 + +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc:$ac_cxx_preproc_warn_flag in + yes:no: ) + { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 +echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 +echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} + ac_header_preproc=yes + ;; + no:yes:* ) + { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 +echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 +echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 +echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 +echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 +echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 +echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} + ( + cat <<\_ASBOX +## --------------------------------- ## +## Report this to omalley@apache.org ## +## --------------------------------- ## +_ASBOX + ) | + sed "s/^/$as_me: WARNING: /" >&2 + ;; +esac +echo "$as_me:$LINENO: checking for $ac_header" >&5 +echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6 +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + eval "$as_ac_Header=\$ac_header_preproc" +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 + +fi +if test `eval echo '${'$as_ac_Header'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + + +# Checks for typedefs, structures, and compiler characteristics. +echo "$as_me:$LINENO: checking for stdbool.h that conforms to C99" >&5 +echo $ECHO_N "checking for stdbool.h that conforms to C99... $ECHO_C" >&6 +if test "${ac_cv_header_stdbool_h+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ + +#include <stdbool.h> +#ifndef bool +# error bool is not defined +#endif +#ifndef false +# error false is not defined +#endif +#if false +# error false is not 0 +#endif +#ifndef true +# error true is not defined +#endif +#if true != 1 +# error true is not 1 +#endif +#ifndef __bool_true_false_are_defined +# error __bool_true_false_are_defined is not defined +#endif + + struct s { _Bool s: 1; _Bool t; } s; + + char a[true == 1 ? 1 : -1]; + char b[false == 0 ? 1 : -1]; + char c[__bool_true_false_are_defined == 1 ? 1 : -1]; + char d[(bool) -0.5 == true ? 1 : -1]; + bool e = &s; + char f[(_Bool) -0.0 == false ? 1 : -1]; + char g[true]; + char h[sizeof (_Bool)]; + char i[sizeof s.t]; + +int +main () +{ + return !a + !b + !c + !d + !e + !f + !g + !h + !i; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_header_stdbool_h=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_header_stdbool_h=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_header_stdbool_h" >&5 +echo "${ECHO_T}$ac_cv_header_stdbool_h" >&6 +echo "$as_me:$LINENO: checking for _Bool" >&5 +echo $ECHO_N "checking for _Bool... $ECHO_C" >&6 +if test "${ac_cv_type__Bool+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +if ((_Bool *) 0) + return 0; +if (sizeof (_Bool)) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$?
= $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_type__Bool=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_type__Bool=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_type__Bool" >&5 +echo "${ECHO_T}$ac_cv_type__Bool" >&6 +if test $ac_cv_type__Bool = yes; then + +cat >>confdefs.h <<_ACEOF +#define HAVE__BOOL 1 +_ACEOF + + +fi + +if test $ac_cv_header_stdbool_h = yes; then + +cat >>confdefs.h <<\_ACEOF +#define HAVE_STDBOOL_H 1 +_ACEOF + +fi + +echo "$as_me:$LINENO: checking for an ANSI C-conforming const" >&5 +echo $ECHO_N "checking for an ANSI C-conforming const... $ECHO_C" >&6 +if test "${ac_cv_c_const+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ +/* FIXME: Include the comments suggested by Paul. */ +#ifndef __cplusplus + /* Ultrix mips cc rejects this. */ + typedef int charset[2]; + const charset x; + /* SunOS 4.1.1 cc rejects this. */ + char const *const *ccp; + char **p; + /* NEC SVR4.0.2 mips cc rejects this. */ + struct point {int x, y;}; + static struct point const zero = {0,0}; + /* AIX XL C 1.02.0.0 rejects this. + It does not let you subtract one const X* pointer from another in + an arm of an if-expression whose if-part is not a constant + expression */ + const char *g = "string"; + ccp = &g + (g ? g-g : 0); + /* HPUX 7.0 cc rejects these. */ + ++ccp; + p = (char**) ccp; + ccp = (char const *const *) p; + { /* SCO 3.2v4 cc rejects this. */ + char *t; + char const *s = 0 ? (char *) 0 : (char const *) 0; + + *t++ = 0; + } + { /* Someone thinks the Sun supposedly-ANSI compiler will reject this. */ + int x[] = {25, 17}; + const int *foo = &x[0]; + ++foo; + } + { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. */ + typedef const int *iptr; + iptr p = 0; + ++p; + } + { /* AIX XL C 1.02.0.0 rejects this saying + "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */ + struct s { int j; const int *ap[3]; }; + struct s *b; b->j = 5; + } + { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */ + const int foo = 10; + } +#endif + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_c_const=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_c_const=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_c_const" >&5 +echo "${ECHO_T}$ac_cv_c_const" >&6 +if test $ac_cv_c_const = no; then + +cat >>confdefs.h <<\_ACEOF +#define const +_ACEOF + +fi + +echo "$as_me:$LINENO: checking for off_t" >&5 +echo $ECHO_N "checking for off_t... 
$ECHO_C" >&6 +if test "${ac_cv_type_off_t+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +if ((off_t *) 0) + return 0; +if (sizeof (off_t)) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_type_off_t=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_type_off_t=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_type_off_t" >&5 +echo "${ECHO_T}$ac_cv_type_off_t" >&6 +if test $ac_cv_type_off_t = yes; then + : +else + +cat >>confdefs.h <<_ACEOF +#define off_t long +_ACEOF + +fi + +echo "$as_me:$LINENO: checking for size_t" >&5 +echo $ECHO_N "checking for size_t... $ECHO_C" >&6 +if test "${ac_cv_type_size_t+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +if ((size_t *) 0) + return 0; +if (sizeof (size_t)) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_type_size_t=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_type_size_t=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_type_size_t" >&5 +echo "${ECHO_T}$ac_cv_type_size_t" >&6 +if test $ac_cv_type_size_t = yes; then + : +else + +cat >>confdefs.h <<_ACEOF +#define size_t unsigned +_ACEOF + +fi + +echo "$as_me:$LINENO: checking whether strerror_r is declared" >&5 +echo $ECHO_N "checking whether strerror_r is declared... $ECHO_C" >&6 +if test "${ac_cv_have_decl_strerror_r+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ +$ac_includes_default +int +main () +{ +#ifndef strerror_r + char *p = (char *) strerror_r; +#endif + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_have_decl_strerror_r=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_have_decl_strerror_r=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_have_decl_strerror_r" >&5 +echo "${ECHO_T}$ac_cv_have_decl_strerror_r" >&6 +if test $ac_cv_have_decl_strerror_r = yes; then + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_STRERROR_R 1 +_ACEOF + + +else + cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_STRERROR_R 0 +_ACEOF + + +fi + + + +for ac_func in strerror_r +do +as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh` +echo "$as_me:$LINENO: checking for $ac_func" >&5 +echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6 +if eval "test \"\${$as_ac_var+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +/* Define $ac_func to an innocuous variant, in case <limits.h> declares $ac_func. + For example, HP-UX 11i <limits.h> declares gettimeofday. */ +#define $ac_func innocuous_$ac_func + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char $ac_func (); below. + Prefer <limits.h> to <assert.h> if __STDC__ is defined, since + <limits.h> exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include <limits.h> +#else +# include <assert.h> +#endif + +#undef $ac_func + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +{ +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char $ac_func (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined (__stub_$ac_func) || defined (__stub___$ac_func) +choke me +#else +char (*f) () = $ac_func; +#endif +#ifdef __cplusplus +} +#endif + +int +main () +{ +return f != $ac_func; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$?
= $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + eval "$as_ac_var=yes" +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +eval "$as_ac_var=no" +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6 +if test `eval echo '${'$as_ac_var'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +fi +done + +echo "$as_me:$LINENO: checking whether strerror_r returns char *" >&5 +echo $ECHO_N "checking whether strerror_r returns char *... $ECHO_C" >&6 +if test "${ac_cv_func_strerror_r_char_p+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + + ac_cv_func_strerror_r_char_p=no + if test $ac_cv_have_decl_strerror_r = yes; then + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ + + char buf[100]; + char x = *strerror_r (0, buf, sizeof buf); + char *p = strerror_r (0, buf, sizeof buf); + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_func_strerror_r_char_p=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext + else + # strerror_r is not declared. Choose between + # systems that have relatively inaccessible declarations for the + # function. BeOS and DEC UNIX 4.0 fall in this category, but the + # former has a strerror_r that returns char*, while the latter + # has a strerror_r that returns `int'. + # This test should segfault on the DEC system. + if test "$cross_compiling" = yes; then + : +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default + extern char *strerror_r (); +int +main () +{ +char buf[100]; + char x = *strerror_r (0, buf, sizeof buf); + exit (!isalpha (x)); + ; + return 0; +} +_ACEOF +rm -f conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { ac_try='./conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_func_strerror_r_char_p=yes +else + echo "$as_me: program exited with status $ac_status" >&5 +echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +fi + fi + +fi +echo "$as_me:$LINENO: result: $ac_cv_func_strerror_r_char_p" >&5 +echo "${ECHO_T}$ac_cv_func_strerror_r_char_p" >&6 +if test $ac_cv_func_strerror_r_char_p = yes; then + +cat >>confdefs.h <<\_ACEOF +#define STRERROR_R_CHAR_P 1 +_ACEOF + +fi + + +# Checks for library functions. + + +for ac_func in mkdir uname +do +as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh` +echo "$as_me:$LINENO: checking for $ac_func" >&5 +echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6 +if eval "test \"\${$as_ac_var+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +/* Define $ac_func to an innocuous variant, in case <limits.h> declares $ac_func. + For example, HP-UX 11i <limits.h> declares gettimeofday. */ +#define $ac_func innocuous_$ac_func + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char $ac_func (); below. + Prefer <limits.h> to <assert.h> if __STDC__ is defined, since + <limits.h> exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include <limits.h> +#else +# include <assert.h> +#endif + +#undef $ac_func + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +{ +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char $ac_func (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined (__stub_$ac_func) || defined (__stub___$ac_func) +choke me +#else +char (*f) () = $ac_func; +#endif +#ifdef __cplusplus +} +#endif + +int +main () +{ +return f != $ac_func; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + eval "$as_ac_var=yes" +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +eval "$as_ac_var=no" +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6 +if test `eval echo '${'$as_ac_var'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +fi +done + + +echo "$as_me:$LINENO: checking for shutdown in -lsocket" >&5 +echo $ECHO_N "checking for shutdown in -lsocket...
$ECHO_C" >&6 +if test "${ac_cv_lib_socket_shutdown+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsocket $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char shutdown (); +int +main () +{ +shutdown (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_socket_shutdown=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_socket_shutdown=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_socket_shutdown" >&5 +echo "${ECHO_T}$ac_cv_lib_socket_shutdown" >&6 +if test $ac_cv_lib_socket_shutdown = yes; then + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBSOCKET 1 +_ACEOF + + LIBS="-lsocket $LIBS" + +fi + + +echo "$as_me:$LINENO: checking for xdr_float in -lnsl" >&5 +echo $ECHO_N "checking for xdr_float in -lnsl... $ECHO_C" >&6 +if test "${ac_cv_lib_nsl_xdr_float+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lnsl $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char xdr_float (); +int +main () +{ +xdr_float (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_nsl_xdr_float=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_nsl_xdr_float=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_nsl_xdr_float" >&5 +echo "${ECHO_T}$ac_cv_lib_nsl_xdr_float" >&6 +if test $ac_cv_lib_nsl_xdr_float = yes; then + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBNSL 1 +_ACEOF + + LIBS="-lnsl $LIBS" + +fi + +cat >confcache <<\_ACEOF +# This file is a shell script that caches the results of configure +# tests run on this system so they can be shared between configure +# scripts and configure runs, see configure's option --config-cache. +# It is not useful on other systems. If it contains results you don't +# want to keep, you may remove or edit it. +# +# config.status only pays attention to the cache file if you give it +# the --recheck option to rerun configure. +# +# `ac_cv_env_foo' variables (set or unset) will be overridden when +# loading this file, other *unset* `ac_cv_foo' will be assigned the +# following values. + +_ACEOF + +# The following way of writing the cache mishandles newlines in values, +# but we know of no workaround that is simple, portable, and efficient. +# So, don't put newlines in cache variables' values. +# Ultrix sh set writes to stderr and can't be redirected directly, +# and sets the high bit in the cache file unless we assign to the vars. +{ + (set) 2>&1 | + case `(ac_space=' '; set | grep ac_space) 2>&1` in + *ac_space=\ *) + # `set' does not quote correctly, so add quotes (double-quote + # substitution turns \\\\ into \\, and sed turns \\ into \). + sed -n \ + "s/'/'\\\\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" + ;; + *) + # `set' quotes correctly as required by POSIX, so do not add quotes. + sed -n \ + "s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1=\\2/p" + ;; + esac; +} | + sed ' + t clear + : clear + s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ + t end + /^ac_cv_env/!s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ + : end' >>confcache +if diff $cache_file confcache >/dev/null 2>&1; then :; else + if test -w $cache_file; then + test "x$cache_file" != "x/dev/null" && echo "updating cache $cache_file" + cat confcache >$cache_file + else + echo "not updating unwritable cache $cache_file" + fi +fi +rm -f confcache + +test "x$prefix" = xNONE && prefix=$ac_default_prefix +# Let make expand exec_prefix. +test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' + +# VPATH may cause trouble with some makes, so we remove $(srcdir), +# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and +# trailing colons and then remove the whole line if VPATH becomes empty +# (actually we leave an empty line to preserve line numbers). +if test "x$srcdir" = x.; then + ac_vpsub='/^[ ]*VPATH[ ]*=/{ +s/:*\$(srcdir):*/:/; +s/:*\${srcdir}:*/:/; +s/:*@srcdir@:*/:/; +s/^\([^=]*=[ ]*\):*/\1/; +s/:*$//; +s/^[^=]*=[ ]*$//; +}' +fi + +DEFS=-DHAVE_CONFIG_H + +ac_libobjs= +ac_ltlibobjs= +for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue + # 1. Remove the extension, and $U if already installed. + ac_i=`echo "$ac_i" | + sed 's/\$U\././;s/\.o$//;s/\.obj$//'` + # 2. Add them. 
+ ac_libobjs="$ac_libobjs $ac_i\$U.$ac_objext" + ac_ltlibobjs="$ac_ltlibobjs $ac_i"'$U.lo' +done +LIBOBJS=$ac_libobjs + +LTLIBOBJS=$ac_ltlibobjs + + +if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then + { { echo "$as_me:$LINENO: error: conditional \"AMDEP\" was never defined. +Usually this means the macro was only invoked conditionally." >&5 +echo "$as_me: error: conditional \"AMDEP\" was never defined. +Usually this means the macro was only invoked conditionally." >&2;} + { (exit 1); exit 1; }; } +fi +if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then + { { echo "$as_me:$LINENO: error: conditional \"am__fastdepCC\" was never defined. +Usually this means the macro was only invoked conditionally." >&5 +echo "$as_me: error: conditional \"am__fastdepCC\" was never defined. +Usually this means the macro was only invoked conditionally." >&2;} + { (exit 1); exit 1; }; } +fi +if test -z "${am__fastdepCXX_TRUE}" && test -z "${am__fastdepCXX_FALSE}"; then + { { echo "$as_me:$LINENO: error: conditional \"am__fastdepCXX\" was never defined. +Usually this means the macro was only invoked conditionally." >&5 +echo "$as_me: error: conditional \"am__fastdepCXX\" was never defined. +Usually this means the macro was only invoked conditionally." >&2;} + { (exit 1); exit 1; }; } +fi + +: ${CONFIG_STATUS=./config.status} +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files $CONFIG_STATUS" +{ echo "$as_me:$LINENO: creating $CONFIG_STATUS" >&5 +echo "$as_me: creating $CONFIG_STATUS" >&6;} +cat >$CONFIG_STATUS <<_ACEOF +#! $SHELL +# Generated by $as_me. +# Run this file to recreate the current configuration. +# Compiler output produced by configure, useful for debugging +# configure, is in config.log if it exists. + +debug=false +ac_cs_recheck=false +ac_cs_silent=false +SHELL=\${CONFIG_SHELL-$SHELL} +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF +## --------------------- ## +## M4sh Initialization. ## +## --------------------- ## + +# Be Bourne compatible +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' +elif test -n "${BASH_VERSION+set}" && (set -o posix) >/dev/null 2>&1; then + set -o posix +fi +DUALCASE=1; export DUALCASE # for MKS sh + +# Support unset when possible. +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + as_unset=unset +else + as_unset=false +fi + + +# Work around bugs in pre-3.0 UWIN ksh. +$as_unset ENV MAIL MAILPATH +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +for as_var in \ + LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \ + LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \ + LC_TELEPHONE LC_TIME +do + if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then + eval $as_var=C; export $as_var + else + $as_unset $as_var + fi +done + +# Required to use basename. +if expr a : '\(a\)' >/dev/null 2>&1; then + as_expr=expr +else + as_expr=false +fi + +if (basename /) >/dev/null 2>&1 && test "X`basename / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + + +# Name of the executable. +as_me=`$as_basename "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)$' \| \ + . 
: '\(.\)' 2>/dev/null || +echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/; q; } + /^X\/\(\/\/\)$/{ s//\1/; q; } + /^X\/\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + + +# PATH needs CR, and LINENO needs CR and PATH. +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + echo "#! /bin/sh" >conf$$.sh + echo "exit 0" >>conf$$.sh + chmod +x conf$$.sh + if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then + PATH_SEPARATOR=';' + else + PATH_SEPARATOR=: + fi + rm -f conf$$.sh +fi + + + as_lineno_1=$LINENO + as_lineno_2=$LINENO + as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null` + test "x$as_lineno_1" != "x$as_lineno_2" && + test "x$as_lineno_3" = "x$as_lineno_2" || { + # Find who we are. Look in the path if we contain no path at all + # relative or not. + case $0 in + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break +done + + ;; + esac + # We did not find ourselves, most probably we were run as `sh COMMAND' + # in which case we are not to be found in the path. + if test "x$as_myself" = x; then + as_myself=$0 + fi + if test ! -f "$as_myself"; then + { { echo "$as_me:$LINENO: error: cannot find myself; rerun with an absolute path" >&5 +echo "$as_me: error: cannot find myself; rerun with an absolute path" >&2;} + { (exit 1); exit 1; }; } + fi + case $CONFIG_SHELL in + '') + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for as_base in sh bash ksh sh5; do + case $as_dir in + /*) + if ("$as_dir/$as_base" -c ' + as_lineno_1=$LINENO + as_lineno_2=$LINENO + as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null` + test "x$as_lineno_1" != "x$as_lineno_2" && + test "x$as_lineno_3" = "x$as_lineno_2" ') 2>/dev/null; then + $as_unset BASH_ENV || test "${BASH_ENV+set}" != set || { BASH_ENV=; export BASH_ENV; } + $as_unset ENV || test "${ENV+set}" != set || { ENV=; export ENV; } + CONFIG_SHELL=$as_dir/$as_base + export CONFIG_SHELL + exec "$CONFIG_SHELL" "$0" ${1+"$@"} + fi;; + esac + done +done +;; + esac + + # Create $as_me.lineno as a copy of $as_myself, but with $LINENO + # uniformly replaced by the line number. The first 'sed' inserts a + # line-number line before each line; the second 'sed' does the real + # work. The second script uses 'N' to pair each line-number line + # with the numbered line, and appends trailing '-' during + # substitution so that $LINENO is not a special case at line end. + # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the + # second 'sed' script. Blame Lee E. McMahon for sed's syntax. 
:-) + sed '=' <$as_myself | + sed ' + N + s,$,-, + : loop + s,^\(['$as_cr_digits']*\)\(.*\)[$]LINENO\([^'$as_cr_alnum'_]\),\1\2\1\3, + t loop + s,-$,, + s,^['$as_cr_digits']*\n,, + ' >$as_me.lineno && + chmod +x $as_me.lineno || + { { echo "$as_me:$LINENO: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&5 +echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2;} + { (exit 1); exit 1; }; } + + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensible to this). + . ./$as_me.lineno + # Exit status is that of the last command. + exit +} + + +case `echo "testing\c"; echo 1,2,3`,`echo -n testing; echo 1,2,3` in + *c*,-n*) ECHO_N= ECHO_C=' +' ECHO_T=' ' ;; + *c*,* ) ECHO_N=-n ECHO_C= ECHO_T= ;; + *) ECHO_N= ECHO_C='\c' ECHO_T= ;; +esac + +if expr a : '\(a\)' >/dev/null 2>&1; then + as_expr=expr +else + as_expr=false +fi + +rm -f conf$$ conf$$.exe conf$$.file +echo >conf$$.file +if ln -s conf$$.file conf$$ 2>/dev/null; then + # We could just check for DJGPP; but this test a) works b) is more generic + # and c) will remain valid once DJGPP supports symlinks (DJGPP 2.04). + if test -f conf$$.exe; then + # Don't use ln at all; we don't have any links + as_ln_s='cp -p' + else + as_ln_s='ln -s' + fi +elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln +else + as_ln_s='cp -p' +fi +rm -f conf$$ conf$$.exe conf$$.file + +if mkdir -p . 2>/dev/null; then + as_mkdir_p=: +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +as_executable_p="test -f" + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +# IFS +# We need space, tab and new line, in precisely that order. +as_nl=' +' +IFS=" $as_nl" + +# CDPATH. +$as_unset CDPATH + +exec 6>&1 + +# Open the log real soon, to keep \$[0] and so on meaningful, and to +# report actual input values of CONFIG_FILES etc. instead of their +# values after options handling. Logging --version etc. is OK. +exec 5>>config.log +{ + echo + sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX +## Running $as_me. ## +_ASBOX +} >&5 +cat >&5 <<_CSEOF + +This file was extended by hadoop-pipes-examples $as_me 0.13.0, which was +generated by GNU Autoconf 2.59. Invocation command line was + + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS + CONFIG_LINKS = $CONFIG_LINKS + CONFIG_COMMANDS = $CONFIG_COMMANDS + $ $0 $@ + +_CSEOF +echo "on `(hostname || uname -n) 2>/dev/null | sed 1q`" >&5 +echo >&5 +_ACEOF + +# Files that config.status was made for. +if test -n "$ac_config_files"; then + echo "config_files=\"$ac_config_files\"" >>$CONFIG_STATUS +fi + +if test -n "$ac_config_headers"; then + echo "config_headers=\"$ac_config_headers\"" >>$CONFIG_STATUS +fi + +if test -n "$ac_config_links"; then + echo "config_links=\"$ac_config_links\"" >>$CONFIG_STATUS +fi + +if test -n "$ac_config_commands"; then + echo "config_commands=\"$ac_config_commands\"" >>$CONFIG_STATUS +fi + +cat >>$CONFIG_STATUS <<\_ACEOF + +ac_cs_usage="\ +\`$as_me' instantiates files from templates according to the +current configuration. + +Usage: $0 [OPTIONS] [FILE]... 
+ + -h, --help print this help, then exit + -V, --version print version number, then exit + -q, --quiet do not print progress messages + -d, --debug don't remove temporary files + --recheck update $as_me by reconfiguring in the same conditions + --file=FILE[:TEMPLATE] + instantiate the configuration file FILE + --header=FILE[:TEMPLATE] + instantiate the configuration header FILE + +Configuration files: +$config_files + +Configuration headers: +$config_headers + +Configuration commands: +$config_commands + +Report bugs to ." +_ACEOF + +cat >>$CONFIG_STATUS <<_ACEOF +ac_cs_version="\\ +hadoop-pipes-examples config.status 0.13.0 +configured by $0, generated by GNU Autoconf 2.59, + with options \\"`echo "$ac_configure_args" | sed 's/[\\""\`\$]/\\\\&/g'`\\" + +Copyright (C) 2003 Free Software Foundation, Inc. +This config.status script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it." +srcdir=$srcdir +INSTALL="$INSTALL" +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF +# If no file are specified by the user, then we need to provide default +# value. By we need to know if files were specified by the user. +ac_need_defaults=: +while test $# != 0 +do + case $1 in + --*=*) + ac_option=`expr "x$1" : 'x\([^=]*\)='` + ac_optarg=`expr "x$1" : 'x[^=]*=\(.*\)'` + ac_shift=: + ;; + -*) + ac_option=$1 + ac_optarg=$2 + ac_shift=shift + ;; + *) # This is not an option, so the user has probably given explicit + # arguments. + ac_option=$1 + ac_need_defaults=false;; + esac + + case $ac_option in + # Handling of the options. +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF + -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) + ac_cs_recheck=: ;; + --version | --vers* | -V ) + echo "$ac_cs_version"; exit 0 ;; + --he | --h) + # Conflict between --help and --header + { { echo "$as_me:$LINENO: error: ambiguous option: $1 +Try \`$0 --help' for more information." >&5 +echo "$as_me: error: ambiguous option: $1 +Try \`$0 --help' for more information." >&2;} + { (exit 1); exit 1; }; };; + --help | --hel | -h ) + echo "$ac_cs_usage"; exit 0 ;; + --debug | --d* | -d ) + debug=: ;; + --file | --fil | --fi | --f ) + $ac_shift + CONFIG_FILES="$CONFIG_FILES $ac_optarg" + ac_need_defaults=false;; + --header | --heade | --head | --hea ) + $ac_shift + CONFIG_HEADERS="$CONFIG_HEADERS $ac_optarg" + ac_need_defaults=false;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil | --si | --s) + ac_cs_silent=: ;; + + # This is an error. + -*) { { echo "$as_me:$LINENO: error: unrecognized option: $1 +Try \`$0 --help' for more information." >&5 +echo "$as_me: error: unrecognized option: $1 +Try \`$0 --help' for more information." >&2;} + { (exit 1); exit 1; }; } ;; + + *) ac_config_targets="$ac_config_targets $1" ;; + + esac + shift +done + +ac_configure_extra_args= + +if $ac_cs_silent; then + exec 6>/dev/null + ac_configure_extra_args="$ac_configure_extra_args --silent" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF +if \$ac_cs_recheck; then + echo "running $SHELL $0 " $ac_configure_args \$ac_configure_extra_args " --no-create --no-recursion" >&6 + exec $SHELL $0 $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion +fi + +_ACEOF + +cat >>$CONFIG_STATUS <<_ACEOF +# +# INIT-COMMANDS section. +# + +AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir" + +_ACEOF + + + +cat >>$CONFIG_STATUS <<\_ACEOF +for ac_config_target in $ac_config_targets +do + case "$ac_config_target" in + # Handling of arguments. 
+ "Makefile" ) CONFIG_FILES="$CONFIG_FILES Makefile" ;; + "depfiles" ) CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;; + "impl/config.h" ) CONFIG_HEADERS="$CONFIG_HEADERS impl/config.h" ;; + *) { { echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5 +echo "$as_me: error: invalid argument: $ac_config_target" >&2;} + { (exit 1); exit 1; }; };; + esac +done + +# If the user did not use the arguments to specify the items to instantiate, +# then the envvar interface is used. Set only those that are not. +# We use the long form for the default assignment because of an extremely +# bizarre bug on SunOS 4.1.3. +if $ac_need_defaults; then + test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files + test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers + test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands +fi + +# Have a temporary directory for convenience. Make it in the build tree +# simply because there is no reason to put it here, and in addition, +# creating and moving files from /tmp can sometimes cause problems. +# Create a temporary directory, and hook for its removal unless debugging. +$debug || +{ + trap 'exit_status=$?; rm -rf $tmp && exit $exit_status' 0 + trap '{ (exit 1); exit 1; }' 1 2 13 15 +} + +# Create a (secure) tmp directory for tmp files. + +{ + tmp=`(umask 077 && mktemp -d -q "./confstatXXXXXX") 2>/dev/null` && + test -n "$tmp" && test -d "$tmp" +} || +{ + tmp=./confstat$$-$RANDOM + (umask 077 && mkdir $tmp) +} || +{ + echo "$me: cannot create a temporary directory in ." >&2 + { (exit 1); exit 1; } +} + +_ACEOF + +cat >>$CONFIG_STATUS <<_ACEOF + +# +# CONFIG_FILES section. +# + +# No need to generate the scripts if there are no CONFIG_FILES. +# This happens for instance when ./config.status config.h +if test -n "\$CONFIG_FILES"; then + # Protect against being on the right side of a sed subst in config.status. 
+ sed 's/,@/@@/; s/@,/@@/; s/,;t t\$/@;t t/; /@;t t\$/s/[\\\\&,]/\\\\&/g; + s/@@/,@/; s/@@/@,/; s/@;t t\$/,;t t/' >\$tmp/subs.sed <<\\CEOF +s,@SHELL@,$SHELL,;t t +s,@PATH_SEPARATOR@,$PATH_SEPARATOR,;t t +s,@PACKAGE_NAME@,$PACKAGE_NAME,;t t +s,@PACKAGE_TARNAME@,$PACKAGE_TARNAME,;t t +s,@PACKAGE_VERSION@,$PACKAGE_VERSION,;t t +s,@PACKAGE_STRING@,$PACKAGE_STRING,;t t +s,@PACKAGE_BUGREPORT@,$PACKAGE_BUGREPORT,;t t +s,@exec_prefix@,$exec_prefix,;t t +s,@prefix@,$prefix,;t t +s,@program_transform_name@,$program_transform_name,;t t +s,@bindir@,$bindir,;t t +s,@sbindir@,$sbindir,;t t +s,@libexecdir@,$libexecdir,;t t +s,@datadir@,$datadir,;t t +s,@sysconfdir@,$sysconfdir,;t t +s,@sharedstatedir@,$sharedstatedir,;t t +s,@localstatedir@,$localstatedir,;t t +s,@libdir@,$libdir,;t t +s,@includedir@,$includedir,;t t +s,@oldincludedir@,$oldincludedir,;t t +s,@infodir@,$infodir,;t t +s,@mandir@,$mandir,;t t +s,@build_alias@,$build_alias,;t t +s,@host_alias@,$host_alias,;t t +s,@target_alias@,$target_alias,;t t +s,@DEFS@,$DEFS,;t t +s,@ECHO_C@,$ECHO_C,;t t +s,@ECHO_N@,$ECHO_N,;t t +s,@ECHO_T@,$ECHO_T,;t t +s,@LIBS@,$LIBS,;t t +s,@INSTALL_PROGRAM@,$INSTALL_PROGRAM,;t t +s,@INSTALL_SCRIPT@,$INSTALL_SCRIPT,;t t +s,@INSTALL_DATA@,$INSTALL_DATA,;t t +s,@CYGPATH_W@,$CYGPATH_W,;t t +s,@PACKAGE@,$PACKAGE,;t t +s,@VERSION@,$VERSION,;t t +s,@ACLOCAL@,$ACLOCAL,;t t +s,@AUTOCONF@,$AUTOCONF,;t t +s,@AUTOMAKE@,$AUTOMAKE,;t t +s,@AUTOHEADER@,$AUTOHEADER,;t t +s,@MAKEINFO@,$MAKEINFO,;t t +s,@install_sh@,$install_sh,;t t +s,@STRIP@,$STRIP,;t t +s,@ac_ct_STRIP@,$ac_ct_STRIP,;t t +s,@INSTALL_STRIP_PROGRAM@,$INSTALL_STRIP_PROGRAM,;t t +s,@mkdir_p@,$mkdir_p,;t t +s,@AWK@,$AWK,;t t +s,@SET_MAKE@,$SET_MAKE,;t t +s,@am__leading_dot@,$am__leading_dot,;t t +s,@AMTAR@,$AMTAR,;t t +s,@am__tar@,$am__tar,;t t +s,@am__untar@,$am__untar,;t t +s,@CC@,$CC,;t t +s,@CFLAGS@,$CFLAGS,;t t +s,@LDFLAGS@,$LDFLAGS,;t t +s,@CPPFLAGS@,$CPPFLAGS,;t t +s,@ac_ct_CC@,$ac_ct_CC,;t t +s,@EXEEXT@,$EXEEXT,;t t +s,@OBJEXT@,$OBJEXT,;t t +s,@DEPDIR@,$DEPDIR,;t t +s,@am__include@,$am__include,;t t +s,@am__quote@,$am__quote,;t t +s,@AMDEP_TRUE@,$AMDEP_TRUE,;t t +s,@AMDEP_FALSE@,$AMDEP_FALSE,;t t +s,@AMDEPBACKSLASH@,$AMDEPBACKSLASH,;t t +s,@CCDEPMODE@,$CCDEPMODE,;t t +s,@am__fastdepCC_TRUE@,$am__fastdepCC_TRUE,;t t +s,@am__fastdepCC_FALSE@,$am__fastdepCC_FALSE,;t t +s,@HADOOP_UTILS_PREFIX@,$HADOOP_UTILS_PREFIX,;t t +s,@CPP@,$CPP,;t t +s,@EGREP@,$EGREP,;t t +s,@HADOOP_PIPES_PREFIX@,$HADOOP_PIPES_PREFIX,;t t +s,@CXX@,$CXX,;t t +s,@CXXFLAGS@,$CXXFLAGS,;t t +s,@ac_ct_CXX@,$ac_ct_CXX,;t t +s,@CXXDEPMODE@,$CXXDEPMODE,;t t +s,@am__fastdepCXX_TRUE@,$am__fastdepCXX_TRUE,;t t +s,@am__fastdepCXX_FALSE@,$am__fastdepCXX_FALSE,;t t +s,@build@,$build,;t t +s,@build_cpu@,$build_cpu,;t t +s,@build_vendor@,$build_vendor,;t t +s,@build_os@,$build_os,;t t +s,@host@,$host,;t t +s,@host_cpu@,$host_cpu,;t t +s,@host_vendor@,$host_vendor,;t t +s,@host_os@,$host_os,;t t +s,@LN_S@,$LN_S,;t t +s,@ECHO@,$ECHO,;t t +s,@AR@,$AR,;t t +s,@ac_ct_AR@,$ac_ct_AR,;t t +s,@RANLIB@,$RANLIB,;t t +s,@ac_ct_RANLIB@,$ac_ct_RANLIB,;t t +s,@CXXCPP@,$CXXCPP,;t t +s,@F77@,$F77,;t t +s,@FFLAGS@,$FFLAGS,;t t +s,@ac_ct_F77@,$ac_ct_F77,;t t +s,@LIBTOOL@,$LIBTOOL,;t t +s,@LIBOBJS@,$LIBOBJS,;t t +s,@LTLIBOBJS@,$LTLIBOBJS,;t t +CEOF + +_ACEOF + + cat >>$CONFIG_STATUS <<\_ACEOF + # Split the substitutions into bite-sized pieces for seds with + # small command number limits, like on Digital OSF/1 and HP-UX. + ac_max_sed_lines=48 + ac_sed_frag=1 # Number of current file. + ac_beg=1 # First line for current file. 
+ ac_end=$ac_max_sed_lines # Line after last line for current file. + ac_more_lines=: + ac_sed_cmds= + while $ac_more_lines; do + if test $ac_beg -gt 1; then + sed "1,${ac_beg}d; ${ac_end}q" $tmp/subs.sed >$tmp/subs.frag + else + sed "${ac_end}q" $tmp/subs.sed >$tmp/subs.frag + fi + if test ! -s $tmp/subs.frag; then + ac_more_lines=false + else + # The purpose of the label and of the branching condition is to + # speed up the sed processing (if there are no `@' at all, there + # is no need to browse any of the substitutions). + # These are the two extra sed commands mentioned above. + (echo ':t + /@[a-zA-Z_][a-zA-Z_0-9]*@/!b' && cat $tmp/subs.frag) >$tmp/subs-$ac_sed_frag.sed + if test -z "$ac_sed_cmds"; then + ac_sed_cmds="sed -f $tmp/subs-$ac_sed_frag.sed" + else + ac_sed_cmds="$ac_sed_cmds | sed -f $tmp/subs-$ac_sed_frag.sed" + fi + ac_sed_frag=`expr $ac_sed_frag + 1` + ac_beg=$ac_end + ac_end=`expr $ac_end + $ac_max_sed_lines` + fi + done + if test -z "$ac_sed_cmds"; then + ac_sed_cmds=cat + fi +fi # test -n "$CONFIG_FILES" + +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF +for ac_file in : $CONFIG_FILES; do test "x$ac_file" = x: && continue + # Support "outfile[:infile[:infile...]]", defaulting infile="outfile.in". + case $ac_file in + - | *:- | *:-:* ) # input from stdin + cat >$tmp/stdin + ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'` + ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;; + *:* ) ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'` + ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;; + * ) ac_file_in=$ac_file.in ;; + esac + + # Compute @srcdir@, @top_srcdir@, and @INSTALL@ for subdirectories. + ac_dir=`(dirname "$ac_file") 2>/dev/null || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || +echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + { if $as_mkdir_p; then + mkdir -p "$ac_dir" + else + as_dir="$ac_dir" + as_dirs= + while test ! -d "$as_dir"; do + as_dirs="$as_dir $as_dirs" + as_dir=`(dirname "$as_dir") 2>/dev/null || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || +echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + done + test ! -n "$as_dirs" || mkdir $as_dirs + fi || { { echo "$as_me:$LINENO: error: cannot create directory \"$ac_dir\"" >&5 +echo "$as_me: error: cannot create directory \"$ac_dir\"" >&2;} + { (exit 1); exit 1; }; }; } + + ac_builddir=. + +if test "$ac_dir" != .; then + ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'` + # A "../" for each directory in $ac_dir_suffix. + ac_top_builddir=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,../,g'` +else + ac_dir_suffix= ac_top_builddir= +fi + +case $srcdir in + .) # No --srcdir option. We are building in place. + ac_srcdir=. + if test -z "$ac_top_builddir"; then + ac_top_srcdir=. + else + ac_top_srcdir=`echo $ac_top_builddir | sed 's,/$,,'` + fi ;; + [\\/]* | ?:[\\/]* ) # Absolute path. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir ;; + *) # Relative path. 
+ ac_srcdir=$ac_top_builddir$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_builddir$srcdir ;; +esac + +# Do not use `cd foo && pwd` to compute absolute paths, because +# the directories may not exist. +case `pwd` in +.) ac_abs_builddir="$ac_dir";; +*) + case "$ac_dir" in + .) ac_abs_builddir=`pwd`;; + [\\/]* | ?:[\\/]* ) ac_abs_builddir="$ac_dir";; + *) ac_abs_builddir=`pwd`/"$ac_dir";; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_top_builddir=${ac_top_builddir}.;; +*) + case ${ac_top_builddir}. in + .) ac_abs_top_builddir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_top_builddir=${ac_top_builddir}.;; + *) ac_abs_top_builddir=$ac_abs_builddir/${ac_top_builddir}.;; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_srcdir=$ac_srcdir;; +*) + case $ac_srcdir in + .) ac_abs_srcdir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_srcdir=$ac_srcdir;; + *) ac_abs_srcdir=$ac_abs_builddir/$ac_srcdir;; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_top_srcdir=$ac_top_srcdir;; +*) + case $ac_top_srcdir in + .) ac_abs_top_srcdir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_top_srcdir=$ac_top_srcdir;; + *) ac_abs_top_srcdir=$ac_abs_builddir/$ac_top_srcdir;; + esac;; +esac + + + case $INSTALL in + [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; + *) ac_INSTALL=$ac_top_builddir$INSTALL ;; + esac + + if test x"$ac_file" != x-; then + { echo "$as_me:$LINENO: creating $ac_file" >&5 +echo "$as_me: creating $ac_file" >&6;} + rm -f "$ac_file" + fi + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + if test x"$ac_file" = x-; then + configure_input= + else + configure_input="$ac_file. " + fi + configure_input=$configure_input"Generated from `echo $ac_file_in | + sed 's,.*/,,'` by configure." + + # First look for the input files in the build tree, otherwise in the + # src tree. + ac_file_inputs=`IFS=: + for f in $ac_file_in; do + case $f in + -) echo $tmp/stdin ;; + [\\/$]*) + # Absolute (can't be DOS-style, as IFS=:) + test -f "$f" || { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5 +echo "$as_me: error: cannot find input file: $f" >&2;} + { (exit 1); exit 1; }; } + echo "$f";; + *) # Relative + if test -f "$f"; then + # Build tree + echo "$f" + elif test -f "$srcdir/$f"; then + # Source tree + echo "$srcdir/$f" + else + # /dev/null tree + { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5 +echo "$as_me: error: cannot find input file: $f" >&2;} + { (exit 1); exit 1; }; } + fi;; + esac + done` || { (exit 1); exit 1; } +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF + sed "$ac_vpsub +$extrasub +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF +:t +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b +s,@configure_input@,$configure_input,;t t +s,@srcdir@,$ac_srcdir,;t t +s,@abs_srcdir@,$ac_abs_srcdir,;t t +s,@top_srcdir@,$ac_top_srcdir,;t t +s,@abs_top_srcdir@,$ac_abs_top_srcdir,;t t +s,@builddir@,$ac_builddir,;t t +s,@abs_builddir@,$ac_abs_builddir,;t t +s,@top_builddir@,$ac_top_builddir,;t t +s,@abs_top_builddir@,$ac_abs_top_builddir,;t t +s,@INSTALL@,$ac_INSTALL,;t t +" $ac_file_inputs | (eval "$ac_sed_cmds") >$tmp/out + rm -f $tmp/stdin + if test x"$ac_file" != x-; then + mv $tmp/out $ac_file + else + cat $tmp/out + rm -f $tmp/out + fi + +done +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF + +# +# CONFIG_HEADER section. +# + +# These sed commands are passed to sed as "A NAME B NAME C VALUE D", where +# NAME is the cpp macro being defined and VALUE is the value it is being given. 
+# +# ac_d sets the value in "#define NAME VALUE" lines. +ac_dA='s,^\([ ]*\)#\([ ]*define[ ][ ]*\)' +ac_dB='[ ].*$,\1#\2' +ac_dC=' ' +ac_dD=',;t' +# ac_u turns "#undef NAME" without trailing blanks into "#define NAME VALUE". +ac_uA='s,^\([ ]*\)#\([ ]*\)undef\([ ][ ]*\)' +ac_uB='$,\1#\2define\3' +ac_uC=' ' +ac_uD=',;t' + +for ac_file in : $CONFIG_HEADERS; do test "x$ac_file" = x: && continue + # Support "outfile[:infile[:infile...]]", defaulting infile="outfile.in". + case $ac_file in + - | *:- | *:-:* ) # input from stdin + cat >$tmp/stdin + ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'` + ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;; + *:* ) ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'` + ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;; + * ) ac_file_in=$ac_file.in ;; + esac + + test x"$ac_file" != x- && { echo "$as_me:$LINENO: creating $ac_file" >&5 +echo "$as_me: creating $ac_file" >&6;} + + # First look for the input files in the build tree, otherwise in the + # src tree. + ac_file_inputs=`IFS=: + for f in $ac_file_in; do + case $f in + -) echo $tmp/stdin ;; + [\\/$]*) + # Absolute (can't be DOS-style, as IFS=:) + test -f "$f" || { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5 +echo "$as_me: error: cannot find input file: $f" >&2;} + { (exit 1); exit 1; }; } + # Do quote $f, to prevent DOS paths from being IFS'd. + echo "$f";; + *) # Relative + if test -f "$f"; then + # Build tree + echo "$f" + elif test -f "$srcdir/$f"; then + # Source tree + echo "$srcdir/$f" + else + # /dev/null tree + { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5 +echo "$as_me: error: cannot find input file: $f" >&2;} + { (exit 1); exit 1; }; } + fi;; + esac + done` || { (exit 1); exit 1; } + # Remove the trailing spaces. + sed 's/[ ]*$//' $ac_file_inputs >$tmp/in + +_ACEOF + +# Transform confdefs.h into two sed scripts, `conftest.defines' and +# `conftest.undefs', that substitutes the proper values into +# config.h.in to produce config.h. The first handles `#define' +# templates, and the second `#undef' templates. +# And first: Protect against being on the right side of a sed subst in +# config.status. Protect against being in an unquoted here document +# in config.status. +rm -f conftest.defines conftest.undefs +# Using a here document instead of a string reduces the quoting nightmare. +# Putting comments in sed scripts is not portable. +# +# `end' is used to avoid that the second main sed command (meant for +# 0-ary CPP macros) applies to n-ary macro definitions. +# See the Autoconf documentation for `clear'. +cat >confdef2sed.sed <<\_ACEOF +s/[\\&,]/\\&/g +s,[\\$`],\\&,g +t clear +: clear +s,^[ ]*#[ ]*define[ ][ ]*\([^ (][^ (]*\)\(([^)]*)\)[ ]*\(.*\)$,${ac_dA}\1${ac_dB}\1\2${ac_dC}\3${ac_dD},gp +t end +s,^[ ]*#[ ]*define[ ][ ]*\([^ ][^ ]*\)[ ]*\(.*\)$,${ac_dA}\1${ac_dB}\1${ac_dC}\2${ac_dD},gp +: end +_ACEOF +# If some macros were called several times there might be several times +# the same #defines, which is useless. Nevertheless, we may not want to +# sort them, since we want the *last* AC-DEFINE to be honored. +uniq confdefs.h | sed -n -f confdef2sed.sed >conftest.defines +sed 's/ac_d/ac_u/g' conftest.defines >conftest.undefs +rm -f confdef2sed.sed + +# This sed command replaces #undef with comments. This is necessary, for +# example, in the case of _POSIX_SOURCE, which is predefined and required +# on some systems where configure will not decide to define it. 
+cat >>conftest.undefs <<\_ACEOF +s,^[ ]*#[ ]*undef[ ][ ]*[a-zA-Z_][a-zA-Z_0-9]*,/* & */, +_ACEOF + +# Break up conftest.defines because some shells have a limit on the size +# of here documents, and old seds have small limits too (100 cmds). +echo ' # Handle all the #define templates only if necessary.' >>$CONFIG_STATUS +echo ' if grep "^[ ]*#[ ]*define" $tmp/in >/dev/null; then' >>$CONFIG_STATUS +echo ' # If there are no defines, we may have an empty if/fi' >>$CONFIG_STATUS +echo ' :' >>$CONFIG_STATUS +rm -f conftest.tail +while grep . conftest.defines >/dev/null +do + # Write a limited-size here document to $tmp/defines.sed. + echo ' cat >$tmp/defines.sed <>$CONFIG_STATUS + # Speed up: don't consider the non `#define' lines. + echo '/^[ ]*#[ ]*define/!b' >>$CONFIG_STATUS + # Work around the forget-to-reset-the-flag bug. + echo 't clr' >>$CONFIG_STATUS + echo ': clr' >>$CONFIG_STATUS + sed ${ac_max_here_lines}q conftest.defines >>$CONFIG_STATUS + echo 'CEOF + sed -f $tmp/defines.sed $tmp/in >$tmp/out + rm -f $tmp/in + mv $tmp/out $tmp/in +' >>$CONFIG_STATUS + sed 1,${ac_max_here_lines}d conftest.defines >conftest.tail + rm -f conftest.defines + mv conftest.tail conftest.defines +done +rm -f conftest.defines +echo ' fi # grep' >>$CONFIG_STATUS +echo >>$CONFIG_STATUS + +# Break up conftest.undefs because some shells have a limit on the size +# of here documents, and old seds have small limits too (100 cmds). +echo ' # Handle all the #undef templates' >>$CONFIG_STATUS +rm -f conftest.tail +while grep . conftest.undefs >/dev/null +do + # Write a limited-size here document to $tmp/undefs.sed. + echo ' cat >$tmp/undefs.sed <>$CONFIG_STATUS + # Speed up: don't consider the non `#undef' + echo '/^[ ]*#[ ]*undef/!b' >>$CONFIG_STATUS + # Work around the forget-to-reset-the-flag bug. + echo 't clr' >>$CONFIG_STATUS + echo ': clr' >>$CONFIG_STATUS + sed ${ac_max_here_lines}q conftest.undefs >>$CONFIG_STATUS + echo 'CEOF + sed -f $tmp/undefs.sed $tmp/in >$tmp/out + rm -f $tmp/in + mv $tmp/out $tmp/in +' >>$CONFIG_STATUS + sed 1,${ac_max_here_lines}d conftest.undefs >conftest.tail + rm -f conftest.undefs + mv conftest.tail conftest.undefs +done +rm -f conftest.undefs + +cat >>$CONFIG_STATUS <<\_ACEOF + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + if test x"$ac_file" = x-; then + echo "/* Generated by configure. */" >$tmp/config.h + else + echo "/* $ac_file. Generated by configure. */" >$tmp/config.h + fi + cat $tmp/in >>$tmp/config.h + rm -f $tmp/in + if test x"$ac_file" != x-; then + if diff $ac_file $tmp/config.h >/dev/null 2>&1; then + { echo "$as_me:$LINENO: $ac_file is unchanged" >&5 +echo "$as_me: $ac_file is unchanged" >&6;} + else + ac_dir=`(dirname "$ac_file") 2>/dev/null || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || +echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + { if $as_mkdir_p; then + mkdir -p "$ac_dir" + else + as_dir="$ac_dir" + as_dirs= + while test ! 
-d "$as_dir"; do + as_dirs="$as_dir $as_dirs" + as_dir=`(dirname "$as_dir") 2>/dev/null || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || +echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + done + test ! -n "$as_dirs" || mkdir $as_dirs + fi || { { echo "$as_me:$LINENO: error: cannot create directory \"$ac_dir\"" >&5 +echo "$as_me: error: cannot create directory \"$ac_dir\"" >&2;} + { (exit 1); exit 1; }; }; } + + rm -f $ac_file + mv $tmp/config.h $ac_file + fi + else + cat $tmp/config.h + rm -f $tmp/config.h + fi +# Compute $ac_file's index in $config_headers. +_am_stamp_count=1 +for _am_header in $config_headers :; do + case $_am_header in + $ac_file | $ac_file:* ) + break ;; + * ) + _am_stamp_count=`expr $_am_stamp_count + 1` ;; + esac +done +echo "timestamp for $ac_file" >`(dirname $ac_file) 2>/dev/null || +$as_expr X$ac_file : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X$ac_file : 'X\(//\)[^/]' \| \ + X$ac_file : 'X\(//\)$' \| \ + X$ac_file : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || +echo X$ac_file | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'`/stamp-h$_am_stamp_count +done +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF + +# +# CONFIG_COMMANDS section. +# +for ac_file in : $CONFIG_COMMANDS; do test "x$ac_file" = x: && continue + ac_dest=`echo "$ac_file" | sed 's,:.*,,'` + ac_source=`echo "$ac_file" | sed 's,[^:]*:,,'` + ac_dir=`(dirname "$ac_dest") 2>/dev/null || +$as_expr X"$ac_dest" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_dest" : 'X\(//\)[^/]' \| \ + X"$ac_dest" : 'X\(//\)$' \| \ + X"$ac_dest" : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || +echo X"$ac_dest" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + { if $as_mkdir_p; then + mkdir -p "$ac_dir" + else + as_dir="$ac_dir" + as_dirs= + while test ! -d "$as_dir"; do + as_dirs="$as_dir $as_dirs" + as_dir=`(dirname "$as_dir") 2>/dev/null || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || +echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + done + test ! -n "$as_dirs" || mkdir $as_dirs + fi || { { echo "$as_me:$LINENO: error: cannot create directory \"$ac_dir\"" >&5 +echo "$as_me: error: cannot create directory \"$ac_dir\"" >&2;} + { (exit 1); exit 1; }; }; } + + ac_builddir=. + +if test "$ac_dir" != .; then + ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'` + # A "../" for each directory in $ac_dir_suffix. + ac_top_builddir=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,../,g'` +else + ac_dir_suffix= ac_top_builddir= +fi + +case $srcdir in + .) # No --srcdir option. We are building in place. + ac_srcdir=. + if test -z "$ac_top_builddir"; then + ac_top_srcdir=. + else + ac_top_srcdir=`echo $ac_top_builddir | sed 's,/$,,'` + fi ;; + [\\/]* | ?:[\\/]* ) # Absolute path. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir ;; + *) # Relative path. 
+ ac_srcdir=$ac_top_builddir$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_builddir$srcdir ;; +esac + +# Do not use `cd foo && pwd` to compute absolute paths, because +# the directories may not exist. +case `pwd` in +.) ac_abs_builddir="$ac_dir";; +*) + case "$ac_dir" in + .) ac_abs_builddir=`pwd`;; + [\\/]* | ?:[\\/]* ) ac_abs_builddir="$ac_dir";; + *) ac_abs_builddir=`pwd`/"$ac_dir";; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_top_builddir=${ac_top_builddir}.;; +*) + case ${ac_top_builddir}. in + .) ac_abs_top_builddir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_top_builddir=${ac_top_builddir}.;; + *) ac_abs_top_builddir=$ac_abs_builddir/${ac_top_builddir}.;; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_srcdir=$ac_srcdir;; +*) + case $ac_srcdir in + .) ac_abs_srcdir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_srcdir=$ac_srcdir;; + *) ac_abs_srcdir=$ac_abs_builddir/$ac_srcdir;; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_top_srcdir=$ac_top_srcdir;; +*) + case $ac_top_srcdir in + .) ac_abs_top_srcdir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_top_srcdir=$ac_top_srcdir;; + *) ac_abs_top_srcdir=$ac_abs_builddir/$ac_top_srcdir;; + esac;; +esac + + + { echo "$as_me:$LINENO: executing $ac_dest commands" >&5 +echo "$as_me: executing $ac_dest commands" >&6;} + case $ac_dest in + depfiles ) test x"$AMDEP_TRUE" != x"" || for mf in $CONFIG_FILES; do + # Strip MF so we end up with the name of the file. + mf=`echo "$mf" | sed -e 's/:.*$//'` + # Check whether this is an Automake generated Makefile or not. + # We used to match only the files named `Makefile.in', but + # some people rename them; so instead we look at the file content. + # Grep'ing the first line is not enough: some people post-process + # each Makefile.in and add a new line on top of each file to say so. + # So let's grep whole file. + if grep '^#.*generated by automake' $mf > /dev/null 2>&1; then + dirpart=`(dirname "$mf") 2>/dev/null || +$as_expr X"$mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$mf" : 'X\(//\)[^/]' \| \ + X"$mf" : 'X\(//\)$' \| \ + X"$mf" : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || +echo X"$mf" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + else + continue + fi + # Extract the definition of DEPDIR, am__include, and am__quote + # from the Makefile without running `make'. + DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` + test -z "$DEPDIR" && continue + am__include=`sed -n 's/^am__include = //p' < "$mf"` + test -z "am__include" && continue + am__quote=`sed -n 's/^am__quote = //p' < "$mf"` + # When using ansi2knr, U may be empty or an underscore; expand it + U=`sed -n 's/^U = //p' < "$mf"` + # Find all dependency output files, they are included files with + # $(DEPDIR) in their names. We invoke sed twice because it is the + # simplest approach to changing $(DEPDIR) to its actual value in the + # expansion. + for file in `sed -n " + s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ + sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do + # Make sure the directory exists. + test -f "$dirpart/$file" && continue + fdir=`(dirname "$file") 2>/dev/null || +$as_expr X"$file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$file" : 'X\(//\)[^/]' \| \ + X"$file" : 'X\(//\)$' \| \ + X"$file" : 'X\(/\)' \| \ + . 
: '\(.\)' 2>/dev/null || +echo X"$file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + { if $as_mkdir_p; then + mkdir -p $dirpart/$fdir + else + as_dir=$dirpart/$fdir + as_dirs= + while test ! -d "$as_dir"; do + as_dirs="$as_dir $as_dirs" + as_dir=`(dirname "$as_dir") 2>/dev/null || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || +echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + done + test ! -n "$as_dirs" || mkdir $as_dirs + fi || { { echo "$as_me:$LINENO: error: cannot create directory $dirpart/$fdir" >&5 +echo "$as_me: error: cannot create directory $dirpart/$fdir" >&2;} + { (exit 1); exit 1; }; }; } + + # echo "creating $dirpart/$file" + echo '# dummy' > "$dirpart/$file" + done +done + ;; + esac +done +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF + +{ (exit 0); exit 0; } +_ACEOF +chmod +x $CONFIG_STATUS +ac_clean_files=$ac_clean_files_save + + +# configure is writing to config.log, and then calls config.status. +# config.status does its own redirection, appending to config.log. +# Unfortunately, on DOS this fails, as config.log is still kept open +# by configure, so config.status won't be able to write to it; its +# output is simply discarded. So we exec the FD to /dev/null, +# effectively closing config.log, so it can be properly (re)opened and +# appended to by config.status. When coming back to configure, we +# need to make the FD available again. +if test "$no_create" != yes; then + ac_cs_success=: + ac_config_status_args= + test "$silent" = yes && + ac_config_status_args="$ac_config_status_args --quiet" + exec 5>/dev/null + $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false + exec 5>>config.log + # Use ||, not &&, to avoid exiting from the if with $? = 1, which + # would make configure fail if this is the last instruction. + $ac_cs_success || { (exit 1); exit 1; } +fi + diff --git a/src/examples/pipes/configure.ac b/src/examples/pipes/configure.ac new file mode 100644 index 0000000..89b4791 --- /dev/null +++ b/src/examples/pipes/configure.ac @@ -0,0 +1,55 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# -*- Autoconf -*- +# Process this file with autoconf to produce a configure script. 
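Note: the AC_CHECK_LIB([socket],[shutdown]) and AC_CHECK_LIB([nsl],[xdr_float]) checks declared below are what produce the long generated link probes seen earlier in this patch's configure script. The following is a minimal sketch of such a probe program, assuming a Solaris-style system where -lsocket is a separate library (on other systems the symbol lives in libc and the check simply fails or is irrelevant); it mirrors the generated conftest fragment rather than adding anything new.

/* Sketch of the conftest program Autoconf emits for AC_CHECK_LIB([socket],[shutdown]).
   Only linkage matters here: the dummy prototype avoids pulling in any headers,
   and the call is never expected to do anything useful at run time. */
#ifdef __cplusplus
extern "C"
#endif
char shutdown ();   /* dummy prototype, as in the generated conftest above */

int main ()
{
  shutdown ();      /* the check passes only if this call links (e.g. against -lsocket) */
  return 0;
}

If the link succeeds, configure records HAVE_LIBSOCKET in confdefs.h and prepends -lsocket to LIBS, exactly as the generated fragment near the start of this configure script shows.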
+ +AC_PREREQ(2.59) +AC_INIT(hadoop-pipes-examples, 0.13.0, omalley@apache.org) + +AM_INIT_AUTOMAKE([subdir-objects foreign no-dist]) + +AC_CONFIG_SRCDIR([impl/wordcount-simple.cc]) +AC_CONFIG_HEADER([impl/config.h]) +AC_CONFIG_FILES([Makefile]) + +AC_PREFIX_DEFAULT(`pwd`/../install) + +USE_HADOOP_PIPES + +# Checks for programs. +AC_PROG_CXX +AC_PROG_INSTALL +AC_PROG_LIBTOOL + +# Checks for libraries. + +# Checks for header files. +AC_LANG(C++) +AC_CHECK_HEADERS([unistd.h]) + +# Checks for typedefs, structures, and compiler characteristics. +AC_HEADER_STDBOOL +AC_C_CONST +AC_TYPE_OFF_T +AC_TYPE_SIZE_T +AC_FUNC_STRERROR_R + +# Checks for library functions. +AC_CHECK_FUNCS([mkdir uname]) +AC_CHECK_LIB([socket],[shutdown]) +AC_CHECK_LIB([nsl],[xdr_float]) +AC_OUTPUT diff --git a/src/examples/pipes/depcomp b/src/examples/pipes/depcomp new file mode 100644 index 0000000..11e2d3b --- /dev/null +++ b/src/examples/pipes/depcomp @@ -0,0 +1,522 @@ +#! /bin/sh +# depcomp - compile a program generating dependencies as side-effects + +scriptversion=2004-05-31.23 + +# Copyright (C) 1999, 2000, 2003, 2004 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA +# 02111-1307, USA. + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# Originally written by Alexandre Oliva . + +case $1 in + '') + echo "$0: No command. Try \`$0 --help' for more information." 1>&2 + exit 1; + ;; + -h | --h*) + cat <<\EOF +Usage: depcomp [--help] [--version] PROGRAM [ARGS] + +Run PROGRAMS ARGS to compile a file, generating dependencies +as side-effects. + +Environment variables: + depmode Dependency tracking mode. + source Source file read by `PROGRAMS ARGS'. + object Object file output by `PROGRAMS ARGS'. + DEPDIR directory where to store dependencies. + depfile Dependency file to output. + tmpdepfile Temporary file to use when outputing dependencies. + libtool Whether libtool is used (yes/no). + +Report bugs to . +EOF + exit 0 + ;; + -v | --v*) + echo "depcomp $scriptversion" + exit 0 + ;; +esac + +if test -z "$depmode" || test -z "$source" || test -z "$object"; then + echo "depcomp: Variables source, object and depmode must be set" 1>&2 + exit 1 +fi + +# Dependencies for sub/bar.o or sub/bar.obj go into sub/.deps/bar.Po. +depfile=${depfile-`echo "$object" | + sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`} +tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`} + +rm -f "$tmpdepfile" + +# Some modes work just like other modes, but use different flags. We +# parameterize here, but still list the modes in the big case below, +# to make depend.m4 easier to write. 
Note that we *cannot* use a case +# here, because this file can only contain one case statement. +if test "$depmode" = hp; then + # HP compiler uses -M and no extra arg. + gccflag=-M + depmode=gcc +fi + +if test "$depmode" = dashXmstdout; then + # This is just like dashmstdout with a different argument. + dashmflag=-xM + depmode=dashmstdout +fi + +case "$depmode" in +gcc3) +## gcc 3 implements dependency tracking that does exactly what +## we want. Yay! Note: for some reason libtool 1.4 doesn't like +## it if -MD -MP comes after the -MF stuff. Hmm. + "$@" -MT "$object" -MD -MP -MF "$tmpdepfile" + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile" + exit $stat + fi + mv "$tmpdepfile" "$depfile" + ;; + +gcc) +## There are various ways to get dependency output from gcc. Here's +## why we pick this rather obscure method: +## - Don't want to use -MD because we'd like the dependencies to end +## up in a subdir. Having to rename by hand is ugly. +## (We might end up doing this anyway to support other compilers.) +## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like +## -MM, not -M (despite what the docs say). +## - Using -M directly means running the compiler twice (even worse +## than renaming). + if test -z "$gccflag"; then + gccflag=-MD, + fi + "$@" -Wp,"$gccflag$tmpdepfile" + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + echo "$object : \\" > "$depfile" + alpha=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz +## The second -e expression handles DOS-style file names with drive letters. + sed -e 's/^[^:]*: / /' \ + -e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile" +## This next piece of magic avoids the `deleted header file' problem. +## The problem is that when a header file which appears in a .P file +## is deleted, the dependency causes make to die (because there is +## typically no way to rebuild the header). We avoid this by adding +## dummy dependencies for each header file. Too bad gcc doesn't do +## this for us directly. + tr ' ' ' +' < "$tmpdepfile" | +## Some versions of gcc put a space before the `:'. On the theory +## that the space means something, we add a space to the output as +## well. +## Some versions of the HPUX 10.20 sed can't process this invocation +## correctly. Breaking it into two sed invocations is a workaround. + sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +hp) + # This case exists only to let depend.m4 do its work. It works by + # looking at the text of this script. This case will never be run, + # since it is checked for above. + exit 1 + ;; + +sgi) + if test "$libtool" = yes; then + "$@" "-Wp,-MDupdate,$tmpdepfile" + else + "$@" -MDupdate "$tmpdepfile" + fi + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + + if test -f "$tmpdepfile"; then # yes, the sourcefile depend on other files + echo "$object : \\" > "$depfile" + + # Clip off the initial element (the dependent). Don't try to be + # clever and replace this with sed code, as IRIX sed won't handle + # lines with more than a fixed number of characters (4096 in + # IRIX 6.2 sed, 8192 in IRIX 6.5). We also remove comment lines; + # the IRIX cc adds comments like `#:fec' to the end of the + # dependency line. 
+ tr ' ' ' +' < "$tmpdepfile" \ + | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' | \ + tr ' +' ' ' >> $depfile + echo >> $depfile + + # The second pass generates a dummy entry for each header file. + tr ' ' ' +' < "$tmpdepfile" \ + | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \ + >> $depfile + else + # The sourcefile does not contain any dependencies, so just + # store a dummy comment line, to avoid errors with the Makefile + # "include basename.Plo" scheme. + echo "#dummy" > "$depfile" + fi + rm -f "$tmpdepfile" + ;; + +aix) + # The C for AIX Compiler uses -M and outputs the dependencies + # in a .u file. In older versions, this file always lives in the + # current directory. Also, the AIX compiler puts `$object:' at the + # start of each line; $object doesn't have directory information. + # Version 6 uses the directory in both cases. + stripped=`echo "$object" | sed 's/\(.*\)\..*$/\1/'` + tmpdepfile="$stripped.u" + if test "$libtool" = yes; then + "$@" -Wc,-M + else + "$@" -M + fi + stat=$? + + if test -f "$tmpdepfile"; then : + else + stripped=`echo "$stripped" | sed 's,^.*/,,'` + tmpdepfile="$stripped.u" + fi + + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile" + exit $stat + fi + + if test -f "$tmpdepfile"; then + outname="$stripped.o" + # Each line is of the form `foo.o: dependent.h'. + # Do two passes, one to just change these to + # `$object: dependent.h' and one to simply `dependent.h:'. + sed -e "s,^$outname:,$object :," < "$tmpdepfile" > "$depfile" + sed -e "s,^$outname: \(.*\)$,\1:," < "$tmpdepfile" >> "$depfile" + else + # The sourcefile does not contain any dependencies, so just + # store a dummy comment line, to avoid errors with the Makefile + # "include basename.Plo" scheme. + echo "#dummy" > "$depfile" + fi + rm -f "$tmpdepfile" + ;; + +icc) + # Intel's C compiler understands `-MD -MF file'. However on + # icc -MD -MF foo.d -c -o sub/foo.o sub/foo.c + # ICC 7.0 will fill foo.d with something like + # foo.o: sub/foo.c + # foo.o: sub/foo.h + # which is wrong. We want: + # sub/foo.o: sub/foo.c + # sub/foo.o: sub/foo.h + # sub/foo.c: + # sub/foo.h: + # ICC 7.1 will output + # foo.o: sub/foo.c sub/foo.h + # and will wrap long lines using \ : + # foo.o: sub/foo.c ... \ + # sub/foo.h ... \ + # ... + + "$@" -MD -MF "$tmpdepfile" + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + # Each line is of the form `foo.o: dependent.h', + # or `foo.o: dep1.h dep2.h \', or ` dep3.h dep4.h \'. + # Do two passes, one to just change these to + # `$object: dependent.h' and one to simply `dependent.h:'. + sed "s,^[^:]*:,$object :," < "$tmpdepfile" > "$depfile" + # Some versions of the HPUX 10.20 sed can't process this invocation + # correctly. Breaking it into two sed invocations is a workaround. + sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" | + sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +tru64) + # The Tru64 compiler uses -MD to generate dependencies as a side + # effect. `cc -MD -o foo.o ...' puts the dependencies into `foo.o.d'. + # At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put + # dependencies in `foo.d' instead, so we check for that too. + # Subdirectories are respected. + dir=`echo "$object" | sed -e 's|/[^/]*$|/|'` + test "x$dir" = "x$object" && dir= + base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'` + + if test "$libtool" = yes; then + # Dependencies are output in .lo.d with libtool 1.4. 
+ # With libtool 1.5 they are output both in $dir.libs/$base.o.d + # and in $dir.libs/$base.o.d and $dir$base.o.d. We process the + # latter, because the former will be cleaned when $dir.libs is + # erased. + tmpdepfile1="$dir.libs/$base.lo.d" + tmpdepfile2="$dir$base.o.d" + tmpdepfile3="$dir.libs/$base.d" + "$@" -Wc,-MD + else + tmpdepfile1="$dir$base.o.d" + tmpdepfile2="$dir$base.d" + tmpdepfile3="$dir$base.d" + "$@" -MD + fi + + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" + exit $stat + fi + + if test -f "$tmpdepfile1"; then + tmpdepfile="$tmpdepfile1" + elif test -f "$tmpdepfile2"; then + tmpdepfile="$tmpdepfile2" + else + tmpdepfile="$tmpdepfile3" + fi + if test -f "$tmpdepfile"; then + sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile" + # That's a tab and a space in the []. + sed -e 's,^.*\.[a-z]*:[ ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile" + else + echo "#dummy" > "$depfile" + fi + rm -f "$tmpdepfile" + ;; + +#nosideeffect) + # This comment above is used by automake to tell side-effect + # dependency tracking mechanisms from slower ones. + +dashmstdout) + # Important note: in order to support this mode, a compiler *must* + # always write the preprocessed file to stdout, regardless of -o. + "$@" || exit $? + + # Remove the call to Libtool. + if test "$libtool" = yes; then + while test $1 != '--mode=compile'; do + shift + done + shift + fi + + # Remove `-o $object'. + IFS=" " + for arg + do + case $arg in + -o) + shift + ;; + $object) + shift + ;; + *) + set fnord "$@" "$arg" + shift # fnord + shift # $arg + ;; + esac + done + + test -z "$dashmflag" && dashmflag=-M + # Require at least two characters before searching for `:' + # in the target name. This is to cope with DOS-style filenames: + # a dependency such as `c:/foo/bar' could be seen as target `c' otherwise. + "$@" $dashmflag | + sed 's:^[ ]*[^: ][^:][^:]*\:[ ]*:'"$object"'\: :' > "$tmpdepfile" + rm -f "$depfile" + cat < "$tmpdepfile" > "$depfile" + tr ' ' ' +' < "$tmpdepfile" | \ +## Some versions of the HPUX 10.20 sed can't process this invocation +## correctly. Breaking it into two sed invocations is a workaround. + sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +dashXmstdout) + # This case only exists to satisfy depend.m4. It is never actually + # run, as this mode is specially recognized in the preamble. + exit 1 + ;; + +makedepend) + "$@" || exit $? + # Remove any Libtool call + if test "$libtool" = yes; then + while test $1 != '--mode=compile'; do + shift + done + shift + fi + # X makedepend + shift + cleared=no + for arg in "$@"; do + case $cleared in + no) + set ""; shift + cleared=yes ;; + esac + case "$arg" in + -D*|-I*) + set fnord "$@" "$arg"; shift ;; + # Strip any option that makedepend may not understand. Remove + # the object too, otherwise makedepend will parse it as a source file. + -*|$object) + ;; + *) + set fnord "$@" "$arg"; shift ;; + esac + done + obj_suffix="`echo $object | sed 's/^.*\././'`" + touch "$tmpdepfile" + ${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@" + rm -f "$depfile" + cat < "$tmpdepfile" > "$depfile" + sed '1,2d' "$tmpdepfile" | tr ' ' ' +' | \ +## Some versions of the HPUX 10.20 sed can't process this invocation +## correctly. Breaking it into two sed invocations is a workaround. 
+ sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" "$tmpdepfile".bak + ;; + +cpp) + # Important note: in order to support this mode, a compiler *must* + # always write the preprocessed file to stdout. + "$@" || exit $? + + # Remove the call to Libtool. + if test "$libtool" = yes; then + while test $1 != '--mode=compile'; do + shift + done + shift + fi + + # Remove `-o $object'. + IFS=" " + for arg + do + case $arg in + -o) + shift + ;; + $object) + shift + ;; + *) + set fnord "$@" "$arg" + shift # fnord + shift # $arg + ;; + esac + done + + "$@" -E | + sed -n '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' | + sed '$ s: \\$::' > "$tmpdepfile" + rm -f "$depfile" + echo "$object : \\" > "$depfile" + cat < "$tmpdepfile" >> "$depfile" + sed < "$tmpdepfile" '/^$/d;s/^ //;s/ \\$//;s/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +msvisualcpp) + # Important note: in order to support this mode, a compiler *must* + # always write the preprocessed file to stdout, regardless of -o, + # because we must use -o when running libtool. + "$@" || exit $? + IFS=" " + for arg + do + case "$arg" in + "-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI") + set fnord "$@" + shift + shift + ;; + *) + set fnord "$@" "$arg" + shift + shift + ;; + esac + done + "$@" -E | + sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::echo "`cygpath -u \\"\1\\"`":p' | sort | uniq > "$tmpdepfile" + rm -f "$depfile" + echo "$object : \\" > "$depfile" + . "$tmpdepfile" | sed 's% %\\ %g' | sed -n '/^\(.*\)$/ s:: \1 \\:p' >> "$depfile" + echo " " >> "$depfile" + . "$tmpdepfile" | sed 's% %\\ %g' | sed -n '/^\(.*\)$/ s::\1\::p' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +none) + exec "$@" + ;; + +*) + echo "Unknown depmode $depmode" 1>&2 + exit 1 + ;; +esac + +exit 0 + +# Local Variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-end: "$" +# End: diff --git a/src/examples/pipes/impl/config.h.in b/src/examples/pipes/impl/config.h.in new file mode 100644 index 0000000..9af3b8b --- /dev/null +++ b/src/examples/pipes/impl/config.h.in @@ -0,0 +1,109 @@ +/* impl/config.h.in. Generated from configure.ac by autoheader. */ + +/* Define to 1 if you have the declaration of `strerror_r', and to 0 if you + don't. */ +#undef HAVE_DECL_STRERROR_R + +/* Define to 1 if you have the header file. */ +#undef HAVE_DLFCN_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_INTTYPES_H + +/* Define to 1 if you have the `nsl' library (-lnsl). */ +#undef HAVE_LIBNSL + +/* Define to 1 if you have the `pthread' library (-lpthread). */ +#undef HAVE_LIBPTHREAD + +/* Define to 1 if you have the `socket' library (-lsocket). */ +#undef HAVE_LIBSOCKET + +/* Define to 1 if you have the header file. */ +#undef HAVE_MEMORY_H + +/* Define to 1 if you have the `mkdir' function. */ +#undef HAVE_MKDIR + +/* Define to 1 if you have the header file. */ +#undef HAVE_PTHREAD_H + +/* Define to 1 if stdbool.h conforms to C99. */ +#undef HAVE_STDBOOL_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDINT_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDLIB_H + +/* Define to 1 if you have the `strerror_r' function. */ +#undef HAVE_STRERROR_R + +/* Define to 1 if you have the header file. */ +#undef HAVE_STRINGS_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STRING_H + +/* Define to 1 if you have the header file. 
*/ +#undef HAVE_SYS_STAT_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_TYPES_H + +/* Define to 1 if you have the `uname' function. */ +#undef HAVE_UNAME + +/* Define to 1 if you have the header file. */ +#undef HAVE_UNISTD_H + +/* Define to 1 if the system has the type `_Bool'. */ +#undef HAVE__BOOL + +/* Name of package */ +#undef PACKAGE + +/* Define to the address where bug reports for this package should be sent. */ +#undef PACKAGE_BUGREPORT + +/* Define to the full name of this package. */ +#undef PACKAGE_NAME + +/* Define to the full name and version of this package. */ +#undef PACKAGE_STRING + +/* Define to the one symbol short name of this package. */ +#undef PACKAGE_TARNAME + +/* Define to the version of this package. */ +#undef PACKAGE_VERSION + +/* Define to 1 if you have the ANSI C header files. */ +#undef STDC_HEADERS + +/* Define to 1 if strerror_r returns char *. */ +#undef STRERROR_R_CHAR_P + +/* Version number of package */ +#undef VERSION + +/* Number of bits in a file offset, on hosts where this is settable. */ +#undef _FILE_OFFSET_BITS + +/* Enable GNU extensions on systems that have them. */ +#ifndef _GNU_SOURCE +# undef _GNU_SOURCE +#endif + +/* Define for large files, on AIX-style hosts. */ +#undef _LARGE_FILES + +/* Define to empty if `const' does not conform to ANSI C. */ +#undef const + +/* Define to `long' if does not define. */ +#undef off_t + +/* Define to `unsigned' if does not define. */ +#undef size_t diff --git a/src/examples/pipes/impl/sort.cc b/src/examples/pipes/impl/sort.cc new file mode 100644 index 0000000..b1becf1 --- /dev/null +++ b/src/examples/pipes/impl/sort.cc @@ -0,0 +1,96 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "hadoop/Pipes.hh" +#include "hadoop/TemplateFactory.hh" + +class SortMap: public HadoopPipes::Mapper { +private: + /* the fraction 0.0 to 1.0 of records to keep */ + float keepFraction; + /* the number of records kept so far */ + long long keptRecords; + /* the total number of records */ + long long totalRecords; + static const std::string MAP_KEEP_PERCENT; +public: + /* + * Look in the config to find the fraction of records to keep. 
+ */
+  SortMap(HadoopPipes::TaskContext& context){
+    const HadoopPipes::JobConf* conf = context.getJobConf();
+    if (conf->hasKey(MAP_KEEP_PERCENT)) {
+      keepFraction = conf->getFloat(MAP_KEEP_PERCENT) / 100.0;
+    } else {
+      keepFraction = 1.0;
+    }
+    keptRecords = 0;
+    totalRecords = 0;
+  }
+
+  void map(HadoopPipes::MapContext& context) {
+    totalRecords += 1;
+    while ((float) keptRecords / totalRecords < keepFraction) {
+      keptRecords += 1;
+      context.emit(context.getInputKey(), context.getInputValue());
+    }
+  }
+};
+
+const std::string SortMap::MAP_KEEP_PERCENT("hadoop.sort.map.keep.percent");
+
+class SortReduce: public HadoopPipes::Reducer {
+private:
+  /* the fraction 0.0 to 1.0 of records to keep */
+  float keepFraction;
+  /* the number of records kept so far */
+  long long keptRecords;
+  /* the total number of records */
+  long long totalRecords;
+  static const std::string REDUCE_KEEP_PERCENT;
+public:
+  SortReduce(HadoopPipes::TaskContext& context){
+    const HadoopPipes::JobConf* conf = context.getJobConf();
+    if (conf->hasKey(REDUCE_KEEP_PERCENT)) {
+      keepFraction = conf->getFloat(REDUCE_KEEP_PERCENT) / 100.0;
+    } else {
+      keepFraction = 1.0;
+    }
+    keptRecords = 0;
+    totalRecords = 0;
+  }
+
+  void reduce(HadoopPipes::ReduceContext& context) {
+    while (context.nextValue()) {
+      totalRecords += 1;
+      while ((float) keptRecords / totalRecords < keepFraction) {
+        keptRecords += 1;
+        context.emit(context.getInputKey(), context.getInputValue());
+      }
+    }
+  }
+};
+
+const std::string
+  SortReduce::REDUCE_KEEP_PERCENT("hadoop.sort.reduce.keep.percent");
+
+int main(int argc, char *argv[]) {
+  return HadoopPipes::runTask(HadoopPipes::TemplateFactory<SortMap, SortReduce>());
+}
+
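The SortMap and SortReduce classes above implement proportional sampling: every record bumps totalRecords, and the inner while loop emits just enough records to keep keptRecords/totalRecords at or above the fraction taken from hadoop.sort.map.keep.percent and hadoop.sort.reduce.keep.percent (defaulting to 100 percent when the key is absent). The sketch below isolates that decision rule so it can be compiled and run without Pipes; the RecordSampler class, the 25 percent figure, and the driver in main are assumptions made for the illustration, not part of the patch.

// Standalone sketch of the keep-fraction sampling used by SortMap/SortReduce.
// RecordSampler and the demo driver are hypothetical; only the decision rule
// mirrors the Pipes example above.
#include <iostream>

class RecordSampler {
private:
  float keepFraction;        // fraction 0.0..1.0 of records to keep
  long long keptRecords;     // records emitted so far
  long long totalRecords;    // records seen so far
public:
  explicit RecordSampler(float keepPercent)
    : keepFraction(keepPercent / 100.0f), keptRecords(0), totalRecords(0) {}

  // Returns how many copies of the current record to emit so that the
  // kept/total ratio stays at or above keepFraction.
  int recordsToEmit() {
    totalRecords += 1;
    int emitted = 0;
    while ((float) keptRecords / totalRecords < keepFraction) {
      keptRecords += 1;
      emitted += 1;
    }
    return emitted;
  }
};

int main() {
  RecordSampler sampler(25.0f);   // keep roughly 25% of the input
  long long emitted = 0;
  for (int i = 0; i < 1000; ++i) {
    emitted += sampler.recordsToEmit();
  }
  std::cout << "emitted " << emitted << " of 1000 records" << std::endl;
  return 0;
}

The reduce side wraps the same loop inside the nextValue() iteration, so the fraction is applied per value rather than per key group.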
diff --git a/src/examples/pipes/impl/wordcount-nopipe.cc b/src/examples/pipes/impl/wordcount-nopipe.cc
new file mode 100644
index 0000000..105796d
--- /dev/null
+++ b/src/examples/pipes/impl/wordcount-nopipe.cc
@@ -0,0 +1,148 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "hadoop/Pipes.hh"
+#include "hadoop/TemplateFactory.hh"
+#include "hadoop/StringUtils.hh"
+#include "hadoop/SerialUtils.hh"
+
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+const std::string WORDCOUNT = "WORDCOUNT";
+const std::string INPUT_WORDS = "INPUT_WORDS";
+const std::string OUTPUT_WORDS = "OUTPUT_WORDS";
+
+class WordCountMap: public HadoopPipes::Mapper {
+public:
+  HadoopPipes::TaskContext::Counter* inputWords;
+
+  WordCountMap(HadoopPipes::TaskContext& context) {
+    inputWords = context.getCounter(WORDCOUNT, INPUT_WORDS);
+  }
+
+  void map(HadoopPipes::MapContext& context) {
+    std::vector<std::string> words =
+      HadoopUtils::splitString(context.getInputValue(), " ");
+    for(unsigned int i=0; i < words.size(); ++i) {
+      context.emit(words[i], "1");
+    }
+    context.incrementCounter(inputWords, words.size());
+  }
+};
+
+class WordCountReduce: public HadoopPipes::Reducer {
+public:
+  HadoopPipes::TaskContext::Counter* outputWords;
+
+  WordCountReduce(HadoopPipes::TaskContext& context) {
+    outputWords = context.getCounter(WORDCOUNT, OUTPUT_WORDS);
+  }
+
+  void reduce(HadoopPipes::ReduceContext& context) {
+    int sum = 0;
+    while (context.nextValue()) {
+      sum += HadoopUtils::toInt(context.getInputValue());
+    }
+    context.emit(context.getInputKey(), HadoopUtils::toString(sum));
+    context.incrementCounter(outputWords, 1);
+  }
+};
+
+class WordCountReader: public HadoopPipes::RecordReader {
+private:
+  int64_t bytesTotal;
+  int64_t bytesRead;
+  FILE* file;
+public:
+  WordCountReader(HadoopPipes::MapContext& context) {
+    std::string filename;
+    HadoopUtils::StringInStream stream(context.getInputSplit());
+    HadoopUtils::deserializeString(filename, stream);
+    struct stat statResult;
+    stat(filename.c_str(), &statResult);
+    bytesTotal = statResult.st_size;
+    bytesRead = 0;
+    file = fopen(filename.c_str(), "rt");
+    HADOOP_ASSERT(file != NULL, "failed to open " + filename);
+  }
+
+  ~WordCountReader() {
+    fclose(file);
+  }
+
+  virtual bool next(std::string& key, std::string& value) {
+    key = HadoopUtils::toString(ftell(file));
+    int ch = getc(file);
+    bytesRead += 1;
+    value.clear();
+    while (ch != -1 && ch != '\n') {
+      value += ch;
+      ch = getc(file);
+      bytesRead += 1;
+    }
+    return ch != -1;
+  }
+
+  /**
+   * The progress of the record reader through the split as a value between
+   * 0.0 and 1.0.
+ */
+  virtual float getProgress() {
+    if (bytesTotal > 0) {
+      return (float)bytesRead / bytesTotal;
+    } else {
+      return 1.0f;
+    }
+  }
+};
+
+class WordCountWriter: public HadoopPipes::RecordWriter {
+private:
+  FILE* file;
+public:
+  WordCountWriter(HadoopPipes::ReduceContext& context) {
+    const HadoopPipes::JobConf* job = context.getJobConf();
+    int part = job->getInt("mapred.task.partition");
+    std::string outDir = job->get("mapred.work.output.dir");
+    // remove the file: schema substring
+    std::string::size_type posn = outDir.find(":");
+    HADOOP_ASSERT(posn != std::string::npos,
+                  "no schema found in output dir: " + outDir);
+    outDir.erase(0, posn+1);
+    mkdir(outDir.c_str(), 0777);
+    std::string outFile = outDir + "/part-" + HadoopUtils::toString(part);
+    file = fopen(outFile.c_str(), "wt");
+    HADOOP_ASSERT(file != NULL, "can't open file for writing: " + outFile);
+  }
+
+  ~WordCountWriter() {
+    fclose(file);
+  }
+
+  void emit(const std::string& key, const std::string& value) {
+    fprintf(file, "%s -> %s\n", key.c_str(), value.c_str());
+  }
+};
+
+int main(int argc, char *argv[]) {
+  return HadoopPipes::runTask(
+      HadoopPipes::TemplateFactory<WordCountMap, WordCountReduce, void, void,
+                                   WordCountReader, WordCountWriter>());
+}
+
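wordcount-nopipe.cc is the variant that keeps record I/O entirely on the C++ side: WordCountReader deserializes a file name from the input split, keys each record by its byte offset (ftell), and reports progress as bytesRead/bytesTotal, while WordCountWriter writes part-N files under mapred.work.output.dir. The sketch below mimics the reader's next() loop against an ordinary local file using plain stdio so the offset-keyed record format can be inspected without a running Pipes job; the LocalLineReader class and the sample.txt file name are assumptions made for the illustration.

// Standalone sketch of the offset-keyed line reading done by WordCountReader.
// LocalLineReader and sample.txt are hypothetical; the next() logic mirrors
// the Pipes RecordReader above, minus the input-split deserialization.
#include <cstdio>
#include <iostream>
#include <string>

class LocalLineReader {
private:
  std::FILE* file;
  long bytesTotal;
  long bytesRead;
public:
  explicit LocalLineReader(const char* path)
    : file(std::fopen(path, "rt")), bytesTotal(0), bytesRead(0) {
    if (file) {
      std::fseek(file, 0, SEEK_END);
      bytesTotal = std::ftell(file);   // stand-in for the stat() call above
      std::fseek(file, 0, SEEK_SET);
    }
  }
  ~LocalLineReader() { if (file) std::fclose(file); }
  bool ok() const { return file != NULL; }

  // Same contract as WordCountReader::next(): key is the byte offset of the
  // record, value is the line without its trailing newline.
  bool next(std::string& key, std::string& value) {
    key = std::to_string(std::ftell(file));
    int ch = std::getc(file);
    bytesRead += 1;
    value.clear();
    while (ch != -1 && ch != '\n') {
      value += (char) ch;
      ch = std::getc(file);
      bytesRead += 1;
    }
    return ch != -1;
  }

  float getProgress() const {
    return bytesTotal > 0 ? (float) bytesRead / bytesTotal : 1.0f;
  }
};

int main() {
  LocalLineReader reader("sample.txt");   // hypothetical input file
  if (!reader.ok()) {
    std::cerr << "cannot open sample.txt" << std::endl;
    return 1;
  }
  std::string key, value;
  while (reader.next(key, value)) {
    std::cout << key << " -> " << value << std::endl;
  }
  std::cout << "progress: " << reader.getProgress() << std::endl;
  return 0;
}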
diff --git a/src/examples/pipes/impl/wordcount-part.cc b/src/examples/pipes/impl/wordcount-part.cc
new file mode 100644
index 0000000..37dc199
--- /dev/null
+++ b/src/examples/pipes/impl/wordcount-part.cc
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hadoop/Pipes.hh"
+#include "hadoop/TemplateFactory.hh"
+#include "hadoop/StringUtils.hh"
+
+const std::string WORDCOUNT = "WORDCOUNT";
+const std::string INPUT_WORDS = "INPUT_WORDS";
+const std::string OUTPUT_WORDS = "OUTPUT_WORDS";
+
+class WordCountMap: public HadoopPipes::Mapper {
+public:
+  HadoopPipes::TaskContext::Counter* inputWords;
+
+  WordCountMap(HadoopPipes::TaskContext& context) {
+    inputWords = context.getCounter(WORDCOUNT, INPUT_WORDS);
+  }
+
+  void map(HadoopPipes::MapContext& context) {
+    std::vector<std::string> words =
+      HadoopUtils::splitString(context.getInputValue(), " ");
+    for(unsigned int i=0; i < words.size(); ++i) {
+      context.emit(words[i], "1");
+    }
+    context.incrementCounter(inputWords, words.size());
+  }
+};
+
+class WordCountReduce: public HadoopPipes::Reducer {
+public:
+  HadoopPipes::TaskContext::Counter* outputWords;
+
+  WordCountReduce(HadoopPipes::TaskContext& context) {
+    outputWords = context.getCounter(WORDCOUNT, OUTPUT_WORDS);
+  }
+
+  void reduce(HadoopPipes::ReduceContext& context) {
+    int sum = 0;
+    while (context.nextValue()) {
+      sum += HadoopUtils::toInt(context.getInputValue());
+    }
+    context.emit(context.getInputKey(), HadoopUtils::toString(sum));
+    context.incrementCounter(outputWords, 1);
+  }
+};
+
+class WordCountPartitioner: public HadoopPipes::Partitioner {
+public:
+  WordCountPartitioner(HadoopPipes::TaskContext& context){}
+  virtual int partition(const std::string& key, int numOfReduces) {
+    return 0;
+  }
+};
+
+int main(int argc, char *argv[]) {
+  return HadoopPipes::runTask(
+      HadoopPipes::TemplateFactory<WordCountMap, WordCountReduce,
                                   WordCountPartitioner, WordCountReduce>());
+}
+
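WordCountPartitioner above deliberately routes every key to reduce 0, which demonstrates the Partitioner hook but funnels all keys through a single reduce. A more typical choice is to hash the key modulo the number of reduces, sketched below as a free function with a small driver; std::hash is used here as an illustrative hash only, not the hash Hadoop applies on the Java side.

// Sketch of a hash-based partition function, as an alternative to the
// always-return-0 partitioner above. The hash choice is illustrative only.
#include <functional>
#include <iostream>
#include <string>

int hashPartition(const std::string& key, int numOfReduces) {
  // std::hash yields a size_t; reduce it modulo the number of reduces.
  std::size_t h = std::hash<std::string>()(key);
  return (int) (h % (std::size_t) numOfReduces);
}

int main() {
  const char* keys[] = {"the", "quick", "brown", "fox"};
  for (const char* k : keys) {
    std::cout << k << " -> reduce " << hashPartition(k, 4) << std::endl;
  }
  return 0;
}

Whatever function is chosen, it must be deterministic for a given key so that all values for that key arrive at the same reduce.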
diff --git a/src/examples/pipes/impl/wordcount-simple.cc b/src/examples/pipes/impl/wordcount-simple.cc
new file mode 100644
index 0000000..64dd801
--- /dev/null
+++ b/src/examples/pipes/impl/wordcount-simple.cc
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hadoop/Pipes.hh"
+#include "hadoop/TemplateFactory.hh"
+#include "hadoop/StringUtils.hh"
+
+const std::string WORDCOUNT = "WORDCOUNT";
+const std::string INPUT_WORDS = "INPUT_WORDS";
+const std::string OUTPUT_WORDS = "OUTPUT_WORDS";
+
+class WordCountMap: public HadoopPipes::Mapper {
+public:
+  HadoopPipes::TaskContext::Counter* inputWords;
+
+  WordCountMap(HadoopPipes::TaskContext& context) {
+    inputWords = context.getCounter(WORDCOUNT, INPUT_WORDS);
+  }
+
+  void map(HadoopPipes::MapContext& context) {
+    std::vector<std::string> words =
+      HadoopUtils::splitString(context.getInputValue(), " ");
+    for(unsigned int i=0; i < words.size(); ++i) {
+      context.emit(words[i], "1");
+    }
+    context.incrementCounter(inputWords, words.size());
+  }
+};
+
+class WordCountReduce: public HadoopPipes::Reducer {
+public:
+  HadoopPipes::TaskContext::Counter* outputWords;
+
+  WordCountReduce(HadoopPipes::TaskContext& context) {
+    outputWords = context.getCounter(WORDCOUNT, OUTPUT_WORDS);
+  }
+
+  void reduce(HadoopPipes::ReduceContext& context) {
+    int sum = 0;
+    while (context.nextValue()) {
+      sum += HadoopUtils::toInt(context.getInputValue());
+    }
+    context.emit(context.getInputKey(), HadoopUtils::toString(sum));
+    context.incrementCounter(outputWords, 1);
+  }
+};
+
+int main(int argc, char *argv[]) {
+  return HadoopPipes::runTask(
+      HadoopPipes::TemplateFactory<WordCountMap, WordCountReduce>());
+}
+
diff --git a/src/examples/pipes/install-sh b/src/examples/pipes/install-sh
new file mode 100644
index 0000000..b777f12
--- /dev/null
+++ b/src/examples/pipes/install-sh
@@ -0,0 +1,322 @@
+#!/bin/sh
+# install - install a program, script, or datafile
+
+scriptversion=2004-07-05.00
+
+# This originates from X11R5 (mit/util/scripts/install.sh), which was
+# later released in X11R6 (xc/config/util/install.sh) with the
+# following copyright and license.
+#
+# Copyright (C) 1994 X Consortium
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC-
+# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+# Except as contained in this notice, the name of the X Consortium shall not
+# be used in advertising or otherwise to promote the sale, use or other deal-
+# ings in this Software without prior written authorization from the X Consor-
+# tium.
+#
+#
+# FSF changes to this file are in the public domain.
+#
+# Calling this script install-sh is preferred over install.sh, to prevent
+# `make' implicit rules from creating a file called install from it
+# when there is no Makefile.
+#
+# This script is compatible with the BSD install script, but was written
+# from scratch.
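Seen together, the three wordcount variants differ only in which TemplateFactory slots they fill: wordcount-simple supplies a mapper and reducer, wordcount-part adds a partitioner and reuses the reducer as a combiner, and wordcount-nopipe also plugs in its own record reader and writer. The sketch below imitates that compile-time wiring with a stripped-down factory template so the pattern can be seen in isolation; MiniFactory, EchoMapper, and SumReducer are hypothetical toy types, not the real Pipes interfaces in hadoop/TemplateFactory.hh.

// Toy illustration of compile-time wiring in the TemplateFactory style.
// MiniFactory, EchoMapper and SumReducer are hypothetical types for this
// sketch; they are not the Pipes interfaces.
#include <iostream>
#include <string>

struct EchoMapper {
  void map(const std::string& line) {
    std::cout << "map: " << line << std::endl;
  }
};

struct SumReducer {
  void reduce(const std::string& key, int sum) {
    std::cout << "reduce: " << key << " = " << sum << std::endl;
  }
};

// The factory only names the task classes and constructs them on demand,
// which is roughly what runTask() asks the real factory to do.
template <class Mapper, class Reducer>
class MiniFactory {
public:
  Mapper* createMapper() const { return new Mapper(); }
  Reducer* createReducer() const { return new Reducer(); }
};

template <class Factory>
int runToyTask(const Factory& factory) {
  auto* mapper = factory.createMapper();
  auto* reducer = factory.createReducer();
  mapper->map("hello world");
  reducer->reduce("hello", 1);
  delete mapper;
  delete reducer;
  return 0;
}

int main() {
  // Different "jobs" are expressed purely by the template arguments,
  // mirroring how the wordcount examples differ only in their factory line.
  return runToyTask(MiniFactory<EchoMapper, SumReducer>());
}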
It can only install one file at a time, a restriction +# shared with many OS's install programs. + +# set DOITPROG to echo to test this script + +# Don't use :- since 4.3BSD and earlier shells don't like it. +doit="${DOITPROG-}" + +# put in absolute paths if you don't have them in your path; or use env. vars. + +mvprog="${MVPROG-mv}" +cpprog="${CPPROG-cp}" +chmodprog="${CHMODPROG-chmod}" +chownprog="${CHOWNPROG-chown}" +chgrpprog="${CHGRPPROG-chgrp}" +stripprog="${STRIPPROG-strip}" +rmprog="${RMPROG-rm}" +mkdirprog="${MKDIRPROG-mkdir}" + +chmodcmd="$chmodprog 0755" +chowncmd= +chgrpcmd= +stripcmd= +rmcmd="$rmprog -f" +mvcmd="$mvprog" +src= +dst= +dir_arg= +dstarg= +no_target_directory= + +usage="Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE + or: $0 [OPTION]... SRCFILES... DIRECTORY + or: $0 [OPTION]... -t DIRECTORY SRCFILES... + or: $0 [OPTION]... -d DIRECTORIES... + +In the 1st form, copy SRCFILE to DSTFILE. +In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. +In the 4th, create DIRECTORIES. + +Options: +-c (ignored) +-d create directories instead of installing files. +-g GROUP $chgrpprog installed files to GROUP. +-m MODE $chmodprog installed files to MODE. +-o USER $chownprog installed files to USER. +-s $stripprog installed files. +-t DIRECTORY install into DIRECTORY. +-T report an error if DSTFILE is a directory. +--help display this help and exit. +--version display version info and exit. + +Environment variables override the default commands: + CHGRPPROG CHMODPROG CHOWNPROG CPPROG MKDIRPROG MVPROG RMPROG STRIPPROG +" + +while test -n "$1"; do + case $1 in + -c) shift + continue;; + + -d) dir_arg=true + shift + continue;; + + -g) chgrpcmd="$chgrpprog $2" + shift + shift + continue;; + + --help) echo "$usage"; exit 0;; + + -m) chmodcmd="$chmodprog $2" + shift + shift + continue;; + + -o) chowncmd="$chownprog $2" + shift + shift + continue;; + + -s) stripcmd=$stripprog + shift + continue;; + + -t) dstarg=$2 + shift + shift + continue;; + + -T) no_target_directory=true + shift + continue;; + + --version) echo "$0 $scriptversion"; exit 0;; + + *) # When -d is used, all remaining arguments are directories to create. + # When -t is used, the destination is already specified. + test -n "$dir_arg$dstarg" && break + # Otherwise, the last argument is the destination. Remove it from $@. + for arg + do + if test -n "$dstarg"; then + # $@ is not empty: it contains at least $arg. + set fnord "$@" "$dstarg" + shift # fnord + fi + shift # arg + dstarg=$arg + done + break;; + esac +done + +if test -z "$1"; then + if test -z "$dir_arg"; then + echo "$0: no input file specified." >&2 + exit 1 + fi + # It's OK to call `install-sh -d' without argument. + # This can happen when creating conditional directories. + exit 0 +fi + +for src +do + # Protect names starting with `-'. + case $src in + -*) src=./$src ;; + esac + + if test -n "$dir_arg"; then + dst=$src + src= + + if test -d "$dst"; then + mkdircmd=: + chmodcmd= + else + mkdircmd=$mkdirprog + fi + else + # Waiting for this to be detected by the "$cpprog $src $dsttmp" command + # might cause directories to be created, which would be especially bad + # if $src (and thus $dsttmp) contains '*'. + if test ! -f "$src" && test ! -d "$src"; then + echo "$0: $src does not exist." >&2 + exit 1 + fi + + if test -z "$dstarg"; then + echo "$0: no destination specified." >&2 + exit 1 + fi + + dst=$dstarg + # Protect names starting with `-'. 
+ case $dst in + -*) dst=./$dst ;; + esac + + # If destination is a directory, append the input filename; won't work + # if double slashes aren't ignored. + if test -d "$dst"; then + if test -n "$no_target_directory"; then + echo "$0: $dstarg: Is a directory" >&2 + exit 1 + fi + dst=$dst/`basename "$src"` + fi + fi + + # This sed command emulates the dirname command. + dstdir=`echo "$dst" | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'` + + # Make sure that the destination directory exists. + + # Skip lots of stat calls in the usual case. + if test ! -d "$dstdir"; then + defaultIFS=' + ' + IFS="${IFS-$defaultIFS}" + + oIFS=$IFS + # Some sh's can't handle IFS=/ for some reason. + IFS='%' + set - `echo "$dstdir" | sed -e 's@/@%@g' -e 's@^%@/@'` + IFS=$oIFS + + pathcomp= + + while test $# -ne 0 ; do + pathcomp=$pathcomp$1 + shift + if test ! -d "$pathcomp"; then + $mkdirprog "$pathcomp" + # mkdir can fail with a `File exist' error in case several + # install-sh are creating the directory concurrently. This + # is OK. + test -d "$pathcomp" || exit + fi + pathcomp=$pathcomp/ + done + fi + + if test -n "$dir_arg"; then + $doit $mkdircmd "$dst" \ + && { test -z "$chowncmd" || $doit $chowncmd "$dst"; } \ + && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } \ + && { test -z "$stripcmd" || $doit $stripcmd "$dst"; } \ + && { test -z "$chmodcmd" || $doit $chmodcmd "$dst"; } + + else + dstfile=`basename "$dst"` + + # Make a couple of temp file names in the proper directory. + dsttmp=$dstdir/_inst.$$_ + rmtmp=$dstdir/_rm.$$_ + + # Trap to clean up those temp files at exit. + trap 'status=$?; rm -f "$dsttmp" "$rmtmp" && exit $status' 0 + trap '(exit $?); exit' 1 2 13 15 + + # Copy the file name to the temp name. + $doit $cpprog "$src" "$dsttmp" && + + # and set any options; do chmod last to preserve setuid bits. + # + # If any of these fail, we abort the whole thing. If we want to + # ignore errors from any of these, just make sure not to ignore + # errors from the above "$doit $cpprog $src $dsttmp" command. + # + { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } \ + && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } \ + && { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } \ + && { test -z "$chmodcmd" || $doit $chmodcmd "$dsttmp"; } && + + # Now rename the file to the real destination. + { $doit $mvcmd -f "$dsttmp" "$dstdir/$dstfile" 2>/dev/null \ + || { + # The rename failed, perhaps because mv can't rename something else + # to itself, or perhaps because mv is so ancient that it does not + # support -f. + + # Now remove or move aside any old file at destination location. + # We try this two ways since rm can't unlink itself on some + # systems and the destination file might be busy for other + # reasons. In this case, the final cleanup might fail but the new + # file should still install successfully. + { + if test -f "$dstdir/$dstfile"; then + $doit $rmcmd -f "$dstdir/$dstfile" 2>/dev/null \ + || $doit $mvcmd -f "$dstdir/$dstfile" "$rmtmp" 2>/dev/null \ + || { + echo "$0: cannot unlink or rename $dstdir/$dstfile" >&2 + (exit 1); exit + } + else + : + fi + } && + + # Now rename the file to the real destination. + $doit $mvcmd "$dsttmp" "$dstdir/$dstfile" + } + } + fi || { (exit 1); exit; } +done + +# The final little trick to "correctly" pass the exit status to the exit trap. 
+{ + (exit 0); exit +} + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-end: "$" +# End: diff --git a/src/examples/pipes/ltmain.sh b/src/examples/pipes/ltmain.sh new file mode 100644 index 0000000..9e71d27 --- /dev/null +++ b/src/examples/pipes/ltmain.sh @@ -0,0 +1,6530 @@ +# ltmain.sh - Provide generalized library-building support services. +# NOTE: Changing this file will not affect anything until you rerun configure. +# +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005 +# Free Software Foundation, Inc. +# Originally by Gordon Matzigkeit , 1996 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +basename="s,^.*/,,g" + +# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh +# is ksh but when the shell is invoked as "sh" and the current value of +# the _XPG environment variable is not equal to 1 (one), the special +# positional parameter $0, within a function call, is the name of the +# function. +progpath="$0" + +# The name of this program: +progname=`echo "$progpath" | $SED $basename` +modename="$progname" + +# Global variables: +EXIT_SUCCESS=0 +EXIT_FAILURE=1 + +PROGRAM=ltmain.sh +PACKAGE=libtool +VERSION=1.5.20 +TIMESTAMP=" (1.1220.2.287 2005/08/31 18:54:15)" + +# See if we are running on zsh, and set the options which allow our +# commands through without removal of \ escapes. +if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST +fi + +# Check that we have a working $echo. +if test "X$1" = X--no-reexec; then + # Discard the --no-reexec flag, and continue. + shift +elif test "X$1" = X--fallback-echo; then + # Avoid inline document here, it may be left over + : +elif test "X`($echo '\t') 2>/dev/null`" = 'X\t'; then + # Yippee, $echo works! + : +else + # Restart under the correct shell, and then maybe $echo will work. + exec $SHELL "$progpath" --no-reexec ${1+"$@"} +fi + +if test "X$1" = X--fallback-echo; then + # used as fallback echo + shift + cat <&2 + $echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2 + exit $EXIT_FAILURE +fi + +# Global variables. 
+mode=$default_mode +nonopt= +prev= +prevopt= +run= +show="$echo" +show_help= +execute_dlfiles= +lo2o="s/\\.lo\$/.${objext}/" +o2lo="s/\\.${objext}\$/.lo/" + +##################################### +# Shell function definitions: +# This seems to be the best place for them + +# func_win32_libid arg +# return the library type of file 'arg' +# +# Need a lot of goo to handle *both* DLLs and import libs +# Has to be a shell function in order to 'eat' the argument +# that is supplied when $file_magic_command is called. +func_win32_libid () +{ + win32_libid_type="unknown" + win32_fileres=`file -L $1 2>/dev/null` + case $win32_fileres in + *ar\ archive\ import\ library*) # definitely import + win32_libid_type="x86 archive import" + ;; + *ar\ archive*) # could be an import, or static + if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | \ + $EGREP -e 'file format pe-i386(.*architecture: i386)?' >/dev/null ; then + win32_nmres=`eval $NM -f posix -A $1 | \ + sed -n -e '1,100{/ I /{x;/import/!{s/^/import/;h;p;};x;};}'` + if test "X$win32_nmres" = "Ximport" ; then + win32_libid_type="x86 archive import" + else + win32_libid_type="x86 archive static" + fi + fi + ;; + *DLL*) + win32_libid_type="x86 DLL" + ;; + *executable*) # but shell scripts are "executable" too... + case $win32_fileres in + *MS\ Windows\ PE\ Intel*) + win32_libid_type="x86 DLL" + ;; + esac + ;; + esac + $echo $win32_libid_type +} + + +# func_infer_tag arg +# Infer tagged configuration to use if any are available and +# if one wasn't chosen via the "--tag" command line option. +# Only attempt this if the compiler in the base compile +# command doesn't match the default compiler. +# arg is usually of the form 'gcc ...' +func_infer_tag () +{ + if test -n "$available_tags" && test -z "$tagname"; then + CC_quoted= + for arg in $CC; do + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + CC_quoted="$CC_quoted $arg" + done + case $@ in + # Blanks in the command may have been stripped by the calling shell, + # but not from the CC environment variable when configure was run. + " $CC "* | "$CC "* | " `$echo $CC` "* | "`$echo $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$echo $CC_quoted` "* | "`$echo $CC_quoted` "*) ;; + # Blanks at the start of $base_compile will cause this to fail + # if we don't check for them as well. + *) + for z in $available_tags; do + if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then + # Evaluate the configuration. + eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`" + CC_quoted= + for arg in $CC; do + # Double-quote args containing other shell metacharacters. + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + CC_quoted="$CC_quoted $arg" + done + case "$@ " in + " $CC "* | "$CC "* | " `$echo $CC` "* | "`$echo $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$echo $CC_quoted` "* | "`$echo $CC_quoted` "*) + # The compiler in the base compile command matches + # the one in the tagged configuration. + # Assume this is the tagged configuration we want. + tagname=$z + break + ;; + esac + fi + done + # If $tagname still isn't set, then no tagged configuration + # was found and let the user know that the "--tag" command + # line option must be used. 
+ if test -z "$tagname"; then + $echo "$modename: unable to infer tagged configuration" + $echo "$modename: specify a tag with \`--tag'" 1>&2 + exit $EXIT_FAILURE +# else +# $echo "$modename: using $tagname tagged configuration" + fi + ;; + esac + fi +} + + +# func_extract_an_archive dir oldlib +func_extract_an_archive () +{ + f_ex_an_ar_dir="$1"; shift + f_ex_an_ar_oldlib="$1" + + $show "(cd $f_ex_an_ar_dir && $AR x $f_ex_an_ar_oldlib)" + $run eval "(cd \$f_ex_an_ar_dir && $AR x \$f_ex_an_ar_oldlib)" || exit $? + if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then + : + else + $echo "$modename: ERROR: object name conflicts: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib" 1>&2 + exit $EXIT_FAILURE + fi +} + +# func_extract_archives gentop oldlib ... +func_extract_archives () +{ + my_gentop="$1"; shift + my_oldlibs=${1+"$@"} + my_oldobjs="" + my_xlib="" + my_xabs="" + my_xdir="" + my_status="" + + $show "${rm}r $my_gentop" + $run ${rm}r "$my_gentop" + $show "$mkdir $my_gentop" + $run $mkdir "$my_gentop" + my_status=$? + if test "$my_status" -ne 0 && test ! -d "$my_gentop"; then + exit $my_status + fi + + for my_xlib in $my_oldlibs; do + # Extract the objects. + case $my_xlib in + [\\/]* | [A-Za-z]:[\\/]*) my_xabs="$my_xlib" ;; + *) my_xabs=`pwd`"/$my_xlib" ;; + esac + my_xlib=`$echo "X$my_xlib" | $Xsed -e 's%^.*/%%'` + my_xdir="$my_gentop/$my_xlib" + + $show "${rm}r $my_xdir" + $run ${rm}r "$my_xdir" + $show "$mkdir $my_xdir" + $run $mkdir "$my_xdir" + status=$? + if test "$status" -ne 0 && test ! -d "$my_xdir"; then + exit $status + fi + case $host in + *-darwin*) + $show "Extracting $my_xabs" + # Do not bother doing anything if just a dry run + if test -z "$run"; then + darwin_orig_dir=`pwd` + cd $my_xdir || exit $? + darwin_archive=$my_xabs + darwin_curdir=`pwd` + darwin_base_archive=`$echo "X$darwin_archive" | $Xsed -e 's%^.*/%%'` + darwin_arches=`lipo -info "$darwin_archive" 2>/dev/null | $EGREP Architectures 2>/dev/null` + if test -n "$darwin_arches"; then + darwin_arches=`echo "$darwin_arches" | $SED -e 's/.*are://'` + darwin_arch= + $show "$darwin_base_archive has multiple architectures $darwin_arches" + for darwin_arch in $darwin_arches ; do + mkdir -p "unfat-$$/${darwin_base_archive}-${darwin_arch}" + lipo -thin $darwin_arch -output "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" "${darwin_archive}" + cd "unfat-$$/${darwin_base_archive}-${darwin_arch}" + func_extract_an_archive "`pwd`" "${darwin_base_archive}" + cd "$darwin_curdir" + $rm "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" + done # $darwin_arches + ## Okay now we have a bunch of thin objects, gotta fatten them up :) + darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print| xargs basename | sort -u | $NL2SP` + darwin_file= + darwin_files= + for darwin_file in $darwin_filelist; do + darwin_files=`find unfat-$$ -name $darwin_file -print | $NL2SP` + lipo -create -output "$darwin_file" $darwin_files + done # $darwin_filelist + ${rm}r unfat-$$ + cd "$darwin_orig_dir" + else + cd "$darwin_orig_dir" + func_extract_an_archive "$my_xdir" "$my_xabs" + fi # $darwin_arches + fi # $run + ;; + *) + func_extract_an_archive "$my_xdir" "$my_xabs" + ;; + esac + my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | $NL2SP` + done + func_extract_archives_result="$my_oldobjs" +} +# End of Shell function definitions +##################################### + +# Darwin sucks +eval std_shrext=\"$shrext_cmds\" + +# Parse our command 
line options once, thoroughly. +while test "$#" -gt 0 +do + arg="$1" + shift + + case $arg in + -*=*) optarg=`$echo "X$arg" | $Xsed -e 's/[-_a-zA-Z0-9]*=//'` ;; + *) optarg= ;; + esac + + # If the previous option needs an argument, assign it. + if test -n "$prev"; then + case $prev in + execute_dlfiles) + execute_dlfiles="$execute_dlfiles $arg" + ;; + tag) + tagname="$arg" + preserve_args="${preserve_args}=$arg" + + # Check whether tagname contains only valid characters + case $tagname in + *[!-_A-Za-z0-9,/]*) + $echo "$progname: invalid tag name: $tagname" 1>&2 + exit $EXIT_FAILURE + ;; + esac + + case $tagname in + CC) + # Don't test for the "default" C tag, as we know, it's there, but + # not specially marked. + ;; + *) + if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$" < "$progpath" > /dev/null; then + taglist="$taglist $tagname" + # Evaluate the configuration. + eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$tagname'$/,/^# ### END LIBTOOL TAG CONFIG: '$tagname'$/p' < $progpath`" + else + $echo "$progname: ignoring unknown tag $tagname" 1>&2 + fi + ;; + esac + ;; + *) + eval "$prev=\$arg" + ;; + esac + + prev= + prevopt= + continue + fi + + # Have we seen a non-optional argument yet? + case $arg in + --help) + show_help=yes + ;; + + --version) + $echo "$PROGRAM (GNU $PACKAGE) $VERSION$TIMESTAMP" + $echo + $echo "Copyright (C) 2005 Free Software Foundation, Inc." + $echo "This is free software; see the source for copying conditions. There is NO" + $echo "warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + exit $? + ;; + + --config) + ${SED} -e '1,/^# ### BEGIN LIBTOOL CONFIG/d' -e '/^# ### END LIBTOOL CONFIG/,$d' $progpath + # Now print the configurations for the tags. + for tagname in $taglist; do + ${SED} -n -e "/^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$/,/^# ### END LIBTOOL TAG CONFIG: $tagname$/p" < "$progpath" + done + exit $? + ;; + + --debug) + $echo "$progname: enabling shell trace mode" + set -x + preserve_args="$preserve_args $arg" + ;; + + --dry-run | -n) + run=: + ;; + + --features) + $echo "host: $host" + if test "$build_libtool_libs" = yes; then + $echo "enable shared libraries" + else + $echo "disable shared libraries" + fi + if test "$build_old_libs" = yes; then + $echo "enable static libraries" + else + $echo "disable static libraries" + fi + exit $? + ;; + + --finish) mode="finish" ;; + + --mode) prevopt="--mode" prev=mode ;; + --mode=*) mode="$optarg" ;; + + --preserve-dup-deps) duplicate_deps="yes" ;; + + --quiet | --silent) + show=: + preserve_args="$preserve_args $arg" + ;; + + --tag) prevopt="--tag" prev=tag ;; + --tag=*) + set tag "$optarg" ${1+"$@"} + shift + prev=tag + preserve_args="$preserve_args --tag" + ;; + + -dlopen) + prevopt="-dlopen" + prev=execute_dlfiles + ;; + + -*) + $echo "$modename: unrecognized option \`$arg'" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + ;; + + *) + nonopt="$arg" + break + ;; + esac +done + +if test -n "$prevopt"; then + $echo "$modename: option \`$prevopt' requires an argument" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE +fi + +# If this variable is set in any of the actions, the command in it +# will be execed at the end. This prevents here-documents from being +# left over by shells. +exec_cmd= + +if test -z "$show_help"; then + + # Infer the operation mode. + if test -z "$mode"; then + $echo "*** Warning: inferring the mode of operation is deprecated." 1>&2 + $echo "*** Future versions of Libtool will require --mode=MODE be specified." 
1>&2 + case $nonopt in + *cc | cc* | *++ | gcc* | *-gcc* | g++* | xlc*) + mode=link + for arg + do + case $arg in + -c) + mode=compile + break + ;; + esac + done + ;; + *db | *dbx | *strace | *truss) + mode=execute + ;; + *install*|cp|mv) + mode=install + ;; + *rm) + mode=uninstall + ;; + *) + # If we have no mode, but dlfiles were specified, then do execute mode. + test -n "$execute_dlfiles" && mode=execute + + # Just use the default operation mode. + if test -z "$mode"; then + if test -n "$nonopt"; then + $echo "$modename: warning: cannot infer operation mode from \`$nonopt'" 1>&2 + else + $echo "$modename: warning: cannot infer operation mode without MODE-ARGS" 1>&2 + fi + fi + ;; + esac + fi + + # Only execute mode is allowed to have -dlopen flags. + if test -n "$execute_dlfiles" && test "$mode" != execute; then + $echo "$modename: unrecognized option \`-dlopen'" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + # Change the help message to a mode-specific one. + generic_help="$help" + help="Try \`$modename --help --mode=$mode' for more information." + + # These modes are in order of execution frequency so that they run quickly. + case $mode in + # libtool compile mode + compile) + modename="$modename: compile" + # Get the compilation command and the source file. + base_compile= + srcfile="$nonopt" # always keep a non-empty value in "srcfile" + suppress_opt=yes + suppress_output= + arg_mode=normal + libobj= + later= + + for arg + do + case $arg_mode in + arg ) + # do not "continue". Instead, add this to base_compile + lastarg="$arg" + arg_mode=normal + ;; + + target ) + libobj="$arg" + arg_mode=normal + continue + ;; + + normal ) + # Accept any command-line options. + case $arg in + -o) + if test -n "$libobj" ; then + $echo "$modename: you cannot specify \`-o' more than once" 1>&2 + exit $EXIT_FAILURE + fi + arg_mode=target + continue + ;; + + -static | -prefer-pic | -prefer-non-pic) + later="$later $arg" + continue + ;; + + -no-suppress) + suppress_opt=no + continue + ;; + + -Xcompiler) + arg_mode=arg # the next one goes into the "base_compile" arg list + continue # The current "srcfile" will either be retained or + ;; # replaced later. I would guess that would be a bug. + + -Wc,*) + args=`$echo "X$arg" | $Xsed -e "s/^-Wc,//"` + lastarg= + save_ifs="$IFS"; IFS=',' + for arg in $args; do + IFS="$save_ifs" + + # Double-quote args containing other shell metacharacters. + # Many Bourne shells cannot handle close brackets correctly + # in scan sets, so we specify it separately. + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + lastarg="$lastarg $arg" + done + IFS="$save_ifs" + lastarg=`$echo "X$lastarg" | $Xsed -e "s/^ //"` + + # Add the arguments to base_compile. + base_compile="$base_compile $lastarg" + continue + ;; + + * ) + # Accept the current argument as the source file. + # The previous "srcfile" becomes the current argument. + # + lastarg="$srcfile" + srcfile="$arg" + ;; + esac # case $arg + ;; + esac # case $arg_mode + + # Aesthetically quote the previous argument. + lastarg=`$echo "X$lastarg" | $Xsed -e "$sed_quote_subst"` + + case $lastarg in + # Double-quote args containing other shell metacharacters. + # Many Bourne shells cannot handle close brackets correctly + # in scan sets, and some SunOS ksh mistreat backslash-escaping + # in scan sets (worked around with variable expansion), + # and furthermore cannot handle '|' '&' '(' ')' in scan sets + # at all, so we specify them separately. 
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + lastarg="\"$lastarg\"" + ;; + esac + + base_compile="$base_compile $lastarg" + done # for arg + + case $arg_mode in + arg) + $echo "$modename: you must specify an argument for -Xcompile" + exit $EXIT_FAILURE + ;; + target) + $echo "$modename: you must specify a target with \`-o'" 1>&2 + exit $EXIT_FAILURE + ;; + *) + # Get the name of the library object. + [ -z "$libobj" ] && libobj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%'` + ;; + esac + + # Recognize several different file suffixes. + # If the user specifies -o file.o, it is replaced with file.lo + xform='[cCFSifmso]' + case $libobj in + *.ada) xform=ada ;; + *.adb) xform=adb ;; + *.ads) xform=ads ;; + *.asm) xform=asm ;; + *.c++) xform=c++ ;; + *.cc) xform=cc ;; + *.ii) xform=ii ;; + *.class) xform=class ;; + *.cpp) xform=cpp ;; + *.cxx) xform=cxx ;; + *.f90) xform=f90 ;; + *.for) xform=for ;; + *.java) xform=java ;; + esac + + libobj=`$echo "X$libobj" | $Xsed -e "s/\.$xform$/.lo/"` + + case $libobj in + *.lo) obj=`$echo "X$libobj" | $Xsed -e "$lo2o"` ;; + *) + $echo "$modename: cannot determine name of library object from \`$libobj'" 1>&2 + exit $EXIT_FAILURE + ;; + esac + + func_infer_tag $base_compile + + for arg in $later; do + case $arg in + -static) + build_old_libs=yes + continue + ;; + + -prefer-pic) + pic_mode=yes + continue + ;; + + -prefer-non-pic) + pic_mode=no + continue + ;; + esac + done + + qlibobj=`$echo "X$libobj" | $Xsed -e "$sed_quote_subst"` + case $qlibobj in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + qlibobj="\"$qlibobj\"" ;; + esac + test "X$libobj" != "X$qlibobj" \ + && $echo "X$libobj" | grep '[]~#^*{};<>?"'"'"' &()|`$[]' \ + && $echo "$modename: libobj name \`$libobj' may not contain shell special characters." + objname=`$echo "X$obj" | $Xsed -e 's%^.*/%%'` + xdir=`$echo "X$obj" | $Xsed -e 's%/[^/]*$%%'` + if test "X$xdir" = "X$obj"; then + xdir= + else + xdir=$xdir/ + fi + lobj=${xdir}$objdir/$objname + + if test -z "$base_compile"; then + $echo "$modename: you must specify a compilation command" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + # Delete any leftover library objects. 
+ if test "$build_old_libs" = yes; then + removelist="$obj $lobj $libobj ${libobj}T" + else + removelist="$lobj $libobj ${libobj}T" + fi + + $run $rm $removelist + trap "$run $rm $removelist; exit $EXIT_FAILURE" 1 2 15 + + # On Cygwin there's no "real" PIC flag so we must build both object types + case $host_os in + cygwin* | mingw* | pw32* | os2*) + pic_mode=default + ;; + esac + if test "$pic_mode" = no && test "$deplibs_check_method" != pass_all; then + # non-PIC code in shared libraries is not supported + pic_mode=default + fi + + # Calculate the filename of the output object if compiler does + # not support -o with -c + if test "$compiler_c_o" = no; then + output_obj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%' -e 's%\.[^.]*$%%'`.${objext} + lockfile="$output_obj.lock" + removelist="$removelist $output_obj $lockfile" + trap "$run $rm $removelist; exit $EXIT_FAILURE" 1 2 15 + else + output_obj= + need_locks=no + lockfile= + fi + + # Lock this critical section if it is needed + # We use this script file to make the link, it avoids creating a new file + if test "$need_locks" = yes; then + until $run ln "$progpath" "$lockfile" 2>/dev/null; do + $show "Waiting for $lockfile to be removed" + sleep 2 + done + elif test "$need_locks" = warn; then + if test -f "$lockfile"; then + $echo "\ +*** ERROR, $lockfile exists and contains: +`cat $lockfile 2>/dev/null` + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support \`-c' and \`-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $run $rm $removelist + exit $EXIT_FAILURE + fi + $echo "$srcfile" > "$lockfile" + fi + + if test -n "$fix_srcfile_path"; then + eval srcfile=\"$fix_srcfile_path\" + fi + qsrcfile=`$echo "X$srcfile" | $Xsed -e "$sed_quote_subst"` + case $qsrcfile in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + qsrcfile="\"$qsrcfile\"" ;; + esac + + $run $rm "$libobj" "${libobj}T" + + # Create a libtool object file (analogous to a ".la" file), + # but don't create it if we're doing a dry run. + test -z "$run" && cat > ${libobj}T </dev/null`" != "X$srcfile"; then + $echo "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support \`-c' and \`-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $run $rm $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed, then go on to compile the next one + if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then + $show "$mv $output_obj $lobj" + if $run $mv $output_obj $lobj; then : + else + error=$? + $run $rm $removelist + exit $error + fi + fi + + # Append the name of the PIC object to the libtool object file. + test -z "$run" && cat >> ${libobj}T <> ${libobj}T </dev/null`" != "X$srcfile"; then + $echo "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support \`-c' and \`-o' together. 
If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $run $rm $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed + if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then + $show "$mv $output_obj $obj" + if $run $mv $output_obj $obj; then : + else + error=$? + $run $rm $removelist + exit $error + fi + fi + + # Append the name of the non-PIC object the libtool object file. + # Only append if the libtool object file exists. + test -z "$run" && cat >> ${libobj}T <> ${libobj}T <&2 + fi + if test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + else + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + fi + build_libtool_libs=no + build_old_libs=yes + prefer_static_libs=yes + break + ;; + esac + done + + # See if our shared archives depend on static archives. + test -n "$old_archive_from_new_cmds" && build_old_libs=yes + + # Go through the arguments, transforming them on the way. + while test "$#" -gt 0; do + arg="$1" + shift + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + qarg=\"`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`\" ### testsuite: skip nested quoting test + ;; + *) qarg=$arg ;; + esac + libtool_args="$libtool_args $qarg" + + # If the previous option needs an argument, assign it. + if test -n "$prev"; then + case $prev in + output) + compile_command="$compile_command @OUTPUT@" + finalize_command="$finalize_command @OUTPUT@" + ;; + esac + + case $prev in + dlfiles|dlprefiles) + if test "$preload" = no; then + # Add the symbol object into the linking commands. + compile_command="$compile_command @SYMFILE@" + finalize_command="$finalize_command @SYMFILE@" + preload=yes + fi + case $arg in + *.la | *.lo) ;; # We handle these cases below. + force) + if test "$dlself" = no; then + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + self) + if test "$prev" = dlprefiles; then + dlself=yes + elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then + dlself=yes + else + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + *) + if test "$prev" = dlfiles; then + dlfiles="$dlfiles $arg" + else + dlprefiles="$dlprefiles $arg" + fi + prev= + continue + ;; + esac + ;; + expsyms) + export_symbols="$arg" + if test ! -f "$arg"; then + $echo "$modename: symbol file \`$arg' does not exist" + exit $EXIT_FAILURE + fi + prev= + continue + ;; + expsyms_regex) + export_symbols_regex="$arg" + prev= + continue + ;; + inst_prefix) + inst_prefix_dir="$arg" + prev= + continue + ;; + precious_regex) + precious_files_regex="$arg" + prev= + continue + ;; + release) + release="-$arg" + prev= + continue + ;; + objectlist) + if test -f "$arg"; then + save_arg=$arg + moreargs= + for fil in `cat $save_arg` + do +# moreargs="$moreargs $fil" + arg=$fil + # A libtool-controlled object. + + # Check to see that this really is a libtool object. + if (${SED} -e '2q' $arg | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then + pic_object= + non_pic_object= + + # Read the .lo file + # If there is no directory component, then add one. + case $arg in + */* | *\\*) . $arg ;; + *) . 
./$arg ;; + esac + + if test -z "$pic_object" || \ + test -z "$non_pic_object" || + test "$pic_object" = none && \ + test "$non_pic_object" = none; then + $echo "$modename: cannot find name of object for \`$arg'" 1>&2 + exit $EXIT_FAILURE + fi + + # Extract subdirectory from the argument. + xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'` + if test "X$xdir" = "X$arg"; then + xdir= + else + xdir="$xdir/" + fi + + if test "$pic_object" != none; then + # Prepend the subdirectory the object is found in. + pic_object="$xdir$pic_object" + + if test "$prev" = dlfiles; then + if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then + dlfiles="$dlfiles $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test "$prev" = dlprefiles; then + # Preload the old-style object. + dlprefiles="$dlprefiles $pic_object" + prev= + fi + + # A PIC object. + libobjs="$libobjs $pic_object" + arg="$pic_object" + fi + + # Non-PIC object. + if test "$non_pic_object" != none; then + # Prepend the subdirectory the object is found in. + non_pic_object="$xdir$non_pic_object" + + # A standard non-PIC object + non_pic_objects="$non_pic_objects $non_pic_object" + if test -z "$pic_object" || test "$pic_object" = none ; then + arg="$non_pic_object" + fi + fi + else + # Only an error if not doing a dry-run. + if test -z "$run"; then + $echo "$modename: \`$arg' is not a valid libtool object" 1>&2 + exit $EXIT_FAILURE + else + # Dry-run case. + + # Extract subdirectory from the argument. + xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'` + if test "X$xdir" = "X$arg"; then + xdir= + else + xdir="$xdir/" + fi + + pic_object=`$echo "X${xdir}${objdir}/${arg}" | $Xsed -e "$lo2o"` + non_pic_object=`$echo "X${xdir}${arg}" | $Xsed -e "$lo2o"` + libobjs="$libobjs $pic_object" + non_pic_objects="$non_pic_objects $non_pic_object" + fi + fi + done + else + $echo "$modename: link input file \`$save_arg' does not exist" + exit $EXIT_FAILURE + fi + arg=$save_arg + prev= + continue + ;; + rpath | xrpath) + # We need an absolute path. 
+ case $arg in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + $echo "$modename: only absolute run-paths are allowed" 1>&2 + exit $EXIT_FAILURE + ;; + esac + if test "$prev" = rpath; then + case "$rpath " in + *" $arg "*) ;; + *) rpath="$rpath $arg" ;; + esac + else + case "$xrpath " in + *" $arg "*) ;; + *) xrpath="$xrpath $arg" ;; + esac + fi + prev= + continue + ;; + xcompiler) + compiler_flags="$compiler_flags $qarg" + prev= + compile_command="$compile_command $qarg" + finalize_command="$finalize_command $qarg" + continue + ;; + xlinker) + linker_flags="$linker_flags $qarg" + compiler_flags="$compiler_flags $wl$qarg" + prev= + compile_command="$compile_command $wl$qarg" + finalize_command="$finalize_command $wl$qarg" + continue + ;; + xcclinker) + linker_flags="$linker_flags $qarg" + compiler_flags="$compiler_flags $qarg" + prev= + compile_command="$compile_command $qarg" + finalize_command="$finalize_command $qarg" + continue + ;; + shrext) + shrext_cmds="$arg" + prev= + continue + ;; + darwin_framework) + compiler_flags="$compiler_flags $arg" + compile_command="$compile_command $arg" + finalize_command="$finalize_command $arg" + prev= + continue + ;; + *) + eval "$prev=\"\$arg\"" + prev= + continue + ;; + esac + fi # test -n "$prev" + + prevarg="$arg" + + case $arg in + -all-static) + if test -n "$link_static_flag"; then + compile_command="$compile_command $link_static_flag" + finalize_command="$finalize_command $link_static_flag" + fi + continue + ;; + + -allow-undefined) + # FIXME: remove this flag sometime in the future. + $echo "$modename: \`-allow-undefined' is deprecated because it is the default" 1>&2 + continue + ;; + + -avoid-version) + avoid_version=yes + continue + ;; + + -dlopen) + prev=dlfiles + continue + ;; + + -dlpreopen) + prev=dlprefiles + continue + ;; + + -export-dynamic) + export_dynamic=yes + continue + ;; + + -export-symbols | -export-symbols-regex) + if test -n "$export_symbols" || test -n "$export_symbols_regex"; then + $echo "$modename: more than one -exported-symbols argument is not allowed" + exit $EXIT_FAILURE + fi + if test "X$arg" = "X-export-symbols"; then + prev=expsyms + else + prev=expsyms_regex + fi + continue + ;; + + -framework|-arch) + prev=darwin_framework + compiler_flags="$compiler_flags $arg" + compile_command="$compile_command $arg" + finalize_command="$finalize_command $arg" + continue + ;; + + -inst-prefix-dir) + prev=inst_prefix + continue + ;; + + # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:* + # so, if we see these flags be careful not to treat them like -L + -L[A-Z][A-Z]*:*) + case $with_gcc/$host in + no/*-*-irix* | /*-*-irix*) + compile_command="$compile_command $arg" + finalize_command="$finalize_command $arg" + ;; + esac + continue + ;; + + -L*) + dir=`$echo "X$arg" | $Xsed -e 's/^-L//'` + # We need an absolute path. 
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + absdir=`cd "$dir" && pwd` + if test -z "$absdir"; then + $echo "$modename: cannot determine absolute directory name of \`$dir'" 1>&2 + exit $EXIT_FAILURE + fi + dir="$absdir" + ;; + esac + case "$deplibs " in + *" -L$dir "*) ;; + *) + deplibs="$deplibs -L$dir" + lib_search_path="$lib_search_path $dir" + ;; + esac + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*) + case :$dllsearchpath: in + *":$dir:"*) ;; + *) dllsearchpath="$dllsearchpath:$dir";; + esac + ;; + esac + continue + ;; + + -l*) + if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then + case $host in + *-*-cygwin* | *-*-pw32* | *-*-beos*) + # These systems don't actually have a C or math library (as such) + continue + ;; + *-*-mingw* | *-*-os2*) + # These systems don't actually have a C library (as such) + test "X$arg" = "X-lc" && continue + ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc due to us having libc/libc_r. + test "X$arg" = "X-lc" && continue + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C and math libraries are in the System framework + deplibs="$deplibs -framework System" + continue + esac + elif test "X$arg" = "X-lc_r"; then + case $host in + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc_r directly, use -pthread flag. + continue + ;; + esac + fi + deplibs="$deplibs $arg" + continue + ;; + + # Tru64 UNIX uses -model [arg] to determine the layout of C++ + # classes, name mangling, and exception handling. + -model) + compile_command="$compile_command $arg" + compiler_flags="$compiler_flags $arg" + finalize_command="$finalize_command $arg" + prev=xcompiler + continue + ;; + + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe) + compiler_flags="$compiler_flags $arg" + compile_command="$compile_command $arg" + finalize_command="$finalize_command $arg" + continue + ;; + + -module) + module=yes + continue + ;; + + # -64, -mips[0-9] enable 64-bit mode on the SGI compiler + # -r[0-9][0-9]* specifies the processor on the SGI compiler + # -xarch=*, -xtarget=* enable 64-bit mode on the Sun compiler + # +DA*, +DD* enable 64-bit mode on the HP compiler + # -q* pass through compiler args for the IBM compiler + # -m* pass through architecture-specific compiler args for GCC + -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*) + + # Unknown arguments in both finalize_command and compile_command need + # to be aesthetically quoted because they are evaled later. + arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + compile_command="$compile_command $arg" + finalize_command="$finalize_command $arg" + if test "$with_gcc" = "yes" ; then + compiler_flags="$compiler_flags $arg" + fi + continue + ;; + + -shrext) + prev=shrext + continue + ;; + + -no-fast-install) + fast_install=no + continue + ;; + + -no-install) + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*) + # The PATH hackery in wrapper scripts is required on Windows + # in order for the loader to find any dlls it needs. 
+ $echo "$modename: warning: \`-no-install' is ignored for $host" 1>&2 + $echo "$modename: warning: assuming \`-no-fast-install' instead" 1>&2 + fast_install=no + ;; + *) no_install=yes ;; + esac + continue + ;; + + -no-undefined) + allow_undefined=no + continue + ;; + + -objectlist) + prev=objectlist + continue + ;; + + -o) prev=output ;; + + -precious-files-regex) + prev=precious_regex + continue + ;; + + -release) + prev=release + continue + ;; + + -rpath) + prev=rpath + continue + ;; + + -R) + prev=xrpath + continue + ;; + + -R*) + dir=`$echo "X$arg" | $Xsed -e 's/^-R//'` + # We need an absolute path. + case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + $echo "$modename: only absolute run-paths are allowed" 1>&2 + exit $EXIT_FAILURE + ;; + esac + case "$xrpath " in + *" $dir "*) ;; + *) xrpath="$xrpath $dir" ;; + esac + continue + ;; + + -static) + # The effects of -static are defined in a previous loop. + # We used to do the same as -all-static on platforms that + # didn't have a PIC flag, but the assumption that the effects + # would be equivalent was wrong. It would break on at least + # Digital Unix and AIX. + continue + ;; + + -thread-safe) + thread_safe=yes + continue + ;; + + -version-info) + prev=vinfo + continue + ;; + -version-number) + prev=vinfo + vinfo_number=yes + continue + ;; + + -Wc,*) + args=`$echo "X$arg" | $Xsed -e "$sed_quote_subst" -e 's/^-Wc,//'` + arg= + save_ifs="$IFS"; IFS=',' + for flag in $args; do + IFS="$save_ifs" + case $flag in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + flag="\"$flag\"" + ;; + esac + arg="$arg $wl$flag" + compiler_flags="$compiler_flags $flag" + done + IFS="$save_ifs" + arg=`$echo "X$arg" | $Xsed -e "s/^ //"` + ;; + + -Wl,*) + args=`$echo "X$arg" | $Xsed -e "$sed_quote_subst" -e 's/^-Wl,//'` + arg= + save_ifs="$IFS"; IFS=',' + for flag in $args; do + IFS="$save_ifs" + case $flag in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + flag="\"$flag\"" + ;; + esac + arg="$arg $wl$flag" + compiler_flags="$compiler_flags $wl$flag" + linker_flags="$linker_flags $flag" + done + IFS="$save_ifs" + arg=`$echo "X$arg" | $Xsed -e "s/^ //"` + ;; + + -Xcompiler) + prev=xcompiler + continue + ;; + + -Xlinker) + prev=xlinker + continue + ;; + + -XCClinker) + prev=xcclinker + continue + ;; + + # Some other compiler flag. + -* | +*) + # Unknown arguments in both finalize_command and compile_command need + # to be aesthetically quoted because they are evaled later. + arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + ;; + + *.$objext) + # A standard object. + objs="$objs $arg" + ;; + + *.lo) + # A libtool-controlled object. + + # Check to see that this really is a libtool object. + if (${SED} -e '2q' $arg | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then + pic_object= + non_pic_object= + + # Read the .lo file + # If there is no directory component, then add one. + case $arg in + */* | *\\*) . $arg ;; + *) . ./$arg ;; + esac + + if test -z "$pic_object" || \ + test -z "$non_pic_object" || + test "$pic_object" = none && \ + test "$non_pic_object" = none; then + $echo "$modename: cannot find name of object for \`$arg'" 1>&2 + exit $EXIT_FAILURE + fi + + # Extract subdirectory from the argument. + xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'` + if test "X$xdir" = "X$arg"; then + xdir= + else + xdir="$xdir/" + fi + + if test "$pic_object" != none; then + # Prepend the subdirectory the object is found in. 
+ pic_object="$xdir$pic_object" + + if test "$prev" = dlfiles; then + if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then + dlfiles="$dlfiles $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test "$prev" = dlprefiles; then + # Preload the old-style object. + dlprefiles="$dlprefiles $pic_object" + prev= + fi + + # A PIC object. + libobjs="$libobjs $pic_object" + arg="$pic_object" + fi + + # Non-PIC object. + if test "$non_pic_object" != none; then + # Prepend the subdirectory the object is found in. + non_pic_object="$xdir$non_pic_object" + + # A standard non-PIC object + non_pic_objects="$non_pic_objects $non_pic_object" + if test -z "$pic_object" || test "$pic_object" = none ; then + arg="$non_pic_object" + fi + fi + else + # Only an error if not doing a dry-run. + if test -z "$run"; then + $echo "$modename: \`$arg' is not a valid libtool object" 1>&2 + exit $EXIT_FAILURE + else + # Dry-run case. + + # Extract subdirectory from the argument. + xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'` + if test "X$xdir" = "X$arg"; then + xdir= + else + xdir="$xdir/" + fi + + pic_object=`$echo "X${xdir}${objdir}/${arg}" | $Xsed -e "$lo2o"` + non_pic_object=`$echo "X${xdir}${arg}" | $Xsed -e "$lo2o"` + libobjs="$libobjs $pic_object" + non_pic_objects="$non_pic_objects $non_pic_object" + fi + fi + ;; + + *.$libext) + # An archive. + deplibs="$deplibs $arg" + old_deplibs="$old_deplibs $arg" + continue + ;; + + *.la) + # A libtool-controlled library. + + if test "$prev" = dlfiles; then + # This library was specified with -dlopen. + dlfiles="$dlfiles $arg" + prev= + elif test "$prev" = dlprefiles; then + # The library was specified with -dlpreopen. + dlprefiles="$dlprefiles $arg" + prev= + else + deplibs="$deplibs $arg" + fi + continue + ;; + + # Some other compiler argument. + *) + # Unknown arguments in both finalize_command and compile_command need + # to be aesthetically quoted because they are evaled later. + arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + ;; + esac # arg + + # Now actually substitute the argument into the commands. + if test -n "$arg"; then + compile_command="$compile_command $arg" + finalize_command="$finalize_command $arg" + fi + done # argument parsing loop + + if test -n "$prev"; then + $echo "$modename: the \`$prevarg' option requires an argument" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then + eval arg=\"$export_dynamic_flag_spec\" + compile_command="$compile_command $arg" + finalize_command="$finalize_command $arg" + fi + + oldlibs= + # calculate the name of the file, without its directory + outputname=`$echo "X$output" | $Xsed -e 's%^.*/%%'` + libobjs_save="$libobjs" + + if test -n "$shlibpath_var"; then + # get the directories listed in $shlibpath_var + eval shlib_search_path=\`\$echo \"X\${$shlibpath_var}\" \| \$Xsed -e \'s/:/ /g\'\` + else + shlib_search_path= + fi + eval sys_lib_search_path=\"$sys_lib_search_path_spec\" + eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\" + + output_objdir=`$echo "X$output" | $Xsed -e 's%/[^/]*$%%'` + if test "X$output_objdir" = "X$output"; then + output_objdir="$objdir" + else + output_objdir="$output_objdir/$objdir" + fi + # Create the object directory. + if test ! 
-d "$output_objdir"; then + $show "$mkdir $output_objdir" + $run $mkdir $output_objdir + status=$? + if test "$status" -ne 0 && test ! -d "$output_objdir"; then + exit $status + fi + fi + + # Determine the type of output + case $output in + "") + $echo "$modename: you must specify an output file" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + ;; + *.$libext) linkmode=oldlib ;; + *.lo | *.$objext) linkmode=obj ;; + *.la) linkmode=lib ;; + *) linkmode=prog ;; # Anything else should be a program. + esac + + case $host in + *cygwin* | *mingw* | *pw32*) + # don't eliminate duplications in $postdeps and $predeps + duplicate_compiler_generated_deps=yes + ;; + *) + duplicate_compiler_generated_deps=$duplicate_deps + ;; + esac + specialdeplibs= + + libs= + # Find all interdependent deplibs by searching for libraries + # that are linked more than once (e.g. -la -lb -la) + for deplib in $deplibs; do + if test "X$duplicate_deps" = "Xyes" ; then + case "$libs " in + *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; + esac + fi + libs="$libs $deplib" + done + + if test "$linkmode" = lib; then + libs="$predeps $libs $compiler_lib_search_path $postdeps" + + # Compute libraries that are listed more than once in $predeps + # $postdeps and mark them as special (i.e., whose duplicates are + # not to be eliminated). + pre_post_deps= + if test "X$duplicate_compiler_generated_deps" = "Xyes" ; then + for pre_post_dep in $predeps $postdeps; do + case "$pre_post_deps " in + *" $pre_post_dep "*) specialdeplibs="$specialdeplibs $pre_post_deps" ;; + esac + pre_post_deps="$pre_post_deps $pre_post_dep" + done + fi + pre_post_deps= + fi + + deplibs= + newdependency_libs= + newlib_search_path= + need_relink=no # whether we're linking any uninstalled libtool libraries + notinst_deplibs= # not-installed libtool libraries + notinst_path= # paths that contain not-installed libtool libraries + case $linkmode in + lib) + passes="conv link" + for file in $dlfiles $dlprefiles; do + case $file in + *.la) ;; + *) + $echo "$modename: libraries can \`-dlopen' only libtool libraries: $file" 1>&2 + exit $EXIT_FAILURE + ;; + esac + done + ;; + prog) + compile_deplibs= + finalize_deplibs= + alldeplibs=no + newdlfiles= + newdlprefiles= + passes="conv scan dlopen dlpreopen link" + ;; + *) passes="conv" + ;; + esac + for pass in $passes; do + if test "$linkmode,$pass" = "lib,link" || + test "$linkmode,$pass" = "prog,scan"; then + libs="$deplibs" + deplibs= + fi + if test "$linkmode" = prog; then + case $pass in + dlopen) libs="$dlfiles" ;; + dlpreopen) libs="$dlprefiles" ;; + link) libs="$deplibs %DEPLIBS% $dependency_libs" ;; + esac + fi + if test "$pass" = dlopen; then + # Collect dlpreopened libraries + save_deplibs="$deplibs" + deplibs= + fi + for deplib in $libs; do + lib= + found=no + case $deplib in + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe) + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + compiler_flags="$compiler_flags $deplib" + fi + continue + ;; + -l*) + if test "$linkmode" != lib && test "$linkmode" != prog; then + $echo "$modename: warning: \`-l' is ignored for archives/objects" 1>&2 + continue + fi + name=`$echo "X$deplib" | $Xsed -e 's/^-l//'` + for searchdir in $newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path; do + for search_ext in .la $std_shrext .so .a; do + # Search the libtool library + lib="$searchdir/lib${name}${search_ext}" + if test -f "$lib"; then + if 
test "$search_ext" = ".la"; then + found=yes + else + found=no + fi + break 2 + fi + done + done + if test "$found" != yes; then + # deplib doesn't seem to be a libtool library + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" + fi + continue + else # deplib is a libtool library + # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib, + # We need to do some special things here, and not later. + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + case " $predeps $postdeps " in + *" $deplib "*) + if (${SED} -e '2q' $lib | + grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then + library_names= + old_library= + case $lib in + */* | *\\*) . $lib ;; + *) . ./$lib ;; + esac + for l in $old_library $library_names; do + ll="$l" + done + if test "X$ll" = "X$old_library" ; then # only static version available + found=no + ladir=`$echo "X$lib" | $Xsed -e 's%/[^/]*$%%'` + test "X$ladir" = "X$lib" && ladir="." + lib=$ladir/$old_library + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" + fi + continue + fi + fi + ;; + *) ;; + esac + fi + fi + ;; # -l + -L*) + case $linkmode in + lib) + deplibs="$deplib $deplibs" + test "$pass" = conv && continue + newdependency_libs="$deplib $newdependency_libs" + newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'` + ;; + prog) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + continue + fi + if test "$pass" = scan; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'` + ;; + *) + $echo "$modename: warning: \`-L' is ignored for archives/objects" 1>&2 + ;; + esac # linkmode + continue + ;; # -L + -R*) + if test "$pass" = link; then + dir=`$echo "X$deplib" | $Xsed -e 's/^-R//'` + # Make sure the xrpath contains only unique directories. + case "$xrpath " in + *" $dir "*) ;; + *) xrpath="$xrpath $dir" ;; + esac + fi + deplibs="$deplib $deplibs" + continue + ;; + *.la) lib="$deplib" ;; + *.$libext) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + continue + fi + case $linkmode in + lib) + valid_a_lib=no + case $deplibs_check_method in + match_pattern*) + set dummy $deplibs_check_method + match_pattern_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"` + if eval $echo \"$deplib\" 2>/dev/null \ + | $SED 10q \ + | $EGREP "$match_pattern_regex" > /dev/null; then + valid_a_lib=yes + fi + ;; + pass_all) + valid_a_lib=yes + ;; + esac + if test "$valid_a_lib" != yes; then + $echo + $echo "*** Warning: Trying to link with static lib archive $deplib." + $echo "*** I have the capability to make that library automatically link in when" + $echo "*** you link to this library. But I can only do this if you have a" + $echo "*** shared version of the library, which you do not appear to have" + $echo "*** because the file extensions .$libext of this argument makes me believe" + $echo "*** that it is just a static archive that I should not used here." 
+ else + $echo + $echo "*** Warning: Linking the shared library $output against the" + $echo "*** static library $deplib is not portable!" + deplibs="$deplib $deplibs" + fi + continue + ;; + prog) + if test "$pass" != link; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + continue + ;; + esac # linkmode + ;; # *.$libext + *.lo | *.$objext) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + elif test "$linkmode" = prog; then + if test "$pass" = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then + # If there is no dlopen support or we're linking statically, + # we need to preload. + newdlprefiles="$newdlprefiles $deplib" + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + newdlfiles="$newdlfiles $deplib" + fi + fi + continue + ;; + %DEPLIBS%) + alldeplibs=yes + continue + ;; + esac # case $deplib + if test "$found" = yes || test -f "$lib"; then : + else + $echo "$modename: cannot find the library \`$lib'" 1>&2 + exit $EXIT_FAILURE + fi + + # Check to see that this really is a libtool archive. + if (${SED} -e '2q' $lib | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then : + else + $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2 + exit $EXIT_FAILURE + fi + + ladir=`$echo "X$lib" | $Xsed -e 's%/[^/]*$%%'` + test "X$ladir" = "X$lib" && ladir="." + + dlname= + dlopen= + dlpreopen= + libdir= + library_names= + old_library= + # If the library was installed with an old release of libtool, + # it will not redefine variables installed, or shouldnotlink + installed=yes + shouldnotlink=no + avoidtemprpath= + + + # Read the .la file + case $lib in + */* | *\\*) . $lib ;; + *) . ./$lib ;; + esac + + if test "$linkmode,$pass" = "lib,link" || + test "$linkmode,$pass" = "prog,scan" || + { test "$linkmode" != prog && test "$linkmode" != lib; }; then + test -n "$dlopen" && dlfiles="$dlfiles $dlopen" + test -n "$dlpreopen" && dlprefiles="$dlprefiles $dlpreopen" + fi + + if test "$pass" = conv; then + # Only check for convenience libraries + deplibs="$lib $deplibs" + if test -z "$libdir"; then + if test -z "$old_library"; then + $echo "$modename: cannot find name of link library for \`$lib'" 1>&2 + exit $EXIT_FAILURE + fi + # It is a libtool convenience library, so add in its objects. + convenience="$convenience $ladir/$objdir/$old_library" + old_convenience="$old_convenience $ladir/$objdir/$old_library" + tmp_libs= + for deplib in $dependency_libs; do + deplibs="$deplib $deplibs" + if test "X$duplicate_deps" = "Xyes" ; then + case "$tmp_libs " in + *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; + esac + fi + tmp_libs="$tmp_libs $deplib" + done + elif test "$linkmode" != prog && test "$linkmode" != lib; then + $echo "$modename: \`$lib' is not a convenience library" 1>&2 + exit $EXIT_FAILURE + fi + continue + fi # $pass = conv + + + # Get the name of the library we link against. + linklib= + for l in $old_library $library_names; do + linklib="$l" + done + if test -z "$linklib"; then + $echo "$modename: cannot find name of link library for \`$lib'" 1>&2 + exit $EXIT_FAILURE + fi + + # This library was specified with -dlopen. 
+ if test "$pass" = dlopen; then + if test -z "$libdir"; then + $echo "$modename: cannot -dlopen a convenience library: \`$lib'" 1>&2 + exit $EXIT_FAILURE + fi + if test -z "$dlname" || + test "$dlopen_support" != yes || + test "$build_libtool_libs" = no; then + # If there is no dlname, no dlopen support or we're linking + # statically, we need to preload. We also need to preload any + # dependent libraries so libltdl's deplib preloader doesn't + # bomb out in the load deplibs phase. + dlprefiles="$dlprefiles $lib $dependency_libs" + else + newdlfiles="$newdlfiles $lib" + fi + continue + fi # $pass = dlopen + + # We need an absolute path. + case $ladir in + [\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;; + *) + abs_ladir=`cd "$ladir" && pwd` + if test -z "$abs_ladir"; then + $echo "$modename: warning: cannot determine absolute directory name of \`$ladir'" 1>&2 + $echo "$modename: passing it literally to the linker, although it might fail" 1>&2 + abs_ladir="$ladir" + fi + ;; + esac + laname=`$echo "X$lib" | $Xsed -e 's%^.*/%%'` + + # Find the relevant object directory and library name. + if test "X$installed" = Xyes; then + if test ! -f "$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then + $echo "$modename: warning: library \`$lib' was moved." 1>&2 + dir="$ladir" + absdir="$abs_ladir" + libdir="$abs_ladir" + else + dir="$libdir" + absdir="$libdir" + fi + test "X$hardcode_automatic" = Xyes && avoidtemprpath=yes + else + if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then + dir="$ladir" + absdir="$abs_ladir" + # Remove this search path later + notinst_path="$notinst_path $abs_ladir" + else + dir="$ladir/$objdir" + absdir="$abs_ladir/$objdir" + # Remove this search path later + notinst_path="$notinst_path $abs_ladir" + fi + fi # $installed = yes + name=`$echo "X$laname" | $Xsed -e 's/\.la$//' -e 's/^lib//'` + + # This library was specified with -dlpreopen. + if test "$pass" = dlpreopen; then + if test -z "$libdir"; then + $echo "$modename: cannot -dlpreopen a convenience library: \`$lib'" 1>&2 + exit $EXIT_FAILURE + fi + # Prefer using a static library (so that no silly _DYNAMIC symbols + # are required to link). + if test -n "$old_library"; then + newdlprefiles="$newdlprefiles $dir/$old_library" + # Otherwise, use the dlname, so that lt_dlopen finds it. + elif test -n "$dlname"; then + newdlprefiles="$newdlprefiles $dir/$dlname" + else + newdlprefiles="$newdlprefiles $dir/$linklib" + fi + fi # $pass = dlpreopen + + if test -z "$libdir"; then + # Link the convenience library + if test "$linkmode" = lib; then + deplibs="$dir/$old_library $deplibs" + elif test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$dir/$old_library $compile_deplibs" + finalize_deplibs="$dir/$old_library $finalize_deplibs" + else + deplibs="$lib $deplibs" # used for prog,scan pass + fi + continue + fi + + + if test "$linkmode" = prog && test "$pass" != link; then + newlib_search_path="$newlib_search_path $ladir" + deplibs="$lib $deplibs" + + linkalldeplibs=no + if test "$link_all_deplibs" != no || test -z "$library_names" || + test "$build_libtool_libs" = no; then + linkalldeplibs=yes + fi + + tmp_libs= + for deplib in $dependency_libs; do + case $deplib in + -L*) newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`;; ### testsuite: skip nested quoting test + esac + # Need to link against all dependency_libs? 
+ if test "$linkalldeplibs" = yes; then + deplibs="$deplib $deplibs" + else + # Need to hardcode shared library paths + # or/and link against static libraries + newdependency_libs="$deplib $newdependency_libs" + fi + if test "X$duplicate_deps" = "Xyes" ; then + case "$tmp_libs " in + *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; + esac + fi + tmp_libs="$tmp_libs $deplib" + done # for deplib + continue + fi # $linkmode = prog... + + if test "$linkmode,$pass" = "prog,link"; then + if test -n "$library_names" && + { test "$prefer_static_libs" = no || test -z "$old_library"; }; then + # We need to hardcode the library path + if test -n "$shlibpath_var" && test -z "$avoidtemprpath" ; then + # Make sure the rpath contains only unique directories. + case "$temp_rpath " in + *" $dir "*) ;; + *" $absdir "*) ;; + *) temp_rpath="$temp_rpath $absdir" ;; + esac + fi + + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. + case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) compile_rpath="$compile_rpath $absdir" + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) finalize_rpath="$finalize_rpath $libdir" + esac + ;; + esac + fi # $linkmode,$pass = prog,link... + + if test "$alldeplibs" = yes && + { test "$deplibs_check_method" = pass_all || + { test "$build_libtool_libs" = yes && + test -n "$library_names"; }; }; then + # We only need to search for static libraries + continue + fi + fi + + link_static=no # Whether the deplib will be linked statically + if test -n "$library_names" && + { test "$prefer_static_libs" = no || test -z "$old_library"; }; then + if test "$installed" = no; then + notinst_deplibs="$notinst_deplibs $lib" + need_relink=yes + fi + # This is a shared library + + # Warn about portability, can't link against -module's on + # some systems (darwin) + if test "$shouldnotlink" = yes && test "$pass" = link ; then + $echo + if test "$linkmode" = prog; then + $echo "*** Warning: Linking the executable $output against the loadable module" + else + $echo "*** Warning: Linking the shared library $output against the loadable module" + fi + $echo "*** $linklib is not portable!" + fi + if test "$linkmode" = lib && + test "$hardcode_into_libs" = yes; then + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. + case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) compile_rpath="$compile_rpath $absdir" + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) finalize_rpath="$finalize_rpath $libdir" + esac + ;; + esac + fi + + if test -n "$old_archive_from_expsyms_cmds"; then + # figure out the soname + set dummy $library_names + realname="$2" + shift; shift + libname=`eval \\$echo \"$libname_spec\"` + # use dlname if we got it. it's perfectly good, no? 
+ if test -n "$dlname"; then + soname="$dlname" + elif test -n "$soname_spec"; then + # bleh windows + case $host in + *cygwin* | mingw*) + major=`expr $current - $age` + versuffix="-$major" + ;; + esac + eval soname=\"$soname_spec\" + else + soname="$realname" + fi + + # Make a new name for the extract_expsyms_cmds to use + soroot="$soname" + soname=`$echo $soroot | ${SED} -e 's/^.*\///'` + newlib="libimp-`$echo $soname | ${SED} 's/^lib//;s/\.dll$//'`.a" + + # If the library has no export list, then create one now + if test -f "$output_objdir/$soname-def"; then : + else + $show "extracting exported symbol list from \`$soname'" + save_ifs="$IFS"; IFS='~' + cmds=$extract_expsyms_cmds + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" || exit $? + done + IFS="$save_ifs" + fi + + # Create $newlib + if test -f "$output_objdir/$newlib"; then :; else + $show "generating import library for \`$soname'" + save_ifs="$IFS"; IFS='~' + cmds=$old_archive_from_expsyms_cmds + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" || exit $? + done + IFS="$save_ifs" + fi + # make sure the library variables are pointing to the new library + dir=$output_objdir + linklib=$newlib + fi # test -n "$old_archive_from_expsyms_cmds" + + if test "$linkmode" = prog || test "$mode" != relink; then + add_shlibpath= + add_dir= + add= + lib_linked=yes + case $hardcode_action in + immediate | unsupported) + if test "$hardcode_direct" = no; then + add="$dir/$linklib" + case $host in + *-*-sco3.2v5* ) add_dir="-L$dir" ;; + *-*-darwin* ) + # if the lib is a module then we can not link against + # it, someone is ignoring the new warnings I added + if /usr/bin/file -L $add 2> /dev/null | $EGREP "bundle" >/dev/null ; then + $echo "** Warning, lib $linklib is a module, not a shared library" + if test -z "$old_library" ; then + $echo + $echo "** And there doesn't seem to be a static archive available" + $echo "** The link will probably fail, sorry" + else + add="$dir/$old_library" + fi + fi + esac + elif test "$hardcode_minus_L" = no; then + case $host in + *-*-sunos*) add_shlibpath="$dir" ;; + esac + add_dir="-L$dir" + add="-l$name" + elif test "$hardcode_shlibpath_var" = no; then + add_shlibpath="$dir" + add="-l$name" + else + lib_linked=no + fi + ;; + relink) + if test "$hardcode_direct" = yes; then + add="$dir/$linklib" + elif test "$hardcode_minus_L" = yes; then + add_dir="-L$dir" + # Try looking first in the location we're being installed to. 
+ if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + add_dir="$add_dir -L$inst_prefix_dir$libdir" + ;; + esac + fi + add="-l$name" + elif test "$hardcode_shlibpath_var" = yes; then + add_shlibpath="$dir" + add="-l$name" + else + lib_linked=no + fi + ;; + *) lib_linked=no ;; + esac + + if test "$lib_linked" != yes; then + $echo "$modename: configuration error: unsupported hardcode properties" + exit $EXIT_FAILURE + fi + + if test -n "$add_shlibpath"; then + case :$compile_shlibpath: in + *":$add_shlibpath:"*) ;; + *) compile_shlibpath="$compile_shlibpath$add_shlibpath:" ;; + esac + fi + if test "$linkmode" = prog; then + test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs" + test -n "$add" && compile_deplibs="$add $compile_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + if test "$hardcode_direct" != yes && \ + test "$hardcode_minus_L" != yes && \ + test "$hardcode_shlibpath_var" = yes; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;; + esac + fi + fi + fi + + if test "$linkmode" = prog || test "$mode" = relink; then + add_shlibpath= + add_dir= + add= + # Finalize command for both is simple: just hardcode it. + if test "$hardcode_direct" = yes; then + add="$libdir/$linklib" + elif test "$hardcode_minus_L" = yes; then + add_dir="-L$libdir" + add="-l$name" + elif test "$hardcode_shlibpath_var" = yes; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;; + esac + add="-l$name" + elif test "$hardcode_automatic" = yes; then + if test -n "$inst_prefix_dir" && + test -f "$inst_prefix_dir$libdir/$linklib" ; then + add="$inst_prefix_dir$libdir/$linklib" + else + add="$libdir/$linklib" + fi + else + # We cannot seem to hardcode it, guess we'll fake it. + add_dir="-L$libdir" + # Try looking first in the location we're being installed to. + if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + add_dir="$add_dir -L$inst_prefix_dir$libdir" + ;; + esac + fi + add="-l$name" + fi + + if test "$linkmode" = prog; then + test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs" + test -n "$add" && finalize_deplibs="$add $finalize_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + fi + fi + elif test "$linkmode" = prog; then + # Here we assume that one of hardcode_direct or hardcode_minus_L + # is not unsupported. This is valid on all known static and + # shared platforms. + if test "$hardcode_direct" != unsupported; then + test -n "$old_library" && linklib="$old_library" + compile_deplibs="$dir/$linklib $compile_deplibs" + finalize_deplibs="$dir/$linklib $finalize_deplibs" + else + compile_deplibs="-l$name -L$dir $compile_deplibs" + finalize_deplibs="-l$name -L$dir $finalize_deplibs" + fi + elif test "$build_libtool_libs" = yes; then + # Not a shared library + if test "$deplibs_check_method" != pass_all; then + # We're trying link a shared library against a static one + # but the system doesn't support it. + + # Just print a warning and add the library to dependency_libs so + # that the program can be linked against the static library. + $echo + $echo "*** Warning: This system can not link to static lib archive $lib." + $echo "*** I have the capability to make that library automatically link in when" + $echo "*** you link to this library. 
But I can only do this if you have a" + $echo "*** shared version of the library, which you do not appear to have." + if test "$module" = yes; then + $echo "*** But as you try to build a module library, libtool will still create " + $echo "*** a static module, that should work as long as the dlopening application" + $echo "*** is linked with the -dlopen flag to resolve symbols at runtime." + if test -z "$global_symbol_pipe"; then + $echo + $echo "*** However, this would only work if libtool was able to extract symbol" + $echo "*** lists from a program, using \`nm' or equivalent, but libtool could" + $echo "*** not find such a program. So, this module is probably useless." + $echo "*** \`nm' from GNU binutils and a full rebuild may help." + fi + if test "$build_old_libs" = no; then + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + else + deplibs="$dir/$old_library $deplibs" + link_static=yes + fi + fi # link shared/static library? + + if test "$linkmode" = lib; then + if test -n "$dependency_libs" && + { test "$hardcode_into_libs" != yes || + test "$build_old_libs" = yes || + test "$link_static" = yes; }; then + # Extract -R from dependency_libs + temp_deplibs= + for libdir in $dependency_libs; do + case $libdir in + -R*) temp_xrpath=`$echo "X$libdir" | $Xsed -e 's/^-R//'` + case " $xrpath " in + *" $temp_xrpath "*) ;; + *) xrpath="$xrpath $temp_xrpath";; + esac;; + *) temp_deplibs="$temp_deplibs $libdir";; + esac + done + dependency_libs="$temp_deplibs" + fi + + newlib_search_path="$newlib_search_path $absdir" + # Link against this library + test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs" + # ... and its dependency_libs + tmp_libs= + for deplib in $dependency_libs; do + newdependency_libs="$deplib $newdependency_libs" + if test "X$duplicate_deps" = "Xyes" ; then + case "$tmp_libs " in + *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; + esac + fi + tmp_libs="$tmp_libs $deplib" + done + + if test "$link_all_deplibs" != no; then + # Add the search paths of all dependency libraries + for deplib in $dependency_libs; do + case $deplib in + -L*) path="$deplib" ;; + *.la) + dir=`$echo "X$deplib" | $Xsed -e 's%/[^/]*$%%'` + test "X$dir" = "X$deplib" && dir="." + # We need an absolute path. 
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;; + *) + absdir=`cd "$dir" && pwd` + if test -z "$absdir"; then + $echo "$modename: warning: cannot determine absolute directory name of \`$dir'" 1>&2 + absdir="$dir" + fi + ;; + esac + if grep "^installed=no" $deplib > /dev/null; then + path="$absdir/$objdir" + else + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` + if test -z "$libdir"; then + $echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2 + exit $EXIT_FAILURE + fi + if test "$absdir" != "$libdir"; then + $echo "$modename: warning: \`$deplib' seems to be moved" 1>&2 + fi + path="$absdir" + fi + depdepl= + case $host in + *-*-darwin*) + # we do not want to link against static libs, + # but need to link against shared + eval deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` + if test -n "$deplibrary_names" ; then + for tmp in $deplibrary_names ; do + depdepl=$tmp + done + if test -f "$path/$depdepl" ; then + depdepl="$path/$depdepl" + fi + # do not add paths which are already there + case " $newlib_search_path " in + *" $path "*) ;; + *) newlib_search_path="$newlib_search_path $path";; + esac + fi + path="" + ;; + *) + path="-L$path" + ;; + esac + ;; + -l*) + case $host in + *-*-darwin*) + # Again, we only want to link against shared libraries + eval tmp_libs=`$echo "X$deplib" | $Xsed -e "s,^\-l,,"` + for tmp in $newlib_search_path ; do + if test -f "$tmp/lib$tmp_libs.dylib" ; then + eval depdepl="$tmp/lib$tmp_libs.dylib" + break + fi + done + path="" + ;; + *) continue ;; + esac + ;; + *) continue ;; + esac + case " $deplibs " in + *" $path "*) ;; + *) deplibs="$path $deplibs" ;; + esac + case " $deplibs " in + *" $depdepl "*) ;; + *) deplibs="$depdepl $deplibs" ;; + esac + done + fi # link_all_deplibs != no + fi # linkmode = lib + done # for deplib in $libs + dependency_libs="$newdependency_libs" + if test "$pass" = dlpreopen; then + # Link the dlpreopened libraries before other libraries + for deplib in $save_deplibs; do + deplibs="$deplib $deplibs" + done + fi + if test "$pass" != dlopen; then + if test "$pass" != conv; then + # Make sure lib_search_path contains only unique directories. + lib_search_path= + for dir in $newlib_search_path; do + case "$lib_search_path " in + *" $dir "*) ;; + *) lib_search_path="$lib_search_path $dir" ;; + esac + done + newlib_search_path= + fi + + if test "$linkmode,$pass" != "prog,link"; then + vars="deplibs" + else + vars="compile_deplibs finalize_deplibs" + fi + for var in $vars dependency_libs; do + # Add libraries to $var in reverse order + eval tmp_libs=\"\$$var\" + new_libs= + for deplib in $tmp_libs; do + # FIXME: Pedantically, this is the right thing to do, so + # that some nasty dependency loop isn't accidentally + # broken: + #new_libs="$deplib $new_libs" + # Pragmatically, this seems to cause very few problems in + # practice: + case $deplib in + -L*) new_libs="$deplib $new_libs" ;; + -R*) ;; + *) + # And here is the reason: when a library appears more + # than once as an explicit dependence of a library, or + # is implicitly linked in more than once by the + # compiler, it is considered special, and multiple + # occurrences thereof are not removed. Compare this + # with having the same library being listed as a + # dependency of multiple other libraries: in this case, + # we know (pedantically, we assume) the library does not + # need to be listed more than once, so we keep only the + # last copy. 
This is not always right, but it is rare + # enough that we require users that really mean to play + # such unportable linking tricks to link the library + # using -Wl,-lname, so that libtool does not consider it + # for duplicate removal. + case " $specialdeplibs " in + *" $deplib "*) new_libs="$deplib $new_libs" ;; + *) + case " $new_libs " in + *" $deplib "*) ;; + *) new_libs="$deplib $new_libs" ;; + esac + ;; + esac + ;; + esac + done + tmp_libs= + for deplib in $new_libs; do + case $deplib in + -L*) + case " $tmp_libs " in + *" $deplib "*) ;; + *) tmp_libs="$tmp_libs $deplib" ;; + esac + ;; + *) tmp_libs="$tmp_libs $deplib" ;; + esac + done + eval $var=\"$tmp_libs\" + done # for var + fi + # Last step: remove runtime libs from dependency_libs + # (they stay in deplibs) + tmp_libs= + for i in $dependency_libs ; do + case " $predeps $postdeps $compiler_lib_search_path " in + *" $i "*) + i="" + ;; + esac + if test -n "$i" ; then + tmp_libs="$tmp_libs $i" + fi + done + dependency_libs=$tmp_libs + done # for pass + if test "$linkmode" = prog; then + dlfiles="$newdlfiles" + dlprefiles="$newdlprefiles" + fi + + case $linkmode in + oldlib) + if test -n "$deplibs"; then + $echo "$modename: warning: \`-l' and \`-L' are ignored for archives" 1>&2 + fi + + if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then + $echo "$modename: warning: \`-dlopen' is ignored for archives" 1>&2 + fi + + if test -n "$rpath"; then + $echo "$modename: warning: \`-rpath' is ignored for archives" 1>&2 + fi + + if test -n "$xrpath"; then + $echo "$modename: warning: \`-R' is ignored for archives" 1>&2 + fi + + if test -n "$vinfo"; then + $echo "$modename: warning: \`-version-info/-version-number' is ignored for archives" 1>&2 + fi + + if test -n "$release"; then + $echo "$modename: warning: \`-release' is ignored for archives" 1>&2 + fi + + if test -n "$export_symbols" || test -n "$export_symbols_regex"; then + $echo "$modename: warning: \`-export-symbols' is ignored for archives" 1>&2 + fi + + # Now set the variables for building old libraries. + build_libtool_libs=no + oldlibs="$output" + objs="$objs$old_deplibs" + ;; + + lib) + # Make sure we only generate libraries of the form `libNAME.la'. + case $outputname in + lib*) + name=`$echo "X$outputname" | $Xsed -e 's/\.la$//' -e 's/^lib//'` + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + ;; + *) + if test "$module" = no; then + $echo "$modename: libtool library \`$output' must begin with \`lib'" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + if test "$need_lib_prefix" != no; then + # Add the "lib" prefix for modules if required + name=`$echo "X$outputname" | $Xsed -e 's/\.la$//'` + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + else + libname=`$echo "X$outputname" | $Xsed -e 's/\.la$//'` + fi + ;; + esac + + if test -n "$objs"; then + if test "$deplibs_check_method" != pass_all; then + $echo "$modename: cannot build libtool library \`$output' from non-libtool objects on this host:$objs" 2>&1 + exit $EXIT_FAILURE + else + $echo + $echo "*** Warning: Linking the shared library $output against the non-libtool" + $echo "*** objects $objs is not portable!" 
+ libobjs="$libobjs $objs" + fi + fi + + if test "$dlself" != no; then + $echo "$modename: warning: \`-dlopen self' is ignored for libtool libraries" 1>&2 + fi + + set dummy $rpath + if test "$#" -gt 2; then + $echo "$modename: warning: ignoring multiple \`-rpath's for a libtool library" 1>&2 + fi + install_libdir="$2" + + oldlibs= + if test -z "$rpath"; then + if test "$build_libtool_libs" = yes; then + # Building a libtool convenience library. + # Some compilers have problems with a `.al' extension so + # convenience libraries should have the same extension an + # archive normally would. + oldlibs="$output_objdir/$libname.$libext $oldlibs" + build_libtool_libs=convenience + build_old_libs=yes + fi + + if test -n "$vinfo"; then + $echo "$modename: warning: \`-version-info/-version-number' is ignored for convenience libraries" 1>&2 + fi + + if test -n "$release"; then + $echo "$modename: warning: \`-release' is ignored for convenience libraries" 1>&2 + fi + else + + # Parse the version information argument. + save_ifs="$IFS"; IFS=':' + set dummy $vinfo 0 0 0 + IFS="$save_ifs" + + if test -n "$8"; then + $echo "$modename: too many parameters to \`-version-info'" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + # convert absolute version numbers to libtool ages + # this retains compatibility with .la files and attempts + # to make the code below a bit more comprehensible + + case $vinfo_number in + yes) + number_major="$2" + number_minor="$3" + number_revision="$4" + # + # There are really only two kinds -- those that + # use the current revision as the major version + # and those that subtract age and use age as + # a minor version. But, then there is irix + # which has an extra 1 added just for fun + # + case $version_type in + darwin|linux|osf|windows) + current=`expr $number_major + $number_minor` + age="$number_minor" + revision="$number_revision" + ;; + freebsd-aout|freebsd-elf|sunos) + current="$number_major" + revision="$number_minor" + age="0" + ;; + irix|nonstopux) + current=`expr $number_major + $number_minor - 1` + age="$number_minor" + revision="$number_minor" + ;; + esac + ;; + no) + current="$2" + revision="$3" + age="$4" + ;; + esac + + # Check that each of the things are valid numbers. + case $current in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + $echo "$modename: CURRENT \`$current' must be a nonnegative integer" 1>&2 + $echo "$modename: \`$vinfo' is not valid version information" 1>&2 + exit $EXIT_FAILURE + ;; + esac + + case $revision in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + $echo "$modename: REVISION \`$revision' must be a nonnegative integer" 1>&2 + $echo "$modename: \`$vinfo' is not valid version information" 1>&2 + exit $EXIT_FAILURE + ;; + esac + + case $age in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + $echo "$modename: AGE \`$age' must be a nonnegative integer" 1>&2 + $echo "$modename: \`$vinfo' is not valid version information" 1>&2 + exit $EXIT_FAILURE + ;; + esac + + if test "$age" -gt "$current"; then + $echo "$modename: AGE \`$age' is greater than the current interface number \`$current'" 1>&2 + $echo "$modename: \`$vinfo' is not valid version information" 1>&2 + exit $EXIT_FAILURE + fi + + # Calculate the version variables. 
+ major= + versuffix= + verstring= + case $version_type in + none) ;; + + darwin) + # Like Linux, but with the current version available in + # verstring for coding it into the library header + major=.`expr $current - $age` + versuffix="$major.$age.$revision" + # Darwin ld doesn't like 0 for these options... + minor_current=`expr $current + 1` + verstring="${wl}-compatibility_version ${wl}$minor_current ${wl}-current_version ${wl}$minor_current.$revision" + ;; + + freebsd-aout) + major=".$current" + versuffix=".$current.$revision"; + ;; + + freebsd-elf) + major=".$current" + versuffix=".$current"; + ;; + + irix | nonstopux) + major=`expr $current - $age + 1` + + case $version_type in + nonstopux) verstring_prefix=nonstopux ;; + *) verstring_prefix=sgi ;; + esac + verstring="$verstring_prefix$major.$revision" + + # Add in all the interfaces that we are compatible with. + loop=$revision + while test "$loop" -ne 0; do + iface=`expr $revision - $loop` + loop=`expr $loop - 1` + verstring="$verstring_prefix$major.$iface:$verstring" + done + + # Before this point, $major must not contain `.'. + major=.$major + versuffix="$major.$revision" + ;; + + linux) + major=.`expr $current - $age` + versuffix="$major.$age.$revision" + ;; + + osf) + major=.`expr $current - $age` + versuffix=".$current.$age.$revision" + verstring="$current.$age.$revision" + + # Add in all the interfaces that we are compatible with. + loop=$age + while test "$loop" -ne 0; do + iface=`expr $current - $loop` + loop=`expr $loop - 1` + verstring="$verstring:${iface}.0" + done + + # Make executables depend on our current version. + verstring="$verstring:${current}.0" + ;; + + sunos) + major=".$current" + versuffix=".$current.$revision" + ;; + + windows) + # Use '-' rather than '.', since we only want one + # extension on DOS 8.3 filesystems. + major=`expr $current - $age` + versuffix="-$major" + ;; + + *) + $echo "$modename: unknown library version type \`$version_type'" 1>&2 + $echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2 + exit $EXIT_FAILURE + ;; + esac + + # Clear the version info if we defaulted, and they specified a release. + if test -z "$vinfo" && test -n "$release"; then + major= + case $version_type in + darwin) + # we can't check for "0.0" in archive_cmds due to quoting + # problems, so we reset it completely + verstring= + ;; + *) + verstring="0.0" + ;; + esac + if test "$need_version" = no; then + versuffix= + else + versuffix=".0.0" + fi + fi + + # Remove version info from name if versioning should be avoided + if test "$avoid_version" = yes && test "$need_version" = no; then + major= + versuffix= + verstring="" + fi + + # Check to see if the archive will have undefined symbols. + if test "$allow_undefined" = yes; then + if test "$allow_undefined_flag" = unsupported; then + $echo "$modename: warning: undefined symbols not allowed in $host shared libraries" 1>&2 + build_libtool_libs=no + build_old_libs=yes + fi + else + # Don't allow undefined symbols. + allow_undefined_flag="$no_undefined_flag" + fi + fi + + if test "$mode" != relink; then + # Remove our outputs, but don't remove object files since they + # may have been created when compiling PIC objects. 
+ removelist= + tempremovelist=`$echo "$output_objdir/*"` + for p in $tempremovelist; do + case $p in + *.$objext) + ;; + $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/${libname}${release}.*) + if test "X$precious_files_regex" != "X"; then + if echo $p | $EGREP -e "$precious_files_regex" >/dev/null 2>&1 + then + continue + fi + fi + removelist="$removelist $p" + ;; + *) ;; + esac + done + if test -n "$removelist"; then + $show "${rm}r $removelist" + $run ${rm}r $removelist + fi + fi + + # Now set the variables for building old libraries. + if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then + oldlibs="$oldlibs $output_objdir/$libname.$libext" + + # Transform .lo files to .o files. + oldobjs="$objs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e "$lo2o" | $NL2SP` + fi + + # Eliminate all temporary directories. + for path in $notinst_path; do + lib_search_path=`$echo "$lib_search_path " | ${SED} -e 's% $path % %g'` + deplibs=`$echo "$deplibs " | ${SED} -e 's% -L$path % %g'` + dependency_libs=`$echo "$dependency_libs " | ${SED} -e 's% -L$path % %g'` + done + + if test -n "$xrpath"; then + # If the user specified any rpath flags, then add them. + temp_xrpath= + for libdir in $xrpath; do + temp_xrpath="$temp_xrpath -R$libdir" + case "$finalize_rpath " in + *" $libdir "*) ;; + *) finalize_rpath="$finalize_rpath $libdir" ;; + esac + done + if test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes; then + dependency_libs="$temp_xrpath $dependency_libs" + fi + fi + + # Make sure dlfiles contains only unique files that won't be dlpreopened + old_dlfiles="$dlfiles" + dlfiles= + for lib in $old_dlfiles; do + case " $dlprefiles $dlfiles " in + *" $lib "*) ;; + *) dlfiles="$dlfiles $lib" ;; + esac + done + + # Make sure dlprefiles contains only unique files + old_dlprefiles="$dlprefiles" + dlprefiles= + for lib in $old_dlprefiles; do + case "$dlprefiles " in + *" $lib "*) ;; + *) dlprefiles="$dlprefiles $lib" ;; + esac + done + + if test "$build_libtool_libs" = yes; then + if test -n "$rpath"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos*) + # these systems don't actually have a c library (as such)! + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C library is in the System framework + deplibs="$deplibs -framework System" + ;; + *-*-netbsd*) + # Don't link with libc until the a.out ld.so is fixed. + ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc due to us having libc/libc_r. + test "X$arg" = "X-lc" && continue + ;; + *) + # Add libc to deplibs on all other systems if necessary. + if test "$build_libtool_need_lc" = "yes"; then + deplibs="$deplibs -lc" + fi + ;; + esac + fi + + # Transform deplibs into only deplibs that can be linked in shared. + name_save=$name + libname_save=$libname + release_save=$release + versuffix_save=$versuffix + major_save=$major + # I'm not sure if I'm treating the release correctly. I think + # release should show up in the -l (ie -lgmp5) so we don't want to + # add it in twice. Is that correct? + release="" + versuffix="" + major="" + newdeplibs= + droppeddeps=no + case $deplibs_check_method in + pass_all) + # Don't check for shared/static. Everything works. + # This might be a little naive. We might want to check + # whether the library exists or not. But this is on + # osf3 & osf4 and I'm not really sure... Just + # implementing what was already the behavior. 
+ newdeplibs=$deplibs + ;; + test_compile) + # This code stresses the "libraries are programs" paradigm to its + # limits. Maybe even breaks it. We compile a program, linking it + # against the deplibs as a proxy for the library. Then we can check + # whether they linked in statically or dynamically with ldd. + $rm conftest.c + cat > conftest.c </dev/null` + for potent_lib in $potential_libs; do + # Follow soft links. + if ls -lLd "$potent_lib" 2>/dev/null \ + | grep " -> " >/dev/null; then + continue + fi + # The statement above tries to avoid entering an + # endless loop below, in case of cyclic links. + # We might still enter an endless loop, since a link + # loop can be closed while we follow links, + # but so what? + potlib="$potent_lib" + while test -h "$potlib" 2>/dev/null; do + potliblink=`ls -ld $potlib | ${SED} 's/.* -> //'` + case $potliblink in + [\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";; + *) potlib=`$echo "X$potlib" | $Xsed -e 's,[^/]*$,,'`"$potliblink";; + esac + done + if eval $file_magic_cmd \"\$potlib\" 2>/dev/null \ + | ${SED} 10q \ + | $EGREP "$file_magic_regex" > /dev/null; then + newdeplibs="$newdeplibs $a_deplib" + a_deplib="" + break 2 + fi + done + done + fi + if test -n "$a_deplib" ; then + droppeddeps=yes + $echo + $echo "*** Warning: linker path does not have real file for library $a_deplib." + $echo "*** I have the capability to make that library automatically link in when" + $echo "*** you link to this library. But I can only do this if you have a" + $echo "*** shared version of the library, which you do not appear to have" + $echo "*** because I did check the linker path looking for a file starting" + if test -z "$potlib" ; then + $echo "*** with $libname but no candidates were found. (...for file magic test)" + else + $echo "*** with $libname and none of the candidates passed a file format test" + $echo "*** using a file magic. Last file checked: $potlib" + fi + fi + else + # Add a -L argument. + newdeplibs="$newdeplibs $a_deplib" + fi + done # Gone through all deplibs. + ;; + match_pattern*) + set dummy $deplibs_check_method + match_pattern_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"` + for a_deplib in $deplibs; do + name=`expr $a_deplib : '-l\(.*\)'` + # If $name is empty we are operating on a -L argument. + if test -n "$name" && test "$name" != "0"; then + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + case " $predeps $postdeps " in + *" $a_deplib "*) + newdeplibs="$newdeplibs $a_deplib" + a_deplib="" + ;; + esac + fi + if test -n "$a_deplib" ; then + libname=`eval \\$echo \"$libname_spec\"` + for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do + potential_libs=`ls $i/$libname[.-]* 2>/dev/null` + for potent_lib in $potential_libs; do + potlib="$potent_lib" # see symlink-check above in file_magic test + if eval $echo \"$potent_lib\" 2>/dev/null \ + | ${SED} 10q \ + | $EGREP "$match_pattern_regex" > /dev/null; then + newdeplibs="$newdeplibs $a_deplib" + a_deplib="" + break 2 + fi + done + done + fi + if test -n "$a_deplib" ; then + droppeddeps=yes + $echo + $echo "*** Warning: linker path does not have real file for library $a_deplib." + $echo "*** I have the capability to make that library automatically link in when" + $echo "*** you link to this library. 
But I can only do this if you have a" + $echo "*** shared version of the library, which you do not appear to have" + $echo "*** because I did check the linker path looking for a file starting" + if test -z "$potlib" ; then + $echo "*** with $libname but no candidates were found. (...for regex pattern test)" + else + $echo "*** with $libname and none of the candidates passed a file format test" + $echo "*** using a regex pattern. Last file checked: $potlib" + fi + fi + else + # Add a -L argument. + newdeplibs="$newdeplibs $a_deplib" + fi + done # Gone through all deplibs. + ;; + none | unknown | *) + newdeplibs="" + tmp_deplibs=`$echo "X $deplibs" | $Xsed -e 's/ -lc$//' \ + -e 's/ -[LR][^ ]*//g'` + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + for i in $predeps $postdeps ; do + # can't use Xsed below, because $i might contain '/' + tmp_deplibs=`$echo "X $tmp_deplibs" | ${SED} -e "1s,^X,," -e "s,$i,,"` + done + fi + if $echo "X $tmp_deplibs" | $Xsed -e 's/[ ]//g' \ + | grep . >/dev/null; then + $echo + if test "X$deplibs_check_method" = "Xnone"; then + $echo "*** Warning: inter-library dependencies are not supported in this platform." + else + $echo "*** Warning: inter-library dependencies are not known to be supported." + fi + $echo "*** All declared inter-library dependencies are being dropped." + droppeddeps=yes + fi + ;; + esac + versuffix=$versuffix_save + major=$major_save + release=$release_save + libname=$libname_save + name=$name_save + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library is the System framework + newdeplibs=`$echo "X $newdeplibs" | $Xsed -e 's/ -lc / -framework System /'` + ;; + esac + + if test "$droppeddeps" = yes; then + if test "$module" = yes; then + $echo + $echo "*** Warning: libtool could not satisfy all declared inter-library" + $echo "*** dependencies of module $libname. Therefore, libtool will create" + $echo "*** a static module, that should work as long as the dlopening" + $echo "*** application is linked with the -dlopen flag." + if test -z "$global_symbol_pipe"; then + $echo + $echo "*** However, this would only work if libtool was able to extract symbol" + $echo "*** lists from a program, using \`nm' or equivalent, but libtool could" + $echo "*** not find such a program. So, this module is probably useless." + $echo "*** \`nm' from GNU binutils and a full rebuild may help." + fi + if test "$build_old_libs" = no; then + oldlibs="$output_objdir/$libname.$libext" + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + else + $echo "*** The inter-library dependencies that have been dropped here will be" + $echo "*** automatically added whenever a program is linked with this library" + $echo "*** or is declared to -dlopen it." + + if test "$allow_undefined" = no; then + $echo + $echo "*** Since this library must not contain undefined symbols," + $echo "*** because either the platform does not support them or" + $echo "*** it was explicitly requested with -no-undefined," + $echo "*** libtool will only create a static version of it." + if test "$build_old_libs" = no; then + oldlibs="$output_objdir/$libname.$libext" + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + fi + fi + # Done checking deplibs! + deplibs=$newdeplibs + fi + + # All the library-specific variables (install_libdir is set above). 
+ library_names= + old_library= + dlname= + + # Test again, we may have decided not to build it any more + if test "$build_libtool_libs" = yes; then + if test "$hardcode_into_libs" = yes; then + # Hardcode the library paths + hardcode_libdirs= + dep_rpath= + rpath="$finalize_rpath" + test "$mode" != relink && rpath="$compile_rpath$rpath" + for libdir in $rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + dep_rpath="$dep_rpath $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) perm_rpath="$perm_rpath $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + if test -n "$hardcode_libdir_flag_spec_ld"; then + eval dep_rpath=\"$hardcode_libdir_flag_spec_ld\" + else + eval dep_rpath=\"$hardcode_libdir_flag_spec\" + fi + fi + if test -n "$runpath_var" && test -n "$perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do + rpath="$rpath$dir:" + done + eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var" + fi + test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs" + fi + + shlibpath="$finalize_shlibpath" + test "$mode" != relink && shlibpath="$compile_shlibpath$shlibpath" + if test -n "$shlibpath"; then + eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var" + fi + + # Get the real and link names of the library. + eval shared_ext=\"$shrext_cmds\" + eval library_names=\"$library_names_spec\" + set dummy $library_names + realname="$2" + shift; shift + + if test -n "$soname_spec"; then + eval soname=\"$soname_spec\" + else + soname="$realname" + fi + if test -z "$dlname"; then + dlname=$soname + fi + + lib="$output_objdir/$realname" + for link + do + linknames="$linknames $link" + done + + # Use standard objects if they are pic + test -z "$pic_flag" && libobjs=`$echo "X$libobjs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then + $show "generating symbol list for \`$libname.la'" + export_symbols="$output_objdir/$libname.exp" + $run $rm $export_symbols + cmds=$export_symbols_cmds + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + if len=`expr "X$cmd" : ".*"` && + test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then + $show "$cmd" + $run eval "$cmd" || exit $? + skipped_export=false + else + # The command line is too long to execute in one step. + $show "using reloadable object file for export list..." + skipped_export=: + # Break out early, otherwise skipped_export may be + # set to false by a later but shorter cmd. 
+ break + fi + done + IFS="$save_ifs" + if test -n "$export_symbols_regex"; then + $show "$EGREP -e \"$export_symbols_regex\" \"$export_symbols\" > \"${export_symbols}T\"" + $run eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + $show "$mv \"${export_symbols}T\" \"$export_symbols\"" + $run eval '$mv "${export_symbols}T" "$export_symbols"' + fi + fi + fi + + if test -n "$export_symbols" && test -n "$include_expsyms"; then + $run eval '$echo "X$include_expsyms" | $SP2NL >> "$export_symbols"' + fi + + tmp_deplibs= + for test_deplib in $deplibs; do + case " $convenience " in + *" $test_deplib "*) ;; + *) + tmp_deplibs="$tmp_deplibs $test_deplib" + ;; + esac + done + deplibs="$tmp_deplibs" + + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + else + gentop="$output_objdir/${outputname}x" + generated="$generated $gentop" + + func_extract_archives $gentop $convenience + libobjs="$libobjs $func_extract_archives_result" + fi + fi + + if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then + eval flag=\"$thread_safe_flag_spec\" + linker_flags="$linker_flags $flag" + fi + + # Make a backup of the uninstalled library when relinking + if test "$mode" = relink; then + $run eval '(cd $output_objdir && $rm ${realname}U && $mv $realname ${realname}U)' || exit $? + fi + + # Do each of the archive commands. + if test "$module" = yes && test -n "$module_cmds" ; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + eval test_cmds=\"$module_expsym_cmds\" + cmds=$module_expsym_cmds + else + eval test_cmds=\"$module_cmds\" + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + eval test_cmds=\"$archive_expsym_cmds\" + cmds=$archive_expsym_cmds + else + eval test_cmds=\"$archive_cmds\" + cmds=$archive_cmds + fi + fi + + if test "X$skipped_export" != "X:" && + len=`expr "X$test_cmds" : ".*" 2>/dev/null` && + test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then + : + else + # The command line is too long to link in one step, link piecewise. + $echo "creating reloadable object files..." + + # Save the value of $output and $libobjs because we want to + # use them later. If we have whole_archive_flag_spec, we + # want to use save_libobjs as it was before + # whole_archive_flag_spec was expanded, because we can't + # assume the linker understands whole_archive_flag_spec. + # This may have to be revisited, in case too many + # convenience libraries get linked in and end up exceeding + # the spec. + if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + fi + save_output=$output + output_la=`$echo "X$output" | $Xsed -e "$basename"` + + # Clear the reloadable object creation command queue and + # initialize k to one. + test_cmds= + concat_cmds= + objlist= + delfiles= + last_robj= + k=1 + output=$output_objdir/$output_la-${k}.$objext + # Loop over the list of objects to be linked. + for obj in $save_libobjs + do + eval test_cmds=\"$reload_cmds $objlist $last_robj\" + if test "X$objlist" = X || + { len=`expr "X$test_cmds" : ".*" 2>/dev/null` && + test "$len" -le "$max_cmd_len"; }; then + objlist="$objlist $obj" + else + # The command $test_cmds is almost too long, add a + # command to the queue. + if test "$k" -eq 1 ; then + # The first file doesn't have a previous command to add. 
+ eval concat_cmds=\"$reload_cmds $objlist $last_robj\" + else + # All subsequent reloadable object files will link in + # the last one created. + eval concat_cmds=\"\$concat_cmds~$reload_cmds $objlist $last_robj\" + fi + last_robj=$output_objdir/$output_la-${k}.$objext + k=`expr $k + 1` + output=$output_objdir/$output_la-${k}.$objext + objlist=$obj + len=1 + fi + done + # Handle the remaining objects by creating one last + # reloadable object file. All subsequent reloadable object + # files will link in the last one created. + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\${concat_cmds}$reload_cmds $objlist $last_robj\" + + if ${skipped_export-false}; then + $show "generating symbol list for \`$libname.la'" + export_symbols="$output_objdir/$libname.exp" + $run $rm $export_symbols + libobjs=$output + # Append the command to create the export file. + eval concat_cmds=\"\$concat_cmds~$export_symbols_cmds\" + fi + + # Set up a command to remove the reloadable object files + # after they are used. + i=0 + while test "$i" -lt "$k" + do + i=`expr $i + 1` + delfiles="$delfiles $output_objdir/$output_la-${i}.$objext" + done + + $echo "creating a temporary reloadable object file: $output" + + # Loop through the commands generated above and execute them. + save_ifs="$IFS"; IFS='~' + for cmd in $concat_cmds; do + IFS="$save_ifs" + $show "$cmd" + $run eval "$cmd" || exit $? + done + IFS="$save_ifs" + + libobjs=$output + # Restore the value of output. + output=$save_output + + if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + fi + # Expand the library linking commands again to reset the + # value of $libobjs for piecewise linking. + + # Do each of the archive commands. + if test "$module" = yes && test -n "$module_cmds" ; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + cmds=$module_expsym_cmds + else + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + cmds=$archive_expsym_cmds + else + cmds=$archive_cmds + fi + fi + + # Append the command to remove the reloadable object files + # to the just-reset $cmds. + eval cmds=\"\$cmds~\$rm $delfiles\" + fi + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test "$mode" = relink; then + $run eval '(cd $output_objdir && $rm ${realname}T && $mv ${realname}U $realname)' + fi + + exit $lt_exit + } + done + IFS="$save_ifs" + + # Restore the uninstalled library and exit + if test "$mode" = relink; then + $run eval '(cd $output_objdir && $rm ${realname}T && $mv $realname ${realname}T && $mv "$realname"U $realname)' || exit $? + + if test -n "$convenience"; then + if test -z "$whole_archive_flag_spec"; then + $show "${rm}r $gentop" + $run ${rm}r "$gentop" + fi + fi + + exit $EXIT_SUCCESS + fi + + # Create links to the real library. + for linkname in $linknames; do + if test "$realname" != "$linkname"; then + $show "(cd $output_objdir && $rm $linkname && $LN_S $realname $linkname)" + $run eval '(cd $output_objdir && $rm $linkname && $LN_S $realname $linkname)' || exit $? + fi + done + + # If -module or -export-dynamic was specified, set the dlname. + if test "$module" = yes || test "$export_dynamic" = yes; then + # On all known operating systems, these are identical. 
+ dlname="$soname" + fi + fi + ;; + + obj) + if test -n "$deplibs"; then + $echo "$modename: warning: \`-l' and \`-L' are ignored for objects" 1>&2 + fi + + if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then + $echo "$modename: warning: \`-dlopen' is ignored for objects" 1>&2 + fi + + if test -n "$rpath"; then + $echo "$modename: warning: \`-rpath' is ignored for objects" 1>&2 + fi + + if test -n "$xrpath"; then + $echo "$modename: warning: \`-R' is ignored for objects" 1>&2 + fi + + if test -n "$vinfo"; then + $echo "$modename: warning: \`-version-info' is ignored for objects" 1>&2 + fi + + if test -n "$release"; then + $echo "$modename: warning: \`-release' is ignored for objects" 1>&2 + fi + + case $output in + *.lo) + if test -n "$objs$old_deplibs"; then + $echo "$modename: cannot build library object \`$output' from non-libtool objects" 1>&2 + exit $EXIT_FAILURE + fi + libobj="$output" + obj=`$echo "X$output" | $Xsed -e "$lo2o"` + ;; + *) + libobj= + obj="$output" + ;; + esac + + # Delete the old objects. + $run $rm $obj $libobj + + # Objects from convenience libraries. This assumes + # single-version convenience libraries. Whenever we create + # different ones for PIC/non-PIC, this we'll have to duplicate + # the extraction. + reload_conv_objs= + gentop= + # reload_cmds runs $LD directly, so let us get rid of + # -Wl from whole_archive_flag_spec + wl= + + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec"; then + eval reload_conv_objs=\"\$reload_objs $whole_archive_flag_spec\" + else + gentop="$output_objdir/${obj}x" + generated="$generated $gentop" + + func_extract_archives $gentop $convenience + reload_conv_objs="$reload_objs $func_extract_archives_result" + fi + fi + + # Create the old-style object. + reload_objs="$objs$old_deplibs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}$'/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test + + output="$obj" + cmds=$reload_cmds + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" || exit $? + done + IFS="$save_ifs" + + # Exit if we aren't doing a library object file. + if test -z "$libobj"; then + if test -n "$gentop"; then + $show "${rm}r $gentop" + $run ${rm}r $gentop + fi + + exit $EXIT_SUCCESS + fi + + if test "$build_libtool_libs" != yes; then + if test -n "$gentop"; then + $show "${rm}r $gentop" + $run ${rm}r $gentop + fi + + # Create an invalid libtool object if no PIC, so that we don't + # accidentally link it into a program. + # $show "echo timestamp > $libobj" + # $run eval "echo timestamp > $libobj" || exit $? + exit $EXIT_SUCCESS + fi + + if test -n "$pic_flag" || test "$pic_mode" != default; then + # Only do commands if we really have different PIC objects. + reload_objs="$libobjs $reload_conv_objs" + output="$libobj" + cmds=$reload_cmds + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" || exit $? 
+ done + IFS="$save_ifs" + fi + + if test -n "$gentop"; then + $show "${rm}r $gentop" + $run ${rm}r $gentop + fi + + exit $EXIT_SUCCESS + ;; + + prog) + case $host in + *cygwin*) output=`$echo $output | ${SED} -e 's,.exe$,,;s,$,.exe,'` ;; + esac + if test -n "$vinfo"; then + $echo "$modename: warning: \`-version-info' is ignored for programs" 1>&2 + fi + + if test -n "$release"; then + $echo "$modename: warning: \`-release' is ignored for programs" 1>&2 + fi + + if test "$preload" = yes; then + if test "$dlopen_support" = unknown && test "$dlopen_self" = unknown && + test "$dlopen_self_static" = unknown; then + $echo "$modename: warning: \`AC_LIBTOOL_DLOPEN' not used. Assuming no dlopen support." + fi + fi + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library is the System framework + compile_deplibs=`$echo "X $compile_deplibs" | $Xsed -e 's/ -lc / -framework System /'` + finalize_deplibs=`$echo "X $finalize_deplibs" | $Xsed -e 's/ -lc / -framework System /'` + ;; + esac + + case $host in + *darwin*) + # Don't allow lazy linking, it breaks C++ global constructors + if test "$tagname" = CXX ; then + compile_command="$compile_command ${wl}-bind_at_load" + finalize_command="$finalize_command ${wl}-bind_at_load" + fi + ;; + esac + + compile_command="$compile_command $compile_deplibs" + finalize_command="$finalize_command $finalize_deplibs" + + if test -n "$rpath$xrpath"; then + # If the user specified any rpath flags, then add them. + for libdir in $rpath $xrpath; do + # This is the magic to use -rpath. + case "$finalize_rpath " in + *" $libdir "*) ;; + *) finalize_rpath="$finalize_rpath $libdir" ;; + esac + done + fi + + # Now hardcode the library paths + rpath= + hardcode_libdirs= + for libdir in $compile_rpath $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + rpath="$rpath $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) perm_rpath="$perm_rpath $libdir" ;; + esac + fi + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*) + case :$dllsearchpath: in + *":$libdir:"*) ;; + *) dllsearchpath="$dllsearchpath:$libdir";; + esac + ;; + esac + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + compile_rpath="$rpath" + + rpath= + hardcode_libdirs= + for libdir in $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. 
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + rpath="$rpath $flag" + fi + elif test -n "$runpath_var"; then + case "$finalize_perm_rpath " in + *" $libdir "*) ;; + *) finalize_perm_rpath="$finalize_perm_rpath $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + finalize_rpath="$rpath" + + if test -n "$libobjs" && test "$build_old_libs" = yes; then + # Transform all the library objects into standard objects. + compile_command=`$echo "X$compile_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` + finalize_command=`$echo "X$finalize_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` + fi + + dlsyms= + if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then + if test -n "$NM" && test -n "$global_symbol_pipe"; then + dlsyms="${outputname}S.c" + else + $echo "$modename: not configured to extract global symbols from dlpreopened files" 1>&2 + fi + fi + + if test -n "$dlsyms"; then + case $dlsyms in + "") ;; + *.c) + # Discover the nlist of each of the dlfiles. + nlist="$output_objdir/${outputname}.nm" + + $show "$rm $nlist ${nlist}S ${nlist}T" + $run $rm "$nlist" "${nlist}S" "${nlist}T" + + # Parse the name list into a source file. + $show "creating $output_objdir/$dlsyms" + + test -z "$run" && $echo > "$output_objdir/$dlsyms" "\ +/* $dlsyms - symbol resolution table for \`$outputname' dlsym emulation. */ +/* Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP */ + +#ifdef __cplusplus +extern \"C\" { +#endif + +/* Prevent the only kind of declaration conflicts we can make. */ +#define lt_preloaded_symbols some_other_symbol + +/* External symbol declarations for the compiler. */\ +" + + if test "$dlself" = yes; then + $show "generating symbol list for \`$output'" + + test -z "$run" && $echo ': @PROGRAM@ ' > "$nlist" + + # Add our own program objects to the symbol list. 
+ progfiles=`$echo "X$objs$old_deplibs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` + for arg in $progfiles; do + $show "extracting global C symbols from \`$arg'" + $run eval "$NM $arg | $global_symbol_pipe >> '$nlist'" + done + + if test -n "$exclude_expsyms"; then + $run eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T' + $run eval '$mv "$nlist"T "$nlist"' + fi + + if test -n "$export_symbols_regex"; then + $run eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T' + $run eval '$mv "$nlist"T "$nlist"' + fi + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + export_symbols="$output_objdir/$outputname.exp" + $run $rm $export_symbols + $run eval "${SED} -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"' + else + $run eval "${SED} -e 's/\([ ][.*^$]\)/\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"' + $run eval 'grep -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T' + $run eval 'mv "$nlist"T "$nlist"' + fi + fi + + for arg in $dlprefiles; do + $show "extracting global C symbols from \`$arg'" + name=`$echo "$arg" | ${SED} -e 's%^.*/%%'` + $run eval '$echo ": $name " >> "$nlist"' + $run eval "$NM $arg | $global_symbol_pipe >> '$nlist'" + done + + if test -z "$run"; then + # Make sure we have at least an empty file. + test -f "$nlist" || : > "$nlist" + + if test -n "$exclude_expsyms"; then + $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T + $mv "$nlist"T "$nlist" + fi + + # Try sorting and uniquifying the output. + if grep -v "^: " < "$nlist" | + if sort -k 3 /dev/null 2>&1; then + sort -k 3 + else + sort +2 + fi | + uniq > "$nlist"S; then + : + else + grep -v "^: " < "$nlist" > "$nlist"S + fi + + if test -f "$nlist"S; then + eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$dlsyms"' + else + $echo '/* NONE */' >> "$output_objdir/$dlsyms" + fi + + $echo >> "$output_objdir/$dlsyms" "\ + +#undef lt_preloaded_symbols + +#if defined (__STDC__) && __STDC__ +# define lt_ptr void * +#else +# define lt_ptr char * +# define const +#endif + +/* The mapping between symbol names and symbols. */ +" + + case $host in + *cygwin* | *mingw* ) + $echo >> "$output_objdir/$dlsyms" "\ +/* DATA imports from DLLs on WIN32 can't be const, because + runtime relocations are performed -- see ld's documentation + on pseudo-relocs */ +struct { +" + ;; + * ) + $echo >> "$output_objdir/$dlsyms" "\ +const struct { +" + ;; + esac + + + $echo >> "$output_objdir/$dlsyms" "\ + const char *name; + lt_ptr address; +} +lt_preloaded_symbols[] = +{\ +" + + eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$dlsyms" + + $echo >> "$output_objdir/$dlsyms" "\ + {0, (lt_ptr) 0} +}; + +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt_preloaded_symbols; +} +#endif + +#ifdef __cplusplus +} +#endif\ +" + fi + + pic_flag_for_symtable= + case $host in + # compiling the symbol table file with pic_flag works around + # a FreeBSD bug that causes programs to crash when -lm is + # linked before any other PIC object. But we must not use + # pic_flag when linking with -static. The problem exists in + # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1. 
+ *-*-freebsd2*|*-*-freebsd3.0*|*-*-freebsdelf3.0*) + case "$compile_command " in + *" -static "*) ;; + *) pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND";; + esac;; + *-*-hpux*) + case "$compile_command " in + *" -static "*) ;; + *) pic_flag_for_symtable=" $pic_flag";; + esac + esac + + # Now compile the dynamic symbol file. + $show "(cd $output_objdir && $LTCC -c$no_builtin_flag$pic_flag_for_symtable \"$dlsyms\")" + $run eval '(cd $output_objdir && $LTCC -c$no_builtin_flag$pic_flag_for_symtable "$dlsyms")' || exit $? + + # Clean up the generated files. + $show "$rm $output_objdir/$dlsyms $nlist ${nlist}S ${nlist}T" + $run $rm "$output_objdir/$dlsyms" "$nlist" "${nlist}S" "${nlist}T" + + # Transform the symbol file into the correct name. + compile_command=`$echo "X$compile_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"` + finalize_command=`$echo "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"` + ;; + *) + $echo "$modename: unknown suffix for \`$dlsyms'" 1>&2 + exit $EXIT_FAILURE + ;; + esac + else + # We keep going just in case the user didn't refer to + # lt_preloaded_symbols. The linker will fail if global_symbol_pipe + # really was required. + + # Nullify the symbol file. + compile_command=`$echo "X$compile_command" | $Xsed -e "s% @SYMFILE@%%"` + finalize_command=`$echo "X$finalize_command" | $Xsed -e "s% @SYMFILE@%%"` + fi + + if test "$need_relink" = no || test "$build_libtool_libs" != yes; then + # Replace the output file specification. + compile_command=`$echo "X$compile_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'` + link_command="$compile_command$compile_rpath" + + # We have no uninstalled library dependencies, so finalize right now. + $show "$link_command" + $run eval "$link_command" + status=$? + + # Delete the generated files. + if test -n "$dlsyms"; then + $show "$rm $output_objdir/${outputname}S.${objext}" + $run $rm "$output_objdir/${outputname}S.${objext}" + fi + + exit $status + fi + + if test -n "$shlibpath_var"; then + # We should set the shlibpath_var + rpath= + for dir in $temp_rpath; do + case $dir in + [\\/]* | [A-Za-z]:[\\/]*) + # Absolute path. + rpath="$rpath$dir:" + ;; + *) + # Relative path: add a thisdir entry. + rpath="$rpath\$thisdir/$dir:" + ;; + esac + done + temp_rpath="$rpath" + fi + + if test -n "$compile_shlibpath$finalize_shlibpath"; then + compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command" + fi + if test -n "$finalize_shlibpath"; then + finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command" + fi + + compile_var= + finalize_var= + if test -n "$runpath_var"; then + if test -n "$perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do + rpath="$rpath$dir:" + done + compile_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + if test -n "$finalize_perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $finalize_perm_rpath; do + rpath="$rpath$dir:" + done + finalize_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + fi + + if test "$no_install" = yes; then + # We don't need to create a wrapper script. + link_command="$compile_var$compile_command$compile_rpath" + # Replace the output file specification. + link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'` + # Delete the old output file. + $run $rm $output + # Link the executable and exit + $show "$link_command" + $run eval "$link_command" || exit $? 
+ exit $EXIT_SUCCESS + fi + + if test "$hardcode_action" = relink; then + # Fast installation is not supported + link_command="$compile_var$compile_command$compile_rpath" + relink_command="$finalize_var$finalize_command$finalize_rpath" + + $echo "$modename: warning: this platform does not like uninstalled shared libraries" 1>&2 + $echo "$modename: \`$output' will be relinked during installation" 1>&2 + else + if test "$fast_install" != no; then + link_command="$finalize_var$compile_command$finalize_rpath" + if test "$fast_install" = yes; then + relink_command=`$echo "X$compile_var$compile_command$compile_rpath" | $Xsed -e 's%@OUTPUT@%\$progdir/\$file%g'` + else + # fast_install is set to needless + relink_command= + fi + else + link_command="$compile_var$compile_command$compile_rpath" + relink_command="$finalize_var$finalize_command$finalize_rpath" + fi + fi + + # Replace the output file specification. + link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'` + + # Delete the old output files. + $run $rm $output $output_objdir/$outputname $output_objdir/lt-$outputname + + $show "$link_command" + $run eval "$link_command" || exit $? + + # Now create the wrapper script. + $show "creating $output" + + # Quote the relink command for shipping. + if test -n "$relink_command"; then + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + var_value=`$echo "X$var_value" | $Xsed -e "$sed_quote_subst"` + relink_command="$var=\"$var_value\"; export $var; $relink_command" + fi + done + relink_command="(cd `pwd`; $relink_command)" + relink_command=`$echo "X$relink_command" | $Xsed -e "$sed_quote_subst"` + fi + + # Quote $echo for shipping. + if test "X$echo" = "X$SHELL $progpath --fallback-echo"; then + case $progpath in + [\\/]* | [A-Za-z]:[\\/]*) qecho="$SHELL $progpath --fallback-echo";; + *) qecho="$SHELL `pwd`/$progpath --fallback-echo";; + esac + qecho=`$echo "X$qecho" | $Xsed -e "$sed_quote_subst"` + else + qecho=`$echo "X$echo" | $Xsed -e "$sed_quote_subst"` + fi + + # Only actually do things if our run command is non-null. + if test -z "$run"; then + # win32 will think the script is a binary if it has + # a .exe suffix, so we strip it off here. 
+ case $output in + *.exe) output=`$echo $output|${SED} 's,.exe$,,'` ;; + esac + # test for cygwin because mv fails w/o .exe extensions + case $host in + *cygwin*) + exeext=.exe + outputname=`$echo $outputname|${SED} 's,.exe$,,'` ;; + *) exeext= ;; + esac + case $host in + *cygwin* | *mingw* ) + cwrappersource=`$echo ${objdir}/lt-${outputname}.c` + cwrapper=`$echo ${output}.exe` + $rm $cwrappersource $cwrapper + trap "$rm $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15 + + cat > $cwrappersource <> $cwrappersource<<"EOF" +#include +#include +#include +#include +#include +#include + +#if defined(PATH_MAX) +# define LT_PATHMAX PATH_MAX +#elif defined(MAXPATHLEN) +# define LT_PATHMAX MAXPATHLEN +#else +# define LT_PATHMAX 1024 +#endif + +#ifndef DIR_SEPARATOR +#define DIR_SEPARATOR '/' +#endif + +#if defined (_WIN32) || defined (__MSDOS__) || defined (__DJGPP__) || \ + defined (__OS2__) +#define HAVE_DOS_BASED_FILE_SYSTEM +#ifndef DIR_SEPARATOR_2 +#define DIR_SEPARATOR_2 '\\' +#endif +#endif + +#ifndef DIR_SEPARATOR_2 +# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR) +#else /* DIR_SEPARATOR_2 */ +# define IS_DIR_SEPARATOR(ch) \ + (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2)) +#endif /* DIR_SEPARATOR_2 */ + +#define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type))) +#define XFREE(stale) do { \ + if (stale) { free ((void *) stale); stale = 0; } \ +} while (0) + +const char *program_name = NULL; + +void * xmalloc (size_t num); +char * xstrdup (const char *string); +char * basename (const char *name); +char * fnqualify(const char *path); +char * strendzap(char *str, const char *pat); +void lt_fatal (const char *message, ...); + +int +main (int argc, char *argv[]) +{ + char **newargz; + int i; + + program_name = (char *) xstrdup ((char *) basename (argv[0])); + newargz = XMALLOC(char *, argc+2); +EOF + + cat >> $cwrappersource <> $cwrappersource <<"EOF" + newargz[1] = fnqualify(argv[0]); + /* we know the script has the same name, without the .exe */ + /* so make sure newargz[1] doesn't end in .exe */ + strendzap(newargz[1],".exe"); + for (i = 1; i < argc; i++) + newargz[i+1] = xstrdup(argv[i]); + newargz[argc+1] = NULL; +EOF + + cat >> $cwrappersource <> $cwrappersource <<"EOF" + return 127; +} + +void * +xmalloc (size_t num) +{ + void * p = (void *) malloc (num); + if (!p) + lt_fatal ("Memory exhausted"); + + return p; +} + +char * +xstrdup (const char *string) +{ + return string ? strcpy ((char *) xmalloc (strlen (string) + 1), string) : NULL +; +} + +char * +basename (const char *name) +{ + const char *base; + +#if defined (HAVE_DOS_BASED_FILE_SYSTEM) + /* Skip over the disk name in MSDOS pathnames. */ + if (isalpha (name[0]) && name[1] == ':') + name += 2; +#endif + + for (base = name; *name; name++) + if (IS_DIR_SEPARATOR (*name)) + base = name + 1; + return (char *) base; +} + +char * +fnqualify(const char *path) +{ + size_t size; + char *p; + char tmp[LT_PATHMAX + 1]; + + assert(path != NULL); + + /* Is it qualified already? 
*/ +#if defined (HAVE_DOS_BASED_FILE_SYSTEM) + if (isalpha (path[0]) && path[1] == ':') + return xstrdup (path); +#endif + if (IS_DIR_SEPARATOR (path[0])) + return xstrdup (path); + + /* prepend the current directory */ + /* doesn't handle '~' */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal ("getcwd failed"); + size = strlen(tmp) + 1 + strlen(path) + 1; /* +2 for '/' and '\0' */ + p = XMALLOC(char, size); + sprintf(p, "%s%c%s", tmp, DIR_SEPARATOR, path); + return p; +} + +char * +strendzap(char *str, const char *pat) +{ + size_t len, patlen; + + assert(str != NULL); + assert(pat != NULL); + + len = strlen(str); + patlen = strlen(pat); + + if (patlen <= len) + { + str += len - patlen; + if (strcmp(str, pat) == 0) + *str = '\0'; + } + return str; +} + +static void +lt_error_core (int exit_status, const char * mode, + const char * message, va_list ap) +{ + fprintf (stderr, "%s: %s: ", program_name, mode); + vfprintf (stderr, message, ap); + fprintf (stderr, ".\n"); + + if (exit_status >= 0) + exit (exit_status); +} + +void +lt_fatal (const char *message, ...) +{ + va_list ap; + va_start (ap, message); + lt_error_core (EXIT_FAILURE, "FATAL", message, ap); + va_end (ap); +} +EOF + # we should really use a build-platform specific compiler + # here, but OTOH, the wrappers (shell script and this C one) + # are only useful if you want to execute the "real" binary. + # Since the "real" binary is built for $host, then this + # wrapper might as well be built for $host, too. + $run $LTCC -s -o $cwrapper $cwrappersource + ;; + esac + $rm $output + trap "$rm $output; exit $EXIT_FAILURE" 1 2 15 + + $echo > $output "\ +#! $SHELL + +# $output - temporary wrapper script for $objdir/$outputname +# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP +# +# The $output program cannot be directly executed until all the libtool +# libraries that it depends on are installed. +# +# This wrapper script should never be moved out of the build directory. +# If it is, it will not operate correctly. + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +Xsed='${SED} -e 1s/^X//' +sed_quote_subst='$sed_quote_subst' + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +relink_command=\"$relink_command\" + +# This environment variable determines our operation mode. +if test \"\$libtool_install_magic\" = \"$magic\"; then + # install mode needs the following variable: + notinst_deplibs='$notinst_deplibs' +else + # When we are sourced in execute mode, \$file and \$echo are already set. + if test \"\$libtool_execute_magic\" != \"$magic\"; then + echo=\"$qecho\" + file=\"\$0\" + # Make sure echo works. + if test \"X\$1\" = X--no-reexec; then + # Discard the --no-reexec flag, and continue. + shift + elif test \"X\`(\$echo '\t') 2>/dev/null\`\" = 'X\t'; then + # Yippee, \$echo works! + : + else + # Restart under the correct shell, and then maybe \$echo will work. + exec $SHELL \"\$0\" --no-reexec \${1+\"\$@\"} + fi + fi\ +" + $echo >> $output "\ + + # Find the directory that this script lives in. + thisdir=\`\$echo \"X\$file\" | \$Xsed -e 's%/[^/]*$%%'\` + test \"x\$thisdir\" = \"x\$file\" && thisdir=. + + # Follow symbolic links until we get to the real thisdir. 
+ file=\`ls -ld \"\$file\" | ${SED} -n 's/.*-> //p'\` + while test -n \"\$file\"; do + destdir=\`\$echo \"X\$file\" | \$Xsed -e 's%/[^/]*\$%%'\` + + # If there was a directory component, then change thisdir. + if test \"x\$destdir\" != \"x\$file\"; then + case \"\$destdir\" in + [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;; + *) thisdir=\"\$thisdir/\$destdir\" ;; + esac + fi + + file=\`\$echo \"X\$file\" | \$Xsed -e 's%^.*/%%'\` + file=\`ls -ld \"\$thisdir/\$file\" | ${SED} -n 's/.*-> //p'\` + done + + # Try to get the absolute directory name. + absdir=\`cd \"\$thisdir\" && pwd\` + test -n \"\$absdir\" && thisdir=\"\$absdir\" +" + + if test "$fast_install" = yes; then + $echo >> $output "\ + program=lt-'$outputname'$exeext + progdir=\"\$thisdir/$objdir\" + + if test ! -f \"\$progdir/\$program\" || \\ + { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | ${SED} 1q\`; \\ + test \"X\$file\" != \"X\$progdir/\$program\"; }; then + + file=\"\$\$-\$program\" + + if test ! -d \"\$progdir\"; then + $mkdir \"\$progdir\" + else + $rm \"\$progdir/\$file\" + fi" + + $echo >> $output "\ + + # relink executable if necessary + if test -n \"\$relink_command\"; then + if relink_command_output=\`eval \$relink_command 2>&1\`; then : + else + $echo \"\$relink_command_output\" >&2 + $rm \"\$progdir/\$file\" + exit $EXIT_FAILURE + fi + fi + + $mv \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null || + { $rm \"\$progdir/\$program\"; + $mv \"\$progdir/\$file\" \"\$progdir/\$program\"; } + $rm \"\$progdir/\$file\" + fi" + else + $echo >> $output "\ + program='$outputname' + progdir=\"\$thisdir/$objdir\" +" + fi + + $echo >> $output "\ + + if test -f \"\$progdir/\$program\"; then" + + # Export our shlibpath_var if we have one. + if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then + $echo >> $output "\ + # Add our own library path to $shlibpath_var + $shlibpath_var=\"$temp_rpath\$$shlibpath_var\" + + # Some systems cannot cope with colon-terminated $shlibpath_var + # The second colon is a workaround for a bug in BeOS R4 sed + $shlibpath_var=\`\$echo \"X\$$shlibpath_var\" | \$Xsed -e 's/::*\$//'\` + + export $shlibpath_var +" + fi + + # fixup the dll searchpath if we need to. + if test -n "$dllsearchpath"; then + $echo >> $output "\ + # Add the dll search path components to the executable PATH + PATH=$dllsearchpath:\$PATH +" + fi + + $echo >> $output "\ + if test \"\$libtool_execute_magic\" != \"$magic\"; then + # Run the actual program with our arguments. +" + case $host in + # Backslashes separate directories on plain windows + *-*-mingw | *-*-os2*) + $echo >> $output "\ + exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} +" + ;; + + *) + $echo >> $output "\ + exec \"\$progdir/\$program\" \${1+\"\$@\"} +" + ;; + esac + $echo >> $output "\ + \$echo \"\$0: cannot exec \$program \${1+\"\$@\"}\" + exit $EXIT_FAILURE + fi + else + # The program doesn't exist. + \$echo \"\$0: error: \\\`\$progdir/\$program' does not exist\" 1>&2 + \$echo \"This script is just a wrapper for \$program.\" 1>&2 + $echo \"See the $PACKAGE documentation for more information.\" 1>&2 + exit $EXIT_FAILURE + fi +fi\ +" + chmod +x $output + fi + exit $EXIT_SUCCESS + ;; + esac + + # See if we need to build an old-fashioned archive. 
+ for oldlib in $oldlibs; do + + if test "$build_libtool_libs" = convenience; then + oldobjs="$libobjs_save" + addlibs="$convenience" + build_libtool_libs=no + else + if test "$build_libtool_libs" = module; then + oldobjs="$libobjs_save" + build_libtool_libs=no + else + oldobjs="$old_deplibs $non_pic_objects" + fi + addlibs="$old_convenience" + fi + + if test -n "$addlibs"; then + gentop="$output_objdir/${outputname}x" + generated="$generated $gentop" + + func_extract_archives $gentop $addlibs + oldobjs="$oldobjs $func_extract_archives_result" + fi + + # Do each command in the archive commands. + if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then + cmds=$old_archive_from_new_cmds + else + # POSIX demands no paths to be encoded in archives. We have + # to avoid creating archives with duplicate basenames if we + # might have to extract them afterwards, e.g., when creating a + # static archive out of a convenience library, or when linking + # the entirety of a libtool archive into another (currently + # not supported by libtool). + if (for obj in $oldobjs + do + $echo "X$obj" | $Xsed -e 's%^.*/%%' + done | sort | sort -uc >/dev/null 2>&1); then + : + else + $echo "copying selected object files to avoid basename conflicts..." + + if test -z "$gentop"; then + gentop="$output_objdir/${outputname}x" + generated="$generated $gentop" + + $show "${rm}r $gentop" + $run ${rm}r "$gentop" + $show "$mkdir $gentop" + $run $mkdir "$gentop" + status=$? + if test "$status" -ne 0 && test ! -d "$gentop"; then + exit $status + fi + fi + + save_oldobjs=$oldobjs + oldobjs= + counter=1 + for obj in $save_oldobjs + do + objbase=`$echo "X$obj" | $Xsed -e 's%^.*/%%'` + case " $oldobjs " in + " ") oldobjs=$obj ;; + *[\ /]"$objbase "*) + while :; do + # Make sure we don't pick an alternate name that also + # overlaps. + newobj=lt$counter-$objbase + counter=`expr $counter + 1` + case " $oldobjs " in + *[\ /]"$newobj "*) ;; + *) if test ! -f "$gentop/$newobj"; then break; fi ;; + esac + done + $show "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj" + $run ln "$obj" "$gentop/$newobj" || + $run cp "$obj" "$gentop/$newobj" + oldobjs="$oldobjs $gentop/$newobj" + ;; + *) oldobjs="$oldobjs $obj" ;; + esac + done + fi + + eval cmds=\"$old_archive_cmds\" + + if len=`expr "X$cmds" : ".*"` && + test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then + cmds=$old_archive_cmds + else + # the command line is too long to link in one step, link in parts + $echo "using piecewise archive linking..." + save_RANLIB=$RANLIB + RANLIB=: + objlist= + concat_cmds= + save_oldobjs=$oldobjs + + # Is there a better way of finding the last object in the list? 
+ for obj in $save_oldobjs + do + last_oldobj=$obj + done + for obj in $save_oldobjs + do + oldobjs="$objlist $obj" + objlist="$objlist $obj" + eval test_cmds=\"$old_archive_cmds\" + if len=`expr "X$test_cmds" : ".*" 2>/dev/null` && + test "$len" -le "$max_cmd_len"; then + : + else + # the above command should be used before it gets too long + oldobjs=$objlist + if test "$obj" = "$last_oldobj" ; then + RANLIB=$save_RANLIB + fi + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\${concat_cmds}$old_archive_cmds\" + objlist= + fi + done + RANLIB=$save_RANLIB + oldobjs=$objlist + if test "X$oldobjs" = "X" ; then + eval cmds=\"\$concat_cmds\" + else + eval cmds=\"\$concat_cmds~\$old_archive_cmds\" + fi + fi + fi + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + eval cmd=\"$cmd\" + IFS="$save_ifs" + $show "$cmd" + $run eval "$cmd" || exit $? + done + IFS="$save_ifs" + done + + if test -n "$generated"; then + $show "${rm}r$generated" + $run ${rm}r$generated + fi + + # Now create the libtool archive. + case $output in + *.la) + old_library= + test "$build_old_libs" = yes && old_library="$libname.$libext" + $show "creating $output" + + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + var_value=`$echo "X$var_value" | $Xsed -e "$sed_quote_subst"` + relink_command="$var=\"$var_value\"; export $var; $relink_command" + fi + done + # Quote the link command for shipping. + relink_command="(cd `pwd`; $SHELL $progpath $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)" + relink_command=`$echo "X$relink_command" | $Xsed -e "$sed_quote_subst"` + if test "$hardcode_automatic" = yes ; then + relink_command= + fi + + + # Only create the output if not a dry run. 
+ if test -z "$run"; then + for installed in no yes; do + if test "$installed" = yes; then + if test -z "$install_libdir"; then + break + fi + output="$output_objdir/$outputname"i + # Replace all uninstalled libtool libraries with the installed ones + newdependency_libs= + for deplib in $dependency_libs; do + case $deplib in + *.la) + name=`$echo "X$deplib" | $Xsed -e 's%^.*/%%'` + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` + if test -z "$libdir"; then + $echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2 + exit $EXIT_FAILURE + fi + newdependency_libs="$newdependency_libs $libdir/$name" + ;; + *) newdependency_libs="$newdependency_libs $deplib" ;; + esac + done + dependency_libs="$newdependency_libs" + newdlfiles= + for lib in $dlfiles; do + name=`$echo "X$lib" | $Xsed -e 's%^.*/%%'` + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + if test -z "$libdir"; then + $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2 + exit $EXIT_FAILURE + fi + newdlfiles="$newdlfiles $libdir/$name" + done + dlfiles="$newdlfiles" + newdlprefiles= + for lib in $dlprefiles; do + name=`$echo "X$lib" | $Xsed -e 's%^.*/%%'` + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + if test -z "$libdir"; then + $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2 + exit $EXIT_FAILURE + fi + newdlprefiles="$newdlprefiles $libdir/$name" + done + dlprefiles="$newdlprefiles" + else + newdlfiles= + for lib in $dlfiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; + *) abs=`pwd`"/$lib" ;; + esac + newdlfiles="$newdlfiles $abs" + done + dlfiles="$newdlfiles" + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; + *) abs=`pwd`"/$lib" ;; + esac + newdlprefiles="$newdlprefiles $abs" + done + dlprefiles="$newdlprefiles" + fi + $rm $output + # place dlname in correct position for cygwin + tdlname=$dlname + case $host,$output,$installed,$module,$dlname in + *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll) tdlname=../bin/$dlname ;; + esac + $echo > $output "\ +# $outputname - a libtool library file +# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# The name that we can dlopen(3). +dlname='$tdlname' + +# Names of this library. +library_names='$library_names' + +# The name of the static archive. +old_library='$old_library' + +# Libraries that this one depends upon. +dependency_libs='$dependency_libs' + +# Version information for $libname. +current=$current +age=$age +revision=$revision + +# Is this an already installed library? +installed=$installed + +# Should we warn about portability when linking against -modules? +shouldnotlink=$module + +# Files to dlopen/dlpreopen +dlopen='$dlfiles' +dlpreopen='$dlprefiles' + +# Directory that this library needs to be installed in: +libdir='$install_libdir'" + if test "$installed" = no && test "$need_relink" = yes; then + $echo >> $output "\ +relink_command=\"$relink_command\"" + fi + done + fi + + # Do a symbolic link so that the libtool archive can be found in + # LD_LIBRARY_PATH before the program is installed. + $show "(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)" + $run eval '(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)' || exit $? 
+ ;; + esac + exit $EXIT_SUCCESS + ;; + + # libtool install mode + install) + modename="$modename: install" + + # There may be an optional sh(1) argument at the beginning of + # install_prog (especially on Windows NT). + if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh || + # Allow the use of GNU shtool's install command. + $echo "X$nonopt" | grep shtool > /dev/null; then + # Aesthetically quote it. + arg=`$echo "X$nonopt" | $Xsed -e "$sed_quote_subst"` + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + install_prog="$arg " + arg="$1" + shift + else + install_prog= + arg=$nonopt + fi + + # The real first argument should be the name of the installation program. + # Aesthetically quote it. + arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + install_prog="$install_prog$arg" + + # We need to accept at least all the BSD install flags. + dest= + files= + opts= + prev= + install_type= + isdir=no + stripme= + for arg + do + if test -n "$dest"; then + files="$files $dest" + dest=$arg + continue + fi + + case $arg in + -d) isdir=yes ;; + -f) + case " $install_prog " in + *[\\\ /]cp\ *) ;; + *) prev=$arg ;; + esac + ;; + -g | -m | -o) prev=$arg ;; + -s) + stripme=" -s" + continue + ;; + -*) + ;; + *) + # If the previous option needed an argument, then skip it. + if test -n "$prev"; then + prev= + else + dest=$arg + continue + fi + ;; + esac + + # Aesthetically quote the argument. + arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + install_prog="$install_prog $arg" + done + + if test -z "$install_prog"; then + $echo "$modename: you must specify an install program" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + if test -n "$prev"; then + $echo "$modename: the \`$prev' option requires an argument" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + if test -z "$files"; then + if test -z "$dest"; then + $echo "$modename: no file or destination specified" 1>&2 + else + $echo "$modename: you must specify a destination" 1>&2 + fi + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + # Strip any trailing slash from the destination. + dest=`$echo "X$dest" | $Xsed -e 's%/$%%'` + + # Check to see that the destination is a directory. + test -d "$dest" && isdir=yes + if test "$isdir" = yes; then + destdir="$dest" + destname= + else + destdir=`$echo "X$dest" | $Xsed -e 's%/[^/]*$%%'` + test "X$destdir" = "X$dest" && destdir=. + destname=`$echo "X$dest" | $Xsed -e 's%^.*/%%'` + + # Not a directory, so check to see that there is only one file specified. + set dummy $files + if test "$#" -gt 2; then + $echo "$modename: \`$dest' is not a directory" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + fi + case $destdir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + for file in $files; do + case $file in + *.lo) ;; + *) + $echo "$modename: \`$destdir' must be an absolute directory name" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + ;; + esac + done + ;; + esac + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic="$magic" + + staticlibs= + future_libdirs= + current_libdirs= + for file in $files; do + + # Do each installation. + case $file in + *.$libext) + # Do the static libraries later. + staticlibs="$staticlibs $file" + ;; + + *.la) + # Check to see that this really is a libtool archive. 
+ if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then : + else + $echo "$modename: \`$file' is not a valid libtool archive" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + library_names= + old_library= + relink_command= + # If there is no directory component, then add one. + case $file in + */* | *\\*) . $file ;; + *) . ./$file ;; + esac + + # Add the libdir to current_libdirs if it is the destination. + if test "X$destdir" = "X$libdir"; then + case "$current_libdirs " in + *" $libdir "*) ;; + *) current_libdirs="$current_libdirs $libdir" ;; + esac + else + # Note the libdir as a future libdir. + case "$future_libdirs " in + *" $libdir "*) ;; + *) future_libdirs="$future_libdirs $libdir" ;; + esac + fi + + dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`/ + test "X$dir" = "X$file/" && dir= + dir="$dir$objdir" + + if test -n "$relink_command"; then + # Determine the prefix the user has applied to our future dir. + inst_prefix_dir=`$echo "$destdir" | $SED "s%$libdir\$%%"` + + # Don't allow the user to place us outside of our expected + # location b/c this prevents finding dependent libraries that + # are installed to the same prefix. + # At present, this check doesn't affect windows .dll's that + # are installed into $libdir/../bin (currently, that works fine) + # but it's something to keep an eye on. + if test "$inst_prefix_dir" = "$destdir"; then + $echo "$modename: error: cannot install \`$file' to a directory not ending in $libdir" 1>&2 + exit $EXIT_FAILURE + fi + + if test -n "$inst_prefix_dir"; then + # Stick the inst_prefix_dir data into the link command. + relink_command=`$echo "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"` + else + relink_command=`$echo "$relink_command" | $SED "s%@inst_prefix_dir@%%"` + fi + + $echo "$modename: warning: relinking \`$file'" 1>&2 + $show "$relink_command" + if $run eval "$relink_command"; then : + else + $echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2 + exit $EXIT_FAILURE + fi + fi + + # See the names of the shared library. + set dummy $library_names + if test -n "$2"; then + realname="$2" + shift + shift + + srcname="$realname" + test -n "$relink_command" && srcname="$realname"T + + # Install the shared library and build the symlinks. + $show "$install_prog $dir/$srcname $destdir/$realname" + $run eval "$install_prog $dir/$srcname $destdir/$realname" || exit $? + if test -n "$stripme" && test -n "$striplib"; then + $show "$striplib $destdir/$realname" + $run eval "$striplib $destdir/$realname" || exit $? + fi + + if test "$#" -gt 0; then + # Delete the old symlinks, and create new ones. + # Try `ln -sf' first, because the `ln' binary might depend on + # the symlink we replace! Solaris /bin/ln does not understand -f, + # so we also need to try rm && ln -s. + for linkname + do + if test "$linkname" != "$realname"; then + $show "(cd $destdir && { $LN_S -f $realname $linkname || { $rm $linkname && $LN_S $realname $linkname; }; })" + $run eval "(cd $destdir && { $LN_S -f $realname $linkname || { $rm $linkname && $LN_S $realname $linkname; }; })" + fi + done + fi + + # Do each command in the postinstall commands. + lib="$destdir/$realname" + cmds=$postinstall_cmds + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" || { + lt_exit=$? 
+ + # Restore the uninstalled library and exit + if test "$mode" = relink; then + $run eval '(cd $output_objdir && $rm ${realname}T && $mv ${realname}U $realname)' + fi + + exit $lt_exit + } + done + IFS="$save_ifs" + fi + + # Install the pseudo-library for information purposes. + name=`$echo "X$file" | $Xsed -e 's%^.*/%%'` + instname="$dir/$name"i + $show "$install_prog $instname $destdir/$name" + $run eval "$install_prog $instname $destdir/$name" || exit $? + + # Maybe install the static library, too. + test -n "$old_library" && staticlibs="$staticlibs $dir/$old_library" + ;; + + *.lo) + # Install (i.e. copy) a libtool object. + + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile="$destdir/$destname" + else + destfile=`$echo "X$file" | $Xsed -e 's%^.*/%%'` + destfile="$destdir/$destfile" + fi + + # Deduce the name of the destination old-style object file. + case $destfile in + *.lo) + staticdest=`$echo "X$destfile" | $Xsed -e "$lo2o"` + ;; + *.$objext) + staticdest="$destfile" + destfile= + ;; + *) + $echo "$modename: cannot copy a libtool object to \`$destfile'" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + ;; + esac + + # Install the libtool object if requested. + if test -n "$destfile"; then + $show "$install_prog $file $destfile" + $run eval "$install_prog $file $destfile" || exit $? + fi + + # Install the old object if enabled. + if test "$build_old_libs" = yes; then + # Deduce the name of the old-style object file. + staticobj=`$echo "X$file" | $Xsed -e "$lo2o"` + + $show "$install_prog $staticobj $staticdest" + $run eval "$install_prog \$staticobj \$staticdest" || exit $? + fi + exit $EXIT_SUCCESS + ;; + + *) + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile="$destdir/$destname" + else + destfile=`$echo "X$file" | $Xsed -e 's%^.*/%%'` + destfile="$destdir/$destfile" + fi + + # If the file is missing, and there is a .exe on the end, strip it + # because it is most likely a libtool script we actually want to + # install + stripped_ext="" + case $file in + *.exe) + if test ! -f "$file"; then + file=`$echo $file|${SED} 's,.exe$,,'` + stripped_ext=".exe" + fi + ;; + esac + + # Do a test to see if this is really a libtool program. + case $host in + *cygwin*|*mingw*) + wrapper=`$echo $file | ${SED} -e 's,.exe$,,'` + ;; + *) + wrapper=$file + ;; + esac + if (${SED} -e '4q' $wrapper | grep "^# Generated by .*$PACKAGE")>/dev/null 2>&1; then + notinst_deplibs= + relink_command= + + # Note that it is not necessary on cygwin/mingw to append a dot to + # foo even if both foo and FILE.exe exist: automatic-append-.exe + # behavior happens only for exec(3), not for open(2)! Also, sourcing + # `FILE.' does not work on cygwin managed mounts. + # + # If there is no directory component, then add one. + case $wrapper in + */* | *\\*) . ${wrapper} ;; + *) . ./${wrapper} ;; + esac + + # Check the variables that should have been set. + if test -z "$notinst_deplibs"; then + $echo "$modename: invalid libtool wrapper script \`$wrapper'" 1>&2 + exit $EXIT_FAILURE + fi + + finalize=yes + for lib in $notinst_deplibs; do + # Check to see that each library is installed. + libdir= + if test -f "$lib"; then + # If there is no directory component, then add one. + case $lib in + */* | *\\*) . $lib ;; + *) . ./$lib ;; + esac + fi + libfile="$libdir/"`$echo "X$lib" | $Xsed -e 's%^.*/%%g'` ### testsuite: skip nested quoting test + if test -n "$libdir" && test ! 
-f "$libfile"; then + $echo "$modename: warning: \`$lib' has not been installed in \`$libdir'" 1>&2 + finalize=no + fi + done + + relink_command= + # Note that it is not necessary on cygwin/mingw to append a dot to + # foo even if both foo and FILE.exe exist: automatic-append-.exe + # behavior happens only for exec(3), not for open(2)! Also, sourcing + # `FILE.' does not work on cygwin managed mounts. + # + # If there is no directory component, then add one. + case $wrapper in + */* | *\\*) . ${wrapper} ;; + *) . ./${wrapper} ;; + esac + + outputname= + if test "$fast_install" = no && test -n "$relink_command"; then + if test "$finalize" = yes && test -z "$run"; then + tmpdir="/tmp" + test -n "$TMPDIR" && tmpdir="$TMPDIR" + tmpdir="$tmpdir/libtool-$$" + save_umask=`umask` + umask 0077 + if $mkdir "$tmpdir"; then + umask $save_umask + else + umask $save_umask + $echo "$modename: error: cannot create temporary directory \`$tmpdir'" 1>&2 + continue + fi + file=`$echo "X$file$stripped_ext" | $Xsed -e 's%^.*/%%'` + outputname="$tmpdir/$file" + # Replace the output file specification. + relink_command=`$echo "X$relink_command" | $Xsed -e 's%@OUTPUT@%'"$outputname"'%g'` + + $show "$relink_command" + if $run eval "$relink_command"; then : + else + $echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2 + ${rm}r "$tmpdir" + continue + fi + file="$outputname" + else + $echo "$modename: warning: cannot relink \`$file'" 1>&2 + fi + else + # Install the binary that we compiled earlier. + file=`$echo "X$file$stripped_ext" | $Xsed -e "s%\([^/]*\)$%$objdir/\1%"` + fi + fi + + # remove .exe since cygwin /usr/bin/install will append another + # one anyway + case $install_prog,$host in + */usr/bin/install*,*cygwin*) + case $file:$destfile in + *.exe:*.exe) + # this is ok + ;; + *.exe:*) + destfile=$destfile.exe + ;; + *:*.exe) + destfile=`$echo $destfile | ${SED} -e 's,.exe$,,'` + ;; + esac + ;; + esac + $show "$install_prog$stripme $file $destfile" + $run eval "$install_prog\$stripme \$file \$destfile" || exit $? + test -n "$outputname" && ${rm}r "$tmpdir" + ;; + esac + done + + for file in $staticlibs; do + name=`$echo "X$file" | $Xsed -e 's%^.*/%%'` + + # Set up the ranlib parameters. + oldlib="$destdir/$name" + + $show "$install_prog $file $oldlib" + $run eval "$install_prog \$file \$oldlib" || exit $? + + if test -n "$stripme" && test -n "$old_striplib"; then + $show "$old_striplib $oldlib" + $run eval "$old_striplib $oldlib" || exit $? + fi + + # Do each command in the postinstall commands. + cmds=$old_postinstall_cmds + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" || exit $? + done + IFS="$save_ifs" + done + + if test -n "$future_libdirs"; then + $echo "$modename: warning: remember to run \`$progname --finish$future_libdirs'" 1>&2 + fi + + if test -n "$current_libdirs"; then + # Maybe just do a dry run. + test -n "$run" && current_libdirs=" -n$current_libdirs" + exec_cmd='$SHELL $progpath $preserve_args --finish$current_libdirs' + else + exit $EXIT_SUCCESS + fi + ;; + + # libtool finish mode + finish) + modename="$modename: finish" + libdirs="$nonopt" + admincmds= + + if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then + for dir + do + libdirs="$libdirs $dir" + done + + for libdir in $libdirs; do + if test -n "$finish_cmds"; then + # Do each command in the finish commands. 
+ cmds=$finish_cmds + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" || admincmds="$admincmds + $cmd" + done + IFS="$save_ifs" + fi + if test -n "$finish_eval"; then + # Do the single finish_eval. + eval cmds=\"$finish_eval\" + $run eval "$cmds" || admincmds="$admincmds + $cmds" + fi + done + fi + + # Exit here if they wanted silent mode. + test "$show" = : && exit $EXIT_SUCCESS + + $echo "----------------------------------------------------------------------" + $echo "Libraries have been installed in:" + for libdir in $libdirs; do + $echo " $libdir" + done + $echo + $echo "If you ever happen to want to link against installed libraries" + $echo "in a given directory, LIBDIR, you must either use libtool, and" + $echo "specify the full pathname of the library, or use the \`-LLIBDIR'" + $echo "flag during linking and do at least one of the following:" + if test -n "$shlibpath_var"; then + $echo " - add LIBDIR to the \`$shlibpath_var' environment variable" + $echo " during execution" + fi + if test -n "$runpath_var"; then + $echo " - add LIBDIR to the \`$runpath_var' environment variable" + $echo " during linking" + fi + if test -n "$hardcode_libdir_flag_spec"; then + libdir=LIBDIR + eval flag=\"$hardcode_libdir_flag_spec\" + + $echo " - use the \`$flag' linker flag" + fi + if test -n "$admincmds"; then + $echo " - have your system administrator run these commands:$admincmds" + fi + if test -f /etc/ld.so.conf; then + $echo " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'" + fi + $echo + $echo "See any operating system documentation about shared libraries for" + $echo "more information, such as the ld(1) and ld.so(8) manual pages." + $echo "----------------------------------------------------------------------" + exit $EXIT_SUCCESS + ;; + + # libtool execute mode + execute) + modename="$modename: execute" + + # The first argument is the command name. + cmd="$nonopt" + if test -z "$cmd"; then + $echo "$modename: you must specify a COMMAND" 1>&2 + $echo "$help" + exit $EXIT_FAILURE + fi + + # Handle -dlopen flags immediately. + for file in $execute_dlfiles; do + if test ! -f "$file"; then + $echo "$modename: \`$file' is not a file" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + dir= + case $file in + *.la) + # Check to see that this really is a libtool archive. + if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then : + else + $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + # Read the libtool library. + dlname= + library_names= + + # If there is no directory component, then add one. + case $file in + */* | *\\*) . $file ;; + *) . ./$file ;; + esac + + # Skip this library if it cannot be dlopened. + if test -z "$dlname"; then + # Warn if it was a shared library. + test -n "$library_names" && $echo "$modename: warning: \`$file' was not linked with \`-export-dynamic'" + continue + fi + + dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'` + test "X$dir" = "X$file" && dir=. + + if test -f "$dir/$objdir/$dlname"; then + dir="$dir/$objdir" + else + $echo "$modename: cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'" 1>&2 + exit $EXIT_FAILURE + fi + ;; + + *.lo) + # Just add the directory containing the .lo file. + dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'` + test "X$dir" = "X$file" && dir=. 
+ ;; + + *) + $echo "$modename: warning \`-dlopen' is ignored for non-libtool libraries and objects" 1>&2 + continue + ;; + esac + + # Get the absolute pathname. + absdir=`cd "$dir" && pwd` + test -n "$absdir" && dir="$absdir" + + # Now add the directory to shlibpath_var. + if eval "test -z \"\$$shlibpath_var\""; then + eval "$shlibpath_var=\"\$dir\"" + else + eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\"" + fi + done + + # This variable tells wrapper scripts just to set shlibpath_var + # rather than running their programs. + libtool_execute_magic="$magic" + + # Check if any of the arguments is a wrapper script. + args= + for file + do + case $file in + -*) ;; + *) + # Do a test to see if this is really a libtool program. + if (${SED} -e '4q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then + # If there is no directory component, then add one. + case $file in + */* | *\\*) . $file ;; + *) . ./$file ;; + esac + + # Transform arg to wrapped name. + file="$progdir/$program" + fi + ;; + esac + # Quote arguments (to preserve shell metacharacters). + file=`$echo "X$file" | $Xsed -e "$sed_quote_subst"` + args="$args \"$file\"" + done + + if test -z "$run"; then + if test -n "$shlibpath_var"; then + # Export the shlibpath_var. + eval "export $shlibpath_var" + fi + + # Restore saved environment variables + if test "${save_LC_ALL+set}" = set; then + LC_ALL="$save_LC_ALL"; export LC_ALL + fi + if test "${save_LANG+set}" = set; then + LANG="$save_LANG"; export LANG + fi + + # Now prepare to actually exec the command. + exec_cmd="\$cmd$args" + else + # Display what would be done. + if test -n "$shlibpath_var"; then + eval "\$echo \"\$shlibpath_var=\$$shlibpath_var\"" + $echo "export $shlibpath_var" + fi + $echo "$cmd$args" + exit $EXIT_SUCCESS + fi + ;; + + # libtool clean and uninstall mode + clean | uninstall) + modename="$modename: $mode" + rm="$nonopt" + files= + rmforce= + exit_status=0 + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic="$magic" + + for arg + do + case $arg in + -f) rm="$rm $arg"; rmforce=yes ;; + -*) rm="$rm $arg" ;; + *) files="$files $arg" ;; + esac + done + + if test -z "$rm"; then + $echo "$modename: you must specify an RM program" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + rmdirs= + + origobjdir="$objdir" + for file in $files; do + dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'` + if test "X$dir" = "X$file"; then + dir=. + objdir="$origobjdir" + else + objdir="$dir/$origobjdir" + fi + name=`$echo "X$file" | $Xsed -e 's%^.*/%%'` + test "$mode" = uninstall && objdir="$dir" + + # Remember objdir for removal later, being careful to avoid duplicates + if test "$mode" = clean; then + case " $rmdirs " in + *" $objdir "*) ;; + *) rmdirs="$rmdirs $objdir" ;; + esac + fi + + # Don't error if the file doesn't exist and rm -f was used. + if (test -L "$file") >/dev/null 2>&1 \ + || (test -h "$file") >/dev/null 2>&1 \ + || test -f "$file"; then + : + elif test -d "$file"; then + exit_status=1 + continue + elif test "$rmforce" = yes; then + continue + fi + + rmfiles="$file" + + case $name in + *.la) + # Possibly a libtool archive, so verify it. + if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then + . $dir/$name + + # Delete the libtool libraries and symlinks. 
+ for n in $library_names; do + rmfiles="$rmfiles $objdir/$n" + done + test -n "$old_library" && rmfiles="$rmfiles $objdir/$old_library" + test "$mode" = clean && rmfiles="$rmfiles $objdir/$name $objdir/${name}i" + + if test "$mode" = uninstall; then + if test -n "$library_names"; then + # Do each command in the postuninstall commands. + cmds=$postuninstall_cmds + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" + if test "$?" -ne 0 && test "$rmforce" != yes; then + exit_status=1 + fi + done + IFS="$save_ifs" + fi + + if test -n "$old_library"; then + # Do each command in the old_postuninstall commands. + cmds=$old_postuninstall_cmds + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" + if test "$?" -ne 0 && test "$rmforce" != yes; then + exit_status=1 + fi + done + IFS="$save_ifs" + fi + # FIXME: should reinstall the best remaining shared library. + fi + fi + ;; + + *.lo) + # Possibly a libtool object, so verify it. + if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then + + # Read the .lo file + . $dir/$name + + # Add PIC object to the list of files to remove. + if test -n "$pic_object" \ + && test "$pic_object" != none; then + rmfiles="$rmfiles $dir/$pic_object" + fi + + # Add non-PIC object to the list of files to remove. + if test -n "$non_pic_object" \ + && test "$non_pic_object" != none; then + rmfiles="$rmfiles $dir/$non_pic_object" + fi + fi + ;; + + *) + if test "$mode" = clean ; then + noexename=$name + case $file in + *.exe) + file=`$echo $file|${SED} 's,.exe$,,'` + noexename=`$echo $name|${SED} 's,.exe$,,'` + # $file with .exe has already been added to rmfiles, + # add $file without .exe + rmfiles="$rmfiles $file" + ;; + esac + # Do a test to see if this is a libtool program. + if (${SED} -e '4q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then + relink_command= + . $dir/$noexename + + # note $name still contains .exe if it was in $file originally + # as does the version of $file that was added into $rmfiles + rmfiles="$rmfiles $objdir/$name $objdir/${name}S.${objext}" + if test "$fast_install" = yes && test -n "$relink_command"; then + rmfiles="$rmfiles $objdir/lt-$name" + fi + if test "X$noexename" != "X$name" ; then + rmfiles="$rmfiles $objdir/lt-${noexename}.c" + fi + fi + fi + ;; + esac + $show "$rm $rmfiles" + $run $rm $rmfiles || exit_status=1 + done + objdir="$origobjdir" + + # Try to remove the ${objdir}s in the directories where we deleted files + for dir in $rmdirs; do + if test -d "$dir"; then + $show "rmdir $dir" + $run rmdir $dir >/dev/null 2>&1 + fi + done + + exit $exit_status + ;; + + "") + $echo "$modename: you must specify a MODE" 1>&2 + $echo "$generic_help" 1>&2 + exit $EXIT_FAILURE + ;; + esac + + if test -z "$exec_cmd"; then + $echo "$modename: invalid operation mode \`$mode'" 1>&2 + $echo "$generic_help" 1>&2 + exit $EXIT_FAILURE + fi +fi # test -z "$show_help" + +if test -n "$exec_cmd"; then + eval exec $exec_cmd + exit $EXIT_FAILURE +fi + +# We need to display help for each of the modes. +case $mode in +"") $echo \ +"Usage: $modename [OPTION]... [MODE-ARG]... + +Provide generalized library-building support services. 
+ + --config show all configuration variables + --debug enable verbose shell tracing +-n, --dry-run display commands without modifying any files + --features display basic configuration information and exit + --finish same as \`--mode=finish' + --help display this help message and exit + --mode=MODE use operation mode MODE [default=inferred from MODE-ARGS] + --quiet same as \`--silent' + --silent don't print informational messages + --tag=TAG use configuration variables from tag TAG + --version print version information + +MODE must be one of the following: + + clean remove files from the build directory + compile compile a source file into a libtool object + execute automatically set library path, then run a program + finish complete the installation of libtool libraries + install install libraries or executables + link create a library or an executable + uninstall remove libraries from an installed directory + +MODE-ARGS vary depending on the MODE. Try \`$modename --help --mode=MODE' for +a more detailed description of MODE. + +Report bugs to ." + exit $EXIT_SUCCESS + ;; + +clean) + $echo \ +"Usage: $modename [OPTION]... --mode=clean RM [RM-OPTION]... FILE... + +Remove files from the build directory. + +RM is the name of the program to use to delete files associated with each FILE +(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed +to RM. + +If FILE is a libtool library, object or program, all the files associated +with it are deleted. Otherwise, only FILE itself is deleted using RM." + ;; + +compile) + $echo \ +"Usage: $modename [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE + +Compile a source file into a libtool library object. + +This mode accepts the following additional options: + + -o OUTPUT-FILE set the output file name to OUTPUT-FILE + -prefer-pic try to building PIC objects only + -prefer-non-pic try to building non-PIC objects only + -static always build a \`.o' file suitable for static linking + +COMPILE-COMMAND is a command to be used in creating a \`standard' object file +from the given SOURCEFILE. + +The output file name is determined by removing the directory component from +SOURCEFILE, then substituting the C source code suffix \`.c' with the +library object suffix, \`.lo'." + ;; + +execute) + $echo \ +"Usage: $modename [OPTION]... --mode=execute COMMAND [ARGS]... + +Automatically set library path, then run a program. + +This mode accepts the following additional options: + + -dlopen FILE add the directory containing FILE to the library path + +This mode sets the library path environment variable according to \`-dlopen' +flags. + +If any of the ARGS are libtool executable wrappers, then they are translated +into their corresponding uninstalled binary, and any of their required library +directories are added to the library path. + +Then, COMMAND is executed, with ARGS as arguments." + ;; + +finish) + $echo \ +"Usage: $modename [OPTION]... --mode=finish [LIBDIR]... + +Complete the installation of libtool libraries. + +Each LIBDIR is a directory that contains libtool libraries. + +The commands that this mode executes may require superuser privileges. Use +the \`--dry-run' option if you just want to see what would be executed." + ;; + +install) + $echo \ +"Usage: $modename [OPTION]... --mode=install INSTALL-COMMAND... + +Install executables or libraries. + +INSTALL-COMMAND is the installation command. The first component should be +either the \`install' or \`cp' program. 
+ +The rest of the components are interpreted as arguments to that command (only +BSD-compatible install options are recognized)." + ;; + +link) + $echo \ +"Usage: $modename [OPTION]... --mode=link LINK-COMMAND... + +Link object files or libraries together to form another library, or to +create an executable program. + +LINK-COMMAND is a command using the C compiler that you would use to create +a program from several object files. + +The following components of LINK-COMMAND are treated specially: + + -all-static do not do any dynamic linking at all + -avoid-version do not add a version suffix if possible + -dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime + -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols + -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3) + -export-symbols SYMFILE + try to export only the symbols listed in SYMFILE + -export-symbols-regex REGEX + try to export only the symbols matching REGEX + -LLIBDIR search LIBDIR for required installed libraries + -lNAME OUTPUT-FILE requires the installed library libNAME + -module build a library that can dlopened + -no-fast-install disable the fast-install mode + -no-install link a not-installable executable + -no-undefined declare that a library does not refer to external symbols + -o OUTPUT-FILE create OUTPUT-FILE from the specified objects + -objectlist FILE Use a list of object files found in FILE to specify objects + -precious-files-regex REGEX + don't remove output files matching REGEX + -release RELEASE specify package release information + -rpath LIBDIR the created library will eventually be installed in LIBDIR + -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries + -static do not do any dynamic linking of libtool libraries + -version-info CURRENT[:REVISION[:AGE]] + specify library version info [each variable defaults to 0] + +All other options (arguments beginning with \`-') are ignored. + +Every other argument is treated as a filename. Files ending in \`.la' are +treated as uninstalled libtool libraries, other files are standard or library +object files. + +If the OUTPUT-FILE ends in \`.la', then a libtool library is created, +only library objects (\`.lo' files) may be specified, and \`-rpath' is +required, except when creating a convenience library. + +If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created +using \`ar' and \`ranlib', or on Windows using \`lib'. + +If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file +is created, otherwise an executable program is created." + ;; + +uninstall) + $echo \ +"Usage: $modename [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE... + +Remove libraries from an installation directory. + +RM is the name of the program to use to delete files associated with each FILE +(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed +to RM. + +If FILE is a libtool library, all the files associated with it are deleted. +Otherwise, only FILE itself is deleted using RM." + ;; + +*) + $echo "$modename: invalid operation mode \`$mode'" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + ;; +esac + +$echo +$echo "Try \`$modename --help' for more information about other modes." + +exit $? + +# The TAGs below are defined such that we never get into a situation +# in which we disable both kinds of libraries. 
Given conflicting +# choices, we go for a static library, that is the most portable, +# since we can't tell whether shared libraries were disabled because +# the user asked for that or because the platform doesn't support +# them. This is particularly important on AIX, because we don't +# support having both static and shared libraries enabled at the same +# time on that platform, so we default to a shared-only configuration. +# If a disable-shared tag is given, we'll fallback to a static-only +# configuration. But we'll never go from static-only to shared-only. + +# ### BEGIN LIBTOOL TAG CONFIG: disable-shared +build_libtool_libs=no +build_old_libs=yes +# ### END LIBTOOL TAG CONFIG: disable-shared + +# ### BEGIN LIBTOOL TAG CONFIG: disable-static +build_old_libs=`case $build_libtool_libs in yes) $echo no;; *) $echo yes;; esac` +# ### END LIBTOOL TAG CONFIG: disable-static + +# Local Variables: +# mode:shell-script +# sh-indentation:2 +# End: diff --git a/src/examples/pipes/missing b/src/examples/pipes/missing new file mode 100644 index 0000000..e7ef83a --- /dev/null +++ b/src/examples/pipes/missing @@ -0,0 +1,360 @@ +#! /bin/sh +# Common stub for a few missing GNU programs while installing. + +scriptversion=2003-09-02.23 + +# Copyright (C) 1996, 1997, 1999, 2000, 2002, 2003 +# Free Software Foundation, Inc. +# Originally by Fran,cois Pinard , 1996. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA +# 02111-1307, USA. + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +if test $# -eq 0; then + echo 1>&2 "Try \`$0 --help' for more information" + exit 1 +fi + +run=: + +# In the cases where this matters, `missing' is being run in the +# srcdir already. +if test -f configure.ac; then + configure_ac=configure.ac +else + configure_ac=configure.in +fi + +msg="missing on your system" + +case "$1" in +--run) + # Try to run requested program, and just exit if it succeeds. + run= + shift + "$@" && exit 0 + # Exit code 63 means version mismatch. This often happens + # when the user try to use an ancient version of a tool on + # a file that requires a minimum version. In this case we + # we should proceed has if the program had been absent, or + # if --run hadn't been passed. + if test $? = 63; then + run=: + msg="probably too old" + fi + ;; +esac + +# If it does not exist, or fails to run (possibly an outdated version), +# try to emulate it. +case "$1" in + + -h|--h|--he|--hel|--help) + echo "\ +$0 [OPTION]... PROGRAM [ARGUMENT]... + +Handle \`PROGRAM [ARGUMENT]...' for when PROGRAM is missing, or return an +error status if there is no known handling for PROGRAM. 
+ +Options: + -h, --help display this help and exit + -v, --version output version information and exit + --run try to run the given command, and emulate it if it fails + +Supported PROGRAM values: + aclocal touch file \`aclocal.m4' + autoconf touch file \`configure' + autoheader touch file \`config.h.in' + automake touch all \`Makefile.in' files + bison create \`y.tab.[ch]', if possible, from existing .[ch] + flex create \`lex.yy.c', if possible, from existing .c + help2man touch the output file + lex create \`lex.yy.c', if possible, from existing .c + makeinfo touch the output file + tar try tar, gnutar, gtar, then tar without non-portable flags + yacc create \`y.tab.[ch]', if possible, from existing .[ch] + +Send bug reports to ." + ;; + + -v|--v|--ve|--ver|--vers|--versi|--versio|--version) + echo "missing $scriptversion (GNU Automake)" + ;; + + -*) + echo 1>&2 "$0: Unknown \`$1' option" + echo 1>&2 "Try \`$0 --help' for more information" + exit 1 + ;; + + aclocal*) + if test -z "$run" && ($1 --version) > /dev/null 2>&1; then + # We have it, but it failed. + exit 1 + fi + + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified \`acinclude.m4' or \`${configure_ac}'. You might want + to install the \`Automake' and \`Perl' packages. Grab them from + any GNU archive site." + touch aclocal.m4 + ;; + + autoconf) + if test -z "$run" && ($1 --version) > /dev/null 2>&1; then + # We have it, but it failed. + exit 1 + fi + + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified \`${configure_ac}'. You might want to install the + \`Autoconf' and \`GNU m4' packages. Grab them from any GNU + archive site." + touch configure + ;; + + autoheader) + if test -z "$run" && ($1 --version) > /dev/null 2>&1; then + # We have it, but it failed. + exit 1 + fi + + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified \`acconfig.h' or \`${configure_ac}'. You might want + to install the \`Autoconf' and \`GNU m4' packages. Grab them + from any GNU archive site." + files=`sed -n 's/^[ ]*A[CM]_CONFIG_HEADER(\([^)]*\)).*/\1/p' ${configure_ac}` + test -z "$files" && files="config.h" + touch_files= + for f in $files; do + case "$f" in + *:*) touch_files="$touch_files "`echo "$f" | + sed -e 's/^[^:]*://' -e 's/:.*//'`;; + *) touch_files="$touch_files $f.in";; + esac + done + touch $touch_files + ;; + + automake*) + if test -z "$run" && ($1 --version) > /dev/null 2>&1; then + # We have it, but it failed. + exit 1 + fi + + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified \`Makefile.am', \`acinclude.m4' or \`${configure_ac}'. + You might want to install the \`Automake' and \`Perl' packages. + Grab them from any GNU archive site." + find . -type f -name Makefile.am -print | + sed 's/\.am$/.in/' | + while read f; do touch "$f"; done + ;; + + autom4te) + if test -z "$run" && ($1 --version) > /dev/null 2>&1; then + # We have it, but it failed. + exit 1 + fi + + echo 1>&2 "\ +WARNING: \`$1' is needed, but is $msg. + You might have modified some files without having the + proper tools for further handling them. + You can get \`$1' as part of \`Autoconf' from any GNU + archive site." + + file=`echo "$*" | sed -n 's/.*--output[ =]*\([^ ]*\).*/\1/p'` + test -z "$file" && file=`echo "$*" | sed -n 's/.*-o[ ]*\([^ ]*\).*/\1/p'` + if test -f "$file"; then + touch $file + else + test -z "$file" || exec >$file + echo "#! 
/bin/sh" + echo "# Created by GNU Automake missing as a replacement of" + echo "# $ $@" + echo "exit 0" + chmod +x $file + exit 1 + fi + ;; + + bison|yacc) + echo 1>&2 "\ +WARNING: \`$1' $msg. You should only need it if + you modified a \`.y' file. You may need the \`Bison' package + in order for those modifications to take effect. You can get + \`Bison' from any GNU archive site." + rm -f y.tab.c y.tab.h + if [ $# -ne 1 ]; then + eval LASTARG="\${$#}" + case "$LASTARG" in + *.y) + SRCFILE=`echo "$LASTARG" | sed 's/y$/c/'` + if [ -f "$SRCFILE" ]; then + cp "$SRCFILE" y.tab.c + fi + SRCFILE=`echo "$LASTARG" | sed 's/y$/h/'` + if [ -f "$SRCFILE" ]; then + cp "$SRCFILE" y.tab.h + fi + ;; + esac + fi + if [ ! -f y.tab.h ]; then + echo >y.tab.h + fi + if [ ! -f y.tab.c ]; then + echo 'main() { return 0; }' >y.tab.c + fi + ;; + + lex|flex) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified a \`.l' file. You may need the \`Flex' package + in order for those modifications to take effect. You can get + \`Flex' from any GNU archive site." + rm -f lex.yy.c + if [ $# -ne 1 ]; then + eval LASTARG="\${$#}" + case "$LASTARG" in + *.l) + SRCFILE=`echo "$LASTARG" | sed 's/l$/c/'` + if [ -f "$SRCFILE" ]; then + cp "$SRCFILE" lex.yy.c + fi + ;; + esac + fi + if [ ! -f lex.yy.c ]; then + echo 'main() { return 0; }' >lex.yy.c + fi + ;; + + help2man) + if test -z "$run" && ($1 --version) > /dev/null 2>&1; then + # We have it, but it failed. + exit 1 + fi + + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified a dependency of a manual page. You may need the + \`Help2man' package in order for those modifications to take + effect. You can get \`Help2man' from any GNU archive site." + + file=`echo "$*" | sed -n 's/.*-o \([^ ]*\).*/\1/p'` + if test -z "$file"; then + file=`echo "$*" | sed -n 's/.*--output=\([^ ]*\).*/\1/p'` + fi + if [ -f "$file" ]; then + touch $file + else + test -z "$file" || exec >$file + echo ".ab help2man is required to generate this page" + exit 1 + fi + ;; + + makeinfo) + if test -z "$run" && (makeinfo --version) > /dev/null 2>&1; then + # We have makeinfo, but it failed. + exit 1 + fi + + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified a \`.texi' or \`.texinfo' file, or any other file + indirectly affecting the aspect of the manual. The spurious + call might also be the consequence of using a buggy \`make' (AIX, + DU, IRIX). You might want to install the \`Texinfo' package or + the \`GNU make' package. Grab either from any GNU archive site." + file=`echo "$*" | sed -n 's/.*-o \([^ ]*\).*/\1/p'` + if test -z "$file"; then + file=`echo "$*" | sed 's/.* \([^ ]*\) *$/\1/'` + file=`sed -n '/^@setfilename/ { s/.* \([^ ]*\) *$/\1/; p; q; }' $file` + fi + touch $file + ;; + + tar) + shift + if test -n "$run"; then + echo 1>&2 "ERROR: \`tar' requires --run" + exit 1 + fi + + # We have already tried tar in the generic part. + # Look for gnutar/gtar before invocation to avoid ugly error + # messages. + if (gnutar --version > /dev/null 2>&1); then + gnutar "$@" && exit 0 + fi + if (gtar --version > /dev/null 2>&1); then + gtar "$@" && exit 0 + fi + firstarg="$1" + if shift; then + case "$firstarg" in + *o*) + firstarg=`echo "$firstarg" | sed s/o//` + tar "$firstarg" "$@" && exit 0 + ;; + esac + case "$firstarg" in + *h*) + firstarg=`echo "$firstarg" | sed s/h//` + tar "$firstarg" "$@" && exit 0 + ;; + esac + fi + + echo 1>&2 "\ +WARNING: I can't seem to be able to run \`tar' with the given arguments. 
+ You may want to install GNU tar or Free paxutils, or check the + command line arguments." + exit 1 + ;; + + *) + echo 1>&2 "\ +WARNING: \`$1' is needed, and is $msg. + You might have modified some files without having the + proper tools for further handling them. Check the \`README' file, + it often tells you about the needed prerequisites for installing + this package. You may also peek at any GNU archive site, in case + some other package would contain this missing \`$1' program." + exit 1 + ;; +esac + +exit 0 + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-end: "$" +# End: diff --git a/src/examples/python/WordCount.py b/src/examples/python/WordCount.py new file mode 100644 index 0000000..f666122 --- /dev/null +++ b/src/examples/python/WordCount.py @@ -0,0 +1,70 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from org.apache.hadoop.fs import Path +from org.apache.hadoop.io import * +from org.apache.hadoop.mapred import * + +import sys +import getopt + +class WordCountMap(Mapper, MapReduceBase): + one = IntWritable(1) + def map(self, key, value, output, reporter): + for w in value.toString().split(): + output.collect(Text(w), self.one) + +class Summer(Reducer, MapReduceBase): + def reduce(self, key, values, output, reporter): + sum = 0 + while values.hasNext(): + sum += values.next().get() + output.collect(key, IntWritable(sum)) + +def printUsage(code): + print "wordcount [-m ] [-r ] " + sys.exit(code) + +def main(args): + conf = JobConf(WordCountMap); + conf.setJobName("wordcount"); + + conf.setOutputKeyClass(Text); + conf.setOutputValueClass(IntWritable); + + conf.setMapperClass(WordCountMap); + conf.setCombinerClass(Summer); + conf.setReducerClass(Summer); + try: + flags, other_args = getopt.getopt(args[1:], "m:r:") + except getopt.GetoptError: + printUsage(1) + if len(other_args) != 2: + printUsage(1) + + for f,v in flags: + if f == "-m": + conf.setNumMapTasks(int(v)) + elif f == "-r": + conf.setNumReduceTasks(int(v)) + conf.setInputPath(Path(other_args[0])) + conf.setOutputPath(Path(other_args[1])) + JobClient.runJob(conf); + +if __name__ == "__main__": + main(sys.argv) diff --git a/src/examples/python/compile b/src/examples/python/compile new file mode 100644 index 0000000..a29b75f --- /dev/null +++ b/src/examples/python/compile @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +export HADOOP_HOME=../../.. 
+ +export CLASSPATH="$HADOOP_HOME/build/classes" + +# so that filenames w/ spaces are handled correctly in loops below +IFS= + +# add libs to CLASSPATH +for f in $HADOOP_HOME/lib/*.jar; do + CLASSPATH=${CLASSPATH}:$f; +done + +for f in $HADOOP_HOME/lib/jetty-ext/*.jar; do + CLASSPATH=${CLASSPATH}:$f; +done + +# restore ordinary behaviour +unset IFS +jythonc -p org.apache.hadoop.examples -d -j wc.jar -c WordCount.py diff --git a/src/examples/python/pyAbacus/JyAbacusWCPlugIN.py b/src/examples/python/pyAbacus/JyAbacusWCPlugIN.py new file mode 100644 index 0000000..bf3865f --- /dev/null +++ b/src/examples/python/pyAbacus/JyAbacusWCPlugIN.py @@ -0,0 +1,36 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from org.apache.hadoop.fs import Path +from org.apache.hadoop.io import * +from org.apache.hadoop.mapred import * + +from org.apache.hadoop.abacus import *; + +from java.util import *; + +import sys + +class AbacusWordCount(ValueAggregatorBaseDescriptor): + def generateKeyValPairs(self, key, val): + retv = ArrayList(); + for w in val.toString().split(): + en = ValueAggregatorBaseDescriptor.generateEntry(ValueAggregatorBaseDescriptor.LONG_VALUE_SUM, w, ValueAggregatorBaseDescriptor.ONE); + retv.add(en); + return retv; + diff --git a/src/examples/python/pyAbacus/JythonAbacus.py b/src/examples/python/pyAbacus/JythonAbacus.py new file mode 100644 index 0000000..655b381 --- /dev/null +++ b/src/examples/python/pyAbacus/JythonAbacus.py @@ -0,0 +1,82 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from org.apache.hadoop.fs import Path +from org.apache.hadoop.io import * +from org.apache.hadoop.mapred import * + +from org.apache.hadoop.abacus import * + +from java.util import *; + +import sys + +class AbacusMapper(ValueAggregatorMapper): + def map(self, key, value, output, reporter): + ValueAggregatorMapper.map(self, key, value, output, reporter); + +class AbacusReducer(ValueAggregatorReducer): + def reduce(self, key, values, output, reporter): + ValueAggregatorReducer.reduce(self, key, values, output, reporter); + +class AbacusCombiner(ValueAggregatorCombiner): + def reduce(self, key, values, output, reporter): + ValueAggregatorCombiner.reduce(self, key, values, output, reporter); + +def printUsage(code): + print "Abacus " + sys.exit(code) + +def main(args): + if len(args) < 6: + printUsage(1); + + inDir = args[1]; + outDir = args[2]; + numOfReducers = int(args[3]); + theInputFormat = args[4]; + specFile = args[5]; + + print "numOfReducers: ", numOfReducers, "theInputFormat: ", theInputFormat, "specFile: ", specFile + + conf = JobConf(AbacusMapper); + conf.setJobName("recordcount"); + conf.addDefaultResource(Path(specFile)); + + if theInputFormat=="textinputformat": + conf.setInputFormat(TextInputFormat); + else: + conf.setInputFormat(SequenceFileInputFormat); + conf.setOutputFormat(TextOutputFormat); + conf.setMapOutputKeyClass(Text); + conf.setMapOutputValueClass(Text); + conf.setOutputKeyClass(Text); + conf.setOutputValueClass(Text); + conf.setNumMapTasks(1); + conf.setNumReduceTasks(numOfReducers); + + conf.setMapperClass(AbacusMapper); + conf.setCombinerClass(AbacusCombiner); + conf.setReducerClass(AbacusReducer); + conf.setInputPath(Path(args[1])) + conf.setOutputPath(Path(args[2])) + + JobClient.runJob(conf); + +if __name__ == "__main__": + main(sys.argv) diff --git a/src/examples/python/pyAbacus/compile b/src/examples/python/pyAbacus/compile new file mode 100644 index 0000000..b37fd41 --- /dev/null +++ b/src/examples/python/pyAbacus/compile @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +export HADOOP_HOME=../../../../.. + +export CLASSPATH="$HADOOP_HOME/build/classes" +export CLASSPATH=${CLASSPATH}:"$HADOOP_HOME/build/contrib/abacus/classes" + +# so that filenames w/ spaces are handled correctly in loops below +IFS= + +# add libs to CLASSPATH +for f in $HADOOP_HOME/lib/*.jar; do + CLASSPATH=${CLASSPATH}:$f; +done + +for f in $HADOOP_HOME/lib/jetty-ext/*.jar; do + CLASSPATH=${CLASSPATH}:$f; +done + +# restore ordinary behaviour +unset IFS +jythonc -p org.apache.hadoop.abacus.examples -d -j jwc.jar -c JythonAbacus.py JyAbacusWCPlugIN.py + +jar -uvf jwc.jar -C $HADOOP_HOME/build/contrib/abacus/classes . + diff --git a/src/examples/python/pyAbacus/wordcountaggregator.spec b/src/examples/python/pyAbacus/wordcountaggregator.spec new file mode 100644 index 0000000..5270a65 --- /dev/null +++ b/src/examples/python/pyAbacus/wordcountaggregator.spec @@ -0,0 +1,15 @@ + + + + + + + aggregator.descriptor.num + 1 + + + + aggregator.descriptor.0 + UserDefined,org.apache.hadoop.abacus.examples.JyAbacusWCPlugIN$AbacusWordCount +o + diff --git a/src/hdfs/hdfs-default.xml b/src/hdfs/hdfs-default.xml new file mode 100644 index 0000000..b4096ff --- /dev/null +++ b/src/hdfs/hdfs-default.xml @@ -0,0 +1,399 @@ + + + + + + + + + + + dfs.namenode.logging.level + info + The logging level for dfs namenode. Other values are "dir"(trac +e namespace mutations), "block"(trace block under/over replications and block +creations/deletions), or "all". 
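The keys documented in hdfs-default.xml are ordinary Hadoop configuration properties, so client code reads them through org.apache.hadoop.conf.Configuration like any other setting, and cluster-specific overrides normally go in hdfs-site.xml. The short sketch below is illustrative only and is not part of this patch: the class name HdfsConfProbe is made up, it assumes hadoop-core is on the classpath, and the fallback values passed to the getters simply mirror the defaults listed in this file.

import org.apache.hadoop.conf.Configuration;

public class HdfsConfProbe {
  public static void main(String[] args) {
    // Loads core-default.xml plus any *-site.xml overrides found on the classpath.
    Configuration conf = new Configuration();

    // Fallbacks mirror the documented defaults, in case hdfs-default.xml is not loaded here.
    int replication = conf.getInt("dfs.replication", 3);
    long blockSize  = conf.getLong("dfs.block.size", 67108864L);   // 64 MB
    boolean perms   = conf.getBoolean("dfs.permissions", true);

    System.out.println("dfs.replication=" + replication
        + " dfs.block.size=" + blockSize
        + " dfs.permissions=" + perms);
  }
}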
+ + + + dfs.secondary.http.address + 0.0.0.0:50090 + + The secondary namenode http server address and port. + If the port is 0 then the server will start on a free port. + + + + + dfs.datanode.address + 0.0.0.0:50010 + + The address where the datanode server will listen to. + If the port is 0 then the server will start on a free port. + + + + + dfs.datanode.http.address + 0.0.0.0:50075 + + The datanode http server address and port. + If the port is 0 then the server will start on a free port. + + + + + dfs.datanode.ipc.address + 0.0.0.0:50020 + + The datanode ipc server address and port. + If the port is 0 then the server will start on a free port. + + + + + dfs.datanode.handler.count + 3 + The number of server threads for the datanode. + + + + dfs.http.address + 0.0.0.0:50070 + + The address and the base port where the dfs namenode web ui will listen on. + If the port is 0 then the server will start on a free port. + + + + + dfs.https.enable + false + Decide if HTTPS(SSL) is supported on HDFS + + + + + dfs.https.need.client.auth + false + Whether SSL client certificate authentication is required + + + + + dfs.https.server.keystore.resource + ssl-server.xml + Resource file from which ssl server keystore + information will be extracted + + + + + dfs.https.client.keystore.resource + ssl-client.xml + Resource file from which ssl client keystore + information will be extracted + + + + + dfs.datanode.https.address + 0.0.0.0:50475 + + + + dfs.https.address + 0.0.0.0:50470 + + + + dfs.datanode.dns.interface + default + The name of the Network Interface from which a data node should + report its IP address. + + + + + dfs.datanode.dns.nameserver + default + The host name or IP address of the name server (DNS) + which a DataNode should use to determine the host name used by the + NameNode for communication and display purposes. + + + + + + + dfs.replication.considerLoad + true + Decide if chooseTarget considers the target's load or not + + + + dfs.default.chunk.view.size + 32768 + The number of bytes to view for a file on the browser. + + + + + dfs.datanode.du.reserved + 0 + Reserved space in bytes per volume. Always leave this much space free for non dfs use. + + + + + dfs.name.dir + ${hadoop.tmp.dir}/dfs/name + Determines where on the local filesystem the DFS name node + should store the name table(fsimage). If this is a comma-delimited list + of directories then the name table is replicated in all of the + directories, for redundancy. + + + + dfs.name.edits.dir + ${dfs.name.dir} + Determines where on the local filesystem the DFS name node + should store the transaction (edits) file. If this is a comma-delimited list + of directories then the transaction file is replicated in all of the + directories, for redundancy. Default value is same as dfs.name.dir + + + + dfs.web.ugi + webuser,webgroup + The user account used by the web interface. + Syntax: USERNAME,GROUP1,GROUP2, ... + + + + + dfs.permissions + true + + If "true", enable permission checking in HDFS. + If "false", permission checking is turned off, + but all other behavior is unchanged. + Switching from one parameter value to the other does not change the mode, + owner or group of files or directories. + + + + + dfs.permissions.supergroup + supergroup + The name of the group of super-users. + + + + dfs.data.dir + ${hadoop.tmp.dir}/dfs/data + Determines where on the local filesystem an DFS data node + should store its blocks. 
If this is a comma-delimited + list of directories, then data will be stored in all named + directories, typically on different devices. + Directories that do not exist are ignored. + + + + + dfs.replication + 3 + Default block replication. + The actual number of replications can be specified when the file is created. + The default is used if replication is not specified in create time. + + + + + dfs.replication.max + 512 + Maximal block replication. + + + + + dfs.replication.min + 1 + Minimal block replication. + + + + + dfs.block.size + 67108864 + The default block size for new files. + + + + dfs.df.interval + 60000 + Disk usage statistics refresh interval in msec. + + + + dfs.client.block.write.retries + 3 + The number of retries for writing blocks to the data nodes, + before we signal failure to the application. + + + + + dfs.blockreport.intervalMsec + 3600000 + Determines block reporting interval in milliseconds. + + + + dfs.blockreport.initialDelay 0 + Delay for first block report in seconds. + + + + dfs.heartbeat.interval + 3 + Determines datanode heartbeat interval in seconds. + + + + dfs.namenode.handler.count + 10 + The number of server threads for the namenode. + + + + dfs.safemode.threshold.pct + 0.999f + + Specifies the percentage of blocks that should satisfy + the minimal replication requirement defined by dfs.replication.min. + Values less than or equal to 0 mean not to start in safe mode. + Values greater than 1 will make safe mode permanent. + + + + + dfs.safemode.extension + 30000 + + Determines extension of safe mode in milliseconds + after the threshold level is reached. + + + + + dfs.balance.bandwidthPerSec + 1048576 + + Specifies the maximum amount of bandwidth that each datanode + can utilize for the balancing purpose in term of + the number of bytes per second. + + + + + dfs.hosts + + Names a file that contains a list of hosts that are + permitted to connect to the namenode. The full pathname of the file + must be specified. If the value is empty, all hosts are + permitted. + + + + dfs.hosts.exclude + + Names a file that contains a list of hosts that are + not permitted to connect to the namenode. The full pathname of the + file must be specified. If the value is empty, no hosts are + excluded. + + + + dfs.max.objects + 0 + The maximum number of files, directories and blocks + dfs supports. A value of zero indicates no limit to the number + of objects that dfs supports. + + + + + dfs.namenode.decommission.interval + 30 + Namenode periodicity in seconds to check if decommission is + complete. + + + + dfs.namenode.decommission.nodes.per.interval + 5 + The number of nodes namenode checks if decommission is complete + in each dfs.namenode.decommission.interval. + + + + dfs.replication.interval + 3 + The periodicity in seconds with which the namenode computes + repliaction work for datanodes. + + + + dfs.access.time.precision + 3600000 + The access time for HDFS file is precise upto this value. + The default value is 1 hour. Setting a value of 0 disables + access times for HDFS. + + + + + dfs.support.append + false + Does HDFS allow appends to files? + This is currently set to false because there are bugs in the + "append code" and is not supported in any prodction cluster. + + + + + dfs.namenode.support.allowformat + true + Does HDFS namenode allow itself to be formatted? + You should consider setting this to false for any production + cluster, to avoid any possibility of formatting a running DFS. 
+ + + + + dfs.image.compress + false + Should the dfs image be compressed? + + + + + dfs.image.compression.codec + org.apache.hadoop.io.compress.DefaultCodec + If the dfs image is compressed, how should they be compressed? + This codec has to be one that's defined in io.compression.codecs + + + + + dfs.image.transfer.bandwidthPerSec + 0 + + Specifies the maximum amount of bandwidth that can be utilized for + image transfer in term of the number of bytes per second. + A default value of 0 indicates that throttling is disabled. + + + + diff --git a/src/hdfs/org/apache/hadoop/hdfs/ChecksumDistributedFileSystem.java b/src/hdfs/org/apache/hadoop/hdfs/ChecksumDistributedFileSystem.java new file mode 100644 index 0000000..8862063 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/ChecksumDistributedFileSystem.java @@ -0,0 +1,129 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs; + +import java.io.*; +import java.net.*; + +import org.apache.hadoop.fs.*; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction; +import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; +import org.apache.hadoop.conf.Configuration; + +/** + * An implementation of ChecksumFileSystem over DistributedFileSystem. + * Note that as of now (May 07), DistributedFileSystem natively checksums + * all of its data. Using this class is not be necessary in most cases. + * Currently provided mainly for backward compatibility and testing. + */ +public class ChecksumDistributedFileSystem extends ChecksumFileSystem { + + public ChecksumDistributedFileSystem() { + super( new DistributedFileSystem() ); + } + + /** @deprecated */ + public ChecksumDistributedFileSystem(InetSocketAddress namenode, + Configuration conf) throws IOException { + super( new DistributedFileSystem(namenode, conf) ); + } + + /** Any extra interface that DistributeFileSystem provides can be + * accessed with this.*/ + DistributedFileSystem getDFS() { + return (DistributedFileSystem)fs; + } + + /** Return the total raw capacity of the filesystem, disregarding + * replication .*/ + public long getRawCapacity() throws IOException{ + return getDFS().getRawCapacity(); + } + + /** Return the total raw used space in the filesystem, disregarding + * replication .*/ + public long getRawUsed() throws IOException{ + return getDFS().getRawUsed(); + } + + /** Return statistics for each datanode. */ + public DatanodeInfo[] getDataNodeStats() throws IOException { + return getDFS().getDataNodeStats(); + } + + /** + * Enter, leave or get safe mode. 
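+ * The action argument is one of FSConstants.SafeModeAction.SAFEMODE_ENTER,
+ * SAFEMODE_LEAVE or SAFEMODE_GET.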
+ * + * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode(FSConstants.SafeModeAction) + */ + public boolean setSafeMode(FSConstants.SafeModeAction action) + throws IOException { + return getDFS().setSafeMode(action); + } + + /* + * Refreshes the list of hosts and excluded hosts from the configured + * files. + */ + public void refreshNodes() throws IOException { + getDFS().refreshNodes(); + } + + /** + * Finalize previously upgraded files system state. + */ + public void finalizeUpgrade() throws IOException { + getDFS().finalizeUpgrade(); + } + + public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action + ) throws IOException { + return getDFS().distributedUpgradeProgress(action); + } + + /* + * Dumps dfs data structures into specified file. + */ + public void metaSave(String pathname) throws IOException { + getDFS().metaSave(pathname); + } + + /** + * We need to find the blocks that didn't match. Likely only one + * is corrupt but we will report both to the namenode. In the future, + * we can consider figuring out exactly which block is corrupt. + */ + public boolean reportChecksumFailure(Path f, + FSDataInputStream in, long inPos, + FSDataInputStream sums, long sumsPos) { + return getDFS().reportChecksumFailure(f, in, inPos, sums, sumsPos); + } + + + /** + * Returns the stat information about the file. + */ + @Override + public FileStatus getFileStatus(Path f) throws IOException { + return getDFS().getFileStatus(f); + } + +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java b/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java new file mode 100644 index 0000000..712184d --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java @@ -0,0 +1,3736 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs; + +import org.apache.hadoop.io.*; +import org.apache.hadoop.io.retry.RetryPolicies; +import org.apache.hadoop.io.retry.RetryPolicy; +import org.apache.hadoop.io.retry.RetryProxy; +import org.apache.hadoop.fs.*; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.ipc.*; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.net.NodeBase; +import org.apache.hadoop.conf.*; +import org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus; +import org.apache.hadoop.hdfs.protocol.*; +import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; +import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UnixUserGroupInformation; +import org.apache.hadoop.util.*; +import org.apache.hadoop.ipc.Client; + +import org.apache.commons.logging.*; + +import java.io.*; +import java.net.*; +import java.util.*; +import java.util.zip.CRC32; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.ConcurrentHashMap; +import java.nio.BufferOverflowException; +import java.nio.ByteBuffer; + +import javax.net.SocketFactory; +import javax.security.auth.login.LoginException; + +/******************************************************** + * DFSClient can connect to a Hadoop Filesystem and + * perform basic file tasks. It uses the ClientProtocol + * to communicate with a NameNode daemon, and connects + * directly to DataNodes to read/write block data. + * + * Hadoop DFS users should obtain an instance of + * DistributedFileSystem, which uses DFSClient to handle + * filesystem tasks. + * + ********************************************************/ +public class DFSClient implements FSConstants, java.io.Closeable { + public static final Log LOG = LogFactory.getLog(DFSClient.class); + public static final int MAX_BLOCK_ACQUIRE_FAILURES = 3; + private static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB + public ClientProtocol namenode; + private final ClientProtocol rpcNamenode; + final UnixUserGroupInformation ugi; + volatile boolean clientRunning = true; + Random r = new Random(); + final String clientName; + final LeaseChecker leasechecker = new LeaseChecker(); + private Configuration conf; + private long defaultBlockSize; + private short defaultReplication; + private SocketFactory socketFactory; + private int socketTimeout; + private int datanodeWriteTimeout; + final int writePacketSize; + private final FileSystem.Statistics stats; + private int maxBlockAcquireFailures; + private final int hdfsTimeout; // timeout value for a DFS operation. + private long namenodeVersion = ClientProtocol.versionID; + + /** + * The locking hierarchy is to first acquire lock on DFSClient object, followed by + * lock on leasechecker, followed by lock on an individual DFSOutputStream. 
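+ * Acquiring them in the reverse order risks deadlock, so a thread holding a
+ * DFSOutputStream lock must not try to take the leasechecker or DFSClient lock.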
+ */ + public static ClientProtocol createNamenode(Configuration conf) throws IOException { + return createNamenode(NameNode.getAddress(conf), conf); + } + + public static ClientProtocol createNamenode( InetSocketAddress nameNodeAddr, + Configuration conf) throws IOException { + try { + return createNamenode(createRPCNamenode(nameNodeAddr, conf, + UnixUserGroupInformation.login(conf, true))); + } catch (LoginException e) { + throw (IOException)(new IOException().initCause(e)); + } + } + + /** + * Create a NameNode proxy for the client if the client and NameNode + * are compatible + * + * @param nameNodeAddr NameNode address + * @param conf configuration + * @param ugi ticket + * @return a NameNode proxy that's compatible with the client + */ + private ClientProtocol createRPCNamenodeIfCompatible( + InetSocketAddress nameNodeAddr, + Configuration conf, + UnixUserGroupInformation ugi) throws IOException { + try { + return createRPCNamenode(nameNodeAddr, conf, ugi); + } catch (RPC.VersionMismatch e) { + long clientVersion = e.getClientVersion(); + namenodeVersion = e.getServerVersion(); + if (clientVersion > namenodeVersion && + !ProtocolCompatible.isCompatibleClientProtocol( + clientVersion, namenodeVersion)) { + throw new RPC.VersionIncompatible( + ClientProtocol.class.getName(), clientVersion, namenodeVersion); + } + return (ClientProtocol)e.getProxy(); + } + } + + private static ClientProtocol createRPCNamenode(InetSocketAddress nameNodeAddr, + Configuration conf, UnixUserGroupInformation ugi) + throws IOException { + return (ClientProtocol)RPC.getProxy(ClientProtocol.class, + ClientProtocol.versionID, nameNodeAddr, ugi, conf, + NetUtils.getSocketFactory(conf, ClientProtocol.class)); + } + + private static ClientProtocol createNamenode(ClientProtocol rpcNamenode) + throws IOException { + RetryPolicy createPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep( + 5, LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS); + + Map,RetryPolicy> remoteExceptionToPolicyMap = + new HashMap, RetryPolicy>(); + remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class, createPolicy); + + Map,RetryPolicy> exceptionToPolicyMap = + new HashMap, RetryPolicy>(); + exceptionToPolicyMap.put(RemoteException.class, + RetryPolicies.retryByRemoteException( + RetryPolicies.TRY_ONCE_THEN_FAIL, remoteExceptionToPolicyMap)); + RetryPolicy methodPolicy = RetryPolicies.retryByException( + RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap); + Map methodNameToPolicyMap = new HashMap(); + + methodNameToPolicyMap.put("create", methodPolicy); + + return (ClientProtocol) RetryProxy.create(ClientProtocol.class, + rpcNamenode, methodNameToPolicyMap); + } + + static ClientDatanodeProtocol createClientDatanodeProtocolProxy ( + DatanodeID datanodeid, Configuration conf) throws IOException { + InetSocketAddress addr = NetUtils.createSocketAddr( + datanodeid.getHost() + ":" + datanodeid.getIpcPort()); + if (ClientDatanodeProtocol.LOG.isDebugEnabled()) { + ClientDatanodeProtocol.LOG.info("ClientDatanodeProtocol addr=" + addr); + } + try { + return (ClientDatanodeProtocol)RPC.getProxy(ClientDatanodeProtocol.class, + ClientDatanodeProtocol.versionID, addr, conf); + } catch (RPC.VersionMismatch e) { + long clientVersion = e.getClientVersion(); + long datanodeVersion = e.getServerVersion(); + if (clientVersion > datanodeVersion && + !ProtocolCompatible.isCompatibleClientDatanodeProtocol(clientVersion, + datanodeVersion)) { + throw new RPC.VersionIncompatible(ClientDatanodeProtocol.class.getName(), + clientVersion, 
datanodeVersion); + } + return (ClientDatanodeProtocol)e.getProxy(); + } + } + + /** + * Same as this(NameNode.getAddress(conf), conf); + * @see #DFSClient(InetSocketAddress, Configuration) + */ + public DFSClient(Configuration conf) throws IOException { + this(NameNode.getAddress(conf), conf); + } + + /** + * Same as this(nameNodeAddr, conf, null); + * @see #DFSClient(InetSocketAddress, Configuration, org.apache.hadoop.fs.FileSystem.Statistics) + */ + public DFSClient(InetSocketAddress nameNodeAddr, Configuration conf + ) throws IOException { + this(nameNodeAddr, conf, null); + } + + /** + * Same as this(nameNodeAddr, null, conf, stats); + * @see #DFSClient(InetSocketAddress, ClientProtocol, Configuration, org.apache.hadoop.fs.FileSystem.Statistics) + */ + public DFSClient(InetSocketAddress nameNodeAddr, Configuration conf, + FileSystem.Statistics stats) + throws IOException { + this(nameNodeAddr, null, conf, stats); + } + + /** + * Create a new DFSClient connected to the given nameNodeAddr or rpcNamenode. + * Exactly one of nameNodeAddr or rpcNamenode must be null. + */ + DFSClient(InetSocketAddress nameNodeAddr, ClientProtocol rpcNamenode, + Configuration conf, FileSystem.Statistics stats) + throws IOException { + this.conf = conf; + this.stats = stats; + this.socketTimeout = conf.getInt("dfs.socket.timeout", + HdfsConstants.READ_TIMEOUT); + this.datanodeWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout", + HdfsConstants.WRITE_TIMEOUT); + this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class); + // dfs.write.packet.size is an internal config variable + this.writePacketSize = conf.getInt("dfs.write.packet.size", 64*1024); + this.maxBlockAcquireFailures = + conf.getInt("dfs.client.max.block.acquire.failures", + MAX_BLOCK_ACQUIRE_FAILURES); + + // The hdfsTimeout is currently the same as the ipc timeout + this.hdfsTimeout = Client.getTimeout(conf); + + try { + this.ugi = UnixUserGroupInformation.login(conf, true); + } catch (LoginException e) { + throw (IOException)(new IOException().initCause(e)); + } + + String taskId = conf.get("mapred.task.id"); + if (taskId != null) { + this.clientName = "DFSClient_" + taskId; + } else { + this.clientName = "DFSClient_" + r.nextInt(); + } + defaultBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE); + defaultReplication = (short) conf.getInt("dfs.replication", 3); + + if (nameNodeAddr != null && rpcNamenode == null) { + this.rpcNamenode = createRPCNamenodeIfCompatible(nameNodeAddr, conf, ugi); + this.namenode = createNamenode(this.rpcNamenode); + } else if (nameNodeAddr == null && rpcNamenode != null) { + //This case is used for testing. + this.namenode = this.rpcNamenode = rpcNamenode; + } else { + throw new IllegalArgumentException( + "Expecting exactly one of nameNodeAddr and rpcNamenode being null: " + + "nameNodeAddr=" + nameNodeAddr + ", rpcNamenode=" + rpcNamenode); + } + } + + private void checkOpen() throws IOException { + if (!clientRunning) { + IOException result = new IOException("Filesystem closed"); + throw result; + } + } + + // + // If the call stack does not have FsShell.delete(), then invoke + // FsShell.delete. This ensures that files always goes thru Trash. + // Returns 0 if the file is successfully deleted by this method, + // Returns -1 if the file is not being deleted by this method + // Returns 1 if this method tried deleting the file but failed. 
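+ // The check below scans the current stack trace for the class named by
+ // fs.shell.delete.classname; if that class is absent and the path is not under /tmp,
+ // the delete is re-routed through FsShell so that it goes via Trash.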
+ // + private int deleteUsingTrash(String file, boolean recursive) throws IOException { + + // The configuration parameter specifies the class name to match. + // Typically, this is set to org.apache.hadoop.fs.FsShell.delete + String className = conf.get("fs.shell.delete.classname"); + if (className == null) { + className = "org.apache.hadoop.fs.FsShell.delete"; + } + + // find the stack trace of this thread + StringWriter str = new StringWriter(); + PrintWriter pr = new PrintWriter(str); + try { + throw new Throwable(); + } catch (Throwable t) { + t.printStackTrace(pr); + } + + // if the specified class does not appear in the calling thread's + // stack trace, and if this file is not in "/tmp", + // then invoke FsShell.delete() + if (str.toString().indexOf(className) == -1 && + file.indexOf("/tmp") != 0) { + String errmsg = "File " + file + " is beng deleted only through Trash " + + className + + " because all deletes must go through Trash."; + LOG.warn(errmsg); + FsShell fh = new FsShell(conf); + Path p = new Path(file); + fh.init(); + try { + fh.delete(p, p.getFileSystem(conf), recursive, false); + return 0; // successful deletion + } catch (RemoteException rex) { + throw rex.unwrapRemoteException(AccessControlException.class); + } catch (AccessControlException ace) { + throw ace; + } catch (IOException e) { + return 1; // deletion unsuccessful + } + } + return -1; // deletion not attempted + } + + /** + * Close the file system, abandoning all of the leases and files being + * created and close connections to the namenode. + */ + public synchronized void close() throws IOException { + if(clientRunning) { + leasechecker.close(); + clientRunning = false; + try { + leasechecker.interruptAndJoin(); + } catch (InterruptedException ie) { + } + + // close connections to the namenode + RPC.stopProxy(rpcNamenode); + } + } + + /** + * Get the default block size for this cluster + * @return the default block size in bytes + */ + public long getDefaultBlockSize() { + return defaultBlockSize; + } + + public long getBlockSize(String f) throws IOException { + try { + return namenode.getPreferredBlockSize(f); + } catch (IOException ie) { + LOG.warn("Problem getting block size: " + + StringUtils.stringifyException(ie)); + throw ie; + } + } + + /** + * Report corrupt blocks that were discovered by the client. + */ + public void reportBadBlocks(LocatedBlock[] blocks) throws IOException { + namenode.reportBadBlocks(blocks); + } + + public short getDefaultReplication() { + return defaultReplication; + } + + /** + * @deprecated Use getBlockLocations instead + * + * Get hints about the location of the indicated block(s). + * + * getHints() returns a list of hostnames that store data for + * a specific file region. It returns a set of hostnames for + * every block within the indicated region. + * + * This function is very useful when writing code that considers + * data-placement when performing operations. For example, the + * MapReduce system tries to schedule tasks on the same machines + * as the data-block the task processes. 
+ */ + @Deprecated + public String[][] getHints(String src, long start, long length) + throws IOException { + BlockLocation[] blkLocations = getBlockLocations(src, start, length); + if ((blkLocations == null) || (blkLocations.length == 0)) { + return new String[0][]; + } + int blkCount = blkLocations.length; + String[][]hints = new String[blkCount][]; + for (int i=0; i < blkCount ; i++) { + String[] hosts = blkLocations[i].getHosts(); + hints[i] = new String[hosts.length]; + hints[i] = hosts; + } + return hints; + } + + private static LocatedBlocks callGetBlockLocations(ClientProtocol namenode, + String src, long start, long length) throws IOException { + try { + return namenode.getBlockLocations(src, start, length); + } catch(RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + FileNotFoundException.class); + } + } + + /** + * Get block location info about file + * + * getBlockLocations() returns a list of hostnames that store + * data for a specific file region. It returns a set of hostnames + * for every block within the indicated region. + * + * This function is very useful when writing code that considers + * data-placement when performing operations. For example, the + * MapReduce system tries to schedule tasks on the same machines + * as the data-block the task processes. + */ + public BlockLocation[] getBlockLocations(String src, long start, + long length) throws IOException { + LocatedBlocks blocks = callGetBlockLocations(namenode, src, start, length); + return DFSUtil.locatedBlocks2Locations(blocks); + } + + public DFSInputStream open(String src) throws IOException { + return open(src, conf.getInt("io.file.buffer.size", 4096), true, null); + } + + /** + * Create an input stream that obtains a nodelist from the + * namenode, and then reads from all the right places. Creates + * inner subclass of InputStream that does the right out-of-band + * work. + */ + DFSInputStream open(String src, int buffersize, boolean verifyChecksum, + FileSystem.Statistics stats + ) throws IOException { + checkOpen(); + // Get block info from namenode + return new DFSInputStream(src, buffersize, verifyChecksum); + } + + /** + * Create a new dfs file and return an output stream for writing into it. + * + * @param src stream name + * @param overwrite do not check for file existence if true + * @return output stream + * @throws IOException + */ + public OutputStream create(String src, + boolean overwrite + ) throws IOException { + return create(src, overwrite, defaultReplication, defaultBlockSize, null); + } + + /** + * Create a new dfs file and return an output stream for writing into it + * with write-progress reporting. + * + * @param src stream name + * @param overwrite do not check for file existence if true + * @return output stream + * @throws IOException + */ + public OutputStream create(String src, + boolean overwrite, + Progressable progress + ) throws IOException { + return create(src, overwrite, defaultReplication, defaultBlockSize, null); + } + + /** + * Create a new dfs file with the specified block replication + * and return an output stream for writing into the file. 
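+   * <p>A minimal sketch (path, replication and block size are illustrative):
+   * <pre>
+   *   OutputStream out = client.create("/user/foo/data", true,
+   *                                    (short) 3, 64L * 1024 * 1024);
+   *   out.write(bytes);
+   *   out.close();
+   * </pre>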
+ * + * @param src stream name + * @param overwrite do not check for file existence if true + * @param replication block replication + * @return output stream + * @throws IOException + */ + public OutputStream create(String src, + boolean overwrite, + short replication, + long blockSize + ) throws IOException { + return create(src, overwrite, replication, blockSize, null); + } + + + /** + * Create a new dfs file with the specified block replication + * with write-progress reporting and return an output stream for writing + * into the file. + * + * @param src stream name + * @param overwrite do not check for file existence if true + * @param replication block replication + * @return output stream + * @throws IOException + */ + public OutputStream create(String src, + boolean overwrite, + short replication, + long blockSize, + Progressable progress + ) throws IOException { + return create(src, overwrite, replication, blockSize, progress, + conf.getInt("io.file.buffer.size", 4096)); + } + /** + * Call + * {@link #create(String,FsPermission,boolean,short,long,Progressable,int)} + * with default permission. + * @see FsPermission#getDefault() + */ + public OutputStream create(String src, + boolean overwrite, + short replication, + long blockSize, + Progressable progress, + int buffersize + ) throws IOException { + return create(src, FsPermission.getDefault(), + overwrite, replication, blockSize, progress, buffersize); + } + /** + * Create a new dfs file with the specified block replication + * with write-progress reporting and return an output stream for writing + * into the file. + * + * @param src stream name + * @param permission The permission of the directory being created. + * If permission == null, use {@link FsPermission#getDefault()}. + * @param overwrite do not check for file existence if true + * @param replication block replication + * @return output stream + * @throws IOException + * @see ClientProtocol#create(String, FsPermission, String, boolean, short, long) + */ + public OutputStream create(String src, + FsPermission permission, + boolean overwrite, + short replication, + long blockSize, + Progressable progress, + int buffersize + ) throws IOException { + checkOpen(); + if (permission == null) { + permission = FsPermission.getDefault(); + } + FsPermission masked = permission.applyUMask(FsPermission.getUMask(conf)); + LOG.debug(src + ": masked=" + masked); + OutputStream result = new DFSOutputStream(src, masked, + overwrite, replication, blockSize, progress, buffersize, + conf.getInt("io.bytes.per.checksum", 512)); + leasechecker.put(src, result); + return result; + } + + /** + * Append to an existing HDFS file. 
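+   * <p>A minimal sketch (path and buffer size are illustrative):
+   * <pre>
+   *   OutputStream out = client.append("/logs/app.log", 4096, null);
+   *   out.write(moreBytes);
+   *   out.close();
+   * </pre>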
+ * + * @param src file name + * @param buffersize buffer size + * @param progress for reporting write-progress + * @return an output stream for writing into the file + * @throws IOException + * @see ClientProtocol#append(String, String) + */ + OutputStream append(String src, int buffersize, Progressable progress + ) throws IOException { + checkOpen(); + FileStatus stat = null; + LocatedBlock lastBlock = null; + try { + stat = getFileInfo(src); + lastBlock = namenode.append(src, clientName); + } catch(RemoteException re) { + throw re.unwrapRemoteException(FileNotFoundException.class, + AccessControlException.class, + NSQuotaExceededException.class, + DSQuotaExceededException.class); + } + OutputStream result = new DFSOutputStream(src, buffersize, progress, + lastBlock, stat, conf.getInt("io.bytes.per.checksum", 512)); + leasechecker.put(src, result); + return result; + } + + /** + * Set replication for an existing file. + * + * @see ClientProtocol#setReplication(String, short) + * @param replication + * @throws IOException + * @return true is successful or false if file does not exist + */ + public boolean setReplication(String src, + short replication + ) throws IOException { + try { + return namenode.setReplication(src, replication); + } catch(RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + NSQuotaExceededException.class, + DSQuotaExceededException.class); + } + } + + /* + * Move blocks from src to trg and delete src + * See {@link ClientProtocol#concat(String, String [])}. + */ + public void concat(String trg, String [] srcs) throws IOException { + checkOpen(); + try { + namenode.concat(trg, srcs); + } catch(RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + NSQuotaExceededException.class, + DSQuotaExceededException.class); + } + } + + /** + * Rename file or directory. + * See {@link ClientProtocol#rename(String, String)}. + */ + public boolean rename(String src, String dst) throws IOException { + checkOpen(); + try { + return namenode.rename(src, dst); + } catch(RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + NSQuotaExceededException.class, + DSQuotaExceededException.class); + } + } + + /** + * Delete file or directory. + * See {@link ClientProtocol#delete(String)}. + */ + @Deprecated + public boolean delete(String src) throws IOException { + checkOpen(); + int val = deleteUsingTrash(src, true); // allow deletion only from FsShell + if (val == 0) { + return true; + } else if (val == 1) { + return false; + } + return namenode.delete(src, true); + } + + /** + * delete file or directory. 
+   * Delete the contents of the directory if it is non-empty and recursive
+   * is set to true.
+   */
+  public boolean delete(String src, boolean recursive) throws IOException {
+    checkOpen();
+    int val = deleteUsingTrash(src, recursive); // allow deletion only from FsShell
+    if (val == 0) {
+      return true;
+    } else if (val == 1) {
+      return false;
+    }
+    try {
+      return namenode.delete(src, recursive);
+    } catch(RemoteException re) {
+      throw re.unwrapRemoteException(AccessControlException.class);
+    }
+  }
+
+  /** Implemented using getFileInfo(src)
+   */
+  public boolean exists(String src) throws IOException {
+    checkOpen();
+    return getFileInfo(src) != null;
+  }
+
+  /** @deprecated Use getFileStatus() instead */
+  @Deprecated
+  public boolean isDirectory(String src) throws IOException {
+    FileStatus fs = getFileInfo(src);
+    if (fs != null)
+      return fs.isDir();
+    else
+      throw new FileNotFoundException("File does not exist: " + src);
+  }
+
+  /**
+   * Convert an HdfsFileStatus to a FileStatus
+   * @param stat an HdfsFileStatus
+   * @param src parent path in string representation
+   * @return a FileStatus object
+   */
+  private static FileStatus toFileStatus(HdfsFileStatus stat, String src) {
+    if (stat == null) {
+      return null;
+    }
+    return new FileStatus(stat.getLen(), stat.isDir(), stat.getReplication(),
+        stat.getBlockSize(), stat.getModificationTime(),
+        stat.getAccessTime(),
+        stat.getPermission(), stat.getOwner(), stat.getGroup(),
+        stat.getFullPath(new Path(src))); // full path
+  }
+
+  /**
+   * Convert an HdfsFileStatus and its block locations to a LocatedFileStatus
+   * @param stat an HdfsFileStatus
+   * @param locs the file's block locations
+   * @param src parent path in string representation
+   * @return a LocatedFileStatus object
+   */
+  private static LocatedFileStatus toLocatedFileStatus(
+      HdfsFileStatus stat, LocatedBlocks locs, String src) {
+    if (stat == null) {
+      return null;
+    }
+    return new LocatedFileStatus(stat.getLen(),
+        stat.isDir(), stat.getReplication(),
+        stat.getBlockSize(), stat.getModificationTime(),
+        stat.getAccessTime(),
+        stat.getPermission(), stat.getOwner(), stat.getGroup(),
+        stat.getFullPath(new Path(src)), // full path
+        DFSUtil.locatedBlocks2Locations(locs));
+  }
+
+  /**
+   * Get a listing of the indicated directory
+   */
+  public FileStatus[] listPaths(String src) throws IOException {
+    checkOpen();
+    try {
+      if (namenodeVersion >= ClientProtocol.ITERATIVE_LISTING_VERSION) {
+        return iterativeListing(src);
+      } else if (namenodeVersion >= ClientProtocol.OPTIMIZE_FILE_STATUS_VERSION) {
+        HdfsFileStatus[] hdfsStats = namenode.getHdfsListing(src);
+        if (hdfsStats == null) {
+          return null;
+        }
+        FileStatus[] stats = new FileStatus[hdfsStats.length];
+        for (int i = 0; i < hdfsStats.length; i++) {
+          stats[i] = toFileStatus(hdfsStats[i], src);
+        }
+        return stats;
+      } else {
+        return namenode.getListing(src);
+      }
+    } catch(RemoteException re) {
+      throw re.unwrapRemoteException(AccessControlException.class);
+    }
+  }
+
+  /**
+   * Get a listing of the indicated directory, including the block
+   * locations of each file in it
+   */
+  public RemoteIterator<LocatedFileStatus> listPathWithLocation(
+      final String src) throws IOException {
+    checkOpen();
+    try {
+      if (namenodeVersion >= ClientProtocol.BULK_BLOCK_LOCATIONS_VERSION) {
+        return iteratorListing(src);
+      } else {
+        return arrayListing(src);
+      }
+    } catch(RemoteException re) {
+      throw re.unwrapRemoteException(AccessControlException.class);
+    }
+  }
+
+  /** create the iterator from an array of file status */
+  private RemoteIterator<LocatedFileStatus> arrayListing(final String src)
+    throws IOException {
+    return new RemoteIterator<LocatedFileStatus>() {
+      private FileStatus[] stats;
+      private int i = 0;
+
+      { //initializer
+        stats = listPaths(src);
+        if (stats == null) {
+          throw new FileNotFoundException("File " + src + " does not exist.");
+        }
+      }
+
+      @Override
+      public boolean hasNext() throws IOException {
+        return i < stats.length;
+      }
+
+      @Override
+      public LocatedFileStatus next() throws IOException {
+        if (!hasNext()) {
+          throw new java.util.NoSuchElementException("No more entry in " + src);
+        }
+        // look up the block locations for this entry individually, since an
+        // older namenode cannot return them in bulk with the listing
+        FileStatus fileStatus = stats[i++];
+        BlockLocation[] locs = fileStatus.isDir() ? null :
+            getBlockLocations(fileStatus.getPath().toUri().getPath(),
+                              0, fileStatus.getLen());
+        return new LocatedFileStatus(fileStatus.getLen(), fileStatus.isDir(),
+            fileStatus.getReplication(), fileStatus.getBlockSize(),
+            fileStatus.getModificationTime(), fileStatus.getAccessTime(),
+            fileStatus.getPermission(), fileStatus.getOwner(),
+            fileStatus.getGroup(), fileStatus.getPath(), locs);
+      }
+    };
+  }
+
+  /** create the iterator from located partial listings fetched from the namenode */
+  private RemoteIterator<LocatedFileStatus> iteratorListing(final String src)
+      throws IOException {
+    return
new RemoteIterator() { + private LocatedDirectoryListing thisListing; + private int i; + + { // initializer + // fetch the first batch of entries in the directory + + thisListing = namenode.getLocatedPartialListing( + src, HdfsFileStatus.EMPTY_NAME); + if (thisListing == null) { // the directory does not exist + throw new FileNotFoundException("File " + src + " does not exist."); + } + } + + @Override + public boolean hasNext() throws IOException { + if (i>=thisListing.getPartialListing().length + && thisListing.hasMore()) { + // current listing is exhausted & fetch a new listing + thisListing = namenode.getLocatedPartialListing( + src, thisListing.getLastName()); + if (thisListing == null) { + throw new FileNotFoundException("File " + src + " does not exist."); + } + i = 0; + } + return i < thisListing.getPartialListing().length; + } + + @Override + public LocatedFileStatus next() throws IOException { + if (!hasNext()) { + throw new java.util.NoSuchElementException("No more entry in " + src); + } + return toLocatedFileStatus( + thisListing.getPartialListing()[i], + thisListing.getBlockLocations()[i++], src); + } + }; + + } + /** + * List the given path iteratively if the directory is large + * + * @param src a path + * @return a listing of the path + * @throws IOException if any IO error is occurred + */ + private FileStatus[] iterativeListing(String src) throws IOException { + // fetch the first batch of entries in the directory + DirectoryListing thisListing = namenode.getPartialListing( + src, HdfsFileStatus.EMPTY_NAME); + + if (thisListing == null) { // the directory does not exist + return null; + } + HdfsFileStatus[] partialListing = thisListing.getPartialListing(); + if (!thisListing.hasMore()) { // got all entries of the directory + FileStatus[] stats = new FileStatus[partialListing.length]; + for (int i = 0; i < partialListing.length; i++) { + stats[i] = toFileStatus(partialListing[i], src); + } + return stats; + } + + // The directory size is too big that it needs to fetch more + // estimate the total number of entries in the directory + int totalNumEntries = + partialListing.length + thisListing.getRemainingEntries(); + ArrayList listing = + new ArrayList(totalNumEntries); + // add the first batch of entries to the array list + for (HdfsFileStatus fileStatus : partialListing) { + listing.add(toFileStatus(fileStatus, src)); + } + + // now fetch more entries + do { + thisListing = namenode.getPartialListing(src, thisListing.getLastName()); + + if (thisListing == null) { + return null; // the directory is deleted + } + + partialListing = thisListing.getPartialListing(); + for (HdfsFileStatus fileStatus : partialListing) { + listing.add(toFileStatus(fileStatus, src)); + } + } while (thisListing.hasMore()); + + return listing.toArray(new FileStatus[listing.size()]); + } + + public FileStatus getFileInfo(String src) throws IOException { + checkOpen(); + try { + if (namenodeVersion >= ClientProtocol.OPTIMIZE_FILE_STATUS_VERSION) { + return toFileStatus(namenode.getHdfsFileInfo(src), src); + } else { + return namenode.getFileInfo(src); + } + } catch(RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class); + } + } + + /** + * Get the checksum of a file. 
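+   * <p>A minimal sketch (the path is illustrative):
+   * <pre>
+   *   MD5MD5CRC32FileChecksum sum = client.getFileChecksum("/user/foo/data");
+   *   // the result folds the CRC32 checksums of every block into a single
+   *   // MD5-of-block-MD5s digest
+   * </pre>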
+ * @param src The file path + * @return The checksum + * @see DistributedFileSystem#getFileChecksum(Path) + */ + MD5MD5CRC32FileChecksum getFileChecksum(String src) throws IOException { + checkOpen(); + return getFileChecksum(src, namenode, socketFactory, socketTimeout); + } + + /** + * Get the checksum of a file. + * @param src The file path + * @return The checksum + */ + public static MD5MD5CRC32FileChecksum getFileChecksum(String src, + ClientProtocol namenode, SocketFactory socketFactory, int socketTimeout + ) throws IOException { + //get all block locations + final List locatedblocks + = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE).getLocatedBlocks(); + final DataOutputBuffer md5out = new DataOutputBuffer(); + int bytesPerCRC = 0; + long crcPerBlock = 0; + + //get block checksum for each block + for(int i = 0; i < locatedblocks.size(); i++) { + LocatedBlock lb = locatedblocks.get(i); + final Block block = lb.getBlock(); + final DatanodeInfo[] datanodes = lb.getLocations(); + + //try each datanode location of the block + final int timeout = 3000 * datanodes.length + socketTimeout; + boolean done = false; + for(int j = 0; !done && j < datanodes.length; j++) { + //connect to a datanode + final Socket sock = socketFactory.createSocket(); + NetUtils.connect(sock, + NetUtils.createSocketAddr(datanodes[j].getName()), + timeout); + sock.setSoTimeout(timeout); + + DataOutputStream out = new DataOutputStream( + new BufferedOutputStream(NetUtils.getOutputStream(sock), + DataNode.SMALL_BUFFER_SIZE)); + DataInputStream in = new DataInputStream(NetUtils.getInputStream(sock)); + + // get block MD5 + try { + if (LOG.isDebugEnabled()) { + LOG.debug("write to " + datanodes[j].getName() + ": " + + DataTransferProtocol.OP_BLOCK_CHECKSUM + + ", block=" + block); + } + out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION); + out.write(DataTransferProtocol.OP_BLOCK_CHECKSUM); + out.writeLong(block.getBlockId()); + out.writeLong(block.getGenerationStamp()); + out.flush(); + + final short reply = in.readShort(); + if (reply != DataTransferProtocol.OP_STATUS_SUCCESS) { + throw new IOException("Bad response " + reply + " for block " + + block + " from datanode " + datanodes[j].getName()); + } + + //read byte-per-checksum + final int bpc = in.readInt(); + if (i == 0) { //first block + bytesPerCRC = bpc; + } + else if (bpc != bytesPerCRC) { + throw new IOException("Byte-per-checksum not matched: bpc=" + bpc + + " but bytesPerCRC=" + bytesPerCRC); + } + + //read crc-per-block + final long cpb = in.readLong(); + if (locatedblocks.size() > 1 && i == 0) { + crcPerBlock = cpb; + } + + //read md5 + final MD5Hash md5 = MD5Hash.read(in); + md5.write(md5out); + + done = true; + + if (LOG.isDebugEnabled()) { + if (i == 0) { + LOG.debug("set bytesPerCRC=" + bytesPerCRC + + ", crcPerBlock=" + crcPerBlock); + } + LOG.debug("got reply from " + datanodes[j].getName() + + ": md5=" + md5); + } + } catch (IOException ie) { + LOG.warn("src=" + src + ", datanodes[" + j + "].getName()=" + + datanodes[j].getName(), ie); + } finally { + IOUtils.closeStream(in); + IOUtils.closeStream(out); + IOUtils.closeSocket(sock); + } + } + + if (!done) { + throw new IOException("Fail to get block MD5 for " + block); + } + } + + //compute file MD5 + final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData()); + return new MD5MD5CRC32FileChecksum(bytesPerCRC, crcPerBlock, fileMD5); + } + + /** + * Set permissions to a file or directory. + * @param src path name. 
+ * @param permission + * @throws FileNotFoundException is file does not exist. + */ + public void setPermission(String src, FsPermission permission + ) throws IOException { + checkOpen(); + try { + namenode.setPermission(src, permission); + } catch(RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + FileNotFoundException.class); + } + } + + /** + * Set file or directory owner. + * @param src path name. + * @param username user id. + * @param groupname user group. + * @throws FileNotFoundException is file does not exist. + */ + public void setOwner(String src, String username, String groupname + ) throws IOException { + checkOpen(); + try { + namenode.setOwner(src, username, groupname); + } catch(RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + FileNotFoundException.class); + } + } + + public DiskStatus getDiskStatus() throws IOException { + long rawNums[] = namenode.getStats(); + return new DiskStatus(rawNums[0], rawNums[1], rawNums[2]); + } + /** + */ + public long totalRawCapacity() throws IOException { + long rawNums[] = namenode.getStats(); + return rawNums[0]; + } + + /** + */ + public long totalRawUsed() throws IOException { + long rawNums[] = namenode.getStats(); + return rawNums[1]; + } + + /** + * Returns count of blocks with no good replicas left. Normally should be + * zero. + * @throws IOException + */ + public long getMissingBlocksCount() throws IOException { + return namenode.getStats()[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX]; + } + + /** + * Returns count of blocks with one of more replica missing. + * @throws IOException + */ + public long getUnderReplicatedBlocksCount() throws IOException { + return namenode.getStats()[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX]; + } + + /** + * Returns count of blocks with at least one replica marked corrupt. + * @throws IOException + */ + public long getCorruptBlocksCount() throws IOException { + return namenode.getStats()[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX]; + } + + /** + * @return a list in which each entry describes a corrupt file/block + * @throws AccessControlException + * @throws IOException + */ + public CorruptFileBlocks listCorruptFileBlocks(String path, + String cookie) + throws IOException { + if (namenodeVersion < ClientProtocol.LIST_CORRUPT_FILEBLOCKS_VERSION) { + LOG.info("NameNode version is " + namenodeVersion + + " Using older version of getCorruptFiles."); + if (cookie != null ) { + return new CorruptFileBlocks(new String[0], ""); + } + ArrayList str = new ArrayList(); + for (FileStatus stat : namenode.getCorruptFiles()) { + String filename = stat.getPath().toUri().getPath(); + if (filename.startsWith(path)) { + str.add(filename); + } + } + return new CorruptFileBlocks(str.toArray(new String[str.size()]), ""); + } + return namenode.listCorruptFileBlocks(path, cookie); + } + + public DatanodeInfo[] datanodeReport(DatanodeReportType type) + throws IOException { + return namenode.getDatanodeReport(type); + } + + /** + * Enter, leave or get safe mode. + * See {@link ClientProtocol#setSafeMode(FSConstants.SafeModeAction)} + * for more details. + * + * @see ClientProtocol#setSafeMode(FSConstants.SafeModeAction) + */ + public boolean setSafeMode(SafeModeAction action) throws IOException { + return namenode.setSafeMode(action); + } + + /** + * Save namespace image. + * See {@link ClientProtocol#saveNamespace()} + * for more details. 
+ * + * @see ClientProtocol#saveNamespace() + */ + void saveNamespace() throws AccessControlException, IOException { + try { + namenode.saveNamespace(); + } catch(RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class); + } + } + + /** + * Refresh the hosts and exclude files. (Rereads them.) + * See {@link ClientProtocol#refreshNodes()} + * for more details. + * + * @see ClientProtocol#refreshNodes() + */ + public void refreshNodes() throws IOException { + namenode.refreshNodes(); + } + + /** + * Dumps DFS data structures into specified file. + * See {@link ClientProtocol#metaSave(String)} + * for more details. + * + * @see ClientProtocol#metaSave(String) + */ + public void metaSave(String pathname) throws IOException { + namenode.metaSave(pathname); + } + + /** + * @see ClientProtocol#finalizeUpgrade() + */ + public void finalizeUpgrade() throws IOException { + namenode.finalizeUpgrade(); + } + + /** + * @see ClientProtocol#distributedUpgradeProgress(FSConstants.UpgradeAction) + */ + public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action + ) throws IOException { + return namenode.distributedUpgradeProgress(action); + } + + /** + */ + public boolean mkdirs(String src) throws IOException { + return mkdirs(src, null); + } + + /** + * Create a directory (or hierarchy of directories) with the given + * name and permission. + * + * @param src The path of the directory being created + * @param permission The permission of the directory being created. + * If permission == null, use {@link FsPermission#getDefault()}. + * @return True if the operation success. + * @see ClientProtocol#mkdirs(String, FsPermission) + */ + public boolean mkdirs(String src, FsPermission permission)throws IOException{ + checkOpen(); + if (permission == null) { + permission = FsPermission.getDefault(); + } + FsPermission masked = permission.applyUMask(FsPermission.getUMask(conf)); + LOG.debug(src + ": masked=" + masked); + try { + return namenode.mkdirs(src, masked); + } catch(RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + NSQuotaExceededException.class, + DSQuotaExceededException.class); + } + } + + ContentSummary getContentSummary(String src) throws IOException { + try { + return namenode.getContentSummary(src); + } catch(RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + FileNotFoundException.class); + } + } + + /** + * Sets or resets quotas for a directory. 
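+   * Each quota value must either be positive or be one of the two sentinels
+   * accepted by the sanity check below: FSConstants.QUOTA_DONT_SET (leave
+   * that quota unchanged) or FSConstants.QUOTA_RESET (remove that quota).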
+ * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String, long, long) + */ + void setQuota(String src, long namespaceQuota, long diskspaceQuota) + throws IOException { + // sanity check + if ((namespaceQuota <= 0 && namespaceQuota != FSConstants.QUOTA_DONT_SET && + namespaceQuota != FSConstants.QUOTA_RESET) || + (diskspaceQuota <= 0 && diskspaceQuota != FSConstants.QUOTA_DONT_SET && + diskspaceQuota != FSConstants.QUOTA_RESET)) { + throw new IllegalArgumentException("Invalid values for quota : " + + namespaceQuota + " and " + + diskspaceQuota); + + } + + try { + namenode.setQuota(src, namespaceQuota, diskspaceQuota); + } catch(RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + FileNotFoundException.class, + NSQuotaExceededException.class, + DSQuotaExceededException.class); + } + } + + /** + * set the modification and access time of a file + * @throws FileNotFoundException if the path is not a file + */ + public void setTimes(String src, long mtime, long atime) throws IOException { + checkOpen(); + try { + namenode.setTimes(src, mtime, atime); + } catch(RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + FileNotFoundException.class); + } + } + + /** + * Pick the best node from which to stream the data. + * Entries in nodes are already in the priority order + */ + private DatanodeInfo bestNode(DatanodeInfo nodes[], + AbstractMap deadNodes) + throws IOException { + if (nodes != null) { + for (int i = 0; i < nodes.length; i++) { + if (!deadNodes.containsKey(nodes[i])) { + return nodes[i]; + } + } + } + throw new IOException("No live nodes contain current block"); + } + + boolean isLeaseCheckerStarted() { + return leasechecker.daemon != null; + } + + /** Lease management*/ + class LeaseChecker implements Runnable { + /** A map from src -> DFSOutputStream of files that are currently being + * written by this client. + */ + private final SortedMap pendingCreates + = new TreeMap(); + + private Daemon daemon = null; + + synchronized void put(String src, OutputStream out) { + if (clientRunning) { + if (daemon == null) { + daemon = new Daemon(this); + daemon.start(); + } + pendingCreates.put(src, out); + } + } + + synchronized void remove(String src) { + pendingCreates.remove(src); + } + + void interruptAndJoin() throws InterruptedException { + Daemon daemonCopy = null; + synchronized (this) { + if (daemon != null) { + daemon.interrupt(); + daemonCopy = daemon; + } + } + + if (daemonCopy != null) { + LOG.debug("Wait for lease checker to terminate"); + daemonCopy.join(); + } + } + + synchronized void close() { + while (!pendingCreates.isEmpty()) { + String src = pendingCreates.firstKey(); + OutputStream out = pendingCreates.remove(src); + if (out != null) { + try { + out.close(); + } catch (IOException ie) { + LOG.error("Exception closing file " + src+ " : " + ie, ie); + } + } + } + } + + /** + * Abort all open files. Release resources held. Ignore all errors. 
+ */ + synchronized void abort() { + clientRunning = false; + while (!pendingCreates.isEmpty()) { + String src = pendingCreates.firstKey(); + DFSOutputStream out = (DFSOutputStream)pendingCreates.remove(src); + if (out != null) { + try { + out.abort(); + } catch (IOException ie) { + LOG.error("Exception aborting file " + src+ ": ", ie); + } + } + } + RPC.stopProxy(rpcNamenode); // close connections to the namenode + } + + private void renew() throws IOException { + synchronized(this) { + if (pendingCreates.isEmpty()) { + return; + } + } + namenode.renewLease(clientName); + } + + /** + * Periodically check in with the namenode and renew all the leases + * when the lease period is half over. + */ + public void run() { + long lastRenewed = 0; + long hardLeaseLimit = conf.getLong( + FSConstants.DFS_HARD_LEASE_KEY, LEASE_HARDLIMIT_PERIOD); + long softLeaseLimit = conf.getLong( + FSConstants.DFS_SOFT_LEASE_KEY, LEASE_SOFTLIMIT_PERIOD); + long renewal = Math.min(hardLeaseLimit, softLeaseLimit) / 2; + if (hdfsTimeout > 0) { + renewal = Math.min(renewal, hdfsTimeout/2); + } + while (clientRunning && !Thread.interrupted()) { + if (System.currentTimeMillis() - lastRenewed > renewal) { + try { + renew(); + lastRenewed = System.currentTimeMillis(); + } catch (SocketTimeoutException ie) { + LOG.warn("Problem renewing lease for " + clientName + + " for a period of " + (renewal/1000) + + " seconds. Shutting down HDFS client...", ie); + abort(); + break; + } catch (IOException ie) { + LOG.warn("Problem renewing lease for " + clientName + + " for a period of " + (renewal/1000) + + " seconds. Will retry shortly...", ie); + } + } + + try { + Thread.sleep(1000); + } catch (InterruptedException ie) { + if (LOG.isDebugEnabled()) { + LOG.debug(this + " is interrupted.", ie); + } + return; + } + } + } + + /** {@inheritDoc} */ + public String toString() { + String s = getClass().getSimpleName(); + if (LOG.isTraceEnabled()) { + return s + "@" + DFSClient.this + ": " + + StringUtils.stringifyException(new Throwable("for testing")); + } + return s; + } + } + + /** Utility class to encapsulate data node info and its ip address. */ + private static class DNAddrPair { + DatanodeInfo info; + InetSocketAddress addr; + DNAddrPair(DatanodeInfo info, InetSocketAddress addr) { + this.info = info; + this.addr = addr; + } + } + + /** This is a wrapper around connection to datadone + * and understands checksum, offset etc + */ + public static class BlockReader extends FSInputChecker { + + private Socket dnSock; //for now just sending checksumOk. + private DataInputStream in; + private DataChecksum checksum; + private long lastChunkOffset = -1; + private long lastChunkLen = -1; + private long lastSeqNo = -1; + + private long startOffset; + private long firstChunkOffset; + private int bytesPerChecksum; + private int checksumSize; + private boolean gotEOS = false; + + byte[] skipBuf = null; + ByteBuffer checksumBytes = null; + int dataLeft = 0; + boolean isLastPacket = false; + + /* FSInputChecker interface */ + + /* same interface as inputStream java.io.InputStream#read() + * used by DFSInputStream#read() + * This violates one rule when there is a checksum error: + * "Read should not modify user buffer before successful read" + * because it first reads the data to user buffer and then checks + * the checksum. + */ + @Override + public synchronized int read(byte[] buf, int off, int len) + throws IOException { + + //for the first read, skip the extra bytes at the front. 
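+      // (The datanode always starts the stream at a checksum-chunk boundary,
+      //  so e.g. with bytesPerChecksum = 512, a request for offset 1000 is
+      //  served from firstChunkOffset = 512 and the first 1000 - 512 = 488
+      //  bytes are read into skipBuf below and thrown away.)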
+ if (lastChunkLen < 0 && startOffset > firstChunkOffset && len > 0) { + // Skip these bytes. But don't call this.skip()! + int toSkip = (int)(startOffset - firstChunkOffset); + if ( skipBuf == null ) { + skipBuf = new byte[bytesPerChecksum]; + } + if ( super.read(skipBuf, 0, toSkip) != toSkip ) { + // should never happen + throw new IOException("Could not skip required number of bytes"); + } + } + + boolean eosBefore = gotEOS; + int nRead = super.read(buf, off, len); + + // if gotEOS was set in the previous read and checksum is enabled : + if (gotEOS && !eosBefore && nRead >= 0 && needChecksum()) { + //checksum is verified and there are no errors. + checksumOk(dnSock); + } + return nRead; + } + + @Override + public synchronized long skip(long n) throws IOException { + /* How can we make sure we don't throw a ChecksumException, at least + * in majority of the cases?. This one throws. */ + if ( skipBuf == null ) { + skipBuf = new byte[bytesPerChecksum]; + } + + long nSkipped = 0; + while ( nSkipped < n ) { + int toSkip = (int)Math.min(n-nSkipped, skipBuf.length); + int ret = read(skipBuf, 0, toSkip); + if ( ret <= 0 ) { + return nSkipped; + } + nSkipped += ret; + } + return nSkipped; + } + + @Override + public int read() throws IOException { + throw new IOException("read() is not expected to be invoked. " + + "Use read(buf, off, len) instead."); + } + + @Override + public boolean seekToNewSource(long targetPos) throws IOException { + /* Checksum errors are handled outside the BlockReader. + * DFSInputStream does not always call 'seekToNewSource'. In the + * case of pread(), it just tries a different replica without seeking. + */ + return false; + } + + @Override + public void seek(long pos) throws IOException { + throw new IOException("Seek() is not supported in BlockInputChecker"); + } + + @Override + protected long getChunkPosition(long pos) { + throw new RuntimeException("getChunkPosition() is not supported, " + + "since seek is not required"); + } + + /** + * Makes sure that checksumBytes has enough capacity + * and limit is set to the number of checksum bytes needed + * to be read. + */ + private void adjustChecksumBytes(int dataLen) { + int requiredSize = + ((dataLen + bytesPerChecksum - 1)/bytesPerChecksum)*checksumSize; + if (checksumBytes == null || requiredSize > checksumBytes.capacity()) { + checksumBytes = ByteBuffer.wrap(new byte[requiredSize]); + } else { + checksumBytes.clear(); + } + checksumBytes.limit(requiredSize); + } + + @Override + protected synchronized int readChunk(long pos, byte[] buf, int offset, + int len, byte[] checksumBuf) + throws IOException { + // Read one chunk. + + if ( gotEOS ) { + if ( startOffset < 0 ) { + //This is mainly for debugging. can be removed. + throw new IOException( "BlockRead: already got EOS or an error" ); + } + startOffset = -1; + return -1; + } + + // Read one DATA_CHUNK. + long chunkOffset = lastChunkOffset; + if ( lastChunkLen > 0 ) { + chunkOffset += lastChunkLen; + } + + if ( (pos + firstChunkOffset) != chunkOffset ) { + throw new IOException("Mismatch in pos : " + pos + " + " + + firstChunkOffset + " != " + chunkOffset); + } + + // Read next packet if the previous packet has been read completely. + if (dataLeft <= 0) { + //Read packet headers. 
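+        // The packet header read below is laid out as:
+        //   packetLen (int), offsetInBlock (long), seqno (long),
+        //   lastPacketInBlock (boolean), then dataLen (int),
+        // followed by the checksums and finally the data chunks themselves.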
+ int packetLen = in.readInt(); + long offsetInBlock = in.readLong(); + long seqno = in.readLong(); + boolean lastPacketInBlock = in.readBoolean(); + + if (LOG.isDebugEnabled()) { + LOG.debug("DFSClient readChunk got seqno " + seqno + + " offsetInBlock " + offsetInBlock + + " lastPacketInBlock " + lastPacketInBlock + + " packetLen " + packetLen); + } + + int dataLen = in.readInt(); + + // Sanity check the lengths + if ( dataLen < 0 || + ( (dataLen % bytesPerChecksum) != 0 && !lastPacketInBlock ) || + (seqno != (lastSeqNo + 1)) ) { + throw new IOException("BlockReader: error in packet header" + + "(chunkOffset : " + chunkOffset + + ", dataLen : " + dataLen + + ", seqno : " + seqno + + " (last: " + lastSeqNo + "))"); + } + + lastSeqNo = seqno; + isLastPacket = lastPacketInBlock; + dataLeft = dataLen; + adjustChecksumBytes(dataLen); + if (dataLen > 0) { + IOUtils.readFully(in, checksumBytes.array(), 0, + checksumBytes.limit()); + } + } + + int chunkLen = Math.min(dataLeft, bytesPerChecksum); + + if ( chunkLen > 0 ) { + // len should be >= chunkLen + IOUtils.readFully(in, buf, offset, chunkLen); + checksumBytes.get(checksumBuf, 0, checksumSize); + } + + dataLeft -= chunkLen; + lastChunkOffset = chunkOffset; + lastChunkLen = chunkLen; + + if ((dataLeft == 0 && isLastPacket) || chunkLen == 0) { + gotEOS = true; + } + if ( chunkLen == 0 ) { + return -1; + } + + return chunkLen; + } + + private BlockReader( String file, long blockId, DataInputStream in, + DataChecksum checksum, boolean verifyChecksum, + long startOffset, long firstChunkOffset, + Socket dnSock ) { + super(new Path("/blk_" + blockId + ":of:" + file)/*too non path-like?*/, + 1, verifyChecksum, + checksum.getChecksumSize() > 0? checksum : null, + checksum.getBytesPerChecksum(), + checksum.getChecksumSize()); + + this.dnSock = dnSock; + this.in = in; + this.checksum = checksum; + this.startOffset = Math.max( startOffset, 0 ); + + this.firstChunkOffset = firstChunkOffset; + lastChunkOffset = firstChunkOffset; + lastChunkLen = -1; + + bytesPerChecksum = this.checksum.getBytesPerChecksum(); + checksumSize = this.checksum.getChecksumSize(); + } + + public static BlockReader newBlockReader(Socket sock, String file, long blockId, + long genStamp, long startOffset, long len, int bufferSize) throws IOException { + return newBlockReader(sock, file, blockId, genStamp, startOffset, len, bufferSize, + true); + } + + /** Java Doc required */ + public static BlockReader newBlockReader( Socket sock, String file, long blockId, + long genStamp, + long startOffset, long len, + int bufferSize, boolean verifyChecksum) + throws IOException { + return newBlockReader(sock, file, blockId, genStamp, startOffset, + len, bufferSize, verifyChecksum, ""); + } + + public static BlockReader newBlockReader( Socket sock, String file, + long blockId, + long genStamp, + long startOffset, long len, + int bufferSize, boolean verifyChecksum, + String clientName) + throws IOException { + // in and out will be closed when sock is closed (by the caller) + DataOutputStream out = new DataOutputStream( + new BufferedOutputStream(NetUtils.getOutputStream(sock,HdfsConstants.WRITE_TIMEOUT))); + + //write the header. 
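+      // The request written below is, in order: transfer version (short),
+      // opcode (byte), blockId (long), generation stamp (long),
+      // startOffset (long), length (long), and the client name (Text).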
+ out.writeShort( DataTransferProtocol.DATA_TRANSFER_VERSION ); + out.write( DataTransferProtocol.OP_READ_BLOCK ); + out.writeLong( blockId ); + out.writeLong( genStamp ); + out.writeLong( startOffset ); + out.writeLong( len ); + Text.writeString(out, clientName); + out.flush(); + + // + // Get bytes in block, set streams + // + + DataInputStream in = new DataInputStream( + new BufferedInputStream(NetUtils.getInputStream(sock), + bufferSize)); + + if ( in.readShort() != DataTransferProtocol.OP_STATUS_SUCCESS ) { + throw new IOException("Got error in response to OP_READ_BLOCK " + + "for file " + file + + " for block " + blockId); + } + DataChecksum checksum = DataChecksum.newDataChecksum( in ); + //Warning when we get CHECKSUM_NULL? + + // Read the first chunk offset. + long firstChunkOffset = in.readLong(); + + if ( firstChunkOffset < 0 || firstChunkOffset > startOffset || + firstChunkOffset >= (startOffset + checksum.getBytesPerChecksum())) { + throw new IOException("BlockReader: error in first chunk offset (" + + firstChunkOffset + ") startOffset is " + + startOffset + " for file " + file); + } + + return new BlockReader( file, blockId, in, checksum, verifyChecksum, + startOffset, firstChunkOffset, sock ); + } + + @Override + public synchronized void close() throws IOException { + startOffset = -1; + checksum = null; + // in will be closed when its Socket is closed. + } + + /** kind of like readFully(). Only reads as much as possible. + * And allows use of protected readFully(). + */ + public int readAll(byte[] buf, int offset, int len) throws IOException { + return readFully(this, buf, offset, len); + } + + /* When the reader reaches end of a block and there are no checksum + * errors, we send OP_STATUS_CHECKSUM_OK to datanode to inform that + * checksum was verified and there was no error. + */ + private void checksumOk(Socket sock) { + try { + OutputStream out = NetUtils.getOutputStream(sock, HdfsConstants.WRITE_TIMEOUT); + byte buf[] = { (DataTransferProtocol.OP_STATUS_CHECKSUM_OK >>> 8) & 0xff, + (DataTransferProtocol.OP_STATUS_CHECKSUM_OK) & 0xff }; + out.write(buf); + out.flush(); + } catch (IOException e) { + // its ok not to be able to send this. + LOG.debug("Could not write to datanode " + sock.getInetAddress() + + ": " + e.getMessage()); + } + } + } + + /**************************************************************** + * DFSInputStream provides bytes from a named file. It handles + * negotiation of the namenode and various datanodes as necessary. + ****************************************************************/ + public class DFSInputStream extends FSInputStream { + private Socket s = null; + private boolean closed = false; + + private String src; + private long prefetchSize = 10 * defaultBlockSize; + private BlockReader blockReader = null; + private boolean verifyChecksum; + private LocatedBlocks locatedBlocks = null; + private DatanodeInfo currentNode = null; + private Block currentBlock = null; + private long pos = 0; + private long blockEnd = -1; + private int failures = 0; + private int timeWindow = 3000; // wait time window (in msec) if BlockMissingException is caught + + /* XXX Use of CocurrentHashMap is temp fix. 
Need to fix + * parallel accesses to DFSInputStream (through ptreads) properly */ + private ConcurrentHashMap deadNodes = + new ConcurrentHashMap(); + private int buffersize = 1; + + private byte[] oneByteBuf = new byte[1]; // used for 'int read()' + + void addToDeadNodes(DatanodeInfo dnInfo) { + deadNodes.put(dnInfo, dnInfo); + } + + DFSInputStream(String src, int buffersize, boolean verifyChecksum + ) throws IOException { + this.verifyChecksum = verifyChecksum; + this.buffersize = buffersize; + this.src = src; + prefetchSize = conf.getLong("dfs.read.prefetch.size", prefetchSize); + timeWindow = conf.getInt("dfs.client.baseTimeWindow.waitOn.BlockMissingException", timeWindow); + openInfo(); + } + + /** + * Grab the open-file info from namenode + */ + synchronized void openInfo() throws IOException { + LocatedBlocks newInfo = callGetBlockLocations(namenode, src, 0, prefetchSize); + if (newInfo == null) { + throw new IOException("Cannot open filename " + src); + } + + if (locatedBlocks != null) { + Iterator oldIter = locatedBlocks.getLocatedBlocks().iterator(); + Iterator newIter = newInfo.getLocatedBlocks().iterator(); + while (oldIter.hasNext() && newIter.hasNext()) { + if (! oldIter.next().getBlock().equals(newIter.next().getBlock())) { + throw new IOException("Blocklist for " + src + " has changed!"); + } + } + } + this.locatedBlocks = newInfo; + this.currentNode = null; + } + + /** + * Returns whether the file opened is under construction. + */ + public synchronized boolean isUnderConstruction() { + return locatedBlocks.isUnderConstruction(); + } + + public synchronized long getFileLength() { + return (locatedBlocks == null) ? 0 : locatedBlocks.getFileLength(); + } + + /** + * Returns the datanode from which the stream is currently reading. + */ + public DatanodeInfo getCurrentDatanode() { + return currentNode; + } + + /** + * Returns the block containing the target position. + */ + public Block getCurrentBlock() { + return currentBlock; + } + + /** + * Return collection of blocks that has already been located. + */ + synchronized List getAllBlocks() throws IOException { + return getBlockRange(0, this.getFileLength()); + } + + /** + * Get block at the specified position. + * Fetch it from the namenode if not cached. + * + * @param offset + * @return located block + * @throws IOException + */ + private LocatedBlock getBlockAt(long offset) throws IOException { + assert (locatedBlocks != null) : "locatedBlocks is null"; + // search cached blocks first + int targetBlockIdx = locatedBlocks.findBlock(offset); + if (targetBlockIdx < 0) { // block is not cached + targetBlockIdx = LocatedBlocks.getInsertIndex(targetBlockIdx); + // fetch more blocks + LocatedBlocks newBlocks; + newBlocks = callGetBlockLocations(namenode, src, offset, prefetchSize); + assert (newBlocks != null) : "Could not find target position " + offset; + locatedBlocks.insertRange(targetBlockIdx, newBlocks.getLocatedBlocks()); + } + LocatedBlock blk = locatedBlocks.get(targetBlockIdx); + // update current position + this.pos = offset; + this.blockEnd = blk.getStartOffset() + blk.getBlockSize() - 1; + this.currentBlock = blk.getBlock(); + return blk; + } + + /** + * Get blocks in the specified range. + * Fetch them from the namenode if not cached. 
+ * + * @param offset + * @param length + * @return consequent segment of located blocks + * @throws IOException + */ + private synchronized List getBlockRange(long offset, + long length) + throws IOException { + assert (locatedBlocks != null) : "locatedBlocks is null"; + List blockRange = new ArrayList(); + // search cached blocks first + int blockIdx = locatedBlocks.findBlock(offset); + if (blockIdx < 0) { // block is not cached + blockIdx = LocatedBlocks.getInsertIndex(blockIdx); + } + long remaining = length; + long curOff = offset; + while(remaining > 0) { + LocatedBlock blk = null; + if(blockIdx < locatedBlocks.locatedBlockCount()) + blk = locatedBlocks.get(blockIdx); + if (blk == null || curOff < blk.getStartOffset()) { + LocatedBlocks newBlocks; + newBlocks = callGetBlockLocations(namenode, src, curOff, remaining); + locatedBlocks.insertRange(blockIdx, newBlocks.getLocatedBlocks()); + continue; + } + assert curOff >= blk.getStartOffset() : "Block not found"; + blockRange.add(blk); + long bytesRead = blk.getStartOffset() + blk.getBlockSize() - curOff; + remaining -= bytesRead; + curOff += bytesRead; + blockIdx++; + } + return blockRange; + } + + /** + * Open a DataInputStream to a DataNode so that it can be read from. + * We get block ID and the IDs of the destinations at startup, from the namenode. + */ + private synchronized DatanodeInfo blockSeekTo(long target) throws IOException { + if (target >= getFileLength()) { + throw new IOException("Attempted to read past end of file"); + } + + if ( blockReader != null ) { + blockReader.close(); + blockReader = null; + } + + if (s != null) { + s.close(); + s = null; + } + + // + // Compute desired block + // + LocatedBlock targetBlock = getBlockAt(target); + assert (target==this.pos) : "Wrong postion " + pos + " expect " + target; + long offsetIntoBlock = target - targetBlock.getStartOffset(); + + // + // Connect to best DataNode for desired Block, with potential offset + // + DatanodeInfo chosenNode = null; + failures = 0; + while (s == null) { + DNAddrPair retval = chooseDataNode(targetBlock); + chosenNode = retval.info; + InetSocketAddress targetAddr = retval.addr; + + try { + s = socketFactory.createSocket(); + NetUtils.connect(s, targetAddr, socketTimeout); + s.setSoTimeout(socketTimeout); + Block blk = targetBlock.getBlock(); + + blockReader = BlockReader.newBlockReader(s, src, blk.getBlockId(), + blk.getGenerationStamp(), + offsetIntoBlock, blk.getNumBytes() - offsetIntoBlock, + buffersize, verifyChecksum, clientName); + return chosenNode; + } catch (IOException ex) { + // Put chosen node into dead list, continue + LOG.debug("Failed to connect to " + targetAddr + ":" + + StringUtils.stringifyException(ex)); + addToDeadNodes(chosenNode); + if (s != null) { + try { + s.close(); + } catch (IOException iex) { + } + } + s = null; + } + } + return chosenNode; + } + + /** + * Close it down! + */ + @Override + public synchronized void close() throws IOException { + if (closed) { + return; + } + checkOpen(); + + if ( blockReader != null ) { + blockReader.close(); + blockReader = null; + } + + if (s != null) { + s.close(); + s = null; + } + super.close(); + closed = true; + } + + @Override + public synchronized int read() throws IOException { + int ret = read( oneByteBuf, 0, 1 ); + return ( ret <= 0 ) ? -1 : (oneByteBuf[0] & 0xff); + } + + /* This is a used by regular read() and handles ChecksumExceptions. 
+ * name readBuffer() is chosen to imply similarity to readBuffer() in + * ChecksuFileSystem + */ + private synchronized int readBuffer(byte buf[], int off, int len) + throws IOException { + IOException ioe; + + /* we retry current node only once. So this is set to true only here. + * Intention is to handle one common case of an error that is not a + * failure on datanode or client : when DataNode closes the connection + * since client is idle. If there are other cases of "non-errors" then + * then a datanode might be retried by setting this to true again. + */ + boolean retryCurrentNode = true; + + while (true) { + // retry as many times as seekToNewSource allows. + try { + return blockReader.read(buf, off, len); + } catch ( ChecksumException ce ) { + LOG.warn("Found Checksum error for " + currentBlock + " from " + + currentNode.getName() + " at " + ce.getPos()); + reportChecksumFailure(src, currentBlock, currentNode); + ioe = ce; + retryCurrentNode = false; + } catch ( IOException e ) { + if (!retryCurrentNode) { + LOG.warn("Exception while reading from " + currentBlock + + " of " + src + " from " + currentNode + ": " + + StringUtils.stringifyException(e)); + } + ioe = e; + } + boolean sourceFound = false; + if (retryCurrentNode) { + /* possibly retry the same node so that transient errors don't + * result in application level failures (e.g. Datanode could have + * closed the connection because the client is idle for too long). + */ + sourceFound = seekToBlockSource(pos); + } else { + addToDeadNodes(currentNode); + sourceFound = seekToNewSource(pos); + } + if (!sourceFound) { + throw ioe; + } + retryCurrentNode = false; + } + } + + /** + * Read the entire buffer. + */ + @Override + public synchronized int read(byte buf[], int off, int len) throws IOException { + checkOpen(); + if (closed) { + throw new IOException("Stream closed"); + } + if (pos < getFileLength()) { + int retries = 2; + while (retries > 0) { + try { + if (pos > blockEnd) { + currentNode = blockSeekTo(pos); + } + int realLen = (int) Math.min((long) len, (blockEnd - pos + 1L)); + int result = readBuffer(buf, off, realLen); + + if (result >= 0) { + pos += result; + } else { + // got a EOS from reader though we expect more data on it. + throw new IOException("Unexpected EOS from the reader"); + } + if (stats != null && result != -1) { + stats.incrementBytesRead(result); + } + return result; + } catch (ChecksumException ce) { + throw ce; + } catch (IOException e) { + if (retries == 1) { + LOG.warn("DFS Read: " + StringUtils.stringifyException(e)); + } + blockEnd = -1; + if (currentNode != null) { addToDeadNodes(currentNode); } + if (--retries == 0) { + throw e; + } + } + } + } + return -1; + } + + + private DNAddrPair chooseDataNode(LocatedBlock block) + throws IOException { + while (true) { + DatanodeInfo[] nodes = block.getLocations(); + DatanodeInfo chosenNode = null; + try { + chosenNode = bestNode(nodes, deadNodes); + InetSocketAddress targetAddr = + NetUtils.createSocketAddr(chosenNode.getName()); + return new DNAddrPair(chosenNode, targetAddr); + } catch (IOException ie) { + String blockInfo = block.getBlock() + " file=" + src; + if (failures >= maxBlockAcquireFailures) { + throw new BlockMissingException(src, "Could not obtain block: " + blockInfo, block.getStartOffset()); + } + + if (nodes == null || nodes.length == 0) { + LOG.info("No node available for block: " + blockInfo); + } + LOG.info("Could not obtain block " + block.getBlock() + + " from node: " + (chosenNode == null ? 
" " : chosenNode.getHostName()) + + ie); + try { + // Introducing a random factor to the wait time before another retry. + // The wait time is dependent on # of failures and a random factor. + // At the first time of getting a BlockMissingException, the wait time + // is a random number between 0..3000 ms. If the first retry + // still fails, we will wait 3000 ms grace period before the 2nd retry. + // Also at the second retry, the waiting window is expanded to 6000 ms + // alleviating the request rate from the server. Similarly the 3rd retry + // will wait 6000ms grace period before retry and the waiting window is + // expanded to 9000ms. + double waitTime = timeWindow * failures + // grace period for the last round of attempt + timeWindow * (failures + 1) * r.nextDouble(); // expanding time window for each failure + LOG.warn("DFS chooseDataNode: got # " + (failures + 1) + " IOException, will wait for " + waitTime + " msec."); + Thread.sleep((long)waitTime); + } catch (InterruptedException iex) { + } + deadNodes.clear(); //2nd option is to remove only nodes[blockId] + openInfo(); + block = getBlockAt(block.getStartOffset()); + failures++; + continue; + } + } + } + + private void fetchBlockByteRange(LocatedBlock block, long start, + long end, byte[] buf, int offset) throws IOException { + // + // Connect to best DataNode for desired Block, with potential offset + // + Socket dn = null; + int numAttempts = block.getLocations().length; + IOException ioe = null; + failures = 0; + + while (dn == null && numAttempts-- > 0 ) { + DNAddrPair retval = chooseDataNode(block); + DatanodeInfo chosenNode = retval.info; + InetSocketAddress targetAddr = retval.addr; + BlockReader reader = null; + + try { + dn = socketFactory.createSocket(); + NetUtils.connect(dn, targetAddr, socketTimeout); + dn.setSoTimeout(socketTimeout); + + int len = (int) (end - start + 1); + + reader = BlockReader.newBlockReader(dn, src, + block.getBlock().getBlockId(), + block.getBlock().getGenerationStamp(), + start, len, buffersize, + verifyChecksum, clientName); + int nread = reader.readAll(buf, offset, len); + if (nread != len) { + throw new IOException("truncated return from reader.read(): " + + "excpected " + len + ", got " + nread); + } + return; + } catch (ChecksumException e) { + ioe = e; + LOG.warn("fetchBlockByteRange(). Got a checksum exception for " + + src + " at " + block.getBlock() + ":" + + e.getPos() + " from " + chosenNode.getName()); + reportChecksumFailure(src, block.getBlock(), chosenNode); + } catch (IOException e) { + ioe = e; + LOG.warn("Failed to connect to " + targetAddr + + " for file " + src + + " for block " + block.getBlock().getBlockId() + ":" + + StringUtils.stringifyException(e)); + } finally { + IOUtils.closeStream(reader); + IOUtils.closeSocket(dn); + dn = null; + } + // Put chosen node into dead list, continue + addToDeadNodes(chosenNode); + } + throw (ioe == null) ? new IOException("Could not read data") : ioe; + } + + /** + * Read bytes starting from the specified position. 
+ * + * @param position start read from this position + * @param buffer read buffer + * @param offset offset into buffer + * @param length number of bytes to read + * + * @return actual number of bytes read + */ + @Override + public int read(long position, byte[] buffer, int offset, int length) + throws IOException { + // sanity checks + checkOpen(); + if (closed) { + throw new IOException("Stream closed"); + } + long filelen = getFileLength(); + if ((position < 0) || (position >= filelen)) { + return -1; + } + int realLen = length; + if ((position + length) > filelen) { + realLen = (int)(filelen - position); + } + + // determine the block and byte range within the block + // corresponding to position and realLen + List blockRange = getBlockRange(position, realLen); + int remaining = realLen; + for (LocatedBlock blk : blockRange) { + long targetStart = position - blk.getStartOffset(); + long bytesToRead = Math.min(remaining, blk.getBlockSize() - targetStart); + fetchBlockByteRange(blk, targetStart, + targetStart + bytesToRead - 1, buffer, offset); + remaining -= bytesToRead; + position += bytesToRead; + offset += bytesToRead; + } + assert remaining == 0 : "Wrong number of bytes read."; + if (stats != null) { + stats.incrementBytesRead(realLen); + } + return realLen; + } + + @Override + public long skip(long n) throws IOException { + if ( n > 0 ) { + long curPos = getPos(); + long fileLen = getFileLength(); + if( n+curPos > fileLen ) { + n = fileLen - curPos; + } + seek(curPos+n); + return n; + } + return n < 0 ? -1 : 0; + } + + /** + * Seek to a new arbitrary location + */ + @Override + public synchronized void seek(long targetPos) throws IOException { + if (targetPos > getFileLength()) { + throw new IOException("Cannot seek after EOF"); + } + boolean done = false; + if (pos <= targetPos && targetPos <= blockEnd) { + // + // If this seek is to a positive position in the current + // block, and this piece of data might already be lying in + // the TCP buffer, then just eat up the intervening data. + // + int diff = (int)(targetPos - pos); + if (diff <= TCP_WINDOW_SIZE) { + try { + pos += blockReader.skip(diff); + if (pos == targetPos) { + done = true; + } + } catch (IOException e) {//make following read to retry + LOG.debug("Exception while seek to " + targetPos + " from " + + currentBlock +" of " + src + " from " + currentNode + + ": " + StringUtils.stringifyException(e)); + } + } + } + if (!done) { + pos = targetPos; + blockEnd = -1; + } + } + + /** + * Same as {@link #seekToNewSource(long)} except that it does not exclude + * the current datanode and might connect to the same node. + */ + private synchronized boolean seekToBlockSource(long targetPos) + throws IOException { + currentNode = blockSeekTo(targetPos); + return true; + } + + /** + * Seek to given position on a node other than the current node. If + * a node other than the current node is found, then returns true. + * If another node could not be found, then returns false. + */ + @Override + public synchronized boolean seekToNewSource(long targetPos) throws IOException { + boolean markedDead = deadNodes.containsKey(currentNode); + addToDeadNodes(currentNode); + DatanodeInfo oldNode = currentNode; + DatanodeInfo newNode = blockSeekTo(targetPos); + if (!markedDead) { + /* remove it from deadNodes. blockSeekTo could have cleared + * deadNodes and added currentNode again. Thats ok. 
*/ + deadNodes.remove(oldNode); + } + if (!oldNode.getStorageID().equals(newNode.getStorageID())) { + currentNode = newNode; + return true; + } else { + return false; + } + } + + /** + */ + @Override + public synchronized long getPos() throws IOException { + return pos; + } + + /** + */ + @Override + public synchronized int available() throws IOException { + if (closed) { + throw new IOException("Stream closed"); + } + return (int) (getFileLength() - pos); + } + + /** + * We definitely don't support marks + */ + @Override + public boolean markSupported() { + return false; + } + @Override + public void mark(int readLimit) { + } + @Override + public void reset() throws IOException { + throw new IOException("Mark/reset not supported"); + } + } + + static class DFSDataInputStream extends FSDataInputStream { + DFSDataInputStream(DFSInputStream in) + throws IOException { + super(in); + } + + /** + * Returns the datanode from which the stream is currently reading. + */ + public DatanodeInfo getCurrentDatanode() { + return ((DFSInputStream)in).getCurrentDatanode(); + } + + /** + * Returns the block containing the target position. + */ + public Block getCurrentBlock() { + return ((DFSInputStream)in).getCurrentBlock(); + } + + /** + * Return collection of blocks that has already been located. + */ + synchronized List getAllBlocks() throws IOException { + return ((DFSInputStream)in).getAllBlocks(); + } + + } + + /**************************************************************** + * DFSOutputStream creates files from a stream of bytes. + * + * The client application writes data that is cached internally by + * this stream. Data is broken up into packets, each packet is + * typically 64K in size. A packet comprises of chunks. Each chunk + * is typically 512 bytes and has an associated checksum with it. + * + * When a client application fills up the currentPacket, it is + * enqueued into dataQueue. The DataStreamer thread picks up + * packets from the dataQueue, sends it to the first datanode in + * the pipeline and moves it from the dataQueue to the ackQueue. + * The ResponseProcessor receives acks from the datanodes. When an + * successful ack for a packet is received from all datanodes, the + * ResponseProcessor removes the corresponding packet from the + * ackQueue. + * + * In case of error, all outstanding packets and moved from + * ackQueue. A new pipeline is setup by eliminating the bad + * datanode from the original pipeline. The DataStreamer now + * starts sending packets from the dataQueue. + ****************************************************************/ + class DFSOutputStream extends FSOutputSummer implements Syncable { + private Socket s; + boolean closed = false; + + private String src; + private DataOutputStream blockStream; + private DataInputStream blockReplyStream; + private Block block; + final private long blockSize; + private DataChecksum checksum; + private LinkedList dataQueue = new LinkedList(); + private LinkedList ackQueue = new LinkedList(); + private Packet currentPacket = null; + private int maxPackets = 80; // each packet 64K, total 5MB + // private int maxPackets = 1000; // each packet 64K, total 64MB + private DataStreamer streamer = new DataStreamer();; + private ResponseProcessor response = null; + private long currentSeqno = 0; + private long bytesCurBlock = 0; // bytes writen in current block + private int packetSize = 0; // write packet size, including the header. 
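The class comment above describes packets moving from dataQueue to ackQueue and being dropped once every datanode acknowledges them. A minimal, self-contained sketch of that two-queue handoff follows; the names are simplified stand-ins, not the real DFSClient types:

import java.util.LinkedList;

// Simplified dataQueue -> ackQueue handoff between a writer, a streamer and a responder.
class TwoQueueSketch {
    static class Pkt { final long seqno; Pkt(long s) { seqno = s; } }

    private final LinkedList<Pkt> dataQueue = new LinkedList<Pkt>();
    private final LinkedList<Pkt> ackQueue = new LinkedList<Pkt>();

    void enqueue(Pkt p) {
        synchronized (dataQueue) {
            dataQueue.addLast(p);
            dataQueue.notifyAll();             // wake the streamer
        }
    }

    // streamer side: take the next packet, then park it on ackQueue until acked
    Pkt sendOne() throws InterruptedException {
        Pkt one;
        synchronized (dataQueue) {
            while (dataQueue.isEmpty()) {
                dataQueue.wait(1000);
            }
            one = dataQueue.removeFirst();
        }
        synchronized (ackQueue) {
            ackQueue.addLast(one);
        }
        return one;                            // caller writes it to the pipeline
    }

    // responder side: drop the packet once all datanodes have acknowledged it
    void ackOne(long seqno) {
        synchronized (ackQueue) {
            if (!ackQueue.isEmpty() && ackQueue.getFirst().seqno == seqno) {
                ackQueue.removeFirst();
                ackQueue.notifyAll();          // unblock flush()/close() waiters
            }
        }
    }
}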
+ private int chunksPerPacket = 0; + private DatanodeInfo[] nodes = null; // list of targets for current block + private volatile boolean hasError = false; + private volatile int errorIndex = 0; + private volatile IOException lastException = null; + private long artificialSlowdown = 0; + private long lastFlushOffset = -1; // offset when flush was invoked + private boolean persistBlocks = false; // persist blocks on namenode + private int recoveryErrorCount = 0; // number of times block recovery failed + private int maxRecoveryErrorCount = 5; // try block recovery 5 times + private volatile boolean appendChunk = false; // appending to existing partial block + private long initialFileSize = 0; // at time of file open + + private void setLastException(IOException e) { + if (lastException == null) { + lastException = e; + } + } + + private class Packet { + ByteBuffer buffer; // only one of buf and buffer is non-null + byte[] buf; + long seqno; // sequencenumber of buffer in block + long offsetInBlock; // offset in block + boolean lastPacketInBlock; // is this the last packet in block? + int numChunks; // number of chunks currently in packet + int maxChunks; // max chunks in packet + int dataStart; + int dataPos; + int checksumStart; + int checksumPos; + + // create a new packet + Packet(int pktSize, int chunksPerPkt, long offsetInBlock) { + this.lastPacketInBlock = false; + this.numChunks = 0; + this.offsetInBlock = offsetInBlock; + this.seqno = currentSeqno; + currentSeqno++; + + buffer = null; + buf = new byte[pktSize]; + + checksumStart = DataNode.PKT_HEADER_LEN + SIZE_OF_INTEGER; + checksumPos = checksumStart; + dataStart = checksumStart + chunksPerPkt * checksum.getChecksumSize(); + dataPos = dataStart; + maxChunks = chunksPerPkt; + } + + void writeData(byte[] inarray, int off, int len) { + if ( dataPos + len > buf.length) { + throw new BufferOverflowException(); + } + System.arraycopy(inarray, off, buf, dataPos, len); + dataPos += len; + } + + void writeChecksum(byte[] inarray, int off, int len) { + if (checksumPos + len > dataStart) { + throw new BufferOverflowException(); + } + System.arraycopy(inarray, off, buf, checksumPos, len); + checksumPos += len; + } + + /** + * Returns ByteBuffer that contains one full packet, including header. + */ + ByteBuffer getBuffer() { + /* Once this is called, no more data can be added to the packet. + * setting 'buf' to null ensures that. + * This is called only when the packet is ready to be sent. + */ + if (buffer != null) { + return buffer; + } + + //prepare the header and close any gap between checksum and data. + + int dataLen = dataPos - dataStart; + int checksumLen = checksumPos - checksumStart; + + if (checksumPos != dataStart) { + /* move the checksum to cover the gap. + * This can happen for the last packet. + */ + System.arraycopy(buf, checksumStart, buf, + dataStart - checksumLen , checksumLen); + } + + int pktLen = SIZE_OF_INTEGER + dataLen + checksumLen; + + //normally dataStart == checksumPos, i.e., offset is zero. + buffer = ByteBuffer.wrap(buf, dataStart - checksumPos, + DataNode.PKT_HEADER_LEN + pktLen); + buf = null; + buffer.mark(); + + /* write the header and data length. + * The format is described in comment before DataNode.BlockSender + */ + buffer.putInt(pktLen); // pktSize + buffer.putLong(offsetInBlock); + buffer.putLong(seqno); + buffer.put((byte) ((lastPacketInBlock) ? 1 : 0)); + //end of pkt header + buffer.putInt(dataLen); // actual data length, excluding checksum. 
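A back-of-the-envelope sketch of the on-the-wire packet size produced by getBuffer() above. The header constant is inferred from the fields it writes (an int, two longs and a byte, followed by a separate dataLen int), not quoted from DataNode.java:

// Packet size arithmetic inferred from the header written in getBuffer().
public class PacketSizeSketch {
    static final int PKT_HEADER_LEN = 4 + 8 + 8 + 1; // pktLen, offsetInBlock, seqno, lastPacket flag
    static final int SIZE_OF_INTEGER = 4;            // the dataLen field that follows the header

    // bytes that travel on the wire for one packet
    static int onWireSize(int dataLen, int checksumLen) {
        int pktLen = SIZE_OF_INTEGER + dataLen + checksumLen; // value written as the first int
        return PKT_HEADER_LEN + pktLen;
    }

    public static void main(String[] args) {
        // e.g. 127 chunks of 512 bytes, each with a 4-byte CRC32
        int chunks = 127, bytesPerChecksum = 512, checksumSize = 4;
        System.out.println("packet bytes on wire: "
            + onWireSize(chunks * bytesPerChecksum, chunks * checksumSize)); // 65557
    }
}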
+ + buffer.reset(); + return buffer; + } + } + + // + // The DataStreamer class is responsible for sending data packets to the + // datanodes in the pipeline. It retrieves a new blockid and block locations + // from the namenode, and starts streaming packets to the pipeline of + // Datanodes. Every packet has a sequence number associated with + // it. When all the packets for a block are sent out and acks for each + // if them are received, the DataStreamer closes the current block. + // + private class DataStreamer extends Daemon { + + private volatile boolean closed = false; + + public void run() { + while (!closed && clientRunning) { + + // if the Responder encountered an error, shutdown Responder + if (hasError && response != null) { + try { + response.close(); + response.join(); + response = null; + } catch (InterruptedException e) { + } + } + + Packet one = null; + synchronized (dataQueue) { + + // process IO errors if any + boolean doSleep = processDatanodeError(hasError, false); + + // wait for a packet to be sent. + while ((!closed && !hasError && clientRunning + && dataQueue.size() == 0) || doSleep) { + try { + dataQueue.wait(1000); + } catch (InterruptedException e) { + } + doSleep = false; + } + if (closed || hasError || dataQueue.size() == 0 || !clientRunning) { + continue; + } + + try { + // get packet to be sent. + one = dataQueue.getFirst(); + long offsetInBlock = one.offsetInBlock; + + // get new block from namenode. + if (blockStream == null) { + LOG.debug("Allocating new block"); + nodes = nextBlockOutputStream(src); + this.setName("DataStreamer for file " + src + + " block " + block); + response = new ResponseProcessor(nodes); + response.start(); + } + + if (offsetInBlock >= blockSize) { + throw new IOException("BlockSize " + blockSize + + " is smaller than data size. " + + " Offset of packet in block " + + offsetInBlock + + " Aborting file " + src); + } + + ByteBuffer buf = one.getBuffer(); + + // move packet from dataQueue to ackQueue + dataQueue.removeFirst(); + dataQueue.notifyAll(); + synchronized (ackQueue) { + ackQueue.addLast(one); + ackQueue.notifyAll(); + } + + // write out data to remote datanode + blockStream.write(buf.array(), buf.position(), buf.remaining()); + + if (one.lastPacketInBlock) { + blockStream.writeInt(0); // indicate end-of-block + } + blockStream.flush(); + if (LOG.isDebugEnabled()) { + LOG.debug("DataStreamer block " + block + + " wrote packet seqno:" + one.seqno + + " size:" + buf.remaining() + + " offsetInBlock:" + one.offsetInBlock + + " lastPacketInBlock:" + one.lastPacketInBlock); + } + } catch (Throwable e) { + LOG.warn("DataStreamer Exception: " + + StringUtils.stringifyException(e)); + if (e instanceof IOException) { + setLastException((IOException)e); + } + hasError = true; + } + } + + if (closed || hasError || !clientRunning) { + continue; + } + + // Is this block full? 
+ if (one.lastPacketInBlock) { + synchronized (ackQueue) { + while (!hasError && ackQueue.size() != 0 && clientRunning) { + try { + ackQueue.wait(); // wait for acks to arrive from datanodes + } catch (InterruptedException e) { + } + } + } + LOG.debug("Closing old block " + block); + this.setName("DataStreamer for file " + src); + + response.close(); // ignore all errors in Response + try { + response.join(); + response = null; + } catch (InterruptedException e) { + } + + if (closed || hasError || !clientRunning) { + continue; + } + + synchronized (dataQueue) { + try { + blockStream.close(); + blockReplyStream.close(); + } catch (IOException e) { + } + nodes = null; + response = null; + blockStream = null; + blockReplyStream = null; + } + } + if (progress != null) { progress.progress(); } + + // This is used by unit test to trigger race conditions. + if (artificialSlowdown != 0 && clientRunning) { + try { + Thread.sleep(artificialSlowdown); + } catch (InterruptedException e) {} + } + } + } + + // shutdown thread + void close() { + closed = true; + synchronized (dataQueue) { + dataQueue.notifyAll(); + } + synchronized (ackQueue) { + ackQueue.notifyAll(); + } + this.interrupt(); + } + } + + // + // Processes reponses from the datanodes. A packet is removed + // from the ackQueue when its response arrives. + // + private class ResponseProcessor extends Thread { + + private volatile boolean closed = false; + private DatanodeInfo[] targets = null; + private boolean lastPacketInBlock = false; + + ResponseProcessor (DatanodeInfo[] targets) { + this.targets = targets; + } + + public void run() { + + this.setName("ResponseProcessor for block " + block); + + while (!closed && clientRunning && !lastPacketInBlock) { + // process responses from datanodes. + try { + // verify seqno from datanode + long seqno = blockReplyStream.readLong(); + LOG.debug("DFSClient received ack for seqno " + seqno); + if (seqno == -1) { + continue; + } else if (seqno == -2) { + // no nothing + } else { + Packet one = null; + synchronized (ackQueue) { + one = ackQueue.getFirst(); + } + if (one.seqno != seqno) { + throw new IOException("Responseprocessor: Expecting seqno " + + " for block " + block + + one.seqno + " but received " + seqno); + } + lastPacketInBlock = one.lastPacketInBlock; + } + + // processes response status from all datanodes. + for (int i = 0; i < targets.length && clientRunning; i++) { + short reply = blockReplyStream.readShort(); + if (reply != DataTransferProtocol.OP_STATUS_SUCCESS) { + errorIndex = i; // first bad datanode + throw new IOException("Bad response " + reply + + " for block " + block + + " from datanode " + + targets[i].getName()); + } + } + + synchronized (ackQueue) { + ackQueue.removeFirst(); + ackQueue.notifyAll(); + } + } catch (Exception e) { + if (!closed) { + hasError = true; + if (e instanceof IOException) { + setLastException((IOException)e); + } + LOG.warn("DFSOutputStream ResponseProcessor exception " + + " for block " + block + + StringUtils.stringifyException(e)); + closed = true; + } + } + + synchronized (dataQueue) { + dataQueue.notifyAll(); + } + synchronized (ackQueue) { + ackQueue.notifyAll(); + } + } + } + + void close() { + closed = true; + this.interrupt(); + } + } + + // If this stream has encountered any errors so far, shutdown + // threads and mark stream as closed. Returns true if we should + // sleep for a while after returning from this call. 
+ // + private boolean processDatanodeError(boolean hasError, boolean isAppend) { + if (!hasError) { + return false; + } + if (response != null) { + LOG.info("Error Recovery for block " + block + + " waiting for responder to exit. "); + return true; + } + if (errorIndex >= 0) { + LOG.warn("Error Recovery for block " + block + + " bad datanode[" + errorIndex + "] " + + (nodes == null? "nodes == null": nodes[errorIndex].getName())); + } + + if (blockStream != null) { + try { + blockStream.close(); + blockReplyStream.close(); + } catch (IOException e) { + } + } + blockStream = null; + blockReplyStream = null; + + // move packets from ack queue to front of the data queue + synchronized (ackQueue) { + dataQueue.addAll(0, ackQueue); + ackQueue.clear(); + } + + boolean success = false; + while (!success && clientRunning) { + DatanodeInfo[] newnodes = null; + if (nodes == null) { + String msg = "Could not get block locations. " + + "Source file \"" + src + + "\" - Aborting..."; + LOG.warn(msg); + setLastException(new IOException(msg)); + closed = true; + if (streamer != null) streamer.close(); + return false; + } + StringBuilder pipelineMsg = new StringBuilder(); + for (int j = 0; j < nodes.length; j++) { + pipelineMsg.append(nodes[j].getName()); + if (j < nodes.length - 1) { + pipelineMsg.append(", "); + } + } + // remove bad datanode from list of datanodes. + // If errorIndex was not set (i.e. appends), then do not remove + // any datanodes + // + if (errorIndex < 0) { + newnodes = nodes; + } else { + if (nodes.length <= 1) { + lastException = new IOException("All datanodes " + pipelineMsg + + " are bad. Aborting..."); + closed = true; + if (streamer != null) streamer.close(); + return false; + } + LOG.warn("Error Recovery for block " + block + + " in pipeline " + pipelineMsg + + ": bad datanode " + nodes[errorIndex].getName()); + newnodes = new DatanodeInfo[nodes.length-1]; + System.arraycopy(nodes, 0, newnodes, 0, errorIndex); + System.arraycopy(nodes, errorIndex+1, newnodes, errorIndex, + newnodes.length-errorIndex); + } + + // Tell the primary datanode to do error recovery + // by stamping appropriate generation stamps. + // + LocatedBlock newBlock = null; + ClientDatanodeProtocol primary = null; + DatanodeInfo primaryNode = null; + try { + // Pick the "least" datanode as the primary datanode to avoid deadlock. + primaryNode = Collections.min(Arrays.asList(newnodes)); + primary = createClientDatanodeProtocolProxy(primaryNode, conf); + newBlock = primary.recoverBlock(block, isAppend, newnodes); + } catch (IOException e) { + recoveryErrorCount++; + if (recoveryErrorCount > maxRecoveryErrorCount) { + if (nodes.length > 1) { + // if the primary datanode failed, remove it from the list. + // The original bad datanode is left in the list because it is + // conservative to remove only one datanode in one iteration. + for (int j = 0; j < nodes.length; j++) { + if (nodes[j].equals(primaryNode)) { + errorIndex = j; // forget original bad node. + } + } + // remove primary node from list + newnodes = new DatanodeInfo[nodes.length-1]; + System.arraycopy(nodes, 0, newnodes, 0, errorIndex); + System.arraycopy(nodes, errorIndex+1, newnodes, errorIndex, + newnodes.length-errorIndex); + nodes = newnodes; + LOG.warn("Error Recovery for block " + block + " failed " + + " because recovery from primary datanode " + + primaryNode + " failed " + recoveryErrorCount + + " times. " + " Pipeline was " + pipelineMsg + + ". 
Marking primary datanode as bad."); + recoveryErrorCount = 0; + errorIndex = -1; + return true; // sleep when we return from here + } + String emsg = "Error Recovery for block " + block + " failed " + + " because recovery from primary datanode " + + primaryNode + " failed " + recoveryErrorCount + + " times. " + " Pipeline was " + pipelineMsg + + ". Aborting..."; + LOG.warn(emsg); + lastException = new IOException(emsg); + closed = true; + if (streamer != null) streamer.close(); + return false; // abort with IOexception + } + LOG.warn("Error Recovery for block " + block + " failed " + + " because recovery from primary datanode " + + primaryNode + " failed " + recoveryErrorCount + + " times. " + " Pipeline was " + pipelineMsg + + ". Will retry..."); + return true; // sleep when we return from here + } finally { + RPC.stopProxy(primary); + } + recoveryErrorCount = 0; // block recovery successful + + // If the block recovery generated a new generation stamp, use that + // from now on. Also, setup new pipeline + // + if (newBlock != null) { + block = newBlock.getBlock(); + nodes = newBlock.getLocations(); + } + + this.hasError = false; + lastException = null; + errorIndex = 0; + success = createBlockOutputStream(nodes, clientName, true); + } + + response = new ResponseProcessor(nodes); + response.start(); + return false; // do not sleep, continue processing + } + + private void isClosed() throws IOException { + if (closed && lastException != null) { + throw lastException; + } + } + + // + // returns the list of targets, if any, that is being currently used. + // + DatanodeInfo[] getPipeline() { + synchronized (dataQueue) { + if (nodes == null) { + return null; + } + DatanodeInfo[] value = new DatanodeInfo[nodes.length]; + for (int i = 0; i < nodes.length; i++) { + value[i] = nodes[i]; + } + return value; + } + } + + private Progressable progress; + + private DFSOutputStream(String src, long blockSize, Progressable progress, + int bytesPerChecksum) throws IOException { + super(new CRC32(), bytesPerChecksum, 4); + this.src = src; + this.blockSize = blockSize; + this.progress = progress; + if (progress != null) { + LOG.debug("Set non-null progress callback on DFSOutputStream "+src); + } + + if ( bytesPerChecksum < 1 || blockSize % bytesPerChecksum != 0) { + throw new IOException("io.bytes.per.checksum(" + bytesPerChecksum + + ") and blockSize(" + blockSize + + ") do not match. " + "blockSize should be a " + + "multiple of io.bytes.per.checksum"); + + } + checksum = DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, + bytesPerChecksum); + } + + /** + * Create a new output stream to the given DataNode. + * @see ClientProtocol#create(String, FsPermission, String, boolean, short, long) + */ + DFSOutputStream(String src, FsPermission masked, boolean overwrite, + short replication, long blockSize, Progressable progress, + int buffersize, int bytesPerChecksum) throws IOException { + this(src, blockSize, progress, bytesPerChecksum); + + computePacketChunkSize(writePacketSize, bytesPerChecksum); + + try { + namenode.create( + src, masked, clientName, overwrite, replication, blockSize); + } catch(RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + NSQuotaExceededException.class, + DSQuotaExceededException.class); + } + streamer.start(); + } + + /** + * Create a new output stream to the given DataNode. 
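The pipeline trimming that processDatanodeError() performs when it drops nodes[errorIndex] boils down to two System.arraycopy calls. A standalone sketch, with plain strings standing in for DatanodeInfo:

import java.util.Arrays;

// Remove the datanode at errorIndex from a pipeline array.
public class PipelineTrimSketch {
    static String[] withoutIndex(String[] nodes, int errorIndex) {
        String[] newnodes = new String[nodes.length - 1];
        System.arraycopy(nodes, 0, newnodes, 0, errorIndex);
        System.arraycopy(nodes, errorIndex + 1, newnodes, errorIndex,
                         newnodes.length - errorIndex);
        return newnodes;
    }

    public static void main(String[] args) {
        String[] pipeline = { "dn1:50010", "dn2:50010", "dn3:50010" };
        System.out.println(Arrays.toString(withoutIndex(pipeline, 1))); // [dn1:50010, dn3:50010]
    }
}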
+ * @see ClientProtocol#create(String, FsPermission, String, boolean, short, long) + */ + DFSOutputStream(String src, int buffersize, Progressable progress, + LocatedBlock lastBlock, FileStatus stat, + int bytesPerChecksum) throws IOException { + this(src, stat.getBlockSize(), progress, bytesPerChecksum); + initialFileSize = stat.getLen(); // length of file when opened + + // + // The last partial block of the file has to be filled. + // + if (lastBlock != null) { + block = lastBlock.getBlock(); + long usedInLastBlock = stat.getLen() % blockSize; + int freeInLastBlock = (int)(blockSize - usedInLastBlock); + + // calculate the amount of free space in the pre-existing + // last crc chunk + int usedInCksum = (int)(stat.getLen() % bytesPerChecksum); + int freeInCksum = bytesPerChecksum - usedInCksum; + + // if there is space in the last block, then we have to + // append to that block + if (freeInLastBlock > blockSize) { + throw new IOException("The last block for file " + + src + " is full."); + } + + // indicate that we are appending to an existing block + bytesCurBlock = lastBlock.getBlockSize(); + + if (usedInCksum > 0 && freeInCksum > 0) { + // if there is space in the last partial chunk, then + // setup in such a way that the next packet will have only + // one chunk that fills up the partial chunk. + // + computePacketChunkSize(0, freeInCksum); + resetChecksumChunk(freeInCksum); + this.appendChunk = true; + } else { + // if the remaining space in the block is smaller than + // that expected size of of a packet, then create + // smaller size packet. + // + computePacketChunkSize(Math.min(writePacketSize, freeInLastBlock), + bytesPerChecksum); + } + + // setup pipeline to append to the last block XXX retries?? + nodes = lastBlock.getLocations(); + errorIndex = -1; // no errors yet. + if (nodes.length < 1) { + throw new IOException("Unable to retrieve blocks locations " + + " for last block " + block + + "of file " + src); + + } + processDatanodeError(true, true); + streamer.start(); + } + else { + computePacketChunkSize(writePacketSize, bytesPerChecksum); + streamer.start(); + } + } + + private void computePacketChunkSize(int psize, int csize) { + int chunkSize = csize + checksum.getChecksumSize(); + int n = DataNode.PKT_HEADER_LEN + SIZE_OF_INTEGER; + chunksPerPacket = Math.max((psize - n + chunkSize-1)/chunkSize, 1); + packetSize = n + chunkSize*chunksPerPacket; + if (LOG.isDebugEnabled()) { + LOG.debug("computePacketChunkSize: src=" + src + + ", chunkSize=" + chunkSize + + ", chunksPerPacket=" + chunksPerPacket + + ", packetSize=" + packetSize); + } + } + + /** + * Open a DataOutputStream to a DataNode so that it can be written to. + * This happens when a file is created and each time a new block is allocated. + * Must get block ID and the IDs of the destinations from the namenode. + * Returns the list of target datanodes. + */ + private DatanodeInfo[] nextBlockOutputStream(String client) throws IOException { + LocatedBlock lb = null; + boolean retry = false; + DatanodeInfo[] nodes; + int count = conf.getInt("dfs.client.block.write.retries", 3); + boolean success; + do { + hasError = false; + lastException = null; + errorIndex = 0; + retry = false; + nodes = null; + success = false; + + long startTime = System.currentTimeMillis(); + lb = locateFollowingBlock(startTime); + block = lb.getBlock(); + nodes = lb.getLocations(); + + // + // Connect to first DataNode in the list. 
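A worked example of the arithmetic in computePacketChunkSize(), using the typical values mentioned in the comments above (64K write packets, 512-byte chunks, 4-byte CRC32); the header constant is inferred rather than quoted:

// computePacketChunkSize() arithmetic for the commonly described defaults.
public class PacketChunkMath {
    public static void main(String[] args) {
        int psize = 64 * 1024;        // assumed writePacketSize
        int csize = 512;              // assumed bytesPerChecksum
        int checksumSize = 4;         // CRC32
        int pktHeaderLen = 4 + 8 + 8 + 1;

        int chunkSize = csize + checksumSize;                   // 516
        int n = pktHeaderLen + 4 /* SIZE_OF_INTEGER */;         // 25
        int chunksPerPacket = Math.max((psize - n + chunkSize - 1) / chunkSize, 1); // 127
        int packetSize = n + chunkSize * chunksPerPacket;       // 65557, slightly over 64K

        System.out.println("chunksPerPacket=" + chunksPerPacket
            + " packetSize=" + packetSize);
    }
}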
+ // + success = createBlockOutputStream(nodes, clientName, false); + + if (!success) { + LOG.info("Abandoning block " + block); + namenode.abandonBlock(block, src, clientName); + + // Connection failed. Let's wait a little bit and retry + retry = true; + try { + if (System.currentTimeMillis() - startTime > 5000) { + LOG.info("Waiting to find target node: " + nodes[0].getName()); + } + Thread.sleep(6000); + } catch (InterruptedException iex) { + } + } + } while (retry && --count >= 0); + + if (!success) { + throw new IOException("Unable to create new block."); + } + return nodes; + } + + // connects to the first datanode in the pipeline + // Returns true if success, otherwise return failure. + // + private boolean createBlockOutputStream(DatanodeInfo[] nodes, String client, + boolean recoveryFlag) { + String firstBadLink = ""; + if (LOG.isDebugEnabled()) { + for (int i = 0; i < nodes.length; i++) { + LOG.debug("pipeline = " + nodes[i].getName()); + } + } + + // persist blocks on namenode on next flush + persistBlocks = true; + + boolean result = false; + try { + LOG.debug("Connecting to " + nodes[0].getName()); + InetSocketAddress target = NetUtils.createSocketAddr(nodes[0].getName()); + s = socketFactory.createSocket(); + int timeoutValue = 3000 * nodes.length + socketTimeout; + NetUtils.connect(s, target, timeoutValue); + s.setSoTimeout(timeoutValue); + s.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE); + LOG.debug("Send buf size " + s.getSendBufferSize()); + long writeTimeout = HdfsConstants.WRITE_TIMEOUT_EXTENSION * nodes.length + + datanodeWriteTimeout; + + // + // Xmit header info to datanode + // + DataOutputStream out = new DataOutputStream( + new BufferedOutputStream(NetUtils.getOutputStream(s, writeTimeout), + DataNode.SMALL_BUFFER_SIZE)); + blockReplyStream = new DataInputStream(NetUtils.getInputStream(s)); + + out.writeShort( DataTransferProtocol.DATA_TRANSFER_VERSION ); + out.write( DataTransferProtocol.OP_WRITE_BLOCK ); + out.writeLong( block.getBlockId() ); + out.writeLong( block.getGenerationStamp() ); + out.writeInt( nodes.length ); + out.writeBoolean( recoveryFlag ); // recovery flag + Text.writeString( out, client ); + out.writeBoolean(false); // Not sending src node information + out.writeInt( nodes.length - 1 ); + for (int i = 1; i < nodes.length; i++) { + nodes[i].write(out); + } + checksum.writeHeader( out ); + out.flush(); + + // receive ack for connect + firstBadLink = Text.readString(blockReplyStream); + if (firstBadLink.length() != 0) { + throw new IOException("Bad connect ack with firstBadLink " + firstBadLink); + } + + blockStream = out; + result = true; // success + + } catch (IOException ie) { + + LOG.info("Exception in createBlockOutputStream " + ie); + + // find the datanode that matches + if (firstBadLink.length() != 0) { + for (int i = 0; i < nodes.length; i++) { + if (nodes[i].getName().equals(firstBadLink)) { + errorIndex = i; + break; + } + } + } + hasError = true; + setLastException(ie); + blockReplyStream = null; + result = false; + } finally { + if (!result) { + IOUtils.closeSocket(s); + s = null; + } + } + return result; + } + + private LocatedBlock locateFollowingBlock(long start + ) throws IOException { + int retries = conf.getInt("dfs.client.block.write.locateFollowingBlock.retries", 5); + long sleeptime = 400; + while (true) { + long localstart = System.currentTimeMillis(); + while (true) { + try { + return namenode.addBlock(src, clientName); + } catch (RemoteException e) { + IOException ue = + e.unwrapRemoteException(FileNotFoundException.class, + 
AccessControlException.class, + NSQuotaExceededException.class, + DSQuotaExceededException.class); + if (ue != e) { + throw ue; // no need to retry these exceptions + } + + if (NotReplicatedYetException.class.getName(). + equals(e.getClassName())) { + + if (retries == 0) { + throw e; + } else { + --retries; + LOG.info(StringUtils.stringifyException(e)); + if (System.currentTimeMillis() - localstart > 5000) { + LOG.info("Waiting for replication for " + + (System.currentTimeMillis() - localstart) / 1000 + + " seconds"); + } + try { + LOG.warn("NotReplicatedYetException sleeping " + src + + " retries left " + retries); + Thread.sleep(sleeptime); + sleeptime *= 2; + } catch (InterruptedException ie) { + } + } + } else { + throw e; + } + } + } + } + } + + // @see FSOutputSummer#writeChunk() + @Override + protected synchronized void writeChunk(byte[] b, int offset, int len, byte[] checksum) + throws IOException { + checkOpen(); + isClosed(); + + int cklen = checksum.length; + int bytesPerChecksum = this.checksum.getBytesPerChecksum(); + if (len > bytesPerChecksum) { + throw new IOException("writeChunk() buffer size is " + len + + " is larger than supported bytesPerChecksum " + + bytesPerChecksum); + } + if (checksum.length != this.checksum.getChecksumSize()) { + throw new IOException("writeChunk() checksum size is supposed to be " + + this.checksum.getChecksumSize() + + " but found to be " + checksum.length); + } + + synchronized (dataQueue) { + + // If queue is full, then wait till we can create enough space + while (!closed && dataQueue.size() + ackQueue.size() > maxPackets) { + try { + dataQueue.wait(); + } catch (InterruptedException e) { + } + } + isClosed(); + + if (currentPacket == null) { + currentPacket = new Packet(packetSize, chunksPerPacket, + bytesCurBlock); + if (LOG.isDebugEnabled()) { + LOG.debug("DFSClient writeChunk allocating new packet seqno=" + + currentPacket.seqno + + ", src=" + src + + ", packetSize=" + packetSize + + ", chunksPerPacket=" + chunksPerPacket + + ", bytesCurBlock=" + bytesCurBlock); + } + } + + currentPacket.writeChecksum(checksum, 0, cklen); + currentPacket.writeData(b, offset, len); + currentPacket.numChunks++; + bytesCurBlock += len; + + // If packet is full, enqueue it for transmission + // + if (currentPacket.numChunks == currentPacket.maxChunks || + bytesCurBlock == blockSize) { + if (LOG.isDebugEnabled()) { + LOG.debug("DFSClient writeChunk packet full seqno=" + + currentPacket.seqno + + ", src=" + src + + ", bytesCurBlock=" + bytesCurBlock + + ", blockSize=" + blockSize + + ", appendChunk=" + appendChunk); + } + // + // if we allocated a new packet because we encountered a block + // boundary, reset bytesCurBlock. + // + if (bytesCurBlock == blockSize) { + currentPacket.lastPacketInBlock = true; + bytesCurBlock = 0; + lastFlushOffset = -1; + } + dataQueue.addLast(currentPacket); + dataQueue.notifyAll(); + currentPacket = null; + + // If this was the first write after reopening a file, then the above + // write filled up any partial chunk. Tell the summer to generate full + // crc chunks from now on. + if (appendChunk) { + appendChunk = false; + resetChecksumChunk(bytesPerChecksum); + } + int psize = Math.min((int)(blockSize-bytesCurBlock), writePacketSize); + computePacketChunkSize(psize, bytesPerChecksum); + } + } + //LOG.debug("DFSClient writeChunk done length " + len + + // " checksum length " + cklen); + } + + /** + * All data is written out to datanodes. It is not guaranteed + * that data has been flushed to persistent store on the + * datanode. 
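writeChunk() above applies backpressure by blocking while more than maxPackets packets sit across the two queues; with the defaults noted in the field comments (80 packets of roughly 64K) that caps about 5 MB of unacknowledged data per stream. A minimal sketch of that wait loop, with a single counter standing in for the two queue sizes:

// Simplified view of the "queue is full" wait in writeChunk().
class BackpressureSketch {
    private final Object lock = new Object();
    private int outstanding = 0;              // dataQueue.size() + ackQueue.size()
    private final int maxPackets = 80;        // assumed default from the field comment

    void beforeEnqueue() throws InterruptedException {
        synchronized (lock) {
            while (outstanding > maxPackets) {
                lock.wait();                  // woken when an ack drains a packet
            }
            outstanding++;
        }
    }

    void onAck() {
        synchronized (lock) {
            outstanding--;
            lock.notifyAll();
        }
    }
}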
Block allocations are persisted on namenode. + */ + public synchronized void sync() throws IOException { + try { + /* Record current blockOffset. This might be changed inside + * flushBuffer() where a partial checksum chunk might be flushed. + * After the flush, reset the bytesCurBlock back to its previous value, + * any partial checksum chunk will be sent now and in next packet. + */ + long saveOffset = bytesCurBlock; + + // flush checksum buffer, but keep checksum buffer intact + flushBuffer(true); + + LOG.debug("DFSClient flush() : saveOffset " + saveOffset + + " bytesCurBlock " + bytesCurBlock + + " lastFlushOffset " + lastFlushOffset); + + // Flush only if we haven't already flushed till this offset. + if (lastFlushOffset != bytesCurBlock) { + + // record the valid offset of this flush + lastFlushOffset = bytesCurBlock; + + // wait for all packets to be sent and acknowledged + flushInternal(); + } else { + // just discard the current packet since it is already been sent. + currentPacket = null; + } + + // Restore state of stream. Record the last flush offset + // of the last full chunk that was flushed. + // + bytesCurBlock = saveOffset; + + // If any new blocks were allocated since the last flush, + // then persist block locations on namenode. + // + if (persistBlocks) { + namenode.fsync(src, clientName); + persistBlocks = false; + } + } catch (IOException e) { + lastException = new IOException("IOException flush:" + e); + closed = true; + closeThreads(); + throw e; + } + } + + /** + * Waits till all existing data is flushed and confirmations + * received from datanodes. + */ + private synchronized void flushInternal() throws IOException { + checkOpen(); + isClosed(); + + while (!closed) { + synchronized (dataQueue) { + isClosed(); + // + // If there is data in the current buffer, send it across + // + if (currentPacket != null) { + dataQueue.addLast(currentPacket); + dataQueue.notifyAll(); + currentPacket = null; + } + + // wait for all buffers to be flushed to datanodes + if (!closed && dataQueue.size() != 0) { + try { + dataQueue.wait(); + } catch (InterruptedException e) { + } + continue; + } + } + + // wait for all acks to be received back from datanodes + synchronized (ackQueue) { + if (!closed && ackQueue.size() != 0) { + try { + ackQueue.wait(); + } catch (InterruptedException e) { + } + continue; + } + } + + // acquire both the locks and verify that we are + // *really done*. In the case of error recovery, + // packets might move back from ackQueue to dataQueue. + // + synchronized (dataQueue) { + synchronized (ackQueue) { + if (dataQueue.size() + ackQueue.size() == 0) { + break; // we are done + } + } + } + } + } + + /** + * Closes this output stream and releases any system + * resources associated with this stream. + */ + @Override + public void close() throws IOException { + if (closed) { + IOException e = lastException; + if (e == null) + return; + else + throw e; + } + closeInternal(); + leasechecker.remove(src); + + if (s != null) { + s.close(); + s = null; + } + } + + /** + * Harsh abort method that should only be used from tests - this + * is in order to prevent pipeline recovery when eg a DN shuts down. + */ + void abortForTests() throws IOException { + streamer.close(); + response.close(); + closed = true; + } + + /** + * Aborts this output stream and releases any system + * resources associated with this stream. 
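From a caller's point of view, the sync() semantics described above mean that flushed bytes reach the datanodes and block allocations are persisted on the namenode, without any guarantee of durability to datanode disks. A hypothetical usage sketch; the path is a placeholder and the default filesystem is assumed to point at HDFS:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Write a record, make it visible with sync(), then keep writing.
public class SyncUsageSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);            // assumes fs.default.name is an hdfs:// URI
        FSDataOutputStream out = fs.create(new Path("/tmp/sync-demo.log"));
        out.writeBytes("record 1\n");
        out.sync();                                      // flushed bytes are now visible to readers
        out.writeBytes("record 2\n");
        out.close();
        fs.close();
    }
}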
+ */ + synchronized void abort() throws IOException { + if (closed) { + return; + } + setLastException(new IOException("Lease timeout of " + + (hdfsTimeout/1000) + " seconds expired.")); + closeThreads(); + } + + + // shutdown datastreamer and responseprocessor threads. + private void closeThreads() throws IOException { + try { + if (streamer != null) { + streamer.close(); + streamer.join(); + } + + // shutdown response after streamer has exited. + if (response != null) { + response.close(); + response.join(); + response = null; + } + } catch (InterruptedException e) { + throw new IOException("Failed to shutdown response thread"); + } + } + + /** + * Closes this output stream and releases any system + * resources associated with this stream. + */ + private synchronized void closeInternal() throws IOException { + checkOpen(); + isClosed(); + + try { + flushBuffer(); // flush from all upper layers + + // Mark that this packet is the last packet in block. + // If there are no outstanding packets and the last packet + // was not the last one in the current block, then create a + // packet with empty payload. + synchronized (dataQueue) { + if (currentPacket == null && bytesCurBlock != 0) { + currentPacket = new Packet(packetSize, chunksPerPacket, + bytesCurBlock); + } + if (currentPacket != null) { + currentPacket.lastPacketInBlock = true; + } + } + + flushInternal(); // flush all data to Datanodes + isClosed(); // check to see if flushInternal had any exceptions + closed = true; // allow closeThreads() to showdown threads + + closeThreads(); + + synchronized (dataQueue) { + if (blockStream != null) { + blockStream.writeInt(0); // indicate end-of-block to datanode + blockStream.close(); + blockReplyStream.close(); + } + if (s != null) { + s.close(); + s = null; + } + } + + streamer = null; + blockStream = null; + blockReplyStream = null; + + long localstart = System.currentTimeMillis(); + boolean fileComplete = false; + while (!fileComplete) { + fileComplete = namenode.complete(src, clientName); + if (!fileComplete) { + if (!clientRunning || + (hdfsTimeout > 0 && + localstart + hdfsTimeout < System.currentTimeMillis())) { + String msg = "Unable to close file because dfsclient " + + " was unable to contact the HDFS servers." + + " clientRunning " + clientRunning + + " hdfsTimeout " + hdfsTimeout; + LOG.info(msg); + throw new IOException(msg); + } + + try { + Thread.sleep(400); + if (System.currentTimeMillis() - localstart > 5000) { + LOG.info("Could not complete file " + src + " retrying..."); + } + } catch (InterruptedException ie) { + } + } + } + } finally { + closed = true; + } + } + + void setArtificialSlowdown(long period) { + artificialSlowdown = period; + } + + synchronized void setChunksPerPacket(int value) { + chunksPerPacket = Math.min(chunksPerPacket, value); + packetSize = DataNode.PKT_HEADER_LEN + SIZE_OF_INTEGER + + (checksum.getBytesPerChecksum() + + checksum.getChecksumSize()) * chunksPerPacket; + } + + synchronized void setTestFilename(String newname) { + src = newname; + } + + /** + * Returns the size of a file as it was when this stream was opened + */ + long getInitialLen() { + return initialFileSize; + } + } + + void reportChecksumFailure(String file, Block blk, DatanodeInfo dn) { + DatanodeInfo [] dnArr = { dn }; + LocatedBlock [] lblocks = { new LocatedBlock(blk, dnArr) }; + reportChecksumFailure(file, lblocks); + } + + // just reports checksum failure and ignores any exception during the report. 
+ void reportChecksumFailure(String file, LocatedBlock lblocks[]) { + try { + reportBadBlocks(lblocks); + } catch (IOException ie) { + LOG.info("Found corruption while reading " + file + + ". Error repairing corrupt blocks. Bad blocks remain. " + + StringUtils.stringifyException(ie)); + } + } + + /** {@inheritDoc} */ + public String toString() { + return getClass().getSimpleName() + "[clientName=" + clientName + + ", ugi=" + ugi + "]"; + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/DFSUtil.java b/src/hdfs/org/apache/hadoop/hdfs/DFSUtil.java new file mode 100644 index 0000000..88cff6a --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/DFSUtil.java @@ -0,0 +1,228 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs; + +import java.io.UnsupportedEncodingException; +import java.io.IOException; +import java.util.StringTokenizer; +import java.util.Set; +import java.util.HashSet; + +import org.apache.hadoop.fs.BlockLocation; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.CorruptFileBlocks; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.net.NodeBase; +import org.apache.hadoop.security.AccessControlException; + +public class DFSUtil { + /** + * Whether the pathname is valid. Currently prohibits relative paths, + * and names which contain a ":" or "/" + */ + public static boolean isValidName(String src) { + + // Path must be absolute. + if (!src.startsWith(Path.SEPARATOR)) { + return false; + } + + // Check for ".." "." 
":" "/" + StringTokenizer tokens = new StringTokenizer(src, Path.SEPARATOR); + while(tokens.hasMoreTokens()) { + String element = tokens.nextToken(); + if (element.equals("..") || + element.equals(".") || + (element.indexOf(":") >= 0) || + (element.indexOf("/") >= 0)) { + return false; + } + } + return true; + } + + /** + * Given a list of path components returns a path as a UTF8 String + */ + public static String byteArray2String(byte[][] pathComponents) { + if (pathComponents.length == 0) + return ""; + if (pathComponents.length == 1 && pathComponents[0].length == 0) { + return Path.SEPARATOR; + } + try { + StringBuilder result = new StringBuilder(); + for (int i = 0; i < pathComponents.length; i++) { + result.append(new String(pathComponents[i], "UTF-8")); + if (i < pathComponents.length - 1) { + result.append(Path.SEPARATOR_CHAR); + } + } + return result.toString(); + } catch (UnsupportedEncodingException ex) { + assert false : "UTF8 encoding is not supported "; + } + return null; + } + + /** + * Splits the array of bytes into array of arrays of bytes on byte separator + * + * @param bytes + * the array of bytes to split + * @param separator + * the delimiting byte + */ + public static byte[][] bytes2byteArray(byte[] bytes, byte separator) { + return bytes2byteArray(bytes, bytes.length, separator); + } + + /** + * Converts a byte array to a string using UTF8 encoding. + */ + public static String bytes2String(byte[] bytes) { + try { + return new String(bytes, "UTF8"); + } catch(UnsupportedEncodingException e) { + assert false : "UTF8 encoding is not supported "; + } + return null; + } + + /** + * Converts a string to a byte array using UTF8 encoding. + */ + public static byte[] string2Bytes(String str) { + try { + return str.getBytes("UTF8"); + } catch(UnsupportedEncodingException e) { + assert false : "UTF8 encoding is not supported "; + } + return null; + } + + /** + * Splits first len bytes in bytes to array of arrays of bytes on byte + * separator + * + * @param bytes + * the byte array to split + * @param len + * the number of bytes to split + * @param separator + * the delimiting byte + */ + public static byte[][] bytes2byteArray(byte[] bytes, int len, byte separator) { + assert len <= bytes.length; + int splits = 0; + if (len == 0) { + return new byte[][] { null }; + } + // Count the splits. 
Omit multiple separators and the last one + for (int i = 0; i < len; i++) { + if (bytes[i] == separator) { + splits++; + } + } + int last = len - 1; + while (last > -1 && bytes[last--] == separator) { + splits--; + } + if (splits == 0 && bytes[0] == separator) { + return new byte[][] { null }; + } + splits++; + byte[][] result = new byte[splits][]; + int startIndex = 0; + int nextIndex = 0; + int index = 0; + // Build the splits + while (index < splits) { + while (nextIndex < len && bytes[nextIndex] != separator) { + nextIndex++; + } + result[index] = new byte[nextIndex - startIndex]; + System.arraycopy(bytes, startIndex, result[index], 0, nextIndex + - startIndex); + index++; + startIndex = nextIndex + 1; + nextIndex = startIndex; + } + return result; + } + + /** + * Convert a LocatedBlocks to BlockLocations[] + * @param blocks a LocatedBlocks + * @return an array of BlockLocations + */ + public static BlockLocation[] locatedBlocks2Locations(LocatedBlocks blocks) { + if (blocks == null) { + return new BlockLocation[0]; + } + int nrBlocks = blocks.locatedBlockCount(); + BlockLocation[] blkLocations = new BlockLocation[nrBlocks]; + int idx = 0; + for (LocatedBlock blk : blocks.getLocatedBlocks()) { + assert idx < nrBlocks : "Incorrect index"; + DatanodeInfo[] locations = blk.getLocations(); + String[] hosts = new String[locations.length]; + String[] names = new String[locations.length]; + String[] racks = new String[locations.length]; + for (int hCnt = 0; hCnt < locations.length; hCnt++) { + hosts[hCnt] = locations[hCnt].getHostName(); + names[hCnt] = locations[hCnt].getName(); + NodeBase node = new NodeBase(names[hCnt], + locations[hCnt].getNetworkLocation()); + racks[hCnt] = node.toString(); + } + blkLocations[idx] = new BlockLocation(names, hosts, racks, + blk.getStartOffset(), + blk.getBlockSize(), + blk.isCorrupt()); + idx++; + } + return blkLocations; + } + + /** + * maked successive calls to listCorruptFiles to obtain all + * corrupt files + */ + public static String[] getCorruptFiles(DistributedFileSystem dfs) + throws IOException { + Set corruptFiles = new HashSet(); + + String cookie = null; + for (CorruptFileBlocks fbs = dfs.listCorruptFileBlocks("/", cookie); + fbs.getFiles().length > 0; + fbs = dfs.listCorruptFileBlocks("/", cookie)) { + for (String path : fbs.getFiles()) { + corruptFiles.add(path); + } + cookie = fbs.getCookie(); + } + + return corruptFiles.toArray(new String[corruptFiles.size()]); + } + +} + diff --git a/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java b/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java new file mode 100644 index 0000000..7fb75ee --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -0,0 +1,542 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
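A quick usage sketch for the DFSUtil helpers defined above; the expected results follow directly from the code shown (absolute paths only, no ':' or '/' inside a component, UTF-8 round-tripping):

import org.apache.hadoop.hdfs.DFSUtil;

// Exercise isValidName() and the UTF-8 string/byte helpers.
public class DFSUtilDemo {
    public static void main(String[] args) {
        System.out.println(DFSUtil.isValidName("/user/warehouse/part-0"));  // true
        System.out.println(DFSUtil.isValidName("relative/path"));           // false: not absolute
        System.out.println(DFSUtil.isValidName("/user/bad:name"));          // false: ':' in a component
        byte[] raw = DFSUtil.string2Bytes("/user/warehouse");
        System.out.println(DFSUtil.bytes2String(raw));                      // "/user/warehouse"
    }
}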
+ */ + +package org.apache.hadoop.hdfs; + +import java.io.*; +import java.net.*; + +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.*; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction; +import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.DFSClient.DFSOutputStream; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.util.Progressable; + + +/**************************************************************** + * Implementation of the abstract FileSystem for the DFS system. + * This object is the way end-user code interacts with a Hadoop + * DistributedFileSystem. + * + *****************************************************************/ +public class DistributedFileSystem extends FileSystem { + private Path workingDir; + private URI uri; + + DFSClient dfs; + private boolean verifyChecksum = true; + + static{ + Configuration.addDefaultResource("hdfs-default.xml"); + Configuration.addDefaultResource("hdfs-site.xml"); + } + + public DistributedFileSystem() { + } + + /** @deprecated */ + public DistributedFileSystem(InetSocketAddress namenode, + Configuration conf) throws IOException { + initialize(NameNode.getUri(namenode), conf); + } + + /** @deprecated */ + public String getName() { return uri.getAuthority(); } + + public URI getUri() { return uri; } + + public void initialize(URI uri, Configuration conf) throws IOException { + super.initialize(uri, conf); + setConf(conf); + + String host = uri.getHost(); + if (host == null) { + throw new IOException("Incomplete HDFS URI, no host: "+ uri); + } + + InetSocketAddress namenode = NameNode.getAddress(uri.getAuthority()); + this.dfs = new DFSClient(namenode, conf, statistics); + this.uri = NameNode.getUri(namenode); + this.workingDir = getHomeDirectory(); + } + + /** Permit paths which explicitly specify the default port. */ + protected void checkPath(Path path) { + URI thisUri = this.getUri(); + URI thatUri = path.toUri(); + String thatAuthority = thatUri.getAuthority(); + if (thatUri.getScheme() != null + && thatUri.getScheme().equalsIgnoreCase(thisUri.getScheme()) + && thatUri.getPort() == NameNode.DEFAULT_PORT + && thisUri.getPort() == -1 + && thatAuthority.substring(0,thatAuthority.indexOf(":")) + .equalsIgnoreCase(thisUri.getAuthority())) + return; + super.checkPath(path); + } + + /** Normalize paths that explicitly specify the default port. 
*/ + public Path makeQualified(Path path) { + URI thisUri = this.getUri(); + URI thatUri = path.toUri(); + String thatAuthority = thatUri.getAuthority(); + if (thatUri.getScheme() != null + && thatUri.getScheme().equalsIgnoreCase(thisUri.getScheme()) + && thatUri.getPort() == NameNode.DEFAULT_PORT + && thisUri.getPort() == -1 + && thatAuthority.substring(0,thatAuthority.indexOf(":")) + .equalsIgnoreCase(thisUri.getAuthority())) { + path = new Path(thisUri.getScheme(), thisUri.getAuthority(), + thatUri.getPath()); + } + return super.makeQualified(path); + } + + public Path getWorkingDirectory() { + return workingDir; + } + + public long getDefaultBlockSize() { + return dfs.getDefaultBlockSize(); + } + + public short getDefaultReplication() { + return dfs.getDefaultReplication(); + } + + private Path makeAbsolute(Path f) { + if (f.isAbsolute()) { + return f; + } else { + return new Path(workingDir, f); + } + } + + public void setWorkingDirectory(Path dir) { + String result = makeAbsolute(dir).toUri().getPath(); + if (!DFSUtil.isValidName(result)) { + throw new IllegalArgumentException("Invalid DFS directory name " + + result); + } + workingDir = makeAbsolute(dir); + } + + /** {@inheritDoc} */ + public Path getHomeDirectory() { + return new Path("/user/" + dfs.ugi.getUserName()).makeQualified(this); + } + + private String getPathName(Path file) { + checkPath(file); + String result = makeAbsolute(file).toUri().getPath(); + if (!DFSUtil.isValidName(result)) { + throw new IllegalArgumentException("Pathname " + result + " from " + + file+" is not a valid DFS filename."); + } + return result; + } + + + public BlockLocation[] getFileBlockLocations(FileStatus file, long start, + long len) throws IOException { + if (file == null) { + return null; + } + return dfs.getBlockLocations(getPathName(file.getPath()), start, len); + } + + public void setVerifyChecksum(boolean verifyChecksum) { + this.verifyChecksum = verifyChecksum; + } + + public FSDataInputStream open(Path f, int bufferSize) throws IOException { + return new DFSClient.DFSDataInputStream( + dfs.open(getPathName(f), bufferSize, verifyChecksum, statistics)); + } + + /** This optional operation is not yet supported. 
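checkPath() and makeQualified() above accept paths that spell out the namenode's default port even though this filesystem's own URI omits the port. A standalone sketch of that comparison, assuming 8020 as the default (the real code uses NameNode.DEFAULT_PORT, and the host names below are placeholders):

import java.net.URI;

// Treat hdfs://host:defaultPort/... and hdfs://host/... as the same filesystem.
public class DefaultPortSketch {
    static final int ASSUMED_DEFAULT_PORT = 8020;

    static boolean sameDfs(URI fsUri, URI pathUri) {
        if (pathUri.getScheme() == null
            || !pathUri.getScheme().equalsIgnoreCase(fsUri.getScheme())) {
            return false;
        }
        String fsHost = fsUri.getHost(), pathHost = pathUri.getHost();
        if (fsHost == null || pathHost == null || !fsHost.equalsIgnoreCase(pathHost)) {
            return false;
        }
        // equal when the ports match, or when only the path spells out the default port
        return fsUri.getPort() == pathUri.getPort()
            || (fsUri.getPort() == -1 && pathUri.getPort() == ASSUMED_DEFAULT_PORT);
    }

    public static void main(String[] args) {
        URI fs = URI.create("hdfs://namenode.example.com");
        System.out.println(sameDfs(fs, URI.create("hdfs://namenode.example.com:8020/data"))); // true
        System.out.println(sameDfs(fs, URI.create("hdfs://namenode.example.com:9000/data"))); // false
    }
}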
*/ + public FSDataOutputStream append(Path f, int bufferSize, + Progressable progress) throws IOException { + + DFSOutputStream op = (DFSOutputStream)dfs.append(getPathName(f), bufferSize, progress); + return new FSDataOutputStream(op, statistics, op.getInitialLen()); + } + + public FSDataOutputStream create(Path f, FsPermission permission, + boolean overwrite, + int bufferSize, short replication, long blockSize, + Progressable progress) throws IOException { + + return new FSDataOutputStream + (dfs.create(getPathName(f), permission, + overwrite, replication, blockSize, progress, bufferSize), + statistics); + } + + public boolean setReplication(Path src, + short replication + ) throws IOException { + return dfs.setReplication(getPathName(src), replication); + } + + /** + * THIS IS DFS only operations, it is not part of FileSystem + * move blocks from srcs to trg + * and delete srcs afterwards + * all blocks should be the same size + * @param trg existing file to append to + * @param psrcs list of files (same block size, same replication) + * @throws IOException + */ + public void concat(Path trg, Path [] psrcs) throws IOException { + String [] srcs = new String [psrcs.length]; + for(int i=0; i listLocatedStatus(final Path p, + final PathFilter filter) + throws IOException { + return new RemoteIterator() { + private RemoteIterator itor = + dfs.listPathWithLocation(getPathName(p)); + private LocatedFileStatus curStat = null; + + + @Override + public boolean hasNext() throws IOException { + while (curStat == null && itor.hasNext()) { + LocatedFileStatus next =itor.next(); + next.makeQualified(DistributedFileSystem.this); + if (filter.accept(next.getPath())) { + curStat = next; + } + } + return curStat != null; + } + + @Override + public LocatedFileStatus next() throws IOException { + if (!hasNext()) { + throw new java.util.NoSuchElementException("No more entry in " + p); + } + LocatedFileStatus tmp = curStat; + curStat = null; + return tmp; + } + }; + } + + + public boolean mkdirs(Path f, FsPermission permission) throws IOException { + return dfs.mkdirs(getPathName(f), permission); + } + + /** {@inheritDoc} */ + public void close() throws IOException { + try { + super.processDeleteOnExit(); + dfs.close(); + } finally { + super.close(); + } + } + + public String toString() { + return "DFS[" + dfs + "]"; + } + + public DFSClient getClient() { + return dfs; + } + + public static class DiskStatus { + private long capacity; + private long dfsUsed; + private long remaining; + public DiskStatus(long capacity, long dfsUsed, long remaining) { + this.capacity = capacity; + this.dfsUsed = dfsUsed; + this.remaining = remaining; + } + + public long getCapacity() { + return capacity; + } + public long getDfsUsed() { + return dfsUsed; + } + public long getRemaining() { + return remaining; + } + } + + + /** Return the disk usage of the filesystem, including total capacity, + * used space, and remaining space */ + public DiskStatus getDiskStatus() throws IOException { + return dfs.getDiskStatus(); + } + + /** Return the total raw capacity of the filesystem, disregarding + * replication .*/ + public long getRawCapacity() throws IOException{ + return dfs.totalRawCapacity(); + } + + /** Return the total raw used space in the filesystem, disregarding + * replication .*/ + public long getRawUsed() throws IOException{ + return dfs.totalRawUsed(); + } + + /** + * Returns count of blocks with no good replicas left. Normally should be + * zero. 
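The anonymous RemoteIterator built by listLocatedStatus() above uses a peek-and-cache pattern: hasNext() advances the wrapped iterator until an entry passes the filter and caches it, and next() hands the cached entry out exactly once. A generic sketch of the same pattern over a plain java.util.Iterator:

import java.util.Arrays;
import java.util.Iterator;
import java.util.NoSuchElementException;

// Peek-and-cache filtering iterator, mirroring the hasNext()/next() contract above.
class FilteredIterator<T> implements Iterator<T> {
    interface Filter<T> { boolean accept(T t); }

    private final Iterator<T> inner;
    private final Filter<T> filter;
    private T cached = null;

    FilteredIterator(Iterator<T> inner, Filter<T> filter) {
        this.inner = inner;
        this.filter = filter;
    }

    public boolean hasNext() {
        while (cached == null && inner.hasNext()) {
            T candidate = inner.next();
            if (filter.accept(candidate)) {
                cached = candidate;        // remember it until next() is called
            }
        }
        return cached != null;
    }

    public T next() {
        if (!hasNext()) {
            throw new NoSuchElementException("no more entries");
        }
        T result = cached;
        cached = null;
        return result;
    }

    public void remove() { throw new UnsupportedOperationException(); }

    public static void main(String[] args) {
        Iterator<String> it = new FilteredIterator<String>(
            Arrays.asList("part-0", "_logs", "part-1").iterator(),
            new Filter<String>() { public boolean accept(String s) { return !s.startsWith("_"); } });
        while (it.hasNext()) {
            System.out.println(it.next());   // part-0, part-1
        }
    }
}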
+ * + * @throws IOException + */ + public long getMissingBlocksCount() throws IOException { + return dfs.getMissingBlocksCount(); + } + + /** + * Returns count of blocks with one of more replica missing. + * + * @throws IOException + */ + public long getUnderReplicatedBlocksCount() throws IOException { + return dfs.getUnderReplicatedBlocksCount(); + } + + /** + * Returns count of blocks with at least one replica marked corrupt. + * + * @throws IOException + */ + public long getCorruptBlocksCount() throws IOException { + return dfs.getCorruptBlocksCount(); + } + + /** + * {@inheritDoc} + */ + @Override + public CorruptFileBlocks listCorruptFileBlocks(String path, + String cookie) + throws IOException { + return dfs.listCorruptFileBlocks(path, cookie); + } + + /** Return statistics for each datanode. */ + public DatanodeInfo[] getDataNodeStats() throws IOException { + return dfs.datanodeReport(DatanodeReportType.ALL); + } + + /** + * Enter, leave or get safe mode. + * + * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode( + * FSConstants.SafeModeAction) + */ + public boolean setSafeMode(FSConstants.SafeModeAction action) + throws IOException { + return dfs.setSafeMode(action); + } + + /** + * Save namespace image. + * + * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace() + */ + public void saveNamespace() throws AccessControlException, IOException { + dfs.saveNamespace(); + } + + /** + * Refreshes the list of hosts and excluded hosts from the configured + * files. + */ + public void refreshNodes() throws IOException { + dfs.refreshNodes(); + } + + /** + * Finalize previously upgraded files system state. + * @throws IOException + */ + public void finalizeUpgrade() throws IOException { + dfs.finalizeUpgrade(); + } + + public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action + ) throws IOException { + return dfs.distributedUpgradeProgress(action); + } + + /* + * Requests the namenode to dump data strcutures into specified + * file. + */ + public void metaSave(String pathname) throws IOException { + dfs.metaSave(pathname); + } + + /** + * We need to find the blocks that didn't match. Likely only one + * is corrupt but we will report both to the namenode. In the future, + * we can consider figuring out exactly which block is corrupt. + */ + public boolean reportChecksumFailure(Path f, + FSDataInputStream in, long inPos, + FSDataInputStream sums, long sumsPos) { + + LocatedBlock lblocks[] = new LocatedBlock[2]; + + // Find block in data stream. + DFSClient.DFSDataInputStream dfsIn = (DFSClient.DFSDataInputStream) in; + Block dataBlock = dfsIn.getCurrentBlock(); + if (dataBlock == null) { + LOG.error("Error: Current block in data stream is null! "); + return false; + } + DatanodeInfo[] dataNode = {dfsIn.getCurrentDatanode()}; + lblocks[0] = new LocatedBlock(dataBlock, dataNode); + LOG.info("Found checksum error in data stream at block=" + + dataBlock + " on datanode=" + + dataNode[0].getName()); + + // Find block in checksum stream + DFSClient.DFSDataInputStream dfsSums = (DFSClient.DFSDataInputStream) sums; + Block sumsBlock = dfsSums.getCurrentBlock(); + if (sumsBlock == null) { + LOG.error("Error: Current block in checksum stream is null! "); + return false; + } + DatanodeInfo[] sumsNode = {dfsSums.getCurrentDatanode()}; + lblocks[1] = new LocatedBlock(sumsBlock, sumsNode); + LOG.info("Found checksum error in checksum stream at block=" + + sumsBlock + " on datanode=" + + sumsNode[0].getName()); + + // Ask client to delete blocks. 
+ dfs.reportChecksumFailure(f.toString(), lblocks); + + return true; + } + + /** + * Returns the stat information about the file. + * @throws FileNotFoundException if the file does not exist. + */ + public FileStatus getFileStatus(Path f) throws IOException { + FileStatus fi = dfs.getFileInfo(getPathName(f)); + if (fi != null) { + fi.makeQualified(this); + return fi; + } else { + throw new FileNotFoundException("File does not exist: " + f); + } + } + + /** {@inheritDoc} */ + public MD5MD5CRC32FileChecksum getFileChecksum(Path f) throws IOException { + return dfs.getFileChecksum(getPathName(f)); + } + + /** {@inheritDoc }*/ + public void setPermission(Path p, FsPermission permission + ) throws IOException { + dfs.setPermission(getPathName(p), permission); + } + + /** {@inheritDoc }*/ + public void setOwner(Path p, String username, String groupname + ) throws IOException { + if (username == null && groupname == null) { + throw new IOException("username == null && groupname == null"); + } + dfs.setOwner(getPathName(p), username, groupname); + } + + /** {@inheritDoc }*/ + public void setTimes(Path p, long mtime, long atime + ) throws IOException { + dfs.setTimes(getPathName(p), mtime, atime); + } + + +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/HDFSPolicyProvider.java b/src/hdfs/org/apache/hadoop/hdfs/HDFSPolicyProvider.java new file mode 100644 index 0000000..b43f669 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/HDFSPolicyProvider.java @@ -0,0 +1,50 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs; + +import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; +import org.apache.hadoop.hdfs.protocol.ClientProtocol; +import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; +import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; +import org.apache.hadoop.security.authorize.PolicyProvider; +import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol; +import org.apache.hadoop.security.authorize.Service; + +/** + * {@link PolicyProvider} for HDFS protocols. 
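+ *
+ * <p>Each {@link org.apache.hadoop.security.authorize.Service} entry below ties
+ * an ACL key (configured in hadoop-policy.xml) to the protocol it guards. A
+ * minimal, illustrative sketch (not part of this patch) that just lists the
+ * mapping:
+ * <pre>
+ *   PolicyProvider provider = new HDFSPolicyProvider();
+ *   for (Service s : provider.getServices()) {
+ *     System.out.println(s.getServiceKey() + " guards " + s.getProtocol().getName());
+ *   }
+ * </pre>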
+ */ +public class HDFSPolicyProvider extends PolicyProvider { + private static final Service[] hdfsServices = + new Service[] { + new Service("security.client.protocol.acl", ClientProtocol.class), + new Service("security.client.datanode.protocol.acl", + ClientDatanodeProtocol.class), + new Service("security.datanode.protocol.acl", DatanodeProtocol.class), + new Service("security.inter.datanode.protocol.acl", + InterDatanodeProtocol.class), + new Service("security.namenode.protocol.acl", NamenodeProtocol.class), + new Service("security.refresh.policy.protocol.acl", + RefreshAuthorizationPolicyProtocol.class), + }; + + @Override + public Service[] getServices() { + return hdfsServices; + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/HftpFileSystem.java b/src/hdfs/org/apache/hadoop/hdfs/HftpFileSystem.java new file mode 100644 index 0000000..ede344e --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/HftpFileSystem.java @@ -0,0 +1,437 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs; + +import java.io.EOFException; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.net.HttpURLConnection; +import java.net.InetSocketAddress; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.TimeZone; + +import javax.security.auth.login.LoginException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FSInputStream; +import org.apache.hadoop.fs.FileChecksum; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.UnixUserGroupInformation; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Progressable; +import org.apache.hadoop.util.StringUtils; +import org.xml.sax.Attributes; +import org.xml.sax.InputSource; +import org.xml.sax.SAXException; +import org.xml.sax.XMLReader; +import org.xml.sax.helpers.DefaultHandler; +import org.xml.sax.helpers.XMLReaderFactory; + +/** An implementation of a protocol for accessing filesystems over HTTP. + * The following implementation provides a limited, read-only interface + * to a filesystem over HTTP. 
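+ *
+ * <p>Illustrative sketch only (not part of this patch), assuming the usual
+ * fs.hftp.impl mapping so that an hftp:// URI resolves to this class:
+ * <pre>
+ *   Configuration conf = new Configuration();
+ *   FileSystem fs = FileSystem.get(URI.create("hftp://namenode:50070/"), conf);
+ *   FSDataInputStream in = fs.open(new Path("/user/data/part-00000"), 4096);
+ *   // note: create(), rename(), delete() and mkdirs() throw IOException here
+ * </pre>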
+ * @see org.apache.hadoop.hdfs.server.namenode.ListPathsServlet + * @see org.apache.hadoop.hdfs.server.namenode.FileDataServlet + */ +public class HftpFileSystem extends FileSystem { + static { + HttpURLConnection.setFollowRedirects(true); + } + + protected InetSocketAddress nnAddr; + protected UserGroupInformation ugi; + + public static final String HFTP_TIMEZONE = "UTC"; + public static final String HFTP_DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ssZ"; + public static final String CONTENT_LENGTH_FIELD = "Content-Length"; + public static final String STRICT_CONTENT_LENGTH = "dfs.hftp.strictContentLength"; + + private boolean doStrictContentLengthCheck = false; + + public static final SimpleDateFormat getDateFormat() { + final SimpleDateFormat df = new SimpleDateFormat(HFTP_DATE_FORMAT); + df.setTimeZone(TimeZone.getTimeZone(HFTP_TIMEZONE)); + return df; + } + + protected static final ThreadLocal df = + new ThreadLocal() { + protected SimpleDateFormat initialValue() { + return getDateFormat(); + } + }; + + @Override + public void initialize(URI name, Configuration conf) throws IOException { + super.initialize(name, conf); + setConf(conf); + try { + this.ugi = UnixUserGroupInformation.login(conf, true); + } catch (LoginException le) { + throw new IOException(StringUtils.stringifyException(le)); + } + + nnAddr = NetUtils.createSocketAddr(name.toString()); + doStrictContentLengthCheck = conf.getBoolean(STRICT_CONTENT_LENGTH, false); + } + + @Override + public URI getUri() { + try { + return new URI("hftp", null, nnAddr.getHostName(), nnAddr.getPort(), + null, null, null); + } catch (URISyntaxException e) { + return null; + } + } + + /** + * Open an HTTP connection to the namenode to read file data and metadata. + * @param path The path component of the URL + * @param query The query component of the URL + */ + protected HttpURLConnection openConnection(String path, String query) + throws IOException { + try { + final URL url = new URI("http", null, nnAddr.getHostName(), + nnAddr.getPort(), path, query, null).toURL(); + if (LOG.isTraceEnabled()) { + LOG.trace("url=" + url); + } + return (HttpURLConnection)url.openConnection(); + } catch (URISyntaxException e) { + throw (IOException)new IOException().initCause(e); + } + } + + @Override + public FSDataInputStream open(Path f, int buffersize) throws IOException { + HftpInputStream hftpIn = new HftpInputStream(f); + + return new FSDataInputStream(hftpIn) { + @Override + public boolean isUnderConstruction() { + return ((HftpInputStream)in).isUnderConstruction(); + } + }; + } + + private class HftpInputStream extends FSInputStream { + private final Path file; + + private InputStream in; + private long pos; + private boolean isUnderConstruction; + private long totalBytesRead = 0; + private long contentLength; + + public HftpInputStream(Path file) throws IOException { + this.file = file; + openStream("/data" + file.toUri().getPath(), "ugi=" + ugi); + pos = 0; + } + + public boolean isUnderConstruction() { + return isUnderConstruction; + } + + @Override + public int read() throws IOException { + int ret = in.read(); + if (ret >= 0) { + ++pos; + totalBytesRead++; + } else if (totalBytesRead < contentLength) { + throw new EOFException( + String.format( + "premature end of stream: expected total %d bytes and got only %d", + pos, contentLength + )); + + } + return ret; + } + public int read(byte[] b, int off, int len) throws IOException { + int bytesRead = in.read(b, off, len); + if (bytesRead > 0) { + pos += bytesRead; + totalBytesRead += bytesRead; + } else if 
(bytesRead < 0) { + if (totalBytesRead < contentLength) { + throw new EOFException( + String.format( + "premature end of stream: expected total %d bytes and got only %d", + pos, contentLength + )); + } + } + return bytesRead; + } + + public void close() throws IOException { + in.close(); + } + + @Override + public void seek(long pos) throws IOException { + in.close(); + openStream("/data" + file.toUri().getPath(), + "seek=" + pos + "&ugi=" + ugi); + this.pos = pos; + } + + @Override + public long getPos() throws IOException { + return pos; + } + + @Override + public boolean seekToNewSource(long targetPos) throws IOException { + return false; + } + + private void openStream(String path, String query) throws IOException { + HttpURLConnection connection = openConnection(path, query); + connection.setRequestMethod("GET"); + connection.connect(); + + String contentLengthStr = + connection.getHeaderField(HftpFileSystem.CONTENT_LENGTH_FIELD); + + totalBytesRead = 0; + + if (contentLengthStr != null) { + contentLength = Long.valueOf( + contentLengthStr + ); + } else { + if (HftpFileSystem.this.doStrictContentLengthCheck) { + throw new IOException("missing require Content-Length header"); + } else { + contentLength = -1; //unknown + } + } + + isUnderConstruction = + "true".equals(connection.getHeaderField("isUnderConstruction")); + in = connection.getInputStream(); + } + } + + /** Class to parse and store a listing reply from the server. */ + class LsParser extends DefaultHandler { + + ArrayList fslist = new ArrayList(); + + public void startElement(String ns, String localname, String qname, + Attributes attrs) throws SAXException { + if ("listing".equals(qname)) return; + if (!"file".equals(qname) && !"directory".equals(qname)) { + if (RemoteException.class.getSimpleName().equals(qname)) { + throw new SAXException(RemoteException.valueOf(attrs)); + } + throw new SAXException("Unrecognized entry: " + qname); + } + long modif; + long atime = 0; + try { + final SimpleDateFormat ldf = df.get(); + modif = ldf.parse(attrs.getValue("modified")).getTime(); + String astr = attrs.getValue("accesstime"); + if (astr != null) { + atime = ldf.parse(astr).getTime(); + } + } catch (ParseException e) { throw new SAXException(e); } + FileStatus fs = "file".equals(qname) + ? new FileStatus( + Long.valueOf(attrs.getValue("size")).longValue(), false, + Short.valueOf(attrs.getValue("replication")).shortValue(), + Long.valueOf(attrs.getValue("blocksize")).longValue(), + modif, atime, FsPermission.valueOf(attrs.getValue("permission")), + attrs.getValue("owner"), attrs.getValue("group"), + new Path(getUri().toString(), attrs.getValue("path")) + .makeQualified(HftpFileSystem.this)) + : new FileStatus(0L, true, 0, 0L, + modif, atime, FsPermission.valueOf(attrs.getValue("permission")), + attrs.getValue("owner"), attrs.getValue("group"), + new Path(getUri().toString(), attrs.getValue("path")) + .makeQualified(HftpFileSystem.this)); + fslist.add(fs); + } + + private void fetchList(String path, boolean recur) throws IOException { + try { + XMLReader xr = XMLReaderFactory.createXMLReader(); + xr.setContentHandler(this); + HttpURLConnection connection = openConnection("/listPaths" + path, + "ugi=" + ugi + (recur? 
"&recursive=yes" : "")); + connection.setRequestMethod("GET"); + connection.connect(); + + InputStream resp = connection.getInputStream(); + xr.parse(new InputSource(resp)); + } catch(SAXException e) { + final Exception embedded = e.getException(); + if (embedded != null && embedded instanceof IOException) { + throw (IOException)embedded; + } + throw new IOException("invalid xml directory content", e); + } + } + + public FileStatus getFileStatus(Path f) throws IOException { + fetchList(f.toUri().getPath(), false); + if (fslist.size() == 0) { + throw new FileNotFoundException("File does not exist: " + f); + } + return fslist.get(0); + } + + public FileStatus[] listStatus(Path f, boolean recur) throws IOException { + fetchList(f.toUri().getPath(), recur); + if (fslist.size() > 0 && (fslist.size() != 1 || fslist.get(0).isDir())) { + fslist.remove(0); + } + return fslist.toArray(new FileStatus[0]); + } + + public FileStatus[] listStatus(Path f) throws IOException { + return listStatus(f, false); + } + } + + @Override + public FileStatus[] listStatus(Path f) throws IOException { + LsParser lsparser = new LsParser(); + return lsparser.listStatus(f); + } + + @Override + public FileStatus getFileStatus(Path f) throws IOException { + LsParser lsparser = new LsParser(); + return lsparser.getFileStatus(f); + } + + private class ChecksumParser extends DefaultHandler { + private FileChecksum filechecksum; + + /** {@inheritDoc} */ + public void startElement(String ns, String localname, String qname, + Attributes attrs) throws SAXException { + if (!MD5MD5CRC32FileChecksum.class.getName().equals(qname)) { + if (RemoteException.class.getSimpleName().equals(qname)) { + throw new SAXException(RemoteException.valueOf(attrs)); + } + throw new SAXException("Unrecognized entry: " + qname); + } + + filechecksum = MD5MD5CRC32FileChecksum.valueOf(attrs); + } + + private FileChecksum getFileChecksum(String f) throws IOException { + final HttpURLConnection connection = openConnection( + "/fileChecksum" + f, "ugi=" + ugi); + try { + final XMLReader xr = XMLReaderFactory.createXMLReader(); + xr.setContentHandler(this); + + connection.setRequestMethod("GET"); + connection.connect(); + + xr.parse(new InputSource(connection.getInputStream())); + } catch(SAXException e) { + final Exception embedded = e.getException(); + if (embedded != null && embedded instanceof IOException) { + throw (IOException)embedded; + } + throw new IOException("invalid xml directory content", e); + } finally { + connection.disconnect(); + } + return filechecksum; + } + } + + /** {@inheritDoc} */ + public FileChecksum getFileChecksum(Path f) throws IOException { + final String s = makeQualified(f).toUri().getPath(); + return new ChecksumParser().getFileChecksum(s); + } + + @Override + public Path getWorkingDirectory() { + return new Path("/").makeQualified(this); + } + + @Override + public void setWorkingDirectory(Path f) { } + + /** This optional operation is not yet supported. 
*/ + public FSDataOutputStream append(Path f, int bufferSize, + Progressable progress) throws IOException { + throw new IOException("Not supported"); + } + + @Override + public FSDataOutputStream create(Path f, FsPermission permission, + boolean overwrite, int bufferSize, + short replication, long blockSize, + Progressable progress) throws IOException { + throw new IOException("Not supported"); + } + + @Override + public boolean rename(Path src, Path dst) throws IOException { + throw new IOException("Not supported"); + } + + @Override + /* + * @deprecated Use delete(path, boolean) + */ + @Deprecated + public boolean delete(Path f) throws IOException { + throw new IOException("Not supported"); + } + + @Override + public boolean delete(Path f, boolean recursive) throws IOException { + throw new IOException("Not supported"); + } + + @Override + public boolean mkdirs(Path f, FsPermission permission) throws IOException { + throw new IOException("Not supported"); + } + +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/HighTideShell.java b/src/hdfs/org/apache/hadoop/hdfs/HighTideShell.java new file mode 100644 index 0000000..2c6fb77 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/HighTideShell.java @@ -0,0 +1,270 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs; + +import java.io.IOException; +import java.util.Collection; +import java.util.Map; +import java.util.HashMap; +import java.util.concurrent.TimeUnit; +import java.net.InetSocketAddress; +import javax.security.auth.login.LoginException; + +import org.apache.hadoop.ipc.*; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.io.retry.RetryPolicy; +import org.apache.hadoop.io.retry.RetryPolicies; +import org.apache.hadoop.io.retry.RetryProxy; +import org.apache.hadoop.security.UnixUserGroupInformation; +import org.apache.hadoop.fs.Path; + +import org.apache.hadoop.hdfs.protocol.PolicyInfo; +import org.apache.hadoop.hdfs.protocol.HighTideProtocol; +import org.apache.hadoop.hdfs.server.hightidenode.HighTideNode; + +/** + * A {@link HighTideShell} that allows browsing configured HighTide policies. 
+ */ +public class HighTideShell extends Configured implements Tool { + static { + Configuration.addDefaultResource("hdfs-default.xml"); + Configuration.addDefaultResource("hdfs-site.xml"); + } + public static final Log LOG = LogFactory.getLog( "org.apache.hadoop.HighTideShell"); + public HighTideProtocol hightidenode; + HighTideProtocol rpcHighTidenode; + private UnixUserGroupInformation ugi; + volatile boolean clientRunning = true; + private Configuration conf; + + /** + * Start HighTideShell. + *

+ * The HighTideShell connects to the specified HighTideNode and performs basic + * configuration options. + * @throws IOException + */ + public HighTideShell(Configuration conf) throws IOException { + super(conf); + this.conf = conf; + } + + private void initializeRpc(Configuration conf, InetSocketAddress address) throws IOException { + try { + this.ugi = UnixUserGroupInformation.login(conf, true); + } catch (LoginException e) { + throw (IOException)(new IOException().initCause(e)); + } + + this.rpcHighTidenode = createRPCHighTidenode(address, conf, ugi); + this.hightidenode = createHighTidenode(rpcHighTidenode); + } + + private void initializeLocal(Configuration conf) throws IOException { + try { + this.ugi = UnixUserGroupInformation.login(conf, true); + } catch (LoginException e) { + throw (IOException)(new IOException().initCause(e)); + } + } + + public static HighTideProtocol createHighTidenode(Configuration conf) throws IOException { + return createHighTidenode(HighTideNode.getAddress(conf), conf); + } + + public static HighTideProtocol createHighTidenode(InetSocketAddress htNodeAddr, + Configuration conf) throws IOException { + try { + return createHighTidenode(createRPCHighTidenode(htNodeAddr, conf, + UnixUserGroupInformation.login(conf, true))); + } catch (LoginException e) { + throw (IOException)(new IOException().initCause(e)); + } + } + + private static HighTideProtocol createRPCHighTidenode(InetSocketAddress htNodeAddr, + Configuration conf, UnixUserGroupInformation ugi) + throws IOException { + LOG.info("HighTideShell connecting to " + htNodeAddr); + return (HighTideProtocol)RPC.getProxy(HighTideProtocol.class, + HighTideProtocol.versionID, htNodeAddr, ugi, conf, + NetUtils.getSocketFactory(conf, HighTideProtocol.class)); + } + + private static HighTideProtocol createHighTidenode(HighTideProtocol rpcHighTidenode) + throws IOException { + RetryPolicy createPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep( + 5, 5000, TimeUnit.MILLISECONDS); + + Map,RetryPolicy> remoteExceptionToPolicyMap = + new HashMap, RetryPolicy>(); + + Map,RetryPolicy> exceptionToPolicyMap = + new HashMap, RetryPolicy>(); + exceptionToPolicyMap.put(RemoteException.class, + RetryPolicies.retryByRemoteException( + RetryPolicies.TRY_ONCE_THEN_FAIL, remoteExceptionToPolicyMap)); + RetryPolicy methodPolicy = RetryPolicies.retryByException( + RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap); + Map methodNameToPolicyMap = new HashMap(); + + methodNameToPolicyMap.put("create", methodPolicy); + + return (HighTideProtocol) RetryProxy.create(HighTideProtocol.class, + rpcHighTidenode, methodNameToPolicyMap); + } + + private void checkOpen() throws IOException { + if (!clientRunning) { + IOException result = new IOException("HighTideNode closed"); + throw result; + } + } + + /** + * Close the connection to the HighTideNode. + */ + public synchronized void close() throws IOException { + if(clientRunning) { + clientRunning = false; + RPC.stopProxy(rpcHighTidenode); + } + } + + /** + * Displays format of commands. 
+ */ + private static void printUsage(String cmd) { + String prefix = "Usage: java " + HighTideShell.class.getSimpleName(); + if ("-showConfig".equals(cmd)) { + System.err.println("Usage: java org.apache.hadoop.hdfs.HighTideShell"); + } else { + System.err.println("Usage: java HighTideShell"); + System.err.println(" [-showConfig ]"); + System.err.println(" [-help [cmd]]"); + System.err.println(); + ToolRunner.printGenericCommandUsage(System.err); + } + } + + /** + * run + */ + public int run(String argv[]) throws Exception { + + if (argv.length < 1) { + printUsage(""); + return -1; + } + + int exitCode = -1; + int i = 0; + String cmd = argv[i++]; + // + // verify that we have enough command line parameters + // + if ("-showConfig".equals(cmd)) { + if (argv.length < 1) { + printUsage(cmd); + return exitCode; + } + } + + try { + if ("-showConfig".equals(cmd)) { + initializeRpc(conf, HighTideNode.getAddress(conf)); + exitCode = showConfig(cmd, argv, i); + } else { + exitCode = -1; + System.err.println(cmd.substring(1) + ": Unknown command"); + printUsage(""); + } + } catch (IllegalArgumentException arge) { + exitCode = -1; + System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage()); + printUsage(cmd); + } catch (RemoteException e) { + // + // This is a error returned by hightidenode server. Print + // out the first line of the error mesage, ignore the stack trace. + exitCode = -1; + try { + String[] content; + content = e.getLocalizedMessage().split("\n"); + System.err.println(cmd.substring(1) + ": " + + content[0]); + } catch (Exception ex) { + System.err.println(cmd.substring(1) + ": " + + ex.getLocalizedMessage()); + } + } catch (IOException e) { + // + // IO exception encountered locally. + // + exitCode = -1; + System.err.println(cmd.substring(1) + ": " + + e.getLocalizedMessage()); + } catch (Exception re) { + exitCode = -1; + System.err.println(cmd.substring(1) + ": " + re.getLocalizedMessage()); + } finally { + } + return exitCode; + } + + /** + * Apply operation specified by 'cmd' on all parameters + * starting from argv[startindex]. + */ + private int showConfig(String cmd, String argv[], int startindex) throws IOException { + int exitCode = 0; + PolicyInfo[] all = hightidenode.getAllPolicies(); + for (int i = 0; i < all.length; i++) { + System.out.println(all[i]); + } + return exitCode; + } + + /** + * main() has some simple utility methods + */ + public static void main(String argv[]) throws Exception { + HighTideShell shell = null; + try { + shell = new HighTideShell(new Configuration()); + int res = ToolRunner.run(shell, argv); + System.exit(res); + } catch (RPC.VersionMismatch v) { + System.err.println("Version Mismatch between client and server" + + "... command aborted."); + System.exit(-1); + } catch (IOException e) { + System.err.println("Bad connection to HighTideNode. command aborted."); + System.exit(-1); + } finally { + shell.close(); + } + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/HsftpFileSystem.java b/src/hdfs/org/apache/hadoop/hdfs/HsftpFileSystem.java new file mode 100644 index 0000000..f70dae0 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/HsftpFileSystem.java @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs; + +import java.io.IOException; +import java.net.HttpURLConnection; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.HttpsURLConnection; +import javax.net.ssl.SSLSession; + +import org.apache.hadoop.conf.Configuration; + +/** An implementation of a protocol for accessing filesystems over HTTPS. + * The following implementation provides a limited, read-only interface + * to a filesystem over HTTPS. + * @see org.apache.hadoop.hdfs.server.namenode.ListPathsServlet + * @see org.apache.hadoop.hdfs.server.namenode.FileDataServlet + */ +public class HsftpFileSystem extends HftpFileSystem { + + @Override + public void initialize(URI name, Configuration conf) throws IOException { + super.initialize(name, conf); + setupSsl(conf); + } + + /** Set up SSL resources */ + private static void setupSsl(Configuration conf) { + Configuration sslConf = new Configuration(false); + sslConf.addResource(conf.get("dfs.https.client.keystore.resource", + "ssl-client.xml")); + System.setProperty("javax.net.ssl.trustStore", sslConf.get( + "ssl.client.truststore.location", "")); + System.setProperty("javax.net.ssl.trustStorePassword", sslConf.get( + "ssl.client.truststore.password", "")); + System.setProperty("javax.net.ssl.trustStoreType", sslConf.get( + "ssl.client.truststore.type", "jks")); + System.setProperty("javax.net.ssl.keyStore", sslConf.get( + "ssl.client.keystore.location", "")); + System.setProperty("javax.net.ssl.keyStorePassword", sslConf.get( + "ssl.client.keystore.password", "")); + System.setProperty("javax.net.ssl.keyPassword", sslConf.get( + "ssl.client.keystore.keypassword", "")); + System.setProperty("javax.net.ssl.keyStoreType", sslConf.get( + "ssl.client.keystore.type", "jks")); + } + + @Override + protected HttpURLConnection openConnection(String path, String query) + throws IOException { + try { + final URL url = new URI("https", null, nnAddr.getHostName(), + nnAddr.getPort(), path, query, null).toURL(); + HttpsURLConnection conn = (HttpsURLConnection)url.openConnection(); + // bypass hostname verification + conn.setHostnameVerifier(new DummyHostnameVerifier()); + return (HttpURLConnection)conn; + } catch (URISyntaxException e) { + throw (IOException)new IOException().initCause(e); + } + } + + @Override + public URI getUri() { + try { + return new URI("hsftp", null, nnAddr.getHostName(), nnAddr.getPort(), + null, null, null); + } catch (URISyntaxException e) { + return null; + } + } + + /** + * Dummy hostname verifier that is used to bypass hostname checking + */ + protected static class DummyHostnameVerifier implements HostnameVerifier { + public boolean verify(String hostname, SSLSession session) { + return true; + } + } + +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/package.html b/src/hdfs/org/apache/hadoop/hdfs/package.html new file mode 100644 index 0000000..47ec0ff --- /dev/null 
+++ b/src/hdfs/org/apache/hadoop/hdfs/package.html @@ -0,0 +1,34 @@ + + + + + + +

+<p>A distributed implementation of {@link
+org.apache.hadoop.fs.FileSystem}.  This is loosely modelled after
+Google's GFS.</p>
+
+<p>The most important difference is that unlike GFS, Hadoop DFS files
+have strictly one writer at any one time.  Bytes are always appended
+to the end of the writer's stream.  There is no notion of "record appends"
+or "mutations" that are then checked or reordered.  Writers simply emit
+a byte stream.  That byte stream is guaranteed to be stored in the
+order written.</p>
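+
+<p>For example (an illustrative sketch, not part of this package), a client
+writes a file by streaming bytes through a single output stream, and those
+bytes are stored in exactly the order written:</p>
+
+<pre>
+  Configuration conf = new Configuration();
+  FileSystem fs = FileSystem.get(conf);          // typically a DistributedFileSystem
+  FSDataOutputStream out = fs.create(new Path("/tmp/example"));
+  out.write(data);                               // bytes are appended, in order
+  out.close();                                   // single writer; no interleaving
+</pre>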

+ + + diff --git a/src/hdfs/org/apache/hadoop/hdfs/protocol/AlreadyBeingCreatedException.java b/src/hdfs/org/apache/hadoop/hdfs/protocol/AlreadyBeingCreatedException.java new file mode 100644 index 0000000..4423a0d --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/protocol/AlreadyBeingCreatedException.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.protocol; + +import java.io.IOException; + +/** + * The exception that happens when you ask to create a file that already + * is being created, but is not closed yet. + */ +public class AlreadyBeingCreatedException extends IOException { + public AlreadyBeingCreatedException(String msg) { + super(msg); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/protocol/Block.java b/src/hdfs/org/apache/hadoop/hdfs/protocol/Block.java new file mode 100644 index 0000000..92df205 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/protocol/Block.java @@ -0,0 +1,187 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.protocol; + +import java.io.*; + +import org.apache.hadoop.hdfs.server.common.GenerationStamp; +import org.apache.hadoop.io.*; + +/************************************************** + * A Block is a Hadoop FS primitive, identified by a + * long. + * + **************************************************/ +public class Block implements Writable, Comparable { + + static { // register a ctor + WritableFactories.setFactory + (Block.class, + new WritableFactory() { + public Writable newInstance() { return new Block(); } + }); + } + + // generation stamp of blocks that pre-date the introduction of + // a generation stamp. + public static final long GRANDFATHER_GENERATION_STAMP = 0; + + /** + */ + public static boolean isBlockFilename(File f) { + String name = f.getName(); + if ( name.startsWith( "blk_" ) && + name.indexOf( '.' 
) < 0 ) { + return true; + } else { + return false; + } + } + + public static long filename2id(String name) { + return Long.parseLong(name.substring("blk_".length())); + } + + private long blockId; + private long numBytes; + private long generationStamp; + + public Block() {this(0, 0, 0);} + + public Block(final long blkid, final long len, final long generationStamp) { + set(blkid, len, generationStamp); + } + + public Block(final long blkid) {this(blkid, 0, GenerationStamp.WILDCARD_STAMP);} + + public Block(Block blk) {this(blk.blockId, blk.numBytes, blk.generationStamp);} + + /** + * Find the blockid from the given filename + */ + public Block(File f, long len, long genstamp) { + this(filename2id(f.getName()), len, genstamp); + } + + public void set(long blkid, long len, long genStamp) { + this.blockId = blkid; + this.numBytes = len; + this.generationStamp = genStamp; + } + /** + */ + public long getBlockId() { + return blockId; + } + + public void setBlockId(long bid) { + blockId = bid; + } + + /** + */ + public String getBlockName() { + return "blk_" + String.valueOf(blockId); + } + + /** + */ + public long getNumBytes() { + return numBytes; + } + public void setNumBytes(long len) { + this.numBytes = len; + } + + public long getGenerationStamp() { + return generationStamp; + } + + public void setGenerationStamp(long stamp) { + generationStamp = stamp; + } + + /** + */ + public String toString() { + return getBlockName() + "_" + getGenerationStamp(); + } + + ///////////////////////////////////// + // Writable + ///////////////////////////////////// + public void write(DataOutput out) throws IOException { + out.writeLong(blockId); + out.writeLong(numBytes); + out.writeLong(generationStamp); + } + + public void readFields(DataInput in) throws IOException { + this.blockId = in.readLong(); + this.numBytes = in.readLong(); + this.generationStamp = in.readLong(); + if (numBytes < 0) { + throw new IOException("Unexpected block size: " + numBytes + + " Blockid " + blockId + + " GenStamp " + generationStamp); + } + } + + ///////////////////////////////////// + // Comparable + ///////////////////////////////////// + static void validateGenerationStamp(long generationstamp) { + if (generationstamp == GenerationStamp.WILDCARD_STAMP) { + throw new IllegalStateException("generationStamp (=" + generationstamp + + ") == GenerationStamp.WILDCARD_STAMP"); + } + } + + /** {@inheritDoc} */ + public int compareTo(Block b) { + //Wildcard generationStamp is NOT ALLOWED here + validateGenerationStamp(this.generationStamp); + validateGenerationStamp(b.generationStamp); + + if (blockId < b.blockId) { + return -1; + } else if (blockId == b.blockId) { + return GenerationStamp.compare(generationStamp, b.generationStamp); + } else { + return 1; + } + } + + /** {@inheritDoc} */ + public boolean equals(Object o) { + if (!(o instanceof Block)) { + return false; + } + final Block that = (Block)o; + //Wildcard generationStamp is ALLOWED here + return this.blockId == that.blockId + && GenerationStamp.equalsWithWildcard( + this.generationStamp, that.generationStamp); + } + + /** {@inheritDoc} */ + public int hashCode() { + //GenerationStamp is IRRELEVANT and should not be used here + return 37 * 17 + (int) (blockId^(blockId>>>32)); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java b/src/hdfs/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java new file mode 100644 index 0000000..9a8bee4 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java @@ -0,0 +1,127 @@ 
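A note on the layout used by the file added below: each block-report entry is packed
into three consecutive longs (block id, then length, then generation stamp). A small
decoding sketch under that assumption (hypothetical helper, not part of the patch):

  // Hypothetical sketch: decode a report encoded as three longs per block,
  // in the same order BlockListAsLongs.setBlock() writes them below.
  static Block[] decode(long[] report) {
    Block[] blocks = new Block[report.length / 3];
    for (int i = 0; i < blocks.length; i++) {
      blocks[i] = new Block(report[3 * i],        // block id
                            report[3 * i + 1],    // number of bytes
                            report[3 * i + 2]);   // generation stamp
    }
    return blocks;
  }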
+/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.protocol; + +/** + * This class provides an interface for accessing list of blocks that + * has been implemented as long[]. + * This class is usefull for block report. Rather than send block reports + * as a Block[] we can send it as a long[]. + * + */ +public class BlockListAsLongs { + /** + * A block as 3 longs + * block-id and block length and generation stamp + */ + private static final int LONGS_PER_BLOCK = 3; + + private static int index2BlockId(int index) { + return index*LONGS_PER_BLOCK; + } + private static int index2BlockLen(int index) { + return (index*LONGS_PER_BLOCK) + 1; + } + private static int index2BlockGenStamp(int index) { + return (index*LONGS_PER_BLOCK) + 2; + } + + private long[] blockList; + + /** + * Converting a block[] to a long[] + * @param blockArray - the input array block[] + * @return the output array of long[] + */ + + public static long[] convertToArrayLongs(final Block[] blockArray) { + long[] blocksAsLongs = new long[blockArray.length * LONGS_PER_BLOCK]; + + BlockListAsLongs bl = new BlockListAsLongs(blocksAsLongs); + assert bl.getNumberOfBlocks() == blockArray.length; + + for (int i = 0; i < blockArray.length; i++) { + bl.setBlock(i, blockArray[i]); + } + return blocksAsLongs; + } + + /** + * Constructor + * @param iBlockList - BlockListALongs create from this long[] parameter + */ + public BlockListAsLongs(final long[] iBlockList) { + if (iBlockList == null) { + blockList = new long[0]; + } else { + if (iBlockList.length%LONGS_PER_BLOCK != 0) { + // must be multiple of LONGS_PER_BLOCK + throw new IllegalArgumentException(); + } + blockList = iBlockList; + } + } + + + /** + * The number of blocks + * @return - the number of blocks + */ + public int getNumberOfBlocks() { + return blockList.length/LONGS_PER_BLOCK; + } + + + /** + * The block-id of the indexTh block + * @param index - the block whose block-id is desired + * @return the block-id + */ + public long getBlockId(final int index) { + return blockList[index2BlockId(index)]; + } + + /** + * The block-len of the indexTh block + * @param index - the block whose block-len is desired + * @return - the block-len + */ + public long getBlockLen(final int index) { + return blockList[index2BlockLen(index)]; + } + + /** + * The generation stamp of the indexTh block + * @param index - the block whose block-len is desired + * @return - the generation stamp + */ + public long getBlockGenStamp(final int index) { + return blockList[index2BlockGenStamp(index)]; + } + + /** + * Set the indexTh block + * @param index - the index of the block to set + * @param b - the block is set to the value of the this block + */ + void setBlock(final int index, final Block b) { + 
blockList[index2BlockId(index)] = b.getBlockId(); + blockList[index2BlockLen(index)] = b.getNumBytes(); + blockList[index2BlockGenStamp(index)] = b.getGenerationStamp(); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java b/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java new file mode 100644 index 0000000..e74e9b4 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java @@ -0,0 +1,69 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.protocol; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.ipc.VersionedProtocol; + +/** An client-datanode protocol for block recovery + */ +public interface ClientDatanodeProtocol extends VersionedProtocol { + public static final Log LOG = LogFactory.getLog(ClientDatanodeProtocol.class); + + public static final long GET_BLOCKINFO_VERSION = 4L; + public static final long COPY_BLOCK_VERSION = 5L; + + /** + * 3: add keepLength parameter. + * 4: added getBlockInfo + * 5: add copyBlock parameter. + */ + public static final long versionID = 5L; + + /** Start generation-stamp recovery for specified block + * @param block the specified block + * @param keepLength keep the block length + * @param targets the list of possible locations of specified block + * @return the new blockid if recovery successful and the generation stamp + * got updated as part of the recovery, else returns null if the block id + * not have any data and the block was deleted. + * @throws IOException + */ + LocatedBlock recoverBlock(Block block, boolean keepLength, + DatanodeInfo[] targets) throws IOException; + + /** Returns a block object that contains the specified block object + * from the specified Datanode. + * @param block the specified block + * @return the Block object from the specified Datanode + * @throws IOException if the block does not exist + */ + public Block getBlockInfo(Block block) throws IOException; + + /** Instruct the datanode to copy a block to specified target. 
+ * @param srcBlock the specified block on this datanode + * @param destinationBlock the block identifier on the destination datanode + * @param target the locations where this block needs to be copied + * @throws IOException + */ + public void copyBlock(Block srcblock, Block destBlock, + DatanodeInfo target) throws IOException; +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java new file mode 100644 index 0000000..fa20f75 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -0,0 +1,571 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.protocol; + +import java.io.*; + +import org.apache.hadoop.ipc.VersionedProtocol; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction; +import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; +import org.apache.hadoop.fs.permission.*; +import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.CorruptFileBlocks; + +/********************************************************************** + * ClientProtocol is used by user code via + * {@link org.apache.hadoop.hdfs.DistributedFileSystem} class to communicate + * with the NameNode. User code can manipulate the directory namespace, + * as well as open/close file streams, etc. + * + **********************************************************************/ +public interface ClientProtocol extends VersionedProtocol { + + public static final long OPTIMIZE_FILE_STATUS_VERSION = 42L; + public static final long ITERATIVE_LISTING_VERSION = 43L; + public static final long BULK_BLOCK_LOCATIONS_VERSION = 44L; + public static final long CONCAT_VERSION = 52L; + public static final long LIST_CORRUPT_FILEBLOCKS_VERSION = 53L; + + /** + * Compared to the previous version the following changes have been introduced: + * (Only the latest change is reflected. + * The log of historical changes can be retrieved from the svn). + * 52: concat() + * 53: Replace getCorruptFiles() with listCorruptFileBlocks() + */ + + public static final long versionID = LIST_CORRUPT_FILEBLOCKS_VERSION; + + /////////////////////////////////////// + // File contents + /////////////////////////////////////// + /** + * Get locations of the blocks of the specified file within the specified range. + * DataNode locations for each block are sorted by + * the proximity to the client. + *

+ * Return {@link LocatedBlocks} which contains + * file length, blocks and their locations. + * DataNode locations for each block are sorted by + * the distance to the client's address. + *

+ * The client will then have to contact + * one of the indicated DataNodes to obtain the actual data. + * + * @param src file name + * @param offset range start offset + * @param length range length + * @return file length and array of blocks with their locations + * @throws IOException + */ + public LocatedBlocks getBlockLocations(String src, + long offset, + long length) throws IOException; + + /** + * Create a new file entry in the namespace. + *

+ * This will create an empty file specified by the source path. + * The path should reflect a full path originated at the root. + * The name-node does not have a notion of "current" directory for a client. + *

+ * Once created, the file is visible and available for read to other clients. + * Although, other clients cannot {@link #delete(String)}, re-create or + * {@link #rename(String, String)} it until the file is completed + * or explicitly as a result of lease expiration. + *

+ * Blocks have a maximum size. Clients that intend to + * create multi-block files must also use {@link #addBlock(String, String)}. + * + * @param src path of the file being created. + * @param masked masked permission. + * @param clientName name of the current client. + * @param overwrite indicates whether the file should be + * overwritten if it already exists. + * @param replication block replication factor. + * @param blockSize maximum block size. + * + * @throws AccessControlException if permission to create file is + * denied by the system. As usually on the client side the exception will + * be wrapped into {@link org.apache.hadoop.ipc.RemoteException}. + * @throws QuotaExceededException if the file creation violates + * any quota restriction + * @throws IOException if other errors occur. + */ + public void create(String src, + FsPermission masked, + String clientName, + boolean overwrite, + short replication, + long blockSize + ) throws IOException; + + /** + * Append to the end of the file. + * @param src path of the file being created. + * @param clientName name of the current client. + * @return information about the last partial block if any. + * @throws AccessControlException if permission to append file is + * denied by the system. As usually on the client side the exception will + * be wrapped into {@link org.apache.hadoop.ipc.RemoteException}. + * Allows appending to an existing file if the server is + * configured with the parameter dfs.support.append set to true, otherwise + * throws an IOException. + * @throws IOException if other errors occur. + */ + public LocatedBlock append(String src, String clientName) throws IOException; + + /** + * Set replication for an existing file. + *

+ * The NameNode sets replication to the new value and returns. + * The actual block replication is not expected to be performed during + * this method call. The blocks will be populated or removed in the + * background as the result of the routine block maintenance procedures. + * + * @param src file name + * @param replication new replication + * @throws IOException + * @return true if successful; + * false if file does not exist or is a directory + */ + public boolean setReplication(String src, + short replication + ) throws IOException; + + /** + * Set permissions for an existing file/directory. + */ + public void setPermission(String src, FsPermission permission + ) throws IOException; + + /** + * Set owner of a path (i.e. a file or a directory). + * The parameters username and groupname cannot both be null. + * @param src + * @param username If it is null, the original username remains unchanged. + * @param groupname If it is null, the original groupname remains unchanged. + */ + public void setOwner(String src, String username, String groupname + ) throws IOException; + + /** + * The client can give up on a blcok by calling abandonBlock(). + * The client can then + * either obtain a new block, or complete or abandon the file. + * Any partial writes to the block will be discarded. + */ + public void abandonBlock(Block b, String src, String holder + ) throws IOException; + + /** + * A client that wants to write an additional block to the + * indicated filename (which must currently be open for writing) + * should call addBlock(). + * + * addBlock() allocates a new block and datanodes the block data + * should be replicated to. + * + * @return LocatedBlock allocated block information. + */ + public LocatedBlock addBlock(String src, String clientName) throws IOException; + + /** + * A client that wants to write an additional block to the + * indicated filename (which must currently be open for writing) + * should call addBlock(). + * + * addBlock() allocates a new block and datanodes the block data + * should be replicated to. + * + * This method enables client compatibility + * which uses this method for adding blocks. + * + * @param excludedNodes a list of nodes that should not be allocated; + * implementation may ignore this + * + * @return LocatedBlock allocated block information. + */ + public LocatedBlock addBlock(String src, String clientName, + DatanodeInfo[] excludedNodes) throws IOException; + + /** + * The client is done writing data to the given filename, and would + * like to complete it. + * + * The function returns whether the file has been closed successfully. + * If the function returns false, the caller should try again. + * + * A call to complete() will not return true until all the file's + * blocks have been replicated the minimum number of times. Thus, + * DataNode failures may cause a client to call complete() several + * times before succeeding. + */ + public boolean complete(String src, String clientName) throws IOException; + + /** + * The client wants to report corrupted blocks (blocks with specified + * locations on datanodes). + * @param blocks Array of located blocks to report + */ + public void reportBadBlocks(LocatedBlock[] blocks) throws IOException; + + /////////////////////////////////////// + // Namespace management + /////////////////////////////////////// + /** + * Rename an item in the file system namespace. + * + * @param src existing file or directory name. + * @param dst new name. 
+ * @return true if successful, or false if the old name does not exist + * or if the new name already belongs to the namespace. + * @throws IOException if the new name is invalid. + * @throws QuotaExceededException if the rename would violate + * any quota restriction + */ + public boolean rename(String src, String dst) throws IOException; + + /** + * moves blocks from srcs to trg and delete srcs + * + * @param trg existing file + * @param srcs - list of existing files (same block size, same replication) + * @throws IOException if some arguments are invalid + * @throws QuotaExceededException if the rename would violate + * any quota restriction + */ + public void concat(String trg, String [] srcs) throws IOException; + + /** + * Delete the given file or directory from the file system. + *

+ * Any blocks belonging to the deleted files will be garbage-collected. + * + * @param src existing name. + * @return true only if the existing file or directory was actually removed + * from the file system. + */ + public boolean delete(String src) throws IOException; + + /** + * Delete the given file or directory from the file system. + *

+ * same as delete but provides a way to avoid accidentally + * deleting non empty directories programmatically. + * @param src existing name + * @param recursive if true deletes a non empty directory recursively, + * else throws an exception. + * @return true only if the existing file or directory was actually removed + * from the file system. + */ + public boolean delete(String src, boolean recursive) throws IOException; + + /** + * Create a directory (or hierarchy of directories) with the given + * name and permission. + * + * @param src The path of the directory being created + * @param masked The masked permission of the directory being created + * @return True if the operation success. + * @throws {@link AccessControlException} if permission to create file is + * denied by the system. As usually on the client side the exception will + * be wraped into {@link org.apache.hadoop.ipc.RemoteException}. + * @throws QuotaExceededException if the operation would violate + * any quota restriction. + */ + public boolean mkdirs(String src, FsPermission masked) throws IOException; + + /** + * Get a listing of the indicated directory + */ + public FileStatus[] getListing(String src) throws IOException; + + /** + * Get a listing of the indicated directory + */ + public HdfsFileStatus[] getHdfsListing(String src) throws IOException; + + + /** + * Get a partial listing of the indicated directory, + * starting from the first child whose name is greater than startAfter + * + * @param src the directory name + * @param startAfter the name to start listing after + * @return a partial listing starting after startAfter + */ + public DirectoryListing getPartialListing(String src, byte[] startAfter) + throws IOException; + + /** + * Get a partial listing of the indicated directory, + * piggybacking block locations to each FileStatus + * + * @param src the directory name + * @param startAfter the name to start listing after + * @return a partial listing starting after startAfter + */ + public LocatedDirectoryListing getLocatedPartialListing(String src, + byte[] startAfter) + throws IOException; + + /////////////////////////////////////// + // System issues and management + /////////////////////////////////////// + + /** + * Client programs can cause stateful changes in the NameNode + * that affect other clients. A client may obtain a file and + * neither abandon nor complete it. A client might hold a series + * of locks that prevent other clients from proceeding. + * Clearly, it would be bad if a client held a bunch of locks + * that it never gave up. This can happen easily if the client + * dies unexpectedly. + *

+ * So, the NameNode will revoke the locks and live file-creates
+ * for clients that it thinks have died. A client tells the
+ * NameNode that it is still alive by periodically calling
+ * renewLease(). If a certain amount of time passes since
+ * the last call to renewLease(), the NameNode assumes the
+ * client has died.
+ */
+ public void renewLease(String clientName) throws IOException;
+
+ public int GET_STATS_CAPACITY_IDX = 0;
+ public int GET_STATS_USED_IDX = 1;
+ public int GET_STATS_REMAINING_IDX = 2;
+ public int GET_STATS_UNDER_REPLICATED_IDX = 3;
+ public int GET_STATS_CORRUPT_BLOCKS_IDX = 4;
+ public int GET_STATS_MISSING_BLOCKS_IDX = 5;
+
+ /**
+ * Get a set of statistics about the filesystem.
+ * Right now, six values are returned, indexed as listed below.
+ *

+ * [0] contains the total storage capacity of the system, in bytes.
+ * [1] contains the total used space of the system, in bytes.
+ * [2] contains the available storage of the system, in bytes.
+ * [3] contains number of under replicated blocks in the system.
+ * [4] contains number of blocks with a corrupt replica.
+ * [5] contains number of blocks without any good replicas left.
+ *
+ * Use public constants like {@link #GET_STATS_CAPACITY_IDX} in place of + * actual numbers to index into the array. + */ + public long[] getStats() throws IOException; + + /** + * Get a report on the system's current datanodes. + * One DatanodeInfo object is returned for each DataNode. + * Return live datanodes if type is LIVE; dead datanodes if type is DEAD; + * otherwise all datanodes if type is ALL. + */ + public DatanodeInfo[] getDatanodeReport(FSConstants.DatanodeReportType type) + throws IOException; + + /** + * Get the block size for the given file. + * @param filename The name of the file + * @return The number of bytes in each block + * @throws IOException + */ + public long getPreferredBlockSize(String filename) throws IOException; + + /** + * Enter, leave or get safe mode. + *
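A minimal sketch of reading these statistics, assuming a ClientProtocol proxy named namenode has already been obtained through Hadoop's RPC layer:

// Sketch only: `namenode` is an assumed, already-constructed ClientProtocol proxy.
long[] stats = namenode.getStats();
long capacity  = stats[ClientProtocol.GET_STATS_CAPACITY_IDX];
long used      = stats[ClientProtocol.GET_STATS_USED_IDX];
long remaining = stats[ClientProtocol.GET_STATS_REMAINING_IDX];
long missing   = stats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX];
System.out.println("capacity=" + capacity + " used=" + used
    + " remaining=" + remaining + " missing blocks=" + missing);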

+ * Safe mode is a name node state when it
+ * 1. does not accept changes to name space (read-only), and
+ * 2. does not replicate or delete blocks.
+ *

+ * Safe mode is entered automatically at name node startup.
+ * Safe mode can also be entered manually using
+ * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}.
+ *

+ * At startup the name node accepts data node reports collecting
+ * information about block locations.
+ * In order to leave safe mode it needs to collect a configurable
+ * percentage, called the threshold, of blocks that satisfy the minimal
+ * replication condition.
+ * The minimal replication condition is that each block must have at least
+ * dfs.replication.min replicas.
+ * When the threshold is reached the name node extends safe mode
+ * for a configurable amount of time
+ * to let the remaining data nodes check in before it
+ * starts replicating missing blocks.
+ * Then the name node leaves safe mode.
+ *

+ * If safe mode is turned on manually using + * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)} + * then the name node stays in safe mode until it is manually turned off + * using {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_LEAVE)}. + * Current state of the name node can be verified using + * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)} + *

+ * Configuration parameters:
+ * dfs.safemode.threshold.pct is the threshold parameter.
+ * dfs.safemode.extension is the safe mode extension parameter.
+ * dfs.replication.min is the minimal replication parameter.
+ *
+ * Special cases:
+ * The name node does not enter safe mode at startup if the threshold is
+ * set to 0 or if the name space is empty.
+ * If the threshold is set to 1 then all blocks need to have at least
+ * minimal replication.
+ * If the threshold value is greater than 1 then the name node will not be
+ * able to turn off safe mode automatically.
+ * Safe mode can always be turned off manually.
+ *
+ * @param action 0 leave safe mode;
+ *               1 enter safe mode;
+ *               2 get safe mode state.
+ * @return 0 if the safe mode is OFF or
+ *         1 if the safe mode is ON.
+ * @throws IOException
+ */
+ public boolean setSafeMode(FSConstants.SafeModeAction action) throws IOException;
+
+ /**
+ * Save namespace image.
+ *
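A minimal sketch of the safe mode actions described above, assuming the same hypothetical namenode proxy:

// Sketch only: check, enter, and leave safe mode explicitly.
boolean inSafeMode = namenode.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET);
if (!inSafeMode) {
  namenode.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER); // namespace becomes read-only
  // ... perform maintenance that requires safe mode ...
  namenode.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE);
}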

+ * Saves current namespace into storage directories and reset edits log. + * Requires superuser privilege and safe mode. + * + * @throws AccessControlException if the superuser privilege is violated. + * @throws IOException if image creation failed. + */ + public void saveNamespace() throws IOException; + + /** + * Tells the namenode to reread the hosts and exclude files. + * @throws IOException + */ + public void refreshNodes() throws IOException; + + /** + * Finalize previous upgrade. + * Remove file system state saved during the upgrade. + * The upgrade will become irreversible. + * + * @throws IOException + */ + public void finalizeUpgrade() throws IOException; + + /** + * Report distributed upgrade progress or force current upgrade to proceed. + * + * @param action {@link FSConstants.UpgradeAction} to perform + * @return upgrade status information or null if no upgrades are in progress + * @throws IOException + */ + public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action) + throws IOException; + + /** + * Dumps namenode data structures into specified file. If file + * already exists, then append. + * @throws IOException + */ + public void metaSave(String filename) throws IOException; + + /** + * @return Array of FileStatus objects referring to corrupted files. + * @throws AccessControlException + * @throws IOException + */ + @Deprecated + FileStatus[] getCorruptFiles() + throws AccessControlException, IOException; + + /** + * @return a list in which each entry describes a corrupt file/block + * @throws IOException + */ + public CorruptFileBlocks + listCorruptFileBlocks(String path, String cookie) + throws IOException; + + /** + * Get the file info for a specific file or directory. + * @param src The string representation of the path to the file + * @throws IOException if permission to access file is denied by the system + * @return object containing information regarding the file + * or null if file not found + */ + public FileStatus getFileInfo(String src) throws IOException; + + /** + * Get the file info for a specific file or directory. + * @param src The string representation of the path to the file + * @throws IOException if permission to access file is denied by the system + * @return HdfsFileStatus containing information regarding the file + * or null if file not found + */ + public HdfsFileStatus getHdfsFileInfo(String src) throws IOException; + + /** + * Get {@link ContentSummary} rooted at the specified directory. + * @param path The string representation of the path + */ + public ContentSummary getContentSummary(String path) throws IOException; + + /** + * Set the quota for a directory. + * @param path The string representation of the path to the directory + * @param namespaceQuota Limit on the number of names in the tree rooted + * at the directory + * @param diskspaceQuota Limit on disk space occupied all the files under + * this directory. + *

+ * + * The quota can have three types of values : (1) 0 or more will set + * the quota to that value, (2) {@link FSConstants#QUOTA_DONT_SET} implies + * the quota will not be changed, and (3) {@link FSConstants#QUOTA_RESET} + * implies the quota will be reset. Any other value is a runtime error. + * + * @throws FileNotFoundException if the path is a file or + * does not exist + * @throws QuotaExceededException if the directory size + * is greater than the given quota + */ + public void setQuota(String path, long namespaceQuota, long diskspaceQuota) + throws IOException; + + /** + * Write all metadata for this file into persistent storage. + * The file must be currently open for writing. + * @param src The string representation of the path + * @param client The string representation of the client + */ + public void fsync(String src, String client) throws IOException; + + /** + * Sets the modification and access time of the file to the specified time. + * @param src The string representation of the path + * @param mtime The number of milliseconds since Jan 1, 1970. + * Setting mtime to -1 means that modification time should not be set + * by this call. + * @param atime The number of milliseconds since Jan 1, 1970. + * Setting atime to -1 means that access time should not be set + * by this call. + */ + public void setTimes(String src, long mtime, long atime) throws IOException; +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java b/src/hdfs/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java new file mode 100644 index 0000000..bb8903a --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdfs.protocol; + +import org.apache.hadoop.util.StringUtils; + +public class DSQuotaExceededException extends QuotaExceededException { + protected static final long serialVersionUID = 1L; + + public DSQuotaExceededException(String msg) { + super(msg); + } + + public DSQuotaExceededException(long quota, long count) { + super(quota, count); + } + + public String getMessage() { + String msg = super.getMessage(); + if (msg == null) { + return "The DiskSpace quota" + (pathName==null?"":(" of " + pathName)) + + " is exceeded: quota=" + quota + " diskspace consumed=" + StringUtils.humanReadableInt(count); + } else { + return msg; + } + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java b/src/hdfs/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java new file mode 100644 index 0000000..6e59080 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java @@ -0,0 +1,61 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.protocol; + + +/** + * + * The Client transfers data to/from datanode using a streaming protocol. + * + */ +public interface DataTransferProtocol { + + + /** Version for data transfers between clients and datanodes + * This should change when serialization of DatanodeInfo, not just + * when protocol changes. It is not very obvious. + */ + /* + * Version 14: + * OP_REPLACE_BLOCK is sent from the Balancer server to the destination, + * including the block id, source, and proxy. + * OP_COPY_BLOCK is sent from the destination to the proxy, which contains + * only the block id. + * A reply to OP_COPY_BLOCK sends the block content. + * A reply to OP_REPLACE_BLOCK includes an operation status. 
+ */ + public static final int DATA_TRANSFER_VERSION = 14; + + // Processed at datanode stream-handler + public static final byte OP_WRITE_BLOCK = (byte) 80; + public static final byte OP_READ_BLOCK = (byte) 81; + public static final byte OP_READ_METADATA = (byte) 82; + public static final byte OP_REPLACE_BLOCK = (byte) 83; + public static final byte OP_COPY_BLOCK = (byte) 84; + public static final byte OP_BLOCK_CHECKSUM = (byte) 85; + + public static final int OP_STATUS_SUCCESS = 0; + public static final int OP_STATUS_ERROR = 1; + public static final int OP_STATUS_ERROR_CHECKSUM = 2; + public static final int OP_STATUS_ERROR_INVALID = 3; + public static final int OP_STATUS_ERROR_EXISTS = 4; + public static final int OP_STATUS_CHECKSUM_OK = 5; + + + +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeID.java b/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeID.java new file mode 100644 index 0000000..1964ae7 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeID.java @@ -0,0 +1,190 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.protocol; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import org.apache.hadoop.io.UTF8; +import org.apache.hadoop.io.WritableComparable; + +/** + * DatanodeID is composed of the data node + * name (hostname:portNumber) and the data storage ID, + * which it currently represents. + * + */ +public class DatanodeID implements WritableComparable { + public static final DatanodeID[] EMPTY_ARRAY = {}; + + public String name; /// hostname:portNumber + public String storageID; /// unique per cluster storageID + protected int infoPort; /// the port where the infoserver is running + public int ipcPort; /// the port where the ipc server is running + + /** Equivalent to DatanodeID(""). */ + public DatanodeID() {this("");} + + /** Equivalent to DatanodeID(nodeName, "", -1, -1). */ + public DatanodeID(String nodeName) {this(nodeName, "", -1, -1);} + + /** + * DatanodeID copy constructor + * + * @param from + */ + public DatanodeID(DatanodeID from) { + this(from.getName(), + from.getStorageID(), + from.getInfoPort(), + from.getIpcPort()); + } + + /** + * Create DatanodeID + * @param nodeName (hostname:portNumber) + * @param storageID data storage ID + * @param infoPort info server port + * @param ipcPort ipc server port + */ + public DatanodeID(String nodeName, String storageID, + int infoPort, int ipcPort) { + this.name = nodeName; + this.storageID = storageID; + this.infoPort = infoPort; + this.ipcPort = ipcPort; + } + + /** + * @return hostname:portNumber. + */ + public String getName() { + return name; + } + + /** + * @return data storage ID. 
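A rough sketch of how a client-side request leads with these constants; the per-operation fields that follow (block id, generation stamp, offsets, client name) are elided here, and sock is an assumed, already-connected socket to a datanode:

// Sketch only: the common preamble of a data-transfer request.
DataOutputStream out = new DataOutputStream(
    new BufferedOutputStream(sock.getOutputStream()));
out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION); // protocol version, currently 14
out.writeByte(DataTransferProtocol.OP_READ_BLOCK);          // requested operation
// operation-specific fields follow here
out.flush();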
+ */ + public String getStorageID() { + return this.storageID; + } + + /** + * @return infoPort (the port at which the HTTP server bound to) + */ + public int getInfoPort() { + if (infoPort < 0) + return 50075; // default port + return infoPort; + } + + /** + * @return ipcPort (the port at which the IPC server bound to) + */ + public int getIpcPort() { + return ipcPort; + } + + /** + * sets the data storage ID. + */ + public void setStorageID(String storageID) { + this.storageID = storageID; + } + + /** + * @return hostname and no :portNumber. + */ + public String getHost() { + int colon = name.indexOf(":"); + if (colon < 0) { + return name; + } else { + return name.substring(0, colon); + } + } + + public int getPort() { + int colon = name.indexOf(":"); + if (colon < 0) { + return 50010; // default port. + } + return Integer.parseInt(name.substring(colon+1)); + } + + public boolean equals(Object to) { + if (this == to) { + return true; + } + if (!(to instanceof DatanodeID)) { + return false; + } + return (name.equals(((DatanodeID)to).getName()) && + storageID.equals(((DatanodeID)to).getStorageID())); + } + + public int hashCode() { + return name.hashCode()^ storageID.hashCode(); + } + + public String toString() { + return name; + } + + /** + * Update fields when a new registration request comes in. + * Note that this does not update storageID. + */ + public void updateRegInfo(DatanodeID nodeReg) { + name = nodeReg.getName(); + infoPort = nodeReg.getInfoPort(); + // update any more fields added in future. + } + + /** Comparable. + * Basis of compare is the String name (host:portNumber) only. + * @param that + * @return as specified by Comparable. + */ + public int compareTo(DatanodeID that) { + return name.compareTo(that.getName()); + } + + ///////////////////////////////////////////////// + // Writable + ///////////////////////////////////////////////// + /** {@inheritDoc} */ + public void write(DataOutput out) throws IOException { + UTF8.writeString(out, name); + UTF8.writeString(out, storageID); + out.writeShort(infoPort); + } + + /** {@inheritDoc} */ + public void readFields(DataInput in) throws IOException { + name = UTF8.readString(in); + storageID = UTF8.readString(in); + // the infoPort read could be negative, if the port is a large number (more + // than 15 bits in storage size (but less than 16 bits). + // So chop off the first two bytes (and hence the signed bits) before + // setting the field. + this.infoPort = in.readShort() & 0x0000ffff; + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java b/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java new file mode 100644 index 0000000..6830423 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java @@ -0,0 +1,345 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.protocol; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.Date; + +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableFactories; +import org.apache.hadoop.io.WritableFactory; +import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.net.NetworkTopology; +import org.apache.hadoop.net.Node; +import org.apache.hadoop.net.NodeBase; +import org.apache.hadoop.util.StringUtils; + +/** + * DatanodeInfo represents the status of a DataNode. + * This object is used for communication in the + * Datanode Protocol and the Client Protocol. + */ +public class DatanodeInfo extends DatanodeID implements Node { + protected long capacity; + protected long dfsUsed; + protected long remaining; + protected long lastUpdate; + protected int xceiverCount; + protected String location = NetworkTopology.DEFAULT_RACK; + + /** HostName as suplied by the datanode during registration as its + * name. Namenode uses datanode IP address as the name. + */ + protected String hostName = null; + + // administrative states of a datanode + public enum AdminStates {NORMAL, DECOMMISSION_INPROGRESS, DECOMMISSIONED; } + protected AdminStates adminState; + + + public DatanodeInfo() { + super(); + adminState = null; + } + + public DatanodeInfo(DatanodeInfo from) { + super(from); + this.capacity = from.getCapacity(); + this.dfsUsed = from.getDfsUsed(); + this.remaining = from.getRemaining(); + this.lastUpdate = from.getLastUpdate(); + this.xceiverCount = from.getXceiverCount(); + this.location = from.getNetworkLocation(); + this.adminState = from.adminState; + this.hostName = from.hostName; + } + + public DatanodeInfo(DatanodeID nodeID) { + super(nodeID); + this.capacity = 0L; + this.dfsUsed = 0L; + this.remaining = 0L; + this.lastUpdate = 0L; + this.xceiverCount = 0; + this.adminState = null; + } + + protected DatanodeInfo(DatanodeID nodeID, String location, String hostName) { + this(nodeID); + this.location = location; + this.hostName = hostName; + } + + /** The raw capacity. */ + public long getCapacity() { return capacity; } + + /** The used space by the data node. */ + public long getDfsUsed() { return dfsUsed; } + + /** The used space by the data node. */ + public long getNonDfsUsed() { + long nonDFSUsed = capacity - dfsUsed - remaining; + return nonDFSUsed < 0 ? 0 : nonDFSUsed; + } + + /** The used space by the data node as percentage of present capacity */ + public float getDfsUsedPercent() { + if (capacity <= 0) { + return 100; + } + + return ((float)dfsUsed * 100.0f)/(float)capacity; + } + + /** The raw free space. */ + public long getRemaining() { return remaining; } + + /** The remaining space as percentage of configured capacity. */ + public float getRemainingPercent() { + if (capacity <= 0) { + return 0; + } + + return ((float)remaining * 100.0f)/(float)capacity; + } + + /** The time when this information was accurate. */ + public long getLastUpdate() { return lastUpdate; } + + /** number of active connections */ + public int getXceiverCount() { return xceiverCount; } + + /** Sets raw capacity. */ + public void setCapacity(long capacity) { + this.capacity = capacity; + } + + /** Sets raw free space. */ + public void setRemaining(long remaining) { + this.remaining = remaining; + } + + /** Sets time when this information was accurate. 
*/ + public void setLastUpdate(long lastUpdate) { + this.lastUpdate = lastUpdate; + } + + /** Sets number of active connections */ + public void setXceiverCount(int xceiverCount) { + this.xceiverCount = xceiverCount; + } + + /** rack name **/ + public synchronized String getNetworkLocation() {return location;} + + /** Sets the rack name */ + public synchronized void setNetworkLocation(String location) { + this.location = NodeBase.normalize(location); + } + + public String getHostName() { + return (hostName == null || hostName.length()==0) ? getHost() : hostName; + } + + public void setHostName(String host) { + hostName = host; + } + + /** A formatted string for reporting the status of the DataNode. */ + public String getDatanodeReport() { + StringBuffer buffer = new StringBuffer(); + long c = getCapacity(); + long r = getRemaining(); + long u = getDfsUsed(); + long nonDFSUsed = getNonDfsUsed(); + float usedPercent = getDfsUsedPercent(); + float remainingPercent = getRemainingPercent(); + + buffer.append("Name: "+name+"\n"); + if (!NetworkTopology.DEFAULT_RACK.equals(location)) { + buffer.append("Rack: "+location+"\n"); + } + buffer.append("Decommission Status : "); + if (isDecommissioned()) { + buffer.append("Decommissioned\n"); + } else if (isDecommissionInProgress()) { + buffer.append("Decommission in progress\n"); + } else { + buffer.append("Normal\n"); + } + buffer.append("Configured Capacity: "+c+" ("+StringUtils.byteDesc(c)+")"+"\n"); + buffer.append("DFS Used: "+u+" ("+StringUtils.byteDesc(u)+")"+"\n"); + buffer.append("Non DFS Used: "+nonDFSUsed+" ("+StringUtils.byteDesc(nonDFSUsed)+")"+"\n"); + buffer.append("DFS Remaining: " +r+ "("+StringUtils.byteDesc(r)+")"+"\n"); + buffer.append("DFS Used%: "+StringUtils.limitDecimalTo2(usedPercent)+"%\n"); + buffer.append("DFS Remaining%: "+StringUtils.limitDecimalTo2(remainingPercent)+"%\n"); + buffer.append("Last contact: "+new Date(lastUpdate)+"\n"); + return buffer.toString(); + } + + /** A formatted string for printing the status of the DataNode. */ + public String dumpDatanode() { + StringBuffer buffer = new StringBuffer(); + long c = getCapacity(); + long r = getRemaining(); + long u = getDfsUsed(); + buffer.append(name); + if (!NetworkTopology.DEFAULT_RACK.equals(location)) { + buffer.append(" "+location); + } + if (isDecommissioned()) { + buffer.append(" DD"); + } else if (isDecommissionInProgress()) { + buffer.append(" DP"); + } else { + buffer.append(" IN"); + } + buffer.append(" " + c + "(" + StringUtils.byteDesc(c)+")"); + buffer.append(" " + u + "(" + StringUtils.byteDesc(u)+")"); + buffer.append(" " + StringUtils.limitDecimalTo2(((1.0*u)/c)*100)+"%"); + buffer.append(" " + r + "(" + StringUtils.byteDesc(r)+")"); + buffer.append(" " + new Date(lastUpdate)); + return buffer.toString(); + } + + /** + * Start decommissioning a node. + * old state. + */ + public void startDecommission() { + adminState = AdminStates.DECOMMISSION_INPROGRESS; + } + + /** + * Stop decommissioning a node. + * old state. + */ + public void stopDecommission() { + adminState = null; + } + + /** + * Returns true if the node is in the process of being decommissioned + */ + public boolean isDecommissionInProgress() { + if (adminState == AdminStates.DECOMMISSION_INPROGRESS) { + return true; + } + return false; + } + + /** + * Returns true if the node has been decommissioned. 
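These accessors combine with ClientProtocol.getDatanodeReport() roughly as follows (a sketch; namenode is an assumed proxy):

DatanodeInfo[] live =
    namenode.getDatanodeReport(FSConstants.DatanodeReportType.LIVE);
for (DatanodeInfo dn : live) {
  System.out.println(dn.getDatanodeReport()); // multi-line, human-readable report
  System.out.println(dn.dumpDatanode());      // one-line summary
}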
+ */ + public boolean isDecommissioned() { + if (adminState == AdminStates.DECOMMISSIONED) { + return true; + } + return false; + } + + /** + * Sets the admin state to indicate that decommision is complete. + */ + public void setDecommissioned() { + adminState = AdminStates.DECOMMISSIONED; + } + + /** + * Retrieves the admin state of this node. + */ + AdminStates getAdminState() { + if (adminState == null) { + return AdminStates.NORMAL; + } + return adminState; + } + + /** + * Sets the admin state of this node. + */ + protected void setAdminState(AdminStates newState) { + if (newState == AdminStates.NORMAL) { + adminState = null; + } + else { + adminState = newState; + } + } + + private int level; //which level of the tree the node resides + private Node parent; //its parent + + /** Return this node's parent */ + public Node getParent() { return parent; } + public void setParent(Node parent) {this.parent = parent;} + + /** Return this node's level in the tree. + * E.g. the root of a tree returns 0 and its children return 1 + */ + public int getLevel() { return level; } + public void setLevel(int level) {this.level = level;} + + ///////////////////////////////////////////////// + // Writable + ///////////////////////////////////////////////// + static { // register a ctor + WritableFactories.setFactory + (DatanodeInfo.class, + new WritableFactory() { + public Writable newInstance() { return new DatanodeInfo(); } + }); + } + + /** {@inheritDoc} */ + public void write(DataOutput out) throws IOException { + super.write(out); + + //TODO: move it to DatanodeID once DatanodeID is not stored in FSImage + out.writeShort(ipcPort); + + out.writeLong(capacity); + out.writeLong(dfsUsed); + out.writeLong(remaining); + out.writeLong(lastUpdate); + out.writeInt(xceiverCount); + Text.writeString(out, location); + Text.writeString(out, hostName == null? "": hostName); + WritableUtils.writeEnum(out, getAdminState()); + } + + /** {@inheritDoc} */ + public void readFields(DataInput in) throws IOException { + super.readFields(in); + + //TODO: move it to DatanodeID once DatanodeID is not stored in FSImage + this.ipcPort = in.readShort() & 0x0000ffff; + + this.capacity = in.readLong(); + this.dfsUsed = in.readLong(); + this.remaining = in.readLong(); + this.lastUpdate = in.readLong(); + this.xceiverCount = in.readInt(); + this.location = Text.readString(in); + this.hostName = Text.readString(in); + setAdminState(WritableUtils.readEnum(in, AdminStates.class)); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/protocol/DirectoryListing.java b/src/hdfs/org/apache/hadoop/hdfs/protocol/DirectoryListing.java new file mode 100644 index 0000000..7e8ed67 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/protocol/DirectoryListing.java @@ -0,0 +1,123 @@ +/* Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.protocol; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableFactories; +import org.apache.hadoop.io.WritableFactory; + +/** + * This class defines a partial listing of a directory to support + * iterative directory listing. + */ +public class DirectoryListing implements Writable { + static { // register a ctor + WritableFactories.setFactory + (DirectoryListing.class, + new WritableFactory() { + public Writable newInstance() { return new DirectoryListing(); } + }); + } + + private HdfsFileStatus[] partialListing; + private int remainingEntries; + + /** + * default constructor + */ + public DirectoryListing() { + } + + /** + * constructor + * @param partialListing a partial listing of a directory + * @param remainingEntries number of entries that are left to be listed + */ + public DirectoryListing(HdfsFileStatus[] partialListing, + int remainingEntries) { + if (partialListing == null) { + throw new IllegalArgumentException("partial listing should not be null"); + } + if (partialListing.length == 0 && remainingEntries != 0) { + throw new IllegalArgumentException("Partial listing is empty but " + + "the number of remaining entries is not zero"); + } + this.partialListing = partialListing; + this.remainingEntries = remainingEntries; + } + + /** + * Get the partial listing of file status + * @return the partial listing of file status + */ + public HdfsFileStatus[] getPartialListing() { + return partialListing; + } + + /** + * Get the number of remaining entries that are left to be listed + * @return the number of remaining entries that are left to be listed + */ + public int getRemainingEntries() { + return remainingEntries; + } + + /** + * Check if there are more entries that are left to be listed + * @return true if there are more entries that are left to be listed; + * return false otherwise. + */ + public boolean hasMore() { + return remainingEntries != 0; + } + + /** + * Get the last name in this list + * @return the last name in the list if it is not empty; otherwise return null + */ + public byte[] getLastName() { + if (partialListing.length == 0) { + return null; + } + return partialListing[partialListing.length-1].getLocalNameInBytes(); + } + + // Writable interface + @Override + public void readFields(DataInput in) throws IOException { + int numEntries = in.readInt(); + partialListing = new HdfsFileStatus[numEntries]; + for (int i=0; i blocks; // array of blocks with prioritized locations + private boolean underConstruction; + + public LocatedBlocks() { + fileLength = 0; + blocks = null; + underConstruction = false; + } + + public LocatedBlocks(long flength, List blks, boolean isUnderConstuction) { + + fileLength = flength; + blocks = blks; + underConstruction = isUnderConstuction; + } + + /** + * Get located blocks. + */ + public List getLocatedBlocks() { + return blocks; + } + + /** + * Get located block. + */ + public LocatedBlock get(int index) { + return blocks.get(index); + } + + /** + * Get number of located blocks. + */ + public int locatedBlockCount() { + return blocks == null ? 0 : blocks.size(); + } + + /** + * + */ + public long getFileLength() { + return this.fileLength; + } + + /** + * Return ture if file was under construction when + * this LocatedBlocks was constructed, false otherwise. 
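The iterative listing contract (getPartialListing() plus DirectoryListing.hasMore()/getLastName()) can be exercised roughly as below; the empty start key, the directory path, and the getLocalName() accessor are assumptions for illustration:

byte[] startAfter = new byte[0];             // assumed convention: start at the beginning
DirectoryListing batch;
do {
  batch = namenode.getPartialListing("/user/warehouse", startAfter);
  for (HdfsFileStatus stat : batch.getPartialListing()) {
    System.out.println(stat.getLocalName()); // assumed accessor on HdfsFileStatus
  }
  startAfter = batch.getLastName();          // resume after the last returned entry
} while (batch.hasMore());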
+ */ + public boolean isUnderConstruction() { + return underConstruction; + } + + /** + * Find block containing specified offset. + * + * @return block if found, or null otherwise. + */ + public int findBlock(long offset) { + // create fake block of size 1 as a key + LocatedBlock key = new LocatedBlock(); + key.setStartOffset(offset); + key.getBlock().setNumBytes(1); + Comparator comp = + new Comparator() { + // Returns 0 iff a is inside b or b is inside a + public int compare(LocatedBlock a, LocatedBlock b) { + long aBeg = a.getStartOffset(); + long bBeg = b.getStartOffset(); + long aEnd = aBeg + a.getBlockSize(); + long bEnd = bBeg + b.getBlockSize(); + if(aBeg <= bBeg && bEnd <= aEnd + || bBeg <= aBeg && aEnd <= bEnd) + return 0; // one of the blocks is inside the other + if(aBeg < bBeg) + return -1; // a's left bound is to the left of the b's + return 1; + } + }; + return Collections.binarySearch(blocks, key, comp); + } + + public void insertRange(int blockIdx, List newBlocks) { + int oldIdx = blockIdx; + int insStart = 0, insEnd = 0; + for(int newIdx = 0; newIdx < newBlocks.size() && oldIdx < blocks.size(); + newIdx++) { + long newOff = newBlocks.get(newIdx).getStartOffset(); + long oldOff = blocks.get(oldIdx).getStartOffset(); + if(newOff < oldOff) { + insEnd++; + } else if(newOff == oldOff) { + // replace old cached block by the new one + blocks.set(oldIdx, newBlocks.get(newIdx)); + if(insStart < insEnd) { // insert new blocks + blocks.addAll(oldIdx, newBlocks.subList(insStart, insEnd)); + oldIdx += insEnd - insStart; + } + insStart = insEnd = newIdx+1; + oldIdx++; + } else { // newOff > oldOff + assert false : "List of LocatedBlock must be sorted by startOffset"; + } + } + insEnd = newBlocks.size(); + if(insStart < insEnd) { // insert new blocks + blocks.addAll(oldIdx, newBlocks.subList(insStart, insEnd)); + } + } + + public static int getInsertIndex(int binSearchResult) { + return binSearchResult >= 0 ? binSearchResult : -(binSearchResult+1); + } + + ////////////////////////////////////////////////// + // Writable + ////////////////////////////////////////////////// + static { // register a ctor + WritableFactories.setFactory + (LocatedBlocks.class, + new WritableFactory() { + public Writable newInstance() { return new LocatedBlocks(); } + }); + } + + public void write(DataOutput out) throws IOException { + out.writeLong(this.fileLength); + out.writeBoolean(underConstruction); + // write located blocks + int nrBlocks = locatedBlockCount(); + out.writeInt(nrBlocks); + if (nrBlocks == 0) { + return; + } + for (LocatedBlock blk : this.blocks) { + blk.write(out); + } + } + + public void readFields(DataInput in) throws IOException { + this.fileLength = in.readLong(); + underConstruction = in.readBoolean(); + // read located blocks + int nrBlocks = in.readInt(); + this.blocks = new ArrayList(nrBlocks); + for (int idx = 0; idx < nrBlocks; idx++) { + LocatedBlock blk = new LocatedBlock(); + blk.readFields(in); + this.blocks.add(blk); + } + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/protocol/LocatedDirectoryListing.java b/src/hdfs/org/apache/hadoop/hdfs/protocol/LocatedDirectoryListing.java new file mode 100644 index 0000000..68d98e4 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/protocol/LocatedDirectoryListing.java @@ -0,0 +1,96 @@ +/* Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
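A minimal helper sketch on top of findBlock()/getInsertIndex() above, returning the cached block that covers an offset, or null when that range is not cached yet:

static LocatedBlock blockAt(LocatedBlocks blocks, long offset) {
  int idx = blocks.findBlock(offset);        // binary search over block start offsets
  if (idx >= 0) {
    return blocks.get(idx);                  // offset falls inside a cached block
  }
  int insertPoint = LocatedBlocks.getInsertIndex(idx);
  // a caller would fetch the missing range from the namenode and
  // call blocks.insertRange(insertPoint, newBlocks) before retrying
  return null;
}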
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.protocol; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableFactories; +import org.apache.hadoop.io.WritableFactory; + +/** + * This class defines a partial listing of a directory to support + * iterative directory listing. + */ +public class LocatedDirectoryListing extends DirectoryListing { + static { // register a ctor + WritableFactories.setFactory + (LocatedDirectoryListing.class, + new WritableFactory() { + public Writable newInstance() { return new LocatedDirectoryListing(); } + }); + } + + private LocatedBlocks[] blockLocations; + + /** + * default constructor + */ + public LocatedDirectoryListing() { + } + + /** + * constructor + * @param partialListing a partial listing of a directory + * @param remainingEntries number of entries that are left to be listed + */ + public LocatedDirectoryListing(HdfsFileStatus[] partialListing, + LocatedBlocks[] blockLocations, + int remainingEntries) { + super(partialListing, remainingEntries); + if (blockLocations == null) { + throw new IllegalArgumentException("block locations should not be null"); + } + + if(blockLocations.length != partialListing.length) { + throw new IllegalArgumentException( + "location list and status list do not have the same length"); + } + + this.blockLocations = blockLocations; + } + + /** + * Get the list of block locations + * @return the lsit of block locations + */ + public LocatedBlocks[] getBlockLocations() { + return blockLocations; + } + + // Writable interface + @Override + public void readFields(DataInput in) throws IOException { + super.readFields(in); + int numEntries = getPartialListing().length; + blockLocations = new LocatedBlocks[numEntries]; + for (int i=0; i destPath; // all the destination paths + + /** + * Create the empty object + */ + public PolicyInfo() { + this.conf = null; + this.description = ""; + this.srcPath = null; + this.destPath = null; + this.properties = new Properties(); + } + + /** + * Create the metadata that describes a policy + */ + public PolicyInfo(String srcPath, Configuration conf) { + this.conf = conf; + this.description = ""; + this.srcPath = new Path(srcPath); + this.destPath = null; + this.properties = new Properties(); + } + + /** + * Copy fields from another PolicyInfo + */ + public void copyFrom(PolicyInfo other) throws IOException { + if (other.conf != null) { + this.conf = other.conf; + } + if (other.description != null && other.description.length() > 0) { + this.description = other.description; + } + if (other.srcPath != null) { + this.srcPath = other.srcPath; + } + for (Object key : other.properties.keySet()) { + String skey = (String) key; + this.properties.setProperty(skey, other.properties.getProperty(skey)); + } + + if (other.destPath != null) { + for (PathInfo p:destPath) { + this.addDestPath(p.rpath.toString(), p.myproperties); + } + } + 
} + + /** + * Sets the input path on which this policy has to be applied + */ + public void setSrcPath(String in) throws IOException { + srcPath= new Path(in); + if (!srcPath.isAbsolute() || !srcPath.toUri().isAbsolute()) { + throw new IOException("Path " + in + " is not absolute."); + } + } + + /** + * Sets the destination path on which this policy has to be applied + */ + public void addDestPath(String in, Properties repl) throws IOException { + Path dPath = new Path(in); + if (!dPath.isAbsolute() || !dPath.toUri().isAbsolute()) { + throw new IOException("Path " + in + " is not absolute."); + } + PathInfo pinfo = new PathInfo(dPath, repl); + if (this.destPath == null) { + this.destPath = new ArrayList(); + } + this.destPath.add(pinfo); + } + + /** + * Set the description of this policy. + */ + public void setDescription(String des) { + this.description = des; + } + + /** + * Sets an internal property. + * @param name property name. + * @param value property value. + */ + public void setProperty(String name, String value) { + properties.setProperty(name, value); + } + + /** + * Returns the value of an internal property. + * @param name property name. + */ + public String getProperty(String name) { + return properties.getProperty(name); + } + + /** + * Get the srcPath + */ + public Path getSrcPath() { + return srcPath; + } + + /** + * Get the destPath + */ + public List getDestPaths() throws IOException { + return destPath; + } + + /** + * Get the Configuration + */ + public Configuration getConf() throws IOException { + return this.conf; + } + + /** + * Convert this policy into a printable form + */ + public String toString() { + StringBuffer buff = new StringBuffer(); + buff.append("Source Path:\t" + srcPath + "\n"); + for (Enumeration e = properties.propertyNames(); e.hasMoreElements();) { + String name = (String) e.nextElement(); + buff.append( name + ":\t" + properties.getProperty(name) + "\n"); + } + if (description.length() > 0) { + int len = Math.min(description.length(), 80); + String sub = description.substring(0, len).trim(); + sub = sub.replaceAll("\n", " "); + buff.append("Description:\t" + sub + "...\n"); + } + if (destPath != null) { + for (PathInfo p:destPath) { + buff.append("Destination Path:\t" + p.rpath + "\n"); + for (Enumeration e = p.myproperties.propertyNames(); e.hasMoreElements();) { + String name = (String) e.nextElement(); + buff.append( name + ":\t\t" + p.myproperties.getProperty(name) + "\n"); + } + } + } + return buff.toString(); + } + + /** + * Sort Policies based on their srcPath. reverse lexicographical order. 
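A hypothetical use of the PolicyInfo API above; the paths and property names are made up for illustration:

Configuration conf = new Configuration();
PolicyInfo policy = new PolicyInfo("/user/warehouse/tables", conf); // source path must be absolute
policy.setDescription("illustrative policy");
policy.setProperty("targetReplication", "2");                       // made-up property name

Properties destProps = new Properties();
destProps.setProperty("replication", "1");                          // made-up property name
policy.addDestPath("/raid/user/warehouse/tables", destProps);       // destination must be absolute

System.out.println(policy.toString());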
+ */ + public static class CompareByPath implements Comparator { + public CompareByPath() throws IOException { + } + public int compare(PolicyInfo l1, PolicyInfo l2) { + return 0 - l1.getSrcPath().compareTo(l2.getSrcPath()); + } + } + + + ////////////////////////////////////////////////// + // Writable + ////////////////////////////////////////////////// + static { // register a ctor + WritableFactories.setFactory + (PolicyInfo.class, + new WritableFactory() { + public Writable newInstance() { return new PolicyInfo(); } + }); + } + + public void write(DataOutput out) throws IOException { + Text.writeString(out, srcPath.toString()); + Text.writeString(out, description); + out.writeInt(properties.size()); + for (Enumeration e = properties.propertyNames(); e.hasMoreElements();) { + String name = (String) e.nextElement(); + Text.writeString(out, name); + Text.writeString(out, properties.getProperty(name)); + } + out.writeInt(destPath.size()); + for (PathInfo p:destPath) { + Text.writeString(out, p.rpath.toString()); + out.writeInt(p.myproperties.size()); + for (Enumeration e = p.myproperties.propertyNames(); e.hasMoreElements();) { + String name = (String) e.nextElement(); + Text.writeString(out, name); + Text.writeString(out, p.myproperties.getProperty(name)); + } + } + } + + public void readFields(DataInput in) throws IOException { + this.srcPath = new Path(Text.readString(in)); + this.description = Text.readString(in); + for (int n = in.readInt(); n>0; n--) { + String name = Text.readString(in); + String value = Text.readString(in); + properties.setProperty(name,value); + } + for (int n = in.readInt(); n>0; n--) { + String destPath = Text.readString(in); + Properties p = new Properties(); + for (int m = in.readInt(); m>0; m--) { + String name = Text.readString(in); + String value = Text.readString(in); + p.setProperty(name,value); + } + this.addDestPath(destPath, p); + } + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/protocol/ProtocolCompatible.java b/src/hdfs/org/apache/hadoop/hdfs/protocol/ProtocolCompatible.java new file mode 100644 index 0000000..cd5eacc --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/protocol/ProtocolCompatible.java @@ -0,0 +1,56 @@ +package org.apache.hadoop.hdfs.protocol; + +/** + * This class provides a set of methods to check if two versions of a protocol + * is compatible or not. 
+ * + */ +public class ProtocolCompatible { + /** + * Check if the client and NameNode have compatible ClientProtocol versions + * + * @param clientVersion the version of ClientProtocol that client has + * @param serverVersion the version of ClientProtocol that NameNode has + * @return true if two versions are compatible + */ + public static boolean isCompatibleClientProtocol( + long clientVersion, long serverVersion) { + return clientVersion == serverVersion || + ( + ( clientVersion == ClientProtocol.OPTIMIZE_FILE_STATUS_VERSION-1 || + clientVersion == ClientProtocol.OPTIMIZE_FILE_STATUS_VERSION || + clientVersion == ClientProtocol.ITERATIVE_LISTING_VERSION || + clientVersion == ClientProtocol.BULK_BLOCK_LOCATIONS_VERSION || + clientVersion == ClientProtocol.CONCAT_VERSION || + clientVersion == ClientProtocol.LIST_CORRUPT_FILEBLOCKS_VERSION + ) && + ( serverVersion == ClientProtocol.OPTIMIZE_FILE_STATUS_VERSION-1 || + serverVersion == ClientProtocol.OPTIMIZE_FILE_STATUS_VERSION || + serverVersion == ClientProtocol.ITERATIVE_LISTING_VERSION || + serverVersion == ClientProtocol.BULK_BLOCK_LOCATIONS_VERSION || + serverVersion == ClientProtocol.CONCAT_VERSION || + serverVersion == ClientProtocol.LIST_CORRUPT_FILEBLOCKS_VERSION + )); + } + + /** + * Check if the client and DataNode have compatible ClientDataNodeProtocol versions + * + * @param clientVersion the version of ClientDatanodeProtocol that client has + * @param serverVersion the version of ClientDatanodeProtocol that DataNode has + * @return true if two versions are compatible + */ + public static boolean isCompatibleClientDatanodeProtocol( + long clientVersion, long serverVersion) { + return clientVersion == serverVersion || + ( + ( clientVersion == ClientDatanodeProtocol.GET_BLOCKINFO_VERSION-1 || + clientVersion == ClientDatanodeProtocol.GET_BLOCKINFO_VERSION || + clientVersion == ClientDatanodeProtocol.COPY_BLOCK_VERSION + ) && + ( serverVersion == ClientDatanodeProtocol.GET_BLOCKINFO_VERSION-1 || + serverVersion == ClientDatanodeProtocol.GET_BLOCKINFO_VERSION || + serverVersion == ClientDatanodeProtocol.COPY_BLOCK_VERSION + )); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java b/src/hdfs/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java new file mode 100644 index 0000000..81762af --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.protocol; + +import java.io.IOException; + +/** + * This exception is thrown when modification to HDFS results in violation + * of a directory quota. 
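A sketch of the intended handshake, assuming the usual VersionedProtocol.getProtocolVersion() call and a versionID constant on ClientProtocol (both assumptions, not shown in this hunk):

long clientVersion = ClientProtocol.versionID;            // assumed constant
long serverVersion = namenode.getProtocolVersion(
    ClientProtocol.class.getName(), clientVersion);       // assumed RPC handshake
if (!ProtocolCompatible.isCompatibleClientProtocol(clientVersion, serverVersion)) {
  throw new IOException("Incompatible ClientProtocol: client=" + clientVersion
      + ", server=" + serverVersion);
}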
+ * A directory quota might be a namespace quota (limit
+ * on the number of files and directories) or a diskspace quota (limit on space
+ * taken by all the files under the directory tree).

+ * + * The message for the exception specifies the directory where the quota + * was violated and actual quotas. Specific message is generated in the + * corresponding Exception class: + * DSQuotaExceededException or + * NSQuotaExceededException + */ +public class QuotaExceededException extends IOException { + protected static final long serialVersionUID = 1L; + protected String pathName=null; + protected long quota; // quota + protected long count; // actual value + + protected QuotaExceededException(String msg) { + super(msg); + } + + protected QuotaExceededException(long quota, long count) { + this.quota = quota; + this.count = count; + } + + public void setPathName(String path) { + this.pathName = path; + } + + public String getMessage() { + return super.getMessage(); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/protocol/UnregisteredDatanodeException.java b/src/hdfs/org/apache/hadoop/hdfs/protocol/UnregisteredDatanodeException.java new file mode 100644 index 0000000..c60c3be --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/protocol/UnregisteredDatanodeException.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.protocol; + +import java.io.IOException; + + +/** + * This exception is thrown when a datanode that has not previously + * registered is trying to access the name node. + * + */ +public class UnregisteredDatanodeException extends IOException { + + public UnregisteredDatanodeException(DatanodeID nodeID) { + super("Unregistered data node: " + nodeID.getName()); + } + + public UnregisteredDatanodeException(DatanodeID nodeID, + DatanodeInfo storedNode) { + super("Data node " + nodeID.getName() + + " is attempting to report storage ID " + + nodeID.getStorageID() + ". Node " + + storedNode.getName() + " is expected to serve this storage."); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java new file mode 100644 index 0000000..3292b91 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -0,0 +1,1709 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.balancer; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.DataInput; +import java.io.DataInputStream; +import java.io.DataOutput; +import java.io.DataOutputStream; +import java.io.IOException; +import java.io.EOFException; +import java.io.OutputStream; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.text.DateFormat; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Date; +import java.util.Formatter; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.cli.BasicParser; +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.HelpFormatter; +import org.apache.commons.cli.OptionBuilder; +import org.apache.commons.cli.Options; +import org.apache.commons.cli.ParseException; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSClient; +import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.ClientProtocol; +import org.apache.hadoop.hdfs.protocol.DataTransferProtocol; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.Util; +import org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy; +import org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyDefault; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; +import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.retry.RetryPolicies; +import org.apache.hadoop.io.retry.RetryPolicy; +import org.apache.hadoop.io.retry.RetryProxy; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.net.NetworkTopology; +import org.apache.hadoop.security.UnixUserGroupInformation; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.Tool; +import 
org.apache.hadoop.util.ToolRunner; + +/**

+ * The balancer is a tool that balances disk space usage on an HDFS cluster
+ * when some datanodes become full or when new empty nodes join the cluster.
+ * The tool is deployed as an application program that can be run by the
+ * cluster administrator on a live HDFS cluster while applications are
+ * adding and deleting files.
+ *
+ *

+ * SYNOPSIS
+ *
+ * To start:
+ *      bin/start-balancer.sh [-threshold <threshold>]
+ *      Example: bin/start-balancer.sh
+ *                     start the balancer with a default threshold of 10%
+ *               bin/start-balancer.sh -threshold 5
+ *                     start the balancer with a threshold of 5%
+ * To stop:
+ *      bin/stop-balancer.sh
+ *
+ *

DESCRIPTION + *

The threshold parameter is a fraction in the range of (0%, 100%) with a + * default value of 10%. The threshold sets a target for whether the cluster + * is balanced. A cluster is balanced if for each datanode, the utilization + * of the node (ratio of used space at the node to total capacity of the node) + * differs from the utilization of the (ratio of used space in the cluster + * to total capacity of the cluster) by no more than the threshold value. + * The smaller the threshold, the more balanced a cluster will become. + * It takes more time to run the balancer for small threshold values. + * Also for a very small threshold the cluster may not be able to reach the + * balanced state when applications write and delete files concurrently. + * + *

The tool moves blocks from highly utilized datanodes to poorly + * utilized datanodes iteratively. In each iteration a datanode moves or + * receives no more than the lesser of 10 GB or the threshold fraction + * of its capacity. Each iteration runs for no more than 20 minutes. + * At the end of each iteration, the balancer obtains updated datanode + * information from the namenode. + * + *
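A quick worked example of that per-iteration cap (the numbers here are illustrative, not taken from the patch):

    class IterationCapExample {
      public static void main(String[] args) {
        long capacity      = 4L << 40;                            // a 4 TB datanode
        long thresholdPart = (long) (10.0 / 100 * capacity);      // threshold 10% => ~400 GB
        long iterationCap  = Math.min(10L << 30, thresholdPart);  // the 10 GB ceiling wins
        System.out.println(iterationCap);                         // 10737418240 bytes (10 GB)
      }
    }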

A configuration property that limits the balancer's use of bandwidth is + * defined in the default configuration file: + *

+ * <property>
+ *   <name>dfs.balance.bandwidthPerSec</name>
+ *   <value>1048576</value>
+ *   <description> Specifies the maximum bandwidth that each datanode
+ * can utilize for the balancing purpose in terms of the number of bytes
+ * per second. </description>
+ * </property>
+ * + *

This property determines the maximum speed at which a block will be + * moved from one datanode to another. The default value is 1MB/s. The higher + * the bandwidth, the faster a cluster can reach the balanced state, + * but with greater competition with application processes. If an + * administrator changes the value of this property in the configuration + * file, the change is observed when HDFS is next restarted. + * + *
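A small sketch of how that setting is read through the Configuration API (illustration only; the datanode-side code that actually enforces the limit lives outside this file):

    import org.apache.hadoop.conf.Configuration;

    class BandwidthSettingExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // 1048576 bytes/s (1 MB/s) is the documented default
        long bytesPerSec = conf.getLong("dfs.balance.bandwidthPerSec", 1048576L);
        System.out.println("balancing bandwidth cap: " + bytesPerSec + " bytes/s");
      }
    }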

MONITORING BALANCER PROGRESS + *

After the balancer is started, the name of the output file in which the balancer's + * progress will be recorded is printed on the screen. The administrator + * can monitor the running of the balancer by reading the output file. + * The output shows the balancer's status iteration by iteration. In each + * iteration it prints the starting time, the iteration number, the total + * number of bytes that have been moved in the previous iterations, + * the total number of bytes that are left to move in order for the cluster + * to be balanced, and the number of bytes that are being moved in this + * iteration. Normally "Bytes Already Moved" is increasing while "Bytes Left + * To Move" is decreasing. + * + *

Running multiple instances of the balancer in an HDFS cluster is + * prohibited by the tool. + * + *
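The mechanism behind that restriction appears in checkAndMarkRunningBalancer() near the end of this class; the following is a condensed sketch of the same idea (illustrative, error handling omitted):

    import java.io.DataOutputStream;
    import java.net.InetAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class BalancerLockSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // While one balancer holds /system/balancer.id open, a second create()
        // fails with AlreadyBeingCreatedException, so the newcomer exits.
        DataOutputStream out = fs.create(new Path("/system/balancer.id"));
        out.writeBytes(InetAddress.getLocalHost().getHostName());
        out.flush();
        // ... balance; run() deletes the file in its finally block when done.
      }
    }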

The balancer automatically exits when any of the following five + * conditions is satisfied: + *

    + *
  1. The cluster is balanced; + *
  2. No block can be moved; + *
  3. No block has been moved for five consecutive iterations; + *
  4. An IOException occurs while communicating with the namenode; + *
  5. Another balancer is running. + *
+ * + *

Upon exit, a balancer returns an exit code and prints one of the + * following messages to the output file, corresponding to the above exit + * reasons (see the sketch after this list for the matching exit-code constants): + *

    + *
  1. The cluster is balanced. Exiting... + *
  2. No block can be moved. Exiting... + *
  3. No block has been moved for 5 iterations. Exiting... + *
  4. Received an IO exception: failure reason. Exiting... + *
  5. Another balancer is running. Exiting... + *
+ * + *
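A minimal driver sketch showing how those exit codes can be inspected programmatically (hypothetical; it assumes same-package access, since the constructors below are package-private, and uses the public exit-code constants defined later in this class):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.util.ToolRunner;

    class BalancerDriverSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        int rc = ToolRunner.run(conf, new Balancer(conf), args);
        if (rc == Balancer.SUCCESS) {
          System.out.println("The cluster is balanced.");
        } else if (rc == Balancer.ALREADY_RUNNING) {
          System.out.println("Another balancer is running.");
        }
        System.exit(rc);
      }
    }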

The administrator can interrupt the execution of the balancer at any + * time by running the command "stop-balancer.sh" on the machine where the + * balancer is running. + */ + +public class Balancer implements Tool { + private static final Log LOG = + LogFactory.getLog(Balancer.class.getName()); + final private static long MAX_BLOCKS_SIZE_TO_FETCH = 2*1024*1024*1024L; //2GB + + /** The maximum number of concurrent blocks moves for + * balancing purpose at a datanode + */ + public final static int MAX_NUM_CONCURRENT_MOVES = 5; + public static int maxConcurrentMoves; + private static long maxIterationTime = 20*60*1000L; //20 mins + + private Configuration conf; + + private double threshold = 10D; + private NamenodeProtocol namenode; + private ClientProtocol client; + private FileSystem fs; + private final static Random rnd = new Random(); + + // all data node lists + private Collection overUtilizedDatanodes + = new LinkedList(); + private Collection aboveAvgUtilizedDatanodes + = new LinkedList(); + private Collection belowAvgUtilizedDatanodes + = new LinkedList(); + private Collection underUtilizedDatanodes + = new LinkedList(); + + private Collection sources + = new HashSet(); + private Collection targets + = new HashSet(); + + private Map globalBlockList + = new HashMap(); + private MovedBlocks movedBlocks = new MovedBlocks(); + private Map datanodes + = new HashMap(); + + private NetworkTopology cluster = new NetworkTopology(); + + private double avgUtilization = 0.0D; + + final static private int MOVER_THREAD_POOL_SIZE = 1000; + private ExecutorService moverExecutor = null; + final static private int DISPATCHER_THREAD_POOL_SIZE = 200; + private ExecutorService dispatcherExecutor = null; + + /* This class keeps track of a scheduled block move */ + private class PendingBlockMove { + private BalancerBlock block; + private Source source; + private BalancerDatanode proxySource; + private BalancerDatanode target; + private Socket sock; + + /** constructor */ + private PendingBlockMove() { + } + + /* choose a block & a proxy source for this pendingMove + * whose source & target have already been chosen. + * + * Return true if a block and its proxy are chosen; false otherwise + */ + private boolean chooseBlockAndProxy() { + // iterate all source's blocks until find a good one + for (Iterator blocks= + source.getBlockIterator(); blocks.hasNext();) { + if (markMovedIfGoodBlock(blocks.next())) { + blocks.remove(); + return true; + } + } + return false; + } + + /* Return true if the given block is good for the tentative move; + * If it is good, add it to the moved list to marked as "Moved". + * A block is good if + * 1. it is a good candidate; see isGoodBlockCandidate + * 2. 
can find a proxy source that's not busy for this move + */ + private boolean markMovedIfGoodBlock(BalancerBlock block) { + synchronized(block) { + synchronized(movedBlocks) { + if (isGoodBlockCandidate(source, target, block)) { + this.block = block; + if ( chooseProxySource() ) { + movedBlocks.add(block); + if (LOG.isDebugEnabled()) { + LOG.debug("Decided to move block "+ block.getBlockId() + +" with a length of "+StringUtils.byteDesc(block.getNumBytes()) + + " bytes from " + source.getName() + + " to " + target.getName() + + " using proxy source " + proxySource.getName() ); + } + return true; + } + } + } + } + return false; + } + + /* Now we find out source, target, and block, we need to find a proxy + * + * @return true if a proxy is found; otherwise false + */ + private boolean chooseProxySource() { + // check if there is replica which is on the same rack with the target + for (BalancerDatanode loc : block.getLocations()) { + if (cluster.isOnSameRack(loc.getDatanode(), target.getDatanode())) { + if (loc.addPendingBlock(this)) { + proxySource = loc; + return true; + } + } + } + // find out a non-busy replica + for (BalancerDatanode loc : block.getLocations()) { + if (loc.addPendingBlock(this)) { + proxySource = loc; + return true; + } + } + return false; + } + + /* Dispatch the block move task to the proxy source & wait for the response + */ + private void dispatch() { + sock = new Socket(); + DataOutputStream out = null; + DataInputStream in = null; + try { + sock.connect(NetUtils.createSocketAddr( + target.datanode.getName()), HdfsConstants.READ_TIMEOUT); + sock.setKeepAlive(true); + out = new DataOutputStream( new BufferedOutputStream( + sock.getOutputStream(), FSConstants.BUFFER_SIZE)); + sendRequest(out); + in = new DataInputStream( new BufferedInputStream( + sock.getInputStream(), FSConstants.BUFFER_SIZE)); + receiveResponse(in); + bytesMoved.inc(block.getNumBytes()); + LOG.info( "Moving block " + block.getBlock().getBlockId() + + " from "+ source.getName() + " to " + + target.getName() + " through " + + proxySource.getName() + + " is succeeded." 
); + } catch (IOException e) { + LOG.warn("Error moving block "+block.getBlockId()+ + " from " + source.getName() + " to " + + target.getName() + " through " + + proxySource.getName() + + ": "+e.getMessage()); + if (e instanceof EOFException) { + LOG.warn("Moving block " + block.getBlockId() + + " was cancelled because the time exceeded the limit"); + } + } finally { + IOUtils.closeStream(out); + IOUtils.closeStream(in); + IOUtils.closeSocket(sock); + + proxySource.removePendingBlock(this); + synchronized(target) { + target.removePendingBlock(this); + } + + synchronized (this ) { + reset(); + } + synchronized (Balancer.this) { + Balancer.this.notifyAll(); + } + } + } + + /* Send a block replace request to the output stream*/ + private void sendRequest(DataOutputStream out) throws IOException { + out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION); + out.writeByte(DataTransferProtocol.OP_REPLACE_BLOCK); + out.writeLong(block.getBlock().getBlockId()); + out.writeLong(block.getBlock().getGenerationStamp()); + Text.writeString(out, source.getStorageID()); + proxySource.write(out); + out.flush(); + } + + /* Receive a block copy response from the input stream */ + private void receiveResponse(DataInputStream in) throws IOException { + short status = in.readShort(); + if (status != DataTransferProtocol.OP_STATUS_SUCCESS) { + throw new IOException("block move is failed"); + } + } + + /* reset the object */ + private void reset() { + block = null; + source = null; + proxySource = null; + target = null; + } + + public void closeSocket() { + try { + this.sock.shutdownInput(); + } catch (IOException ex) { + LOG.error("Error shutting down the socket to cancel block transfer"); + } + } + + /* start a thread to dispatch the block move */ + private void scheduleBlockMove() { + moverExecutor.execute(new Runnable() { + public void run() { + if (LOG.isDebugEnabled()) { + LOG.debug("Starting moving "+ block.getBlockId() + + " from " + proxySource.getName() + " to " + target.getName()); + } + dispatch(); + } + }); + } + } + + /* A class for keeping track of blocks in the Balancer */ + static private class BalancerBlock { + private Block block; // the block + private List locations + = new ArrayList(3); // its locations + + /* Constructor */ + private BalancerBlock(Block block) { + this.block = block; + } + + /* clean block locations */ + private synchronized void clearLocations() { + locations.clear(); + } + + /* add a location */ + private synchronized void addLocation(BalancerDatanode datanode) { + if (!locations.contains(datanode)) { + locations.add(datanode); + } + } + + /* Return if the block is located on datanode */ + private synchronized boolean isLocatedOnDatanode( + BalancerDatanode datanode) { + return locations.contains(datanode); + } + + /* Return its locations */ + private synchronized List getLocations() { + return locations; + } + + /* Return the block */ + private Block getBlock() { + return block; + } + + /* Return the block id */ + private long getBlockId() { + return block.getBlockId(); + } + + /* Return the length of the block */ + private long getNumBytes() { + return block.getNumBytes(); + } + } + + /* The class represents a desired move of bytes between two nodes + * and the target. + * An object of this class is stored in a source node. 
+ */ + static private class NodeTask { + private BalancerDatanode datanode; //target node + private long size; //bytes scheduled to move + + /* constructor */ + private NodeTask(BalancerDatanode datanode, long size) { + this.datanode = datanode; + this.size = size; + } + + /* Get the node */ + private BalancerDatanode getDatanode() { + return datanode; + } + + /* Get the number of bytes that need to be moved */ + private long getSize() { + return size; + } + } + + /* Return the utilization of a datanode */ + static private double getUtilization(DatanodeInfo datanode) { + return ((double)datanode.getDfsUsed())/datanode.getCapacity()*100; + } + + /* A class that keeps track of a datanode in Balancer */ + private static class BalancerDatanode implements Writable { + final private static long MAX_SIZE_TO_MOVE = 10*1024*1024*1024L; //10GB + protected DatanodeInfo datanode; + private double utilization; + protected long maxSizeToMove; + protected long scheduledSize = 0L; + // blocks being moved but not confirmed yet + private List pendingBlocks = + new ArrayList(maxConcurrentMoves); + + /* Constructor + * Depending on avgutil & threshold, calculate maximum bytes to move + */ + private BalancerDatanode( + DatanodeInfo node, double avgUtil, double threshold) { + datanode = node; + utilization = Balancer.getUtilization(node); + + if (utilization >= avgUtil+threshold + || utilization <= avgUtil-threshold) { + maxSizeToMove = (long)(threshold*datanode.getCapacity()/100); + } else { + maxSizeToMove = + (long)(Math.abs(avgUtil-utilization)*datanode.getCapacity()/100); + } + if (utilization < avgUtil ) { + maxSizeToMove = Math.min(datanode.getRemaining(), maxSizeToMove); + } + maxSizeToMove = Math.min(MAX_SIZE_TO_MOVE, maxSizeToMove); + } + + /** Get the datanode */ + protected DatanodeInfo getDatanode() { + return datanode; + } + + /** Get the name of the datanode */ + protected String getName() { + return datanode.getName(); + } + + /* Get the storage id of the datanode */ + protected String getStorageID() { + return datanode.getStorageID(); + } + + /** Decide if still need to move more bytes */ + protected boolean isMoveQuotaFull() { + return scheduledSize nodeTasks = new ArrayList(2); + private long blocksToReceive = 0L; + /* source blocks point to balancerBlocks in the global list because + * we want to keep one copy of a block in balancer and be aware that + * the locations are changing over time. + */ + private List srcBlockList + = new ArrayList(); + + /* constructor */ + private Source(DatanodeInfo node, double avgUtil, double threshold) { + super(node, avgUtil, threshold); + } + + /** Add a node task */ + private void addNodeTask(NodeTask task) { + assert (task.datanode != this) : + "Source and target are the same " + datanode.getName(); + incScheduledSize(task.getSize()); + nodeTasks.add(task); + } + + /* Return an iterator to this source's blocks */ + private Iterator getBlockIterator() { + return srcBlockList.iterator(); + } + + /* fetch new blocks of this source from namenode and + * update this source's block list & the global block list + * Return the total size of the received blocks in the number of bytes. 
+ */ + private long getBlockList() throws IOException { + BlockWithLocations[] newBlocks = namenode.getBlocks(datanode, + (long)Math.min(MAX_BLOCKS_SIZE_TO_FETCH, blocksToReceive)).getBlocks(); + long bytesReceived = 0; + for (BlockWithLocations blk : newBlocks) { + bytesReceived += blk.getBlock().getNumBytes(); + BalancerBlock block; + synchronized(globalBlockList) { + block = globalBlockList.get(blk.getBlock()); + if (block==null) { + block = new BalancerBlock(blk.getBlock()); + globalBlockList.put(blk.getBlock(), block); + } else { + block.clearLocations(); + } + + synchronized (block) { + // update locations + for ( String location : blk.getDatanodes() ) { + BalancerDatanode datanode = datanodes.get(location); + if (datanode != null) { // not an unknown datanode + block.addLocation(datanode); + } + } + } + if (!srcBlockList.contains(block) && isGoodBlockCandidate(block)) { + // filter bad candidates + srcBlockList.add(block); + } + } + } + return bytesReceived; + } + + /* Decide if the given block is a good candidate to move or not */ + private boolean isGoodBlockCandidate(BalancerBlock block) { + for (NodeTask nodeTask : nodeTasks) { + if (Balancer.this.isGoodBlockCandidate(this, nodeTask.datanode, block)) { + return true; + } + } + return false; + } + + /* Return a block that's good for the source thread to dispatch immediately + * The block's source, target, and proxy source are determined too. + * When choosing proxy and target, source & target throttling + * has been considered. They are chosen only when they have the capacity + * to support this block move. + * The block should be dispatched immediately after this method is returned. + */ + private PendingBlockMove chooseNextBlockToMove() { + for ( Iterator tasks=nodeTasks.iterator(); tasks.hasNext(); ) { + NodeTask task = tasks.next(); + BalancerDatanode target = task.getDatanode(); + PendingBlockMove pendingBlock = new PendingBlockMove(); + if ( target.addPendingBlock(pendingBlock) ) { + // target is not busy, so do a tentative block allocation + pendingBlock.source = this; + pendingBlock.target = target; + if ( pendingBlock.chooseBlockAndProxy() ) { + long blockSize = pendingBlock.block.getNumBytes(); + scheduledSize -= blockSize; + task.size -= blockSize; + if (task.size == 0) { + tasks.remove(); + } + return pendingBlock; + } else { + // cancel the tentative move + target.removePendingBlock(pendingBlock); + } + } + } + return null; + } + + /* iterate all source's blocks to remove moved ones */ + private void filterMovedBlocks() { + for (Iterator blocks=getBlockIterator(); + blocks.hasNext();) { + if (movedBlocks.contains(blocks.next())) { + blocks.remove(); + } + } + } + + private static final int SOURCE_BLOCK_LIST_MIN_SIZE=5; + /* Return if should fetch more blocks from namenode */ + private boolean shouldFetchMoreBlocks() { + return srcBlockList.size()0; + } + + /* This method iteratively does the following: + * it first selects a block to move, + * then sends a request to the proxy source to start the block move + * when the source's block list falls below a threshold, it asks + * the namenode for more blocks. + * It terminates when it has dispatch enough block move tasks or + * it has received enough blocks from the namenode, or + * the elapsed time of the iteration has exceeded the max time limit. 
+ */ + private static final long MAX_ITERATION_TIME = 20*60*1000L; //20 mins + private void dispatchBlocks(long startTime) { + this.blocksToReceive = 2*scheduledSize; + boolean isTimeUp = false; + while(!isTimeUp && scheduledSize>0 && + (!srcBlockList.isEmpty() || blocksToReceive>0)) { + + // check if time is up or not + // Even if not sent everything the iteration is over + if (Util.now()-startTime > maxIterationTime) { + isTimeUp = true; + continue; + } + + PendingBlockMove pendingBlock = chooseNextBlockToMove(); + if (pendingBlock != null) { + // move the block + pendingBlock.scheduleBlockMove(); + continue; + } + + /* Since we can not schedule any block to move, + * filter any moved blocks from the source block list and + * check if we should fetch more blocks from the namenode + */ + filterMovedBlocks(); // filter already moved blocks + + if (shouldFetchMoreBlocks()) { + // fetch new blocks + try { + blocksToReceive -= getBlockList(); + continue; + } catch (IOException e) { + LOG.warn(StringUtils.stringifyException(e)); + return; + } + } + + /* Now we can not schedule any block to move and there are + * no new blocks added to the source block list, so we wait. + */ + try { + synchronized(Balancer.this) { + Balancer.this.wait(1000); // wait for targets/sources to be idle + } + } catch (InterruptedException ignored) { + } + } + } + } + + /* Check that this Balancer is compatible with the Block Placement Policy + * used by the Namenode. + */ + private void checkReplicationPolicyCompatibility(Configuration conf) throws UnsupportedActionException { + if (BlockPlacementPolicy.getInstance(conf, null, null).getClass() != + BlockPlacementPolicyDefault.class) { + throw new UnsupportedActionException("Balancer without BlockPlacementPolicyDefault"); + } + } + + /** Default constructor */ + Balancer() throws UnsupportedActionException { + } + + /** Construct a balancer from the given configuration */ + Balancer(Configuration conf) throws UnsupportedActionException { + setConf(conf); + checkReplicationPolicyCompatibility(conf); + } + + /** Construct a balancer from the given configuration and threshold */ + Balancer(Configuration conf, double threshold) throws UnsupportedActionException { + setConf(conf); + checkReplicationPolicyCompatibility(conf); + this.threshold = threshold; + } + + /** + * Run a balancer + * @param args + */ + public static void main(String[] args) { + try { + System.exit( ToolRunner.run(null, new Balancer(), args) ); + } catch (Throwable e) { + LOG.error(StringUtils.stringifyException(e)); + System.exit(-1); + } + + } + + private static void printUsage(Options opts) { + HelpFormatter formatter = new HelpFormatter(); + formatter.printHelp("balancer", opts); + } + + /* parse argument to get the threshold */ + private double checkThreshold(int value) { + double threshold = (double) value; + try { + if (threshold < 0 || threshold > 100) { + throw new NumberFormatException(); + } + LOG.info("Using a threshold of " + threshold); + } catch (NumberFormatException e) { + System.err.println("Expect a double parameter in the range of [0, 100]: " + + value); + throw e; + } + return threshold; + } + + /* Initialize balancer. It sets the value of the threshold, and + * builds the communication proxies to + * namenode as a client and a secondary namenode and retry proxies + * when connection fails. 
+ */ + private void init(double threshold) throws IOException { + this.threshold = threshold; + this.namenode = createNamenode(conf); + this.client = DFSClient.createNamenode(conf); + this.fs = FileSystem.get(conf); + } + + /* Build a NamenodeProtocol connection to the namenode and + * set up the retry policy */ + private static NamenodeProtocol createNamenode(Configuration conf) + throws IOException { + InetSocketAddress nameNodeAddr = NameNode.getAddress(conf); + RetryPolicy timeoutPolicy = RetryPolicies.exponentialBackoffRetry( + 5, 200, TimeUnit.MILLISECONDS); + Map,RetryPolicy> exceptionToPolicyMap = + new HashMap, RetryPolicy>(); + RetryPolicy methodPolicy = RetryPolicies.retryByException( + timeoutPolicy, exceptionToPolicyMap); + Map methodNameToPolicyMap = + new HashMap(); + methodNameToPolicyMap.put("getBlocks", methodPolicy); + + UserGroupInformation ugi; + try { + ugi = UnixUserGroupInformation.login(conf); + } catch (javax.security.auth.login.LoginException e) { + throw new IOException(StringUtils.stringifyException(e)); + } + + return (NamenodeProtocol) RetryProxy.create( + NamenodeProtocol.class, + RPC.getProxy(NamenodeProtocol.class, + NamenodeProtocol.versionID, + nameNodeAddr, + ugi, + conf, + NetUtils.getDefaultSocketFactory(conf)), + methodNameToPolicyMap); + } + + /* Shuffle datanode array */ + static private void shuffleArray(DatanodeInfo[] datanodes) { + for (int i=datanodes.length; i>1; i--) { + int randomIndex = rnd.nextInt(i); + DatanodeInfo tmp = datanodes[randomIndex]; + datanodes[randomIndex] = datanodes[i-1]; + datanodes[i-1] = tmp; + } + } + + /* get all live datanodes of a cluster and their disk usage + * decide the number of bytes need to be moved + */ + private long initNodes() throws IOException { + return initNodes(client.getDatanodeReport(DatanodeReportType.LIVE)); + } + + /* Given a data node set, build a network topology and decide + * over-utilized datanodes, above average utilized datanodes, + * below average utilized datanodes, and underutilized datanodes. + * The input data node set is shuffled before the datanodes + * are put into the over-utilized datanodes, above average utilized + * datanodes, below average utilized datanodes, and + * underutilized datanodes lists. This will add some randomness + * to the node matching later on. + * + * @return the total number of bytes that are + * needed to move to make the cluster balanced. + * @param datanodes a set of datanodes + */ + private long initNodes(DatanodeInfo[] datanodes) { + // compute average utilization + long totalCapacity=0L, totalUsedSpace=0L; + for (DatanodeInfo datanode : datanodes) { + if (datanode.isDecommissioned() || datanode.isDecommissionInProgress()) { + continue; // ignore decommissioning or decommissioned nodes + } + totalCapacity += datanode.getCapacity(); + totalUsedSpace += datanode.getDfsUsed(); + } + this.avgUtilization = ((double)totalUsedSpace)/totalCapacity*100; + + /*create network topology and all data node lists: + * overloaded, above-average, below-average, and underloaded + * we alternates the accessing of the given datanodes array either by + * an increasing order or a decreasing order. 
+ */ + long overLoadedBytes = 0L, underLoadedBytes = 0L; + shuffleArray(datanodes); + for (DatanodeInfo datanode : datanodes) { + if (datanode.isDecommissioned() || datanode.isDecommissionInProgress()) { + continue; // ignore decommissioning or decommissioned nodes + } + cluster.add(datanode); + BalancerDatanode datanodeS; + if (getUtilization(datanode) > avgUtilization) { + datanodeS = new Source(datanode, avgUtilization, threshold); + if (isAboveAvgUtilized(datanodeS)) { + this.aboveAvgUtilizedDatanodes.add((Source)datanodeS); + } else { + assert(isOverUtilized(datanodeS)) : + datanodeS.getName()+ "is not an overUtilized node"; + this.overUtilizedDatanodes.add((Source)datanodeS); + overLoadedBytes += (long)((datanodeS.utilization-avgUtilization + -threshold)*datanodeS.datanode.getCapacity()/100.0); + } + } else { + datanodeS = new BalancerDatanode(datanode, avgUtilization, threshold); + if ( isBelowAvgUtilized(datanodeS)) { + this.belowAvgUtilizedDatanodes.add(datanodeS); + } else { + assert (isUnderUtilized(datanodeS)) : + datanodeS.getName()+ "is not an underUtilized node"; + this.underUtilizedDatanodes.add(datanodeS); + underLoadedBytes += (long)((avgUtilization-threshold- + datanodeS.utilization)*datanodeS.datanode.getCapacity()/100.0); + } + } + this.datanodes.put(datanode.getStorageID(), datanodeS); + } + + //logging + logImbalancedNodes(); + + assert (this.datanodes.size() == + overUtilizedDatanodes.size()+underUtilizedDatanodes.size()+ + aboveAvgUtilizedDatanodes.size()+belowAvgUtilizedDatanodes.size()) + : "Mismatched number of datanodes"; + + // return number of bytes to be moved in order to make the cluster balanced + return Math.max(overLoadedBytes, underLoadedBytes); + } + + /* log the over utilized & under utilized nodes */ + private void logImbalancedNodes() { + StringBuilder msg = new StringBuilder(); + msg.append(overUtilizedDatanodes.size()); + msg.append(" over utilized nodes:"); + for (Source node : overUtilizedDatanodes) { + msg.append( " " ); + msg.append( node.getName() ); + } + LOG.info(msg); + msg = new StringBuilder(); + msg.append(underUtilizedDatanodes.size()); + msg.append(" under utilized nodes: "); + for (BalancerDatanode node : underUtilizedDatanodes) { + msg.append( " " ); + msg.append( node.getName() ); + } + LOG.info(msg); + } + + /* Decide all pairs and + * the number of bytes to move from a source to a target + * Maximum bytes to be moved per node is + * Min(1 Band worth of bytes, MAX_SIZE_TO_MOVE). + * Return total number of bytes to move in this iteration + */ + private long chooseNodes() { + // Match nodes on the same rack first + chooseNodes(true); + // Then match nodes on different racks + chooseNodes(false); + + assert (datanodes.size() == + overUtilizedDatanodes.size()+underUtilizedDatanodes.size()+ + aboveAvgUtilizedDatanodes.size()+belowAvgUtilizedDatanodes.size()+ + sources.size()+targets.size()) + : "Mismatched number of datanodes"; + + long bytesToMove = 0L; + for (Source src : sources) { + bytesToMove += src.scheduledSize; + } + return bytesToMove; + } + + /* if onRack is true, decide all pairs + * where source and target are on the same rack; Otherwise + * decide all pairs where source and target are + * on different racks + */ + private void chooseNodes(boolean onRack) { + /* first step: match each overUtilized datanode (source) to + * one or more underUtilized datanodes (targets). 
+ */ + chooseTargets(underUtilizedDatanodes.iterator(), onRack); + + /* match each remaining overutilized datanode (source) to + * below average utilized datanodes (targets). + * Note only overutilized datanodes that haven't had that max bytes to move + * satisfied in step 1 are selected + */ + chooseTargets(belowAvgUtilizedDatanodes.iterator(), onRack); + + /* match each remaining underutilized datanode to + * above average utilized datanodes. + * Note only underutilized datanodes that have not had that max bytes to + * move satisfied in step 1 are selected. + */ + chooseSources(aboveAvgUtilizedDatanodes.iterator(), onRack); + } + + /* choose targets from the target candidate list for each over utilized + * source datanode. OnRackTarget determines if the chosen target + * should be on the same rack as the source + */ + private void chooseTargets( + Iterator targetCandidates, boolean onRackTarget ) { + for (Iterator srcIterator = overUtilizedDatanodes.iterator(); + srcIterator.hasNext();) { + Source source = srcIterator.next(); + while (chooseTarget(source, targetCandidates, onRackTarget)) { + } + if (!source.isMoveQuotaFull()) { + srcIterator.remove(); + } + } + return; + } + + /* choose sources from the source candidate list for each under utilized + * target datanode. onRackSource determines if the chosen source + * should be on the same rack as the target + */ + private void chooseSources( + Iterator sourceCandidates, boolean onRackSource) { + for (Iterator targetIterator = + underUtilizedDatanodes.iterator(); targetIterator.hasNext();) { + BalancerDatanode target = targetIterator.next(); + while (chooseSource(target, sourceCandidates, onRackSource)) { + } + if (!target.isMoveQuotaFull()) { + targetIterator.remove(); + } + } + return; + } + + /* For the given source, choose targets from the target candidate list. + * OnRackTarget determines if the chosen target + * should be on the same rack as the source + */ + private boolean chooseTarget(Source source, + Iterator targetCandidates, boolean onRackTarget) { + if (!source.isMoveQuotaFull()) { + return false; + } + boolean foundTarget = false; + BalancerDatanode target = null; + while (!foundTarget && targetCandidates.hasNext()) { + target = targetCandidates.next(); + if (!target.isMoveQuotaFull()) { + targetCandidates.remove(); + continue; + } + if (onRackTarget) { + // choose from on-rack nodes + if (cluster.isOnSameRack(source.datanode, target.datanode)) { + foundTarget = true; + } + } else { + // choose from off-rack nodes + if (!cluster.isOnSameRack(source.datanode, target.datanode)) { + foundTarget = true; + } + } + } + if (foundTarget) { + assert(target != null):"Choose a null target"; + long size = Math.min(source.availableSizeToMove(), + target.availableSizeToMove()); + NodeTask nodeTask = new NodeTask(target, size); + source.addNodeTask(nodeTask); + target.incScheduledSize(nodeTask.getSize()); + sources.add(source); + targets.add(target); + if (!target.isMoveQuotaFull()) { + targetCandidates.remove(); + } + LOG.info("Decided to move "+StringUtils.byteDesc(size)+" bytes from " + +source.datanode.getName() + " to " + target.datanode.getName()); + return true; + } + return false; + } + + /* For the given target, choose sources from the source candidate list. 
+ * OnRackSource determines if the chosen source + * should be on the same rack as the target + */ + private boolean chooseSource(BalancerDatanode target, + Iterator sourceCandidates, boolean onRackSource) { + if (!target.isMoveQuotaFull()) { + return false; + } + boolean foundSource = false; + Source source = null; + while (!foundSource && sourceCandidates.hasNext()) { + source = sourceCandidates.next(); + if (!source.isMoveQuotaFull()) { + sourceCandidates.remove(); + continue; + } + if (onRackSource) { + // choose from on-rack nodes + if ( cluster.isOnSameRack(source.getDatanode(), target.getDatanode())) { + foundSource = true; + } + } else { + // choose from off-rack nodes + if (!cluster.isOnSameRack(source.datanode, target.datanode)) { + foundSource = true; + } + } + } + if (foundSource) { + assert(source != null):"Choose a null source"; + long size = Math.min(source.availableSizeToMove(), + target.availableSizeToMove()); + NodeTask nodeTask = new NodeTask(target, size); + source.addNodeTask(nodeTask); + target.incScheduledSize(nodeTask.getSize()); + sources.add(source); + targets.add(target); + if ( !source.isMoveQuotaFull()) { + sourceCandidates.remove(); + } + LOG.info("Decided to move "+StringUtils.byteDesc(size)+" bytes from " + +source.datanode.getName() + " to " + target.datanode.getName()); + return true; + } + return false; + } + + private static class BytesMoved { + private long bytesMoved = 0L;; + private synchronized void inc( long bytes ) { + bytesMoved += bytes; + } + + private long get() { + return bytesMoved; + } + }; + private BytesMoved bytesMoved = new BytesMoved(); + private int notChangedIterations = 0; + + /* Start a thread to dispatch block moves for each source. + * The thread selects blocks to move & sends request to proxy source to + * initiate block move. The process is flow controlled. Block selection is + * blocked if there are too many un-confirmed block moves. + * Return the total number of bytes successfully moved in this iteration. 
+ */ + private long dispatchBlockMoves() throws InterruptedException { + long bytesLastMoved = bytesMoved.get(); + + Future[] futures = new Future[sources.size()]; + int i=0; + for (Source source : sources) { + futures[i++] = dispatcherExecutor.submit( + source.new BlockMoveDispatcher(Util.now())); + } + + // wait for all dispatcher threads to finish + for (Future future : futures) { + try { + future.get(); + } catch (ExecutionException e) { + LOG.warn("Dispatcher thread failed", e.getCause()); + } + } + + // wait for all block moving to be done + waitForMoveCompletion(); + + return bytesMoved.get()-bytesLastMoved; + } + + // The sleeping period before checking if block move is completed again + static private long blockMoveWaitTime = 30000L; + // How many blockMoveWait to wait until stopping the move + private static final int MAX_WAIT_ITERATIONS = 1; + + /** set the sleeping period for block move completion check */ + static void setBlockMoveWaitTime(long time) { + blockMoveWaitTime = time; + } + + /* wait for all block move confirmations + * by checking each target's pendingMove queue + */ + private void waitForMoveCompletion() { + boolean shouldWait; + int waitedIterations = 0; + do { + shouldWait = false; + for (BalancerDatanode target : targets) { + if (!target.isPendingQEmpty()) { + shouldWait = true; + } + } + if (shouldWait) { + try { + if (waitedIterations > MAX_WAIT_ITERATIONS) { + for (BalancerDatanode target : targets) { + target.killPending(); + } + continue; + } + waitedIterations++; + Thread.sleep(blockMoveWaitTime); + } catch (InterruptedException ignored) { + } + } + } while (shouldWait); + } + + /** This window makes sure to keep blocks that have been moved within 1.5 hour. + * Old window has blocks that are older; + * Current window has blocks that are more recent; + * Cleanup method triggers the check if blocks in the old window are + * more than 1.5 hour old. If yes, purge the old window and then + * move blocks in current window to old window. 
+ */ + private static class MovedBlocks { + private long lastCleanupTime = System.currentTimeMillis(); + private static long winWidth = 5400*1000L; // 1.5 hour + final private static int CUR_WIN = 0; + final private static int OLD_WIN = 1; + final private static int NUM_WINS = 2; + final private List> movedBlocks = + new ArrayList>(NUM_WINS); + + /* initialize the moved blocks collection */ + private MovedBlocks() { + movedBlocks.add(new HashMap()); + movedBlocks.add(new HashMap()); + } + + /* set the win width */ + private void setWinWidth(Configuration conf) { + winWidth = conf.getLong( + "dfs.balancer.movedWinWidth", 5400*1000L); + } + + /* add a block thus marking a block to be moved */ + synchronized private void add(BalancerBlock block) { + movedBlocks.get(CUR_WIN).put(block.getBlock(), block); + } + + /* check if a block is marked as moved */ + synchronized private boolean contains(BalancerBlock block) { + return contains(block.getBlock()); + } + + /* check if a block is marked as moved */ + synchronized private boolean contains(Block block) { + return movedBlocks.get(CUR_WIN).containsKey(block) || + movedBlocks.get(OLD_WIN).containsKey(block); + } + + /* remove old blocks */ + synchronized private void cleanup() { + long curTime = System.currentTimeMillis(); + // check if old win is older than winWidth + if (lastCleanupTime + winWidth <= curTime) { + // purge the old window + movedBlocks.set(OLD_WIN, movedBlocks.get(CUR_WIN)); + movedBlocks.set(CUR_WIN, new HashMap()); + lastCleanupTime = curTime; + } + } + } + + /* Decide if it is OK to move the given block from source to target + * A block is a good candidate if + * 1. the block is not in the process of being moved/has not been moved; + * 2. the block does not have a replica on the target; + * 3. doing the move does not reduce the number of racks that the block has + */ + private boolean isGoodBlockCandidate(Source source, + BalancerDatanode target, BalancerBlock block) { + // check if the block is moved or not + if (movedBlocks.contains(block)) { + return false; + } + if (block.isLocatedOnDatanode(target)) { + return false; + } + + boolean goodBlock = false; + if (cluster.isOnSameRack(source.getDatanode(), target.getDatanode())) { + // good if source and target are on the same rack + goodBlock = true; + } else { + boolean notOnSameRack = true; + synchronized (block) { + for (BalancerDatanode loc : block.locations) { + if (cluster.isOnSameRack(loc.datanode, target.datanode)) { + notOnSameRack = false; + break; + } + } + } + if (notOnSameRack) { + // good if target is target is not on the same rack as any replica + goodBlock = true; + } else { + // good if source is on the same rack as on of the replicas + for (BalancerDatanode loc : block.locations) { + if (loc != source && + cluster.isOnSameRack(loc.datanode, source.datanode)) { + goodBlock = true; + break; + } + } + } + } + return goodBlock; + } + + /* reset all fields in a balancer preparing for the next iteration */ + private void resetData() { + this.cluster = new NetworkTopology(); + this.overUtilizedDatanodes.clear(); + this.aboveAvgUtilizedDatanodes.clear(); + this.belowAvgUtilizedDatanodes.clear(); + this.underUtilizedDatanodes.clear(); + this.datanodes.clear(); + this.sources.clear(); + this.targets.clear(); + this.avgUtilization = 0.0D; + cleanGlobalBlockList(); + this.movedBlocks.cleanup(); + } + + /* Remove all blocks from the global block list except for the ones in the + * moved list. 
+ */ + private void cleanGlobalBlockList() { + for (Iterator globalBlockListIterator=globalBlockList.keySet().iterator(); + globalBlockListIterator.hasNext();) { + Block block = globalBlockListIterator.next(); + if(!movedBlocks.contains(block)) { + globalBlockListIterator.remove(); + } + } + } + + /* Return true if the given datanode is overUtilized */ + private boolean isOverUtilized(BalancerDatanode datanode) { + return datanode.utilization > (avgUtilization+threshold); + } + + /* Return true if the given datanode is above average utilized + * but not overUtilized */ + private boolean isAboveAvgUtilized(BalancerDatanode datanode) { + return (datanode.utilization <= (avgUtilization+threshold)) + && (datanode.utilization > avgUtilization); + } + + /* Return true if the given datanode is underUtilized */ + private boolean isUnderUtilized(BalancerDatanode datanode) { + return datanode.utilization < (avgUtilization-threshold); + } + + /* Return true if the given datanode is below average utilized + * but not underUtilized */ + private boolean isBelowAvgUtilized(BalancerDatanode datanode) { + return (datanode.utilization >= (avgUtilization-threshold)) + && (datanode.utilization < avgUtilization); + } + + @SuppressWarnings(value = { "static-access" }) + private Options setupOptions() { + Options cliOpts = new Options(); + cliOpts.addOption(OptionBuilder.hasArg().hasArgs(1).withDescription( + "percentage of disk capacity. Default is 10") + .isRequired(false).create("threshold")); + cliOpts.addOption(OptionBuilder.hasArg().hasArgs(1).isRequired(false) + .withDescription("The length of an iteration in minutes. " + + "Default is " + maxIterationTime/(60 * 1000)). + create("iter_len")); + cliOpts.addOption(OptionBuilder.hasArg().hasArgs(1).isRequired(false) + .withDescription("The number of blocks to move in parallel to " + + "one node. Default is " + MAX_NUM_CONCURRENT_MOVES). + create("node_par_moves")); + cliOpts.addOption(OptionBuilder.hasArg().hasArgs(1).isRequired(false) + .withDescription("The number of blocks to move in parallel " + + "in total for the cluster. 
Default is " + MOVER_THREAD_POOL_SIZE) + .create("par_moves")); + return cliOpts; + } + + // Exit status + final public static int SUCCESS = 1; + final public static int ALREADY_RUNNING = -1; + final public static int NO_MOVE_BLOCK = -2; + final public static int NO_MOVE_PROGRESS = -3; + final public static int IO_EXCEPTION = -4; + final public static int ILLEGAL_ARGS = -5; + /** main method of Balancer + * @param args arguments to a Balancer + * @exception any exception occurs during datanode balancing + */ + public int run(String[] args) throws Exception { + long startTime = Util.now(); + OutputStream out = null; + try { + // initialize a balancer + // init(parseArgs(args)); + + Options cliOpts = setupOptions(); + BasicParser parser = new BasicParser(); + CommandLine cl = null; + try { + cl = parser.parse(cliOpts, args); + } catch (ParseException ex) { + printUsage(cliOpts); + return ILLEGAL_ARGS; + } + + int threshold = Integer.parseInt(cl.getOptionValue("threshold", "10")); + int iterationTime = Integer.parseInt(cl.getOptionValue("iter_len", + String.valueOf(maxIterationTime/(60 * 1000)))); + maxConcurrentMoves = Integer.parseInt(cl.getOptionValue("node_par_moves", + String.valueOf(MAX_NUM_CONCURRENT_MOVES))); + int moveThreads = Integer.parseInt(cl.getOptionValue("par_moves", + String.valueOf(MOVER_THREAD_POOL_SIZE))); + + moverExecutor = Executors.newFixedThreadPool(moveThreads); + int dispatchThreads = (int)Math.max(1, moveThreads/maxConcurrentMoves); + dispatcherExecutor = Executors.newFixedThreadPool(dispatchThreads); + + maxIterationTime = iterationTime * 60 * 1000L; + System.out.println("Running with threshold of " + this.threshold + + " and iteration time of " + maxIterationTime + " milliseconds"); + init(checkThreshold(threshold)); + + /* Check if there is another balancer running. + * Exit if there is another one running. + */ + out = checkAndMarkRunningBalancer(); + if (out == null) { + System.out.println("Another balancer is running. Exiting..."); + return ALREADY_RUNNING; + } + + Formatter formatter = new Formatter(System.out); + System.out.println("Time Stamp Iteration# Bytes Already Moved Bytes Left To Move Bytes Being Moved Iterations Left Seconds Left"); + int iterations = 0; + while (true ) { + /* get all live datanodes of a cluster and their disk usage + * decide the number of bytes need to be moved + */ + long bytesLeftToMove = initNodes(); + if (bytesLeftToMove == 0) { + System.out.println("The cluster is balanced. Exiting..."); + return SUCCESS; + } else { + LOG.info( "Need to move "+ StringUtils.byteDesc(bytesLeftToMove) + +" bytes to make the cluster balanced." ); + } + + /* Decide all the nodes that will participate in the block move and + * the number of bytes that need to be moved from one node to another + * in this iteration. Maximum bytes to be moved per node is + * Min(1 Band worth of bytes, MAX_SIZE_TO_MOVE). + */ + long bytesToMove = chooseNodes(); + if (bytesToMove == 0) { + System.out.println("No block can be moved. 
Exiting..."); + return NO_MOVE_BLOCK; + } else { + LOG.info( "Will move " + StringUtils.byteDesc(bytesToMove) + + "bytes in this iteration"); + } + + long moved = bytesMoved.get(); + String iterationsLeft = "N/A"; + String timeLeft = "N/A"; + if (iterations != 0 && moved != 0) { + long bytesPerIteration = moved / iterations; + long iterLeft = bytesLeftToMove / bytesPerIteration; + iterationsLeft = String.valueOf(iterLeft ); + long secondsPerIteration = (maxIterationTime + blockMoveWaitTime)/1000; + long secondsLeft = secondsPerIteration * iterLeft; + long daysLeft = TimeUnit.SECONDS.toDays(secondsLeft); + timeLeft = ""; + if (daysLeft > 0) { + timeLeft = timeLeft + daysLeft + "d "; + } + long hoursLeft = TimeUnit.SECONDS.toHours(secondsLeft) - + TimeUnit.DAYS.toHours(daysLeft); + if (hoursLeft > 0) { + timeLeft = timeLeft + hoursLeft + "h "; + } + long minutesLeft = TimeUnit.SECONDS.toMinutes(secondsLeft) - + TimeUnit.HOURS.toMinutes(hoursLeft) - + TimeUnit.DAYS.toMinutes(daysLeft); + timeLeft = timeLeft + minutesLeft + "m"; + + } + + formatter.format("%-24s %10d %19s %18s %17s %15s %12s\n", + DateFormat.getDateTimeInstance().format(new Date()), + iterations, + StringUtils.byteDesc(bytesMoved.get()), + StringUtils.byteDesc(bytesLeftToMove), + StringUtils.byteDesc(bytesToMove), + iterationsLeft, + timeLeft + ); + + /* For each pair of , start a thread that repeatedly + * decide a block to be moved and its proxy source, + * then initiates the move until all bytes are moved or no more block + * available to move. + * Exit no byte has been moved for 5 consecutive iterations. + */ + if (dispatchBlockMoves() > 0) { + notChangedIterations = 0; + } else { + notChangedIterations++; + if (notChangedIterations >= 5) { + System.out.println( + "No block has been moved for 5 iterations. Exiting..."); + return NO_MOVE_PROGRESS; + } + } + + // clean all lists + resetData(); + + try { + Thread.sleep(2*conf.getLong("dfs.heartbeat.interval", 3)); + } catch (InterruptedException ignored) { + } + + iterations++; + } + } catch (IllegalArgumentException ae) { + ae.printStackTrace(); + return ILLEGAL_ARGS; + } catch (IOException e) { + e.printStackTrace(); + System.out.println("Received an IO exception: " + e.getMessage() + + " . Exiting..."); + return IO_EXCEPTION; + } catch (Exception ex) { + ex.printStackTrace(); + return ILLEGAL_ARGS; + } finally { + // shutdown thread pools + dispatcherExecutor.shutdownNow(); + moverExecutor.shutdownNow(); + + // close the output file + IOUtils.closeStream(out); + if (fs != null) { + try { + fs.delete(BALANCER_ID_PATH, true); + } catch(IOException ignored) { + } + } + System.out.println("Balancing took " + + time2Str(Util.now()-startTime)); + } + } + + private Path BALANCER_ID_PATH = new Path("/system/balancer.id"); + /* The idea for making sure that there is no more than one balancer + * running in an HDFS is to create a file in the HDFS, writes the IP address + * of the machine on which the balancer is running to the file, but did not + * close the file until the balancer exits. + * This prevents the second balancer from running because it can not + * creates the file while the first one is running. + * + * This method checks if there is any running balancer and + * if no, mark yes if no. + * Note that this is an atomic operation. + * + * Return null if there is a running balancer; otherwise the output stream + * to the newly created file. 
+ */ + private OutputStream checkAndMarkRunningBalancer() throws IOException { + try { + DataOutputStream out = fs.create(BALANCER_ID_PATH); + out. writeBytes(InetAddress.getLocalHost().getHostName()); + out.flush(); + return out; + } catch(RemoteException e) { + if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){ + return null; + } else { + throw e; + } + } + } + + /* Given elaspedTime in ms, return a printable string */ + private static String time2Str(long elapsedTime) { + String unit; + double time = elapsedTime; + if (elapsedTime < 1000) { + unit = "milliseconds"; + } else if (elapsedTime < 60*1000) { + unit = "seconds"; + time = time/1000; + } else if (elapsedTime < 3600*1000) { + unit = "minutes"; + time = time/(60*1000); + } else { + unit = "hours"; + time = time/(3600*1000); + } + + return time+" "+unit; + } + + /** return this balancer's configuration */ + public Configuration getConf() { + return conf; + } + + /** set this balancer's configuration */ + public void setConf(Configuration conf) { + this.conf = conf; + movedBlocks.setWinWidth(conf); + } + +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/common/GenerationStamp.java b/src/hdfs/org/apache/hadoop/hdfs/server/common/GenerationStamp.java new file mode 100644 index 0000000..1b1f9df --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/common/GenerationStamp.java @@ -0,0 +1,114 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.common; + +import java.io.*; +import org.apache.hadoop.io.*; + +/**************************************************************** + * A GenerationStamp is a Hadoop FS primitive, identified by a long. + ****************************************************************/ +public class GenerationStamp implements WritableComparable { + public static final long WILDCARD_STAMP = 1; + public static final long FIRST_VALID_STAMP = 1000L; + + static { // register a ctor + WritableFactories.setFactory + (GenerationStamp.class, + new WritableFactory() { + public Writable newInstance() { return new GenerationStamp(0); } + }); + } + + long genstamp; + + /** + * Create a new instance, initialized to FIRST_VALID_STAMP. + */ + public GenerationStamp() {this(GenerationStamp.FIRST_VALID_STAMP);} + + /** + * Create a new instance, initialized to the specified value. 
+ */ + GenerationStamp(long stamp) {this.genstamp = stamp;} + + /** + * Returns the current generation stamp + */ + public long getStamp() { + return this.genstamp; + } + + /** + * Sets the current generation stamp + */ + public void setStamp(long stamp) { + this.genstamp = stamp; + } + + /** + * First increments the counter and then returns the stamp + */ + public synchronized long nextStamp() { + this.genstamp++; + return this.genstamp; + } + + ///////////////////////////////////// + // Writable + ///////////////////////////////////// + public void write(DataOutput out) throws IOException { + out.writeLong(genstamp); + } + + public void readFields(DataInput in) throws IOException { + this.genstamp = in.readLong(); + if (this.genstamp < 0) { + throw new IOException("Bad Generation Stamp: " + this.genstamp); + } + } + + ///////////////////////////////////// + // Comparable + ///////////////////////////////////// + public static int compare(long x, long y) { + return x < y? -1: x == y? 0: 1; + } + + /** {@inheritDoc} */ + public int compareTo(GenerationStamp that) { + return compare(this.genstamp, that.genstamp); + } + + /** {@inheritDoc} */ + public boolean equals(Object o) { + if (!(o instanceof GenerationStamp)) { + return false; + } + return genstamp == ((GenerationStamp)o).genstamp; + } + + public static boolean equalsWithWildcard(long x, long y) { + return x == y || x == WILDCARD_STAMP || y == WILDCARD_STAMP; + } + + /** {@inheritDoc} */ + public int hashCode() { + return 37 * 17 + (int) (genstamp^(genstamp>>>32)); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/common/HdfsConstants.java b/src/hdfs/org/apache/hadoop/hdfs/server/common/HdfsConstants.java new file mode 100644 index 0000000..591e25f --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/common/HdfsConstants.java @@ -0,0 +1,70 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.common; + + +/************************************ + * Some handy internal HDFS constants + * + ************************************/ + +public interface HdfsConstants { + /** + * Type of the node + */ + static public enum NodeType { + NAME_NODE, + DATA_NODE; + } + + // Startup options + static public enum StartupOption{ + FORMAT ("-format"), + REGULAR ("-regular"), + UPGRADE ("-upgrade"), + ROLLBACK("-rollback"), + FINALIZE("-finalize"), + IMPORT ("-importCheckpoint"); + + private String name = null; + private StartupOption(String arg) {this.name = arg;} + public String getName() {return name;} + } + + // Timeouts for communicating with DataNode for streaming writes/reads + public static int READ_TIMEOUT = 60 * 1000; + public static int WRITE_TIMEOUT = 8 * 60 * 1000; + public static int WRITE_TIMEOUT_EXTENSION = 5 * 1000; //for write pipeline + + // constants for edits log + public static long DEFAULT_EDIT_PREALLOCATE_SIZE = 1024 * 1024; // 1 MB + public static int DEFAULT_EDIT_BUFFER_SIZE = 512 * 1024; // 0.5 MB + public static int DEFAULT_MAX_BUFFERED_TRANSACTIONS = 10000; // ten + + // property for fsimage compression + public static final String DFS_IMAGE_COMPRESS_KEY = "dfs.image.compress"; + public static final boolean DFS_IMAGE_COMPRESS_DEFAULT = false; + public static final String DFS_IMAGE_COMPRESSION_CODEC_KEY = + "dfs.image.compression.codec"; + public static final String DFS_IMAGE_COMPRESSION_CODEC_DEFAULT = + "org.apache.hadoop.io.compress.DefaultCodec"; + public static final String DFS_IMAGE_TRANSFER_RATE_KEY = + "dfs.image.transfer.bandwidthPerSec"; + public static final long DFS_IMAGE_TRANSFER_RATE_DEFAULT = 0; // disable +} + diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/common/InconsistentFSStateException.java b/src/hdfs/org/apache/hadoop/hdfs/server/common/InconsistentFSStateException.java new file mode 100644 index 0000000..144d8ce --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/common/InconsistentFSStateException.java @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.common; + +import java.io.File; +import java.io.IOException; +import org.apache.hadoop.util.StringUtils; + +/** + * The exception is thrown when file system state is inconsistent + * and is not recoverable. 
+ * + */ +public class InconsistentFSStateException extends IOException { + + public InconsistentFSStateException(File dir, String descr) { + super("Directory " + getFilePath(dir) + + " is in an inconsistent state: " + descr); + } + + public InconsistentFSStateException(File dir, String descr, Throwable ex) { + this(dir, descr + "\n" + StringUtils.stringifyException(ex)); + } + + private static String getFilePath(File dir) { + try { + return dir.getCanonicalPath(); + } catch(IOException e) {} + return dir.getPath(); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java b/src/hdfs/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java new file mode 100644 index 0000000..ef7d82c --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.common; + +import java.io.IOException; + +import org.apache.hadoop.hdfs.protocol.FSConstants; + +/** + * The exception is thrown when external version does not match + * current version of the appication. + * + */ +public class IncorrectVersionException extends IOException { + + public IncorrectVersionException(int versionReported, String ofWhat) { + this(versionReported, ofWhat, FSConstants.LAYOUT_VERSION); + } + + public IncorrectVersionException(int versionReported, + String ofWhat, + int versionExpected) { + super("Unexpected version " + + (ofWhat==null ? "" : "of " + ofWhat) + ". Reported: " + + versionReported + ". Expecting = " + versionExpected + "."); + } + +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/common/Storage.java b/src/hdfs/org/apache/hadoop/hdfs/server/common/Storage.java new file mode 100644 index 0000000..0f5d652 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/common/Storage.java @@ -0,0 +1,767 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.common; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.channels.FileLock; +import java.nio.channels.OverlappingFileLockException; +import java.util.ArrayList; +import java.util.List; +import java.util.Iterator; +import java.util.Properties; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; +import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.VersionInfo; + + + +/** + * Storage information file. + *

+ * Local storage information is stored in a separate file named VERSION. + * It contains the type of the node, + * the storage layout version, the namespace id, and + * the fs state creation time. + *

+ * Local storage can reside in multiple directories. + * Each directory should contain the same VERSION file as the others. + * During startup Hadoop servers (name-node and data-nodes) read their local + * storage information from them. + *

+ * The servers hold a lock for each storage directory while they run so that + * other nodes cannot start up and share the same storage. + * The locks are released when the servers stop (normally or abnormally). + * + */ +public abstract class Storage extends StorageInfo { + public static final Log LOG = LogFactory.getLog(Storage.class.getName()); + + // Constants + + // last layout version that did not support upgrades + protected static final int LAST_PRE_UPGRADE_LAYOUT_VERSION = -3; + + // this corresponds to Hadoop-0.14. + public static final int LAST_UPGRADABLE_LAYOUT_VERSION = -7; + protected static final String LAST_UPGRADABLE_HADOOP_VERSION = "Hadoop-0.14"; + + /* this should be removed when LAST_UPGRADABLE_LV goes beyond -13. + * any upgrade code that uses this constant should also be removed. */ + public static final int PRE_GENERATIONSTAMP_LAYOUT_VERSION = -13; + + private static final String STORAGE_FILE_LOCK = "in_use.lock"; + protected static final String STORAGE_FILE_VERSION = "VERSION"; + public static final String STORAGE_DIR_CURRENT = "current"; + private static final String STORAGE_DIR_PREVIOUS = "previous"; + private static final String STORAGE_TMP_REMOVED = "removed.tmp"; + private static final String STORAGE_TMP_PREVIOUS = "previous.tmp"; + private static final String STORAGE_TMP_FINALIZED = "finalized.tmp"; + private static final String STORAGE_TMP_LAST_CKPT = "lastcheckpoint.tmp"; + private static final String STORAGE_PREVIOUS_CKPT = "previous.checkpoint"; + + public enum StorageState { + NON_EXISTENT, + NOT_FORMATTED, + COMPLETE_UPGRADE, + RECOVER_UPGRADE, + COMPLETE_FINALIZE, + COMPLETE_ROLLBACK, + RECOVER_ROLLBACK, + COMPLETE_CHECKPOINT, + RECOVER_CHECKPOINT, + NORMAL; + } + + /** + * An interface to denote a storage directory type. + * Implementations can define a type for a storage directory by implementing + * this interface.
+ */ + public interface StorageDirType { + public StorageDirType getStorageDirType(); + public boolean isOfType(StorageDirType type); + } + + private NodeType storageType; // Type of the node using this storage + protected List storageDirs = new ArrayList(); + + private class DirIterator implements Iterator { + StorageDirType dirType; + int prevIndex; // for remove() + int nextIndex; // for next() + + DirIterator(StorageDirType dirType) { + this.dirType = dirType; + this.nextIndex = 0; + this.prevIndex = 0; + } + + public boolean hasNext() { + if (storageDirs.isEmpty() || nextIndex >= storageDirs.size()) + return false; + if (dirType != null) { + while (nextIndex < storageDirs.size()) { + if (getStorageDir(nextIndex).getStorageDirType().isOfType(dirType)) + break; + nextIndex++; + } + if (nextIndex >= storageDirs.size()) + return false; + } + return true; + } + + public StorageDirectory next() { + StorageDirectory sd = getStorageDir(nextIndex); + prevIndex = nextIndex; + nextIndex++; + if (dirType != null) { + while (nextIndex < storageDirs.size()) { + if (getStorageDir(nextIndex).getStorageDirType().isOfType(dirType)) + break; + nextIndex++; + } + } + return sd; + } + + public void remove() { + nextIndex = prevIndex; // restore previous state + storageDirs.remove(prevIndex); // remove last returned element + hasNext(); // reset nextIndex to correct place + } + } + + /** + * Return default iterator + * This iterator returns all entires of storageDirs + */ + public Iterator dirIterator() { + return dirIterator(null); + } + + /** + * Return iterator based on Storage Directory Type + * This iterator selects entires of storageDirs of type dirType and returns + * them via the Iterator + */ + public Iterator dirIterator(StorageDirType dirType) { + return new DirIterator(dirType); + } + + /** + * One of the storage directories. + */ + public class StorageDirectory { + File root; // root directory + FileLock lock; // storage lock + StorageDirType dirType; // storage dir type + + public StorageDirectory(File dir) { + // default dirType is null + this(dir, null); + } + + public StorageDirectory(File dir, StorageDirType dirType) { + this.root = dir; + this.lock = null; + this.dirType = dirType; + } + + /** + * Get root directory of this storage + */ + public File getRoot() { + return root; + } + + /** + * Get storage directory type + */ + public StorageDirType getStorageDirType() { + return dirType; + } + + /** + * Read version file. + * + * @throws IOException if file cannot be read or contains inconsistent data + */ + public void read() throws IOException { + read(getVersionFile()); + } + + public void read(File from) throws IOException { + RandomAccessFile file = new RandomAccessFile(from, "rws"); + FileInputStream in = null; + try { + in = new FileInputStream(file.getFD()); + file.seek(0); + Properties props = new Properties(); + props.load(in); + getFields(props, this); + } finally { + if (in != null) { + in.close(); + } + file.close(); + } + } + + /** + * Write version file. + * + * @throws IOException + */ + public void write() throws IOException { + corruptPreUpgradeStorage(root); + write(getVersionFile()); + } + + public void write(File to) throws IOException { + Properties props = new Properties(); + setFields(props, this); + RandomAccessFile file = new RandomAccessFile(to, "rws"); + FileOutputStream out = null; + try { + file.seek(0); + out = new FileOutputStream(file.getFD()); + /* + * If server is interrupted before this line, + * the version file will remain unchanged. 
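+ * (The file is opened in "rws" mode, so every update to its content
+ * and metadata is written synchronously to the underlying device.)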
+ */ + props.store(out, null); + /* + * Now the new fields are flushed to the head of the file, but the file + * length can still be larger than required, and therefore the file can + * contain whole or corrupted fields from its old contents at the end. + * If the server is interrupted here and restarted later, these extra fields + * either should not affect server behavior or should be handled + * by the server correctly. + */ + file.setLength(out.getChannel().position()); + } finally { + if (out != null) { + out.close(); + } + file.close(); + } + } + + /** + * Clear and re-create the storage directory. + *

+ * Removes contents of the current directory and creates an empty directory. + * + * This does not fully format storage directory. + * It cannot write the version file since it should be written last after + * all other storage type dependent files are written. + * Derived storage is responsible for setting specific storage values and + * writing the version file to disk. + * + * @throws IOException + */ + public void clearDirectory() throws IOException { + File curDir = this.getCurrentDir(); + if (curDir.exists()) + if (!(FileUtil.fullyDelete(curDir))) + throw new IOException("Cannot remove current directory: " + curDir); + if (!curDir.mkdirs()) + throw new IOException("Cannot create directory " + curDir); + } + + public File getCurrentDir() { + return new File(root, STORAGE_DIR_CURRENT); + } + public File getVersionFile() { + return new File(new File(root, STORAGE_DIR_CURRENT), STORAGE_FILE_VERSION); + } + public File getPreviousVersionFile() { + return new File(new File(root, STORAGE_DIR_PREVIOUS), STORAGE_FILE_VERSION); + } + public File getPreviousDir() { + return new File(root, STORAGE_DIR_PREVIOUS); + } + public File getPreviousTmp() { + return new File(root, STORAGE_TMP_PREVIOUS); + } + public File getRemovedTmp() { + return new File(root, STORAGE_TMP_REMOVED); + } + public File getFinalizedTmp() { + return new File(root, STORAGE_TMP_FINALIZED); + } + public File getLastCheckpointTmp() { + return new File(root, STORAGE_TMP_LAST_CKPT); + } + public File getPreviousCheckpoint() { + return new File(root, STORAGE_PREVIOUS_CKPT); + } + + /** + * Check consistency of the storage directory + * + * @param startOpt a startup option. + * + * @return state {@link StorageState} of the storage directory + * @throws {@link InconsistentFSStateException} if directory state is not + * consistent and cannot be recovered + */ + public StorageState analyzeStorage(StartupOption startOpt) throws IOException { + assert root != null : "root is null"; + String rootPath = root.getCanonicalPath(); + try { // check that storage exists + if (!root.exists()) { + // storage directory does not exist + if (startOpt != StartupOption.FORMAT) { + LOG.info("Storage directory " + rootPath + " does not exist."); + return StorageState.NON_EXISTENT; + } + LOG.info(rootPath + " does not exist. 
Creating ..."); + if (!root.mkdirs()) + throw new IOException("Cannot create directory " + rootPath); + } + // or is inaccessible + if (!root.isDirectory()) { + LOG.info(rootPath + "is not a directory."); + return StorageState.NON_EXISTENT; + } + if (!root.canWrite()) { + LOG.info("Cannot access storage directory " + rootPath); + return StorageState.NON_EXISTENT; + } + } catch(SecurityException ex) { + LOG.info("Cannot access storage directory " + rootPath, ex); + return StorageState.NON_EXISTENT; + } + + this.lock(); // lock storage if it exists + + if (startOpt == HdfsConstants.StartupOption.FORMAT) + return StorageState.NOT_FORMATTED; + if (startOpt != HdfsConstants.StartupOption.IMPORT) { + //make sure no conversion is required + checkConversionNeeded(this); + } + + // check whether current directory is valid + File versionFile = getVersionFile(); + boolean hasCurrent = versionFile.exists(); + + // check which directories exist + boolean hasPrevious = getPreviousDir().exists(); + boolean hasPreviousTmp = getPreviousTmp().exists(); + boolean hasRemovedTmp = getRemovedTmp().exists(); + boolean hasFinalizedTmp = getFinalizedTmp().exists(); + boolean hasCheckpointTmp = getLastCheckpointTmp().exists(); + + if (!(hasPreviousTmp || hasRemovedTmp + || hasFinalizedTmp || hasCheckpointTmp)) { + // no temp dirs - no recovery + if (hasCurrent) + return StorageState.NORMAL; + if (hasPrevious) + throw new InconsistentFSStateException(root, + "version file in current directory is missing."); + return StorageState.NOT_FORMATTED; + } + + if ((hasPreviousTmp?1:0) + (hasRemovedTmp?1:0) + + (hasFinalizedTmp?1:0) + (hasCheckpointTmp?1:0) > 1) + // more than one temp dirs + throw new InconsistentFSStateException(root, + "too many temporary directories."); + + // # of temp dirs == 1 should either recover or complete a transition + if (hasCheckpointTmp) { + return hasCurrent ? StorageState.COMPLETE_CHECKPOINT + : StorageState.RECOVER_CHECKPOINT; + } + + if (hasFinalizedTmp) { + if (hasPrevious) + throw new InconsistentFSStateException(root, + STORAGE_DIR_PREVIOUS + " and " + STORAGE_TMP_FINALIZED + + "cannot exist together."); + return StorageState.COMPLETE_FINALIZE; + } + + if (hasPreviousTmp) { + if (hasPrevious) + throw new InconsistentFSStateException(root, + STORAGE_DIR_PREVIOUS + " and " + STORAGE_TMP_PREVIOUS + + " cannot exist together."); + if (hasCurrent) + return StorageState.COMPLETE_UPGRADE; + return StorageState.RECOVER_UPGRADE; + } + + assert hasRemovedTmp : "hasRemovedTmp must be true"; + if (!(hasCurrent ^ hasPrevious)) + throw new InconsistentFSStateException(root, + "one and only one directory " + STORAGE_DIR_CURRENT + + " or " + STORAGE_DIR_PREVIOUS + + " must be present when " + STORAGE_TMP_REMOVED + + " exists."); + if (hasCurrent) + return StorageState.COMPLETE_ROLLBACK; + return StorageState.RECOVER_ROLLBACK; + } + + /** + * Complete or recover storage state from previously failed transition. 
+ * + * @param curState specifies what/how the state should be recovered + * @throws IOException + */ + public void doRecover(StorageState curState) throws IOException { + File curDir = getCurrentDir(); + String rootPath = root.getCanonicalPath(); + switch(curState) { + case COMPLETE_UPGRADE: // mv previous.tmp -> previous + LOG.info("Completing previous upgrade for storage directory " + + rootPath + "."); + rename(getPreviousTmp(), getPreviousDir()); + return; + case RECOVER_UPGRADE: // mv previous.tmp -> current + LOG.info("Recovering storage directory " + rootPath + + " from previous upgrade."); + if (curDir.exists()) + deleteDir(curDir); + rename(getPreviousTmp(), curDir); + return; + case COMPLETE_ROLLBACK: // rm removed.tmp + LOG.info("Completing previous rollback for storage directory " + + rootPath + "."); + deleteDir(getRemovedTmp()); + return; + case RECOVER_ROLLBACK: // mv removed.tmp -> current + LOG.info("Recovering storage directory " + rootPath + + " from previous rollback."); + rename(getRemovedTmp(), curDir); + return; + case COMPLETE_FINALIZE: // rm finalized.tmp + LOG.info("Completing previous finalize for storage directory " + + rootPath + "."); + deleteDir(getFinalizedTmp()); + return; + case COMPLETE_CHECKPOINT: // mv lastcheckpoint.tmp -> previous.checkpoint + LOG.info("Completing previous checkpoint for storage directory " + + rootPath + "."); + File prevCkptDir = getPreviousCheckpoint(); + if (prevCkptDir.exists()) + deleteDir(prevCkptDir); + rename(getLastCheckpointTmp(), prevCkptDir); + return; + case RECOVER_CHECKPOINT: // mv lastcheckpoint.tmp -> current + LOG.info("Recovering storage directory " + rootPath + + " from failed checkpoint."); + if (curDir.exists()) + deleteDir(curDir); + rename(getLastCheckpointTmp(), curDir); + return; + default: + throw new IOException("Unexpected FS state: " + curState); + } + } + + /** + * Lock storage to provide exclusive access. + * + *

Locking is not supported by all file systems. + * E.g., NFS does not consistently support exclusive locks. + * + *

If locking is supported we guarantee exculsive access to the + * storage directory. Otherwise, no guarantee is given. + * + * @throws IOException if locking fails + */ + public void lock() throws IOException { + this.lock = tryLock(); + if (lock == null) { + String msg = "Cannot lock storage " + this.root + + ". The directory is already locked."; + LOG.info(msg); + throw new IOException(msg); + } + } + + /** + * Attempts to acquire an exclusive lock on the storage. + * + * @return A lock object representing the newly-acquired lock or + * null if storage is already locked. + * @throws IOException if locking fails. + */ + FileLock tryLock() throws IOException { + File lockF = new File(root, STORAGE_FILE_LOCK); + lockF.deleteOnExit(); + RandomAccessFile file = new RandomAccessFile(lockF, "rws"); + FileLock res = null; + try { + res = file.getChannel().tryLock(); + } catch(OverlappingFileLockException oe) { + file.close(); + return null; + } catch(IOException e) { + LOG.info(StringUtils.stringifyException(e)); + file.close(); + throw e; + } + return res; + } + + /** + * Unlock storage. + * + * @throws IOException + */ + public void unlock() throws IOException { + if (this.lock == null) + return; + this.lock.release(); + lock.channel().close(); + lock = null; + } + } + + /** + * Create empty storage info of the specified type + */ + protected Storage(NodeType type) { + super(); + this.storageType = type; + } + + protected Storage(NodeType type, int nsID, long cT) { + super(FSConstants.LAYOUT_VERSION, nsID, cT); + this.storageType = type; + } + + protected Storage(NodeType type, StorageInfo storageInfo) { + super(storageInfo); + this.storageType = type; + } + + public int getNumStorageDirs() { + return storageDirs.size(); + } + + public StorageDirectory getStorageDir(int idx) { + return storageDirs.get(idx); + } + + protected void addStorageDir(StorageDirectory sd) { + storageDirs.add(sd); + } + + public abstract boolean isConversionNeeded(StorageDirectory sd) throws IOException; + + /* + * Coversion is no longer supported. So this should throw exception if + * conversion is needed. + */ + private void checkConversionNeeded(StorageDirectory sd) throws IOException { + if (isConversionNeeded(sd)) { + //throw an exception + checkVersionUpgradable(0); + } + } + + /** + * Checks if the upgrade from the given old version is supported. If + * no upgrade is supported, it throws IncorrectVersionException. + * + * @param oldVersion + */ + protected static void checkVersionUpgradable(int oldVersion) + throws IOException { + if (oldVersion > LAST_UPGRADABLE_LAYOUT_VERSION) { + String msg = "*********** Upgrade is not supported from this older" + + " version of storage to the current version." + + " Please upgrade to " + LAST_UPGRADABLE_HADOOP_VERSION + + " or a later version and then upgrade to current" + + " version. Old layout version is " + + (oldVersion == 0 ? "'too old'" : (""+oldVersion)) + + " and latest layout version this software version can" + + " upgrade from is " + LAST_UPGRADABLE_LAYOUT_VERSION + + ". ************"; + LOG.error(msg); + throw new IOException(msg); + } + + } + + /** + * Get common storage fields. + * Should be overloaded if additional fields need to be get. 
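+ * The VERSION file is a plain Java properties file. The values below are
+ * only an illustration of its layout:
+ * <pre>
+ *   layoutVersion=-18
+ *   storageType=NAME_NODE
+ *   namespaceID=1394672381
+ *   cTime=0
+ * </pre>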
+ * + * @param props + * @throws IOException + */ + protected void getFields(Properties props, + StorageDirectory sd + ) throws IOException { + String sv, st, sid, sct; + sv = props.getProperty("layoutVersion"); + st = props.getProperty("storageType"); + sid = props.getProperty("namespaceID"); + sct = props.getProperty("cTime"); + if (sv == null || st == null || sid == null || sct == null) + throw new InconsistentFSStateException(sd.root, + "file " + STORAGE_FILE_VERSION + " is invalid."); + int rv = Integer.parseInt(sv); + NodeType rt = NodeType.valueOf(st); + int rid = Integer.parseInt(sid); + long rct = Long.parseLong(sct); + if (!storageType.equals(rt) || + !((namespaceID == 0) || (rid == 0) || namespaceID == rid)) + throw new InconsistentFSStateException(sd.root, + "is incompatible with others. " + + " namespaceID is " + namespaceID + + " and rid is " + rid + "," + + " storage type is " + storageType + + " but rt is " + rt); + if (rv < FSConstants.LAYOUT_VERSION) // future version + throw new IncorrectVersionException(rv, "storage directory " + + sd.root.getCanonicalPath()); + layoutVersion = rv; + storageType = rt; + namespaceID = rid; + cTime = rct; + } + + /** + * Set common storage fields. + * Should be overloaded if additional fields need to be set. + * + * @param props + * @throws IOException + */ + protected void setFields(Properties props, + StorageDirectory sd + ) throws IOException { + props.setProperty("layoutVersion", String.valueOf(layoutVersion)); + props.setProperty("storageType", storageType.toString()); + props.setProperty("namespaceID", String.valueOf(namespaceID)); + props.setProperty("cTime", String.valueOf(cTime)); + } + + public static void rename(File from, File to) throws IOException { + if (!from.renameTo(to)) + throw new IOException("Failed to rename " + + from.getCanonicalPath() + " to " + to.getCanonicalPath()); + } + + protected static void deleteDir(File dir) throws IOException { + if (!FileUtil.fullyDelete(dir)) + throw new IOException("Failed to delete " + dir.getCanonicalPath()); + } + + /** + * Write all data storage files. + * @throws IOException + */ + public void writeAll() throws IOException { + this.layoutVersion = FSConstants.LAYOUT_VERSION; + for (Iterator it = storageDirs.iterator(); it.hasNext();) { + it.next().write(); + } + } + + /** + * Unlock all storage directories. + * @throws IOException + */ + public void unlockAll() throws IOException { + for (Iterator it = storageDirs.iterator(); it.hasNext();) { + it.next().unlock(); + } + } + + /** + * Check whether underlying file system supports file locking. + * + * @return true if exclusive locks are supported or + * false otherwise. 
+ * @throws IOException + * @see StorageDirectory#lock() + */ + public boolean isLockSupported(int idx) throws IOException { + StorageDirectory sd = storageDirs.get(idx); + FileLock firstLock = null; + FileLock secondLock = null; + try { + firstLock = sd.lock; + if(firstLock == null) { + firstLock = sd.tryLock(); + if(firstLock == null) + return true; + } + secondLock = sd.tryLock(); + if(secondLock == null) + return true; + } finally { + if(firstLock != null && firstLock != sd.lock) { + firstLock.release(); + firstLock.channel().close(); + } + if(secondLock != null) { + secondLock.release(); + secondLock.channel().close(); + } + } + return false; + } + + public static String getBuildVersion() { + return VersionInfo.getRevision(); + } + + public static String getRegistrationID(StorageInfo storage) { + return "NS-" + Integer.toString(storage.getNamespaceID()) + + "-" + Integer.toString(storage.getLayoutVersion()) + + "-" + Long.toString(storage.getCTime()); + } + + // Pre-upgrade version compatibility + protected abstract void corruptPreUpgradeStorage(File rootDir) throws IOException; + + protected void writeCorruptedData(RandomAccessFile file) throws IOException { + final String messageForPreUpgradeVersion = + "\nThis file is INTENTIONALLY CORRUPTED so that versions\n" + + "of Hadoop prior to 0.13 (which are incompatible\n" + + "with this directory layout) will fail to start.\n"; + + file.seek(0); + file.writeInt(FSConstants.LAYOUT_VERSION); + org.apache.hadoop.io.UTF8.writeString(file, ""); + file.writeBytes(messageForPreUpgradeVersion); + file.getFD().sync(); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/common/StorageInfo.java b/src/hdfs/org/apache/hadoop/hdfs/server/common/StorageInfo.java new file mode 100644 index 0000000..8f768b5 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/common/StorageInfo.java @@ -0,0 +1,54 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.common; + + +/** + * Common class for storage information. + * + * TODO namespaceID should be long and computed as hash(address + port) + */ +public class StorageInfo { + public int layoutVersion; // Version read from the stored file. 
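+ // (Layout versions are negative; a more negative layoutVersion denotes a newer layout.)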
+ public int namespaceID; // namespace id of the storage + public long cTime; // creation timestamp + + public StorageInfo () { + this(0, 0, 0L); + } + + public StorageInfo(int layoutV, int nsID, long cT) { + layoutVersion = layoutV; + namespaceID = nsID; + cTime = cT; + } + + public StorageInfo(StorageInfo from) { + setStorageInfo(from); + } + + public int getLayoutVersion(){ return layoutVersion; } + public int getNamespaceID() { return namespaceID; } + public long getCTime() { return cTime; } + + public void setStorageInfo(StorageInfo from) { + layoutVersion = from.layoutVersion; + namespaceID = from.namespaceID; + cTime = from.cTime; + } +} \ No newline at end of file diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/common/ThreadLocalDateFormat.java b/src/hdfs/org/apache/hadoop/hdfs/server/common/ThreadLocalDateFormat.java new file mode 100644 index 0000000..06837c7 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/common/ThreadLocalDateFormat.java @@ -0,0 +1,87 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.common; + +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.TimeZone; + +/** + * Thread safe implementation of {@link SimpleDateFormat} + * TODO: This needs to be moved to hadoop common project. + */ +public class ThreadLocalDateFormat { + private final String format; + + /** + * Constructs {@link ThreadLocalDateFormat} using given date format pattern + * @param format Date format pattern + */ + public ThreadLocalDateFormat(String format) { + this.format = format; + } + + /** + * ThreadLocal based {@link SimpleDateFormat} + */ + private final ThreadLocal dateFormat = + new ThreadLocal() { + @Override + protected SimpleDateFormat initialValue() { + SimpleDateFormat df = new SimpleDateFormat(format); + return df; + } + }; + + /** + * Format given Date into date/time string. + * @param date Date to be formatted. + * @return the formatted date-time string. + */ + public String format(Date date) { + return dateFormat.get().format(date); + } + + /** + * Parse the String to produce Date. + * @param source String to parse. + * @return Date parsed from the String. + * @throws ParseException + * - if the beginning of the specified string cannot be parsed. + */ + public Date parse(String source) throws ParseException { + return dateFormat.get().parse(source); + } + + /** + * @param zone + */ + public void setTimeZone(TimeZone zone) { + dateFormat.get().setTimeZone(zone); + } + + /** + * Get access to underlying SimpleDateFormat. + * Note: Do not pass reference to this Date to other threads! + * @return the SimpleDateFormat for the thread. 
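+ * <p>Typical use of the enclosing class (the pattern and values are
+ * examples only):
+ * <pre>
+ *   ThreadLocalDateFormat df = new ThreadLocalDateFormat("yyyy-MM-dd HH:mm:ss");
+ *   String text = df.format(new Date());        // safe from any thread
+ *   Date parsed = df.parse("2010-01-01 00:00:00");
+ * </pre>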
+ */ + SimpleDateFormat get() { + return dateFormat.get(); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeManager.java b/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeManager.java new file mode 100644 index 0000000..b75c5fd --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeManager.java @@ -0,0 +1,89 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.common; + +import java.io.IOException; +import java.util.SortedSet; + +import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand; + +/** + * Generic upgrade manager. + * + * {@link #broadcastCommand} is the command that should be + * + */ +public abstract class UpgradeManager { + protected SortedSet currentUpgrades = null; + protected boolean upgradeState = false; // true if upgrade is in progress + protected int upgradeVersion = 0; + protected UpgradeCommand broadcastCommand = null; + + public synchronized UpgradeCommand getBroadcastCommand() { + return this.broadcastCommand; + } + + public boolean getUpgradeState() { + return this.upgradeState; + } + + public int getUpgradeVersion(){ + return this.upgradeVersion; + } + + public void setUpgradeState(boolean uState, int uVersion) { + this.upgradeState = uState; + this.upgradeVersion = uVersion; + } + + public SortedSet getDistributedUpgrades() throws IOException { + return UpgradeObjectCollection.getDistributedUpgrades( + getUpgradeVersion(), getType()); + } + + public short getUpgradeStatus() { + if(currentUpgrades == null) + return 100; + return currentUpgrades.first().getUpgradeStatus(); + } + + public boolean initializeUpgrade() throws IOException { + currentUpgrades = getDistributedUpgrades(); + if(currentUpgrades == null) { + // set new upgrade state + setUpgradeState(false, FSConstants.LAYOUT_VERSION); + return false; + } + Upgradeable curUO = currentUpgrades.first(); + // set and write new upgrade state into disk + setUpgradeState(true, curUO.getVersion()); + return true; + } + + public boolean isUpgradeCompleted() { + if (currentUpgrades == null) { + return true; + } + return false; + } + + public abstract HdfsConstants.NodeType getType(); + public abstract boolean startUpgrade() throws IOException; + public abstract void completeUpgrade() throws IOException; +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeObject.java b/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeObject.java new file mode 100644 index 0000000..ee60e1b --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeObject.java @@ -0,0 +1,66 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.common; + +import java.io.IOException; + +import org.apache.hadoop.hdfs.server.common.UpgradeObjectCollection.UOSignature; + +/** + * Abstract upgrade object. + * + * Contains default implementation of common methods of {@link Upgradeable} + * interface. + */ +public abstract class UpgradeObject implements Upgradeable { + protected short status; + + public short getUpgradeStatus() { + return status; + } + + public String getDescription() { + return "Upgrade object for " + getType() + " layout version " + getVersion(); + } + + public UpgradeStatusReport getUpgradeStatusReport(boolean details) + throws IOException { + return new UpgradeStatusReport(getVersion(), getUpgradeStatus(), false); + } + + public int compareTo(Upgradeable o) { + if(this.getVersion() != o.getVersion()) + return (getVersion() > o.getVersion() ? -1 : 1); + int res = this.getType().toString().compareTo(o.getType().toString()); + if(res != 0) + return res; + return getClass().getCanonicalName().compareTo( + o.getClass().getCanonicalName()); + } + + public boolean equals(Object o) { + if (!(o instanceof UpgradeObject)) { + return false; + } + return this.compareTo((UpgradeObject)o) == 0; + } + + public int hashCode() { + return new UOSignature(this).hashCode(); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java b/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java new file mode 100644 index 0000000..b853da5 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java @@ -0,0 +1,130 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.common; + +import java.io.IOException; +import java.util.SortedSet; +import java.util.TreeSet; + +import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.util.StringUtils; + +/** + * Collection of upgrade objects. + * + * Upgrade objects should be registered here before they can be used. 
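+ * Upgrade objects are registered from the static initializer below via
+ * registerUpgrade(Upgradeable); the collection is kept sorted so that newer
+ * (more negative) layout versions come first.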
+ */ +public class UpgradeObjectCollection { + static { + initialize(); + // Registered distributed upgrade objects here + // registerUpgrade(new UpgradeObject()); + } + + static class UOSignature implements Comparable { + int version; + HdfsConstants.NodeType type; + String className; + + UOSignature(Upgradeable uo) { + this.version = uo.getVersion(); + this.type = uo.getType(); + this.className = uo.getClass().getCanonicalName(); + } + + int getVersion() { + return version; + } + + HdfsConstants.NodeType getType() { + return type; + } + + String getClassName() { + return className; + } + + Upgradeable instantiate() throws IOException { + try { + return (Upgradeable)Class.forName(getClassName()).newInstance(); + } catch(ClassNotFoundException e) { + throw new IOException(StringUtils.stringifyException(e)); + } catch(InstantiationException e) { + throw new IOException(StringUtils.stringifyException(e)); + } catch(IllegalAccessException e) { + throw new IOException(StringUtils.stringifyException(e)); + } + } + + public int compareTo(UOSignature o) { + if(this.version != o.version) + return (version < o.version ? -1 : 1); + int res = this.getType().toString().compareTo(o.getType().toString()); + if(res != 0) + return res; + return className.compareTo(o.className); + } + + public boolean equals(Object o) { + if (!(o instanceof UOSignature)) { + return false; + } + return this.compareTo((UOSignature)o) == 0; + } + + public int hashCode() { + return version ^ ((type==null)?0:type.hashCode()) + ^ ((className==null)?0:className.hashCode()); + } + } + + /** + * Static collection of upgrade objects sorted by version. + * Layout versions are negative therefore newer versions will go first. + */ + static SortedSet upgradeTable; + + static final void initialize() { + upgradeTable = new TreeSet(); + } + + static void registerUpgrade(Upgradeable uo) { + // Registered distributed upgrade objects here + upgradeTable.add(new UOSignature(uo)); + } + + public static SortedSet getDistributedUpgrades(int versionFrom, + HdfsConstants.NodeType type + ) throws IOException { + assert FSConstants.LAYOUT_VERSION <= versionFrom : "Incorrect version " + + versionFrom + ". Expected to be <= " + FSConstants.LAYOUT_VERSION; + SortedSet upgradeObjects = new TreeSet(); + for(UOSignature sig : upgradeTable) { + if(sig.getVersion() < FSConstants.LAYOUT_VERSION) + continue; + if(sig.getVersion() > versionFrom) + break; + if(sig.getType() != type ) + continue; + upgradeObjects.add(sig.instantiate()); + } + if(upgradeObjects.size() == 0) + return null; + return upgradeObjects; + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeStatusReport.java b/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeStatusReport.java new file mode 100644 index 0000000..86fa7a8 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/common/UpgradeStatusReport.java @@ -0,0 +1,124 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.common; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableFactories; +import org.apache.hadoop.io.WritableFactory; + +/** + * Base upgrade upgradeStatus class. + * Overload this class if specific status fields need to be reported. + * + * Describes status of current upgrade. + */ +public class UpgradeStatusReport implements Writable { + protected int version; + protected short upgradeStatus; + protected boolean finalized; + + public UpgradeStatusReport() { + this.version = 0; + this.upgradeStatus = 0; + this.finalized = false; + } + + public UpgradeStatusReport(int version, short status, boolean isFinalized) { + this.version = version; + this.upgradeStatus = status; + this.finalized = isFinalized; + } + + /** + * Get the layout version of the currently running upgrade. + * @return layout version + */ + public int getVersion() { + return this.version; + } + + /** + * Get upgrade upgradeStatus as a percentage of the total upgrade done. + * + * @see Upgradeable#getUpgradeStatus() + */ + public short getUpgradeStatus() { + return upgradeStatus; + } + + /** + * Is current upgrade finalized. + * @return true if finalized or false otherwise. + */ + public boolean isFinalized() { + return this.finalized; + } + + /** + * Get upgradeStatus data as a text for reporting. + * Should be overloaded for a particular upgrade specific upgradeStatus data. + * + * @param details true if upgradeStatus details need to be included, + * false otherwise + * @return text + */ + public String getStatusText(boolean details) { + return "Upgrade for version " + getVersion() + + (upgradeStatus<100 ? + " is in progress. Status = " + upgradeStatus + "%" : + " has been completed." + + "\nUpgrade is " + (finalized ? "" : "not ") + + "finalized."); + } + + /** + * Print basic upgradeStatus details. + */ + public String toString() { + return getStatusText(false); + } + + ///////////////////////////////////////////////// + // Writable + ///////////////////////////////////////////////// + static { // register a ctor + WritableFactories.setFactory + (UpgradeStatusReport.class, + new WritableFactory() { + public Writable newInstance() { return new UpgradeStatusReport(); } + }); + } + + /** + */ + public void write(DataOutput out) throws IOException { + out.writeInt(this.version); + out.writeShort(this.upgradeStatus); + } + + /** + */ + public void readFields(DataInput in) throws IOException { + this.version = in.readInt(); + this.upgradeStatus = in.readShort(); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/common/Upgradeable.java b/src/hdfs/org/apache/hadoop/hdfs/server/common/Upgradeable.java new file mode 100644 index 0000000..a45f4ed --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/common/Upgradeable.java @@ -0,0 +1,98 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.common; + +import java.io.IOException; + +import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand; + +/** + * Common interface for distributed upgrade objects. + * + * Each upgrade object corresponds to a layout version, + * which is the latest version that should be upgraded using this object. + * That is all components whose layout version is greater or equal to the + * one returned by {@link #getVersion()} must be upgraded with this object. + */ +public interface Upgradeable extends Comparable { + /** + * Get the layout version of the upgrade object. + * @return layout version + */ + int getVersion(); + + /** + * Get the type of the software component, which this object is upgrading. + * @return type + */ + HdfsConstants.NodeType getType(); + + /** + * Description of the upgrade object for displaying. + * @return description + */ + String getDescription(); + + /** + * Upgrade status determines a percentage of the work done out of the total + * amount required by the upgrade. + * + * 100% means that the upgrade is completed. + * Any value < 100 means it is not complete. + * + * The return value should provide at least 2 values, e.g. 0 and 100. + * @return integer value in the range [0, 100]. + */ + short getUpgradeStatus(); + + /** + * Prepare for the upgrade. + * E.g. initialize upgrade data structures and set status to 0. + * + * Returns an upgrade command that is used for broadcasting to other cluster + * components. + * E.g. name-node informs data-nodes that they must perform a distributed upgrade. + * + * @return an UpgradeCommand for broadcasting. + * @throws IOException + */ + UpgradeCommand startUpgrade() throws IOException; + + /** + * Complete upgrade. + * E.g. cleanup upgrade data structures or write metadata to disk. + * + * Returns an upgrade command that is used for broadcasting to other cluster + * components. + * E.g. data-nodes inform the name-node that they completed the upgrade + * while other data-nodes are still upgrading. + * + * @throws IOException + */ + UpgradeCommand completeUpgrade() throws IOException; + + /** + * Get status report for the upgrade. + * + * @param details true if upgradeStatus details need to be included, + * false otherwise + * @return {@link UpgradeStatusReport} + * @throws IOException + */ + UpgradeStatusReport getUpgradeStatusReport(boolean details) throws IOException; +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/common/Util.java b/src/hdfs/org/apache/hadoop/hdfs/server/common/Util.java new file mode 100644 index 0000000..5e8de72 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/common/Util.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.common; + +public final class Util { + /** + * Current system time. + * @return current time in msec. + */ + public static long now() { + return System.currentTimeMillis(); + } +} \ No newline at end of file diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockAlreadyExistsException.java b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockAlreadyExistsException.java new file mode 100644 index 0000000..3cab5e6 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockAlreadyExistsException.java @@ -0,0 +1,38 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +package org.apache.hadoop.hdfs.server.datanode; + +import java.io.IOException; + +/** + * Exception indicating that the target block already exists + * and is not set to be recovered/overwritten. + */ +class BlockAlreadyExistsException extends IOException { + private static final long serialVersionUID = 1L; + + public BlockAlreadyExistsException() { + super(); + } + + public BlockAlreadyExistsException(String msg) { + super(msg); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java new file mode 100644 index 0000000..3254e98 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java @@ -0,0 +1,130 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.datanode; + +import java.io.BufferedInputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; + +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.DataChecksum; + + +/** + * BlockMetadataHeader manages metadata for data blocks on Datanodes. + * This is not related to the Block related functionality in Namenode. + * The biggest part of data block metadata is CRC for the block. + */ +class BlockMetadataHeader { + + static final short METADATA_VERSION = FSDataset.METADATA_VERSION; + + /** + * Header includes everything except the checksum(s) themselves. + * Version is two bytes. Following it is the DataChecksum + * that occupies 5 bytes. + */ + private short version; + private DataChecksum checksum = null; + + BlockMetadataHeader(short version, DataChecksum checksum) { + this.checksum = checksum; + this.version = version; + } + + short getVersion() { + return version; + } + + DataChecksum getChecksum() { + return checksum; + } + + + /** + * This reads all the fields till the beginning of checksum. + * @param in + * @return Metadata Header + * @throws IOException + */ + static BlockMetadataHeader readHeader(DataInputStream in) throws IOException { + return readHeader(in.readShort(), in); + } + + /** + * Reads header at the top of metadata file and returns the header. + * + * @param dataset + * @param block + * @return + * @throws IOException + */ + static BlockMetadataHeader readHeader(File file) throws IOException { + DataInputStream in = null; + try { + in = new DataInputStream(new BufferedInputStream( + new FileInputStream(file))); + return readHeader(in); + } finally { + IOUtils.closeStream(in); + } + } + + // Version is already read. + private static BlockMetadataHeader readHeader(short version, DataInputStream in) + throws IOException { + DataChecksum checksum = DataChecksum.newDataChecksum(in); + return new BlockMetadataHeader(version, checksum); + } + + /** + * This writes all the fields till the beginning of checksum. + * @param out DataOutputStream + * @param header + * @return + * @throws IOException + */ + private static void writeHeader(DataOutputStream out, + BlockMetadataHeader header) + throws IOException { + out.writeShort(header.getVersion()); + header.getChecksum().writeHeader(out); + } + + /** + * Writes all the fields till the beginning of checksum. 
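+ * The header written here is getHeaderSize() bytes long: the two-byte
+ * METADATA_VERSION followed by the DataChecksum header described in the
+ * class comment above.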
+ * @param out + * @param checksum + * @throws IOException + */ + static void writeHeader(DataOutputStream out, DataChecksum checksum) + throws IOException { + writeHeader(out, new BlockMetadataHeader(METADATA_VERSION, checksum)); + } + + /** + * Returns the size of the header + */ + static int getHeaderSize() { + return Short.SIZE/Byte.SIZE + DataChecksum.getChecksumHeaderSize(); + } +} + diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java new file mode 100644 index 0000000..02fe8ce --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java @@ -0,0 +1,1031 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.datanode; + +import java.io.BufferedOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.EOFException; +import java.io.IOException; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.util.LinkedList; +import java.util.zip.CRC32; +import java.util.zip.Checksum; + +import org.apache.commons.logging.Log; +import org.apache.hadoop.fs.FSInputChecker; +import org.apache.hadoop.fs.FSOutputSummer; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.DataTransferProtocol; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.util.DataTransferThrottler; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.Daemon; +import org.apache.hadoop.util.DataChecksum; +import org.apache.hadoop.util.StringUtils; +import static org.apache.hadoop.hdfs.server.datanode.DataNode.DN_CLIENTTRACE_FORMAT; + +/** A class that receives a block and writes to its own disk, meanwhile + * may copies it to another site. If a throttler is provided, + * streaming throttling is also supported. + **/ +class BlockReceiver implements java.io.Closeable, FSConstants { + public static final Log LOG = DataNode.LOG; + static final Log ClientTraceLog = DataNode.ClientTraceLog; + + private Block block; // the block to receive + protected boolean finalized; + private DataInputStream in = null; // from where data are read + private DataChecksum checksum; // from where chunks of a block can be read + private OutputStream out = null; // to block file at local disk + private DataOutputStream checksumOut = null; // to crc file at local disk + private int bytesPerChecksum; + private int checksumSize; + private ByteBuffer buf; // contains one full packet. 
+ private int bufRead; //amount of valid data in the buf + private int maxPacketReadLen; + protected long offsetInBlock; + protected final String inAddr; + protected final String myAddr; + private String mirrorAddr; + private DataOutputStream mirrorOut; + private Daemon responder = null; + private DataTransferThrottler throttler; + private FSDataset.BlockWriteStreams streams; + private boolean isRecovery = false; + private String clientName; + DatanodeInfo srcDataNode = null; + private Checksum partialCrc = null; + private DataNode datanode = null; + + BlockReceiver(Block block, DataInputStream in, String inAddr, + String myAddr, boolean isRecovery, String clientName, + DatanodeInfo srcDataNode, DataNode datanode) throws IOException { + try{ + this.block = block; + this.in = in; + this.inAddr = inAddr; + this.myAddr = myAddr; + this.isRecovery = isRecovery; + this.clientName = clientName; + this.offsetInBlock = 0; + this.srcDataNode = srcDataNode; + this.datanode = datanode; + this.checksum = DataChecksum.newDataChecksum(in); + this.bytesPerChecksum = checksum.getBytesPerChecksum(); + this.checksumSize = checksum.getChecksumSize(); + // + // Open local disk out + // + streams = datanode.data.writeToBlock(block, isRecovery); + this.finalized = datanode.data.isValidBlock(block); + if (streams != null) { + this.out = streams.dataOut; + this.checksumOut = new DataOutputStream(new BufferedOutputStream( + streams.checksumOut, + SMALL_BUFFER_SIZE)); + // If this block is for appends, then remove it from periodic + // validation. + if (datanode.blockScanner != null && isRecovery) { + datanode.blockScanner.deleteBlock(block); + } + } + } catch (BlockAlreadyExistsException bae) { + throw bae; + } catch(IOException ioe) { + IOUtils.closeStream(this); + cleanupBlock(); + + // check if there is a disk error + IOException cause = FSDataset.getCauseIfDiskError(ioe); + DataNode.LOG.warn("IOException in BlockReceiver constructor. Cause is ", + cause); + + if (cause != null) { // possible disk error + ioe = cause; + datanode.checkDiskError(ioe); // may throw an exception here + } + + throw ioe; + } + } + + /** + * close files. + */ + public void close() throws IOException { + + IOException ioe = null; + // close checksum file + try { + if (checksumOut != null) { + checksumOut.flush(); + checksumOut.close(); + checksumOut = null; + } + } catch(IOException e) { + ioe = e; + } + // close block file + try { + if (out != null) { + out.flush(); + out.close(); + out = null; + } + } catch (IOException e) { + ioe = e; + } + // disk check + if(ioe != null) { + datanode.checkDiskError(ioe); + throw ioe; + } + } + + /** + * Flush block data and metadata files to disk. + * @throws IOException + */ + void flush() throws IOException { + if (checksumOut != null) { + checksumOut.flush(); + } + if (out != null) { + out.flush(); + } + } + + /** + * While writing to mirrorOut, failure to write to mirror should not + * affect this datanode unless a client is writing the block. + */ + private void handleMirrorOutError(IOException ioe) throws IOException { + LOG.info(datanode.dnRegistration + ":Exception writing block " + + block + " to mirror " + mirrorAddr + "\n" + + StringUtils.stringifyException(ioe)); + mirrorOut = null; + // + // If stream-copy fails, continue + // writing to disk for replication requests. For client + // writes, return error so that the client can do error + // recovery. + // + if (clientName.length() > 0) { + throw ioe; + } + } + + /** + * Verify multiple CRC chunks. 
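+ * On a checksum mismatch, if the data was received from another datanode,
+ * the corrupt replica is first reported to the namenode; the write is then
+ * failed with an IOException.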
+ */ + private void verifyChunks( byte[] dataBuf, int dataOff, int len, + byte[] checksumBuf, int checksumOff ) + throws IOException { + while (len > 0) { + int chunkLen = Math.min(len, bytesPerChecksum); + + checksum.update(dataBuf, dataOff, chunkLen); + + if (!checksum.compare(checksumBuf, checksumOff)) { + if (srcDataNode != null) { + try { + LOG.info("report corrupt block " + block + " from datanode " + + srcDataNode + " to namenode"); + LocatedBlock lb = new LocatedBlock(block, + new DatanodeInfo[] {srcDataNode}); + datanode.namenode.reportBadBlocks(new LocatedBlock[] {lb}); + } catch (IOException e) { + LOG.warn("Failed to report bad block " + block + + " from datanode " + srcDataNode + " to namenode"); + } + } + throw new IOException("Unexpected checksum mismatch " + + "while writing " + block + " from " + inAddr); + } + + checksum.reset(); + dataOff += chunkLen; + checksumOff += checksumSize; + len -= chunkLen; + } + } + + /** + * Makes sure buf.position() is zero without modifying buf.remaining(). + * It moves the data if position needs to be changed. + */ + private void shiftBufData() { + if (bufRead != buf.limit()) { + throw new IllegalStateException("bufRead should be same as " + + "buf.limit()"); + } + + //shift the remaining data on buf to the front + if (buf.position() > 0) { + int dataLeft = buf.remaining(); + if (dataLeft > 0) { + byte[] b = buf.array(); + System.arraycopy(b, buf.position(), b, 0, dataLeft); + } + buf.position(0); + bufRead = dataLeft; + buf.limit(bufRead); + } + } + + /** + * reads upto toRead byte to buf at buf.limit() and increments the limit. + * throws an IOException if read does not succeed. + */ + private int readToBuf(int toRead) throws IOException { + if (toRead < 0) { + toRead = (maxPacketReadLen > 0 ? maxPacketReadLen : buf.capacity()) + - buf.limit(); + } + + int nRead = in.read(buf.array(), buf.limit(), toRead); + + if (nRead < 0) { + throw new EOFException("while trying to read " + toRead + " bytes"); + } + bufRead = buf.limit() + nRead; + buf.limit(bufRead); + return nRead; + } + + + /** + * Reads (at least) one packet and returns the packet length. + * buf.position() points to the start of the packet and + * buf.limit() point to the end of the packet. There could + * be more data from next packet in buf.

+ * + * It tries to read a full packet with single read call. + * Consecutive packets are usually of the same length. + */ + private int readNextPacket() throws IOException { + /* This dances around buf a little bit, mainly to read + * full packet with single read and to accept arbitarary size + * for next packet at the same time. + */ + if (buf == null) { + /* initialize buffer to the best guess size: + * 'chunksPerPacket' calculation here should match the same + * calculation in DFSClient to make the guess accurate. + */ + int chunkSize = bytesPerChecksum + checksumSize; + int chunksPerPacket = (datanode.writePacketSize - DataNode.PKT_HEADER_LEN - + SIZE_OF_INTEGER + chunkSize - 1)/chunkSize; + buf = ByteBuffer.allocate(DataNode.PKT_HEADER_LEN + SIZE_OF_INTEGER + + Math.max(chunksPerPacket, 1) * chunkSize); + buf.limit(0); + } + + // See if there is data left in the buffer : + if (bufRead > buf.limit()) { + buf.limit(bufRead); + } + + while (buf.remaining() < SIZE_OF_INTEGER) { + if (buf.position() > 0) { + shiftBufData(); + } + readToBuf(-1); + } + + /* We mostly have the full packet or at least enough for an int + */ + buf.mark(); + int payloadLen = buf.getInt(); + buf.reset(); + + if (payloadLen == 0) { + //end of stream! + buf.limit(buf.position() + SIZE_OF_INTEGER); + return 0; + } + + // check corrupt values for pktLen, 100MB upper limit should be ok? + if (payloadLen < 0 || payloadLen > (100*1024*1024)) { + throw new IOException("Incorrect value for packet payload : " + + payloadLen); + } + + int pktSize = payloadLen + DataNode.PKT_HEADER_LEN; + + if (buf.remaining() < pktSize) { + //we need to read more data + int toRead = pktSize - buf.remaining(); + + // first make sure buf has enough space. + int spaceLeft = buf.capacity() - buf.limit(); + if (toRead > spaceLeft && buf.position() > 0) { + shiftBufData(); + spaceLeft = buf.capacity() - buf.limit(); + } + if (toRead > spaceLeft) { + byte oldBuf[] = buf.array(); + int toCopy = buf.limit(); + buf = ByteBuffer.allocate(toCopy + toRead); + System.arraycopy(oldBuf, 0, buf.array(), 0, toCopy); + buf.limit(toCopy); + } + + //now read: + while (toRead > 0) { + toRead -= readToBuf(toRead); + } + } + + if (buf.remaining() > pktSize) { + buf.limit(buf.position() + pktSize); + } + + if (pktSize > maxPacketReadLen) { + maxPacketReadLen = pktSize; + } + + return payloadLen; + } + + /** + * Receives and processes a packet. It can contain many chunks. + * returns size of the packet. 
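+ *
+ * A packet is handled in this order: the header fields (offset in block,
+ * seqno, lastPacketInBlock) are parsed, the raw packet is forwarded to the
+ * mirror datanode if one exists, checksums are verified unless a client is
+ * writing and this is not the last datanode in the pipeline, and finally
+ * the data and checksums are written to the local block and meta files.
+ * The seqno is then queued with the PacketResponder so an ack can be sent
+ * once downstream datanodes have acknowledged it.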
+ */ + private int receivePacket() throws IOException { + + int payloadLen = readNextPacket(); + + if (payloadLen <= 0) { + return payloadLen; + } + + buf.mark(); + //read the header + buf.getInt(); // packet length + offsetInBlock = buf.getLong(); // get offset of packet in block + long seqno = buf.getLong(); // get seqno + boolean lastPacketInBlock = (buf.get() != 0); + + int endOfHeader = buf.position(); + buf.reset(); + + if (LOG.isDebugEnabled()){ + LOG.debug("Receiving one packet for block " + block + + " of length " + payloadLen + + " seqno " + seqno + + " offsetInBlock " + offsetInBlock + + " lastPacketInBlock " + lastPacketInBlock); + } + + setBlockPosition(offsetInBlock); + + //First write the packet to the mirror: + if (mirrorOut != null) { + try { + mirrorOut.write(buf.array(), buf.position(), buf.remaining()); + mirrorOut.flush(); + } catch (IOException e) { + handleMirrorOutError(e); + } + } + + buf.position(endOfHeader); + int len = buf.getInt(); + + if (len < 0) { + throw new IOException("Got wrong length during writeBlock(" + block + + ") from " + inAddr + " at offset " + + offsetInBlock + ": " + len); + } + + if (len == 0) { + LOG.debug("Receiving empty packet for block " + block); + } else { + offsetInBlock += len; + + int checksumLen = ((len + bytesPerChecksum - 1)/bytesPerChecksum)* + checksumSize; + + if ( buf.remaining() != (checksumLen + len)) { + throw new IOException("Data remaining in packet does not match " + + "sum of checksumLen and dataLen"); + } + int checksumOff = buf.position(); + int dataOff = checksumOff + checksumLen; + byte pktBuf[] = buf.array(); + + buf.position(buf.limit()); // move to the end of the data. + + /* skip verifying checksum iff this is not the last one in the + * pipeline and clientName is non-null. i.e. Checksum is verified + * on all the datanodes when the data is being written by a + * datanode rather than a client. Whe client is writing the data, + * protocol includes acks and only the last datanode needs to verify + * checksum. + */ + if (mirrorOut == null || clientName.length() == 0) { + verifyChunks(pktBuf, dataOff, len, pktBuf, checksumOff); + } + + try { + if (!finalized) { + //finally write to the disk : + out.write(pktBuf, dataOff, len); + + // If this is a partial chunk, then verify that this is the only + // chunk in the packet. Calculate new crc for this chunk. 
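+ // partialCrc is non-null only when an append or recovery started in the
+ // middle of a checksum chunk (see computePartialChunkCrc(), called from
+ // setBlockPosition()). The pre-existing bytes of that chunk have already
+ // been folded into partialCrc, so the newly received bytes are added to
+ // it and a single recomputed checksum is written in place of the stored
+ // one.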
+ if (partialCrc != null) { + if (len > bytesPerChecksum) { + throw new IOException("Got wrong length during writeBlock(" + + block + ") from " + inAddr + " " + + "A packet can have only one partial chunk."+ + " len = " + len + + " bytesPerChecksum " + bytesPerChecksum); + } + partialCrc.update(pktBuf, dataOff, len); + byte[] buf = FSOutputSummer.convertToByteStream(partialCrc, checksumSize); + checksumOut.write(buf); + LOG.debug("Writing out partial crc for data len " + len); + partialCrc = null; + } else { + checksumOut.write(pktBuf, checksumOff, checksumLen); + } + datanode.myMetrics.bytesWritten.inc(len); + } + } catch (IOException iex) { + datanode.checkDiskError(iex); + throw iex; + } + } + + /// flush entire packet before sending ack + flush(); + + // put in queue for pending acks + if (responder != null) { + ((PacketResponder)responder.getRunnable()).enqueue(seqno, + lastPacketInBlock); + } + + if (throttler != null) { // throttle I/O + throttler.throttle(payloadLen); + } + + return payloadLen; + } + + void writeChecksumHeader(DataOutputStream mirrorOut) throws IOException { + checksum.writeHeader(mirrorOut); + } + + + void receiveBlock( + DataOutputStream mirrOut, // output to next datanode + DataInputStream mirrIn, // input from next datanode + DataOutputStream replyOut, // output to previous datanode + String mirrAddr, DataTransferThrottler throttlerArg, + int numTargets) throws IOException { + + mirrorOut = mirrOut; + mirrorAddr = mirrAddr; + throttler = throttlerArg; + + try { + // write data chunk header + if (!finalized) { + BlockMetadataHeader.writeHeader(checksumOut, checksum); + } + if (clientName.length() > 0) { + responder = new Daemon(datanode.threadGroup, + new PacketResponder(this, block, mirrIn, + replyOut, numTargets)); + responder.start(); // start thread to processes reponses + } + + /* + * Receive until packet length is zero. + */ + while (receivePacket() > 0) {} + + // flush the mirror out + if (mirrorOut != null) { + try { + mirrorOut.writeInt(0); // mark the end of the block + mirrorOut.flush(); + } catch (IOException e) { + handleMirrorOutError(e); + } + } + + // wait for all outstanding packet responses. And then + // indicate responder to gracefully shutdown. + if (responder != null) { + ((PacketResponder)responder.getRunnable()).close(); + } + + // if this write is for a replication request (and not + // from a client), then finalize block. For client-writes, + // the block is finalized in the PacketResponder. + if (clientName.length() == 0) { + // close the block/crc files + close(); + + // Finalize the block. Does this fsync()? + block.setNumBytes(offsetInBlock); + datanode.data.finalizeBlock(block); + datanode.myMetrics.blocksWritten.inc(); + } + + } catch (IOException ioe) { + LOG.info("Exception in receiveBlock for block " + block + + " " + ioe); + IOUtils.closeStream(this); + if (responder != null) { + responder.interrupt(); + } + cleanupBlock(); + throw ioe; + } finally { + if (responder != null) { + try { + responder.join(); + } catch (InterruptedException e) { + throw new IOException("Interrupted receiveBlock"); + } + responder = null; + } + } + } + + /** Cleanup a partial block + * if this write is for a replication request (and not from a client) + */ + private void cleanupBlock() throws IOException { + if (clientName.length() == 0) { // not client write + datanode.data.unfinalizeBlock(block); + } + } + + /** + * Sets the file pointer in the local block file to the specified value. 
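+ *
+ * The matching offset in the metadata file is derived from the block
+ * offset as:
+ *   headerSize + (offsetInBlock / bytesPerChecksum) * checksumSize
+ * For example, with 512-byte chunks and 4-byte CRC32 checksums, a block
+ * offset of 2600 maps to headerSize + 5 * 4 in the meta file, and the
+ * trailing 40 bytes (2600 % 512) form a partial chunk whose CRC is
+ * recomputed via computePartialChunkCrc(). (Chunk and checksum sizes are
+ * configuration dependent; 512/4 are only illustrative values.)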
+ */ + private void setBlockPosition(long offsetInBlock) throws IOException { + if (finalized) { + if (!isRecovery) { + throw new IOException("Write to offset " + offsetInBlock + + " of block " + block + + " that is already finalized."); + } + if (offsetInBlock > datanode.data.getLength(block)) { + throw new IOException("Write to offset " + offsetInBlock + + " of block " + block + + " that is already finalized and is of size " + + datanode.data.getLength(block)); + } + return; + } + + if (datanode.data.getChannelPosition(block, streams) == offsetInBlock) { + return; // nothing to do + } + long offsetInChecksum = BlockMetadataHeader.getHeaderSize() + + offsetInBlock / bytesPerChecksum * checksumSize; + if (out != null) { + out.flush(); + } + if (checksumOut != null) { + checksumOut.flush(); + } + + // If this is a partial chunk, then read in pre-existing checksum + if (offsetInBlock % bytesPerChecksum != 0) { + LOG.info("setBlockPosition trying to set position to " + + offsetInBlock + + " for block " + block + + " which is not a multiple of bytesPerChecksum " + + bytesPerChecksum); + computePartialChunkCrc(offsetInBlock, offsetInChecksum, bytesPerChecksum); + } + + LOG.info("Changing block file offset of block " + block + " from " + + datanode.data.getChannelPosition(block, streams) + + " to " + offsetInBlock + + " meta file offset to " + offsetInChecksum); + + // set the position of the block file + datanode.data.setChannelPosition(block, streams, offsetInBlock, offsetInChecksum); + } + + /** + * reads in the partial crc chunk and computes checksum + * of pre-existing data in partial chunk. + */ + private void computePartialChunkCrc(long blkoff, long ckoff, + int bytesPerChecksum) throws IOException { + + // find offset of the beginning of partial chunk. + // + int sizePartialChunk = (int) (blkoff % bytesPerChecksum); + int checksumSize = checksum.getChecksumSize(); + blkoff = blkoff - sizePartialChunk; + LOG.info("computePartialChunkCrc sizePartialChunk " + + sizePartialChunk + + " block " + block + + " offset in block " + blkoff + + " offset in metafile " + ckoff); + + // create an input stream from the block file + // and read in partial crc chunk into temporary buffer + // + byte[] buf = new byte[sizePartialChunk]; + byte[] crcbuf = new byte[checksumSize]; + FSDataset.BlockInputStreams instr = null; + try { + instr = datanode.data.getTmpInputStreams(block, blkoff, ckoff); + IOUtils.readFully(instr.dataIn, buf, 0, sizePartialChunk); + + // open meta file and read in crc value computer earlier + IOUtils.readFully(instr.checksumIn, crcbuf, 0, crcbuf.length); + } finally { + IOUtils.closeStream(instr); + } + + // compute crc of partial chunk from data read in the block file. + partialCrc = new CRC32(); + partialCrc.update(buf, 0, sizePartialChunk); + LOG.info("Read in partial CRC chunk from disk for block " + block); + + // paranoia! verify that the pre-computed crc matches what we + // recalculated just now + if (partialCrc.getValue() != FSInputChecker.checksum2long(crcbuf)) { + String msg = "Partial CRC " + partialCrc.getValue() + + " does not match value computed the " + + " last time file was closed " + + FSInputChecker.checksum2long(crcbuf); + throw new IOException(msg); + } + //LOG.debug("Partial CRC matches 0x" + + // Long.toHexString(partialCrc.getValue())); + } + + + /** + * Processed responses from downstream datanodes in the pipeline + * and sends back replies to the originator. 
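+ *
+ * The receiver thread enqueues (seqno, lastPacketInBlock) after each
+ * packet is flushed to disk. This thread then waits for the matching ack
+ * from the downstream datanode (or acts as the pipeline terminus when
+ * numTargets == 0), finalizes the block once the last packet has been
+ * acked, and writes its own status plus the forwarded downstream statuses
+ * back to the upstream node. A seqno of -1 on the reply stream is a
+ * heartbeat/keepalive rather than an ack.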
+ */ + class PacketResponder implements Runnable, FSConstants { + + //packet waiting for ack + private LinkedList ackQueue = new LinkedList(); + private volatile boolean running = true; + private Block block; + DataInputStream mirrorIn; // input from downstream datanode + DataOutputStream replyOut; // output to upstream datanode + private int numTargets; // number of downstream datanodes including myself + private BlockReceiver receiver; // The owner of this responder. + + public String toString() { + return "PacketResponder " + numTargets + " for Block " + this.block; + } + + PacketResponder(BlockReceiver receiver, Block b, DataInputStream in, + DataOutputStream out, int numTargets) { + this.receiver = receiver; + this.block = b; + mirrorIn = in; + replyOut = out; + this.numTargets = numTargets; + } + + /** + * enqueue the seqno that is still be to acked by the downstream datanode. + * @param seqno + * @param lastPacketInBlock + */ + synchronized void enqueue(long seqno, boolean lastPacketInBlock) { + if (running) { + LOG.debug("PacketResponder " + numTargets + " adding seqno " + seqno + + " to ack queue."); + ackQueue.addLast(new Packet(seqno, lastPacketInBlock)); + notifyAll(); + } + } + + /** + * wait for all pending packets to be acked. Then shutdown thread. + */ + synchronized void close() { + while (running && ackQueue.size() != 0 && datanode.shouldRun) { + try { + wait(); + } catch (InterruptedException e) { + running = false; + } + } + LOG.debug("PacketResponder " + numTargets + + " for block " + block + " Closing down."); + running = false; + notifyAll(); + } + + private synchronized void lastDataNodeRun() { + long lastHeartbeat = System.currentTimeMillis(); + boolean lastPacket = false; + final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0; + + while (running && datanode.shouldRun && !lastPacket) { + long now = System.currentTimeMillis(); + try { + + // wait for a packet to be sent to downstream datanode + while (running && datanode.shouldRun && ackQueue.size() == 0) { + long idle = now - lastHeartbeat; + long timeout = (datanode.socketTimeout/2) - idle; + if (timeout <= 0) { + timeout = 1000; + } + try { + wait(timeout); + } catch (InterruptedException e) { + if (running) { + LOG.info("PacketResponder " + numTargets + + " for block " + block + " Interrupted."); + running = false; + } + break; + } + + // send a heartbeat if it is time. + now = System.currentTimeMillis(); + if (now - lastHeartbeat > datanode.socketTimeout/2) { + replyOut.writeLong(-1); // send heartbeat + replyOut.flush(); + lastHeartbeat = now; + } + } + + if (!running || !datanode.shouldRun) { + break; + } + Packet pkt = ackQueue.removeFirst(); + long expected = pkt.seqno; + notifyAll(); + LOG.debug("PacketResponder " + numTargets + + " for block " + block + + " acking for packet " + expected); + + // If this is the last packet in block, then close block + // file and finalize the block before responding success + if (pkt.lastPacketInBlock) { + if (!receiver.finalized) { + receiver.close(); + final long endTime = ClientTraceLog.isInfoEnabled() ? 
System.nanoTime() : 0; + block.setNumBytes(receiver.offsetInBlock); + datanode.data.finalizeBlock(block); + datanode.myMetrics.blocksWritten.inc(); + datanode.notifyNamenodeReceivedBlock(block, + DataNode.EMPTY_DEL_HINT); + if (ClientTraceLog.isInfoEnabled() && + receiver.clientName.length() > 0) { + long offset = 0; + ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT, + receiver.inAddr, receiver.myAddr, block.getNumBytes(), + "HDFS_WRITE", receiver.clientName, offset, + datanode.dnRegistration.getStorageID(), block, endTime-startTime)); + } else { + LOG.info("Received block " + block + + " of size " + block.getNumBytes() + + " from " + receiver.inAddr); + } + } + lastPacket = true; + } + + replyOut.writeLong(expected); + replyOut.writeShort(DataTransferProtocol.OP_STATUS_SUCCESS); + replyOut.flush(); + } catch (Exception e) { + LOG.warn("IOException in BlockReceiver.lastNodeRun: ", e); + if (running) { + try { + datanode.checkDiskError(e); // may throw an exception here + } catch (IOException ioe) { + LOG.warn("DataNode.chekDiskError failed in lastDataNodeRun with: ", + ioe); + } + LOG.info("PacketResponder " + block + " " + numTargets + + " Exception " + StringUtils.stringifyException(e)); + running = false; + } + } + } + LOG.info("PacketResponder " + numTargets + + " for block " + block + " terminating"); + } + + /** + * Thread to process incoming acks. + * @see java.lang.Runnable#run() + */ + public void run() { + + // If this is the last datanode in pipeline, then handle differently + if (numTargets == 0) { + lastDataNodeRun(); + return; + } + + boolean lastPacketInBlock = false; + final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0; + while (running && datanode.shouldRun && !lastPacketInBlock) { + + try { + short op = DataTransferProtocol.OP_STATUS_SUCCESS; + boolean didRead = false; + long expected = -2; + try { + // read seqno from downstream datanode + long seqno = mirrorIn.readLong(); + didRead = true; + if (seqno == -1) { + replyOut.writeLong(-1); // send keepalive + replyOut.flush(); + LOG.debug("PacketResponder " + numTargets + " got -1"); + continue; + } else if (seqno == -2) { + LOG.debug("PacketResponder " + numTargets + " got -2"); + } else { + LOG.debug("PacketResponder " + numTargets + " got seqno = " + + seqno); + Packet pkt = null; + synchronized (this) { + while (running && datanode.shouldRun && ackQueue.size() == 0) { + if (LOG.isDebugEnabled()) { + LOG.debug("PacketResponder " + numTargets + + " seqno = " + seqno + + " for block " + block + + " waiting for local datanode to finish write."); + } + wait(); + } + pkt = ackQueue.removeFirst(); + expected = pkt.seqno; + notifyAll(); + LOG.debug("PacketResponder " + numTargets + " seqno = " + seqno); + if (seqno != expected) { + throw new IOException("PacketResponder " + numTargets + + " for block " + block + + " expected seqno:" + expected + + " received:" + seqno); + } + lastPacketInBlock = pkt.lastPacketInBlock; + } + } + } catch (Throwable e) { + if (running) { + LOG.info("PacketResponder " + block + " " + numTargets + + " Exception " + StringUtils.stringifyException(e)); + running = false; + } + } + + if (Thread.interrupted()) { + /* The receiver thread cancelled this thread. + * We could also check any other status updates from the + * receiver thread (e.g. if it is ok to write to replyOut). + * It is prudent to not send any more status back to the client + * because this datanode has a problem. 
The upstream datanode + * will detect a timout on heartbeats and will declare that + * this datanode is bad, and rightly so. + */ + LOG.info("PacketResponder " + block + " " + numTargets + + " : Thread is interrupted."); + running = false; + continue; + } + + if (!didRead) { + op = DataTransferProtocol.OP_STATUS_ERROR; + } + + // If this is the last packet in block, then close block + // file and finalize the block before responding success + if (lastPacketInBlock && !receiver.finalized) { + receiver.close(); + final long endTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0; + block.setNumBytes(receiver.offsetInBlock); + datanode.data.finalizeBlock(block); + datanode.myMetrics.blocksWritten.inc(); + datanode.notifyNamenodeReceivedBlock(block, + DataNode.EMPTY_DEL_HINT); + if (ClientTraceLog.isInfoEnabled() && + receiver.clientName.length() > 0) { + long offset = 0; + ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT, + receiver.inAddr, receiver.myAddr, block.getNumBytes(), + "HDFS_WRITE", receiver.clientName, offset, + datanode.dnRegistration.getStorageID(), block, endTime-startTime)); + } else { + LOG.info("Received block " + block + + " of size " + block.getNumBytes() + + " from " + receiver.inAddr); + } + } + + // send my status back to upstream datanode + replyOut.writeLong(expected); // send seqno upstream + replyOut.writeShort(DataTransferProtocol.OP_STATUS_SUCCESS); + + LOG.debug("PacketResponder " + numTargets + + " for block " + block + + " responded my status " + + " for seqno " + expected); + + // forward responses from downstream datanodes. + for (int i = 0; i < numTargets && datanode.shouldRun; i++) { + try { + if (op == DataTransferProtocol.OP_STATUS_SUCCESS) { + op = mirrorIn.readShort(); + if (op != DataTransferProtocol.OP_STATUS_SUCCESS) { + LOG.debug("PacketResponder for block " + block + + ": error code received from downstream " + + " datanode[" + i + "] " + op); + } + } + } catch (Throwable e) { + op = DataTransferProtocol.OP_STATUS_ERROR; + } + replyOut.writeShort(op); + } + replyOut.flush(); + LOG.debug("PacketResponder " + block + " " + numTargets + + " responded other status " + " for seqno " + expected); + + // If we were unable to read the seqno from downstream, then stop. + if (expected == -2) { + running = false; + } + // If we forwarded an error response from a downstream datanode + // and we are acting on behalf of a client, then we quit. The + // client will drive the recovery mechanism. + if (op == DataTransferProtocol.OP_STATUS_ERROR && receiver.clientName.length() > 0) { + running = false; + } + } catch (IOException e) { + LOG.warn("IOException in BlockReceiver.run(): ", e); + if (running) { + try { + datanode.checkDiskError(e); // may throw an exception here + } catch (IOException ioe) { + LOG.warn("DataNode.chekDiskError failed in run() with: ", ioe); + } + LOG.info("PacketResponder " + block + " " + numTargets + + " Exception " + StringUtils.stringifyException(e)); + running = false; + } + } catch (RuntimeException e) { + if (running) { + LOG.info("PacketResponder " + block + " " + numTargets + + " Exception " + StringUtils.stringifyException(e)); + running = false; + } + } + } + LOG.info("PacketResponder " + numTargets + + " for block " + block + " terminating"); + } + } + + /** + * This information is cached by the Datanode in the ackQueue. 
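+ * Each entry pairs a packet's sequence number with a flag marking the
+ * last packet of the block; PacketResponder drains the queue in FIFO
+ * order as acks arrive from downstream.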
+ */ + static private class Packet { + long seqno; + boolean lastPacketInBlock; + + Packet(long seqno, boolean lastPacketInBlock) { + this.seqno = seqno; + this.lastPacketInBlock = lastPacketInBlock; + } + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockSender.java new file mode 100644 index 0000000..74e4a03 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockSender.java @@ -0,0 +1,473 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.datanode; + +import java.io.BufferedInputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.SocketException; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.util.Arrays; + +import org.apache.commons.logging.Log; +import org.apache.hadoop.fs.ChecksumException; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.util.DataTransferThrottler; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.net.SocketOutputStream; +import org.apache.hadoop.util.DataChecksum; +import org.apache.hadoop.util.StringUtils; + +/** + * Reads a block from the disk and sends it to a recipient. + */ +public class BlockSender implements java.io.Closeable, FSConstants { + public static final Log LOG = DataNode.LOG; + static final Log ClientTraceLog = DataNode.ClientTraceLog; + + private Block block; // the block to read from + private InputStream blockIn; // data stream + private long blockInPosition = -1; // updated while using transferTo(). + private DataInputStream checksumIn; // checksum datastream + private DataChecksum checksum; // checksum stream + private long offset; // starting position to read + private long endOffset; // ending position + private long blockLength; + private int bytesPerChecksum; // chunk size + private int checksumSize; // checksum size + private boolean corruptChecksumOk; // if need to verify checksum + private boolean chunkOffsetOK; // if need to send chunk offset + private long seqno; // sequence number of packet + + private boolean transferToAllowed = true; + private boolean blockReadFully; //set when the whole block is read + private boolean verifyChecksum; //if true, check is verified while reading + private DataTransferThrottler throttler; + private final String clientTraceFmt; // format of client trace log message + + /** + * Minimum buffer used while sending data to clients. Used only if + * transferTo() is enabled. 64KB is not that large. 
It could be larger, but + * not sure if there will be much more improvement. + */ + private static final int MIN_BUFFER_WITH_TRANSFERTO = 64*1024; + + + BlockSender(Block block, long startOffset, long length, + boolean corruptChecksumOk, boolean chunkOffsetOK, + boolean verifyChecksum, DataNode datanode) throws IOException { + this(block, startOffset, length, corruptChecksumOk, chunkOffsetOK, + verifyChecksum, datanode, null); + } + + BlockSender(Block block, long startOffset, long length, + boolean corruptChecksumOk, boolean chunkOffsetOK, + boolean verifyChecksum, DataNode datanode, String clientTraceFmt) + throws IOException { + this(block, datanode.data.getLength(block), startOffset, length, + corruptChecksumOk, chunkOffsetOK, verifyChecksum, + datanode.transferToAllowed, + (!corruptChecksumOk || datanode.data.metaFileExists(block)) + ? new DataInputStream(new BufferedInputStream( + datanode.data.getMetaDataInputStream(block), BUFFER_SIZE)) + : null, new BlockInputStreamFactory(block, datanode.data), + clientTraceFmt); + } + + public BlockSender(Block block, long blockLength, long startOffset, long length, + boolean corruptChecksumOk, boolean chunkOffsetOK, + boolean verifyChecksum, boolean transferToAllowed, + DataInputStream metadataIn, InputStreamFactory streamFactory + ) throws IOException { + this(block, blockLength, startOffset, length, + corruptChecksumOk, chunkOffsetOK, verifyChecksum, transferToAllowed, + metadataIn, streamFactory, null); + } + + private BlockSender(Block block, long blockLength, long startOffset, long length, + boolean corruptChecksumOk, boolean chunkOffsetOK, + boolean verifyChecksum, boolean transferToAllowed, + DataInputStream metadataIn, InputStreamFactory streamFactory, + String clientTraceFmt) throws IOException { + try { + this.block = block; + this.chunkOffsetOK = chunkOffsetOK; + this.corruptChecksumOk = corruptChecksumOk; + this.verifyChecksum = verifyChecksum; + this.blockLength = blockLength; + this.transferToAllowed = transferToAllowed; + this.clientTraceFmt = clientTraceFmt; + + if ( !corruptChecksumOk || metadataIn != null) { + this.checksumIn = metadataIn; + + // read and handle the common header here. For now just a version + BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn); + short version = header.getVersion(); + + if (version != FSDataset.METADATA_VERSION) { + LOG.warn("Wrong version (" + version + ") for metadata file for " + + block + " ignoring ..."); + } + checksum = header.getChecksum(); + } else { + LOG.warn("Could not find metadata file for " + block); + // This only decides the buffer size. Use BUFFER_SIZE? + checksum = DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_NULL, + 16 * 1024); + } + + /* If bytesPerChecksum is very large, then the metadata file + * is mostly corrupted. For now just truncate bytesPerchecksum to + * blockLength. 
+ */ + bytesPerChecksum = checksum.getBytesPerChecksum(); + if (bytesPerChecksum > 10*1024*1024 && bytesPerChecksum > blockLength){ + checksum = DataChecksum.newDataChecksum(checksum.getChecksumType(), + Math.max((int)blockLength, 10*1024*1024)); + bytesPerChecksum = checksum.getBytesPerChecksum(); + } + checksumSize = checksum.getChecksumSize(); + + if (length < 0) { + length = blockLength; + } + + endOffset = blockLength; + if (startOffset < 0 || startOffset > endOffset + || (length + startOffset) > endOffset) { + String msg = " Offset " + startOffset + " and length " + length + + " don't match block " + block + " ( blockLen " + endOffset + " )"; + LOG.warn("sendBlock() : " + msg); + throw new IOException(msg); + } + + + offset = (startOffset - (startOffset % bytesPerChecksum)); + if (length >= 0) { + // Make sure endOffset points to end of a checksumed chunk. + long tmpLen = startOffset + length; + if (tmpLen % bytesPerChecksum != 0) { + tmpLen += (bytesPerChecksum - tmpLen % bytesPerChecksum); + } + if (tmpLen < endOffset) { + endOffset = tmpLen; + } + } + + // seek to the right offsets + if (offset > 0) { + long checksumSkip = (offset / bytesPerChecksum) * checksumSize; + // note blockInStream is seeked when created below + if (checksumSkip > 0) { + // Should we use seek() for checksum file as well? + IOUtils.skipFully(checksumIn, checksumSkip); + } + } + seqno = 0; + + blockIn = streamFactory.createStream(offset); + } catch (IOException ioe) { + IOUtils.closeStream(this); + IOUtils.closeStream(blockIn); + throw ioe; + } + } + + /** + * close opened files. + */ + public void close() throws IOException { + IOException ioe = null; + // close checksum file + if(checksumIn!=null) { + try { + checksumIn.close(); + } catch (IOException e) { + ioe = e; + } + checksumIn = null; + } + // close data file + if(blockIn!=null) { + try { + blockIn.close(); + } catch (IOException e) { + ioe = e; + } + blockIn = null; + } + // throw IOException if there is any + if(ioe!= null) { + throw ioe; + } + } + + /** + * Converts an IOExcpetion (not subclasses) to SocketException. + * This is typically done to indicate to upper layers that the error + * was a socket error rather than often more serious exceptions like + * disk errors. + */ + private static IOException ioeToSocketException(IOException ioe) { + if (ioe.getClass().equals(IOException.class)) { + // "se" could be a new class in stead of SocketException. + IOException se = new SocketException("Original Exception : " + ioe); + se.initCause(ioe); + /* Change the stacktrace so that original trace is not truncated + * when printed.*/ + se.setStackTrace(ioe.getStackTrace()); + return se; + } + // otherwise just return the same exception. + return ioe; + } + + /** + * Sends upto maxChunks chunks of data. + * + * When blockInPosition is >= 0, assumes 'out' is a + * {@link SocketOutputStream} and tries + * {@link SocketOutputStream#transferToFully(FileChannel, long, int)} to + * send data (and updates blockInPosition). + */ + private int sendChunks(ByteBuffer pkt, int maxChunks, OutputStream out) + throws IOException { + // Sends multiple chunks in one packet with a single write(). 
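+ // The packet written here mirrors what BlockReceiver expects:
+ //   int packetLen, long offset, long seqno, byte lastPacketInBlock,
+ //   int dataLen, checksum bytes, data bytes.
+ // When transferTo() is in use (blockInPosition >= 0) only the header and
+ // checksums are staged in 'pkt'; the data bytes are sent straight from
+ // the block file's FileChannel.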
+
+ int len = (int) Math.min(endOffset - offset,
+ (((long) bytesPerChecksum) * ((long) maxChunks)));
+ if (len == 0) {
+ return 0;
+ }
+
+ int numChunks = (len + bytesPerChecksum - 1)/bytesPerChecksum;
+ int packetLen = len + numChunks*checksumSize + 4;
+ pkt.clear();
+
+ // write packet header
+ pkt.putInt(packetLen);
+ pkt.putLong(offset);
+ pkt.putLong(seqno);
+ pkt.put((byte)((offset + len >= endOffset) ? 1 : 0));
+ //why no ByteBuf.putBoolean()?
+ pkt.putInt(len);
+
+ int checksumOff = pkt.position();
+ int checksumLen = numChunks * checksumSize;
+ byte[] buf = pkt.array();
+
+ if (checksumSize > 0 && checksumIn != null) {
+ try {
+ checksumIn.readFully(buf, checksumOff, checksumLen);
+ } catch (IOException e) {
+ LOG.warn(" Could not read or failed to verify checksum for data" +
+ " at offset " + offset + " for block " + block + " got : " +
+ StringUtils.stringifyException(e));
+ IOUtils.closeStream(checksumIn);
+ checksumIn = null;
+ if (corruptChecksumOk) {
+ if (checksumOff < checksumLen) {
+ // Just fill the array with zeros.
+ Arrays.fill(buf, checksumOff, checksumLen, (byte) 0);
+ }
+ } else {
+ throw e;
+ }
+ }
+ }
+
+ int dataOff = checksumOff + checksumLen;
+
+ if (blockInPosition < 0) {
+ //normal transfer
+ IOUtils.readFully(blockIn, buf, dataOff, len);
+
+ if (verifyChecksum) {
+ int dOff = dataOff;
+ int cOff = checksumOff;
+ int dLeft = len;
+
+ for (int i=0; i<numChunks; i++) {
+ checksum.reset();
+ int dLen = Math.min(dLeft, bytesPerChecksum);
+ checksum.update(buf, dOff, dLen);
+ if (!checksum.compare(buf, cOff)) {
+ throw new ChecksumException("Checksum failed at " +
+ (offset + len - dLeft), len);
+ }
+ dLeft -= dLen;
+ dOff += dLen;
+ cOff += checksumSize;
+ }
+ }
+ }
+
+ try {
+ if (blockInPosition >= 0) {
+ //use transferTo(). Checks on out and blockIn are already done.
+
+ SocketOutputStream sockOut = (SocketOutputStream)out;
+ //first write the packet
+ sockOut.write(buf, 0, dataOff);
+ // no need to flush. since we know out is not a buffered stream.
+
+ sockOut.transferToFully(((FileInputStream)blockIn).getChannel(),
+ blockInPosition, len);
+
+ blockInPosition += len;
+ } else {
+ // normal transfer
+ out.write(buf, 0, dataOff + len);
+ }
+
+ } catch (IOException e) {
+ /* exception while writing to the client (well, with transferTo(),
+ * it could also be while reading from the local file).
+ */
+ throw ioeToSocketException(e);
+ }
+
+ if (throttler != null) { // rebalancing so throttle
+ throttler.throttle(packetLen);
+ }
+
+ return len;
+ }
+
+ /**
+ * sendBlock() is used to read block and its metadata and stream the data to
+ * either a client or to another datanode.
+ *
+ * @param out stream to which the block is written to
+ * @param baseStream optional. if non-null, out is assumed to
+ * be a wrapper over this stream. This enables optimizations for
+ * sending the data, e.g.
+ * {@link SocketOutputStream#transferToFully(FileChannel,
+ * long, int)}.
+ * @param throttler for sending data.
+ * @return total bytes read, including crc.
+ */
+ public long sendBlock(DataOutputStream out, OutputStream baseStream,
+ DataTransferThrottler throttler) throws IOException {
+ if( out == null ) {
+ throw new IOException( "out stream is null" );
+ }
+ this.throttler = throttler;
+
+ long initialOffset = offset;
+ long totalRead = 0;
+ OutputStream streamForSendChunks = out;
+
+ final long startTime = ClientTraceLog.isInfoEnabled() ?
System.nanoTime() : 0; + try { + try { + checksum.writeHeader(out); + if ( chunkOffsetOK ) { + out.writeLong( offset ); + } + out.flush(); + } catch (IOException e) { //socket error + throw ioeToSocketException(e); + } + + int maxChunksPerPacket; + int pktSize = DataNode.PKT_HEADER_LEN + SIZE_OF_INTEGER; + + if (transferToAllowed && !verifyChecksum && + baseStream instanceof SocketOutputStream && + blockIn instanceof FileInputStream) { + + FileChannel fileChannel = ((FileInputStream)blockIn).getChannel(); + + // blockInPosition also indicates sendChunks() uses transferTo. + blockInPosition = fileChannel.position(); + streamForSendChunks = baseStream; + + // assure a mininum buffer size. + maxChunksPerPacket = (Math.max(BUFFER_SIZE, + MIN_BUFFER_WITH_TRANSFERTO) + + bytesPerChecksum - 1)/bytesPerChecksum; + + // allocate smaller buffer while using transferTo(). + pktSize += checksumSize * maxChunksPerPacket; + } else { + maxChunksPerPacket = Math.max(1, + (BUFFER_SIZE + bytesPerChecksum - 1)/bytesPerChecksum); + pktSize += (bytesPerChecksum + checksumSize) * maxChunksPerPacket; + } + + ByteBuffer pktBuf = ByteBuffer.allocate(pktSize); + + while (endOffset > offset) { + long len = sendChunks(pktBuf, maxChunksPerPacket, + streamForSendChunks); + offset += len; + totalRead += len + ((len + bytesPerChecksum - 1)/bytesPerChecksum* + checksumSize); + seqno++; + } + try { + out.writeInt(0); // mark the end of block + out.flush(); + } catch (IOException e) { //socket error + throw ioeToSocketException(e); + } + } finally { + if (clientTraceFmt != null) { + final long endTime = System.nanoTime(); + ClientTraceLog.info(String.format(clientTraceFmt, totalRead, initialOffset, endTime - startTime)); + } + close(); + } + + blockReadFully = (initialOffset == 0 && offset >= blockLength); + + return totalRead; + } + + boolean isBlockReadFully() { + return blockReadFully; + } + + public static interface InputStreamFactory { + public InputStream createStream(long offset) throws IOException; + } + + private static class BlockInputStreamFactory implements InputStreamFactory { + private final Block block; + private final FSDatasetInterface data; + + private BlockInputStreamFactory(Block block, FSDatasetInterface data) { + this.block = block; + this.data = data; + } + + @Override + public InputStream createStream(long offset) throws IOException { + return data.getBlockInputStream(block, offset); + } + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockTransferThrottler.java b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockTransferThrottler.java new file mode 100644 index 0000000..e69de29 diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java new file mode 100644 index 0000000..1cd805d --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java @@ -0,0 +1,968 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.datanode; + +import java.io.BufferedReader; +import java.io.Closeable; +import java.io.DataOutputStream; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.FileReader; +import java.io.IOException; +import java.io.PrintStream; +import java.text.DateFormat; +import java.text.SimpleDateFormat; +import java.util.Arrays; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Random; +import java.util.TreeSet; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.util.DataTransferThrottler; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.StringUtils; + +/* + * This keeps track of blocks and their last verification times. + * Currently it does not modify the metadata for block. + */ + +class DataBlockScanner implements Runnable { + + public static final Log LOG = LogFactory.getLog(DataBlockScanner.class); + + private static final int MAX_SCAN_RATE = 8 * 1024 * 1024; // 8MB per sec + private static final int MIN_SCAN_RATE = 1 * 1024 * 1024; // 1MB per sec + + static final long DEFAULT_SCAN_PERIOD_HOURS = 21*24L; // three weeks + private static final long ONE_DAY = 24*3600*1000L; + + static final DateFormat dateFormat = + new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS"); + + static final String verificationLogFile = "dncp_block_verification.log"; + static final int verficationLogLimit = 5; // * numBlocks. + + private long scanPeriod = DEFAULT_SCAN_PERIOD_HOURS * 3600 * 1000; + DataNode datanode; + FSDataset dataset; + + // sorted set + TreeSet blockInfoSet; + HashMap blockMap; + + long totalScans = 0; + long totalVerifications = 0; // includes remote verification by clients. 
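+ // totalScanErrors counts blocks that also failed verification on the
+ // retry (and were reported to the namenode); totalTransientErrors counts
+ // blocks that failed the first read but passed the second attempt in
+ // verifyBlock().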
+ long totalScanErrors = 0; + long totalTransientErrors = 0; + + long currentPeriodStart = System.currentTimeMillis(); + long bytesLeft = 0; // Bytes to scan in this period + long totalBytesToScan = 0; + + private LogFileHandler verificationLog; + + Random random = new Random(); + + DataTransferThrottler throttler = null; + + private static enum ScanType { + REMOTE_READ, // Verified when a block read by a client etc + VERIFICATION_SCAN, // scanned as part of periodic verfication + NONE, + } + + static class BlockScanInfo implements Comparable { + Block block; + long lastScanTime = 0; + long lastLogTime = 0; + ScanType lastScanType = ScanType.NONE; + boolean lastScanOk = true; + + BlockScanInfo(Block block) { + this.block = block; + } + + public int hashCode() { + return block.hashCode(); + } + + public boolean equals(Object other) { + return other instanceof BlockScanInfo && + compareTo((BlockScanInfo)other) == 0; + } + + long getLastScanTime() { + return ( lastScanType == ScanType.NONE) ? 0 : lastScanTime; + } + + public int compareTo(BlockScanInfo other) { + long t1 = lastScanTime; + long t2 = other.lastScanTime; + return ( t1 < t2 ) ? -1 : + (( t1 > t2 ) ? 1 : block.compareTo(other.block)); + } + } + + DataBlockScanner(DataNode datanode, FSDataset dataset, Configuration conf) { + this.datanode = datanode; + this.dataset = dataset; + scanPeriod = conf.getInt("dfs.datanode.scan.period.hours", 0); + if ( scanPeriod <= 0 ) { + scanPeriod = DEFAULT_SCAN_PERIOD_HOURS; + } + scanPeriod *= 3600 * 1000; + // initialized when the scanner thread is started. + } + + private synchronized boolean isInitiliazed() { + return throttler != null; + } + + private void updateBytesToScan(long len, long lastScanTime) { + // len could be negative when a block is deleted. + totalBytesToScan += len; + if ( lastScanTime < currentPeriodStart ) { + bytesLeft += len; + } + // Should we change throttler bandwidth every time bytesLeft changes? + // not really required. + } + + private synchronized void addBlockInfo(BlockScanInfo info) { + boolean added = blockInfoSet.add(info); + blockMap.put(info.block, info); + + if ( added ) { + LogFileHandler log = verificationLog; + if (log != null) { + log.setMaxNumLines(blockMap.size() * verficationLogLimit); + } + updateBytesToScan(info.block.getNumBytes(), info.lastScanTime); + } + } + + private synchronized void delBlockInfo(BlockScanInfo info) { + boolean exists = blockInfoSet.remove(info); + blockMap.remove(info.block); + if ( exists ) { + LogFileHandler log = verificationLog; + if (log != null) { + log.setMaxNumLines(blockMap.size() * verficationLogLimit); + } + updateBytesToScan(-info.block.getNumBytes(), info.lastScanTime); + } + } + + /** Update blockMap by the given LogEntry */ + private synchronized void updateBlockInfo(LogEntry e) { + BlockScanInfo info = blockMap.get(new Block(e.blockId, 0, e.genStamp)); + + if(info != null && e.verificationTime > 0 && + info.lastScanTime < e.verificationTime) { + delBlockInfo(info); + info.lastScanTime = e.verificationTime; + info.lastScanType = ScanType.VERIFICATION_SCAN; + addBlockInfo(info); + } + } + + private void init() { + + // get the list of blocks and arrange them in random order + Block arr[] = dataset.getBlockReport(); + Collections.shuffle(Arrays.asList(arr)); + + blockInfoSet = new TreeSet(); + blockMap = new HashMap(); + + long scanTime = -1; + for (Block block : arr) { + BlockScanInfo info = new BlockScanInfo( block ); + info.lastScanTime = scanTime--; + //still keep 'info.lastScanType' to NONE. 
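+ // Distinct negative scan times keep the shuffled blocks in a random
+ // order within blockInfoSet (which sorts by lastScanTime) until
+ // assignInitialVerificationTimes() spreads real times over the period.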
+ addBlockInfo(info); + } + + /* Pick the first directory that has any existing scanner log. + * otherwise, pick the first directory. + */ + File dir = null; + FSDataset.FSVolume[] volumes = dataset.volumes.volumes; + for(FSDataset.FSVolume vol : volumes) { + if (LogFileHandler.isFilePresent(vol.getDir(), verificationLogFile)) { + dir = vol.getDir(); + break; + } + } + if (dir == null) { + dir = volumes[0].getDir(); + } + + try { + // max lines will be updated later during initialization. + verificationLog = new LogFileHandler(dir, verificationLogFile, 100); + } catch (IOException e) { + LOG.warn("Could not open verfication log. " + + "Verification times are not stored."); + } + + synchronized (this) { + throttler = new DataTransferThrottler(200, MAX_SCAN_RATE); + } + } + + private synchronized long getNewBlockScanTime() { + /* If there are a lot of blocks, this returns a random time with in + * the scan period. Otherwise something sooner. + */ + long period = Math.min(scanPeriod, + Math.max(blockMap.size(),1) * 600 * 1000L); + return System.currentTimeMillis() - scanPeriod + + random.nextInt((int)period); + } + + /** Adds block to list of blocks */ + synchronized void addBlock(Block block) { + if (!isInitiliazed()) { + return; + } + + BlockScanInfo info = blockMap.get(block); + if ( info != null ) { + LOG.warn("Adding an already existing block " + block); + delBlockInfo(info); + } + + info = new BlockScanInfo(block); + info.lastScanTime = getNewBlockScanTime(); + + addBlockInfo(info); + adjustThrottler(); + } + + /** Deletes the block from internal structures */ + synchronized void deleteBlock(Block block) { + if (!isInitiliazed()) { + return; + } + BlockScanInfo info = blockMap.get(block); + if ( info != null ) { + delBlockInfo(info); + } + } + + /** @return the last scan time */ + synchronized long getLastScanTime(Block block) { + if (!isInitiliazed()) { + return 0; + } + BlockScanInfo info = blockMap.get(block); + return info == null? 0: info.lastScanTime; + } + + /** Deletes blocks from internal structures */ + void deleteBlocks(Block[] blocks) { + for ( Block b : blocks ) { + deleteBlock(b); + } + } + + void verifiedByClient(Block block) { + updateScanStatus(block, ScanType.REMOTE_READ, true); + } + + private synchronized void updateScanStatus(Block block, + ScanType type, + boolean scanOk) { + BlockScanInfo info = blockMap.get(block); + + if ( info != null ) { + delBlockInfo(info); + } else { + // It might already be removed. Thats ok, it will be caught next time. + info = new BlockScanInfo(block); + } + + long now = System.currentTimeMillis(); + info.lastScanType = type; + info.lastScanTime = now; + info.lastScanOk = scanOk; + addBlockInfo(info); + + if (type == ScanType.REMOTE_READ) { + totalVerifications++; + } + + // Don't update meta data too often in case of REMOTE_READ + // of if the verification failed. + long diff = now - info.lastLogTime; + if (!scanOk || (type == ScanType.REMOTE_READ && + diff < scanPeriod/3 && diff < ONE_DAY)) { + return; + } + + info.lastLogTime = now; + LogFileHandler log = verificationLog; + if (log != null) { + log.appendLine(LogEntry.newEnry(block, now)); + } + } + + private void handleScanFailure(Block block) { + + LOG.info("Reporting bad block " + block + " to namenode."); + + try { + DatanodeInfo[] dnArr = { new DatanodeInfo(datanode.dnRegistration) }; + LocatedBlock[] blocks = { new LocatedBlock(block, dnArr) }; + datanode.namenode.reportBadBlocks(blocks); + } catch (IOException e){ + /* One common reason is that NameNode could be in safe mode. 
+ * Should we keep on retrying in that case? + */ + LOG.warn("Failed to report bad block " + block + " to namenode : " + + " Exception : " + StringUtils.stringifyException(e)); + } + } + + static private class LogEntry { + long blockId = -1; + long verificationTime = -1; + long genStamp = Block.GRANDFATHER_GENERATION_STAMP; + + /** + * The format consists of single line with multiple entries. each + * entry is in the form : name="value". + * This simple text and easily extendable and easily parseable with a + * regex. + */ + private static Pattern entryPattern = + Pattern.compile("\\G\\s*([^=\\p{Space}]+)=\"(.*?)\"\\s*"); + + static String newEnry(Block block, long time) { + return "date=\"" + dateFormat.format(new Date(time)) + "\"\t " + + "time=\"" + time + "\"\t " + + "genstamp=\"" + block.getGenerationStamp() + "\"\t " + + "id=\"" + block.getBlockId() +"\""; + } + + static LogEntry parseEntry(String line) { + LogEntry entry = new LogEntry(); + + Matcher matcher = entryPattern.matcher(line); + while (matcher.find()) { + String name = matcher.group(1); + String value = matcher.group(2); + + try { + if (name.equals("id")) { + entry.blockId = Long.valueOf(value); + } else if (name.equals("time")) { + entry.verificationTime = Long.valueOf(value); + } else if (name.equals("genstamp")) { + entry.genStamp = Long.valueOf(value); + } + } catch(NumberFormatException nfe) { + LOG.warn("Cannot parse line: " + line, nfe); + return null; + } + } + + return entry; + } + } + + private synchronized void adjustThrottler() { + long timeLeft = currentPeriodStart+scanPeriod - System.currentTimeMillis(); + long bw = Math.max(bytesLeft*1000/timeLeft, MIN_SCAN_RATE); + throttler.setBandwidth(Math.min(bw, MAX_SCAN_RATE)); + } + + private void verifyBlock(Block block) { + + BlockSender blockSender = null; + + /* In case of failure, attempt to read second time to reduce + * transient errors. How do we flush block data from kernel + * buffers before the second read? + */ + for (int i=0; i<2; i++) { + boolean second = (i > 0); + + try { + adjustThrottler(); + + blockSender = new BlockSender(block, 0, -1, false, + false, true, datanode); + + DataOutputStream out = + new DataOutputStream(new IOUtils.NullOutputStream()); + + blockSender.sendBlock(out, null, throttler); + + LOG.info((second ? "Second " : "") + + "Verification succeeded for " + block); + + if ( second ) { + totalTransientErrors++; + } + + updateScanStatus(block, ScanType.VERIFICATION_SCAN, true); + + return; + } catch (IOException e) { + + updateScanStatus(block, ScanType.VERIFICATION_SCAN, false); + + // If the block does not exists anymore, then its not an error + if ( dataset.getFile(block) == null ) { + LOG.info("Verification failed for " + block + ". Its ok since " + + "it not in datanode dataset anymore."); + deleteBlock(block); + return; + } + + LOG.warn((second ? "Second " : "First ") + + "Verification failed for " + block + ". 
Exception : " + + StringUtils.stringifyException(e)); + + if (second) { + totalScanErrors++; + datanode.getMetrics().blockVerificationFailures.inc(); + handleScanFailure(block); + return; + } + } finally { + IOUtils.closeStream(blockSender); + datanode.getMetrics().blocksVerified.inc(); + totalScans++; + totalVerifications++; + } + } + } + + private synchronized long getEarliestScanTime() { + if ( blockInfoSet.size() > 0 ) { + return blockInfoSet.first().lastScanTime; + } + return Long.MAX_VALUE; + } + + // Picks one block and verifies it + private void verifyFirstBlock() { + Block block = null; + synchronized (this) { + if ( blockInfoSet.size() > 0 ) { + block = blockInfoSet.first().block; + } + } + + if ( block != null ) { + verifyBlock(block); + } + } + + /** returns false if the process was interrupted + * because the thread is marked to exit. + */ + private boolean assignInitialVerificationTimes() { + int numBlocks = 1; + synchronized (this) { + numBlocks = Math.max(blockMap.size(), 1); + } + + //First udpates the last verification times from the log file. + LogFileHandler.Reader logReader = null; + try { + if (verificationLog != null) { + logReader = verificationLog.new Reader(false); + } + } catch (IOException e) { + LOG.warn("Could not read previous verification times : " + + StringUtils.stringifyException(e)); + } + + if (verificationLog != null) { + verificationLog.updateCurNumLines(); + } + + try { + // update verification times from the verificationLog. + while (logReader != null && logReader.hasNext()) { + if (!datanode.shouldRun || Thread.interrupted()) { + return false; + } + LogEntry entry = LogEntry.parseEntry(logReader.next()); + if (entry != null) { + updateBlockInfo(entry); + } + } + } finally { + IOUtils.closeStream(logReader); + } + + /* Initially spread the block reads over half of + * MIN_SCAN_PERIOD so that we don't keep scanning the + * blocks too quickly when restarted. + */ + long verifyInterval = (long) (Math.min( scanPeriod/2.0/numBlocks, + 10*60*1000 )); + long lastScanTime = System.currentTimeMillis() - scanPeriod; + + /* Before this loop, entries in blockInfoSet that are not + * updated above have lastScanTime of <= 0 . Loop until first entry has + * lastModificationTime > 0. 
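+ *
+ * Unscanned entries are then spaced verifyInterval apart starting at
+ * (now - scanPeriod). As an illustration only: with the default
+ * three-week scan period and, say, 10,000 blocks, verifyInterval is
+ * min(scanPeriod/2/10000, 10 min), roughly 91 seconds between blocks.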
+ */ + synchronized (this) { + if (blockInfoSet.size() > 0 ) { + BlockScanInfo info; + while ((info = blockInfoSet.first()).lastScanTime < 0) { + delBlockInfo(info); + info.lastScanTime = lastScanTime; + lastScanTime += verifyInterval; + addBlockInfo(info); + } + } + } + + return true; + } + + private synchronized void startNewPeriod() { + LOG.info("Starting a new period : work left in prev period : " + + String.format("%.2f%%", (bytesLeft * 100.0)/totalBytesToScan)); + // reset the byte counts : + bytesLeft = totalBytesToScan; + currentPeriodStart = System.currentTimeMillis(); + } + + public void run() { + try { + + init(); + + //Read last verification times + if (!assignInitialVerificationTimes()) { + return; + } + + adjustThrottler(); + + while (datanode.shouldRun && !Thread.interrupted()) { + long now = System.currentTimeMillis(); + synchronized (this) { + if ( now >= (currentPeriodStart + scanPeriod)) { + startNewPeriod(); + } + } + if ( (now - getEarliestScanTime()) >= scanPeriod ) { + verifyFirstBlock(); + } else { + try { + Thread.sleep(1000); + } catch (InterruptedException ignored) {} + } + } + } catch (RuntimeException e) { + LOG.warn("RuntimeException during DataBlockScanner.run() : " + + StringUtils.stringifyException(e)); + throw e; + } finally { + shutdown(); + LOG.info("Exiting DataBlockScanner thread."); + } + } + + synchronized void shutdown() { + LogFileHandler log = verificationLog; + verificationLog = null; + if (log != null) { + log.close(); + } + } + + synchronized void printBlockReport(StringBuilder buffer, + boolean summaryOnly) { + long oneHour = 3600*1000; + long oneDay = 24*oneHour; + long oneWeek = 7*oneDay; + long fourWeeks = 4*oneWeek; + + int inOneHour = 0; + int inOneDay = 0; + int inOneWeek = 0; + int inFourWeeks = 0; + int inScanPeriod = 0; + int neverScanned = 0; + + int total = blockInfoSet.size(); + + long now = System.currentTimeMillis(); + + Date date = new Date(); + + for(Iterator it = blockInfoSet.iterator(); it.hasNext();) { + BlockScanInfo info = it.next(); + + long scanTime = info.getLastScanTime(); + long diff = now - scanTime; + + if (diff <= oneHour) inOneHour++; + if (diff <= oneDay) inOneDay++; + if (diff <= oneWeek) inOneWeek++; + if (diff <= fourWeeks) inFourWeeks++; + if (diff <= scanPeriod) inScanPeriod++; + if (scanTime <= 0) neverScanned++; + + if (!summaryOnly) { + date.setTime(scanTime); + String scanType = + (info.lastScanType == ScanType.REMOTE_READ) ? "remote" : + ((info.lastScanType == ScanType.VERIFICATION_SCAN) ? "local" : + "none"); + buffer.append(String.format("%-26s : status : %-6s type : %-6s" + + " scan time : " + + "%-15d %s\n", info.block, + (info.lastScanOk ? "ok" : "failed"), + scanType, scanTime, + (scanTime <= 0) ? "not yet verified" : + dateFormat.format(date))); + } + } + + double pctPeriodLeft = (scanPeriod + currentPeriodStart - now) + *100.0/scanPeriod; + double pctProgress = (totalBytesToScan == 0) ? 
100 : + (totalBytesToScan-bytesLeft)*10000.0/totalBytesToScan/ + (100-pctPeriodLeft+1e-10); + + buffer.append(String.format("\nTotal Blocks : %6d" + + "\nVerified in last hour : %6d" + + "\nVerified in last day : %6d" + + "\nVerified in last week : %6d" + + "\nVerified in last four weeks : %6d" + + "\nVerified in SCAN_PERIOD : %6d" + + "\nNot yet verified : %6d" + + "\nVerified since restart : %6d" + + "\nScans since restart : %6d" + + "\nScan errors since restart : %6d" + + "\nTransient scan errors : %6d" + + "\nCurrent scan rate limit KBps : %6d" + + "\nProgress this period : %6.0f%%" + + "\nTime left in cur period : %6.2f%%" + + "\n", + total, inOneHour, inOneDay, inOneWeek, + inFourWeeks, inScanPeriod, neverScanned, + totalVerifications, totalScans, + totalScanErrors, totalTransientErrors, + Math.round(throttler.getBandwidth()/1024.0), + pctProgress, pctPeriodLeft)); + } + + /** + * This class takes care of log file used to store the last verification + * times of the blocks. It rolls the current file when it is too big etc. + * If there is an error while writing, it stops updating with an error + * message. + */ + private static class LogFileHandler { + + private static final String curFileSuffix = ".curr"; + private static final String prevFileSuffix = ".prev"; + + // Don't roll files more often than this + private static final long minRollingPeriod = 6 * 3600 * 1000L; // 6 hours + private static final long minWarnPeriod = minRollingPeriod; + private static final int minLineLimit = 1000; + + + static boolean isFilePresent(File dir, String filePrefix) { + return new File(dir, filePrefix + curFileSuffix).exists() || + new File(dir, filePrefix + prevFileSuffix).exists(); + } + private File curFile; + private File prevFile; + + private int maxNumLines = -1; // not very hard limit on number of lines. + private int curNumLines = -1; + + long lastWarningTime = 0; + + private PrintStream out; + + int numReaders = 0; + + /** + * Opens the log file for appending. + * Note that rolling will happen only after "updateLineCount()" is + * called. This is so that line count could be updated in a separate + * thread without delaying start up. + * + * @param dir where the logs files are located. + * @param filePrefix prefix of the file. + * @param maxNumLines max lines in a file (its a soft limit). + * @throws IOException + */ + LogFileHandler(File dir, String filePrefix, int maxNumLines) + throws IOException { + curFile = new File(dir, filePrefix + curFileSuffix); + prevFile = new File(dir, filePrefix + prevFileSuffix); + openCurFile(); + curNumLines = -1; + setMaxNumLines(maxNumLines); + } + + // setting takes affect when next entry is added. + synchronized void setMaxNumLines(int maxNumLines) { + this.maxNumLines = Math.max(maxNumLines, minLineLimit); + } + + /** + * Append "\n" + line. + * If the log file need to be rolled, it will done after + * appending the text. + * This does not throw IOException when there is an error while + * appending. Currently does not throw an error even if rolling + * fails (may be it should?). + * return true if append was successful. + */ + synchronized boolean appendLine(String line) { + out.println(); + out.print(line); + curNumLines += (curNumLines < 0) ? 
-1 : 1; + try { + rollIfRequired(); + } catch (IOException e) { + warn("Rolling failed for " + curFile + " : " + e.getMessage()); + return false; + } + return true; + } + + //warns only once in a while + synchronized private void warn(String msg) { + long now = System.currentTimeMillis(); + if ((now - lastWarningTime) >= minWarnPeriod) { + lastWarningTime = now; + LOG.warn(msg); + } + } + + private synchronized void openCurFile() throws FileNotFoundException { + close(); + out = new PrintStream(new FileOutputStream(curFile, true)); + } + + //This reads the current file and updates the count. + void updateCurNumLines() { + int count = 0; + Reader it = null; + try { + for(it = new Reader(true); it.hasNext(); count++) { + it.next(); + } + } catch (IOException e) { + + } finally { + synchronized (this) { + curNumLines = count; + } + IOUtils.closeStream(it); + } + } + + private void rollIfRequired() throws IOException { + if (curNumLines < maxNumLines || numReaders > 0) { + return; + } + + long now = System.currentTimeMillis(); + if (now < minRollingPeriod) { + return; + } + + if (!prevFile.delete() && prevFile.exists()) { + throw new IOException("Could not delete " + prevFile); + } + + close(); + + if (!curFile.renameTo(prevFile)) { + openCurFile(); + throw new IOException("Could not rename " + curFile + + " to " + prevFile); + } + + openCurFile(); + updateCurNumLines(); + } + + synchronized void close() { + if (out != null) { + out.close(); + out = null; + } + } + + /** + * This is used to read the lines in order. + * If the data is not read completely (i.e, untill hasNext() returns + * false), it needs to be explicitly + */ + private class Reader implements Iterator, Closeable { + + BufferedReader reader; + File file; + String line; + boolean closed = false; + + private Reader(boolean skipPrevFile) throws IOException { + synchronized (LogFileHandler.this) { + numReaders++; + } + reader = null; + file = (skipPrevFile) ? curFile : prevFile; + readNext(); + } + + private boolean openFile() throws IOException { + + for(int i=0; i<2; i++) { + if (reader != null || i > 0) { + // move to next file + file = (file == prevFile) ? curFile : null; + } + if (file == null) { + return false; + } + if (file.exists()) { + break; + } + } + + if (reader != null ) { + reader.close(); + reader = null; + } + + reader = new BufferedReader(new FileReader(file)); + return true; + } + + // read next line if possible. + private void readNext() throws IOException { + line = null; + try { + if (reader != null && (line = reader.readLine()) != null) { + return; + } + if (line == null) { + // move to the next file. 
+ if (openFile()) { + readNext(); + } + } + } finally { + if (!hasNext()) { + close(); + } + } + } + + public boolean hasNext() { + return line != null; + } + + public String next() { + String curLine = line; + try { + readNext(); + } catch (IOException e) { + LOG.info("Could not reade next line in LogHandler : " + + StringUtils.stringifyException(e)); + } + return curLine; + } + + public void remove() { + throw new RuntimeException("remove() is not supported."); + } + + public void close() throws IOException { + if (!closed) { + try { + if (reader != null) { + reader.close(); + } + } finally { + file = null; + reader = null; + closed = true; + synchronized (LogFileHandler.this) { + numReaders--; + assert(numReaders >= 0); + } + } + } + } + } + } + + public static class Servlet extends HttpServlet { + + public void doGet(HttpServletRequest request, + HttpServletResponse response) throws IOException { + + response.setContentType("text/plain"); + + DataBlockScanner blockScanner = (DataBlockScanner) + getServletContext().getAttribute("datanode.blockScanner"); + + boolean summary = (request.getParameter("listblocks") == null); + + StringBuilder buffer = new StringBuilder(8*1024); + if (blockScanner == null) { + buffer.append("Periodic block scanner is not running. " + + "Please check the datanode log if this is unexpected."); + } else if (blockScanner.isInitiliazed()) { + blockScanner.printBlockReport(buffer, summary); + } else { + buffer.append("Periodic block scanner is not yet initialized. " + + "Please check back again after some time."); + } + response.getWriter().write(buffer.toString()); // extra copy! + } + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java new file mode 100644 index 0000000..1c82c83 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -0,0 +1,1750 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
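The servlet above serves the scanner state as plain text on the datanode's HTTP info port. A minimal client sketch, assuming the usual default info port 50075 and an invented host name; append "?listblocks" to get one line per block:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.URL;

    public class BlockScannerReportClient {
      public static void main(String[] args) throws Exception {
        // Summary report; "?listblocks" switches to the per-block listing.
        URL url = new URL("http://datanode.example.com:50075/blockScannerReport");
        BufferedReader in = new BufferedReader(new InputStreamReader(url.openStream()));
        for (String line; (line = in.readLine()) != null; ) {
          System.out.println(line);
        }
        in.close();
      }
    }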
+ */ +package org.apache.hadoop.hdfs.server.datanode; + +import java.io.BufferedOutputStream; +import java.io.DataOutputStream; +import java.io.File; +import java.io.IOException; +import java.io.OutputStream; +import java.lang.reflect.Proxy; +import java.net.InetSocketAddress; +import java.net.ServerSocket; +import java.net.Socket; +import java.net.SocketTimeoutException; +import java.net.UnknownHostException; +import java.nio.channels.ServerSocketChannel; +import java.nio.channels.SocketChannel; +import java.security.NoSuchAlgorithmException; +import java.security.SecureRandom; +import java.util.AbstractList; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.hdfs.HDFSPolicyProvider; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; +import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; +import org.apache.hadoop.hdfs.protocol.DataTransferProtocol; +import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.UnregisteredDatanodeException; +import org.apache.hadoop.hdfs.protocol.ProtocolCompatible; +import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.GenerationStamp; +import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; +import org.apache.hadoop.hdfs.server.common.Storage; +import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.server.namenode.StreamFile; +import org.apache.hadoop.hdfs.server.protocol.BlockCommand; +import org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo; +import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; +import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; +import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException; +import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; +import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand; +import org.apache.hadoop.http.HttpServer; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.net.DNS; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.authorize.ConfiguredPolicy; +import org.apache.hadoop.security.authorize.PolicyProvider; +import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; +import org.apache.hadoop.util.Daemon; +import org.apache.hadoop.util.DiskChecker; +import 
org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.DiskChecker.DiskErrorException; +import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; + +/********************************************************** + * DataNode is a class (and program) that stores a set of + * blocks for a DFS deployment. A single deployment can + * have one or many DataNodes. Each DataNode communicates + * regularly with a single NameNode. It also communicates + * with client code and other DataNodes from time to time. + * + * DataNodes store a series of named blocks. The DataNode + * allows client code to read these blocks, or to write new + * block data. The DataNode may also, in response to instructions + * from its NameNode, delete blocks or copy blocks to/from other + * DataNodes. + * + * The DataNode maintains just one critical table: + * block-> stream of bytes (of BLOCK_SIZE or less) + * + * This info is stored on a local disk. The DataNode + * reports the table's contents to the NameNode upon startup + * and every so often afterwards. + * + * DataNodes spend their lives in an endless loop of asking + * the NameNode for something to do. A NameNode cannot connect + * to a DataNode directly; a NameNode simply returns values from + * functions invoked by a DataNode. + * + * DataNodes maintain an open server socket so that client code + * or other DataNodes can read/write data. The host/port for + * this server is reported to the NameNode, which then sends that + * information to clients or other DataNodes that might be interested. + * + **********************************************************/ +public class DataNode extends Configured + implements InterDatanodeProtocol, ClientDatanodeProtocol, FSConstants, Runnable { + public static final Log LOG = LogFactory.getLog(DataNode.class); + + static{ + Configuration.addDefaultResource("hdfs-default.xml"); + Configuration.addDefaultResource("hdfs-site.xml"); + } + + public static final String DN_CLIENTTRACE_FORMAT = + "src: %s" + // src IP + ", dest: %s" + // dst IP + ", bytes: %s" + // byte count + ", op: %s" + // operation + ", cliID: %s" + // DFSClient id + ", offset: %s" + // offset + ", srvID: %s" + // DatanodeRegistration + ", blockid: %s" + // block id + ", duration: %s"; // duration time + + static final Log ClientTraceLog = + LogFactory.getLog(DataNode.class.getName() + ".clienttrace"); + + /** + * Use {@link NetUtils#createSocketAddr(String)} instead. 
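A compile-level sketch of the poll-driven lifecycle the class comment above describes; the names and the 3-second interval are stand-ins (dfs.heartbeat.interval defaults to 3 seconds), not the actual fields of this class:

    public class DataNodePollLoopSketch {
      private volatile boolean shouldRun = true;
      private long lastHeartbeat = 0;
      private static final long HEARTBEAT_INTERVAL_MS = 3 * 1000L;

      void offerServiceOutline() throws InterruptedException {
        while (shouldRun) {
          long now = System.currentTimeMillis();
          if (now - lastHeartbeat > HEARTBEAT_INTERVAL_MS) {
            lastHeartbeat = now;
            // 1. heartbeat: report capacity/usage, execute any DatanodeCommands returned
          }
          // 2. blockReceived: tell the NameNode about freshly written replicas
          // 3. blockReport: periodically send the full replica list
          Thread.sleep(1000);   // 4. sleep until the next heartbeat is due or work arrives
        }
      }
    }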
+ */ + @Deprecated + public static InetSocketAddress createSocketAddr(String target + ) throws IOException { + return NetUtils.createSocketAddr(target); + } + + public DatanodeProtocol namenode = null; + public FSDatasetInterface data = null; + public DatanodeRegistration dnRegistration = null; + + volatile boolean shouldRun = true; + private LinkedList receivedBlockList = new LinkedList(); + /** list of blocks being recovered */ + private final Map ongoingRecovery = new HashMap(); + private LinkedList delHints = new LinkedList(); + public final static String EMPTY_DEL_HINT = ""; + AtomicInteger xmitsInProgress = new AtomicInteger(); + Daemon dataXceiverServer = null; + ThreadGroup threadGroup = null; + long blockReportInterval; + //disallow the sending of BR before instructed to do so + long lastBlockReport = 0; + boolean resetBlockReportTime = true; + long initialBlockReportDelay = BLOCKREPORT_INITIAL_DELAY * 1000L; + long lastHeartbeat = 0; + long heartBeatInterval; + private DataStorage storage = null; + private HttpServer infoServer = null; + DataNodeMetrics myMetrics; + private static InetSocketAddress nameNodeAddr; + protected InetSocketAddress selfAddr; + private static DataNode datanodeObject = null; + private Thread dataNodeThread = null; + String machineName; + private static String dnThreadName; + int socketTimeout; + int socketWriteTimeout = 0; + boolean transferToAllowed = true; + int writePacketSize = 0; + + public DataBlockScanner blockScanner = null; + public Daemon blockScannerThread = null; + + private static final Random R = new Random(); + + // For InterDataNodeProtocol + public Server ipcServer; + + /** + * Current system time. + * @return current time in msec. + */ + static long now() { + return System.currentTimeMillis(); + } + + /** + * Create the DataNode given a configuration and an array of dataDirs. + * 'dataDirs' is where the blocks are stored. + */ + DataNode(Configuration conf, + AbstractList dataDirs) throws IOException { + super(conf); + datanodeObject = this; + + try { + startDataNode(conf, dataDirs); + } catch (IOException ie) { + LOG.info("Failed to start datanode " + StringUtils.stringifyException(ie)); + shutdown(); + throw ie; + } + } + + + /** + * This method starts the data node with the specified conf. + * + * @param conf - the configuration + * if conf's CONFIG_PROPERTY_SIMULATED property is set + * then a simulated storage based data node is created. + * + * @param dataDirs - only for a non-simulated storage data node + * @throws IOException + */ + void startDataNode(Configuration conf, + AbstractList dataDirs + ) throws IOException { + // use configured nameserver & interface to get local hostname + if (conf.get("slave.host.name") != null) { + machineName = conf.get("slave.host.name"); + } + if (machineName == null) { + machineName = DNS.getDefaultHost( + conf.get("dfs.datanode.dns.interface","default"), + conf.get("dfs.datanode.dns.nameserver","default")); + } + InetSocketAddress nameNodeAddr = DataNode.getNameNodeAddress(conf);// was NameNode.getAddress(conf); + + this.socketTimeout = conf.getInt("dfs.socket.timeout", + HdfsConstants.READ_TIMEOUT); + this.socketWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout", + HdfsConstants.WRITE_TIMEOUT); + /* Based on results on different platforms, we might need set the default + * to false on some of them. 
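The timeout and packet-size keys read around this point can also be set programmatically. A sketch with the defaults as I read them from the constants referenced here (treat the exact values as assumptions):

    org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
    conf.setInt("dfs.socket.timeout", 60 * 1000);                    // HdfsConstants.READ_TIMEOUT
    conf.setInt("dfs.datanode.socket.write.timeout", 8 * 60 * 1000); // HdfsConstants.WRITE_TIMEOUT
    conf.setBoolean("dfs.datanode.transferTo.allowed", true);
    conf.setInt("dfs.write.packet.size", 64 * 1024);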
*/ + this.transferToAllowed = conf.getBoolean("dfs.datanode.transferTo.allowed", + true); + this.writePacketSize = conf.getInt("dfs.write.packet.size", 64*1024); + String address = + NetUtils.getServerAddress(conf, + "dfs.datanode.bindAddress", + "dfs.datanode.port", + "dfs.datanode.address"); + InetSocketAddress socAddr = NetUtils.createSocketAddr(address); + int tmpPort = socAddr.getPort(); + storage = new DataStorage(); + // construct registration + this.dnRegistration = new DatanodeRegistration(machineName + ":" + tmpPort); + + // connect to name node + this.namenode = (DatanodeProtocol) + RPC.waitForProxy(DatanodeProtocol.class, + DatanodeProtocol.versionID, + nameNodeAddr, + conf); + // get version and id info from the name-node + NamespaceInfo nsInfo = handshake(); + StartupOption startOpt = getStartupOption(conf); + assert startOpt != null : "Startup option must be set."; + + boolean simulatedFSDataset = + conf.getBoolean("dfs.datanode.simulateddatastorage", false); + if (simulatedFSDataset) { + setNewStorageID(dnRegistration); + dnRegistration.storageInfo.layoutVersion = FSConstants.LAYOUT_VERSION; + dnRegistration.storageInfo.namespaceID = nsInfo.namespaceID; + // it would have been better to pass storage as a parameter to + // constructor below - need to augment ReflectionUtils used below. + conf.set("StorageId", dnRegistration.getStorageID()); + try { + //Equivalent of following (can't do because Simulated is in test dir) + // this.data = new SimulatedFSDataset(conf); + this.data = (FSDatasetInterface) ReflectionUtils.newInstance( + Class.forName("org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset"), conf); + } catch (ClassNotFoundException e) { + throw new IOException(StringUtils.stringifyException(e)); + } + } else { // real storage + // read storage info, lock data dirs and transition fs state if necessary + storage.recoverTransitionRead(nsInfo, dataDirs, startOpt); + // adjust + this.dnRegistration.setStorageInfo(storage); + // initialize data node internal structure + this.data = new FSDataset(storage, conf); + } + + + // find free port + ServerSocket ss = (socketWriteTimeout > 0) ? + ServerSocketChannel.open().socket() : new ServerSocket(); + Server.bind(ss, socAddr, + conf.getInt("dfs.datanode.xceiver.listen.queue.size", 128)); + ss.setReceiveBufferSize(DEFAULT_DATA_SOCKET_SIZE); + // adjust machine name with the actual port + tmpPort = ss.getLocalPort(); + selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(), + tmpPort); + this.dnRegistration.setName(machineName + ":" + tmpPort); + LOG.info("Opened info server at " + tmpPort); + + this.threadGroup = new ThreadGroup("dataXceiverServer"); + this.dataXceiverServer = new Daemon(threadGroup, + new DataXceiverServer(ss, conf, this)); + this.threadGroup.setDaemon(true); // auto destroy when empty + + this.blockReportInterval = + conf.getLong("dfs.blockreport.intervalMsec", BLOCKREPORT_INTERVAL); + this.initialBlockReportDelay = conf.getLong("dfs.blockreport.initialDelay", + BLOCKREPORT_INITIAL_DELAY)* 1000L; + if (this.initialBlockReportDelay >= blockReportInterval) { + this.initialBlockReportDelay = 0; + LOG.info("dfs.blockreport.initialDelay is greater than " + + "dfs.blockreport.intervalMsec." 
+ " Setting initial delay to 0 msec:"); + } + this.heartBeatInterval = conf.getLong("dfs.heartbeat.interval", HEARTBEAT_INTERVAL) * 1000L; + DataNode.nameNodeAddr = nameNodeAddr; + + //initialize periodic block scanner + String reason = null; + if (conf.getInt("dfs.datanode.scan.period.hours", 0) < 0) { + reason = "verification is turned off by configuration"; + } else if ( !(data instanceof FSDataset) ) { + reason = "verifcation is supported only with FSDataset"; + } + if ( reason == null ) { + blockScanner = new DataBlockScanner(this, (FSDataset)data, conf); + } else { + LOG.info("Periodic Block Verification is disabled because " + + reason + "."); + } + + //create a servlet to serve full-file content + String infoAddr = + NetUtils.getServerAddress(conf, + "dfs.datanode.info.bindAddress", + "dfs.datanode.info.port", + "dfs.datanode.http.address"); + InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr); + String infoHost = infoSocAddr.getHostName(); + int tmpInfoPort = infoSocAddr.getPort(); + this.infoServer = new HttpServer("datanode", infoHost, tmpInfoPort, + tmpInfoPort == 0, conf); + if (conf.getBoolean("dfs.https.enable", false)) { + boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false); + InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get( + "dfs.datanode.https.address", infoHost + ":" + 0)); + Configuration sslConf = new Configuration(false); + sslConf.addResource(conf.get("dfs.https.server.keystore.resource", + "ssl-server.xml")); + this.infoServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth); + } + this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class); + this.infoServer.addInternalServlet(null, "/getFileChecksum/*", + FileChecksumServlets.GetServlet.class); + this.infoServer.setAttribute("datanode.blockScanner", blockScanner); + this.infoServer.addServlet(null, "/blockScannerReport", + DataBlockScanner.Servlet.class); + this.infoServer.start(); + // adjust info port + this.dnRegistration.setInfoPort(this.infoServer.getPort()); + myMetrics = new DataNodeMetrics(conf, dnRegistration.getStorageID()); + + // set service-level authorization security policy + if (conf.getBoolean( + ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) { + PolicyProvider policyProvider = + (PolicyProvider)(ReflectionUtils.newInstance( + conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG, + HDFSPolicyProvider.class, PolicyProvider.class), + conf)); + SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider)); + } + + //init ipc server + InetSocketAddress ipcAddr = NetUtils.createSocketAddr( + conf.get("dfs.datanode.ipc.address")); + ipcServer = RPC.getServer(this, ipcAddr.getHostName(), ipcAddr.getPort(), + conf.getInt("dfs.datanode.handler.count", 3), false, conf); + ipcServer.start(); + dnRegistration.setIpcPort(ipcServer.getListenerAddress().getPort()); + + LOG.info("dnRegistration = " + dnRegistration); + } + + /** + * Creates either NIO or regular depending on socketWriteTimeout. + */ + protected Socket newSocket() throws IOException { + return (socketWriteTimeout > 0) ? 
+ SocketChannel.open().socket() : new Socket(); + } + + private NamespaceInfo handshake() throws IOException { + NamespaceInfo nsInfo = new NamespaceInfo(); + while (shouldRun) { + try { + nsInfo = namenode.versionRequest(); + break; + } catch(SocketTimeoutException e) { // namenode is busy + LOG.info("Problem connecting to server: " + getNameNodeAddr()); + try { + Thread.sleep(1000); + } catch (InterruptedException ie) {} + } + } + String errorMsg = null; + // verify build version + if( ! nsInfo.getBuildVersion().equals( Storage.getBuildVersion() )) { + errorMsg = "Incompatible build versions: namenode BV = " + + nsInfo.getBuildVersion() + "; datanode BV = " + + Storage.getBuildVersion(); + LOG.warn( errorMsg ); + try { + namenode.errorReport( dnRegistration, + DatanodeProtocol.NOTIFY, errorMsg ); + } catch( SocketTimeoutException e ) { // namenode is busy + LOG.info("Problem connecting to server: " + getNameNodeAddr()); + } + } + assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() : + "Data-node and name-node layout versions must be the same." + + "Expected: "+ FSConstants.LAYOUT_VERSION + " actual "+ nsInfo.getLayoutVersion(); + return nsInfo; + } + + /** Return the DataNode object + * + */ + public static DataNode getDataNode() { + return datanodeObject; + } + + public static InterDatanodeProtocol createInterDataNodeProtocolProxy( + DatanodeID datanodeid, Configuration conf) throws IOException { + InetSocketAddress addr = NetUtils.createSocketAddr( + datanodeid.getHost() + ":" + datanodeid.getIpcPort()); + if (InterDatanodeProtocol.LOG.isDebugEnabled()) { + InterDatanodeProtocol.LOG.info("InterDatanodeProtocol addr=" + addr); + } + return (InterDatanodeProtocol)RPC.getProxy(InterDatanodeProtocol.class, + InterDatanodeProtocol.versionID, addr, conf); + } + + /** + * This method returns the address namenode uses to communicate with + * datanodes. If this address is not configured the default NameNode + * address is used, as it is running only one RPC server. + * If it is running multiple servers this address cannot be used by clients!! + * @param conf + * @return + */ + public static InetSocketAddress getNameNodeAddress(Configuration conf) { + InetSocketAddress addr = null; + addr = NameNode.getDNProtocolAddress(conf); + if (addr != null) { + return addr; + } + return NameNode.getAddress(conf); + } + + public InetSocketAddress getNameNodeAddr() { + return nameNodeAddr; + } + + public InetSocketAddress getSelfAddr() { + return selfAddr; + } + + DataNodeMetrics getMetrics() { + return myMetrics; + } + + /** + * Return the namenode's identifier + */ + public String getNamenode() { + //return namenode.toString(); + return ""; + } + + public static void setNewStorageID(DatanodeRegistration dnReg) { + /* Return + * "DS-randInt-ipaddr-currentTimeMillis" + * It is considered extermely rare for all these numbers to match + * on a different machine accidentally for the following + * a) SecureRandom(INT_MAX) is pretty much random (1 in 2 billion), and + * b) Good chance ip address would be different, and + * c) Even on the same machine, Datanode is designed to use different ports. + * d) Good chance that these are started at different times. + * For a confict to occur all the 4 above have to match!. + * The format of this string can be changed anytime in future without + * affecting its functionality. 
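What a generated storage ID looks like, following the layout assembled just below (every value here is invented for illustration; 50010 is the usual default data-transfer port):

    int rand = 1524687932;            // SecureRandom value in [0, Integer.MAX_VALUE)
    String ip = "10.1.2.3";
    int port = 50010;
    long now = 1291248000000L;        // System.currentTimeMillis()
    String storageID = "DS-" + rand + "-" + ip + "-" + port + "-" + now;
    // => "DS-1524687932-10.1.2.3-50010-1291248000000"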
+ */ + String ip = "unknownIP"; + try { + ip = DNS.getDefaultIP("default"); + } catch (UnknownHostException ignored) { + LOG.warn("Could not find ip address of \"default\" interface."); + } + + int rand = 0; + try { + rand = SecureRandom.getInstance("SHA1PRNG").nextInt(Integer.MAX_VALUE); + } catch (NoSuchAlgorithmException e) { + LOG.warn("Could not use SecureRandom"); + rand = R.nextInt(Integer.MAX_VALUE); + } + dnReg.storageID = "DS-" + rand + "-"+ ip + "-" + dnReg.getPort() + "-" + + System.currentTimeMillis(); + } + /** + * Register datanode + * <p>
+ * The datanode needs to register with the namenode on startup in order + * 1) to report which storage it is serving now and + * 2) to receive a registrationID + * issued by the namenode to recognize registered datanodes. + * + * @see FSNamesystem#registerDatanode(DatanodeRegistration) + * @throws IOException + */ + private void register() throws IOException { + if (dnRegistration.getStorageID().equals("")) { + setNewStorageID(dnRegistration); + } + while(shouldRun) { + try { + // reset name to machineName. Mainly for web interface. + dnRegistration.name = machineName + ":" + dnRegistration.getPort(); + dnRegistration = namenode.register(dnRegistration); + break; + } catch(SocketTimeoutException e) { // namenode is busy + LOG.info("Problem connecting to server: " + getNameNodeAddr()); + try { + Thread.sleep(1000); + } catch (InterruptedException ie) {} + } + } + assert ("".equals(storage.getStorageID()) + && !"".equals(dnRegistration.getStorageID())) + || storage.getStorageID().equals(dnRegistration.getStorageID()) : + "New storageID can be assigned only if data-node is not formatted"; + if (storage.getStorageID().equals("")) { + storage.setStorageID(dnRegistration.getStorageID()); + storage.writeAll(); + LOG.info("New storage id " + dnRegistration.getStorageID() + + " is assigned to data-node " + dnRegistration.getName()); + } + if(! storage.getStorageID().equals(dnRegistration.getStorageID())) { + throw new IOException("Inconsistent storage IDs. Name-node returned " + + dnRegistration.getStorageID() + + ". Expecting " + storage.getStorageID()); + } + + // random short delay - helps scatter the BR from all DNs + scheduleBlockReport(initialBlockReportDelay); + } + + /** + * Shut down this instance of the datanode. + * Returns only after shutdown is complete. + * This method can only be called by the offerService thread. + * Otherwise, deadlock might occur. + */ + public void shutdown() { + if (infoServer != null) { + try { + infoServer.stop(); + } catch (Exception e) { + LOG.warn("Exception shutting down DataNode", e); + } + } + if (ipcServer != null) { + ipcServer.stop(); + } + this.shouldRun = false; + if (dataXceiverServer != null) { + ((DataXceiverServer) this.dataXceiverServer.getRunnable()).kill(); + this.dataXceiverServer.interrupt(); + + // wait for all data receiver threads to exit + if (this.threadGroup != null) { + int retries = 0; + while (true) { + this.threadGroup.interrupt(); + LOG.info("Waiting for threadgroup to exit, active threads is " + + this.threadGroup.activeCount()); + if (this.threadGroup.activeCount() == 0) { + break; + } + try { + if (++retries > 600) { + Thread[] activeThreads = new Thread[this.threadGroup.activeCount()]; + this.threadGroup.enumerate(activeThreads, true); + LOG.info("Active Threads: " + Arrays.toString(activeThreads)); + LOG.warn("Waited for ThreadGroup to be empty for 10 minutes." 
+ + " SHUTTING DOWN NOW"); + break; + } + Thread.sleep(1000); + } catch (InterruptedException e) {} + } + } + // wait for dataXceiveServer to terminate + try { + this.dataXceiverServer.join(); + } catch (InterruptedException ie) { + } + } + + RPC.stopProxy(namenode); // stop the RPC threads + + if(upgradeManager != null) + upgradeManager.shutdownUpgrade(); + if (blockScannerThread != null) { + blockScannerThread.interrupt(); + try { + blockScannerThread.join(3600000L); // wait for at most 1 hour + } catch (InterruptedException ie) { + } + } + if (storage != null) { + try { + this.storage.unlockAll(); + } catch (IOException ie) { + } + } + if (dataNodeThread != null) { + dataNodeThread.interrupt(); + try { + dataNodeThread.join(); + } catch (InterruptedException ie) { + } + } + if (data != null) { + data.shutdown(); + } + if (myMetrics != null) { + myMetrics.shutdown(); + } + } + + + /** Check if there is no space in disk + * @param e that caused this checkDiskError call + **/ + protected void checkDiskError(Exception e ) throws IOException { + + LOG.warn("checkDiskError: exception: ", e); + + if (e.getMessage() != null && + e.getMessage().startsWith("No space left on device")) { + throw new DiskOutOfSpaceException("No space left on device"); + } else { + checkDiskError(); + } + } + + /** + * Check if there is a disk failure and if so, handle the error + * + **/ + protected void checkDiskError( ) { + try { + data.checkDataDir(); + } catch(DiskErrorException de) { + handleDiskError(de.getMessage()); + } + } + + private void handleDiskError(String errMsgr) { + boolean hasEnoughResource = data.hasEnoughResource(); + LOG.warn("DataNode.handleDiskError: Keep Running: " + hasEnoughResource); + + //if hasEnoughtResource = true - more volumes are available, so we don't want + // to shutdown DN completely and don't want NN to remove it. + int dp_error = DatanodeProtocol.DISK_ERROR; + if(hasEnoughResource == false) { + // DN will be shutdown and NN should remove it + dp_error = DatanodeProtocol.FATAL_DISK_ERROR; + } + //inform NameNode + try { + namenode.errorReport( + dnRegistration, dp_error, errMsgr); + } catch(IOException ignored) { + } + + + if(hasEnoughResource) { + scheduleBlockReport(0); + return; // do not shutdown + } + + LOG.warn("DataNode is shutting down.\n" + errMsgr); + shouldRun = false; + } + + /** Number of concurrent xceivers per node. */ + int getXceiverCount() { + return threadGroup == null ? 0 : threadGroup.activeCount(); + } + + /** + * Main loop for the DataNode. Runs until shutdown, + * forever calling remote NameNode functions. + */ + public void offerService() throws Exception { + + LOG.info("using BLOCKREPORT_INTERVAL of " + blockReportInterval + "msec" + + " Initial delay: " + initialBlockReportDelay + "msec"); + + // + // Now loop for a long time.... 
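Worked numbers for the heartbeat check in the loop that follows (a sketch with an invented clock; 3 seconds is the dfs.heartbeat.interval default):

    long heartBeatInterval = 3 * 1000L;
    long lastHeartbeat = 10000L;                                     // previous heartbeat at t = 10.0s
    long startTime = 12500L;                                         // this iteration runs at t = 12.5s
    boolean due = startTime - lastHeartbeat > heartBeatInterval;     // false, only 2.5s elapsed
    long waitTime = heartBeatInterval - (startTime - lastHeartbeat); // 500 ms left to sleep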
+ // + + while (shouldRun) { + try { + long startTime = now(); + + // + // Every so often, send heartbeat or block-report + // + + if (startTime - lastHeartbeat > heartBeatInterval) { + // + // All heartbeat messages include following info: + // -- Datanode name + // -- data transfer port + // -- Total capacity + // -- Bytes remaining + // + lastHeartbeat = startTime; + DatanodeCommand[] cmds = namenode.sendHeartbeat(dnRegistration, + data.getCapacity(), + data.getDfsUsed(), + data.getRemaining(), + xmitsInProgress.get(), + getXceiverCount()); + myMetrics.heartbeats.inc(now() - startTime); + //LOG.info("Just sent heartbeat, with name " + localName); + if (!processCommand(cmds)) + continue; + } + + // check if there are newly received blocks + Block [] blockArray=null; + String [] delHintArray=null; + synchronized(receivedBlockList) { + synchronized(delHints) { + int numBlocks = receivedBlockList.size(); + if (numBlocks > 0) { + if(numBlocks!=delHints.size()) { + LOG.warn("Panic: receiveBlockList and delHints are not of the same length" ); + } + // + // Send newly-received blockids to namenode + // + blockArray = receivedBlockList.toArray(new Block[numBlocks]); + delHintArray = delHints.toArray(new String[numBlocks]); + } + } + } + if (blockArray != null) { + if(delHintArray == null || delHintArray.length != blockArray.length ) { + LOG.warn("Panic: block array & delHintArray are not the same" ); + } + namenode.blockReceived(dnRegistration, blockArray, delHintArray); + synchronized (receivedBlockList) { + synchronized (delHints) { + for(int i=0; i<blockArray.length; i++) { + receivedBlockList.remove(blockArray[i]); + delHints.remove(delHintArray[i]); + } + } + } + } + + if (startTime - lastBlockReport > blockReportInterval) { + // + // Send latest blockinfo report if timer has expired. + // Get back a list of local block(s) that are obsolete + // and can be safely GC'ed. + // + long brStartTime = now(); + Block[] bReport = data.getBlockReport(); + DatanodeCommand cmd = namenode.blockReport(dnRegistration, + BlockListAsLongs.convertToArrayLongs(bReport)); + long brTime = now() - brStartTime; + myMetrics.blockReports.inc(brTime); + LOG.info("BlockReport of " + bReport.length + + " blocks got processed in " + brTime + " msecs"); + // + // If we have sent the first block report, then wait a random + // time before we start the periodic block reports. + // + if (resetBlockReportTime) { + lastBlockReport = startTime - R.nextInt((int)(blockReportInterval)); + resetBlockReportTime = false; + } else { + /* say the last block report was at 8:20:14. The current report + * should have started around 9:20:14 (default 1 hour interval). + * If current time is : + * 1) normal like 9:20:18, next report should be at 10:20:14 + * 2) unexpected like 11:35:43, next report should be at 12:20:14 + */ + lastBlockReport += (now() - lastBlockReport) / + blockReportInterval * blockReportInterval; + } + processCommand(cmd); + } + + // start block scanner + if (blockScanner != null && blockScannerThread == null && + upgradeManager.isUpgradeCompleted()) { + LOG.info("Starting Periodic block scanner."); + blockScannerThread = new Daemon(blockScanner); + blockScannerThread.start(); + } + + // + // There is no work to do; sleep until heartbeat timer elapses, + // or work arrives, and then iterate again.
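Worked example of the block-report re-alignment above, using the 8:20:14 scenario from the comment (units shortened to seconds here purely for readability):

    long blockReportInterval = 3600;   // one hour, expressed in seconds for this sketch
    long lastBlockReport = 30014;      // 8:20:14 as seconds since midnight
    long now = 34718;                  // 9:38:38, a late iteration
    lastBlockReport += (now - lastBlockReport) / blockReportInterval * blockReportInterval;
    // (34718 - 30014) / 3600 == 1 by integer division, so lastBlockReport becomes 33614
    // (9:20:14), and the next report falls due one interval later, at 10:20:14.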
+ // + long waitTime = heartBeatInterval - (System.currentTimeMillis() - lastHeartbeat); + synchronized(receivedBlockList) { + if (waitTime > 0 && receivedBlockList.size() == 0) { + try { + receivedBlockList.wait(waitTime); + } catch (InterruptedException ie) { + } + } + } // synchronized + } catch(RemoteException re) { + String reClass = re.getClassName(); + if (UnregisteredDatanodeException.class.getName().equals(reClass) || + DisallowedDatanodeException.class.getName().equals(reClass) || + IncorrectVersionException.class.getName().equals(reClass)) { + LOG.warn("DataNode is shutting down: " + + StringUtils.stringifyException(re)); + shutdown(); + return; + } + LOG.warn(StringUtils.stringifyException(re)); + } catch (IOException e) { + LOG.warn(StringUtils.stringifyException(e)); + } + } // while (shouldRun) + } // offerService + + /** + * Process an array of datanode commands + * + * @param cmds an array of datanode commands + * @return true if further processing may be required or false otherwise. + */ + private boolean processCommand(DatanodeCommand[] cmds) { + if (cmds != null) { + for (DatanodeCommand cmd : cmds) { + try { + if (processCommand(cmd) == false) { + return false; + } + } catch (IOException ioe) { + LOG.warn("Error processing datanode Command", ioe); + } + } + } + return true; + } + + /** + * + * @param cmd + * @return true if further processing may be required or false otherwise. + * @throws IOException + */ + private boolean processCommand(DatanodeCommand cmd) throws IOException { + if (cmd == null) + return true; + final BlockCommand bcmd = cmd instanceof BlockCommand? (BlockCommand)cmd: null; + + switch(cmd.getAction()) { + case DatanodeProtocol.DNA_TRANSFER: + // Send a copy of a block to another datanode + transferBlocks(bcmd.getBlocks(), bcmd.getTargets()); + myMetrics.blocksReplicated.inc(bcmd.getBlocks().length); + break; + case DatanodeProtocol.DNA_INVALIDATE: + // + // Some local block(s) are obsolete and can be + // safely garbage-collected. + // + Block toDelete[] = bcmd.getBlocks(); + try { + if (blockScanner != null) { + blockScanner.deleteBlocks(toDelete); + } + data.invalidate(toDelete); + } catch(IOException e) { + checkDiskError(); + throw e; + } + myMetrics.blocksRemoved.inc(toDelete.length); + break; + case DatanodeProtocol.DNA_SHUTDOWN: + // shut down the data node + this.shutdown(); + return false; + case DatanodeProtocol.DNA_REGISTER: + // namenode requested a registration - at start or if NN lost contact + LOG.info("DatanodeCommand action: DNA_REGISTER"); + if (shouldRun) { + register(); + } + break; + case DatanodeProtocol.DNA_FINALIZE: + storage.finalizeUpgrade(); + break; + case UpgradeCommand.UC_ACTION_START_UPGRADE: + // start distributed upgrade here + processDistributedUpgradeCommand((UpgradeCommand)cmd); + break; + case DatanodeProtocol.DNA_RECOVERBLOCK: + recoverBlocks(bcmd.getBlocks(), bcmd.getTargets()); + break; + default: + LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction()); + } + return true; + } + + // Distributed upgrade manager + UpgradeManagerDatanode upgradeManager = new UpgradeManagerDatanode(this); + + private void processDistributedUpgradeCommand(UpgradeCommand comm + ) throws IOException { + assert upgradeManager != null : "DataNode.upgradeManager is null."; + upgradeManager.processUpgradeCommand(comm); + } + + + /** + * Start distributed upgrade if it should be initiated by the data-node. 
+ */ + private void startDistributedUpgradeIfNeeded() throws IOException { + UpgradeManagerDatanode um = DataNode.getDataNode().upgradeManager; + assert um != null : "DataNode.upgradeManager is null."; + if(!um.getUpgradeState()) + return; + um.setUpgradeState(false, um.getUpgradeVersion()); + um.startUpgrade(); + return; + } + + private void transferBlock( Block block, + DatanodeInfo xferTargets[] + ) throws IOException { + if (!data.isValidBlock(block)) { + // block does not exist or is under-construction + String errStr = "Can't send invalid block " + block; + LOG.info(errStr); + namenode.errorReport(dnRegistration, + DatanodeProtocol.INVALID_BLOCK, + errStr); + return; + } + + // Check if NN recorded length matches on-disk length + long onDiskLength = data.getLength(block); + if (block.getNumBytes() > onDiskLength) { + // Shorter on-disk len indicates corruption so report NN the corrupt block + namenode.reportBadBlocks(new LocatedBlock[]{ + new LocatedBlock(block, new DatanodeInfo[] { + new DatanodeInfo(dnRegistration)})}); + LOG.info("Can't replicate block " + block + + " because on-disk length " + onDiskLength + + " is shorter than NameNode recorded length " + block.getNumBytes()); + return; + } + + int numTargets = xferTargets.length; + if (numTargets > 0) { + if (LOG.isInfoEnabled()) { + StringBuilder xfersBuilder = new StringBuilder(); + for (int i = 0; i < numTargets; i++) { + xfersBuilder.append(xferTargets[i].getName()); + xfersBuilder.append(" "); + } + LOG.info(dnRegistration + " Starting thread to transfer block " + + block + " to " + xfersBuilder); + } + + new Daemon(new DataTransfer(xferTargets, block, this)).start(); + } + } + + private void transferBlocks( Block blocks[], + DatanodeInfo xferTargets[][] + ) { + for (int i = 0; i < blocks.length; i++) { + try { + transferBlock(blocks[i], xferTargets[i]); + } catch (IOException ie) { + LOG.warn("Failed to transfer block " + blocks[i], ie); + } + } + } + + /* + * Informing the name node could take a long long time! Should we wait + * till namenode is informed before responding with success to the + * client? For now we don't. + */ + protected void notifyNamenodeReceivedBlock(Block block, String delHint) { + if(block==null || delHint==null) { + throw new IllegalArgumentException(block==null?"Block is null":"delHint is null"); + } + synchronized (receivedBlockList) { + synchronized (delHints) { + receivedBlockList.add(block); + delHints.add(delHint); + receivedBlockList.notifyAll(); + } + } + } + + + + + /* ******************************************************************** + Protocol when a client reads data from Datanode (Cur Ver: 9): + + Client's Request : + ================= + + Processed in DataXceiver: + +----------------------------------------------+ + | Common Header | 1 byte OP == OP_READ_BLOCK | + +----------------------------------------------+ + + Processed in readBlock() : + +-------------------------------------------------------------------------+ + | 8 byte Block ID | 8 byte genstamp | 8 byte start offset | 8 byte length | + +-------------------------------------------------------------------------+ + | vInt length | | + +-----------------------------------+ + + Client sends optional response only at the end of receiving data. + + DataNode Response : + =================== + + In readBlock() : + If there is an error while initializing BlockSender : + +---------------------------+ + | 2 byte OP_STATUS_ERROR | and connection will be closed. 
+ +---------------------------+ + Otherwise + +---------------------------+ + | 2 byte OP_STATUS_SUCCESS | + +---------------------------+ + + Actual data, sent by BlockSender.sendBlock() : + + ChecksumHeader : + +--------------------------------------------------+ + | 1 byte CHECKSUM_TYPE | 4 byte BYTES_PER_CHECKSUM | + +--------------------------------------------------+ + Followed by actual data in the form of PACKETS: + +------------------------------------+ + | Sequence of data PACKETs .... | + +------------------------------------+ + + A "PACKET" is defined further below. + + The client reads data until it receives a packet with + "LastPacketInBlock" set to true or with a zero length. If there is + no checksum error, it replies to DataNode with OP_STATUS_CHECKSUM_OK: + + Client optional response at the end of data transmission : + +------------------------------+ + | 2 byte OP_STATUS_CHECKSUM_OK | + +------------------------------+ + + PACKET : Contains a packet header, checksum and data. Amount of data + ======== carried is set by BUFFER_SIZE. + + +-----------------------------------------------------+ + | 4 byte packet length (excluding packet header) | + +-----------------------------------------------------+ + | 8 byte offset in the block | 8 byte sequence number | + +-----------------------------------------------------+ + | 1 byte isLastPacketInBlock | + +-----------------------------------------------------+ + | 4 byte Length of actual data | + +-----------------------------------------------------+ + | x byte checksum data. x is defined below | + +-----------------------------------------------------+ + | actual data ...... | + +-----------------------------------------------------+ + + x = (length of data + BYTE_PER_CHECKSUM - 1)/BYTES_PER_CHECKSUM * + CHECKSUM_SIZE + + CHECKSUM_SIZE depends on CHECKSUM_TYPE (usually, 4 for CRC32) + + The above packet format is used while writing data to DFS also. + Not all the fields might be used while reading. + + ************************************************************************ */ + + /** Header size for a packet */ + public static final int PKT_HEADER_LEN = ( 4 + /* Packet payload length */ + 8 + /* offset in block */ + 8 + /* seqno */ + 1 /* isLastPacketInBlock */); + + + + /** + * Used for transferring a block of data. This class + * sends a piece of data to another DataNode. + */ + class DataTransfer implements Runnable { + DatanodeInfo targets[]; + Block b; + Block destinationBlock; + DataNode datanode; + + /** + * Connect to the first item in the target list. Pass along the + * entire target list, the block, and the data. + */ + public DataTransfer(DatanodeInfo targets[], Block b, DataNode datanode) throws IOException { + this.targets = targets; + this.b = b; + this.destinationBlock = b; // the source blockid and the destination block id are same + this.datanode = datanode; + } + + /** + * Connect to the first item in the target list. Pass along the + * entire target list, the block, and the data. 
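Worked numbers for the packet layout documented above (512 bytes per checksum is the usual io.bytes.per.checksum default; the payload size is an assumption):

    int pktHeaderLen = 4 + 8 + 8 + 1;        // 21 bytes, matching PKT_HEADER_LEN above
    int bytesPerChecksum = 512;
    int checksumSize = 4;                    // CRC32
    int dataLen = 64 * 1024;                 // one 64 KB payload
    int checksumLen = (dataLen + bytesPerChecksum - 1) / bytesPerChecksum * checksumSize;
    // checksumLen = 128 * 4 = 512 bytes of CRCs travel ahead of the 65,536 data bytes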
+ */ + public DataTransfer(DatanodeInfo targets[], Block b, Block destinationBlock, + DataNode datanode) throws IOException { + this.targets = targets; + this.b = b; + this.destinationBlock = destinationBlock; + this.datanode = datanode; + } + + /** + * Do the deed, write the bytes + */ + public void run() { + xmitsInProgress.getAndIncrement(); + Socket sock = null; + DataOutputStream out = null; + BlockSender blockSender = null; + + try { + InetSocketAddress curTarget = + NetUtils.createSocketAddr(targets[0].getName()); + sock = newSocket(); + NetUtils.connect(sock, curTarget, socketTimeout); + sock.setSoTimeout(targets.length * socketTimeout); + + long writeTimeout = socketWriteTimeout + + HdfsConstants.WRITE_TIMEOUT_EXTENSION * (targets.length-1); + OutputStream baseStream = NetUtils.getOutputStream(sock, writeTimeout); + out = new DataOutputStream(new BufferedOutputStream(baseStream, + SMALL_BUFFER_SIZE)); + + blockSender = new BlockSender(b, 0, b.getNumBytes(), false, false, false, + datanode); + DatanodeInfo srcNode = new DatanodeInfo(dnRegistration); + + // + // Header info + // + out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION); + out.writeByte(DataTransferProtocol.OP_WRITE_BLOCK); + out.writeLong(destinationBlock.getBlockId()); + out.writeLong(destinationBlock.getGenerationStamp()); + out.writeInt(0); // no pipelining + out.writeBoolean(false); // not part of recovery + Text.writeString(out, ""); // client + out.writeBoolean(true); // sending src node information + srcNode.write(out); // Write src node DatanodeInfo + // write targets + out.writeInt(targets.length - 1); + for (int i = 1; i < targets.length; i++) { + targets[i].write(out); + } + // send data & checksum + blockSender.sendBlock(out, baseStream, null); + + // no response necessary + LOG.info(dnRegistration + ":Transmitted block " + b + " to " + curTarget); + + } catch (IOException ie) { + LOG.warn(dnRegistration + ":Failed to transfer " + b + " to " + targets[0].getName() + + " got " + StringUtils.stringifyException(ie)); + // check if there are any disk problem + datanode.checkDiskError(); + + } finally { + xmitsInProgress.getAndDecrement(); + IOUtils.closeStream(blockSender); + IOUtils.closeStream(out); + IOUtils.closeSocket(sock); + } + } + } + + /** + * No matter what kind of exception we get, keep retrying to offerService(). + * That's the loop that connects to the NameNode and provides basic DataNode + * functionality. + * + * Only stop when "shouldRun" is turned off (which can only happen at shutdown). + */ + public void run() { + LOG.info(dnRegistration + "In DataNode.run, data = " + data); + + // start dataXceiveServer + dataXceiverServer.start(); + + while (shouldRun) { + try { + startDistributedUpgradeIfNeeded(); + offerService(); + } catch (Exception ex) { + LOG.error("Exception: " + StringUtils.stringifyException(ex)); + if (shouldRun) { + try { + Thread.sleep(5000); + } catch (InterruptedException ie) { + } + } + } + } + + LOG.info(dnRegistration + ":Finishing DataNode in: "+data); + shutdown(); + } + + /** Start a single datanode daemon and wait for it to finish. + * If this thread is specifically interrupted, it will stop waiting. 
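A hedged sketch of starting a datanode in-process with the entry points defined just below; the data directory is an arbitrary example, and it assumes a reachable namenode plus the usual configuration files on the classpath (the class sits in the same package because join() is package-private):

    package org.apache.hadoop.hdfs.server.datanode;

    public class StartDataNodeSketch {
      public static void main(String[] args) throws Exception {
        org.apache.hadoop.conf.Configuration conf =
            new org.apache.hadoop.conf.Configuration();
        conf.set("dfs.data.dir", "/tmp/dn-data");          // illustrative location
        DataNode dn = DataNode.createDataNode(new String[0], conf);
        if (dn != null) {
          dn.join();                                       // block until shutdown
        }
      }
    }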
+ */ + public static void runDatanodeDaemon(DataNode dn) throws IOException { + if (dn != null) { + //register datanode + dn.register(); + dn.dataNodeThread = new Thread(dn, dnThreadName); + dn.dataNodeThread.setDaemon(true); // needed for JUnit testing + dn.dataNodeThread.start(); + } + } + + static boolean isDatanodeUp(DataNode dn) { + return dn.dataNodeThread != null && dn.dataNodeThread.isAlive(); + } + + /** Instantiate a single datanode object. This must be run by invoking + * {@link DataNode#runDatanodeDaemon(DataNode)} subsequently. + */ + public static DataNode instantiateDataNode(String args[], + Configuration conf) throws IOException { + if (conf == null) + conf = new Configuration(); + if (!parseArguments(args, conf)) { + printUsage(); + return null; + } + if (conf.get("dfs.network.script") != null) { + LOG.error("This configuration for rack identification is not supported" + + " anymore. RackID resolution is handled by the NameNode."); + System.exit(-1); + } + String[] dataDirs = conf.getStrings("dfs.data.dir"); + dnThreadName = "DataNode: [" + + StringUtils.arrayToString(dataDirs) + "]"; + return makeInstance(dataDirs, conf); + } + + /** Instantiate & Start a single datanode daemon and wait for it to finish. + * If this thread is specifically interrupted, it will stop waiting. + */ + public static DataNode createDataNode(String args[], + Configuration conf) throws IOException { + DataNode dn = instantiateDataNode(args, conf); + runDatanodeDaemon(dn); + return dn; + } + + void join() { + if (dataNodeThread != null) { + try { + dataNodeThread.join(); + } catch (InterruptedException e) {} + } + } + + /** + * Make an instance of DataNode after ensuring that at least one of the + * given data directories (and their parent directories, if necessary) + * can be created. + * @param dataDirs List of directories, where the new DataNode instance should + * keep its files. + * @param conf Configuration instance to use. + * @return DataNode instance for given list of data dirs and conf, or null if + * no directory from this directory list can be created. + * @throws IOException + */ + public static DataNode makeInstance(String[] dataDirs, Configuration conf) + throws IOException { + ArrayList dirs = new ArrayList(); + for (int i = 0; i < dataDirs.length; i++) { + File data = new File(dataDirs[i]); + try { + DiskChecker.checkDir(data); + dirs.add(data); + } catch(DiskErrorException e) { + LOG.warn("Invalid directory in dfs.data.dir: " + e.getMessage()); + } + } + if (dirs.size() > 0) + return new DataNode(conf, dirs); + LOG.error("All directories in dfs.data.dir are invalid."); + return null; + } + + @Override + public String toString() { + return "DataNode{" + + "data=" + data + + ", localName='" + dnRegistration.getName() + "'" + + ", storageID='" + dnRegistration.getStorageID() + "'" + + ", xmitsInProgress=" + xmitsInProgress.get() + + "}"; + } + + private static void printUsage() { + System.err.println("Usage: java DataNode"); + System.err.println(" [-rollback]"); + } + + /** + * Parse and verify command line arguments and set configuration parameters. + * + * @return false if passed argements are incorrect + */ + private static boolean parseArguments(String args[], + Configuration conf) { + int argsLen = (args == null) ? 0 : args.length; + StartupOption startOpt = StartupOption.REGULAR; + for(int i=0; i < argsLen; i++) { + String cmd = args[i]; + if ("-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd)) { + LOG.error("-r, --rack arguments are not supported anymore. 
RackID " + + "resolution is handled by the NameNode."); + System.exit(-1); + } else if ("-rollback".equalsIgnoreCase(cmd)) { + startOpt = StartupOption.ROLLBACK; + } else if ("-regular".equalsIgnoreCase(cmd)) { + startOpt = StartupOption.REGULAR; + } else + return false; + } + setStartupOption(conf, startOpt); + return true; + } + + private static void setStartupOption(Configuration conf, StartupOption opt) { + conf.set("dfs.datanode.startup", opt.toString()); + } + + static StartupOption getStartupOption(Configuration conf) { + return StartupOption.valueOf(conf.get("dfs.datanode.startup", + StartupOption.REGULAR.toString())); + } + + /** + * This method arranges for the data node to send the block report at the next heartbeat. + */ + public void scheduleBlockReport(long delay) { + if (delay > 0) { // send BR after random delay + lastBlockReport = System.currentTimeMillis() + - ( blockReportInterval - R.nextInt((int)(delay))); + } else { // send at next heartbeat + lastBlockReport = lastHeartbeat - blockReportInterval; + } + resetBlockReportTime = true; // reset future BRs for randomness + } + + + /** + * This method is used for testing. + * Examples are adding and deleting blocks directly. + * The most common usage will be when the data node's storage is simulated. + * + * @return the fsdataset that stores the blocks + */ + public FSDatasetInterface getFSDataset() { + return data; + } + + /** + */ + public static void main(String args[]) { + try { + StringUtils.startupShutdownMessage(DataNode.class, args, LOG); + DataNode datanode = createDataNode(args, null); + if (datanode != null) + datanode.join(); + } catch (Throwable e) { + LOG.error(StringUtils.stringifyException(e)); + System.exit(-1); + } + } + + // InterDataNodeProtocol implementation + /** {@inheritDoc} */ + public BlockMetaDataInfo getBlockMetaDataInfo(Block block + ) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("block=" + block); + } + Block stored = data.getStoredBlock(block.getBlockId()); + + if (stored == null) { + return null; + } + BlockMetaDataInfo info = new BlockMetaDataInfo(stored, + blockScanner.getLastScanTime(stored)); + if (LOG.isDebugEnabled()) { + LOG.debug("getBlockMetaDataInfo successful block=" + stored + + " length " + stored.getNumBytes() + + " genstamp " + stored.getGenerationStamp()); + } + + // paranoia! verify that the contents of the stored block + // matches the block file on disk. + data.validateBlockMetadata(stored); + return info; + } + + public Daemon recoverBlocks(final Block[] blocks, final DatanodeInfo[][] targets) { + Daemon d = new Daemon(threadGroup, new Runnable() { + /** Recover a list of blocks. It is run by the primary datanode.
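Sketch of what scheduleBlockReport(delay) above does when delay > 0: the next report lands at a random point within the next delay milliseconds (the numbers and the Random instance here are illustrative):

    long blockReportInterval = 3600L * 1000;    // one hour
    long delay = 60L * 1000;                    // spread initial reports over a minute
    long now = System.currentTimeMillis();
    int jitter = new java.util.Random().nextInt((int) delay);        // 0 .. 59,999 ms
    long lastBlockReport = now - (blockReportInterval - jitter);
    // The report becomes due once (startTime - lastBlockReport) > blockReportInterval,
    // which happens roughly 'jitter' milliseconds from now.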
*/ + public void run() { + for(int i = 0; i < blocks.length; i++) { + try { + logRecoverBlock("NameNode", blocks[i], targets[i]); + recoverBlock(blocks[i], false, targets[i], true); + } catch (IOException e) { + LOG.warn("recoverBlocks FAILED, blocks[" + i + "]=" + blocks[i], e); + } + } + } + }); + d.start(); + return d; + } + + /** {@inheritDoc} */ + public void updateBlock(Block oldblock, Block newblock, boolean finalize) throws IOException { + LOG.info("oldblock=" + oldblock + "(length=" + oldblock.getNumBytes() + + "), newblock=" + newblock + "(length=" + newblock.getNumBytes() + + "), datanode=" + dnRegistration.getName()); + data.updateBlock(oldblock, newblock); + if (finalize) { + data.finalizeBlock(newblock); + myMetrics.blocksWritten.inc(); + notifyNamenodeReceivedBlock(newblock, EMPTY_DEL_HINT); + LOG.info("Received block " + newblock + + " of size " + newblock.getNumBytes() + + " as part of lease recovery."); + } + } + + /** {@inheritDoc} */ + public long getProtocolVersion(String protocol, long clientVersion + ) throws IOException { + long datanodeVersion = 0; + if (protocol.equals(InterDatanodeProtocol.class.getName())) { + return InterDatanodeProtocol.versionID; + } else if (protocol.equals(ClientDatanodeProtocol.class.getName())) { + checkVersion(protocol, clientVersion, ClientDatanodeProtocol.versionID); + return ClientDatanodeProtocol.versionID; + } + throw new IOException("Unknown protocol to " + getClass().getSimpleName() + + ": " + protocol); + } + + private void checkVersion(String protocol, long clientVersion, + long serverVersion) throws IOException { + if (serverVersion > clientVersion && + !ProtocolCompatible.isCompatibleClientDatanodeProtocol( + clientVersion, serverVersion)) { + throw new RPC.VersionIncompatible(protocol, clientVersion, serverVersion); + } + } + + /** A convenient class used in lease recovery */ + private static class BlockRecord { + final DatanodeID id; + final InterDatanodeProtocol datanode; + final Block block; + + BlockRecord(DatanodeID id, InterDatanodeProtocol datanode, Block block) { + this.id = id; + this.datanode = datanode; + this.block = block; + } + + /** {@inheritDoc} */ + public String toString() { + return "block:" + block + " node:" + id; + } + } + + /** Recover a block */ + private LocatedBlock recoverBlock(Block block, boolean keepLength, + DatanodeID[] datanodeids, boolean closeFile) throws IOException { + + // If the block is already being recovered, then skip recovering it. + // This can happen if the namenode and client start recovering the same + // file at the same time. 
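The duplicate-recovery check performed just below keys its lookup with a wildcard generation stamp, so any in-flight recovery of the same block id is found regardless of the stamp it carries. A small sketch with an invented block id, using only the calls visible in this file:

    Block probe = new Block();
    probe.set(4242L, 0L, GenerationStamp.WILDCARD_STAMP);   // invented id; length is ignored
    boolean alreadyRecovering;
    synchronized (ongoingRecovery) {
      alreadyRecovering = ongoingRecovery.get(probe) != null;
    }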
+ synchronized (ongoingRecovery) { + Block tmp = new Block(); + tmp.set(block.getBlockId(), block.getNumBytes(), GenerationStamp.WILDCARD_STAMP); + if (ongoingRecovery.get(tmp) != null) { + String msg = "Block " + block + " is already being recovered, " + + " ignoring this request to recover it."; + LOG.info(msg); + throw new IOException(msg); + } + ongoingRecovery.put(block, block); + } + try { + List syncList = new ArrayList(); + long minlength = Long.MAX_VALUE; + int errorCount = 0; + + List datanodeProxies = + new ArrayList(); + //check generation stamps + for(DatanodeID id : datanodeids) { + try { + InterDatanodeProtocol datanode; + if (dnRegistration.equals(id)) { + LOG.info("Skipping IDNPP creation for local id " + id); + datanode = this; + } else { + LOG.info("Creating IDNPP for non-local id " + id + " (dnReg=" + dnRegistration + ")"); + datanode = DataNode.createInterDataNodeProtocolProxy(id, getConf()); + datanodeProxies.add(datanode); + } + BlockMetaDataInfo info = datanode.getBlockMetaDataInfo(block); + if (info != null && info.getGenerationStamp() >= block.getGenerationStamp()) { + if (keepLength) { + if (info.getNumBytes() == block.getNumBytes()) { + syncList.add(new BlockRecord(id, datanode, new Block(info))); + } + } + else { + syncList.add(new BlockRecord(id, datanode, new Block(info))); + if (info.getNumBytes() < minlength) { + minlength = info.getNumBytes(); + } + } + } + } catch (IOException e) { + ++errorCount; + InterDatanodeProtocol.LOG.warn( + "Failed to getBlockMetaDataInfo for block (=" + block + + ") from datanode (=" + id + ")", e); + } + } + + if (syncList.isEmpty() && errorCount > 0) { + stopAllProxies(datanodeProxies); + throw new IOException("All datanodes failed: block=" + block + + ", datanodeids=" + Arrays.asList(datanodeids)); + } + if (!keepLength) { + block.setNumBytes(minlength); + } + return syncBlock(block, syncList, closeFile, datanodeProxies); + } finally { + synchronized (ongoingRecovery) { + ongoingRecovery.remove(block); + } + } + } + + private void stopAllProxies(List datanodeProxies) { + // safe to stop proxies now + for (InterDatanodeProtocol proxy : datanodeProxies) { + stopDatanodeProxy(proxy); + } + } + + private void stopDatanodeProxy(InterDatanodeProtocol datanode) { + // if this is a proxy instance, close it + if (Proxy.isProxyClass(datanode.getClass())) { + RPC.stopProxy(datanode); + } + } + + /** Block synchronization */ + private LocatedBlock syncBlock( + Block block, List syncList, + boolean closeFile, List datanodeProxies + ) + throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("block=" + block + ", (length=" + block.getNumBytes() + + "), syncList=" + syncList + ", closeFile=" + closeFile); + } + + //syncList.isEmpty() that all datanodes do not have the block + //so the block can be deleted. 
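The loop above keeps only replicas whose generation stamp is at least the requested block's and, unless keepLength is set, remembers the shortest reported length so the surviving replicas can all be truncated to it before syncBlock runs. Stripped of the RPC and error handling, the reconciliation rule amounts to the following sketch (method and parameter names are illustrative; genStamps[i] and lengths[i] stand for the metadata reported by the i-th replica):

    // Illustrative reduction of the replica-selection rule used ahead of syncBlock.
    static long reconcileLength(long requiredGenStamp, boolean keepLength,
                                long requestedLength,
                                long[] genStamps, long[] lengths) {
      long min = Long.MAX_VALUE;
      for (int i = 0; i < genStamps.length; i++) {
        if (genStamps[i] < requiredGenStamp) {
          continue;                              // stale replica, never added to syncList
        }
        if (keepLength && lengths[i] != requestedLength) {
          continue;                              // keepLength: only exact-length replicas qualify
        }
        min = Math.min(min, lengths[i]);
      }
      return keepLength ? requestedLength : min; // value passed to block.setNumBytes(...)
    }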
+ if (syncList.isEmpty()) { + namenode.commitBlockSynchronization(block, 0, 0, closeFile, true, + DatanodeID.EMPTY_ARRAY); + return null; + } + + List successList = new ArrayList(); + + long generationstamp = namenode.nextGenerationStamp(block); + Block newblock = new Block(block.getBlockId(), block.getNumBytes(), generationstamp); + + for(BlockRecord r : syncList) { + try { + r.datanode.updateBlock(r.block, newblock, closeFile); + successList.add(r.id); + } catch (IOException e) { + InterDatanodeProtocol.LOG.warn("Failed to updateBlock (newblock=" + + newblock + ", datanode=" + r.id + ")", e); + } + } + + stopAllProxies(datanodeProxies); + + if (!successList.isEmpty()) { + DatanodeID[] nlist = successList.toArray(new DatanodeID[successList.size()]); + + namenode.commitBlockSynchronization(block, + newblock.getGenerationStamp(), newblock.getNumBytes(), closeFile, false, + nlist); + DatanodeInfo[] info = new DatanodeInfo[nlist.length]; + for (int i = 0; i < nlist.length; i++) { + info[i] = new DatanodeInfo(nlist[i]); + } + return new LocatedBlock(newblock, info); // success + } + + //failed + StringBuilder b = new StringBuilder(); + for(BlockRecord r : syncList) { + b.append("\n " + r.id); + } + throw new IOException("Cannot recover " + block + ", none of these " + + syncList.size() + " datanodes success {" + b + "\n}"); + } + + // ClientDataNodeProtocol implementation + /** {@inheritDoc} */ + public LocatedBlock recoverBlock(Block block, boolean keepLength, DatanodeInfo[] targets + ) throws IOException { + logRecoverBlock("Client", block, targets); + return recoverBlock(block, keepLength, targets, false); + } + + /** {@inheritDoc} */ + public Block getBlockInfo(Block block) throws IOException { + Block stored = data.getStoredBlock(block.getBlockId()); + return stored; + } + + /** {@inheritDoc} */ + public void copyBlock(Block srcBlock, Block destinationBlock, + DatanodeInfo target) throws IOException { + + if (!data.isValidBlock(srcBlock)) { + // block does not exist or is under-construction + String errStr = "copyBlock: Can't send invalid block " + srcBlock; + LOG.info(errStr); + throw new IOException(errStr); + } + + // Check if specified length matches on-disk length + long onDiskLength = data.getLength(srcBlock); + if (srcBlock.getNumBytes() > onDiskLength) { + // Shorter on-disk len indicates corruption so report NN the corrupt block + String msg = "copyBlock: Can't replicate block " + srcBlock + + " because on-disk length " + onDiskLength + + " is shorter than provided length " + srcBlock.getNumBytes(); + LOG.info(msg); + throw new IOException(msg); + } + + LOG.info(dnRegistration + " copyBlock: Starting thread to transfer block " + + srcBlock + " to " + target.getName()); + DatanodeInfo[] targets = new DatanodeInfo[1]; + targets[0] = target; + new Daemon(new DataTransfer(targets, srcBlock, destinationBlock, this)).start(); + } + + private static void logRecoverBlock(String who, + Block block, DatanodeID[] targets) { + StringBuilder msg = new StringBuilder(targets[0].getName()); + for (int i = 1; i < targets.length; i++) { + msg.append(", " + targets[i].getName()); + } + LOG.info(who + " calls recoverBlock(block=" + block + + ", targets=[" + msg + "])"); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataStorage.java new file mode 100644 index 0000000..261ef51 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataStorage.java @@ -0,0 +1,439 @@ +/** + * Licensed to the Apache 
Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.datanode; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.channels.FileLock; +import java.util.Collection; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.Properties; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; +import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; +import org.apache.hadoop.hdfs.server.common.Storage; +import org.apache.hadoop.hdfs.server.common.StorageInfo; +import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.apache.hadoop.util.Daemon; +import org.apache.hadoop.fs.FileUtil.HardLink; +import org.apache.hadoop.io.IOUtils; + +/** + * Data storage information file. + *

+ * @see Storage + */ +public class DataStorage extends Storage { + // Constants + final static String BLOCK_SUBDIR_PREFIX = "subdir"; + final static String BLOCK_FILE_PREFIX = "blk_"; + final static String COPY_FILE_PREFIX = "dncp_"; + + private String storageID; + + DataStorage() { + super(NodeType.DATA_NODE); + storageID = ""; + } + + DataStorage(int nsID, long cT, String strgID) { + super(NodeType.DATA_NODE, nsID, cT); + this.storageID = strgID; + } + + public DataStorage(StorageInfo storageInfo, String strgID) { + super(NodeType.DATA_NODE, storageInfo); + this.storageID = strgID; + } + + public String getStorageID() { + return storageID; + } + + void setStorageID(String newStorageID) { + this.storageID = newStorageID; + } + + /** + * Analyze storage directories. + * Recover from previous transitions if required. + * Perform fs state transition if necessary depending on the namespace info. + * Read storage info. + * + * @param nsInfo namespace information + * @param dataDirs array of data storage directories + * @param startOpt startup option + * @throws IOException + */ + void recoverTransitionRead(NamespaceInfo nsInfo, + Collection dataDirs, + StartupOption startOpt + ) throws IOException { + assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() : + "Data-node and name-node layout versions must be the same."; + + // 1. For each data directory calculate its state and + // check whether all is consistent before transitioning. + // Format and recover. + this.storageID = ""; + this.storageDirs = new ArrayList(dataDirs.size()); + ArrayList dataDirStates = new ArrayList(dataDirs.size()); + for(Iterator it = dataDirs.iterator(); it.hasNext();) { + File dataDir = it.next(); + StorageDirectory sd = new StorageDirectory(dataDir); + StorageState curState; + try { + curState = sd.analyzeStorage(startOpt); + // sd is locked but not opened + switch(curState) { + case NORMAL: + break; + case NON_EXISTENT: + // ignore this storage + LOG.info("Storage directory " + dataDir + " does not exist."); + it.remove(); + continue; + case NOT_FORMATTED: // format + LOG.info("Storage directory " + dataDir + " is not formatted."); + LOG.info("Formatting ..."); + format(sd, nsInfo); + break; + default: // recovery part is common + sd.doRecover(curState); + } + } catch (IOException ioe) { + sd.unlock(); + throw ioe; + } + // add to the storage list + addStorageDir(sd); + dataDirStates.add(curState); + } + + if (dataDirs.size() == 0) // none of the data dirs exist + throw new IOException( + "All specified directories are not accessible or do not exist."); + + // 2. Do transitions + // Each storage directory is treated individually. + // During sturtup some of them can upgrade or rollback + // while others could be uptodate for the regular startup. + for(int idx = 0; idx < getNumStorageDirs(); idx++) { + doTransition(getStorageDir(idx), nsInfo, startOpt); + assert this.getLayoutVersion() == nsInfo.getLayoutVersion() : + "Data-node and name-node layout versions must be the same."; + assert this.getCTime() == nsInfo.getCTime() : + "Data-node and name-node CTimes must be the same."; + } + + // 3. Update all storages. Some of them might have just been formatted. 
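Step 2 above applies doTransition to every directory, and the outcome depends only on how the stored layout version and cTime compare with the software layout version and the namenode's cTime (the rollback case is handled first when the -rollback option is given). A compressed view of that decision, matching the checks in doTransition below (HDFS layout versions are negative, so a numerically larger stored value means older on-disk state):

    // Illustrative decision rule applied per storage directory.
    enum Transition { REGULAR, UPGRADE, FATAL }

    static Transition decide(int storedLV, long storedCTime,
                             int softwareLV, long namenodeCTime) {
      if (storedLV == softwareLV && storedCTime == namenodeCTime) {
        return Transition.REGULAR;   // already consistent with the namespace
      }
      if (storedLV > softwareLV || storedCTime < namenodeCTime) {
        return Transition.UPGRADE;   // on-disk state is older: hardlink-based upgrade
      }
      return Transition.FATAL;       // on-disk state newer than the namespace: refuse to start
    }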
+ this.writeAll(); + } + + void format(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException { + sd.clearDirectory(); // create directory + this.layoutVersion = FSConstants.LAYOUT_VERSION; + this.namespaceID = nsInfo.getNamespaceID(); + this.cTime = 0; + // store storageID as it currently is + sd.write(); + } + + protected void setFields(Properties props, + StorageDirectory sd + ) throws IOException { + super.setFields(props, sd); + props.setProperty("storageID", storageID); + } + + protected void getFields(Properties props, + StorageDirectory sd + ) throws IOException { + super.getFields(props, sd); + String ssid = props.getProperty("storageID"); + if (ssid == null || + !("".equals(storageID) || "".equals(ssid) || + storageID.equals(ssid))) + throw new InconsistentFSStateException(sd.getRoot(), + "has incompatible storage Id."); + if ("".equals(storageID)) // update id only if it was empty + storageID = ssid; + } + + public boolean isConversionNeeded(StorageDirectory sd) throws IOException { + File oldF = new File(sd.getRoot(), "storage"); + if (!oldF.exists()) + return false; + // check the layout version inside the storage file + // Lock and Read old storage file + RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws"); + FileLock oldLock = oldFile.getChannel().tryLock(); + try { + oldFile.seek(0); + int oldVersion = oldFile.readInt(); + if (oldVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION) + return false; + } finally { + oldLock.release(); + oldFile.close(); + } + return true; + } + + /** + * Analize which and whether a transition of the fs state is required + * and perform it if necessary. + * + * Rollback if previousLV >= LAYOUT_VERSION && prevCTime <= namenode.cTime + * Upgrade if this.LV > LAYOUT_VERSION || this.cTime < namenode.cTime + * Regular startup if this.LV = LAYOUT_VERSION && this.cTime = namenode.cTime + * + * @param sd storage directory + * @param nsInfo namespace info + * @param startOpt startup option + * @throws IOException + */ + private void doTransition( StorageDirectory sd, + NamespaceInfo nsInfo, + StartupOption startOpt + ) throws IOException { + if (startOpt == StartupOption.ROLLBACK) + doRollback(sd, nsInfo); // rollback if applicable + sd.read(); + checkVersionUpgradable(this.layoutVersion); + assert this.layoutVersion >= FSConstants.LAYOUT_VERSION : + "Future version is not allowed"; + if (getNamespaceID() != nsInfo.getNamespaceID()) + throw new IOException( + "Incompatible namespaceIDs in " + sd.getRoot().getCanonicalPath() + + ": namenode namespaceID = " + nsInfo.getNamespaceID() + + "; datanode namespaceID = " + getNamespaceID()); + if (this.layoutVersion == FSConstants.LAYOUT_VERSION + && this.cTime == nsInfo.getCTime()) + return; // regular startup + // verify necessity of a distributed upgrade + verifyDistributedUpgradeProgress(nsInfo); + if (this.layoutVersion > FSConstants.LAYOUT_VERSION + || this.cTime < nsInfo.getCTime()) { + doUpgrade(sd, nsInfo); // upgrade + return; + } + // layoutVersion == LAYOUT_VERSION && this.cTime > nsInfo.cTime + // must shutdown + throw new IOException("Datanode state: LV = " + this.getLayoutVersion() + + " CTime = " + this.getCTime() + + " is newer than the namespace state: LV = " + + nsInfo.getLayoutVersion() + + " CTime = " + nsInfo.getCTime()); + } + + /** + * Move current storage into a backup directory, + * and hardlink all its blocks into the new current directory. 
+ * + * @param sd storage directory + * @throws IOException + */ + void doUpgrade(StorageDirectory sd, + NamespaceInfo nsInfo + ) throws IOException { + LOG.info("Upgrading storage directory " + sd.getRoot() + + ".\n old LV = " + this.getLayoutVersion() + + "; old CTime = " + this.getCTime() + + ".\n new LV = " + nsInfo.getLayoutVersion() + + "; new CTime = " + nsInfo.getCTime()); + File curDir = sd.getCurrentDir(); + File prevDir = sd.getPreviousDir(); + assert curDir.exists() : "Current directory must exist."; + // delete previous dir before upgrading + if (prevDir.exists()) + deleteDir(prevDir); + File tmpDir = sd.getPreviousTmp(); + assert !tmpDir.exists() : "previous.tmp directory must not exist."; + // rename current to tmp + rename(curDir, tmpDir); + // hardlink blocks + linkBlocks(tmpDir, curDir, this.getLayoutVersion()); + // write version file + this.layoutVersion = FSConstants.LAYOUT_VERSION; + assert this.namespaceID == nsInfo.getNamespaceID() : + "Data-node and name-node layout versions must be the same."; + this.cTime = nsInfo.getCTime(); + sd.write(); + // rename tmp to previous + rename(tmpDir, prevDir); + LOG.info("Upgrade of " + sd.getRoot()+ " is complete."); + } + + void doRollback( StorageDirectory sd, + NamespaceInfo nsInfo + ) throws IOException { + File prevDir = sd.getPreviousDir(); + // regular startup if previous dir does not exist + if (!prevDir.exists()) + return; + DataStorage prevInfo = new DataStorage(); + StorageDirectory prevSD = prevInfo.new StorageDirectory(sd.getRoot()); + prevSD.read(prevSD.getPreviousVersionFile()); + + // We allow rollback to a state, which is either consistent with + // the namespace state or can be further upgraded to it. + if (!(prevInfo.getLayoutVersion() >= FSConstants.LAYOUT_VERSION + && prevInfo.getCTime() <= nsInfo.getCTime())) // cannot rollback + throw new InconsistentFSStateException(prevSD.getRoot(), + "Cannot rollback to a newer state.\nDatanode previous state: LV = " + + prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime() + + " is newer than the namespace state: LV = " + + nsInfo.getLayoutVersion() + " CTime = " + nsInfo.getCTime()); + LOG.info("Rolling back storage directory " + sd.getRoot() + + ".\n target LV = " + nsInfo.getLayoutVersion() + + "; target CTime = " + nsInfo.getCTime()); + File tmpDir = sd.getRemovedTmp(); + assert !tmpDir.exists() : "removed.tmp directory must not exist."; + // rename current to tmp + File curDir = sd.getCurrentDir(); + assert curDir.exists() : "Current directory must exist."; + rename(curDir, tmpDir); + // rename previous to current + rename(prevDir, curDir); + // delete tmp dir + deleteDir(tmpDir); + LOG.info("Rollback of " + sd.getRoot() + " is complete."); + } + + void doFinalize(StorageDirectory sd) throws IOException { + File prevDir = sd.getPreviousDir(); + if (!prevDir.exists()) + return; // already discarded + final String dataDirPath = sd.getRoot().getCanonicalPath(); + LOG.info("Finalizing upgrade for storage directory " + + dataDirPath + + ".\n cur LV = " + this.getLayoutVersion() + + "; cur CTime = " + this.getCTime()); + assert sd.getCurrentDir().exists() : "Current directory must exist."; + final File tmpDir = sd.getFinalizedTmp(); + // rename previous to tmp + rename(prevDir, tmpDir); + + // delete tmp dir in a separate thread + new Daemon(new Runnable() { + public void run() { + try { + deleteDir(tmpDir); + } catch(IOException ex) { + LOG.error("Finalize upgrade for " + dataDirPath + " failed.", ex); + } + LOG.info("Finalize upgrade for " + dataDirPath + " 
is complete."); + } + public String toString() { return "Finalize " + dataDirPath; } + }).start(); + } + + void finalizeUpgrade() throws IOException { + for (Iterator it = storageDirs.iterator(); it.hasNext();) { + doFinalize(it.next()); + } + } + + static void linkBlocks(File from, File to, int oldLV) throws IOException { + if (!from.isDirectory()) { + if (from.getName().startsWith(COPY_FILE_PREFIX)) { + IOUtils.copyBytes(new FileInputStream(from), + new FileOutputStream(to), 16*1024, true); + } else { + + //check if we are upgrading from pre-generation stamp version. + if (oldLV >= PRE_GENERATIONSTAMP_LAYOUT_VERSION) { + // Link to the new file name. + to = new File(convertMetatadataFileName(to.getAbsolutePath())); + } + + HardLink.createHardLink(from, to); + } + return; + } + // from is a directory + if (!to.mkdir()) + throw new IOException("Cannot create directory " + to); + String[] blockNames = from.list(new java.io.FilenameFilter() { + public boolean accept(File dir, String name) { + return name.startsWith(BLOCK_SUBDIR_PREFIX) + || name.startsWith(BLOCK_FILE_PREFIX) + || name.startsWith(COPY_FILE_PREFIX); + } + }); + + for(int i = 0; i < blockNames.length; i++) + linkBlocks(new File(from, blockNames[i]), + new File(to, blockNames[i]), oldLV); + } + + protected void corruptPreUpgradeStorage(File rootDir) throws IOException { + File oldF = new File(rootDir, "storage"); + if (oldF.exists()) + return; + // recreate old storage file to let pre-upgrade versions fail + if (!oldF.createNewFile()) + throw new IOException("Cannot create file " + oldF); + RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws"); + // write new version into old storage file + try { + writeCorruptedData(oldFile); + } finally { + oldFile.close(); + } + } + + private void verifyDistributedUpgradeProgress( + NamespaceInfo nsInfo + ) throws IOException { + UpgradeManagerDatanode um = DataNode.getDataNode().upgradeManager; + assert um != null : "DataNode.upgradeManager is null."; + um.setUpgradeState(false, getLayoutVersion()); + um.initializeUpgrade(nsInfo); + } + + private static final Pattern PRE_GENSTAMP_META_FILE_PATTERN = + Pattern.compile("(.*blk_[-]*\\d+)\\.meta$"); + /** + * This is invoked on target file names when upgrading from pre generation + * stamp version (version -13) to correct the metatadata file name. + * @param oldFileName + * @return the new metadata file name with the default generation stamp. + */ + private static String convertMetatadataFileName(String oldFileName) { + Matcher matcher = PRE_GENSTAMP_META_FILE_PATTERN.matcher(oldFileName); + if (matcher.matches()) { + //return the current metadata file name + return FSDataset.getMetaFileName(matcher.group(1), + Block.GRANDFATHER_GENERATION_STAMP); + } + return oldFileName; + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java new file mode 100644 index 0000000..8aab806 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java @@ -0,0 +1,669 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.datanode; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.net.SocketException; +import java.net.SocketTimeoutException; + +import org.apache.commons.logging.Log; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.DataTransferProtocol; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.io.MD5Hash; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.util.DataChecksum; +import org.apache.hadoop.util.StringUtils; +import static org.apache.hadoop.hdfs.server.datanode.DataNode.DN_CLIENTTRACE_FORMAT; + +/** + * Thread for processing incoming/outgoing data stream. + */ +class DataXceiver implements Runnable, FSConstants { + public static final Log LOG = DataNode.LOG; + static final Log ClientTraceLog = DataNode.ClientTraceLog; + + Socket s; + final String remoteAddress; // address of remote side + final String localAddress; // local address of this daemon + DataNode datanode; + DataXceiverServer dataXceiverServer; + + public DataXceiver(Socket s, DataNode datanode, + DataXceiverServer dataXceiverServer) { + + this.s = s; + this.datanode = datanode; + this.dataXceiverServer = dataXceiverServer; + dataXceiverServer.childSockets.put(s, s); + remoteAddress = s.getRemoteSocketAddress().toString(); + localAddress = s.getLocalSocketAddress().toString(); + LOG.debug("Number of active connections is: " + datanode.getXceiverCount()); + } + + /** + * Update the thread name to contain the current status. + * Use this only after this receiver has started on its thread, i.e., + * outside the constructor. + */ + private void updateCurrentThreadName(String status) { + StringBuilder sb = new StringBuilder(); + sb.append("DataXceiver for client ").append(remoteAddress); + if (status != null) { + sb.append(" [").append(status).append("]"); + } + Thread.currentThread().setName(sb.toString()); + } + + /** + * Read/write data from/to the DataXceiveServer. 
+ */ + public void run() { + DataInputStream in=null; + byte op = -1; + try { + in = new DataInputStream( + new BufferedInputStream(NetUtils.getInputStream(s), + SMALL_BUFFER_SIZE)); + short version = in.readShort(); + if ( version != DataTransferProtocol.DATA_TRANSFER_VERSION ) { + throw new IOException( "Version Mismatch" ); + } + boolean local = s.getInetAddress().equals(s.getLocalAddress()); + updateCurrentThreadName("waiting for operation"); + op = in.readByte(); + // Make sure the xciver count is not exceeded + int curXceiverCount = datanode.getXceiverCount(); + if (curXceiverCount > dataXceiverServer.maxXceiverCount) { + throw new IOException("xceiverCount " + curXceiverCount + + " exceeds the limit of concurrent xcievers " + + dataXceiverServer.maxXceiverCount); + } + long startTime = DataNode.now(); + switch ( op ) { + case DataTransferProtocol.OP_READ_BLOCK: + readBlock( in ); + datanode.myMetrics.readBlockOp.inc(DataNode.now() - startTime); + if (local) + datanode.myMetrics.readsFromLocalClient.inc(); + else + datanode.myMetrics.readsFromRemoteClient.inc(); + break; + case DataTransferProtocol.OP_WRITE_BLOCK: + writeBlock( in ); + datanode.myMetrics.writeBlockOp.inc(DataNode.now() - startTime); + if (local) + datanode.myMetrics.writesFromLocalClient.inc(); + else + datanode.myMetrics.writesFromRemoteClient.inc(); + break; + case DataTransferProtocol.OP_READ_METADATA: + readMetadata( in ); + datanode.myMetrics.readMetadataOp.inc(DataNode.now() - startTime); + break; + case DataTransferProtocol.OP_REPLACE_BLOCK: // for balancing purpose; send to a destination + replaceBlock(in); + datanode.myMetrics.replaceBlockOp.inc(DataNode.now() - startTime); + break; + case DataTransferProtocol.OP_COPY_BLOCK: + // for balancing purpose; send to a proxy source + copyBlock(in); + datanode.myMetrics.copyBlockOp.inc(DataNode.now() - startTime); + break; + case DataTransferProtocol.OP_BLOCK_CHECKSUM: //get the checksum of a block + getBlockChecksum(in); + datanode.myMetrics.blockChecksumOp.inc(DataNode.now() - startTime); + break; + default: + throw new IOException("Unknown opcode " + op + " in data stream"); + } + } catch (Throwable t) { + if (op == DataTransferProtocol.OP_READ_BLOCK && t instanceof SocketTimeoutException) { + // Ignore SocketTimeoutException for reading. + // This usually means that the client who's reading data from the DataNode has exited. + LOG.info(datanode.dnRegistration + ":DataXceiver" + " (IGNORED) " + + StringUtils.stringifyException(t)); + } else { + LOG.error(datanode.dnRegistration + ":DataXceiver",t); + } + } finally { + LOG.debug(datanode.dnRegistration + ":Number of active connections is: " + + datanode.getXceiverCount()); + updateCurrentThreadName("Cleaning up"); + IOUtils.closeStream(in); + IOUtils.closeSocket(s); + dataXceiverServer.childSockets.remove(s); + } + } + + /** + * Read a block from the disk. 
+ * @param in The stream to read from + * @throws IOException + */ + private void readBlock(DataInputStream in) throws IOException { + // + // Read in the header + // + long blockId = in.readLong(); + Block block = new Block( blockId, 0 , in.readLong()); + + long startOffset = in.readLong(); + long length = in.readLong(); + String clientName = Text.readString(in); + // send the block + OutputStream baseStream = NetUtils.getOutputStream(s, + datanode.socketWriteTimeout); + DataOutputStream out = new DataOutputStream( + new BufferedOutputStream(baseStream, SMALL_BUFFER_SIZE)); + + BlockSender blockSender = null; + final String clientTraceFmt = + clientName.length() > 0 && ClientTraceLog.isInfoEnabled() + ? String.format(DN_CLIENTTRACE_FORMAT, localAddress, remoteAddress, + "%d", "HDFS_READ", clientName, "%d", + datanode.dnRegistration.getStorageID(), block, "%d") + : datanode.dnRegistration + " Served block " + block + " to " + + s.getInetAddress(); + updateCurrentThreadName("sending block " + block); + try { + try { + blockSender = new BlockSender(block, startOffset, length, + true, true, false, datanode, clientTraceFmt); + } catch(IOException e) { + out.writeShort(DataTransferProtocol.OP_STATUS_ERROR); + throw e; + } + + out.writeShort(DataTransferProtocol.OP_STATUS_SUCCESS); // send op status + long read = blockSender.sendBlock(out, baseStream, null); // send data + + if (blockSender.isBlockReadFully()) { + // See if client verification succeeded. + // This is an optional response from client. + try { + if (in.readShort() == DataTransferProtocol.OP_STATUS_CHECKSUM_OK && + datanode.blockScanner != null) { + datanode.blockScanner.verifiedByClient(block); + } + } catch (IOException ignored) {} + } + + datanode.myMetrics.bytesRead.inc((int) read); + datanode.myMetrics.blocksRead.inc(); + } catch ( SocketException ignored ) { + // Its ok for remote side to close the connection anytime. + datanode.myMetrics.blocksRead.inc(); + } catch ( IOException ioe ) { + /* What exactly should we do here? + * Earlier version shutdown() datanode if there is disk error. + */ + LOG.warn(datanode.dnRegistration + ":Got exception while serving " + + block + " to " + + s.getInetAddress() + ":\n" + + StringUtils.stringifyException(ioe) ); + throw ioe; + } finally { + IOUtils.closeStream(out); + IOUtils.closeStream(blockSender); + } + } + + /** + * Write a block to disk. + * + * @param in The stream to read from + * @throws IOException + */ + private void writeBlock(DataInputStream in) throws IOException { + DatanodeInfo srcDataNode = null; + LOG.debug("writeBlock receive buf size " + s.getReceiveBufferSize() + + " tcp no delay " + s.getTcpNoDelay()); + // + // Read in the header + // + Block block = new Block(in.readLong(), + dataXceiverServer.estimateBlockSize, in.readLong()); + LOG.info("Receiving block " + block + + " src: " + remoteAddress + + " dest: " + localAddress); + int pipelineSize = in.readInt(); // num of datanodes in entire pipeline + boolean isRecovery = in.readBoolean(); // is this part of recovery? 
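readBlock above consumes its request fields in a fixed order, immediately after the version and opcode that run() has already read. A client-side sketch that writes a matching OP_READ_BLOCK request (field order taken from the reads above; the constants exist in DataTransferProtocol, but this writer class is illustrative rather than the patch's own client code):

    import java.io.DataOutputStream;
    import java.io.IOException;

    import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
    import org.apache.hadoop.io.Text;

    // Illustrative request writer mirroring what run() + readBlock parse.
    class ReadBlockRequest {
      static void write(DataOutputStream out, long blockId, long genStamp,
                        long startOffset, long length, String clientName)
          throws IOException {
        out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION); // checked in run()
        out.writeByte(DataTransferProtocol.OP_READ_BLOCK);          // dispatched in run()
        out.writeLong(blockId);
        out.writeLong(genStamp);     // folded into the Block on the datanode side
        out.writeLong(startOffset);
        out.writeLong(length);
        Text.writeString(out, clientName);                          // used for client trace logging
        out.flush();
      }
    }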
+ String client = Text.readString(in); // working on behalf of this client + boolean hasSrcDataNode = in.readBoolean(); // is src node info present + if (hasSrcDataNode) { + srcDataNode = new DatanodeInfo(); + srcDataNode.readFields(in); + } + int numTargets = in.readInt(); + if (numTargets < 0) { + throw new IOException("Mislabelled incoming datastream."); + } + DatanodeInfo targets[] = new DatanodeInfo[numTargets]; + for (int i = 0; i < targets.length; i++) { + DatanodeInfo tmp = new DatanodeInfo(); + tmp.readFields(in); + targets[i] = tmp; + } + + DataOutputStream mirrorOut = null; // stream to next target + DataInputStream mirrorIn = null; // reply from next target + DataOutputStream replyOut = null; // stream to prev target + Socket mirrorSock = null; // socket to next target + BlockReceiver blockReceiver = null; // responsible for data handling + String mirrorNode = null; // the name:port of next target + String firstBadLink = ""; // first datanode that failed in connection setup + + updateCurrentThreadName("receiving block " + block + " client=" + client); + try { + // open a block receiver and check if the block does not exist + blockReceiver = new BlockReceiver(block, in, + s.getRemoteSocketAddress().toString(), + s.getLocalSocketAddress().toString(), + isRecovery, client, srcDataNode, datanode); + + // get a connection back to the previous target + replyOut = new DataOutputStream( + NetUtils.getOutputStream(s, datanode.socketWriteTimeout)); + + // + // Open network conn to backup machine, if + // appropriate + // + if (targets.length > 0) { + InetSocketAddress mirrorTarget = null; + // Connect to backup machine + mirrorNode = targets[0].getName(); + mirrorTarget = NetUtils.createSocketAddr(mirrorNode); + mirrorSock = datanode.newSocket(); + try { + int timeoutValue = numTargets * datanode.socketTimeout; + int writeTimeout = datanode.socketWriteTimeout + + (HdfsConstants.WRITE_TIMEOUT_EXTENSION * numTargets); + NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue); + mirrorSock.setSoTimeout(timeoutValue); + mirrorSock.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE); + mirrorOut = new DataOutputStream( + new BufferedOutputStream( + NetUtils.getOutputStream(mirrorSock, writeTimeout), + SMALL_BUFFER_SIZE)); + mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock)); + + // Write header: Copied from DFSClient.java! 
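The mirror connection set up above scales its timeouts with the number of remaining targets, so a deeper pipeline gets proportionally more time to connect and to flush. The arithmetic, with typical defaults of that era supplied purely as assumptions (socketTimeout 60 s, socketWriteTimeout 480 s, WRITE_TIMEOUT_EXTENSION 5 s):

    // Illustrative timeout arithmetic for the downstream (mirror) connection.
    class PipelineTimeouts {
      static int connectTimeoutMs(int numTargets, int socketTimeoutMs) {
        return numTargets * socketTimeoutMs;                     // 3 * 60000 = 180000 ms
      }
      static int writeTimeoutMs(int numTargets, int socketWriteTimeoutMs, int extensionMs) {
        return socketWriteTimeoutMs + extensionMs * numTargets;  // 480000 + 5000 * 3 = 495000 ms
      }
      public static void main(String[] args) {
        System.out.println(connectTimeoutMs(3, 60000));          // 180000
        System.out.println(writeTimeoutMs(3, 480000, 5000));     // 495000
      }
    }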
+ mirrorOut.writeShort( DataTransferProtocol.DATA_TRANSFER_VERSION ); + mirrorOut.write( DataTransferProtocol.OP_WRITE_BLOCK ); + mirrorOut.writeLong( block.getBlockId() ); + mirrorOut.writeLong( block.getGenerationStamp() ); + mirrorOut.writeInt( pipelineSize ); + mirrorOut.writeBoolean( isRecovery ); + Text.writeString( mirrorOut, client ); + mirrorOut.writeBoolean(hasSrcDataNode); + if (hasSrcDataNode) { // pass src node information + srcDataNode.write(mirrorOut); + } + mirrorOut.writeInt( targets.length - 1 ); + for ( int i = 1; i < targets.length; i++ ) { + targets[i].write( mirrorOut ); + } + + blockReceiver.writeChecksumHeader(mirrorOut); + mirrorOut.flush(); + + // read connect ack (only for clients, not for replication req) + if (client.length() != 0) { + firstBadLink = Text.readString(mirrorIn); + if (LOG.isDebugEnabled() || firstBadLink.length() > 0) { + LOG.info("Datanode " + targets.length + + " got response for connect ack " + + " from downstream datanode with firstbadlink as " + + firstBadLink); + } + } + + } catch (IOException e) { + if (client.length() != 0) { + Text.writeString(replyOut, mirrorNode); + replyOut.flush(); + } + IOUtils.closeStream(mirrorOut); + mirrorOut = null; + IOUtils.closeStream(mirrorIn); + mirrorIn = null; + IOUtils.closeSocket(mirrorSock); + mirrorSock = null; + if (client.length() > 0) { + throw e; + } else { + LOG.info(datanode.dnRegistration + ":Exception transfering block " + + block + " to mirror " + mirrorNode + + ". continuing without the mirror.\n" + + StringUtils.stringifyException(e)); + } + } + } + + // send connect ack back to source (only for clients) + if (client.length() != 0) { + if (LOG.isDebugEnabled() || firstBadLink.length() > 0) { + LOG.info("Datanode " + targets.length + + " forwarding connect ack to upstream firstbadlink is " + + firstBadLink); + } + Text.writeString(replyOut, firstBadLink); + replyOut.flush(); + } + + // receive the block and mirror to the next target + String mirrorAddr = (mirrorSock == null) ? null : mirrorNode; + blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut, + mirrorAddr, null, targets.length); + + // if this write is for a replication request (and not + // from a client), then confirm block. For client-writes, + // the block is finalized in the PacketResponder. + if (client.length() == 0) { + datanode.notifyNamenodeReceivedBlock(block, DataNode.EMPTY_DEL_HINT); + LOG.info("Received block " + block + + " src: " + remoteAddress + + " dest: " + localAddress + + " of size " + block.getNumBytes()); + } + + if (datanode.blockScanner != null) { + datanode.blockScanner.addBlock(block); + } + + } catch (IOException ioe) { + LOG.info("writeBlock " + block + " received exception " + ioe); + throw ioe; + } finally { + // close all opened streams + IOUtils.closeStream(mirrorOut); + IOUtils.closeStream(mirrorIn); + IOUtils.closeStream(replyOut); + IOUtils.closeSocket(mirrorSock); + IOUtils.closeStream(blockReceiver); + } + } + + /** + * Reads the metadata and sends the data in one 'DATA_CHUNK'. 
+ * @param in + */ + void readMetadata(DataInputStream in) throws IOException { + Block block = new Block( in.readLong(), 0 , in.readLong()); + MetaDataInputStream checksumIn = null; + DataOutputStream out = null; + updateCurrentThreadName("reading metadata for block " + block); + try { + + checksumIn = datanode.data.getMetaDataInputStream(block); + + long fileSize = checksumIn.getLength(); + + if (fileSize >= 1L<<31 || fileSize <= 0) { + throw new IOException("Unexpected size for checksumFile of block" + + block); + } + + byte [] buf = new byte[(int)fileSize]; + IOUtils.readFully(checksumIn, buf, 0, buf.length); + + out = new DataOutputStream( + NetUtils.getOutputStream(s, datanode.socketWriteTimeout)); + + out.writeByte(DataTransferProtocol.OP_STATUS_SUCCESS); + out.writeInt(buf.length); + out.write(buf); + + //last DATA_CHUNK + out.writeInt(0); + } finally { + IOUtils.closeStream(out); + IOUtils.closeStream(checksumIn); + } + } + + /** + * Get block checksum (MD5 of CRC32). + * @param in + */ + void getBlockChecksum(DataInputStream in) throws IOException { + final Block block = new Block(in.readLong(), 0 , in.readLong()); + + DataOutputStream out = null; + final MetaDataInputStream metadataIn = datanode.data.getMetaDataInputStream(block); + final DataInputStream checksumIn = new DataInputStream(new BufferedInputStream( + metadataIn, BUFFER_SIZE)); + + updateCurrentThreadName("getting checksum for block " + block); + try { + //read metadata file + final BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn); + final DataChecksum checksum = header.getChecksum(); + final int bytesPerCRC = checksum.getBytesPerChecksum(); + final long crcPerBlock = (metadataIn.getLength() + - BlockMetadataHeader.getHeaderSize())/checksum.getChecksumSize(); + + //compute block checksum + final MD5Hash md5 = MD5Hash.digest(checksumIn); + + if (LOG.isDebugEnabled()) { + LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC + + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5); + } + + //write reply + out = new DataOutputStream( + NetUtils.getOutputStream(s, datanode.socketWriteTimeout)); + out.writeShort(DataTransferProtocol.OP_STATUS_SUCCESS); + out.writeInt(bytesPerCRC); + out.writeLong(crcPerBlock); + md5.write(out); + out.flush(); + } finally { + IOUtils.closeStream(out); + IOUtils.closeStream(checksumIn); + IOUtils.closeStream(metadataIn); + } + } + + /** + * Read a block from the disk and then sends it to a destination. 
+ * + * @param in The stream to read from + * @throws IOException + */ + private void copyBlock(DataInputStream in) throws IOException { + // Read in the header + long blockId = in.readLong(); // read block id + Block block = new Block(blockId, 0, in.readLong()); + + if (!dataXceiverServer.balanceThrottler.acquire()) { // not able to start + LOG.info("Not able to copy block " + blockId + " to " + + s.getRemoteSocketAddress() + " because threads quota is exceeded."); + return; + } + + BlockSender blockSender = null; + DataOutputStream reply = null; + boolean isOpSuccess = true; + updateCurrentThreadName("Copying block " + block); + try { + // check if the block exists or not + blockSender = new BlockSender(block, 0, -1, false, false, false, + datanode); + + // set up response stream + OutputStream baseStream = NetUtils.getOutputStream( + s, datanode.socketWriteTimeout); + reply = new DataOutputStream(new BufferedOutputStream( + baseStream, SMALL_BUFFER_SIZE)); + + // send block content to the target + long read = blockSender.sendBlock(reply, baseStream, + dataXceiverServer.balanceThrottler); + + datanode.myMetrics.bytesRead.inc((int) read); + datanode.myMetrics.blocksRead.inc(); + + LOG.info("Copied block " + block + " to " + s.getRemoteSocketAddress()); + } catch (IOException ioe) { + isOpSuccess = false; + throw ioe; + } finally { + dataXceiverServer.balanceThrottler.release(); + if (isOpSuccess) { + try { + // send one last byte to indicate that the resource is cleaned. + reply.writeChar('d'); + } catch (IOException ignored) { + } + } + IOUtils.closeStream(reply); + IOUtils.closeStream(blockSender); + } + } + + /** + * Receive a block and write it to disk, it then notifies the namenode to + * remove the copy from the source. + * + * @param in The stream to read from + * @throws IOException + */ + private void replaceBlock(DataInputStream in) throws IOException { + /* read header */ + long blockId = in.readLong(); + Block block = new Block(blockId, dataXceiverServer.estimateBlockSize, + in.readLong()); // block id & generation stamp + String sourceID = Text.readString(in); // read del hint + DatanodeInfo proxySource = new DatanodeInfo(); // read proxy source + proxySource.readFields(in); + + if (!dataXceiverServer.balanceThrottler.acquire()) { // not able to start + LOG.warn("Not able to receive block " + blockId + " from " + + s.getRemoteSocketAddress() + " because threads quota is exceeded."); + sendResponse(s, (short)DataTransferProtocol.OP_STATUS_ERROR, + datanode.socketWriteTimeout); + return; + } + + Socket proxySock = null; + DataOutputStream proxyOut = null; + short opStatus = DataTransferProtocol.OP_STATUS_SUCCESS; + BlockReceiver blockReceiver = null; + DataInputStream proxyReply = null; + + updateCurrentThreadName("replacing block " + block + " from " + sourceID); + try { + // get the output stream to the proxy + InetSocketAddress proxyAddr = NetUtils.createSocketAddr( + proxySource.getName()); + proxySock = datanode.newSocket(); + NetUtils.connect(proxySock, proxyAddr, datanode.socketTimeout); + proxySock.setSoTimeout(datanode.socketTimeout); + + OutputStream baseStream = NetUtils.getOutputStream(proxySock, + datanode.socketWriteTimeout); + proxyOut = new DataOutputStream( + new BufferedOutputStream(baseStream, SMALL_BUFFER_SIZE)); + + /* send request to the proxy */ + proxyOut.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION); // transfer version + proxyOut.writeByte(DataTransferProtocol.OP_COPY_BLOCK); // op code + proxyOut.writeLong(block.getBlockId()); // block id + 
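The writes at this point assemble the same OP_COPY_BLOCK request that the proxy's run()/copyBlock path parses: version, opcode, block id, and (continuing just below) the generation stamp. When the transfer finishes, the proxy releases its balancing slot and then writes a single marker character, and the receiver reads that character back before releasing its own slot and acknowledging the Balancer. Paired up and stripped of the surrounding streams, that closing exchange is (class name illustrative):

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    // Illustrative pairing of the copy/replace completion handshake.
    class MoveHandshake {
      // Proxy source side (copyBlock): slot already released, then signal completion.
      static void proxyDone(DataOutputStream reply) throws IOException {
        reply.writeChar('d');      // 'd' is the marker the patch writes
      }
      // Receiving side (replaceBlock): wait for the marker before freeing its own slot.
      static void receiverWait(DataInputStream proxyReply) throws IOException {
        proxyReply.readChar();
      }
    }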
proxyOut.writeLong(block.getGenerationStamp()); // block id + proxyOut.flush(); + + // receive the response from the proxy + proxyReply = new DataInputStream(new BufferedInputStream( + NetUtils.getInputStream(proxySock), BUFFER_SIZE)); + // open a block receiver and check if the block does not exist + blockReceiver = new BlockReceiver( + block, proxyReply, proxySock.getRemoteSocketAddress().toString(), + proxySock.getLocalSocketAddress().toString(), + false, "", null, datanode); + + // receive a block + blockReceiver.receiveBlock(null, null, null, null, + dataXceiverServer.balanceThrottler, -1); + + // notify name node + datanode.notifyNamenodeReceivedBlock(block, sourceID); + + LOG.info("Moved block " + block + + " from " + s.getRemoteSocketAddress()); + + } catch (IOException ioe) { + opStatus = DataTransferProtocol.OP_STATUS_ERROR; + throw ioe; + } finally { + // receive the last byte that indicates the proxy released its thread resource + if (opStatus == DataTransferProtocol.OP_STATUS_SUCCESS) { + try { + proxyReply.readChar(); + } catch (IOException ignored) { + } + } + + // now release the thread resource + dataXceiverServer.balanceThrottler.release(); + + // send response back + try { + sendResponse(s, opStatus, datanode.socketWriteTimeout); + } catch (IOException ioe) { + LOG.warn("Error writing reply back to " + s.getRemoteSocketAddress()); + } + IOUtils.closeStream(proxyOut); + IOUtils.closeStream(blockReceiver); + IOUtils.closeStream(proxyReply); + } + } + + /** + * Utility function for sending a response. + * @param s socket to write to + * @param opStatus status message to write + * @param timeout send timeout + **/ + private void sendResponse(Socket s, short opStatus, long timeout) + throws IOException { + DataOutputStream reply = + new DataOutputStream(NetUtils.getOutputStream(s, timeout)); + try { + reply.writeShort(opStatus); + reply.flush(); + } finally { + IOUtils.closeStream(reply); + } + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java new file mode 100644 index 0000000..2255288 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java @@ -0,0 +1,176 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.datanode; + +import java.io.IOException; +import java.net.ServerSocket; +import java.net.Socket; +import java.net.SocketTimeoutException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.server.balancer.Balancer; +import org.apache.hadoop.hdfs.util.DataTransferThrottler; +import org.apache.hadoop.util.Daemon; +import org.apache.hadoop.util.StringUtils; + +/** + * Server used for receiving/sending a block of data. + * This is created to listen for requests from clients or + * other DataNodes. This small server does not use the + * Hadoop IPC mechanism. + */ +class DataXceiverServer implements Runnable, FSConstants { + public static final Log LOG = DataNode.LOG; + + ServerSocket ss; + DataNode datanode; + // Record all sockets opend for data transfer + Map childSockets = Collections.synchronizedMap( + new HashMap()); + + /** + * Maximal number of concurrent xceivers per node. + * Enforcing the limit is required in order to avoid data-node + * running out of memory. + */ + static final int MAX_XCEIVER_COUNT = 256; + int maxXceiverCount = MAX_XCEIVER_COUNT; + + /** A manager to make sure that cluster balancing does not + * take too much resources. + * + * It limits the number of block moves for balancing and + * the total amount of bandwidth they can use. + */ + static class BlockBalanceThrottler extends DataTransferThrottler { + private int numThreads; + + /**Constructor + * + * @param bandwidth Total amount of bandwidth can be used for balancing + */ + private BlockBalanceThrottler(long bandwidth) { + super(bandwidth); + LOG.info("Balancing bandwith is "+ bandwidth + " bytes/s"); + } + + /** Check if the block move can start. + * + * Return true if the thread quota is not exceeded and + * the counter is incremented; False otherwise. + */ + synchronized boolean acquire() { + if (numThreads >= Balancer.MAX_NUM_CONCURRENT_MOVES) { + return false; + } + numThreads++; + return true; + } + + /** Mark that the move is completed. The thread counter is decremented. */ + synchronized void release() { + numThreads--; + } + } + + BlockBalanceThrottler balanceThrottler; + + /** + * We need an estimate for block size to check if the disk partition has + * enough space. For now we set it to be the default block size set + * in the server side configuration, which is not ideal because the + * default block size should be a client-size configuration. + * A better solution is to include in the header the estimated block size, + * i.e. either the actual block size or the default block size. 
+ */ + long estimateBlockSize; + + + DataXceiverServer(ServerSocket ss, Configuration conf, + DataNode datanode) { + + this.ss = ss; + this.datanode = datanode; + + this.maxXceiverCount = conf.getInt("dfs.datanode.max.xcievers", + MAX_XCEIVER_COUNT); + + this.estimateBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE); + + //set up parameter for cluster balancing + this.balanceThrottler = new BlockBalanceThrottler( + conf.getLong("dfs.balance.bandwidthPerSec", 1024L*1024)); + } + + /** + */ + public void run() { + while (datanode.shouldRun) { + try { + Socket s = ss.accept(); + s.setTcpNoDelay(true); + new Daemon(datanode.threadGroup, + new DataXceiver(s, datanode, this)).start(); + } catch (SocketTimeoutException ignored) { + // wake up to see if should continue to run + } catch (IOException ie) { + LOG.warn(datanode.dnRegistration + ":DataXceiveServer: " + + StringUtils.stringifyException(ie)); + } catch (Throwable te) { + LOG.error(datanode.dnRegistration + ":DataXceiveServer: Exiting due to:" + + StringUtils.stringifyException(te)); + datanode.shouldRun = false; + } + } + try { + ss.close(); + } catch (IOException ie) { + LOG.warn(datanode.dnRegistration + ":DataXceiveServer: " + + StringUtils.stringifyException(ie)); + } + } + + void kill() { + assert datanode.shouldRun == false : + "shoudRun should be set to false before killing"; + try { + this.ss.close(); + } catch (IOException ie) { + LOG.warn(datanode.dnRegistration + ":DataXceiveServer.kill(): " + + StringUtils.stringifyException(ie)); + } + + // close all the sockets that were accepted earlier + synchronized (childSockets) { + for (Iterator it = childSockets.values().iterator(); + it.hasNext();) { + Socket thissock = it.next(); + try { + thissock.close(); + } catch (IOException e) { + } + } + } + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DatanodeBlockInfo.java b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DatanodeBlockInfo.java new file mode 100644 index 0000000..acd19e3 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DatanodeBlockInfo.java @@ -0,0 +1,134 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.datanode; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; + +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.FileUtil.HardLink; +import org.apache.hadoop.io.IOUtils; + +/** + * This class is used by the datanode to maintain the map from a block + * to its metadata. 
+ */ +class DatanodeBlockInfo { + + private FSVolume volume; // volume where the block belongs + private File file; // block file + private boolean detached; // copy-on-write done for block + + DatanodeBlockInfo(FSVolume vol, File file) { + this.volume = vol; + this.file = file; + detached = false; + } + + DatanodeBlockInfo(FSVolume vol) { + this.volume = vol; + this.file = null; + detached = false; + } + + FSVolume getVolume() { + return volume; + } + + File getFile() { + return file; + } + + /** + * Is this block already detached? + */ + boolean isDetached() { + return detached; + } + + /** + * Block has been successfully detached + */ + void setDetached() { + detached = true; + } + + /** + * Copy specified file into a temporary file. Then rename the + * temporary file to the original name. This will cause any + * hardlinks to the original file to be removed. The temporary + * files are created in the detachDir. The temporary files will + * be recovered (especially on Windows) on datanode restart. + */ + private void detachFile(File file, Block b) throws IOException { + File tmpFile = volume.createDetachFile(b, file.getName()); + try { + IOUtils.copyBytes(new FileInputStream(file), + new FileOutputStream(tmpFile), + 16*1024, true); + if (file.length() != tmpFile.length()) { + throw new IOException("Copy of file " + file + " size " + file.length()+ + " into file " + tmpFile + + " resulted in a size of " + tmpFile.length()); + } + FileUtil.replaceFile(tmpFile, file); + } catch (IOException e) { + boolean done = tmpFile.delete(); + if (!done) { + DataNode.LOG.info("detachFile failed to delete temporary file " + + tmpFile); + } + throw e; + } + } + + /** + * Returns true if this block was copied, otherwise returns false. + */ + boolean detachBlock(Block block, int numLinks) throws IOException { + if (isDetached()) { + return false; + } + if (file == null || volume == null) { + throw new IOException("detachBlock:Block not found. " + block); + } + File meta = FSDataset.getMetaFile(file, block); + if (meta == null) { + throw new IOException("Meta file not found for block " + block); + } + + if (HardLink.getLinkCount(file) > numLinks) { + DataNode.LOG.info("CopyOnWrite for block " + block); + detachFile(file, block); + } + if (HardLink.getLinkCount(meta) > numLinks) { + detachFile(meta, block); + } + setDetached(); + return true; + } + + public String toString() { + return getClass().getSimpleName() + "(volume=" + volume + + ", file=" + file + ", detached=" + detached + ")"; + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java new file mode 100644 index 0000000..f89f819 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java @@ -0,0 +1,1620 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.datanode; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.FilenameFilter; +import java.io.IOException; +import java.io.InputStream; +import java.io.RandomAccessFile; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.TreeSet; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; + +import javax.management.NotCompliantMBeanException; +import javax.management.ObjectName; +import javax.management.StandardMBean; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.DF; +import org.apache.hadoop.fs.DU; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean; +import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; +import org.apache.hadoop.metrics.util.MBeanUtil; +import org.apache.hadoop.util.DataChecksum; +import org.apache.hadoop.util.DiskChecker; +import org.apache.hadoop.util.DiskChecker.DiskErrorException; +import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; +import org.mortbay.log.Log; + +/************************************************** + * FSDataset manages a set of data blocks. Each block + * has a unique name and an extent on disk. + * + ***************************************************/ +public class FSDataset implements FSConstants, FSDatasetInterface { + + + /** + * A node type that can be built into a tree reflecting the + * hierarchy of blocks on the local disk. + */ + class FSDir { + File dir; + int numBlocks = 0; + FSDir children[]; + int lastChildIdx = 0; + /** + */ + public FSDir(File dir) + throws IOException { + this.dir = dir; + this.children = null; + if (!dir.exists()) { + if (!dir.mkdirs()) { + throw new IOException("Mkdirs failed to create " + + dir.toString()); + } + } else { + File[] files = dir.listFiles(); + int numChildren = 0; + for (int idx = 0; idx < files.length; idx++) { + if (files[idx].isDirectory()) { + numChildren++; + } else if (Block.isBlockFilename(files[idx])) { + numBlocks++; + } + } + if (numChildren > 0) { + children = new FSDir[numChildren]; + int curdir = 0; + for (int idx = 0; idx < files.length; idx++) { + if (files[idx].isDirectory()) { + children[curdir] = new FSDir(files[idx]); + curdir++; + } + } + } + } + } + + public File addBlock(Block b, File src) throws IOException { + //First try without creating subdirectories + File file = addBlock(b, src, false, false); + return (file != null) ? file : addBlock(b, src, true, true); + } + + private File addBlock(Block b, File src, boolean createOk, + boolean resetIdx) throws IOException { + if (numBlocks < maxBlocksPerDir) { + File dest = new File(dir, b.getBlockName()); + File metaData = getMetaFile( src, b ); + File newmeta = getMetaFile(dest, b); + if ( ! metaData.renameTo( newmeta ) || + ! 
src.renameTo( dest ) ) { + throw new IOException( "could not move files for " + b + + " from tmp to " + + dest.getAbsolutePath() ); + } + if (DataNode.LOG.isDebugEnabled()) { + DataNode.LOG.debug("addBlock: Moved " + metaData + " to " + newmeta); + DataNode.LOG.debug("addBlock: Moved " + src + " to " + dest); + } + + numBlocks += 1; + return dest; + } + + if (lastChildIdx < 0 && resetIdx) { + //reset so that all children will be checked + lastChildIdx = random.nextInt(children.length); + } + + if (lastChildIdx >= 0 && children != null) { + //Check if any child-tree has room for a block. + for (int i=0; i < children.length; i++) { + int idx = (lastChildIdx + i)%children.length; + File file = children[idx].addBlock(b, src, false, resetIdx); + if (file != null) { + lastChildIdx = idx; + return file; + } + } + lastChildIdx = -1; + } + + if (!createOk) { + return null; + } + + if (children == null || children.length == 0) { + children = new FSDir[maxBlocksPerDir]; + for (int idx = 0; idx < maxBlocksPerDir; idx++) { + children[idx] = new FSDir(new File(dir, DataStorage.BLOCK_SUBDIR_PREFIX+idx)); + } + } + + //now pick a child randomly for creating a new set of subdirs. + lastChildIdx = random.nextInt(children.length); + return children[ lastChildIdx ].addBlock(b, src, true, false); + } + + /** Find the metadata file for the specified block file. + * Return the generation stamp from the name of the metafile. + */ + long getGenerationStampFromFile(File[] listdir, File blockFile) { + String blockName = blockFile.getName(); + for (int j = 0; j < listdir.length; j++) { + String path = listdir[j].getName(); + if (!path.startsWith(blockName)) { + continue; + } + String[] vals = path.split("_"); + if (vals.length != 3) { // blk, blkid, genstamp.meta + continue; + } + String[] str = vals[2].split("\\."); + if (str.length != 2) { + continue; + } + return Long.parseLong(str[0]); + } + DataNode.LOG.warn("Block " + blockFile + + " does not have a metafile!"); + return Block.GRANDFATHER_GENERATION_STAMP; + } + + /** + * Populate the given blockSet with any child blocks + * found at this node. 
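+     * Traversal is depth-first: each child directory is scanned before the
+     * block files in this directory are added, and each block's generation
+     * stamp is recovered from the name of its ".meta" companion file.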
+ */ + public void getBlockInfo(TreeSet blockSet) { + if (children != null) { + for (int i = 0; i < children.length; i++) { + children[i].getBlockInfo(blockSet); + } + } + + File blockFiles[] = dir.listFiles(); + for (int i = 0; i < blockFiles.length; i++) { + if (Block.isBlockFilename(blockFiles[i])) { + long genStamp = getGenerationStampFromFile(blockFiles, blockFiles[i]); + blockSet.add(new Block(blockFiles[i], blockFiles[i].length(), genStamp)); + } + } + } + + void getVolumeMap(HashMap volumeMap, FSVolume volume) { + if (children != null) { + for (int i = 0; i < children.length; i++) { + children[i].getVolumeMap(volumeMap, volume); + } + } + + File blockFiles[] = dir.listFiles(); + for (int i = 0; i < blockFiles.length; i++) { + if (Block.isBlockFilename(blockFiles[i])) { + long genStamp = getGenerationStampFromFile(blockFiles, blockFiles[i]); + volumeMap.put(new Block(blockFiles[i], blockFiles[i].length(), genStamp), + new DatanodeBlockInfo(volume, blockFiles[i])); + } + } + } + + /** + * check if a data diretory is healthy + * @throws DiskErrorException + */ + public void checkDirTree() throws DiskErrorException { + DiskChecker.checkDir(dir); + + if (children != null) { + for (int i = 0; i < children.length; i++) { + children[i].checkDirTree(); + } + } + } + + void clearPath(File f) { + String root = dir.getAbsolutePath(); + String dir = f.getAbsolutePath(); + if (dir.startsWith(root)) { + String[] dirNames = dir.substring(root.length()). + split(File.separator + "subdir"); + if (clearPath(f, dirNames, 1)) + return; + } + clearPath(f, null, -1); + } + + /* + * dirNames is an array of string integers derived from + * usual directory structure data/subdirN/subdirXY/subdirM ... + * If dirName array is non-null, we only check the child at + * the children[dirNames[idx]]. This avoids iterating over + * children in common case. If directory structure changes + * in later versions, we need to revisit this. + */ + private boolean clearPath(File f, String[] dirNames, int idx) { + if ((dirNames == null || idx == dirNames.length) && + dir.compareTo(f) == 0) { + numBlocks--; + return true; + } + + if (dirNames != null) { + //guess the child index from the directory name + if (idx > (dirNames.length - 1) || children == null) { + return false; + } + int childIdx; + try { + childIdx = Integer.parseInt(dirNames[idx]); + } catch (NumberFormatException ignored) { + // layout changed? we could print a warning. + return false; + } + return (childIdx >= 0 && childIdx < children.length) ? + children[childIdx].clearPath(f, dirNames, idx+1) : false; + } + + //guesses failed. back to blind iteration. + if (children != null) { + for(int i=0; i < children.length; i++) { + if (children[i].clearPath(f, null, -1)){ + return true; + } + } + } + return false; + } + + public String toString() { + return "FSDir{" + + "dir=" + dir + + ", children=" + (children == null ? 
null : Arrays.asList(children)) + + "}"; + } + } + + class FSVolume { + private File currentDir; + private FSDir dataDir; + private File tmpDir; + private File detachDir; // copy on write for blocks in snapshot + private DF usage; + private DU dfsUsage; + private long reserved; + + + FSVolume(File currentDir, Configuration conf) throws IOException { + this.reserved = conf.getLong("dfs.datanode.du.reserved", 0); + this.currentDir = currentDir; + boolean supportAppends = conf.getBoolean("dfs.support.append", false); + File parent = currentDir.getParentFile(); + + this.detachDir = new File(parent, "detach"); + if (detachDir.exists()) { + recoverDetachedBlocks(currentDir, detachDir); + } + + // Files that were being written when the datanode was last shutdown + // are now moved back to the data directory. It is possible that + // in the future, we might want to do some sort of datanode-local + // recovery for these blocks. For example, crc validation. + // + this.tmpDir = new File(parent, "tmp"); + if (tmpDir.exists()) { + if (supportAppends) { + recoverDetachedBlocks(currentDir, tmpDir); + } else { + FileUtil.fullyDelete(tmpDir); + } + } + this.dataDir = new FSDir(currentDir); + if (!tmpDir.mkdirs()) { + if (!tmpDir.isDirectory()) { + throw new IOException("Mkdirs failed to create " + tmpDir.toString()); + } + } + if (!detachDir.mkdirs()) { + if (!detachDir.isDirectory()) { + throw new IOException("Mkdirs failed to create " + detachDir.toString()); + } + } + this.usage = new DF(parent, conf); + this.dfsUsage = new DU(parent, conf); + this.dfsUsage.start(); + } + + File getCurrentDir() { + return currentDir; + } + + void decDfsUsed(long value) { + // The caller to this method (BlockFileDeleteTask.run()) does + // not have locked FSDataset.this yet. + synchronized(FSDataset.this) { + dfsUsage.decDfsUsed(value); + } + } + + long getDfsUsed() throws IOException { + return dfsUsage.getUsed(); + } + + long getCapacity() throws IOException { + if (reserved > usage.getCapacity()) { + return 0; + } + + return usage.getCapacity()-reserved; + } + + long getAvailable() throws IOException { + long remaining = getCapacity()-getDfsUsed(); + long available = usage.getAvailable(); + if (remaining>available) { + remaining = available; + } + return (remaining > 0) ? remaining : 0; + } + + String getMount() throws IOException { + return usage.getMount(); + } + + File getDir() { + return dataDir.dir; + } + + /** + * Temporary files. They get moved to the real block directory either when + * the block is finalized or the datanode restarts. + */ + File createTmpFile(Block b) throws IOException { + File f = new File(tmpDir, b.getBlockName()); + return createTmpFile(b, f); + } + + /** + * Returns the name of the temporary file for this block. + */ + File getTmpFile(Block b) throws IOException { + File f = new File(tmpDir, b.getBlockName()); + return f; + } + + /** + * Files used for copy-on-write. They need recovery when datanode + * restarts. + */ + File createDetachFile(Block b, String filename) throws IOException { + File f = new File(detachDir, filename); + return createTmpFile(b, f); + } + + private File createTmpFile(Block b, File f) throws IOException { + if (f.exists()) { + throw new IOException("Unexpected problem in creating temporary file for "+ + b + ". 
File " + f + " should not be present, but is."); + } + // Create the zero-length temp file + // + boolean fileCreated = false; + try { + fileCreated = f.createNewFile(); + } catch (IOException ioe) { + throw (IOException)new IOException(DISK_ERROR +f).initCause(ioe); + } + if (!fileCreated) { + throw new IOException("Unexpected problem in creating temporary file for "+ + b + ". File " + f + " should be creatable, but is already present."); + } + return f; + } + + File addBlock(Block b, File f) throws IOException { + File blockFile = dataDir.addBlock(b, f); + File metaFile = getMetaFile( blockFile , b); + dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length()); + return blockFile; + } + + void checkDirs() throws DiskErrorException { + dataDir.checkDirTree(); + DiskChecker.checkDir(tmpDir); + } + + void getBlockInfo(TreeSet blockSet) { + dataDir.getBlockInfo(blockSet); + } + + void getVolumeMap(HashMap volumeMap) { + dataDir.getVolumeMap(volumeMap, this); + } + + void clearPath(File f) { + dataDir.clearPath(f); + } + + public String toString() { + return dataDir.dir.getAbsolutePath(); + } + + /** + * Recover detached files on datanode restart. If a detached block + * does not exist in the original directory, then it is moved to the + * original directory. + */ + private void recoverDetachedBlocks(File dataDir, File dir) + throws IOException { + File contents[] = dir.listFiles(); + if (contents == null) { + return; + } + for (int i = 0; i < contents.length; i++) { + if (!contents[i].isFile()) { + throw new IOException ("Found " + contents[i] + " in " + dir + + " but it is not a file."); + } + + // + // If the original block file still exists, then no recovery + // is needed. + // + File blk = new File(dataDir, contents[i].getName()); + if (!blk.exists()) { + if (!contents[i].renameTo(blk)) { + throw new IOException("Unable to recover detached file " + + contents[i]); + } + continue; + } + if (!contents[i].delete()) { + throw new IOException("Unable to cleanup detached file " + + contents[i]); + } + } + } + } + + static class FSVolumeSet { + FSVolume[] volumes = null; + int curVolume = 0; + ExecutorService scannersExecutor; + + FSVolumeSet(FSVolume[] volumes, int threads) { + this.volumes = volumes; + if (threads > 1) { + scannersExecutor = Executors.newFixedThreadPool(threads); + } + } + + private int numberOfVolumes() { + return volumes.length; + } + + synchronized FSVolume getNextVolume(long blockSize) throws IOException { + + if(volumes.length < 1) { + throw new DiskOutOfSpaceException("No more available volumes"); + } + + // since volumes could've been removed because of the failure + // make sure we are not out of bounds + if(curVolume >= volumes.length) { + curVolume = 0; + } + + int startVolume = curVolume; + + while (true) { + FSVolume volume = volumes[curVolume]; + curVolume = (curVolume + 1) % volumes.length; + if (volume.getAvailable() > blockSize) { return volume; } + if (curVolume == startVolume) { + throw new DiskOutOfSpaceException("Insufficient space for an additional block"); + } + } + } + + long getDfsUsed() throws IOException { + long dfsUsed = 0L; + for (int idx = 0; idx < volumes.length; idx++) { + dfsUsed += volumes[idx].getDfsUsed(); + } + return dfsUsed; + } + + synchronized long getCapacity() throws IOException { + long capacity = 0L; + for (int idx = 0; idx < volumes.length; idx++) { + capacity += volumes[idx].getCapacity(); + } + return capacity; + } + + synchronized long getRemaining() throws IOException { + long remaining = 0L; + for (int idx = 0; idx < 
volumes.length; idx++) { + remaining += volumes[idx].getAvailable(); + } + return remaining; + } + + synchronized void getBlockInfo(TreeSet blockSet) { + long startTime = System.currentTimeMillis(); + if (scannersExecutor != null) { + List>> builders = + new ArrayList>>(); + for (int idx = 0; idx < volumes.length; idx++) { + builders.add(scannersExecutor.submit( + new BlockInfoBuilder(volumes[idx]))); + } + for (Future> future : builders) { + try { + blockSet.addAll(future.get()); + } catch (ExecutionException ex) { + DataNode.LOG.error("Error scanning volumes ", ex.getCause()); + } catch (InterruptedException iex) { + DataNode.LOG.error("Error waiting for scan", iex); + } + } + } else { + for (int idx = 0; idx < volumes.length; idx++) { + volumes[idx].getBlockInfo(blockSet); + } + } + long scanTime = (System.currentTimeMillis() - startTime)/1000; + DataNode.LOG.info("Finished generating block report for " + + volumes.length + " volumes in " + scanTime + " seconds"); + } + + synchronized void getVolumeMap(HashMap volumeMap) { + for (int idx = 0; idx < volumes.length; idx++) { + volumes[idx].getVolumeMap(volumeMap); + } + } + + /** + * goes over all the volumes and checkDir eachone of them + * if one throws DiskErrorException - removes from the list of active + * volumes. + * @return list of all the removed volumes + */ + synchronized List checkDirs() { + + ArrayList removed_vols = null; + + for (int idx = 0; idx < volumes.length; idx++) { + FSVolume fsv = volumes[idx]; + try { + fsv.checkDirs(); + } catch (DiskErrorException e) { + DataNode.LOG.warn("Removing failed volume " + fsv + ": ",e); + if(removed_vols == null) { + removed_vols = new ArrayList(1); + } + removed_vols.add(volumes[idx]); + volumes[idx] = null; //remove the volume + } + } + + // repair array - copy non null elements + int removed_size = (removed_vols==null)? 0 : removed_vols.size(); + if(removed_size > 0) { + FSVolume fsvs[] = new FSVolume [volumes.length-removed_size]; + for(int idx=0,idy=0; idx> { + FSVolume volume; + + public BlockInfoBuilder(FSVolume volume) { + this.volume = volume; + } + + @Override + public TreeSet call() throws Exception { + TreeSet result = new TreeSet(); + volume.getBlockInfo(result); + return result; + } + } + ////////////////////////////////////////////////////// + // + // FSDataSet + // + ////////////////////////////////////////////////////// + + //Find better place? 
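+  // Naming convention on disk: a block's data is stored in a file named
+  // "blk_<blockId>" and its checksum metadata in
+  // "blk_<blockId>_<generationStamp>.meta" (see getMetaFileName below).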
+ public static final String METADATA_EXTENSION = ".meta"; + public static final short METADATA_VERSION = 1; + + + static class ActiveFile { + final File file; + final List threads = new ArrayList(2); + + ActiveFile(File f, List list) { + file = f; + if (list != null) { + threads.addAll(list); + } + threads.add(Thread.currentThread()); + } + + public String toString() { + return getClass().getSimpleName() + "(file=" + file + + ", threads=" + threads + ")"; + } + } + + static String getMetaFileName(String blockFileName, long genStamp) { + return blockFileName + "_" + genStamp + METADATA_EXTENSION; + } + + static File getMetaFile(File f , Block b) { + return new File(getMetaFileName(f.getAbsolutePath(), + b.getGenerationStamp())); + } + protected File getMetaFile(Block b) throws IOException { + return getMetaFile(getBlockFile(b), b); + } + + /** Find the corresponding meta data file from a given block file */ + private static File findMetaFile(final File blockFile) throws IOException { + final String prefix = blockFile.getName() + "_"; + final File parent = blockFile.getParentFile(); + File[] matches = parent.listFiles(new FilenameFilter() { + public boolean accept(File dir, String name) { + return dir.equals(parent) + && name.startsWith(prefix) && name.endsWith(METADATA_EXTENSION); + } + }); + + if (matches == null || matches.length == 0) { + throw new IOException("Meta file not found, blockFile=" + blockFile); + } + else if (matches.length > 1) { + throw new IOException("Found more than one meta files: " + + Arrays.asList(matches)); + } + return matches[0]; + } + + /** Find the corresponding meta data file from a given block file */ + private static long parseGenerationStamp(File blockFile, File metaFile + ) throws IOException { + String metaname = metaFile.getName(); + String gs = metaname.substring(blockFile.getName().length() + 1, + metaname.length() - METADATA_EXTENSION.length()); + try { + return Long.parseLong(gs); + } catch(NumberFormatException nfe) { + throw (IOException)new IOException("blockFile=" + blockFile + + ", metaFile=" + metaFile).initCause(nfe); + } + } + + /** Return the block file for the given ID */ + public File findBlockFile(long blockId) { + final Block b = new Block(blockId); + File blockfile = null; + ActiveFile activefile = ongoingCreates.get(b); + if (activefile != null) { + blockfile = activefile.file; + } + if (blockfile == null) { + blockfile = getFile(b); + } + if (blockfile == null) { + if (DataNode.LOG.isDebugEnabled()) { + DataNode.LOG.debug("ongoingCreates=" + ongoingCreates); + DataNode.LOG.debug("volumeMap=" + volumeMap); + } + } + return blockfile; + } + + /** {@inheritDoc} */ + public synchronized Block getStoredBlock(long blkid) throws IOException { + File blockfile = findBlockFile(blkid); + if (blockfile == null) { + return null; + } + File metafile = findMetaFile(blockfile); + return new Block(blkid, blockfile.length(), + parseGenerationStamp(blockfile, metafile)); + } + + public boolean metaFileExists(Block b) throws IOException { + return getMetaFile(b).exists(); + } + + public long getMetaDataLength(Block b) throws IOException { + File checksumFile = getMetaFile( b ); + return checksumFile.length(); + } + + public MetaDataInputStream getMetaDataInputStream(Block b) + throws IOException { + File checksumFile = getMetaFile( b ); + return new MetaDataInputStream(new FileInputStream(checksumFile), + checksumFile.length()); + } + + FSVolumeSet volumes; + private HashMap ongoingCreates = new HashMap(); + private int maxBlocksPerDir = 0; + private 
HashMap volumeMap = null; + static Random random = new Random(); + FSDatasetAsyncDiskService asyncDiskService; + + /** + * An FSDataset has a directory where it loads its data files. + */ + public FSDataset(DataStorage storage, Configuration conf) throws IOException { + this.maxBlocksPerDir = conf.getInt("dfs.datanode.numblocks", 64); + FSVolume[] volArray = new FSVolume[storage.getNumStorageDirs()]; + for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) { + volArray[idx] = new FSVolume(storage.getStorageDir(idx).getCurrentDir(), conf); + } + int threads = conf.getInt("dfs.datanode.blockscanner.threads", 1); + volumes = new FSVolumeSet(volArray, threads); + volumeMap = new HashMap(); + volumes.getVolumeMap(volumeMap); + File[] roots = new File[storage.getNumStorageDirs()]; + for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) { + roots[idx] = storage.getStorageDir(idx).getCurrentDir(); + } + asyncDiskService = new FSDatasetAsyncDiskService(roots, conf); + registerMBean(storage.getStorageID()); + } + + /** + * Return the total space used by dfs datanode + */ + public long getDfsUsed() throws IOException { + return volumes.getDfsUsed(); + } + /** + * Return true - if there are still valid volumes + * on the DataNode + */ + public boolean hasEnoughResource(){ + return volumes.numberOfVolumes() >= MIN_NUM_OF_VALID_VOLUMES; + } + + /** + * Return total capacity, used and unused + */ + public long getCapacity() throws IOException { + return volumes.getCapacity(); + } + + /** + * Return how many bytes can still be stored in the FSDataset + */ + public long getRemaining() throws IOException { + return volumes.getRemaining(); + } + + /** + * Find the block's on-disk length + */ + public long getLength(Block b) throws IOException { + return getBlockFile(b).length(); + } + + /** + * Get File name for a given block. 
+ */ + public synchronized File getBlockFile(Block b) throws IOException { + File f = validateBlockFile(b); + if(f == null) { + if (InterDatanodeProtocol.LOG.isDebugEnabled()) { + InterDatanodeProtocol.LOG.debug("b=" + b + ", volumeMap=" + volumeMap); + } + throw new IOException("Block " + b + " is not valid."); + } + return f; + } + + public synchronized InputStream getBlockInputStream(Block b) throws IOException { + return new FileInputStream(getBlockFile(b)); + } + + public synchronized InputStream getBlockInputStream(Block b, long seekOffset) throws IOException { + File blockFile = getBlockFile(b); + RandomAccessFile blockInFile = new RandomAccessFile(blockFile, "r"); + if (seekOffset > 0) { + blockInFile.seek(seekOffset); + } + return new FileInputStream(blockInFile.getFD()); + } + + /** + * Returns handles to the block file and its metadata file + */ + public synchronized BlockInputStreams getTmpInputStreams(Block b, + long blkOffset, long ckoff) throws IOException { + + DatanodeBlockInfo info = volumeMap.get(b); + if (info == null) { + throw new IOException("Block " + b + " does not exist in volumeMap."); + } + FSVolume v = info.getVolume(); + File blockFile = v.getTmpFile(b); + RandomAccessFile blockInFile = new RandomAccessFile(blockFile, "r"); + if (blkOffset > 0) { + blockInFile.seek(blkOffset); + } + File metaFile = getMetaFile(blockFile, b); + RandomAccessFile metaInFile = new RandomAccessFile(metaFile, "r"); + if (ckoff > 0) { + metaInFile.seek(ckoff); + } + return new BlockInputStreams(new FileInputStream(blockInFile.getFD()), + new FileInputStream(metaInFile.getFD())); + } + + private BlockWriteStreams createBlockWriteStreams( File f , File metafile) throws IOException { + return new BlockWriteStreams(new FileOutputStream(new RandomAccessFile( f , "rw" ).getFD()), + new FileOutputStream( new RandomAccessFile( metafile , "rw" ).getFD() )); + + } + + /** + * Make a copy of the block if this block is linked to an existing + * snapshot. This ensures that modifying this block does not modify + * data in any existing snapshots. + * @param block Block + * @param numLinks Detach if the number of links exceed this value + * @throws IOException + * @return - true if the specified block was detached + */ + public boolean detachBlock(Block block, int numLinks) throws IOException { + DatanodeBlockInfo info = null; + + synchronized (this) { + info = volumeMap.get(block); + } + return info.detachBlock(block, numLinks); + } + + static private void updateBlockMap(Map blockmap, + Block oldblock, Block newblock) throws IOException { + if (blockmap.containsKey(oldblock)) { + T value = blockmap.remove(oldblock); + blockmap.put(newblock, value); + } + } + + /** {@inheritDoc} */ + public void updateBlock(Block oldblock, Block newblock) throws IOException { + if (oldblock.getBlockId() != newblock.getBlockId()) { + throw new IOException("Cannot update oldblock (=" + oldblock + + ") to newblock (=" + newblock + ")."); + } + + for(;;) { + final List threads = tryUpdateBlock(oldblock, newblock); + if (threads == null) { + return; + } + + // interrupt and wait for all ongoing create threads + for(Thread t : threads) { + t.interrupt(); + } + for(Thread t : threads) { + try { + t.join(); + } catch (InterruptedException e) { + DataNode.LOG.warn("interruptOngoingCreates: t=" + t, e); + } + } + } + } + + /** + * Try to update an old block to a new block. + * If there are ongoing create threads running for the old block, + * the threads will be returned without updating the block. 
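+   * The caller (updateBlock) interrupts and joins the returned threads and
+   * then retries, so the update never races with an active writer.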
+ * + * @return ongoing create threads if there is any. Otherwise, return null. + */ + private synchronized List tryUpdateBlock( + Block oldblock, Block newblock) throws IOException { + //check ongoing create threads + final ActiveFile activefile = ongoingCreates.get(oldblock); + if (activefile != null && !activefile.threads.isEmpty()) { + //remove dead threads + for(Iterator i = activefile.threads.iterator(); i.hasNext(); ) { + final Thread t = i.next(); + if (!t.isAlive()) { + i.remove(); + } + } + + //return living threads + if (!activefile.threads.isEmpty()) { + return new ArrayList(activefile.threads); + } + } + + //No ongoing create threads is alive. Update block. + File blockFile = findBlockFile(oldblock.getBlockId()); + if (blockFile == null) { + throw new IOException("Block " + oldblock + " does not exist."); + } + + File oldMetaFile = findMetaFile(blockFile); + long oldgs = parseGenerationStamp(blockFile, oldMetaFile); + + //rename meta file to a tmp file + File tmpMetaFile = new File(oldMetaFile.getParent(), + oldMetaFile.getName()+"_tmp" + newblock.getGenerationStamp()); + if (!oldMetaFile.renameTo(tmpMetaFile)){ + throw new IOException("Cannot rename block meta file to " + tmpMetaFile); + } + + //update generation stamp + if (oldgs > newblock.getGenerationStamp()) { + throw new IOException("Cannot update block (id=" + newblock.getBlockId() + + ") generation stamp from " + oldgs + + " to " + newblock.getGenerationStamp()); + } + + //update length + if (newblock.getNumBytes() > oldblock.getNumBytes()) { + throw new IOException("Cannot update block file (=" + blockFile + + ") length from " + oldblock.getNumBytes() + " to " + newblock.getNumBytes()); + } + if (newblock.getNumBytes() < oldblock.getNumBytes()) { + truncateBlock(blockFile, tmpMetaFile, oldblock.getNumBytes(), newblock.getNumBytes()); + } + + //rename the tmp file to the new meta file (with new generation stamp) + File newMetaFile = getMetaFile(blockFile, newblock); + if (!tmpMetaFile.renameTo(newMetaFile)) { + throw new IOException("Cannot rename tmp meta file to " + newMetaFile); + } + + updateBlockMap(ongoingCreates, oldblock, newblock); + updateBlockMap(volumeMap, oldblock, newblock); + + // paranoia! verify that the contents of the stored block + // matches the block file on disk. 
+    validateBlockMetadata(newblock);
+    return null;
+  }
+
+  static private void truncateBlock(File blockFile, File metaFile,
+      long oldlen, long newlen) throws IOException {
+    if (newlen == oldlen) {
+      return;
+    }
+    if (newlen > oldlen) {
+      throw new IOException("Cannot truncate block from oldlen (=" + oldlen
+          + ") to newlen (=" + newlen + ")");
+    }
+
+    DataChecksum dcs = BlockMetadataHeader.readHeader(metaFile).getChecksum();
+    int checksumsize = dcs.getChecksumSize();
+    int bpc = dcs.getBytesPerChecksum();
+    long n = (newlen - 1)/bpc + 1;
+    long newmetalen = BlockMetadataHeader.getHeaderSize() + n*checksumsize;
+    long lastchunkoffset = (n - 1)*bpc;
+    int lastchunksize = (int)(newlen - lastchunkoffset);
+    byte[] b = new byte[Math.max(lastchunksize, checksumsize)];
+
+    RandomAccessFile blockRAF = new RandomAccessFile(blockFile, "rw");
+    try {
+      //truncate blockFile
+      blockRAF.setLength(newlen);
+
+      //read last chunk
+      blockRAF.seek(lastchunkoffset);
+      blockRAF.readFully(b, 0, lastchunksize);
+    } finally {
+      blockRAF.close();
+    }
+
+    //compute checksum
+    dcs.update(b, 0, lastchunksize);
+    dcs.writeValue(b, 0, false);
+
+    //update metaFile
+    RandomAccessFile metaRAF = new RandomAccessFile(metaFile, "rw");
+    try {
+      metaRAF.setLength(newmetalen);
+      metaRAF.seek(newmetalen - checksumsize);
+      metaRAF.write(b, 0, checksumsize);
+    } finally {
+      metaRAF.close();
+    }
+  }
+
+  private final static String DISK_ERROR = "Possible disk error on file creation: ";
+  /** Get the cause of an I/O exception if caused by a possible disk error
+   * @param ioe an I/O exception
+   * @return cause if the I/O exception is caused by a possible disk error;
+   *         null otherwise.
+   */
+  static IOException getCauseIfDiskError(IOException ioe) {
+    if (ioe.getMessage()!=null && ioe.getMessage().startsWith(DISK_ERROR)) {
+      return (IOException)ioe.getCause();
+    } else {
+      return null;
+    }
+  }
+
+  /**
+   * Start writing to a block file. If isRecovery is true and the block
+   * pre-exists, then we kill all other threads that might be writing to this
+   * block, and then reopen the file.
+   */
+  public BlockWriteStreams writeToBlock(Block b, boolean isRecovery) throws IOException {
+    //
+    // Make sure the block isn't a valid one - we're still creating it!
+    //
+    if (isValidBlock(b)) {
+      if (!isRecovery) {
+        throw new BlockAlreadyExistsException("Block " + b + " is valid, and cannot be written to.");
+      }
+      // The block may have been finalized because all packets were processed
+      // at the Datanode but the acks for some of them never reached the
+      // client; the client then re-opens the connection and retries sending
+      // those packets. The other reason is that an "append" is occurring to
+      // this block.
+      detachBlock(b, 1);
+    }
+    long blockSize = b.getNumBytes();
+
+    //
+    // Serialize access to /tmp, and check if file already there.
+    //
+    File f = null;
+    List<Thread> threads = null;
+    synchronized (this) {
+      //
+      // Is it already in the create process?
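+      // If it is, this must be a recovery: the current writer threads are
+      // interrupted and their temporary file is reused. Otherwise a fresh
+      // tmp file is created on the next volume with enough space, or, for an
+      // append to a finalized block, the block and meta files are first moved
+      // back into the tmp directory.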
+ // + ActiveFile activeFile = ongoingCreates.get(b); + if (activeFile != null) { + f = activeFile.file; + threads = activeFile.threads; + + if (!isRecovery) { + throw new BlockAlreadyExistsException("Block " + b + + " has already been started (though not completed), and thus cannot be created."); + } else { + for (Thread thread:threads) { + thread.interrupt(); + } + } + ongoingCreates.remove(b); + } + FSVolume v = null; + if (!isRecovery) { + v = volumes.getNextVolume(blockSize); + // create temporary file to hold block in the designated volume + f = createTmpFile(v, b); + volumeMap.put(b, new DatanodeBlockInfo(v)); + } else if (f != null) { + DataNode.LOG.info("Reopen already-open Block for append " + b); + // create or reuse temporary file to hold block in the designated volume + v = volumeMap.get(b).getVolume(); + volumeMap.put(b, new DatanodeBlockInfo(v)); + } else { + // reopening block for appending to it. + DataNode.LOG.info("Reopen Block for append " + b); + v = volumeMap.get(b).getVolume(); + f = createTmpFile(v, b); + File blkfile = getBlockFile(b); + File oldmeta = getMetaFile(b); + File newmeta = getMetaFile(f, b); + + // rename meta file to tmp directory + DataNode.LOG.debug("Renaming " + oldmeta + " to " + newmeta); + if (!oldmeta.renameTo(newmeta)) { + throw new IOException("Block " + b + " reopen failed. " + + " Unable to move meta file " + oldmeta + + " to tmp dir " + newmeta); + } + + // rename block file to tmp directory + DataNode.LOG.debug("Renaming " + blkfile + " to " + f); + if (!blkfile.renameTo(f)) { + if (!f.delete()) { + throw new IOException("Block " + b + " reopen failed. " + + " Unable to remove file " + f); + } + if (!blkfile.renameTo(f)) { + throw new IOException("Block " + b + " reopen failed. " + + " Unable to move block file " + blkfile + + " to tmp dir " + f); + } + } + volumeMap.put(b, new DatanodeBlockInfo(v)); + } + if (f == null) { + DataNode.LOG.warn("Block " + b + " reopen failed " + + " Unable to locate tmp file."); + throw new IOException("Block " + b + " reopen failed " + + " Unable to locate tmp file."); + } + ongoingCreates.put(b, new ActiveFile(f, threads)); + } + + try { + if (threads != null) { + for (Thread thread:threads) { + thread.join(); + } + } + } catch (InterruptedException e) { + throw new IOException("Recovery waiting for thread interrupted."); + } + + // + // Finally, allow a writer to the block file + // REMIND - mjc - make this a filter stream that enforces a max + // block size, so clients can't go crazy + // + File metafile = getMetaFile(f, b); + DataNode.LOG.debug("writeTo blockfile is " + f + " of size " + f.length()); + DataNode.LOG.debug("writeTo metafile is " + metafile + " of size " + metafile.length()); + return createBlockWriteStreams( f , metafile); + } + + /** + * Retrieves the offset in the block to which the + * the next write will write data to. + */ + public long getChannelPosition(Block b, BlockWriteStreams streams) + throws IOException { + FileOutputStream file = (FileOutputStream) streams.dataOut; + return file.getChannel().position(); + } + + /** + * Sets the offset in the block to which the + * the next write will write data to. 
+ */ + public void setChannelPosition(Block b, BlockWriteStreams streams, + long dataOffset, long ckOffset) + throws IOException { + long size = 0; + synchronized (this) { + FSVolume vol = volumeMap.get(b).getVolume(); + size = vol.getTmpFile(b).length(); + } + if (size < dataOffset) { + String msg = "Trying to change block file offset of block " + b + + " to " + dataOffset + + " but actual size of file is " + + size; + throw new IOException(msg); + } + FileOutputStream file = (FileOutputStream) streams.dataOut; + file.getChannel().position(dataOffset); + file = (FileOutputStream) streams.checksumOut; + file.getChannel().position(ckOffset); + } + + synchronized File createTmpFile( FSVolume vol, Block blk ) throws IOException { + if ( vol == null ) { + vol = volumeMap.get( blk ).getVolume(); + if ( vol == null ) { + throw new IOException("Could not find volume for block " + blk); + } + } + return vol.createTmpFile(blk); + } + + // + // REMIND - mjc - eventually we should have a timeout system + // in place to clean up block files left by abandoned clients. + // We should have some timer in place, so that if a blockfile + // is created but non-valid, and has been idle for >48 hours, + // we can GC it safely. + // + + /** + * Complete the block write! + */ + public synchronized void finalizeBlock(Block b) throws IOException { + ActiveFile activeFile = ongoingCreates.get(b); + if (activeFile == null) { + throw new IOException("Block " + b + " is already finalized."); + } + File f = activeFile.file; + if (f == null || !f.exists()) { + throw new IOException("No temporary file " + f + " for block " + b); + } + FSVolume v = volumeMap.get(b).getVolume(); + if (v == null) { + throw new IOException("No volume for temporary file " + f + + " for block " + b); + } + + File dest = null; + dest = v.addBlock(b, f); + volumeMap.put(b, new DatanodeBlockInfo(v, dest)); + ongoingCreates.remove(b); + } + + /** + * Remove the temporary block file (if any) + */ + public synchronized void unfinalizeBlock(Block b) throws IOException { + // remove the block from in-memory data structure + ActiveFile activefile = ongoingCreates.remove(b); + if (activefile == null) { + return; + } + volumeMap.remove(b); + + // delete the on-disk temp file + if (delBlockFromDisk(activefile.file, getMetaFile(activefile.file, b), b)) { + DataNode.LOG.warn("Block " + b + " unfinalized and removed. " ); + } + } + + /** + * Remove a block from disk + * @param blockFile block file + * @param metaFile block meta file + * @param b a block + * @return true if on-disk files are deleted; false otherwise + */ + private boolean delBlockFromDisk(File blockFile, File metaFile, Block b) { + if (blockFile == null) { + DataNode.LOG.warn("No file exists for block: " + b); + return true; + } + + if (!blockFile.delete()) { + DataNode.LOG.warn("Not able to delete the block file: " + blockFile); + return false; + } else { // remove the meta file + if (metaFile != null && !metaFile.delete()) { + DataNode.LOG.warn( + "Not able to delete the meta block file: " + metaFile); + return false; + } + } + return true; + } + + /** + * Return a table of block data + */ + public Block[] getBlockReport() { + TreeSet blockSet = new TreeSet(); + volumes.getBlockInfo(blockSet); + Block blockTable[] = new Block[blockSet.size()]; + int i = 0; + for (Iterator it = blockSet.iterator(); it.hasNext(); i++) { + blockTable[i] = it.next(); + } + return blockTable; + } + + /** + * Check whether the given block is a valid one. 
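+   * A block is valid when it is present in the volume map and its block file
+   * still exists on disk (see validateBlockFile).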
+ */ + public boolean isValidBlock(Block b) { + File f = null;; + try { + f = validateBlockFile(b); + } catch(IOException e) { + Log.warn("Block " + b + " is not valid:",e); + } + + return f != null; + } + + /** + * Find the file corresponding to the block and return it if it exists. + */ + File validateBlockFile(Block b) throws IOException { + //Should we check for metadata file too? + File f = getFile(b); + + if(f != null ) { + if(f.exists()) + return f; + + // if file is not null, but doesn't exist - possibly disk failed + DataNode datanode = DataNode.getDataNode(); + datanode.checkDiskError(); + } + + if (InterDatanodeProtocol.LOG.isDebugEnabled()) { + InterDatanodeProtocol.LOG.debug("b=" + b + ", f=" + f); + } + return null; + } + + /** {@inheritDoc} */ + public void validateBlockMetadata(Block b) throws IOException { + DatanodeBlockInfo info = volumeMap.get(b); + if (info == null) { + throw new IOException("Block " + b + " does not exist in volumeMap."); + } + FSVolume v = info.getVolume(); + File tmp = v.getTmpFile(b); + File f = getFile(b); + if (f == null) { + f = tmp; + } + if (f == null) { + throw new IOException("Block " + b + " does not exist on disk."); + } + if (!f.exists()) { + throw new IOException("Block " + b + + " block file " + f + + " does not exist on disk."); + } + if (b.getNumBytes() != f.length()) { + throw new IOException("Block " + b + + " length is " + b.getNumBytes() + + " does not match block file length " + + f.length()); + } + File meta = getMetaFile(f, b); + if (meta == null) { + throw new IOException("Block " + b + + " metafile does not exist."); + } + if (!meta.exists()) { + throw new IOException("Block " + b + + " metafile " + meta + + " does not exist on disk."); + } + if (meta.length() == 0) { + throw new IOException("Block " + b + " metafile " + meta + " is empty."); + } + long stamp = parseGenerationStamp(f, meta); + if (stamp != b.getGenerationStamp()) { + throw new IOException("Block " + b + + " genstamp is " + b.getGenerationStamp() + + " does not match meta file stamp " + + stamp); + } + } + + /** + * We're informed that a block is no longer valid. We + * could lazily garbage-collect the block, but why bother? + * just get rid of it. + */ + public void invalidate(Block invalidBlks[]) throws IOException { + boolean error = false; + for (int i = 0; i < invalidBlks.length; i++) { + File f = null; + FSVolume v; + synchronized (this) { + f = getFile(invalidBlks[i]); + DatanodeBlockInfo dinfo = volumeMap.get(invalidBlks[i]); + if (dinfo == null) { + DataNode.LOG.warn("Unexpected error trying to delete block " + + invalidBlks[i] + + ". BlockInfo not found in volumeMap."); + error = true; + continue; + } + v = dinfo.getVolume(); + if (f == null) { + DataNode.LOG.warn("Unexpected error trying to delete block " + + invalidBlks[i] + + ". Block not found in blockMap." + + ((v == null) ? " " : " Block found in volumeMap.")); + error = true; + continue; + } + if (v == null) { + DataNode.LOG.warn("Unexpected error trying to delete block " + + invalidBlks[i] + + ". No volume for this block." + + " Block found in blockMap. " + f + "."); + error = true; + continue; + } + File parent = f.getParentFile(); + if (parent == null) { + DataNode.LOG.warn("Unexpected error trying to delete block " + + invalidBlks[i] + + ". 
Parent not found for file " + f + ".");
+          error = true;
+          continue;
+        }
+        v.clearPath(parent);
+        volumeMap.remove(invalidBlks[i]);
+      }
+      File metaFile = getMetaFile( f, invalidBlks[i] );
+      long dfsBytes = f.length() + metaFile.length();
+
+      // Delete the block asynchronously to make sure we can do it fast enough
+      asyncDiskService.deleteAsync(v, f, metaFile, dfsBytes, invalidBlks[i].toString());
+    }
+    if (error) {
+      throw new IOException("Error in deleting blocks.");
+    }
+  }
+
+  /**
+   * Turn the block identifier into a filename.
+   */
+  public synchronized File getFile(Block b) {
+    DatanodeBlockInfo info = volumeMap.get(b);
+    if (info != null) {
+      return info.getFile();
+    }
+    return null;
+  }
+
+  /**
+   * Check whether the data directories are healthy. If some volumes have
+   * failed, remove all the blocks that belong to those volumes.
+   * @throws DiskErrorException
+   */
+  public void checkDataDir() throws DiskErrorException {
+    long total_blocks=0, removed_blocks=0;
+    List<FSVolume> failed_vols = volumes.checkDirs();
+
+    // if there are no failed volumes, return
+    if(failed_vols == null)
+      return;
+
+    // else
+    // remove related blocks
+    long mlsec = System.currentTimeMillis();
+    synchronized (this) {
+      Iterator<Block> ib = volumeMap.keySet().iterator();
+      while(ib.hasNext()) {
+        Block b = ib.next();
+        total_blocks ++;
+        // check if the volume this block belongs to is still valid
+        FSVolume vol = volumeMap.get(b).getVolume();
+        for(FSVolume fv: failed_vols) {
+          if(vol == fv) {
+            DataNode.LOG.warn("removing block " + b.getBlockId() + " from vol "
+                + vol.dataDir.dir.getAbsolutePath());
+            ib.remove();
+            removed_blocks++;
+            break;
+          }
+        }
+      }
+    } // end of sync
+    mlsec = System.currentTimeMillis() - mlsec;
+    DataNode.LOG.warn(">>>>>>>>>>>>Removed " + removed_blocks + " out of " + total_blocks +
+        "(took " + mlsec + " millisecs)");
+
+    // report the error
+    StringBuilder sb = new StringBuilder();
+    for(FSVolume fv : failed_vols) {
+      sb.append(fv.dataDir.dir.getAbsolutePath() + ";");
+    }
+
+    throw new DiskErrorException("DataNode failed volumes:" + sb);
+
+  }
+
+
+  public String toString() {
+    return "FSDataset{dirpath='"+volumes+"'}";
+  }
+
+  private ObjectName mbeanName;
+  private Random rand = new Random();
+
+  /**
+   * Register the FSDataset MBean using the name
+   *        "hadoop:service=DataNode,name=FSDatasetState-<storageid>"
+   */
+  void registerMBean(final String storageId) {
+    // We wrap to bypass standard mbean naming convention.
+    // This wrapping can be removed in java 6 as it is more flexible in
+    // package naming for mbeans and their impl.
+ StandardMBean bean; + String storageName; + if (storageId == null || storageId.equals("")) {// Temp fix for the uninitialized storage + storageName = "UndefinedStorageId" + rand.nextInt(); + } else { + storageName = storageId; + } + try { + bean = new StandardMBean(this,FSDatasetMBean.class); + mbeanName = MBeanUtil.registerMBean("DataNode", "FSDatasetState-" + storageName, bean); + } catch (NotCompliantMBeanException e) { + e.printStackTrace(); + } + + DataNode.LOG.info("Registered FSDatasetStatusMBean"); + } + + public void shutdown() { + if (mbeanName != null) + MBeanUtil.unregisterMBean(mbeanName); + + if (asyncDiskService != null) { + asyncDiskService.shutdown(); + } + + if(volumes != null) { + if (volumes.scannersExecutor != null) { + volumes.scannersExecutor.shutdown(); + } + for (FSVolume volume : volumes.volumes) { + if(volume != null) { + volume.dfsUsage.shutdown(); + } + } + } + } + + public String getStorageInfo() { + return toString(); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDatasetAsyncDiskService.java b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDatasetAsyncDiskService.java new file mode 100644 index 0000000..2d26f63 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDatasetAsyncDiskService.java @@ -0,0 +1,199 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.datanode; + +import java.io.File; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import org.apache.hadoop.conf.Configuration; + +/* + * This class is a container of multiple thread pools, each for a volume, + * so that we can schedule async disk operations easily. + * + * Examples of async disk operations are deletion of block files in FSDataset. + * We don't want to create a new thread for each of the deletion request, and + * we don't want to do all deletions in the heartbeat thread since deletion + * can be slow, and we don't want to use a single thread pool because that + * is inefficient when we have more than 1 volume. AsyncDiskService is the + * solution for these. + * + * This class is used inside FSDataset. + * + * In the future, we should extract AsyncDiskService and put it into common. + * The FSDataset-specific logic should reside here. 
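+ *
+ * Typical use (a rough sketch of how FSDataset in this patch drives it):
+ *
+ *   FSDatasetAsyncDiskService svc = new FSDatasetAsyncDiskService(roots, conf);
+ *   svc.deleteAsync(volume, blockFile, metaFile, dfsBytes, block.toString());
+ *   // ... later, when the dataset is shut down:
+ *   svc.shutdown();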
+ */ +class FSDatasetAsyncDiskService { + + public static final Log LOG = LogFactory.getLog(FSDatasetAsyncDiskService.class); + + // ThreadPool core pool size + private static final int CORE_THREADS_PER_VOLUME = 1; + // ThreadPool maximum pool size + private static final int DEFAULT_MAXIMUM_THREADS_PER_VOLUME = 2; + // ThreadPool keep-alive time for threads over core pool size + private static final long THREADS_KEEP_ALIVE_SECONDS = 60; + + private final ThreadGroup threadGroup = new ThreadGroup("async disk service"); + + private ThreadFactory threadFactory; + + private HashMap executors + = new HashMap(); + + /** + * Create a AsyncDiskServices with a set of volumes (specified by their + * root directories). + * + * The AsyncDiskServices uses one ThreadPool per volume to do the async + * disk operations. + * + * @param volumes The roots of the data volumes. + */ + FSDatasetAsyncDiskService(File[] volumes, Configuration conf) { + + threadFactory = new ThreadFactory() { + public Thread newThread(Runnable r) { + return new Thread(threadGroup, r); + } + }; + + // Create one ThreadPool per volume + for (int v = 0 ; v < volumes.length; v++) { + ThreadPoolExecutor executor = new ThreadPoolExecutor( + CORE_THREADS_PER_VOLUME, + conf.getInt("dfs.datanode.max.deletion.threads.per.volume", + DEFAULT_MAXIMUM_THREADS_PER_VOLUME), + THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS, + new LinkedBlockingQueue(), threadFactory); + + // This can reduce the number of running threads + executor.allowCoreThreadTimeOut(true); + executors.put(volumes[v], executor); + } + + } + + /** + * Execute the task sometime in the future, using ThreadPools. + */ + synchronized void execute(File root, Runnable task) { + if (executors == null) { + throw new RuntimeException("AsyncDiskService is already shutdown"); + } + ThreadPoolExecutor executor = executors.get(root); + if (executor == null) { + throw new RuntimeException("Cannot find root " + root + + " for execution of task " + task); + } else { + executor.execute(task); + } + } + + /** + * Gracefully shut down all ThreadPool. Will wait for all deletion + * tasks to finish. + */ + synchronized void shutdown() { + + if (executors == null) { + + LOG.warn("AsyncDiskService has already shut down."); + + } else { + LOG.info("Shutting down all async disk service threads..."); + + for (Map.Entry e + : executors.entrySet()) { + e.getValue().shutdown(); + } + // clear the executor map so that calling execute again will fail. + executors = null; + + LOG.info("All async disk service threads have been shut down."); + } + } + + /** + * Delete the block file and meta file from the disk asynchronously, adjust + * dfsUsed statistics accordingly. + */ + void deleteAsync(FSDataset.FSVolume volume, File blockFile, + File metaFile, long dfsBytes, String blockName) { + DataNode.LOG.info("Scheduling block " + blockName + " file " + blockFile + + " for deletion"); + ReplicaFileDeleteTask deletionTask = + new ReplicaFileDeleteTask(volume, blockFile, metaFile, dfsBytes, + blockName); + execute(volume.getCurrentDir(), deletionTask); + } + + /** A task for deleting a block file and its associated meta file, as well + * as decrement the dfs usage of the volume. 
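+   * If either file cannot be deleted, the failure is only logged and dfsUsed
+   * is left unchanged; otherwise the volume's usage is decremented by dfsBytes.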
+ */ + static class ReplicaFileDeleteTask implements Runnable { + + FSDataset.FSVolume volume; + File blockFile; + File metaFile; + long dfsBytes; + String blockName; + + ReplicaFileDeleteTask(FSDataset.FSVolume volume, File blockFile, + File metaFile, long dfsBytes, String blockName) { + this.volume = volume; + this.blockFile = blockFile; + this.metaFile = metaFile; + this.dfsBytes = dfsBytes; + this.blockName = blockName; + } + + FSDataset.FSVolume getVolume() { + return volume; + } + + @Override + public String toString() { + // Called in AsyncDiskService.execute for displaying error messages. + return "deletion of block " + blockName + " with block file " + blockFile + + " and meta file " + metaFile + " from volume " + volume; + } + + @Override + public void run() { + if ( !blockFile.delete() || ( !metaFile.delete() && metaFile.exists() ) ) { + DataNode.LOG.warn("Unexpected error trying to delete block " + + blockName + " at file " + blockFile + ". Ignored."); + } else { + volume.decDfsUsed(dfsBytes); + DataNode.LOG.info("Deleted block " + blockName + " at file " + blockFile); + } + } + }; + + +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java new file mode 100644 index 0000000..60be87a --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java @@ -0,0 +1,273 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.datanode; + + +import java.io.Closeable; +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + + + + +import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.DiskChecker.DiskErrorException; + +/** + * This is an interface for the underlying storage that stores blocks for + * a data node. + * Examples are the FSDataset (which stores blocks on dirs) and + * SimulatedFSDataset (which simulates data). + * + */ +public interface FSDatasetInterface extends FSDatasetMBean { + + + /** + * Returns the length of the metadata file of the specified block + * @param b - the block for which the metadata length is desired + * @return the length of the metadata file for the specified block. 
+ * @throws IOException + */ + public long getMetaDataLength(Block b) throws IOException; + + /** + * This class provides the input stream and length of the metadata + * of a block + * + */ + static class MetaDataInputStream extends FilterInputStream { + MetaDataInputStream(InputStream stream, long len) { + super(stream); + length = len; + } + private long length; + + public long getLength() { + return length; + } + } + + /** + * Returns metaData of block b as an input stream (and its length) + * @param b - the block + * @return the metadata input stream; + * @throws IOException + */ + public MetaDataInputStream getMetaDataInputStream(Block b) + throws IOException; + + /** + * Does the meta file exist for this block? + * @param b - the block + * @return true of the metafile for specified block exits + * @throws IOException + */ + public boolean metaFileExists(Block b) throws IOException; + + + /** + * Returns the specified block's on-disk length (excluding metadata) + * @param b + * @return the specified block's on-disk length (excluding metadta) + * @throws IOException + */ + public long getLength(Block b) throws IOException; + + /** + * @return the generation stamp stored with the block. + */ + public Block getStoredBlock(long blkid) throws IOException; + + /** + * Returns an input stream to read the contents of the specified block + * @param b + * @return an input stream to read the contents of the specified block + * @throws IOException + */ + public InputStream getBlockInputStream(Block b) throws IOException; + + /** + * Returns an input stream at specified offset of the specified block + * @param b + * @param seekOffset + * @return an input stream to read the contents of the specified block, + * starting at the offset + * @throws IOException + */ + public InputStream getBlockInputStream(Block b, long seekOffset) + throws IOException; + + /** + * Returns an input stream at specified offset of the specified block + * The block is still in the tmp directory and is not finalized + * @param b + * @param blkoff + * @param ckoff + * @return an input stream to read the contents of the specified block, + * starting at the offset + * @throws IOException + */ + public BlockInputStreams getTmpInputStreams(Block b, long blkoff, long ckoff) + throws IOException; + + /** + * + * This class contains the output streams for the data and checksum + * of a block + * + */ + static class BlockWriteStreams { + OutputStream dataOut; + OutputStream checksumOut; + BlockWriteStreams(OutputStream dOut, OutputStream cOut) { + dataOut = dOut; + checksumOut = cOut; + } + + } + + /** + * This class contains the input streams for the data and checksum + * of a block + */ + static class BlockInputStreams implements Closeable { + final InputStream dataIn; + final InputStream checksumIn; + + BlockInputStreams(InputStream dataIn, InputStream checksumIn) { + this.dataIn = dataIn; + this.checksumIn = checksumIn; + } + + /** {@inheritDoc} */ + public void close() { + IOUtils.closeStream(dataIn); + IOUtils.closeStream(checksumIn); + } + } + + /** + * Creates the block and returns output streams to write data and CRC + * @param b + * @param isRecovery True if this is part of erro recovery, otherwise false + * @return a BlockWriteStreams object to allow writing the block data + * and CRC + * @throws IOException + */ + public BlockWriteStreams writeToBlock(Block b, boolean isRecovery) throws IOException; + + /** + * Update the block to the new generation stamp and length. 
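+   * FSDataset, for example, rejects an update that lowers the generation
+   * stamp or grows the length, and truncates the block file (recomputing the
+   * last partial checksum) when the new length is shorter.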
+ */ + public void updateBlock(Block oldblock, Block newblock) throws IOException; + + /** + * Finalizes the block previously opened for writing using writeToBlock. + * The block size is what is in the parameter b and it must match the amount + * of data written + * @param b + * @throws IOException + */ + public void finalizeBlock(Block b) throws IOException; + + /** + * Unfinalizes the block previously opened for writing using writeToBlock. + * The temporary file associated with this block is deleted. + * @param b + * @throws IOException + */ + public void unfinalizeBlock(Block b) throws IOException; + + /** + * Returns the block report - the full list of blocks stored + * @return - the block report - the full list of blocks stored + */ + public Block[] getBlockReport(); + + /** + * Is the block valid? + * @param b + * @return - true if the specified block is valid + */ + public boolean isValidBlock(Block b); + + /** + * Invalidates the specified blocks + * @param invalidBlks - the blocks to be invalidated + * @throws IOException + */ + public void invalidate(Block invalidBlks[]) throws IOException; + + /** + * Check if all the data directories are healthy + * @throws DiskErrorException + */ + public void checkDataDir() throws DiskErrorException; + + /** + * Stringifies the name of the storage + */ + public String toString(); + + /** + * Shutdown the FSDataset + */ + public void shutdown(); + + /** + * Returns the current offset in the data stream. + * @param b + * @param stream The stream to the data file and checksum file + * @return the position of the file pointer in the data stream + * @throws IOException + */ + public long getChannelPosition(Block b, BlockWriteStreams stream) throws IOException; + + /** + * Sets the file pointer of the data stream and checksum stream to + * the specified values. + * @param b + * @param stream The stream for the data file and checksum file + * @param dataOffset The position to which the file pointre for the data stream + * should be set + * @param ckOffset The position to which the file pointre for the checksum stream + * should be set + * @throws IOException + */ + public void setChannelPosition(Block b, BlockWriteStreams stream, long dataOffset, + long ckOffset) throws IOException; + + /** + * Validate that the contents in the Block matches + * the file on disk. Returns true if everything is fine. + * @param b The block to be verified. + * @throws IOException + */ + public void validateBlockMetadata(Block b) throws IOException; + + /** + * checks how many valid storage volumes are there in the DataNode + * @return true if more then minimum valid volumes left in the FSDataSet + */ + public boolean hasEnoughResource(); +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java new file mode 100644 index 0000000..913cc34 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java @@ -0,0 +1,151 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.datanode; + +import java.io.IOException; + +import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.UpgradeManager; +import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand; +import org.apache.hadoop.util.Daemon; + +/** + * Upgrade manager for data-nodes. + * + * Distributed upgrades for a data-node are performed in a separate thread. + * The upgrade starts when the data-node receives the start upgrade command + * from the namenode. At that point the manager finds a respective upgrade + * object and starts a daemon in order to perform the upgrade defined by the + * object. + */ +class UpgradeManagerDatanode extends UpgradeManager { + DataNode dataNode = null; + Daemon upgradeDaemon = null; + + UpgradeManagerDatanode(DataNode dataNode) { + super(); + this.dataNode = dataNode; + } + + public HdfsConstants.NodeType getType() { + return HdfsConstants.NodeType.DATA_NODE; + } + + synchronized void initializeUpgrade(NamespaceInfo nsInfo) throws IOException { + if( ! super.initializeUpgrade()) + return; // distr upgrade is not needed + DataNode.LOG.info("\n Distributed upgrade for DataNode " + + dataNode.dnRegistration.getName() + + " version " + getUpgradeVersion() + " to current LV " + + FSConstants.LAYOUT_VERSION + " is initialized."); + UpgradeObjectDatanode curUO = (UpgradeObjectDatanode)currentUpgrades.first(); + curUO.setDatanode(dataNode); + upgradeState = curUO.preUpgradeAction(nsInfo); + // upgradeState is true if the data-node should start the upgrade itself + } + + /** + * Start distributed upgrade. + * Instantiates distributed upgrade objects. + * + * @return true if distributed upgrade is required or false otherwise + * @throws IOException + */ + public synchronized boolean startUpgrade() throws IOException { + if(upgradeState) { // upgrade is already in progress + assert currentUpgrades != null : + "UpgradeManagerDatanode.currentUpgrades is null."; + UpgradeObjectDatanode curUO = (UpgradeObjectDatanode)currentUpgrades.first(); + curUO.startUpgrade(); + return true; + } + if(broadcastCommand != null) { + if(broadcastCommand.getVersion() > this.getUpgradeVersion()) { + // stop broadcasting, the cluster moved on + // start upgrade for the next version + broadcastCommand = null; + } else { + // the upgrade has been finished by this data-node, + // but the cluster is still running it, + // reply with the broadcast command + assert currentUpgrades == null : + "UpgradeManagerDatanode.currentUpgrades is not null."; + assert upgradeDaemon == null : + "UpgradeManagerDatanode.upgradeDaemon is not null."; + dataNode.namenode.processUpgradeCommand(broadcastCommand); + return true; + } + } + if(currentUpgrades == null) + currentUpgrades = getDistributedUpgrades(); + if(currentUpgrades == null) { + DataNode.LOG.info("\n Distributed upgrade for DataNode version " + + getUpgradeVersion() + " to current LV " + + FSConstants.LAYOUT_VERSION + " cannot be started. 
" + + "The upgrade object is not defined."); + return false; + } + upgradeState = true; + UpgradeObjectDatanode curUO = (UpgradeObjectDatanode)currentUpgrades.first(); + curUO.setDatanode(dataNode); + curUO.startUpgrade(); + upgradeDaemon = new Daemon(curUO); + upgradeDaemon.start(); + DataNode.LOG.info("\n Distributed upgrade for DataNode " + + dataNode.dnRegistration.getName() + + " version " + getUpgradeVersion() + " to current LV " + + FSConstants.LAYOUT_VERSION + " is started."); + return true; + } + + synchronized void processUpgradeCommand(UpgradeCommand command + ) throws IOException { + assert command.getAction() == UpgradeCommand.UC_ACTION_START_UPGRADE : + "Only start upgrade action can be processed at this time."; + this.upgradeVersion = command.getVersion(); + // Start distributed upgrade + if(startUpgrade()) // upgrade started + return; + throw new IOException( + "Distributed upgrade for DataNode " + dataNode.dnRegistration.getName() + + " version " + getUpgradeVersion() + " to current LV " + + FSConstants.LAYOUT_VERSION + " cannot be started. " + + "The upgrade object is not defined."); + } + + public synchronized void completeUpgrade() throws IOException { + assert currentUpgrades != null : + "UpgradeManagerDatanode.currentUpgrades is null."; + UpgradeObjectDatanode curUO = (UpgradeObjectDatanode)currentUpgrades.first(); + broadcastCommand = curUO.completeUpgrade(); + upgradeState = false; + currentUpgrades = null; + upgradeDaemon = null; + DataNode.LOG.info("\n Distributed upgrade for DataNode " + + dataNode.dnRegistration.getName() + + " version " + getUpgradeVersion() + " to current LV " + + FSConstants.LAYOUT_VERSION + " is complete."); + } + + synchronized void shutdownUpgrade() { + if(upgradeDaemon != null) + upgradeDaemon.interrupt(); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java new file mode 100644 index 0000000..c32a37f --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java @@ -0,0 +1,134 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.datanode; + +import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.UpgradeObject; +import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; +import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand; +import org.apache.hadoop.util.StringUtils; +import java.io.IOException; +import java.net.SocketTimeoutException; + +/** + * Base class for data-node upgrade objects. 
+ * Data-node upgrades are run in separate threads. + */ +public abstract class UpgradeObjectDatanode extends UpgradeObject implements Runnable { + private DataNode dataNode = null; + + public HdfsConstants.NodeType getType() { + return HdfsConstants.NodeType.DATA_NODE; + } + + protected DataNode getDatanode() { + return dataNode; + } + + void setDatanode(DataNode dataNode) { + this.dataNode = dataNode; + } + + /** + * Specifies how the upgrade is performed. + * @throws IOException + */ + public abstract void doUpgrade() throws IOException; + + /** + * Specifies what to do before the upgrade is started. + * + * The default implementation checks whether the data-node missed the upgrade + * and throws an exception if it did. This leads to the data-node shutdown. + * + * Data-nodes usually start distributed upgrade when the name-node replies + * to its heartbeat with a start upgrade command. + * Sometimes though, e.g. when a data-node missed the upgrade and wants to + * catchup with the rest of the cluster, it is necessary to initiate the + * upgrade directly on the data-node, since the name-node might not ever + * start it. An override of this method should then return true. + * And the upgrade will start after data-ndoe registration but before sending + * its first heartbeat. + * + * @param nsInfo name-node versions, verify that the upgrade + * object can talk to this name-node version if necessary. + * + * @throws IOException + * @return true if data-node itself should start the upgrade or + * false if it should wait until the name-node starts the upgrade. + */ + boolean preUpgradeAction(NamespaceInfo nsInfo) throws IOException { + int nsUpgradeVersion = nsInfo.getDistributedUpgradeVersion(); + if(nsUpgradeVersion >= getVersion()) + return false; // name-node will perform the upgrade + // Missed the upgrade. Report problem to the name-node and throw exception + String errorMsg = + "\n Data-node missed a distributed upgrade and will shutdown." + + "\n " + getDescription() + "." + + " Name-node version = " + nsInfo.getLayoutVersion() + "."; + DataNode.LOG.fatal( errorMsg ); + try { + dataNode.namenode.errorReport(dataNode.dnRegistration, + DatanodeProtocol.NOTIFY, errorMsg); + } catch(SocketTimeoutException e) { // namenode is busy + DataNode.LOG.info("Problem connecting to server: " + + dataNode.getNameNodeAddr()); + } + throw new IOException(errorMsg); + } + + public void run() { + assert dataNode != null : "UpgradeObjectDatanode.dataNode is null"; + while(dataNode.shouldRun) { + try { + doUpgrade(); + } catch(Exception e) { + DataNode.LOG.error(StringUtils.stringifyException(e)); + } + break; + } + + // report results + if(getUpgradeStatus() < 100) { + DataNode.LOG.info("\n Distributed upgrade for DataNode version " + + getVersion() + " to current LV " + + FSConstants.LAYOUT_VERSION + " cannot be completed."); + } + + // Complete the upgrade by calling the manager method + try { + dataNode.upgradeManager.completeUpgrade(); + } catch(IOException e) { + DataNode.LOG.error(StringUtils.stringifyException(e)); + } + } + + /** + * Complete upgrade and return a status complete command for broadcasting. + * + * Data-nodes finish upgrade at different times. + * The data-node needs to re-confirm with the name-node that the upgrade + * is complete while other nodes are still upgrading. 
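To make the contract above concrete, a hedged sketch of a trivial upgrade object. It assumes that getVersion() and doUpgrade() are the members left abstract by UpgradeObject (which is not shown in this hunk); the getDescription() and getUpgradeStatus() overrides, the status field and the layout-version number are illustrative.

import java.io.IOException;

// Illustrative only: performs no real work and reports itself complete immediately.
class DemoUpgradeObject extends UpgradeObjectDatanode {
  private volatile short status = 0;       // 0..100, polled by run() via getUpgradeStatus()

  public int getVersion() { return -13; }  // layout version this upgrade targets (made up)

  public String getDescription() {
    return "Demo data-node upgrade (illustrative)";
  }

  public short getUpgradeStatus() { return status; }

  // Invoked from run() above on the upgrade daemon thread.
  public void doUpgrade() throws IOException {
    // ... walk local block files, rewrite metadata, etc. ...
    status = 100;                          // signal completion so run() can finish up
  }
}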
+ */ + public UpgradeCommand completeUpgrade() throws IOException { + return new UpgradeCommand(UpgradeCommand.UC_ACTION_REPORT_STATUS, + getVersion(), (short)100); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeActivityMBean.java b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeActivityMBean.java new file mode 100644 index 0000000..b555c90 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeActivityMBean.java @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.datanode.metrics; +import java.util.Random; + +import javax.management.ObjectName; +import org.apache.hadoop.metrics.util.MBeanUtil; +import org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase; +import org.apache.hadoop.metrics.util.MetricsRegistry; + +/** + * + * This is the JMX MBean for reporting the DataNode Activity. + * The MBean is register using the name + * "hadoop:service=DataNode,name=DataNodeActivity-" + * + * Many of the activity metrics are sampled and averaged on an interval + * which can be specified in the metrics config file. + *

+ * For the metrics that are sampled and averaged, one must specify + * a metrics context that does periodic update calls. Most metrics contexts do. + * The default Null metrics context however does NOT. So if you aren't + * using any other metrics context then you can turn on the viewing and averaging + * of sampled metrics by specifying the following two lines + * in the hadoop-meterics.properties file: +*
+ * For the metrics that are sampled and averaged, one must specify + * a metrics context that does periodic update calls. Most metrics contexts do. + * The default Null metrics context however does NOT. So if you aren't + * using any other metrics context then you can turn on the viewing and averaging + * of sampled metrics by specifying the following two lines + * in the hadoop-metrics.properties file: +*

+ *        dfs.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
+ *        dfs.period=10
+ *  
+ *

+ * Note that the metrics are collected regardless of the context used. + * The context with the update thread is used to average the data periodically + * + * + * + * Impl details: We use a dynamic mbean that gets the list of the metrics + * from the metrics registry passed as an argument to the constructor + */ + +public class DataNodeActivityMBean extends MetricsDynamicMBeanBase { + final private ObjectName mbeanName; + private Random rand = new Random(); + + public DataNodeActivityMBean(final MetricsRegistry mr, final String storageId) { + super(mr, "Activity statistics at the DataNode"); + String storageName; + if (storageId.equals("")) {// Temp fix for the uninitialized storage + storageName = "UndefinedStorageId" + rand.nextInt(); + } else { + storageName = storageId; + } + mbeanName = MBeanUtil.registerMBean("DataNode", "DataNodeActivity-" + storageName, this); + } + + + public void shutdown() { + if (mbeanName != null) + MBeanUtil.unregisterMBean(mbeanName); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java new file mode 100644 index 0000000..d60ae64 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java @@ -0,0 +1,138 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.datanode.metrics; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.metrics.MetricsContext; +import org.apache.hadoop.metrics.MetricsRecord; +import org.apache.hadoop.metrics.MetricsUtil; +import org.apache.hadoop.metrics.Updater; +import org.apache.hadoop.metrics.jvm.JvmMetrics; +import org.apache.hadoop.metrics.util.MetricsBase; +import org.apache.hadoop.metrics.util.MetricsRegistry; +import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt; +import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong; +import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate; + + +/** + * + * This class is for maintaining the various DataNode statistics + * and publishing them through the metrics interfaces. + * This also registers the JMX MBean for RPC. + *
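Once registered, the activity MBean above can be read back through the standard platform MBean server. A hedged sketch follows: the storage id in the ObjectName is illustrative, the attribute set is whatever the supplied MetricsRegistry contains, and the lookup must run in the same JVM as the data-node (or be adapted to a remote JMX connector).

import java.lang.management.ManagementFactory;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanServer;
import javax.management.ObjectName;

class JmxReadSketch {
  public static void main(String[] args) throws Exception {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    // Name pattern from the registration above: "DataNodeActivity-" + storageId.
    ObjectName name = new ObjectName(
        "hadoop:service=DataNode,name=DataNodeActivity-DS-12345"); // storage id is illustrative
    // List whatever attributes the dynamic MBean currently exposes.
    for (MBeanAttributeInfo attr : server.getMBeanInfo(name).getAttributes()) {
      System.out.println(attr.getName() + " = " + server.getAttribute(name, attr.getName()));
    }
  }
}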

+ * This class has a number of metrics variables that are publicly accessible; + * these variables (objects) have methods to update their values; + * for example: + *

{@link #blocksRead}.inc() + * + */ +public class DataNodeMetrics implements Updater { + private final MetricsRecord metricsRecord; + private DataNodeActivityMBean datanodeActivityMBean; + public MetricsRegistry registry = new MetricsRegistry(); + + + public MetricsTimeVaryingLong bytesWritten = + new MetricsTimeVaryingLong("bytes_written", registry); + public MetricsTimeVaryingLong bytesRead = + new MetricsTimeVaryingLong("bytes_read", registry); + public MetricsTimeVaryingInt blocksWritten = + new MetricsTimeVaryingInt("blocks_written", registry); + public MetricsTimeVaryingInt blocksRead = + new MetricsTimeVaryingInt("blocks_read", registry); + public MetricsTimeVaryingInt blocksReplicated = + new MetricsTimeVaryingInt("blocks_replicated", registry); + public MetricsTimeVaryingInt blocksRemoved = + new MetricsTimeVaryingInt("blocks_removed", registry); + public MetricsTimeVaryingInt blocksVerified = + new MetricsTimeVaryingInt("blocks_verified", registry); + public MetricsTimeVaryingInt blockVerificationFailures = + new MetricsTimeVaryingInt("block_verification_failures", registry); + + public MetricsTimeVaryingInt readsFromLocalClient = + new MetricsTimeVaryingInt("reads_from_local_client", registry); + public MetricsTimeVaryingInt readsFromRemoteClient = + new MetricsTimeVaryingInt("reads_from_remote_client", registry); + public MetricsTimeVaryingInt writesFromLocalClient = + new MetricsTimeVaryingInt("writes_from_local_client", registry); + public MetricsTimeVaryingInt writesFromRemoteClient = + new MetricsTimeVaryingInt("writes_from_remote_client", registry); + + public MetricsTimeVaryingRate readBlockOp = + new MetricsTimeVaryingRate("readBlockOp", registry); + public MetricsTimeVaryingRate writeBlockOp = + new MetricsTimeVaryingRate("writeBlockOp", registry); + public MetricsTimeVaryingRate readMetadataOp = + new MetricsTimeVaryingRate("readMetadataOp", registry); + public MetricsTimeVaryingRate blockChecksumOp = + new MetricsTimeVaryingRate("blockChecksumOp", registry); + public MetricsTimeVaryingRate copyBlockOp = + new MetricsTimeVaryingRate("copyBlockOp", registry); + public MetricsTimeVaryingRate replaceBlockOp = + new MetricsTimeVaryingRate("replaceBlockOp", registry); + public MetricsTimeVaryingRate heartbeats = + new MetricsTimeVaryingRate("heartBeats", registry); + public MetricsTimeVaryingRate blockReports = + new MetricsTimeVaryingRate("blockReports", registry); + + + public DataNodeMetrics(Configuration conf, String storageId) { + String sessionId = conf.get("session.id"); + // Initiate reporting of Java VM metrics + JvmMetrics.init("DataNode", sessionId); + + + // Now the MBean for the data node + datanodeActivityMBean = new DataNodeActivityMBean(registry, storageId); + + // Create record for DataNode metrics + MetricsContext context = MetricsUtil.getContext("dfs"); + metricsRecord = MetricsUtil.createRecord(context, "datanode"); + metricsRecord.setTag("sessionId", sessionId); + context.registerUpdater(this); + } + + public void shutdown() { + if (datanodeActivityMBean != null) + datanodeActivityMBean.shutdown(); + } + + /** + * Since this object is a registered updater, this method will be called + * periodically, e.g. every 5 seconds. 
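A hedged sketch of how these public metric fields are meant to be driven from the data-node's I/O paths; doUpdates() below then pushes them to the metrics record. The helper class, method and timing bookkeeping are illustrative, the real call sites being in DataXceiver/DataNode elsewhere in the patch.

class MetricsUpdateSketch {
  // metrics is assumed to be the DataNodeMetrics instance owned by the running DataNode.
  static void recordReadBlock(DataNodeMetrics metrics, long bytesSent, boolean localClient) {
    long start = System.currentTimeMillis();
    // ... serve the block to the client ...
    metrics.bytesRead.inc(bytesSent);     // MetricsTimeVaryingLong: total bytes served
    metrics.blocksRead.inc();             // MetricsTimeVaryingInt: one more block read
    if (localClient) {
      metrics.readsFromLocalClient.inc();
    } else {
      metrics.readsFromRemoteClient.inc();
    }
    // MetricsTimeVaryingRate records both the op count and its latency.
    metrics.readBlockOp.inc(System.currentTimeMillis() - start);
  }
}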
+ */ + public void doUpdates(MetricsContext unused) { + synchronized (this) { + for (MetricsBase m : registry.getMetricsList()) { + m.pushMetric(metricsRecord); + } + } + metricsRecord.update(); + } + public void resetAllMinMax() { + readBlockOp.resetMinMax(); + writeBlockOp.resetMinMax(); + readMetadataOp.resetMinMax(); + blockChecksumOp.resetMinMax(); + copyBlockOp.resetMinMax(); + replaceBlockOp.resetMinMax(); + heartbeats.resetMinMax(); + blockReports.resetMinMax(); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java new file mode 100644 index 0000000..ad5cca7 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.datanode.metrics; + +import java.io.IOException; + +/** + * + * This Interface defines the methods to get the status of a the FSDataset of + * a data node. + * It is also used for publishing via JMX (hence we follow the JMX naming + * convention.) + * * Note we have not used the MetricsDynamicMBeanBase to implement this + * because the interface for the FSDatasetMBean is stable and should + * be published as an interface. + * + *

+ * Data Node runtime statistic info is report in another MBean + * @see org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeStatisticsMBean + * + */ +public interface FSDatasetMBean { + + /** + * Returns the total space (in bytes) used by dfs datanode + * @return the total space used by dfs datanode + * @throws IOException + */ + public long getDfsUsed() throws IOException; + + /** + * Returns total capacity (in bytes) of storage (used and unused) + * @return total capacity of storage (used and unused) + * @throws IOException + */ + public long getCapacity() throws IOException; + + /** + * Returns the amount of free storage space (in bytes) + * @return The amount of free storage space + * @throws IOException + */ + public long getRemaining() throws IOException; + + /** + * Returns the storage id of the underlying storage + */ + public String getStorageInfo(); + +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/ConfigManager.java b/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/ConfigManager.java new file mode 100644 index 0000000..fd9a4ca --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/ConfigManager.java @@ -0,0 +1,396 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.hightidenode; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.HashSet; + +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.parsers.ParserConfigurationException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.w3c.dom.Document; +import org.w3c.dom.Element; +import org.w3c.dom.Node; +import org.w3c.dom.NodeList; +import org.w3c.dom.Text; +import org.xml.sax.SAXException; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.protocol.PolicyInfo; +import org.apache.hadoop.hdfs.protocol.PolicyInfo.PathInfo; + +/** + * Maintains the configuration xml file that is read into memory. + */ +class ConfigManager { + public static final Log LOG = LogFactory.getLog( + "org.apache.hadoop.hdfs.server.hightidenode.ConfigManager"); + + /** Time to wait between checks of the config file */ + public static final long RELOAD_INTERVAL = 10 * 1000; + + /** Time to wait between successive runs of all policies */ + public static final long RESCAN_INTERVAL = 3600 * 1000; + + /** + * Time to wait after the config file has been modified before reloading it + * (this is done to prevent loading a file that hasn't been fully written). 
+ */ + public static final long RELOAD_WAIT = 5 * 1000; + + private Configuration conf; // Hadoop configuration + private String configFileName; // Path to config XML file + + private long lastReloadAttempt; // Last time we tried to reload the config file + private long lastSuccessfulReload; // Last time we successfully reloaded config + private boolean lastReloadAttemptFailed = false; + private long reloadInterval = RELOAD_INTERVAL; + private long periodicity; // time between runs of all policies + + // Reload the configuration + private boolean doReload; + private Thread reloadThread; + private volatile boolean running = false; + + // Collection of all configured policies. + Collection allPolicies = new ArrayList(); + + public ConfigManager(Configuration conf) throws IOException, SAXException, + HighTideConfigurationException, ClassNotFoundException, ParserConfigurationException { + this.conf = conf; + this.configFileName = conf.get("hightide.config.file"); + this.doReload = conf.getBoolean("hightide.config.reload", true); + this.reloadInterval = conf.getLong("hightide.config.reload.interval", RELOAD_INTERVAL); + if (configFileName == null) { + String msg = "No hightide.config.file given in conf - " + + "the Hadoop HighTideNode cannot run. Aborting...."; + LOG.warn(msg); + throw new IOException(msg); + } + reloadConfigs(); + lastSuccessfulReload = HighTideNode.now(); + lastReloadAttempt = HighTideNode.now(); + running = true; + } + + /** + * Reload config file if it hasn't been loaded in a while + * Returns true if the file was reloaded. + */ + public synchronized boolean reloadConfigsIfNecessary() { + long time = HighTideNode.now(); + if (time > lastReloadAttempt + reloadInterval) { + lastReloadAttempt = time; + File file = null; + try { + file = new File(configFileName); + long lastModified = file.lastModified(); + if (lastModified > lastSuccessfulReload && + time > lastModified + RELOAD_WAIT) { + reloadConfigs(); + lastSuccessfulReload = time; + lastReloadAttemptFailed = false; + return true; + } + } catch (Exception e) { + if (!lastReloadAttemptFailed) { + LOG.error("Failed to reload config file - " + file + + "will use existing configuration.", e); + } + lastReloadAttemptFailed = true; + } + } + return false; + } + + /** + * Updates the in-memory data structures from the config file. This file is + * expected to be in the following whitespace-separated format: + * Blank lines and lines starting with # are ignored. + * + * @throws IOException if the config file cannot be read. + * @throws HighTideConfigurationException if configuration entries are invalid. + * @throws ClassNotFoundException if user-defined policy classes cannot be loaded + * @throws ParserConfigurationException if XML parser is misconfigured. + * @throws SAXException if config file is malformed. + * @returns A new set of policy categories. + */ + void reloadConfigs() throws IOException, ParserConfigurationException, + SAXException, ClassNotFoundException, HighTideConfigurationException { + + if (configFileName == null) { + return; + } + + File file = new File(configFileName); + if (!file.exists()) { + throw new HighTideConfigurationException("Configuration file " + configFileName + + " does not exist."); + } + + // Read and parse the configuration file. 
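The tag names checked by the parsing code that follows imply a policy file of roughly the shape below. This is a hedged reconstruction from the parser, not a file shipped in the patch: the paths, hostnames, replication factors and modTimePeriod value are illustrative, while the element and attribute names (configuration, srcPath, destPath, property, name, value) come from the checks in the code.

<?xml version="1.0"?>
<configuration>
  <srcPath name="hdfs://nn1.example.com:8020/warehouse/tableA">
    <property>
      <name>replication</name>
      <value>3</value>
    </property>
    <property>
      <name>modTimePeriod</name>
      <value>3600000</value>
    </property>
    <destPath name="hdfs://nn2.example.com:8020/warehouse/tableA">
      <property>
        <name>replication</name>
        <value>1</value>
      </property>
    </destPath>
  </srcPath>
</configuration>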
+ // allow include files in configuration file + DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory.newInstance(); + docBuilderFactory.setIgnoringComments(true); + docBuilderFactory.setNamespaceAware(true); + try { + docBuilderFactory.setXIncludeAware(true); + } catch (UnsupportedOperationException e) { + LOG.error("Failed to set setXIncludeAware(true) for raid parser " + + docBuilderFactory + ":" + e, e); + } + LOG.error("Reloading config file " + file); + + DocumentBuilder builder = docBuilderFactory.newDocumentBuilder(); + Document doc = builder.parse(file); + Element root = doc.getDocumentElement(); + if (!"configuration".equalsIgnoreCase(root.getTagName())) + throw new HighTideConfigurationException("Bad configuration file: " + + "top-level element not "); + NodeList elements = root.getChildNodes(); + + Set existingPolicies = new HashSet(); + + // loop through all the configured source paths. + for (int i = 0; i < elements.getLength(); i++) { + Node node = elements.item(i); + if (!(node instanceof Element)) { + continue; + } + Element element = (Element)node; + String elementTagName = element.getTagName(); + String policyName = null; + if ("srcPath".equalsIgnoreCase(elementTagName)) { + String srcPathPrefix = element.getAttribute("name"); + + if (srcPathPrefix == null || srcPathPrefix.length() == 0) { + throw new HighTideConfigurationException("Bad configuration file: " + + "srcPath node does not have a path."); + } + PolicyInfo policyInfo = new PolicyInfo(srcPathPrefix, conf); + policyName = srcPathPrefix; + Properties policyProperties; + + // loop through all elements of this policy + NodeList policies = element.getChildNodes(); + for (int j = 0; j < policies.getLength(); j++) { + Node node1 = policies.item(j); + if (!(node1 instanceof Element)) { + continue; + } + Element policy = (Element)node1; + if ((!"property".equalsIgnoreCase(policy.getTagName())) && + (!"destPath".equalsIgnoreCase(policy.getTagName()))) { + throw new HighTideConfigurationException("Bad configuration file: " + + "Expecting or for srcPath " + srcPathPrefix + + " but found " + policy.getTagName()); + } + + // parse the items + if ("destPath".equalsIgnoreCase(policy.getTagName())) { + String destPath = policy.getAttribute("name"); + if (destPath == null) { + throw new HighTideConfigurationException("Bad configuration file: " + + " tag should have an attribute named 'name'."); + } + NodeList properties = policy.getChildNodes(); + Properties destProperties = new Properties(); + for (int k = 0; k < properties.getLength(); k++) { + Node node2 = properties.item(k); + if (!(node2 instanceof Element)) { + continue; + } + Element property = (Element)node2; + String propertyName = property.getTagName(); + if (!("property".equalsIgnoreCase(propertyName))) { + throw new HighTideConfigurationException("Bad configuration file: " + + " can have only children." 
+ + " but found " + propertyName); + } + NodeList nl = property.getChildNodes(); + String pname=null,pvalue=null; + for (int l = 0; l < nl.getLength(); l++){ + Node node3 = nl.item(l); + if (!(node3 instanceof Element)) { + continue; + } + Element item = (Element) node3; + String itemName = item.getTagName(); + if ("name".equalsIgnoreCase(itemName)){ + pname = ((Text)item.getFirstChild()).getData().trim(); + } else if ("value".equalsIgnoreCase(itemName)){ + pvalue = ((Text)item.getFirstChild()).getData().trim(); + } + } + if (pname == null || pvalue == null) { + throw new HighTideConfigurationException("Bad configuration file: " + + "All property for destPath " + destPath + + " must have name and value "); + } + LOG.info(policyName + "." + pname + " = " + pvalue); + destProperties.setProperty(pname, pvalue); + } + policyInfo.addDestPath(destPath, destProperties); + + } else if ("property".equalsIgnoreCase(policy.getTagName())) { + Element property = (Element)node1; + NodeList nl = property.getChildNodes(); + String pname=null,pvalue=null; + for (int l = 0; l < nl.getLength(); l++){ + Node node3 = nl.item(l); + if (!(node3 instanceof Element)) { + continue; + } + Element item = (Element) node3; + String itemName = item.getTagName(); + if ("name".equalsIgnoreCase(itemName)){ + pname = ((Text)item.getFirstChild()).getData().trim(); + } else if ("value".equalsIgnoreCase(itemName)){ + pvalue = ((Text)item.getFirstChild()).getData().trim(); + } + } + if (pname == null || pvalue == null) { + throw new HighTideConfigurationException("Bad configuration file: " + + "All property for srcPath " + srcPathPrefix + + " must have name and value "); + } + LOG.info(policyName + "." + pname + " = " + pvalue); + policyInfo.setProperty(pname,pvalue); + } + } + existingPolicies.add(policyInfo); + } else { + throw new HighTideConfigurationException("Bad configuration file: " + + "The top level item must be srcPath but found " + elementTagName); + } + } + validateAllPolicies(existingPolicies); + setAllPolicies(existingPolicies); + return; + } + + /** + * Get a collection of all policies + */ + public synchronized Collection getAllPolicies() { + return allPolicies; + } + + /** + * Set a collection of all policies + */ + protected synchronized void setAllPolicies(Collection value) { + this.allPolicies = value; + } + + /** + * Validate a collection of policies + */ + private void validateAllPolicies(Collection all) + throws IOException, NumberFormatException { + for (PolicyInfo pinfo: all) { + Path srcPath = pinfo.getSrcPath(); + if (srcPath == null) { + throw new IOException("Unable to find srcPath in policy."); + } + if (pinfo.getProperty("replication") == null) { + throw new IOException("Unable to find replication in policy." + + srcPath); + } + int repl = Integer.parseInt(pinfo.getProperty("replication")); + if (pinfo.getProperty("modTimePeriod") == null) { + throw new IOException("Unable to find modTimePeriod in policy." + + srcPath); + } + long value = Long.parseLong(pinfo.getProperty("modTimePeriod")); + List dpaths = pinfo.getDestPaths(); + if (dpaths == null || dpaths.size() == 0) { + throw new IOException("Unable to find dest in policy." + srcPath); + } + for (PathInfo pp: dpaths) { + if (pp.getPath() == null) { + throw new IOException("Unable to find valid destPath in policy " + + srcPath); + } + if (pp.getProperty("replication") == null) { + throw new IOException("Unable to find dest replication in policy." 
+ + srcPath); + } + repl = Integer.parseInt(pp.getProperty("replication")); + } + } + } + + /** + * Start a background thread to reload the config file + */ + void startReload() { + if (doReload) { + reloadThread = new UpdateThread(); + reloadThread.start(); + } + } + + /** + * Stop the background thread that reload the config file + */ + void stopReload() throws InterruptedException { + if (reloadThread != null) { + running = false; + reloadThread.interrupt(); + reloadThread.join(); + reloadThread = null; + } + } + + /** + * A thread which reloads the config file. + */ + private class UpdateThread extends Thread { + private UpdateThread() { + super("HighTideNode config reload thread"); + } + + public void run() { + while (running) { + try { + Thread.sleep(reloadInterval); + reloadConfigsIfNecessary(); + } catch (InterruptedException e) { + // do nothing + } catch (Exception e) { + LOG.error("Failed to reload config file ", e); + } + } + } + } + +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/DirectoryTraversal.java b/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/DirectoryTraversal.java new file mode 100644 index 0000000..980b098 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/DirectoryTraversal.java @@ -0,0 +1,200 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.hightidenode; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.LinkedList; +import java.util.List; +import java.util.Stack; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.StringUtils; + +/** + * Implements depth-first traversal using a Stack object. The traversal + * can be stopped at any time and the state of traversal is saved. + */ +public class DirectoryTraversal { + public static final Log LOG = + LogFactory.getLog("org.apache.hadoop.hdfs.server.hightidenode.DirectoryTraversal"); + + private FileSystem fs; + private List paths; + private int pathIdx = 0; // Next path to process. + private Stack stack = new Stack(); + + /** + * Represents a directory node in directory traversal. + */ + static class Node { + private FileStatus path; // Path that this node represents. + private FileStatus[] elements; // Elements in the node. 
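A hedged usage sketch for this traversal class, relying on the getNextFile() loop declared further below. The start path and configuration are illustrative, and the generic parameters stripped from the original declarations are assumed to be FileStatus.

import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class TraversalSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    List<FileStatus> roots = new LinkedList<FileStatus>();
    roots.add(fs.getFileStatus(new Path("/user/warehouse")));  // illustrative start path

    DirectoryTraversal dt = new DirectoryTraversal(fs, roots);
    FileStatus file;
    // getNextFile() returns files depth-first and null once traversal is done;
    // the traversal can be suspended between calls since its state lives on the stack.
    while ((file = dt.getNextFile()) != null) {
      System.out.println(file.getPath());
    }
  }
}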
+ private int idx = 0; + + public Node(FileStatus path, FileStatus[] elements) { + this.path = path; + this.elements = elements; + } + + public boolean hasNext() { + return idx < elements.length; + } + + public FileStatus next() { + return elements[idx++]; + } + + public FileStatus path() { + return this.path; + } + } + + /** + * Constructor. + * @param fs The filesystem to use. + * @param startPaths A list of paths that need to be traversed + */ + public DirectoryTraversal(FileSystem fs, List startPaths) { + this.fs = fs; + paths = startPaths; + pathIdx = 0; + } + + /** + * Return the next file. + * @throws IOException + */ + public FileStatus getNextFile() throws IOException { + // Check if traversal is done. + while (!doneTraversal()) { + // If traversal is not done, check if the stack is not empty. + while (!stack.isEmpty()) { + // If the stack is not empty, look at the top node. + Node node = stack.peek(); + // Check if the top node has an element. + if (node.hasNext()) { + FileStatus element = node.next(); + // Is the next element a directory. + if (!element.isDir()) { + // It is a file, return it. + return element; + } + // Next element is a directory, push it on to the stack and + // continue + try { + pushNewNode(element); + } catch (FileNotFoundException e) { + // Ignore and move to the next element. + } + continue; + } else { + // Top node has no next element, pop it and continue. + stack.pop(); + continue; + } + } + // If the stack is empty, do we have more paths? + while (!paths.isEmpty()) { + FileStatus next = paths.remove(0); + pathIdx++; + if (!next.isDir()) { + return next; + } + try { + pushNewNode(next); + } catch (FileNotFoundException e) { + continue; + } + break; + } + } + return null; + } + + /** + * Gets the next directory in the tree. The algorithm returns deeper directories + * first. + * @return A FileStatus representing the directory. + * @throws IOException + */ + public FileStatus getNextDirectory() throws IOException { + // Check if traversal is done. + while (!doneTraversal()) { + // If traversal is not done, check if the stack is not empty. + while (!stack.isEmpty()) { + // If the stack is not empty, look at the top node. + Node node = stack.peek(); + // Check if the top node has an element. + if (node.hasNext()) { + FileStatus element = node.next(); + // Is the next element a directory. + if (element.isDir()) { + // Next element is a directory, push it on to the stack and + // continue + try { + pushNewNode(element); + } catch (FileNotFoundException e) { + // Ignore and move to the next element. + } + continue; + } + } else { + stack.pop(); + return node.path; + } + } + // If the stack is empty, do we have more paths? + while (!paths.isEmpty()) { + FileStatus next = paths.remove(0); + pathIdx++; + if (next.isDir()) { + try { + pushNewNode(next); + } catch (FileNotFoundException e) { + continue; + } + break; + } + } + } + return null; + } + + private void pushNewNode(FileStatus stat) throws IOException { + if (!stat.isDir()) { + return; + } + Path p = stat.getPath(); + LOG.debug("Traversing to directory " + p); + FileStatus[] elements = fs.listStatus(p); + Node newNode = new Node(stat, (elements == null? 
new FileStatus[0]: elements)); + stack.push(newNode); + } + + public boolean doneTraversal() { + return paths.isEmpty() && stack.isEmpty(); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/FileFixer.java b/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/FileFixer.java new file mode 100644 index 0000000..f8a84ec --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/FileFixer.java @@ -0,0 +1,510 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.hightidenode; + +import java.io.BufferedOutputStream; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.Date; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.Map; +import java.util.HashMap; +import java.util.Set; +import java.util.HashSet; +import java.util.List; +import java.util.Collection; +import java.util.regex.Pattern; +import java.util.Random; +import java.util.Queue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.nio.channels.SocketChannel; + +import org.apache.hadoop.util.DataChecksum; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.DFSClient.DFSInputStream; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.DataTransferProtocol; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.datanode.BlockSender; +import org.apache.hadoop.hdfs.server.datanode.FSDataset; +import org.apache.hadoop.io.Text; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.BlockMissingException; +import org.apache.hadoop.fs.ChecksumException; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; 
+import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.ipc.RPC; + +import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; +import org.apache.hadoop.hdfs.protocol.ProtocolCompatible; +import org.apache.hadoop.hdfs.protocol.HighTideProtocol; +import org.apache.hadoop.hdfs.protocol.PolicyInfo; +import org.apache.hadoop.hdfs.protocol.PolicyInfo.PathInfo; +import org.apache.hadoop.hdfs.server.hightidenode.metrics.HighTideNodeMetrics; + +/** + * This class fixes files by copying data from one of the files in the + * equivalent set. + * It periodically fetches the list of corrupt files from the namenode, + * and fixed missing blocks + */ +public class FileFixer implements Runnable { + public static final Log LOG = LogFactory.getLog( + "org.apache.hadoop.hdfs.hightide.FileFixer"); + private final Configuration conf; + + private volatile boolean running = true; + private int blockFixInterval = 60*1000; // 1min + private int numThreads = 100; + + // ThreadPool keep-alive time for threads over core pool size + private static final long THREADS_KEEP_ALIVE_SECONDS = 60; + + // a queue to store corrupted files + static class PathToPolicy { + String spath; + PolicyInfo pinfo; + PathToPolicy(Path p, PolicyInfo info) { + this.spath = p.toString(); + this.pinfo = info; + } + } + + private Collection all; // list of all policies + List pathToPolicy; // find policy based on longest path match + + private PendingReplication filesBeingFixed; // files that are being fixed + + + ThreadPoolExecutor executor; // threads to fix blocks + + FileFixer(Configuration conf) throws IOException { + this.conf = conf; + blockFixInterval = conf.getInt("hightide.blockfix.interval", + blockFixInterval); + numThreads = conf.getInt("hightide.blockfix.numthreads", numThreads); + + pathToPolicy = new LinkedList(); + executor = new ThreadPoolExecutor( numThreads, numThreads, + THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS, + new LinkedBlockingQueue()); + + // start a thread to purge enties from this set automatically + filesBeingFixed = new PendingReplication(conf.getInt( + "dfs.hightide.pending.timeout.sec", -1) * 1000L); + } + + /** + * The list of all configured policies. + */ + void setPolicyInfo(Collection all) throws IOException { + this.all = all; + this.pathToPolicy.clear(); + + // keep a reverse map from all top-level paths to policies + for (PolicyInfo pinfo: all) { + pathToPolicy.add(new PathToPolicy(pinfo.getSrcPath(), pinfo)); + for (PathInfo d:pinfo.getDestPaths()) { + pathToPolicy.add(new PathToPolicy(d.rpath, pinfo)); + } + } + + // keep all paths sorted in revere lexicographical order so that + // we longest path is first. + Comparator comp = new Comparator() { + public int compare(PathToPolicy p1, PathToPolicy p2) { + return 0 - p1.spath.compareTo(p2.spath); + } + }; + Collections.sort(pathToPolicy, comp); + } + + /** + * A singleton thread that finds corrupted files and then schedules + * blocks to be copied. This thread talks only to NameNodes and does + * not talk to any datanodes. + */ + public void run() { + while (running) { + try { + LOG.info("FileFixer continuing to run..."); + doFindFiles(); + } catch (Exception e) { + LOG.error(StringUtils.stringifyException(e)); + } catch (Error err) { + LOG.error("Exiting after encountering " + + StringUtils.stringifyException(err)); + shutdown(); + throw err; + } + try { + // Sleep before proceeding to fix more files. 
+ Thread.sleep(blockFixInterval); + } catch (InterruptedException ie) { + LOG.error("Encountering InturruptedException " + + StringUtils.stringifyException(ie)); + } + } + } + + /* + * Release all resources, shutdown any threads + */ + void shutdown() { + running = false; + filesBeingFixed.stop(); + } + + /* + * returns the FileSystem of the path. If the FileSystem is down, then + * log an error and return null + */ + static FileSystem getFs(Configuration conf, Path p) { + try { + return p.getFileSystem(conf); + } catch (Exception e) { + // if a single namenode is down, log it and ignore. Continue to + // fix other namenodes. + LOG.warn("getFs: Unable to contact filesystem: " + p + " ignoring.... " + + e); + e.printStackTrace(); + return null; + } + } + + /** + * Poll namenode(s) to find corrupted files. Enqueue blocks for replication + * if needed. + */ + private void doFindFiles() throws IOException { + Set allFs = new HashSet(); + Set filesToFix = new HashSet(); // files that are yet to be fixed + + // collect all unique filesystems in all policies. + for (PolicyInfo pinfo: all) { + FileSystem fs = getFs(pinfo.getConf(), pinfo.getSrcPath()); + if (fs != null) { + allFs.add(fs); + } + for (PathInfo d:pinfo.getDestPaths()) { + fs = getFs(pinfo.getConf(), d.rpath); + if (fs != null) { + allFs.add(fs); + } + } + } + + // make a RPC to all relevant namenodes to find corrupt files + for (FileSystem fs:allFs) { + if (!running) break; + List corruptFiles = null; + + corruptFiles = getCorruptFilesFromNamenode(fs); + + // if we are not already fixing this one, then put it in the list + // of files that need fixing. + for (Path p : corruptFiles) { + if (filesBeingFixed.add(p)) { + filesToFix.add(p); + } + } + } + + if (!filesToFix.isEmpty()) { + LOG.info("Found " + filesToFix.size() + " corrupt files."); + } + + for (Path path: filesToFix) { + if (!running) break; + try { + fixFile(path); + } catch (IOException ie) { + LOG.error("Error while processing " + path + + ": " + StringUtils.stringifyException(ie)); + // For certain kinds of errors, it might be good if we remove + // this file from filesBeingFixed, so that the file-fix gets + // attemted in the immediate next iteration. For example, if + // we get a network Exception, we can retry immediately. On + // the other hand, if we get a file length mismatch exception + // then no amount of retry will fix it, so it is better to + // retry less frequently. + } + } + } + + /** + * Fix a specific file + */ + private void fixFile(Path badFile) throws IOException { + + PolicyInfo pinfo = null; + String filename = badFile.toString(); + + LOG.info("File = file to fix:" + badFile); + + // Find the policy that maps this file + for (PathToPolicy pp: pathToPolicy) { + if (filename.startsWith(pp.spath)) { + pinfo = pp.pinfo; + break; + } + } + if (pinfo == null) { + throw new IOException("Unable to find matching policy for " + + badFile); + } + + // process the file and fix it. + Path src; + HighTideNode.getMetrics().fixAttempt.inc(); + + if (filename.startsWith(pinfo.getSrcPath().toString())) { + // srcPath is corrupted, pick the first destPath as source of truth. 
+ String[] splits = filename.split(pinfo.getSrcPath().toString()); + src = new Path(pinfo.getDestPaths().get(0).rpath.toString() + splits[1]); + } else { + // dest file is corrupted, copy from source to destination + String[] splits = filename.split(pinfo.getDestPaths().get(0).rpath.toString()); + src = new Path(pinfo.getSrcPath().toString() + splits[1]); + } + DistributedFileSystem srcFs = (DistributedFileSystem) src.getFileSystem(pinfo.getConf()); + DistributedFileSystem destFs = (DistributedFileSystem) badFile.getFileSystem(pinfo.getConf()); + + FileStatus sstat = srcFs.getFileStatus(src); + FileStatus dstat = destFs.getFileStatus(badFile); + + // assert that modtime of the two files are same + if (sstat.getModificationTime() != dstat.getModificationTime()) { + String msg = "Unable to fix file " + badFile + + " because src " + src + " has modification time as " + + HighTideNode.dateForm.format(new Date(sstat.getModificationTime())) + + " but destination " + badFile + " has modification time as " + + HighTideNode.dateForm.format(new Date(dstat.getModificationTime())); + LOG.error(msg); + HighTideNode.getMetrics().fixFailedModTimeMismatch.inc(); + throw new IOException(msg); + } + + // check that blocksize of the two files are same + if (sstat.getBlockSize() != dstat.getBlockSize()) { + String msg = "Unable to fix file " + badFile + + " because src " + src + " has blocksize as " + + sstat.getBlockSize() + + " but destination " + badFile + " has blocksize as " + + dstat.getBlockSize(); + LOG.error(msg); + HighTideNode.getMetrics().fixFailedBlockSizeMismatch.inc(); + throw new IOException(msg); + } + + // check that size of the two files are same + if (sstat.getLen() != dstat.getLen()) { + String msg = "Unable to fix file " + badFile + + " because src " + src + " has size as " + + sstat.getLen() + + " but destination " + badFile + " has size as " + + dstat.getLen(); + LOG.error(msg); + HighTideNode.getMetrics().fixFailedFileLengthMismatch.inc(); + throw new IOException(msg); + } + + List badBlocks = corruptBlocksInFile(destFs, badFile.toUri().getPath(), dstat); + List goodBlocks = srcFs.getClient().namenode.getBlockLocations( + src.toUri().getPath(), 0L, sstat.getLen()).getLocatedBlocks(); + + // for each of the bad blocks, find the good block + for (LocatedBlock badBlock: badBlocks) { + LocatedBlock found = null; + for (LocatedBlock goodBlock: goodBlocks) { + if (badBlock.getStartOffset() == goodBlock.getStartOffset()) { + found = goodBlock; + break; + } + } + if (found == null || found.getLocations().length == 0) { + String msg = "Could not find a good block location for badBlock " + badBlock + + " in file " + badFile; + LOG.error(msg); + HighTideNode.getMetrics().fixFailedNoGoodBlock.inc(); + throw new IOException (msg); + } + + // execute asynchronously + WorkItem bp = new WorkItem(badFile, found, badBlock, destFs, conf); + LOG.info("Queueing up block " + badBlock.getBlock().getBlockName() + + " to be fixed from block " + found.getBlock().getBlockName()); + executor.execute(bp); + } + } + + /** + * @return A list of corrupt files as obtained from the namenode + * If the namenode is down, then return an empty list. 
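Stepping back from the helpers in this class, a hedged sketch of how the HighTideNode presumably wires the fixer up, using only the constructor and setPolicyInfo declared above. The wrapper class, the Daemon usage and the policy source are assumptions; same-package placement is assumed because those members are package-private.

import java.io.IOException;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.PolicyInfo;
import org.apache.hadoop.util.Daemon;

// Assumed to live in org.apache.hadoop.hdfs.server.hightidenode.
class FixerWiringSketch {
  static Daemon startFixer(Configuration conf, Collection<PolicyInfo> policies)
      throws IOException {
    FileFixer fixer = new FileFixer(conf);  // reads the hightide.blockfix.* settings
    fixer.setPolicyInfo(policies);          // e.g. the policies loaded by ConfigManager
    Daemon d = new Daemon(fixer);           // run() polls namenodes every blockFixInterval
    d.start();
    return d;
  }
}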
+ */ + List getCorruptFilesFromNamenode(FileSystem fs) throws IOException { + if (!(fs instanceof DistributedFileSystem)) { + throw new IOException("Only DistributedFileSystem can be handled " + + " by HighTide."); + } + + DistributedFileSystem dfs = (DistributedFileSystem) fs; + List corruptFiles = new LinkedList(); + + try { + LOG.info("Checking filesystem: " + dfs.getUri()); + String[] files = + DFSUtil.getCorruptFiles(dfs); + for (String f: files) { + Path p = new Path(f).makeQualified(fs); + corruptFiles.add(p); + } + return corruptFiles; + } catch (Exception e) { + // if a single namenode is down, log it and ignore. Continue to + // fix other namenodes. + LOG.warn("getCorruptFilesFromNamenode: Unable to contact filesystem: " + fs.getUri() + + " ignoring..." + e); + e.printStackTrace(); + return corruptFiles; + } + } + + /** + * Returns the corrupt blocks in a file. + **/ + List corruptBlocksInFile( + DistributedFileSystem fs, String uriPath, FileStatus stat) + throws IOException { + List corrupt = new LinkedList(); + LocatedBlocks locatedBlocks = fs.getClient().namenode.getBlockLocations( + uriPath, 0, stat.getLen()); + for (LocatedBlock b: locatedBlocks.getLocatedBlocks()) { + if (b.isCorrupt() || + (b.getLocations().length == 0 && b.getBlockSize() > 0)) { + LOG.info("Adding bad block for file " + uriPath); + corrupt.add(b); + } + } + return corrupt; + } + + /** + * Setup a session with the specified datanode + */ + static ClientDatanodeProtocol createClientDatanodeProtocolProxy ( + DatanodeInfo datanodeid, Configuration conf) throws IOException { + InetSocketAddress addr = NetUtils.createSocketAddr( + datanodeid.getHost() + ":" + datanodeid.getIpcPort()); + if (ClientDatanodeProtocol.LOG.isDebugEnabled()) { + ClientDatanodeProtocol.LOG.info("ClientDatanodeProtocol addr=" + addr); + } + try { + return (ClientDatanodeProtocol)RPC.getProxy(ClientDatanodeProtocol.class, + ClientDatanodeProtocol.versionID, addr, conf); + } catch (RPC.VersionMismatch e) { + long clientVersion = e.getClientVersion(); + long datanodeVersion = e.getServerVersion(); + if (clientVersion > datanodeVersion && + !ProtocolCompatible.isCompatibleClientDatanodeProtocol( + clientVersion, datanodeVersion)) { + throw new RPC.VersionIncompatible( + ClientDatanodeProtocol.class.getName(), clientVersion, datanodeVersion); + } + return (ClientDatanodeProtocol)e.getProxy(); + } + } + + // a class to store pairs of blocks. + static class WorkItem implements Runnable { + Path badfile; // file to be fixed + LocatedBlock goodBlock; // existing replica of missing block + LocatedBlock badBlock; // missing block + DistributedFileSystem destFs; // filesystem of destination + Configuration conf; + private static Random rand = new Random(); + + WorkItem(Path file, LocatedBlock g, LocatedBlock b, FileSystem fs, Configuration conf) { + this.goodBlock = g; + this.badBlock = b; + this.badfile = file; + this.destFs = (DistributedFileSystem)fs; + this.conf = conf; + } + + @Override + public void run() { + + String msg = ""; + try { + // find a random datanode from the destination cluster + DatanodeInfo[] targets = destFs.getClient().datanodeReport(DatanodeReportType.LIVE); + DatanodeInfo target = targets[rand.nextInt(targets.length)]; + + // find a source datanode from among the datanodes that host this block + DatanodeInfo srcdn = goodBlock.getLocations()[rand.nextInt(goodBlock.getLocations().length)]; + + // The RPC is asynchronous, i.e. the RPC will return immediately even before the + // physical block copy occurs from the datanode. 
+ msg = "File " + badfile + ": Copying block " + + goodBlock.getBlock().getBlockName() + " from " + srcdn.getName() + + " to block " + badBlock.getBlock().getBlockName() + + " on " + target.getName(); + LOG.info(msg); + ClientDatanodeProtocol datanode = createClientDatanodeProtocolProxy(srcdn, conf); + datanode.copyBlock(goodBlock.getBlock(), badBlock.getBlock(), target); + RPC.stopProxy(datanode); + HighTideNode.getMetrics().fixSuccessfullyStarted.inc(); + } catch (Throwable e) { + HighTideNode.getMetrics().fixFailedDatanodeError.inc(); + LOG.error(StringUtils.stringifyException(e) + msg + ". Failed to contact datanode."); + } + } + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/HighTideConfigurationException.java b/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/HighTideConfigurationException.java new file mode 100644 index 0000000..4033996 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/HighTideConfigurationException.java @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.hightidenode; + +/** + * Thrown when the config file for {@link HighTideNode} is malformed. + */ +public class HighTideConfigurationException extends Exception { + private static final long serialVersionUID = 4046516718965587999L; + + public HighTideConfigurationException(String message) { + super(message); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/HighTideNode.java b/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/HighTideNode.java new file mode 100644 index 0000000..fe95cff --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/HighTideNode.java @@ -0,0 +1,494 @@ + /* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.hightidenode; + +import java.io.IOException; +import java.io.FileNotFoundException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.LinkedList; +import java.util.Iterator; +import java.util.Arrays; +import java.util.Map; +import java.util.Random; +import java.util.Set; +import java.util.HashSet; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.Date; +import java.text.SimpleDateFormat; +import java.lang.Thread; +import java.lang.InterruptedException; +import java.net.InetSocketAddress; +import java.net.URI; + +import org.xml.sax.SAXException; +import javax.xml.parsers.ParserConfigurationException; + +import org.apache.hadoop.hdfs.DFSClient; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.ipc.*; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.Daemon; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.fs.HarFileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.BlockLocation; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; + +import org.apache.hadoop.hdfs.protocol.HighTideProtocol; +import org.apache.hadoop.hdfs.protocol.PolicyInfo; +import org.apache.hadoop.hdfs.protocol.PolicyInfo.PathInfo; +import org.apache.hadoop.hdfs.server.hightidenode.metrics.HighTideNodeMetrics; + +/** + * A {@link HighTideNode} that implements + */ +public class HighTideNode implements HighTideProtocol { + + static{ + Configuration.addDefaultResource("hdfs-default.xml"); + Configuration.addDefaultResource("hdfs-site.xml"); + } + public static final Log LOG = LogFactory.getLog( "org.apache.hadoop.hightidenode.HighTideNode"); + public static final long SLEEP_TIME = 10000L; // 10 seconds + public static final int DEFAULT_PORT = 60100; + public static final String HIGHTIDE_FULLSYNC_INTERVAL = "hightide.fullsync.interval.seconds"; + public static final long HIGHTIDE_FULLSYNC_INTERVAL_DEFAULT = 60 * 60; // 1 hour + + public static final SimpleDateFormat dateForm = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + + /** RPC server */ + private Server server; + /** RPC server address */ + private InetSocketAddress serverAddress = null; + /** only used for testing purposes */ + private boolean stopRequested = false; + + /** Configuration Manager */ + private ConfigManager configMgr; + + /** hadoop configuration */ + private Configuration conf; + + protected boolean initialized; // Are we initialized? + protected volatile boolean running; // Are we running? 
+ + /** Deamon thread to find missing blocks */ + Daemon triggerThread = null; + + /** Daemon thread to fix corrupt files */ + public FileFixer fileFixer = null; + Daemon fileFixerThread = null; + + static HighTideNodeMetrics myMetrics; + + // statistics about replicas fixed + public static class Statistics { + long numProcessedBlocks; // total blocks encountered in namespace + long processedSize; // disk space occupied by all blocks + + public void clear() { + numProcessedBlocks = 0; + processedSize = 0; + } + public String toString() { + String msg = " numProcessedBlocks = " + numProcessedBlocks + + " processedSize = " + processedSize; + return msg; + } + } + + // Startup options + static public enum StartupOption{ + TEST ("-test"), + REGULAR ("-regular"); + + private String name = null; + private StartupOption(String arg) {this.name = arg;} + public String getName() {return name;} + } + + /** + * Start HighTideNode. + *

+ * The HighTide node can be started with one of the following startup options: + * <ul> + * <li>{@link StartupOption#REGULAR REGULAR} - normal HighTideNode startup</li> + * </ul>
+ * The option is passed via configuration field: + * fs.hightidenodenode.startup + * + * The conf will be modified to reflect the actual ports on which + * the HighTideNode is up and running if the user passes the port as + * zero in the conf. + * + * @param conf confirguration + * @throws IOException + */ + + HighTideNode(Configuration conf) throws IOException { + try { + initialize(conf); + } catch (IOException e) { + LOG.error(StringUtils.stringifyException(e)); + this.stop(); + throw e; + } catch (Exception e) { + LOG.error(StringUtils.stringifyException(e)); + this.stop(); + throw new IOException(e); + } + } + + public long getProtocolVersion(String protocol, + long clientVersion) throws IOException { + if (protocol.equals(HighTideProtocol.class.getName())) { + return HighTideProtocol.versionID; + } else { + throw new IOException("Unknown protocol to hightide node: " + protocol); + } + } + + /** + * Wait for service to finish. + * (Normally, it runs forever.) + */ + public void join() { + try { + if (server != null) server.join(); + if (triggerThread != null) triggerThread.join(); + if (fileFixerThread != null) fileFixerThread.join(); + } catch (InterruptedException ie) { + // do nothing + } + } + + /** + * Stop all HighTideNode threads and wait for all to finish. + */ + public void stop() { + if (stopRequested) { + return; + } + stopRequested = true; + running = false; + if (server != null) server.stop(); + if (triggerThread != null) triggerThread.interrupt(); + if (fileFixer != null) fileFixer.shutdown(); + if (fileFixerThread != null) fileFixerThread.interrupt(); + if (myMetrics != null) { + myMetrics.shutdown(); + } + } + + private static InetSocketAddress getAddress(String address) { + return NetUtils.createSocketAddr(address); + } + + public static InetSocketAddress getAddress(Configuration conf) { + String nodeport = conf.get("hightidenode.server.address"); + if (nodeport == null) { + nodeport = "localhost:" + DEFAULT_PORT; + } + return getAddress(nodeport); + } + + public InetSocketAddress getListenerAddress() { + return server.getListenerAddress(); + } + + private void initialize(Configuration conf) + throws IOException, SAXException, InterruptedException, + HighTideConfigurationException, + ClassNotFoundException, ParserConfigurationException { + this.conf = conf; + InetSocketAddress socAddr = HighTideNode.getAddress(conf); + int handlerCount = conf.getInt("fs.hightidenodenode.handler.count", 10); + + // read in the configuration + configMgr = new ConfigManager(conf); + configMgr.reloadConfigsIfNecessary(); + configMgr.startReload(); + + // create Metrics object + myMetrics = new HighTideNodeMetrics(conf, this); + + // create rpc server + this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(), + handlerCount, false, conf); + + // The rpc-server port can be ephemeral... 
ensure we have the correct info + this.serverAddress = this.server.getListenerAddress(); + LOG.info("HighTideNode up at: " + this.serverAddress); + + initialized = true; + running = true; + this.server.start(); // start RPC server + + + this.fileFixer = new FileFixer(conf); + this.fileFixerThread = new Daemon(this.fileFixer); + fileFixer.setPolicyInfo(configMgr.getAllPolicies()); + this.fileFixerThread.start(); + + // start the deamon thread to resync if needed + this.triggerThread = new Daemon(new TriggerMonitor()); + this.triggerThread.start(); + } + + /** + * Sync up on hightidenode restart + */ + class TriggerMonitor implements Runnable { + + private Map scanTimes = new HashMap(); + private Map scanState = + new HashMap(); + + public void run() { + while (running) { + try { + doFullSync(); + } catch (IOException e) { + LOG.info("Exception in doFullSync. " + StringUtils.stringifyException(e)); + } + + long sleepTime = conf.getLong(HIGHTIDE_FULLSYNC_INTERVAL, + HIGHTIDE_FULLSYNC_INTERVAL_DEFAULT) * 1000; + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + LOG.info("InterrupedException in TriggerMonitor.run."); + } + } + } + } + + /** + * Full sync of all policies + */ + void doFullSync() throws IOException { + for (PolicyInfo pinfo:configMgr.getAllPolicies()) { + doFullSync(pinfo); + } + } + + /** + * Full sync of specified policy + */ + void doFullSync(PolicyInfo pinfo) throws IOException { + Path srcPath = pinfo.getSrcPath(); + LOG.info("Starting fullsync of srcPath " + srcPath); + + FileSystem srcFs = srcPath.getFileSystem(pinfo.getConf()); + int srcRepl = Integer.parseInt(pinfo.getProperty("replication")); + long modTimePeriod = Long.parseLong(pinfo.getProperty("modTimePeriod")); + long now = HighTideNode.now(); + + // traverse all files inside the subtree rooted at srcPath + List slist = new ArrayList(); + slist.add(srcFs.getFileStatus(srcPath)); + DirectoryTraversal traverse = new DirectoryTraversal(srcFs, slist); + + while (true) { + FileStatus sstat = traverse.getNextFile(); + if (sstat == null) { + break; // done checking all files + } + + // we always follow the order of first changing the replication + // on the destination files before we update the repl factor of + // the source file. So, it is safe to make the following check + // and avoid checking the destination files if the source file + // already is at the specified replication factor. 
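// Editor's worked example (values taken from the sample hightide.xml later in this patch,
// not author text): with modTimePeriod = 1296000000 ms, i.e. 15 days, a file whose
// modification time falls within the last 15 days satisfies mtime + modTimePeriod > now
// in the check below and is skipped until it has been idle for the full period.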
+ if (sstat.getReplication() == srcRepl) { + continue; + } + // if the file has been updated recently, then do not + // do anything to it + if (sstat.getModificationTime() + modTimePeriod > now) { + continue; + } + + // find the suffix in the srcPath that is mapped to the destination + srcPath = sstat.getPath(); + String[] splits = srcPath.toString().split(pinfo.getSrcPath().toString()); + String suffix = splits[1]; + + // match each pair of src and destination paths + boolean match = true; + for (PathInfo destPathInfo: pinfo.getDestPaths()) { + Path destPath = new Path(destPathInfo.getPath().toString() + suffix); + LOG.debug("Comparing " + srcPath + " with " + destPath); + + int destRepl = Integer.parseInt(destPathInfo.getProperty("replication")); + FileSystem destFs = destPath.getFileSystem(pinfo.getConf()); + + FileStatus dstat = null; + + try { + dstat = destFs.getFileStatus(destPath); + } catch (java.io.FileNotFoundException e ) { + match = false; + continue; // ok if the destination does not exist + } catch (IOException e) { + match = false; + LOG.info("Unable to locate matching file in destination " + + destPath + StringUtils.stringifyException(e) + + ". Ignoring..."); + } + LOG.info("Matching " + srcPath + " with " + destPath); + HighTideNode.getMetrics().filesMatched.inc(); + + if (dstat.getModificationTime() == sstat.getModificationTime() && + dstat.getBlockSize() == sstat.getBlockSize() && + dstat.getLen() == sstat.getLen()) { + + // first reduce the intended replication on the destination path + if (dstat.getReplication() > destRepl) { + HighTideNode.getMetrics().filesChanged.inc(); + long saved = dstat.getLen() * (dstat.getReplication() - destRepl); + LOG.info("Changing replication of dest " + destPath + + " from " + dstat.getReplication() + " to " + destRepl); + destFs.setReplication(dstat.getPath(), (short)destRepl); + + saved += HighTideNode.getMetrics().savedSize.get(); + HighTideNode.getMetrics().savedSize.set(saved); + } + } else { + // one destination path does not match the source + match = false; + break; + } + } + // if the all the destination paths matched the source, then + // reduce repl factor of the source + if (match && sstat.getReplication() > srcRepl) { + LOG.info("Changing replication of source " + srcPath + + " from " + sstat.getReplication() + " to " + srcRepl); + HighTideNode.getMetrics().filesChanged.inc(); + long saved = sstat.getLen() * (sstat.getReplication() - srcRepl); + srcFs.setReplication(sstat.getPath(), (short)srcRepl); + + saved += HighTideNode.getMetrics().savedSize.get(); + HighTideNode.getMetrics().savedSize.set(saved); + } + } + LOG.info("Completed fullsync of srcPath " + srcPath); + } + + /** + * Shuts down the HighTideNode + */ + void shutdown() throws IOException, InterruptedException { + configMgr.stopReload(); // stop config reloads + fileFixer.shutdown(); // stop block fixer + fileFixerThread.interrupt(); + server.stop(); // stop http server + } + + + /** + * Implement HighTideProtocol methods + */ + + /** {@inheritDoc} */ + public PolicyInfo[] getAllPolicies() throws IOException { + Collection list = configMgr.getAllPolicies(); + return list.toArray(new PolicyInfo[list.size()]); + } + + /** + * returns my Metrics object + */ + public static HighTideNodeMetrics getMetrics() { + return myMetrics; + } + + /** + * Returns current time. 
+ */ + static long now() { + return System.currentTimeMillis(); + } + + private static void printUsage() { + System.err.println("Usage: java HighTideNode "); + } + + private static StartupOption parseArguments(String args[]) { + int argsLen = (args == null) ? 0 : args.length; + StartupOption startOpt = StartupOption.REGULAR; + for(int i=0; i < argsLen; i++) { + String cmd = args[i]; // We have to parse command line args in future. + } + return startOpt; + } + + /** + * Convert command line options to configuration parameters + */ + private static void setStartupOption(Configuration conf, StartupOption opt) { + conf.set("fs.hightidenodenode.startup", opt.toString()); + } + + /** + * Create an instance of the HighTideNode + */ + public static HighTideNode createHighTideNode(String argv[], + Configuration conf) throws IOException { + if (conf == null) { + conf = new Configuration(); + } + StartupOption startOpt = parseArguments(argv); + if (startOpt == null) { + printUsage(); + return null; + } + setStartupOption(conf, startOpt); + HighTideNode node = new HighTideNode(conf); + return node; + } + + /** + */ + public static void main(String argv[]) throws Exception { + try { + StringUtils.startupShutdownMessage(HighTideNode.class, argv, LOG); + HighTideNode hightidenode = createHighTideNode(argv, null); + if (hightidenode != null) { + hightidenode.join(); + } + } catch (Throwable e) { + LOG.error(StringUtils.stringifyException(e)); + System.exit(-1); + } + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/PendingReplication.java b/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/PendingReplication.java new file mode 100644 index 0000000..8b45662 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/PendingReplication.java @@ -0,0 +1,195 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.hightidenode; + +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.util.*; +import org.apache.hadoop.fs.Path; +import java.io.*; +import java.util.*; +import java.sql.Time; + +/*************************************************** + * PendingReplication does the bookkeeping of all + * files that are getting replicated. + * + * It does the following: + * 1) record files that are getting replicated at this instant. + * 2) a coarse grain timer to track age of replication request + * 3) a thread that periodically identifies replication-requests + * that possibly are declared done. 
+ * + ***************************************************/ +class PendingReplication { + private Map pendingReplications; + Daemon timerThread = null; + private volatile boolean fsRunning = true; + + // + // It might take anywhere between 5 to 10 minutes before + // a request is timed out. + // + private long timeout = 5 * 60 * 1000; + private long defaultRecheckInterval = 1 * 60 * 1000; + + PendingReplication(long timeoutPeriod) { + if ( timeoutPeriod > 0 ) { + this.timeout = timeoutPeriod; + } + init(); + } + + PendingReplication() { + init(); + } + + void init() { + pendingReplications = new HashMap(); + this.timerThread = new Daemon(new PendingReplicationMonitor()); + timerThread.start(); + } + + /** + * Add a block to the list of pending Replications. Returns true + * if the filename is added for the first time. + */ + boolean add(Path filename) { + synchronized (pendingReplications) { + PendingInfo found = pendingReplications.get(filename); + if (found == null) { + pendingReplications.put(filename, new PendingInfo(filename)); + return true; + } + return false; + } + } + /** + * Remove a block to the list of pending Replications. + */ + void remove(Path filename) { + synchronized (pendingReplications) { + pendingReplications.remove(filename); + } + } + + /** + * The total number of files that are undergoing replication + */ + int size() { + return pendingReplications.size(); + } + + /** + * An object that contains information about a file that + * is being replicated. It records the timestamp when the + * system started replicating the most recent copy of this + * file. + */ + static class PendingInfo { + private long timeStamp; + private Path filename; + + PendingInfo(Path filename) { + this.timeStamp = HighTideNode.now(); + this.filename = filename; + } + + long getTimeStamp() { + return timeStamp; + } + + void setTimeStamp() { + timeStamp = HighTideNode.now(); + } + + Path getFile() { + return filename; + } + } + + /* + * A periodic thread that scans for blocks that should have finished. + */ + class PendingReplicationMonitor implements Runnable { + public void run() { + while (fsRunning) { + long period = Math.min(defaultRecheckInterval, timeout); + try { + pendingReplicationCheck(); + Thread.sleep(period); + } catch (InterruptedException ie) { + HighTideNode.LOG.debug( + "PendingReplicationMonitor thread received exception. " + ie); + } + } + } + + /** + * Iterate through all items and detect timed-out items + */ + void pendingReplicationCheck() { + synchronized (pendingReplications) { + Iterator iter = pendingReplications.entrySet().iterator(); + long now = HighTideNode.now(); + HighTideNode.LOG.info("PendingReplicationMonitor checking Q"); + while (iter.hasNext()) { + Map.Entry entry = (Map.Entry) iter.next(); + PendingInfo pendingBlock = (PendingInfo) entry.getValue(); + if (now > pendingBlock.getTimeStamp() + timeout) { + HighTideNode.LOG.info( + "PendingReplicationMonitor purging record for file " + + pendingBlock.getFile()); + iter.remove(); + HighTideNode.getMetrics().fixClearedOut.inc(); + } + } + } + } + } + + /* + * Shuts down the pending replication monitor thread. + * Waits for the thread to exit. + */ + void stop() { + fsRunning = false; + timerThread.interrupt(); + try { + timerThread.join(3000); + } catch (InterruptedException ie) { + } + } + + /** + * Iterate through all items and print them. 
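The bookkeeping class above exposes a small surface: record a file when a repair is scheduled, drop it once the repair is confirmed, and let PendingReplicationMonitor purge anything older than the timeout. A possible usage pattern (editor's sketch with a hypothetical path; how the fixer actually drives this class is not shown in this hunk):

    import org.apache.hadoop.fs.Path;

    // ten-minute timeout instead of the five-minute default
    PendingReplication pending = new PendingReplication(10 * 60 * 1000L);
    Path file = new Path("/warehouse/part-00000");    // hypothetical file
    if (pending.add(file)) {
      // first time this file is seen: safe to schedule a block copy for it
    }
    // ... later, once the copy is known to have completed:
    pending.remove(file);
    pending.stop();                                   // stop the monitor thread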
+ */ + void metaSave(PrintWriter out) { + synchronized (pendingReplications) { + out.println("Metasave: Blocks being replicated: " + + pendingReplications.size()); + Iterator iter = pendingReplications.entrySet().iterator(); + while (iter.hasNext()) { + Map.Entry entry = (Map.Entry) iter.next(); + PendingInfo pendingBlock = (PendingInfo) entry.getValue(); + Path filename = (Path) entry.getKey(); + out.println(filename + + " StartTime: " + new Time(pendingBlock.timeStamp)); + } + } + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/README b/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/README new file mode 100644 index 0000000..3b4711f --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/README @@ -0,0 +1,14 @@ +Goal: The goal of the HighTideNode is to keep only one physical replica per data center. This is mostly for older files that change very infrequently.The HighTide server watches over the two HDFS namespaces from two different NameNodes in two different data centers. These two equivalent namespaces are populated via means that are external to HighTide. The HighTide server verifies (via modtime/filelength ) that two files in the two HDFS contain identical data, and if so, reduces the replication factor to 2 on both HDFS. (One or both HDFS could be using HDFS-RAID too).The HighTideNode monitors any missing replicas on both namenode, and if it finds any it will fix by copying data from the other namenode in the remote data center. +In short, the replication within a HDFS cluster will occur via the NameNode as usual. Each NameNode will maintain fewer than 3 copies of the data. The replication across HDFS clusters is coordinated by the HighTideNode. It invokes the -list-corruptFiles RPC to each NameNode periodically (every minute) to detect missing replicas. + +DataNodeGateway:I envision a single HighTideNode coordinating replication between multiple HDFS clusters. An alternative approach would be to do some sort of a GateWay approach: a specialized DataNode that exports the DataNode protocol and appears like a huge-big DataNode to a HDFS cluster, but instead of storing blocks on local disks, the GatewayDataNode would store data in a remote HDFS cluster. This is similar to existing NFS Gateways, e.g. NFS-CIFS interaction. The downside is that this design is more complex and intrusive to HDFS rather than being a layer on top of it. This code does not implement this approach. + +Mean-Time-To-Recover (MTR): Will this approach of having remote replicas increase the probability of data loss? My claim is that we should try to keep the MTR practically the same as it is today. If all the replicas of a block on HDFS1 goes missing, then the HighTideNode will first increase the replication factor of the equivalent file in HDFS2. This ensures that we get back to 3-overall copies as soon as possible, thus keeping the MTR same as it is now. Then the HighTideNode will copy over this block from HDFS2 to HDFS1, wait for HDFS1 to attain a replica count of 2 before decreasing the replica count on HDFS2 from 3 back to 2. + +HDFS-RAID: HighTide can co-exist with HDFS-RAID. HDFS-RAID allows us to keep fewer physical copies of data + parity. The MTR from RAID is smaller compared to HighTide, but the savings using HighTide is way more because the savings-percentage does not depend on RAID-stripe size or on file-lengths. 
Once can use RAID to achieve a replication factor of 1.2 in each HDFS cluster and then use HighTide to have an additional 1 replicas on the remote HDFS cluster(s). + +Performance: Map-reduce jobs could have a performance impact if the number of replicas are reduced from 3 to 2. So, the tradeoff is reducing the total amount of storage while possibly increasing job latencies. + +HBase: With the current HBase design it is difficult to use HighTide to replicate across data centers. This is something that we need to delve more into. + + diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/hightide.xml b/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/hightide.xml new file mode 100644 index 0000000..8b05e1a --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/hightide.xml @@ -0,0 +1,35 @@ + + + + replication + 2 + the original src file should have this replication factor + + + + modTimePeriod + 1296000000 + Pick files only if neither src or destination has been modified + in 15 days. + + + + simulate + false + Do not reduce replication factor of raided file. + + + + + + replication + 1 + the destination should have these may replicas + + + + + + + + diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/metrics/HighTideNodeActivityMBean.java b/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/metrics/HighTideNodeActivityMBean.java new file mode 100644 index 0000000..b932909 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/metrics/HighTideNodeActivityMBean.java @@ -0,0 +1,38 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.hightidenode.metrics; + +import javax.management.ObjectName; + +import org.apache.hadoop.metrics.util.MBeanUtil; +import org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase; +import org.apache.hadoop.metrics.util.MetricsRegistry; + +public class HighTideNodeActivityMBean extends MetricsDynamicMBeanBase { + final private ObjectName mbeanName; + + protected HighTideNodeActivityMBean(final MetricsRegistry mr) { + super(mr, "Activity statistics at the HighTideNode"); + mbeanName = MBeanUtil.registerMBean("HighTideNode", "HighTideNodeActivity", this); + } + + public void shutdown() { + if (mbeanName != null) + MBeanUtil.unregisterMBean(mbeanName); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/metrics/HighTideNodeMetrics.java b/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/metrics/HighTideNodeMetrics.java new file mode 100644 index 0000000..192f194 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/hightidenode/metrics/HighTideNodeMetrics.java @@ -0,0 +1,121 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.hightidenode.metrics; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.metrics.MetricsContext; +import org.apache.hadoop.metrics.MetricsRecord; +import org.apache.hadoop.metrics.MetricsUtil; +import org.apache.hadoop.metrics.Updater; +import org.apache.hadoop.metrics.util.MetricsBase; +import org.apache.hadoop.metrics.util.MetricsRegistry; +import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong; +import org.apache.hadoop.metrics.util.MetricsLongValue; +import org.apache.hadoop.metrics.jvm.JvmMetrics; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.server.hightidenode.HighTideNode; + +/** + * + * This class is for maintaining the various HighTideNode activity statistics + * and publishing them through the metrics interfaces. + * This also registers the JMX MBean for RPC. + *

+ * This class has a number of metrics variables that are publicly accessible; + * these variables (objects) have methods to update their values; + * for example: + * <p>
{@link #syncs}.inc() + * + */ + +public class HighTideNodeMetrics implements Updater { + public static final Log LOG = LogFactory.getLog( + "org.apache.hadoop.hdfs.server.hightidenode.metrics.HighTideNodeMetrics"); + + private final MetricsRecord metricsRecord; + public MetricsRegistry registry = new MetricsRegistry(); + + private HighTideNodeActivityMBean hightidenodeActivityMBean; + + public MetricsTimeVaryingLong fixAttempt = + new MetricsTimeVaryingLong("FixAttempted", registry); + public MetricsTimeVaryingLong fixFailedModTimeMismatch = + new MetricsTimeVaryingLong("FixFailedModTimeMismatch", registry); + public MetricsTimeVaryingLong fixFailedBlockSizeMismatch = + new MetricsTimeVaryingLong("FixFailedBlockSizeMismatch", registry); + public MetricsTimeVaryingLong fixFailedFileLengthMismatch = + new MetricsTimeVaryingLong("FixFailedFileLengthMismatch", registry); + public MetricsTimeVaryingLong fixFailedNoGoodBlock = + new MetricsTimeVaryingLong("FixFailedNoGoodBlock", registry); + public MetricsTimeVaryingLong fixFailedDatanodeError = + new MetricsTimeVaryingLong("FixFailedDatanodeError", registry); + public MetricsTimeVaryingLong fixClearedOut = + new MetricsTimeVaryingLong("FixClearedOut", registry); + public MetricsTimeVaryingLong fixSuccessfullyStarted = + new MetricsTimeVaryingLong("FixSuccessfullyStarted", registry); + + public MetricsTimeVaryingLong filesMatched = + new MetricsTimeVaryingLong("NumberOfFilesMatched", registry); + public MetricsTimeVaryingLong filesChanged = + new MetricsTimeVaryingLong("NumberOfFilesChanged", registry); + public MetricsLongValue savedSize = + new MetricsLongValue("SavedSize", registry); + + public HighTideNodeMetrics(Configuration conf, HighTideNode hightideNode) { + String sessionId = conf.get("session.id"); + // Initiate Java VM metrics + JvmMetrics.init("HighTideNode", sessionId); + + + // Now the Mbean for the name node - this also registers the MBean + hightidenodeActivityMBean = new HighTideNodeActivityMBean(registry); + + // Create a record for HighTideNode metrics + MetricsContext metricsContext = MetricsUtil.getContext("dfs"); + metricsRecord = MetricsUtil.createRecord(metricsContext, "hightidenode"); + metricsRecord.setTag("sessionId", sessionId); + metricsContext.registerUpdater(this); + LOG.info("Initializing HighTideNodeMetrics using context object:" + + metricsContext.getClass().getName()); + } + + public void shutdown() { + if (hightidenodeActivityMBean != null) + hightidenodeActivityMBean.shutdown(); + } + + /** + * Since this object is a registered updater, this method will be called + * periodically, e.g. every 5 seconds. + */ + public void doUpdates(MetricsContext unused) { + synchronized (this) { + for (MetricsBase m : registry.getMetricsList()) { + m.pushMetric(metricsRecord); + } + } + metricsRecord.update(); + } + + /** + * Clear out all metrics that are min or max + */ + public void resetAllMinMax() { + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicy.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicy.java new file mode 100644 index 0000000..53c0f35 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicy.java @@ -0,0 +1,173 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.net.NetworkTopology; +import org.apache.hadoop.net.Node; +import org.apache.hadoop.util.ReflectionUtils; +import java.util.*; + +/** + * This interface is used for choosing the desired number of targets + * for placing block replicas. + */ +public abstract class BlockPlacementPolicy { + + public static class NotEnoughReplicasException extends Exception { + private static final long serialVersionUID = 1L; + NotEnoughReplicasException(String msg) { + super(msg); + } + } + + /** + * choose numOfReplicas data nodes for writer + * to re-replicate a block with size blocksize + * If not, return as many as we can. + * + * @param srcPath the file to which this chooseTargets is being invoked. + * @param numOfReplicas additional number of replicas wanted. + * @param writer the writer's machine, null if not in the cluster. + * @param chosenNodes datanodes that have been chosen as targets. + * @param blocksize size of the data to be written. + * @return array of DatanodeDescriptor instances chosen as target + * and sorted as a pipeline. + */ + abstract DatanodeDescriptor[] chooseTarget(String srcPath, + int numOfReplicas, + DatanodeDescriptor writer, + List chosenNodes, + long blocksize); + + /** + * choose numOfReplicas data nodes for writer + * to re-replicate a block with size blocksize + * If not, return as many as we can. + * The base implemenatation extracts the pathname of the file from the + * specified srcInode, but this could be a costly operation depending on the + * file system implementation. Concrete implementations of this class should + * override this method to avoid this overhead. + * + * @param srcInode The inode of the file for which chooseTarget is being invoked. + * @param numOfReplicas additional number of replicas wanted. + * @param writer the writer's machine, null if not in the cluster. + * @param chosenNodes datanodes that have been chosen as targets. + * @param blocksize size of the data to be written. + * @return array of DatanodeDescriptor instances chosen as target + * and sorted as a pipeline. + */ + DatanodeDescriptor[] chooseTarget(FSInodeInfo srcInode, + int numOfReplicas, + DatanodeDescriptor writer, + List chosenNodes, + List excludesNodes, + long blocksize) { + return chooseTarget(srcInode.getFullPathName(), numOfReplicas, writer, + chosenNodes, blocksize); + } + + /** + * Verify that the block is replicated on at least minRacks different racks + * if there is more than minRacks rack in the system. 
+ * + * @param srcPath the full pathname of the file to be verified + * @param lBlk block with locations + * @param minRacks number of racks the block should be replicated to + * @return the difference between the required and the actual number of racks + * the block is replicated to. + */ + abstract public int verifyBlockPlacement(String srcPath, + LocatedBlock lBlk, + int minRacks); + /** + * Decide whether deleting the specified replica of the block still makes + * the block conform to the configured block placement policy. + * + * @param srcInode The inode of the file to which the block-to-be-deleted belongs + * @param block The block to be deleted + * @param replicationFactor The required number of replicas for this block + * @param existingReplicas The replica locations of this block that are present + on at least two unique racks. + * @param moreExistingReplicas Replica locations of this block that are not + listed in the previous parameter. + * @return the replica that is the best candidate for deletion + */ + abstract public DatanodeDescriptor chooseReplicaToDelete(FSInodeInfo srcInode, + Block block, + short replicationFactor, + Collection existingReplicas, + Collection moreExistingReplicas); + + /** + * Used to setup a BlockPlacementPolicy object. This should be defined by + * all implementations of a BlockPlacementPolicy. + * + * @param conf the configuration object + * @param stats retrieve cluster status from here + * @param clusterMap cluster topology + */ + abstract protected void initialize(Configuration conf, FSClusterStats stats, + NetworkTopology clusterMap); + + /** + * Get an instance of the configured Block Placement Policy based on the + * value of the configuration paramater dfs.block.replicator.classname. + * + * @param conf the configuration to be used + * @param stats an object thatis used to retrieve the load on the cluster + * @param clusterMap the network topology of the cluster + * @return an instance of BlockPlacementPolicy + */ + public static BlockPlacementPolicy getInstance(Configuration conf, + FSClusterStats stats, + NetworkTopology clusterMap) { + Class replicatorClass = + conf.getClass("dfs.block.replicator.classname", + BlockPlacementPolicyDefault.class, + BlockPlacementPolicy.class); + BlockPlacementPolicy replicator = (BlockPlacementPolicy) ReflectionUtils.newInstance( + replicatorClass, conf); + replicator.initialize(conf, stats, clusterMap); + return replicator; + } + + /** + * choose numOfReplicas nodes for writer to replicate + * a block with size blocksize + * If not, return as many as we can. + * + * @param srcPath a string representation of the file for which chooseTarget is invoked + * @param numOfReplicas number of replicas wanted. + * @param writer the writer's machine, null if not in the cluster. + * @param blocksize size of the data to be written. + * @return array of DatanodeDescriptor instances chosen as targets + * and sorted as a pipeline. 
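getInstance() above makes the policy pluggable through the dfs.block.replicator.classname key. A sketch of selecting a non-default policy (editor's illustration; MyPlacementPolicy is a hypothetical subclass, and the stats and topology objects would normally be supplied by the namenode):

    package org.apache.hadoop.hdfs.server.namenode;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.net.NetworkTopology;

    class PolicySelectionSketch {
      static BlockPlacementPolicy select(FSClusterStats stats, NetworkTopology clusterMap) {
        Configuration conf = new Configuration();
        // MyPlacementPolicy stands in for any concrete BlockPlacementPolicy subclass.
        conf.setClass("dfs.block.replicator.classname",
                      MyPlacementPolicy.class, BlockPlacementPolicy.class);
        return BlockPlacementPolicy.getInstance(conf, stats, clusterMap);
      }
    }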
+ */ + DatanodeDescriptor[] chooseTarget(String srcPath, + int numOfReplicas, + DatanodeDescriptor writer, + long blocksize) { + return chooseTarget(srcPath, numOfReplicas, writer, + new ArrayList(), + blocksize); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java new file mode 100644 index 0000000..02d7704 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java @@ -0,0 +1,508 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import org.apache.commons.logging.*; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.net.NetworkTopology; +import org.apache.hadoop.net.Node; +import org.apache.hadoop.net.NodeBase; +import java.util.*; + +/** The class is responsible for choosing the desired number of targets + * for placing block replicas. + * The replica placement strategy is that if the writer is on a datanode, + * the 1st replica is placed on the local machine, + * otherwise a random datanode. The 2nd replica is placed on a datanode + * that is on a different rack. The 3rd replica is placed on a datanode + * which is on a different node of the rack as the second replica. 
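The strategy described in the javadoc above can be summarized in a few lines. A deliberately simplified sketch (editor's illustration with hypothetical helper methods; the real chooseTarget additionally honours excluded nodes, per-rack limits, free space and load):

    // 1st replica: the writer's own datanode when it is in the cluster, else a random node.
    // 2nd replica: a node on a different rack from the first.
    // 3rd replica: a different node on the same rack as the second.
    DatanodeDescriptor[] placeThree(DatanodeDescriptor writer, NetworkTopology cluster) {
      DatanodeDescriptor first  = (writer != null && cluster.contains(writer))
          ? writer : randomNode(cluster);               // randomNode* are hypothetical helpers
      DatanodeDescriptor second = randomNodeOffRack(cluster, first);
      DatanodeDescriptor third  = randomNodeOnRack(cluster, second, first);
      return new DatanodeDescriptor[] { first, second, third };
    }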
+ */ +public class BlockPlacementPolicyDefault extends BlockPlacementPolicy { + private boolean considerLoad; + private NetworkTopology clusterMap; + private FSClusterStats stats; + + BlockPlacementPolicyDefault(Configuration conf, FSClusterStats stats, + NetworkTopology clusterMap) { + initialize(conf, stats, clusterMap); + } + + BlockPlacementPolicyDefault() { + } + + /** {@inheritDoc} */ + public void initialize(Configuration conf, FSClusterStats stats, + NetworkTopology clusterMap) { + this.considerLoad = conf.getBoolean("dfs.replication.considerLoad", true); + this.stats = stats; + this.clusterMap = clusterMap; + } + + /** {@inheritDoc} */ + public DatanodeDescriptor[] chooseTarget(String srcPath, + int numOfReplicas, + DatanodeDescriptor writer, + List chosenNodes, + long blocksize) { + return chooseTarget(numOfReplicas, writer, chosenNodes, null, blocksize); + } + + /** {@inheritDoc} */ + @Override + public DatanodeDescriptor[] chooseTarget(FSInodeInfo srcInode, + int numOfReplicas, + DatanodeDescriptor writer, + List chosenNodes, + List excludesNodes, + long blocksize) { + return chooseTarget(numOfReplicas, writer, chosenNodes, null, blocksize); + } + + /** + * This is not part of the public API but is used by the unit tests. + */ + DatanodeDescriptor[] chooseTarget(int numOfReplicas, + DatanodeDescriptor writer, + List chosenNodes, + List exlcNodes, + long blocksize) { + if (numOfReplicas == 0 || clusterMap.getNumOfLeaves()==0) { + return new DatanodeDescriptor[0]; + } + + HashMap excludedNodes = new HashMap(); + if (exlcNodes != null) { + for (Node node:exlcNodes) { + excludedNodes.put(node, node); + } + } + + int clusterSize = clusterMap.getNumOfLeaves(); + int totalNumOfReplicas = chosenNodes.size()+numOfReplicas; + if (totalNumOfReplicas > clusterSize) { + numOfReplicas -= (totalNumOfReplicas-clusterSize); + totalNumOfReplicas = clusterSize; + } + + int maxNodesPerRack = + (totalNumOfReplicas-1)/clusterMap.getNumOfRacks()+2; + + List results = + new ArrayList(chosenNodes); + for (Node node:chosenNodes) { + excludedNodes.put(node, node); + } + + if (!clusterMap.contains(writer)) { + writer=null; + } + + DatanodeDescriptor localNode = chooseTarget(numOfReplicas, writer, + excludedNodes, blocksize, maxNodesPerRack, results); + + results.removeAll(chosenNodes); + + // sorting nodes to form a pipeline + return getPipeline((writer==null)?localNode:writer, + results.toArray(new DatanodeDescriptor[results.size()])); + } + + /* choose numOfReplicas from all data nodes */ + private DatanodeDescriptor chooseTarget(int numOfReplicas, + DatanodeDescriptor writer, + HashMap excludedNodes, + long blocksize, + int maxNodesPerRack, + List results) { + + if (numOfReplicas == 0 || clusterMap.getNumOfLeaves()==0) { + return writer; + } + + int numOfResults = results.size(); + boolean newBlock = (numOfResults==0); + if (writer == null && !newBlock) { + writer = results.get(0); + } + + try { + if (numOfResults == 0) { + writer = chooseLocalNode(writer, excludedNodes, + blocksize, maxNodesPerRack, results); + if (--numOfReplicas == 0) { + return writer; + } + } + if (numOfResults <= 1) { + chooseRemoteRack(1, results.get(0), excludedNodes, + blocksize, maxNodesPerRack, results); + if (--numOfReplicas == 0) { + return writer; + } + } + if (numOfResults <= 2) { + if (clusterMap.isOnSameRack(results.get(0), results.get(1))) { + chooseRemoteRack(1, results.get(0), excludedNodes, + blocksize, maxNodesPerRack, results); + } else if (newBlock){ + chooseLocalRack(results.get(1), excludedNodes, blocksize, + 
maxNodesPerRack, results); + } else { + chooseLocalRack(writer, excludedNodes, blocksize, + maxNodesPerRack, results); + } + if (--numOfReplicas == 0) { + return writer; + } + } + chooseRandom(numOfReplicas, NodeBase.ROOT, excludedNodes, + blocksize, maxNodesPerRack, results); + } catch (NotEnoughReplicasException e) { + FSNamesystem.LOG.warn("Not able to place enough replicas, still in need of " + + numOfReplicas); + } + return writer; + } + + /* choose localMachine as the target. + * if localMachine is not available, + * choose a node on the same rack + * @return the chosen node + */ + private DatanodeDescriptor chooseLocalNode( + DatanodeDescriptor localMachine, + HashMap excludedNodes, + long blocksize, + int maxNodesPerRack, + List results) + throws NotEnoughReplicasException { + // if no local machine, randomly choose one node + if (localMachine == null) + return chooseRandom(NodeBase.ROOT, excludedNodes, + blocksize, maxNodesPerRack, results); + + // otherwise try local machine first + Node oldNode = excludedNodes.put(localMachine, localMachine); + if (oldNode == null) { // was not in the excluded list + if (isGoodTarget(localMachine, blocksize, + maxNodesPerRack, false, results)) { + results.add(localMachine); + return localMachine; + } + } + + // try a node on local rack + return chooseLocalRack(localMachine, excludedNodes, + blocksize, maxNodesPerRack, results); + } + + /* choose one node from the rack that localMachine is on. + * if no such node is available, choose one node from the rack where + * a second replica is on. + * if still no such node is available, choose a random node + * in the cluster. + * @return the chosen node + */ + private DatanodeDescriptor chooseLocalRack( + DatanodeDescriptor localMachine, + HashMap excludedNodes, + long blocksize, + int maxNodesPerRack, + List results) + throws NotEnoughReplicasException { + // no local machine, so choose a random machine + if (localMachine == null) { + return chooseRandom(NodeBase.ROOT, excludedNodes, + blocksize, maxNodesPerRack, results); + } + + // choose one from the local rack + try { + return chooseRandom( + localMachine.getNetworkLocation(), + excludedNodes, blocksize, maxNodesPerRack, results); + } catch (NotEnoughReplicasException e1) { + // find the second replica + DatanodeDescriptor newLocal=null; + for(Iterator iter=results.iterator(); + iter.hasNext();) { + DatanodeDescriptor nextNode = iter.next(); + if (nextNode != localMachine) { + newLocal = nextNode; + break; + } + } + if (newLocal != null) { + try { + return chooseRandom( + newLocal.getNetworkLocation(), + excludedNodes, blocksize, maxNodesPerRack, results); + } catch(NotEnoughReplicasException e2) { + //otherwise randomly choose one from the network + return chooseRandom(NodeBase.ROOT, excludedNodes, + blocksize, maxNodesPerRack, results); + } + } else { + //otherwise randomly choose one from the network + return chooseRandom(NodeBase.ROOT, excludedNodes, + blocksize, maxNodesPerRack, results); + } + } + } + + /* choose numOfReplicas nodes from the racks + * that localMachine is NOT on. 
+ * if not enough nodes are available, choose the remaining ones + * from the local rack + */ + + private void chooseRemoteRack(int numOfReplicas, + DatanodeDescriptor localMachine, + HashMap excludedNodes, + long blocksize, + int maxReplicasPerRack, + List results) + throws NotEnoughReplicasException { + int oldNumOfReplicas = results.size(); + // randomly choose one node from remote racks + try { + chooseRandom(numOfReplicas, "~"+localMachine.getNetworkLocation(), + excludedNodes, blocksize, maxReplicasPerRack, results); + } catch (NotEnoughReplicasException e) { + chooseRandom(numOfReplicas-(results.size()-oldNumOfReplicas), + localMachine.getNetworkLocation(), excludedNodes, blocksize, + maxReplicasPerRack, results); + } + } + + /* Randomly choose one target from nodes. + * @return the chosen node + */ + private DatanodeDescriptor chooseRandom( + String nodes, + HashMap excludedNodes, + long blocksize, + int maxNodesPerRack, + List results) + throws NotEnoughReplicasException { + int numOfAvailableNodes = + clusterMap.countNumOfAvailableNodes(nodes, excludedNodes.keySet()); + while(numOfAvailableNodes > 0) { + DatanodeDescriptor chosenNode = + (DatanodeDescriptor)(clusterMap.chooseRandom(nodes)); + + Node oldNode = excludedNodes.put(chosenNode, chosenNode); + if (oldNode == null) { // choosendNode was not in the excluded list + numOfAvailableNodes--; + if (isGoodTarget(chosenNode, blocksize, maxNodesPerRack, results)) { + results.add(chosenNode); + return chosenNode; + } + } + } + + throw new NotEnoughReplicasException( + "Not able to place enough replicas"); + } + + /* Randomly choose numOfReplicas targets from nodes. + */ + private void chooseRandom(int numOfReplicas, + String nodes, + HashMap excludedNodes, + long blocksize, + int maxNodesPerRack, + List results) + throws NotEnoughReplicasException { + + int numOfAvailableNodes = + clusterMap.countNumOfAvailableNodes(nodes, excludedNodes.keySet()); + while(numOfReplicas > 0 && numOfAvailableNodes > 0) { + DatanodeDescriptor chosenNode = + (DatanodeDescriptor)(clusterMap.chooseRandom(nodes)); + Node oldNode = excludedNodes.put(chosenNode, chosenNode); + if (oldNode == null) { + numOfAvailableNodes--; + + if (isGoodTarget(chosenNode, blocksize, maxNodesPerRack, results)) { + numOfReplicas--; + results.add(chosenNode); + } + } + } + + if (numOfReplicas>0) { + throw new NotEnoughReplicasException( + "Not able to place enough replicas"); + } + } + + /* judge if a node is a good target. 
+ * return true if node has enough space, + * does not have too much load, and the rack does not have too many nodes + */ + private boolean isGoodTarget(DatanodeDescriptor node, + long blockSize, int maxTargetPerLoc, + List results) { + return isGoodTarget(node, blockSize, maxTargetPerLoc, + this.considerLoad, results); + } + + private boolean isGoodTarget(DatanodeDescriptor node, + long blockSize, int maxTargetPerLoc, + boolean considerLoad, + List results) { + Log logr = FSNamesystem.LOG; + // check if the node is (being) decommissed + if (node.isDecommissionInProgress() || node.isDecommissioned()) { + logr.debug("Node "+NodeBase.getPath(node)+ + " is not chosen because the node is (being) decommissioned"); + return false; + } + + long remaining = node.getRemaining() - + (node.getBlocksScheduled() * blockSize); + // check the remaining capacity of the target machine + if (blockSize* FSConstants.MIN_BLOCKS_FOR_WRITE>remaining) { + logr.debug("Node "+NodeBase.getPath(node)+ + " is not chosen because the node does not have enough space"); + return false; + } + + // check the communication traffic of the target machine + if (considerLoad) { + double avgLoad = 0; + int size = clusterMap.getNumOfLeaves(); + if (size != 0 && stats != null) { + avgLoad = (double)stats.getTotalLoad()/size; + } + if (node.getXceiverCount() > (2.0 * avgLoad)) { + logr.debug("Node "+NodeBase.getPath(node)+ + " is not chosen because the node is too busy"); + return false; + } + } + + // check if the target rack has chosen too many nodes + String rackname = node.getNetworkLocation(); + int counter=1; + for(Iterator iter = results.iterator(); + iter.hasNext();) { + Node result = iter.next(); + if (rackname.equals(result.getNetworkLocation())) { + counter++; + } + } + if (counter>maxTargetPerLoc) { + logr.debug("Node "+NodeBase.getPath(node)+ + " is not chosen because the rack has too many chosen nodes"); + return false; + } + return true; + } + + /* Return a pipeline of nodes. + * The pipeline is formed finding a shortest path that + * starts from the writer and traverses all nodes + * This is basically a traveling salesman problem. + */ + private DatanodeDescriptor[] getPipeline( + DatanodeDescriptor writer, + DatanodeDescriptor[] nodes) { + if (nodes.length==0) return nodes; + + synchronized(clusterMap) { + int index=0; + if (writer == null || !clusterMap.contains(writer)) { + writer = nodes[0]; + } + for(;indexcurrentDistance) { + shortestDistance = currentDistance; + shortestNode = currentNode; + shortestIndex = i; + } + } + //switch position index & shortestIndex + if (index != shortestIndex) { + nodes[shortestIndex] = nodes[index]; + nodes[index] = shortestNode; + } + writer = shortestNode; + } + } + return nodes; + } + + /** {@inheritDoc} */ + public int verifyBlockPlacement(String srcPath, + LocatedBlock lBlk, + int minRacks) { + DatanodeInfo[] locs = lBlk.getLocations(); + if (locs == null) + locs = new DatanodeInfo[0]; + int numRacks = clusterMap.getNumOfRacks(); + if(numRacks <= 1) // only one rack + return 0; + minRacks = Math.min(minRacks, numRacks); + // 1. Check that all locations are different. + // 2. Count locations on different racks. 
+ Set racks = new TreeSet(); + for (DatanodeInfo dn : locs) + racks.add(dn.getNetworkLocation()); + return minRacks - racks.size(); + } + + /** {@inheritDoc} */ + public DatanodeDescriptor chooseReplicaToDelete(FSInodeInfo inode, + Block block, + short replicationFactor, + Collection first, + Collection second) { + long minSpace = Long.MAX_VALUE; + DatanodeDescriptor cur = null; + + // pick replica from the first Set. If first is empty, then pick replicas + // from second set. + Iterator iter = + first.isEmpty() ? second.iterator() : first.iterator(); + + // pick node with least free space + while (iter.hasNext() ) { + DatanodeDescriptor node = iter.next(); + long free = node.getRemaining(); + if (minSpace > free) { + minSpace = free; + cur = node; + } + } + return cur; + } +} + diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java new file mode 100644 index 0000000..1866760 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java @@ -0,0 +1,487 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.util.Iterator; + +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.util.GSet; +import org.apache.hadoop.hdfs.util.LightWeightGSet; + +/** + * This class maintains the map from a block to its metadata. + * block's metadata currently includes INode it belongs to and + * the datanodes that store the block. + */ +class BlocksMap { + + /** + * Internal class for block metadata. + */ + static class BlockInfo extends Block implements LightWeightGSet.LinkedElement { + private INodeFile inode; + + /** For implementing {@link LightWeightGSet.LinkedElement} interface */ + private LightWeightGSet.LinkedElement nextLinkedElement; + + /** + * This array contains triplets of references. + * For each i-th data-node the block belongs to + * triplets[3*i] is the reference to the DatanodeDescriptor + * and triplets[3*i+1] and triplets[3*i+2] are references + * to the previous and the next blocks, respectively, in the + * list of blocks belonging to this data-node. 
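// Editor's illustration of the triplets layout described above (not author text):
// with replication = 3 the array has 3 * 3 = 9 slots, three per data-node holding the block:
//   [ dn0, prev0, next0,   dn1, prev1, next1,   dn2, prev2, next2 ]
// where dn_i is the DatanodeDescriptor at triplets[3*i], and prev_i / next_i at
// triplets[3*i+1] / triplets[3*i+2] are the neighbouring BlockInfo entries in
// dn_i's doubly-linked list of blocks.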
+ */ + private Object[] triplets; + + BlockInfo(Block blk, int replication) { + super(blk); + this.triplets = new Object[3*replication]; + this.inode = null; + } + + INodeFile getINode() { + return inode; + } + + public void setINode(INodeFile inode) { + this.inode = inode; + } + + DatanodeDescriptor getDatanode(int index) { + assert this.triplets != null : "BlockInfo is not initialized"; + assert index >= 0 && index*3 < triplets.length : "Index is out of bound"; + DatanodeDescriptor node = (DatanodeDescriptor)triplets[index*3]; + assert node == null || + DatanodeDescriptor.class.getName().equals(node.getClass().getName()) : + "DatanodeDescriptor is expected at " + index*3; + return node; + } + + BlockInfo getPrevious(int index) { + assert this.triplets != null : "BlockInfo is not initialized"; + assert index >= 0 && index*3+1 < triplets.length : "Index is out of bound"; + BlockInfo info = (BlockInfo)triplets[index*3+1]; + assert info == null || + BlockInfo.class.getName().equals(info.getClass().getName()) : + "BlockInfo is expected at " + index*3; + return info; + } + + BlockInfo getNext(int index) { + assert this.triplets != null : "BlockInfo is not initialized"; + assert index >= 0 && index*3+2 < triplets.length : "Index is out of bound"; + BlockInfo info = (BlockInfo)triplets[index*3+2]; + assert info == null || + BlockInfo.class.getName().equals(info.getClass().getName()) : + "BlockInfo is expected at " + index*3; + return info; + } + + void setDatanode(int index, DatanodeDescriptor node) { + assert this.triplets != null : "BlockInfo is not initialized"; + assert index >= 0 && index*3 < triplets.length : "Index is out of bound"; + triplets[index*3] = node; + } + + void setPrevious(int index, BlockInfo to) { + assert this.triplets != null : "BlockInfo is not initialized"; + assert index >= 0 && index*3+1 < triplets.length : "Index is out of bound"; + triplets[index*3+1] = to; + } + + void setNext(int index, BlockInfo to) { + assert this.triplets != null : "BlockInfo is not initialized"; + assert index >= 0 && index*3+2 < triplets.length : "Index is out of bound"; + triplets[index*3+2] = to; + } + + private int getCapacity() { + assert this.triplets != null : "BlockInfo is not initialized"; + assert triplets.length % 3 == 0 : "Malformed BlockInfo"; + return triplets.length / 3; + } + + /** + * Ensure that there is enough space to include num more triplets. + * * @return first free triplet index. + */ + private int ensureCapacity(int num) { + assert this.triplets != null : "BlockInfo is not initialized"; + int last = numNodes(); + if(triplets.length >= (last+num)*3) + return last; + /* Not enough space left. Create a new array. Should normally + * happen only when replication is manually increased by the user. */ + Object[] old = triplets; + triplets = new Object[(last+num)*3]; + for(int i=0; i < last*3; i++) { + triplets[i] = old[i]; + } + return last; + } + + /** + * Count the number of data-nodes the block belongs to. + */ + int numNodes() { + assert this.triplets != null : "BlockInfo is not initialized"; + assert triplets.length % 3 == 0 : "Malformed BlockInfo"; + for(int idx = getCapacity()-1; idx >= 0; idx--) { + if(getDatanode(idx) != null) + return idx+1; + } + return 0; + } + + /** + * Add data-node this block belongs to. 
+ */ + boolean addNode(DatanodeDescriptor node) { + if(findDatanode(node) >= 0) // the node is already there + return false; + // find the last null node + int lastNode = ensureCapacity(1); + setDatanode(lastNode, node); + setNext(lastNode, null); + setPrevious(lastNode, null); + return true; + } + + /** + * Remove data-node from the block. + */ + boolean removeNode(DatanodeDescriptor node) { + int dnIndex = findDatanode(node); + if(dnIndex < 0) // the node is not found + return false; + assert getPrevious(dnIndex) == null && getNext(dnIndex) == null : + "Block is still in the list and must be removed first."; + // find the last not null node + int lastNode = numNodes()-1; + // replace current node triplet by the lastNode one + setDatanode(dnIndex, getDatanode(lastNode)); + setNext(dnIndex, getNext(lastNode)); + setPrevious(dnIndex, getPrevious(lastNode)); + // set the last triplet to null + setDatanode(lastNode, null); + setNext(lastNode, null); + setPrevious(lastNode, null); + return true; + } + + /** + * Find specified DatanodeDescriptor. + * @param dn + * @return index or -1 if not found. + */ + int findDatanode(DatanodeDescriptor dn) { + int len = getCapacity(); + for(int idx = 0; idx < len; idx++) { + DatanodeDescriptor cur = getDatanode(idx); + if(cur == dn) + return idx; + if(cur == null) + break; + } + return -1; + } + + /** + * Insert this block into the head of the list of blocks + * related to the specified DatanodeDescriptor. + * If the head is null then form a new list. + * @return current block as the new head of the list. + */ + BlockInfo listInsert(BlockInfo head, DatanodeDescriptor dn) { + int dnIndex = this.findDatanode(dn); + assert dnIndex >= 0 : "Data node is not found: current"; + assert getPrevious(dnIndex) == null && getNext(dnIndex) == null : + "Block is already in the list and cannot be inserted."; + this.setPrevious(dnIndex, null); + this.setNext(dnIndex, head); + if(head != null) + head.setPrevious(head.findDatanode(dn), this); + return this; + } + + /** + * Remove this block from the list of blocks + * related to the specified DatanodeDescriptor. + * If this block is the head of the list then return the next block as + * the new head. + * @return the new head of the list or null if the list becomes + * empy after deletion. 
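removeNode() above compacts the triplets by copying the last occupied slot over the removed one and nulling out the tail, so live entries always form a prefix and numNodes() can stop at the first null. A tiny sketch of that remove-by-swap idiom on a plain array (contents are arbitrary strings):

    import java.util.Arrays;

    public class SwapRemoveSketch {
      /** Remove one occurrence of 'victim', keeping live entries packed at the front. */
      static void remove(Object[] slots, Object victim) {
        int idx = -1, last = -1;
        for (int i = 0; i < slots.length && slots[i] != null; i++) {
          if (idx < 0 && slots[i].equals(victim)) {
            idx = i;               // where the victim sits
          }
          last = i;                // last occupied slot seen so far
        }
        if (idx < 0) return;       // not present
        slots[idx] = slots[last];  // overwrite the victim with the last live entry
        slots[last] = null;        // and free the tail slot
      }

      public static void main(String[] args) {
        Object[] slots = {"dn-a", "dn-b", "dn-c", null};
        remove(slots, "dn-a");
        System.out.println(Arrays.toString(slots));   // [dn-c, dn-b, null, null]
      }
    }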
+ */ + BlockInfo listRemove(BlockInfo head, DatanodeDescriptor dn) { + if(head == null) + return null; + int dnIndex = this.findDatanode(dn); + if(dnIndex < 0) // this block is not on the data-node list + return head; + + BlockInfo next = this.getNext(dnIndex); + BlockInfo prev = this.getPrevious(dnIndex); + this.setNext(dnIndex, null); + this.setPrevious(dnIndex, null); + if(prev != null) + prev.setNext(prev.findDatanode(dn), next); + if(next != null) + next.setPrevious(next.findDatanode(dn), prev); + if(this == head) // removing the head + head = next; + return head; + } + + int listCount(DatanodeDescriptor dn) { + int count = 0; + for(BlockInfo cur = this; cur != null; + cur = cur.getNext(cur.findDatanode(dn))) + count++; + return count; + } + + boolean listIsConsistent(DatanodeDescriptor dn) { + // going forward + int count = 0; + BlockInfo next, nextPrev; + BlockInfo cur = this; + while(cur != null) { + next = cur.getNext(cur.findDatanode(dn)); + if(next != null) { + nextPrev = next.getPrevious(next.findDatanode(dn)); + if(cur != nextPrev) { + System.out.println("Inconsistent list: cur->next->prev != cur"); + return false; + } + } + cur = next; + count++; + } + return true; + } + + @Override + public LightWeightGSet.LinkedElement getNext() { + return nextLinkedElement; + } + + @Override + public void setNext(LightWeightGSet.LinkedElement next) { + this.nextLinkedElement = next; + } + } + + private static class NodeIterator implements Iterator { + private BlockInfo blockInfo; + private int nextIdx = 0; + + NodeIterator(BlockInfo blkInfo) { + this.blockInfo = blkInfo; + } + + public boolean hasNext() { + return blockInfo != null && nextIdx < blockInfo.getCapacity() + && blockInfo.getDatanode(nextIdx) != null; + } + + public DatanodeDescriptor next() { + return blockInfo.getDatanode(nextIdx++); + } + + public void remove() { + throw new UnsupportedOperationException("Sorry. can't remove."); + } + } + + /** Constant {@link LightWeightGSet} capacity. */ + private final int capacity; + + private GSet blocks; + + BlocksMap(int initialCapacity, float loadFactor) { + this.capacity = computeCapacity(); + this.blocks = new LightWeightGSet(capacity); + } + + /** + * Let t = 2% of max memory. + * Let e = round(log_2 t). + * Then, we choose capacity = 2^e/(size of reference), + * unless it is outside the close interval [1, 2^30]. + */ + private static int computeCapacity() { + //VM detection + //See http://java.sun.com/docs/hotspot/HotSpotFAQ.html#64bit_detection + final String vmBit = System.getProperty("sun.arch.data.model"); + + //2% of max memory + final double twoPC = Runtime.getRuntime().maxMemory()/50.0; + + //compute capacity + final int e1 = (int)(Math.log(twoPC)/Math.log(2.0) + 0.5); + final int e2 = e1 - ("32".equals(vmBit)? 2: 3); + final int exponent = e2 < 0? 0: e2 > 30? 30: e2; + final int c = 1 << exponent; + + LightWeightGSet.LOG.info("VM type = " + vmBit + "-bit"); + LightWeightGSet.LOG.info("2% max memory = " + twoPC/(1 << 20) + " MB"); + LightWeightGSet.LOG.info("capacity = 2^" + exponent + + " = " + c + " entries"); + return c; + } + + void close() { + blocks = null; + } + + /** + * Add BlockInfo if mapping does not exist. + */ + private BlockInfo checkBlockInfo(Block b, int replication) { + BlockInfo info = blocks.get(b); + if (info == null) { + info = new BlockInfo(b, replication); + blocks.put(info); + } + return info; + } + + INodeFile getINode(Block b) { + BlockInfo info = blocks.get(b); + return (info != null) ? 
info.inode : null; + } + + /** + * Add block b belonging to the specified file inode to the map. + */ + BlockInfo addINode(Block b, INodeFile iNode) { + BlockInfo info = checkBlockInfo(b, iNode.getReplication()); + info.inode = iNode; + return info; + } + + /** + * Remove INode reference from block b. + * If it does not belong to any file and data-nodes, + * then remove the block from the block map. + */ + void removeINode(Block b) { + BlockInfo info = blocks.get(b); + if (info != null) { + info.inode = null; + if (info.getDatanode(0) == null) { // no datanodes left + blocks.remove(b); // remove block from the map + } + } + } + + /** + * Remove the block from the block map; + * remove it from all data-node lists it belongs to; + * and remove all data-node locations associated with the block. + */ + void removeBlock(BlockInfo blockInfo) { + if (blockInfo == null) + return; + blockInfo.inode = null; + for(int idx = blockInfo.numNodes()-1; idx >= 0; idx--) { + DatanodeDescriptor dn = blockInfo.getDatanode(idx); + dn.removeBlock(blockInfo); // remove from the list and wipe the location + } + blocks.remove(blockInfo); // remove block from the map + } + + /** Returns the block object it it exists in the map. */ + BlockInfo getStoredBlock(Block b) { + return blocks.get(b); + } + + /** Returned Iterator does not support. */ + Iterator nodeIterator(Block b) { + return new NodeIterator(blocks.get(b)); + } + + /** counts number of containing nodes. Better than using iterator. */ + int numNodes(Block b) { + BlockInfo info = blocks.get(b); + return info == null ? 0 : info.numNodes(); + } + + /** returns true if the node does not already exists and is added. + * false if the node already exists.*/ + boolean addNode(Block b, DatanodeDescriptor node, int replication) { + // insert into the map if not there yet + BlockInfo info = checkBlockInfo(b, replication); + // add block to the data-node list and the node to the block info + return node.addBlock(info); + } + + /** + * Remove data-node reference from the block. + * Remove the block from the block map + * only if it does not belong to any file and data-nodes. 
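To make the computeCapacity() heuristic a little above concrete, here is the same arithmetic worked for a hypothetical 1 GB heap on a 64-bit JVM (the subtraction of 3 treats a reference as 2^3 bytes; a 32-bit VM subtracts 2 instead):

    public class CapacitySketch {
      public static void main(String[] args) {
        long maxMemory = 1L << 30;                                  // assume a 1 GB heap
        double twoPC = maxMemory / 50.0;                            // 2% of it: ~21.5 MB
        int e1 = (int) (Math.log(twoPC) / Math.log(2.0) + 0.5);     // round(log2) = 24
        int e2 = e1 - 3;                                            // 64-bit references: 2^3 bytes
        int exponent = Math.max(0, Math.min(30, e2));               // clamp the exponent to [0, 30]
        System.out.println("capacity = 2^" + exponent + " = " + (1 << exponent));
        // prints: capacity = 2^21 = 2097152
      }
    }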
+ */ + boolean removeNode(Block b, DatanodeDescriptor node) { + BlockInfo info = blocks.get(b); + if (info == null) + return false; + + // remove block from the data-node list and the node from the block info + boolean removed = node.removeBlock(info); + + if (info.getDatanode(0) == null // no datanodes left + && info.inode == null) { // does not belong to a file + blocks.remove(b); // remove block from the map + } + return removed; + } + + int size() { + return blocks.size(); + } + + Iterable getBlocks() { + return blocks; + } + /** + * Check if the block exists in map + */ + boolean contains(Block block) { + return blocks.contains(block); + } + + /** + * Check if the replica at the given datanode exists in map + */ + boolean contains(Block block, DatanodeDescriptor datanode) { + BlockInfo info = blocks.get(block); + if (info == null) + return false; + + if (-1 == info.findDatanode(datanode)) + return false; + + return true; + } + + /** Get the capacity of the HashMap that stores blocks */ + public int getCapacity() { + return capacity; + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java new file mode 100644 index 0000000..2002755 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java @@ -0,0 +1,135 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import org.apache.hadoop.hdfs.server.common.StorageInfo; +import org.apache.hadoop.io.MD5Hash; +import org.apache.hadoop.io.WritableComparable; + +/** + * A unique signature intended to identify checkpoint transactions. 
+ */ +public class CheckpointSignature extends StorageInfo + implements WritableComparable { + private static final String FIELD_SEPARATOR = ":"; + long editsTime = -1L; + long checkpointTime = -1L; + MD5Hash imageDigest = null; + + CheckpointSignature() {} + + CheckpointSignature(FSImage fsImage) { + super(fsImage); + editsTime = fsImage.getEditLog().getFsEditTime(); + checkpointTime = fsImage.checkpointTime; + imageDigest = fsImage.getImageDigest(); + } + + CheckpointSignature(String str) { + String[] fields = str.split(FIELD_SEPARATOR); + assert fields.length == 6 : "Must be 6 fields in CheckpointSignature"; + layoutVersion = Integer.valueOf(fields[0]); + namespaceID = Integer.valueOf(fields[1]); + cTime = Long.valueOf(fields[2]); + editsTime = Long.valueOf(fields[3]); + checkpointTime = Long.valueOf(fields[4]); + imageDigest = new MD5Hash(fields[5]); + } + + /** + * Get the MD5 image digest + * @return the MD5 image digest + */ + MD5Hash getImageDigest() { + return imageDigest; + } + + public String toString() { + return String.valueOf(layoutVersion) + FIELD_SEPARATOR + + String.valueOf(namespaceID) + FIELD_SEPARATOR + + String.valueOf(cTime) + FIELD_SEPARATOR + + String.valueOf(editsTime) + FIELD_SEPARATOR + + String.valueOf(checkpointTime) + FIELD_SEPARATOR + + imageDigest.toString(); + } + + void validateStorageInfo(StorageInfo si) throws IOException { + if(layoutVersion != si.layoutVersion + || namespaceID != si.namespaceID || cTime != si.cTime) { + // checkpointTime can change when the image is saved - do not compare + throw new IOException("Inconsistent checkpoint fileds. " + + "LV = " + layoutVersion + " namespaceID = " + namespaceID + + " cTime = " + cTime + ". Expecting respectively: " + + si.layoutVersion + "; " + si.namespaceID + "; " + si.cTime); + } + } + + // + // Comparable interface + // + public int compareTo(CheckpointSignature o) { + return + (layoutVersion < o.layoutVersion) ? -1 : + (layoutVersion > o.layoutVersion) ? 1 : + (namespaceID < o.namespaceID) ? -1 : (namespaceID > o.namespaceID) ? 1 : + (cTime < o.cTime) ? -1 : (cTime > o.cTime) ? 1 : + (editsTime < o.editsTime) ? -1 : (editsTime > o.editsTime) ? 1 : + (checkpointTime < o.checkpointTime) ? -1 : + (checkpointTime > o.checkpointTime) ? 
1 : + imageDigest.compareTo(o.imageDigest); + } + + public boolean equals(Object o) { + if (!(o instanceof CheckpointSignature)) { + return false; + } + return compareTo((CheckpointSignature)o) == 0; + } + + public int hashCode() { + return layoutVersion ^ namespaceID ^ + (int)(cTime ^ editsTime ^ checkpointTime) ^ + imageDigest.hashCode(); + } + + ///////////////////////////////////////////////// + // Writable + ///////////////////////////////////////////////// + public void write(DataOutput out) throws IOException { + out.writeInt(getLayoutVersion()); + out.writeInt(getNamespaceID()); + out.writeLong(getCTime()); + out.writeLong(editsTime); + out.writeLong(checkpointTime); + imageDigest.write(out); + } + + public void readFields(DataInput in) throws IOException { + layoutVersion = in.readInt(); + namespaceID = in.readInt(); + cTime = in.readLong(); + editsTime = in.readLong(); + checkpointTime = in.readLong(); + imageDigest = new MD5Hash(); + imageDigest.readFields(in); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/ConfigManager.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/ConfigManager.java new file mode 100644 index 0000000..eb4207c --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/ConfigManager.java @@ -0,0 +1,145 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import org.apache.commons.logging.*; +import org.apache.hadoop.conf.*; + +import java.io.*; +import java.util.LinkedList; + + +/*************************************************** + * Manager FSNamesystem dynamic loadingof configurations. + ***************************************************/ +public class ConfigManager { + public static final Log LOG = LogFactory.getLog(FSNamesystem.class); + + /** Time to wait between checks of the allocation file */ + public static final long CONFIG_RELOAD_INTERVAL = 300 * 1000; + + /** + * Time to wait after the allocation has been modified before reloading it + * (this is done to prevent loading a file that hasn't been fully written). 
+ */ + public static final long DEFAULT_CONFIG_RELOAD_WAIT = 10 * 1000; + + private FSNamesystem namesys; + private String whitelistFile; // Path to file that contains whitelist directories + + private long configReloadWait = DEFAULT_CONFIG_RELOAD_WAIT; + private long lastReloadAttempt; // Last time we tried to reload the pools file + private long lastSuccessfulReload; // Last time we successfully reloaded pools + private boolean lastReloadAttemptFailed = false; + + public ConfigManager(FSNamesystem namesys, Configuration conf) { + this.namesys = namesys; + + // the name of the whitelist file + this.whitelistFile = conf.get("dfs.namenode.whitelist.file"); + if (whitelistFile == null) { + LOG.warn("No whitelist file specified in dfs.namenode.whitelist.file." + + " The namenode will allow deletion/renaming of any directory."); + } + + // periodicity of reload of the config file + long value = conf.getLong("dfs.namenode.config.reload.wait", 0); + if (value != 0) { + configReloadWait = value; + } + } + + /** + * Checks to see if the namenode config file is updated on + * disk, If so, then read all it contents. At present, only + * the whitelist config is updated, but we will enahnce this to + * update all possible namenode configs in future. + */ + public void reloadConfigIfNecessary() { + if (whitelistFile == null) { + return; + } + long time = System.currentTimeMillis(); + if (time > lastReloadAttempt + CONFIG_RELOAD_INTERVAL) { + lastReloadAttempt = time; + try { + File file = new File(whitelistFile); + long lastModified = file.lastModified(); + if (lastModified > lastSuccessfulReload && + time > lastModified + configReloadWait) { + reloadWhitelist(); + lastSuccessfulReload = time; + lastReloadAttemptFailed = false; + } + } catch (Exception e) { + // Throwing the error further out here won't help - the RPC thread + // will catch it and report it in a loop. Instead, just log it and + // hope somebody will notice from the log. + // We log the error only on the first failure so we don't fill up the + // server's log with these messages. + if (!lastReloadAttemptFailed) { + LOG.error("Failed to reload whitelist file - " + + "will use existing allocations.", e); + } + lastReloadAttemptFailed = true; + } + } + } + + /** + * Removes all the entries currently in neverDeletePaths + * and add the new ones specified + */ + void reloadWhitelist() throws IOException { + + // read the entire whitelist into memory outside the + // FSNamessytem lock. + // + LinkedList paths = new LinkedList(); + FileInputStream fstream = new FileInputStream(whitelistFile); + DataInputStream in = new DataInputStream(fstream); + BufferedReader br = new BufferedReader(new InputStreamReader(in)); + int count = 0; + while (true) { + String str = br.readLine(); + if (str == null) { + break; // end of file + } + str = str.trim(); // remove all whitespace from start and end + if (str.startsWith("#")) { + continue; // ignore lines with starting with # + } + paths.add(str); + LOG.info("Whitelisted directory [" + count + "] " + str); + count++; + } + in.close(); + + // acquire the writelock and insert newly read entries into + // the Namenode's configuration. 
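reloadConfigIfNecessary() above rate-limits itself to one attempt per CONFIG_RELOAD_INTERVAL and additionally requires the file's mtime to be at least configReloadWait ms in the past, so a half-written whitelist is not loaded. A condensed sketch of just that timing decision (shouldReload() is a hypothetical helper, not part of the class above):

    public class ReloadTimingSketch {
      static final long RELOAD_INTERVAL = 300 * 1000;   // retry at most every 5 minutes
      static final long RELOAD_WAIT = 10 * 1000;        // let writers finish for 10 s

      long lastReloadAttempt;
      long lastSuccessfulReload;

      /** @return true if the file should be re-read now. */
      boolean shouldReload(long now, long fileLastModified) {
        if (now <= lastReloadAttempt + RELOAD_INTERVAL) {
          return false;                                 // checked too recently
        }
        lastReloadAttempt = now;
        return fileLastModified > lastSuccessfulReload  // file changed since the last load
            && now > fileLastModified + RELOAD_WAIT;    // and has been quiet long enough
      }

      public static void main(String[] args) {
        ReloadTimingSketch s = new ReloadTimingSketch();
        long t0 = 1000000L;                             // arbitrary clock value
        // file modified 1 ms before the check: still inside the 10 s quiet window
        System.out.println(s.shouldReload(t0 + RELOAD_INTERVAL + 1, t0 + RELOAD_INTERVAL));  // false
        // the very next attempt is rate-limited regardless of the file's age
        System.out.println(s.shouldReload(t0 + RELOAD_INTERVAL + 2, t0));                    // false
      }
    }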
+ namesys.writeLock(); + try { + namesys.neverDeletePaths.clear(); + for (String s: paths) { + namesys.neverDeletePaths.add(s); + } + } finally { + namesys.writeUnlock(); + } + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java new file mode 100644 index 0000000..481ab78 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java @@ -0,0 +1,129 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.ipc.Server; + +import java.util.*; + +/** + * Stores information about all corrupt blocks in the File System. + * A Block is considered corrupt only if all of its replicas are + * corrupt. While reporting replicas of a Block, we hide any corrupt + * copies. These copies are removed once Block is found to have + * expected number of good replicas. + * Mapping: Block -> TreeSet + */ + +public class CorruptReplicasMap{ + + private Map> corruptReplicasMap = + new TreeMap>(); + + /** + * Mark the block belonging to datanode as corrupt. 
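The corruptReplicasMap declaration above lost its type parameters in this copy; from the add/remove methods that follow it is evidently a Block to Collection-of-DatanodeDescriptor multimap. A minimal sketch of the same add/remove discipline (the entry disappears once its last corrupt replica is removed), using Strings as stand-ins for blocks and datanodes:

    import java.util.Collection;
    import java.util.TreeMap;
    import java.util.TreeSet;

    public class CorruptMapSketch {
      private final TreeMap<String, Collection<String>> corrupt =
          new TreeMap<String, Collection<String>>();

      void add(String block, String datanode) {
        Collection<String> nodes = corrupt.get(block);
        if (nodes == null) {
          nodes = new TreeSet<String>();
          corrupt.put(block, nodes);
        }
        nodes.add(datanode);
      }

      boolean remove(String block, String datanode) {
        Collection<String> nodes = corrupt.get(block);
        if (nodes == null || !nodes.remove(datanode)) {
          return false;
        }
        if (nodes.isEmpty()) {
          corrupt.remove(block);   // no corrupt replicas left: drop the whole entry
        }
        return true;
      }

      public static void main(String[] args) {
        CorruptMapSketch m = new CorruptMapSketch();
        m.add("blk_1", "dn-a");
        m.remove("blk_1", "dn-a");
        System.out.println(m.corrupt.containsKey("blk_1"));   // false
      }
    }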
+ * + * @param blk Block to be added to CorruptReplicasMap + * @param dn DatanodeDescriptor which holds the corrupt replica + */ + public void addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn) { + Collection nodes = getNodes(blk); + if (nodes == null) { + nodes = new TreeSet(); + corruptReplicasMap.put(blk, nodes); + } + if (!nodes.contains(dn)) { + nodes.add(dn); + NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+ + blk.getBlockName() + + " added as corrupt on " + dn.getName() + + " by " + Server.getRemoteIp()); + } else { + NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+ + "duplicate requested for " + + blk.getBlockName() + " to add as corrupt " + + "on " + dn.getName() + + " by " + Server.getRemoteIp()); + } + } + + /** + * Remove Block from CorruptBlocksMap + * + * @param blk Block to be removed + */ + void removeFromCorruptReplicasMap(Block blk) { + if (corruptReplicasMap != null) { + corruptReplicasMap.remove(blk); + } + } + + /** + * Remove the block at the given datanode from CorruptBlockMap + * @param blk block to be removed + * @param datanode datanode where the block is located + * @return true if the removal is successful; + false if the replica is not in the map + */ + boolean removeFromCorruptReplicasMap(Block blk, DatanodeDescriptor datanode) { + Collection datanodes = corruptReplicasMap.get(blk); + if (datanodes==null) + return false; + if (datanodes.remove(datanode)) { // remove the replicas + if (datanodes.isEmpty()) { + // remove the block if there is no more corrupted replicas + corruptReplicasMap.remove(blk); + } + return true; + } + return false; + } + + + /** + * Get Nodes which have corrupt replicas of Block + * + * @param blk Block for which nodes are requested + * @return collection of nodes. Null if does not exists + */ + Collection getNodes(Block blk) { + return corruptReplicasMap.get(blk); + } + + /** + * Check if replica belonging to Datanode is corrupt + * + * @param blk Block to check + * @param node DatanodeDescriptor which holds the replica + * @return true if replica is corrupt, false if does not exists in this map + */ + boolean isReplicaCorrupt(Block blk, DatanodeDescriptor node) { + Collection nodes = getNodes(blk); + return ((nodes != null) && (nodes.contains(node))); + } + + public int numCorruptReplicas(Block blk) { + Collection nodes = getNodes(blk); + return (nodes == null) ? 0 : nodes.size(); + } + + public int size() { + return corruptReplicasMap.size(); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java new file mode 100644 index 0000000..f3efd9a --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java @@ -0,0 +1,528 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.DataInput; +import java.io.IOException; +import java.util.*; + +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; +import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; +import org.apache.hadoop.hdfs.server.protocol.BlockCommand; +import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.UTF8; +import org.apache.hadoop.io.WritableUtils; + +/************************************************** + * DatanodeDescriptor tracks stats on a given DataNode, + * such as available storage capacity, last update time, etc., + * and maintains a set of blocks stored on the datanode. + * + * This data structure is a data structure that is internal + * to the namenode. It is *not* sent over-the-wire to the Client + * or the Datnodes. Neither is it stored persistently in the + * fsImage. + + **************************************************/ +public class DatanodeDescriptor extends DatanodeInfo { + + // Stores status of decommissioning. + // If node is not decommissioning, do not use this object for anything. + DecommissioningStatus decommissioningStatus = new DecommissioningStatus(); + + /** Block and targets pair */ + public static class BlockTargetPair { + public final Block block; + public final DatanodeDescriptor[] targets; + + BlockTargetPair(Block block, DatanodeDescriptor[] targets) { + this.block = block; + this.targets = targets; + } + } + + /** A BlockTargetPair queue. */ + private static class BlockQueue { + private final Queue blockq = new LinkedList(); + + /** Size of the queue */ + synchronized int size() {return blockq.size();} + + /** Enqueue */ + synchronized boolean offer(Block block, DatanodeDescriptor[] targets) { + return blockq.offer(new BlockTargetPair(block, targets)); + } + + /** Dequeue */ + synchronized List poll(int numBlocks) { + if (numBlocks <= 0 || blockq.isEmpty()) { + return null; + } + + List results = new ArrayList(); + for(; !blockq.isEmpty() && numBlocks > 0; numBlocks--) { + results.add(blockq.poll()); + } + return results; + } + } + + private volatile BlockInfo blockList = null; + private int numOfBlocks = 0; // number of block this DN has + + // isAlive == heartbeats.contains(this) + // This is an optimization, because contains takes O(n) time on Arraylist + protected boolean isAlive = false; + + /** A queue of blocks to be replicated by this datanode */ + private BlockQueue replicateBlocks = new BlockQueue(); + /** A queue of blocks to be recovered by this datanode */ + private BlockQueue recoverBlocks = new BlockQueue(); + /** A set of blocks to be invalidated by this datanode */ + private Set invalidateBlocks = new TreeSet(); + + /* Variables for maintaning number of blocks scheduled to be written to + * this datanode. This count is approximate and might be slightly higger + * in case of errors (e.g. datanode does not report if an error occurs + * while writing the block). 
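BlockQueue above is a synchronized FIFO whose poll(n) drains up to n pending items in one call, which is how the namenode hands each datanode a bounded batch of replication or recovery work at a time. A generic sketch of that batching pattern:

    import java.util.ArrayList;
    import java.util.LinkedList;
    import java.util.List;
    import java.util.Queue;

    public class BatchQueueSketch<T> {
      private final Queue<T> q = new LinkedList<T>();

      synchronized boolean offer(T item) {
        return q.offer(item);
      }

      /** Dequeue up to maxItems entries; null if nothing is pending (as in BlockQueue.poll). */
      synchronized List<T> poll(int maxItems) {
        if (maxItems <= 0 || q.isEmpty()) {
          return null;
        }
        List<T> batch = new ArrayList<T>();
        for (; !q.isEmpty() && maxItems > 0; maxItems--) {
          batch.add(q.poll());
        }
        return batch;
      }

      public static void main(String[] args) {
        BatchQueueSketch<String> bq = new BatchQueueSketch<String>();
        bq.offer("blk_1"); bq.offer("blk_2"); bq.offer("blk_3");
        System.out.println(bq.poll(2));   // [blk_1, blk_2]
      }
    }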
+ */ + private int currApproxBlocksScheduled = 0; + private int prevApproxBlocksScheduled = 0; + private long lastBlocksScheduledRollTime = 0; + private static final int BLOCKS_SCHEDULED_ROLL_INTERVAL = 600*1000; //10min + + /** Default constructor */ + public DatanodeDescriptor() {} + + /** DatanodeDescriptor constructor + * @param nodeID id of the data node + */ + public DatanodeDescriptor(DatanodeID nodeID) { + this(nodeID, 0L, 0L, 0L, 0); + } + + /** DatanodeDescriptor constructor + * + * @param nodeID id of the data node + * @param networkLocation location of the data node in network + */ + public DatanodeDescriptor(DatanodeID nodeID, + String networkLocation) { + this(nodeID, networkLocation, null); + } + + /** DatanodeDescriptor constructor + * + * @param nodeID id of the data node + * @param networkLocation location of the data node in network + * @param hostName it could be different from host specified for DatanodeID + */ + public DatanodeDescriptor(DatanodeID nodeID, + String networkLocation, + String hostName) { + this(nodeID, networkLocation, hostName, 0L, 0L, 0L, 0); + } + + /** DatanodeDescriptor constructor + * + * @param nodeID id of the data node + * @param capacity capacity of the data node + * @param dfsUsed space used by the data node + * @param remaining remaing capacity of the data node + * @param xceiverCount # of data transfers at the data node + */ + public DatanodeDescriptor(DatanodeID nodeID, + long capacity, + long dfsUsed, + long remaining, + int xceiverCount) { + super(nodeID); + updateHeartbeat(capacity, dfsUsed, remaining, xceiverCount); + } + + /** DatanodeDescriptor constructor + * + * @param nodeID id of the data node + * @param networkLocation location of the data node in network + * @param capacity capacity of the data node, including space used by non-dfs + * @param dfsUsed the used space by dfs datanode + * @param remaining remaing capacity of the data node + * @param xceiverCount # of data transfers at the data node + */ + public DatanodeDescriptor(DatanodeID nodeID, + String networkLocation, + String hostName, + long capacity, + long dfsUsed, + long remaining, + int xceiverCount) { + super(nodeID, networkLocation, hostName); + updateHeartbeat(capacity, dfsUsed, remaining, xceiverCount); + } + + /** + * Add data-node to the block. + * Add block to the head of the list of blocks belonging to the data-node. + */ + boolean addBlock(BlockInfo b) { + if(!b.addNode(this)) + return false; + // add to the head of the data-node list + blockList = b.listInsert(blockList, this); + numOfBlocks++; + return true; + } + + /** + * Remove block from the list of blocks belonging to the data-node. + * Remove data-node from the block. + */ + boolean removeBlock(BlockInfo b) { + blockList = b.listRemove(blockList, this); + if ( b.removeNode(this) ) { + numOfBlocks--; + return true; + } else { + return false; + } + } + + /** + * Move block to the head of the list of blocks belonging to the data-node. 
+ */ + void moveBlockToHead(BlockInfo b) { + blockList = b.listRemove(blockList, this); + blockList = b.listInsert(blockList, this); + } + + void resetBlocks() { + this.capacity = 0; + this.remaining = 0; + this.dfsUsed = 0; + this.xceiverCount = 0; + this.blockList = null; + this.numOfBlocks = 0; + this.invalidateBlocks.clear(); + } + + public int numBlocks() { + return numOfBlocks; + } + + /** + */ + void updateHeartbeat(long capacity, long dfsUsed, long remaining, + int xceiverCount) { + this.capacity = capacity; + this.dfsUsed = dfsUsed; + this.remaining = remaining; + this.lastUpdate = System.currentTimeMillis(); + this.xceiverCount = xceiverCount; + rollBlocksScheduled(lastUpdate); + } + + /** + * Iterates over the list of blocks belonging to the data-node. + */ + static private class BlockIterator implements Iterator { + private BlockInfo current; + private DatanodeDescriptor node; + + BlockIterator(BlockInfo head, DatanodeDescriptor dn) { + this.current = head; + this.node = dn; + } + + public boolean hasNext() { + return current != null; + } + + public BlockInfo next() { + BlockInfo res = current; + current = current.getNext(current.findDatanode(node)); + return res; + } + + public void remove() { + throw new UnsupportedOperationException("Sorry. can't remove."); + } + } + + Iterator getBlockIterator() { + return new BlockIterator(this.blockList, this); + } + + /** + * Store block replication work. + */ + void addBlockToBeReplicated(Block block, DatanodeDescriptor[] targets) { + assert(block != null && targets != null && targets.length > 0); + replicateBlocks.offer(block, targets); + } + + /** + * Store block recovery work. + */ + void addBlockToBeRecovered(Block block, DatanodeDescriptor[] targets) { + assert(block != null && targets != null && targets.length > 0); + recoverBlocks.offer(block, targets); + } + + /** + * Store block invalidation work. + */ + void addBlocksToBeInvalidated(List blocklist) { + assert(blocklist != null && blocklist.size() > 0); + synchronized (invalidateBlocks) { + for(Block blk : blocklist) { + invalidateBlocks.add(blk); + } + } + } + + /** + * The number of work items that are pending to be replicated + */ + int getNumberOfBlocksToBeReplicated() { + return replicateBlocks.size(); + } + + /** + * The number of block invalidation items that are pending to + * be sent to the datanode + */ + int getNumberOfBlocksToBeInvalidated() { + synchronized (invalidateBlocks) { + return invalidateBlocks.size(); + } + } + + BlockCommand getReplicationCommand(int maxTransfers) { + List blocktargetlist = replicateBlocks.poll(maxTransfers); + return blocktargetlist == null? null: + new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blocktargetlist); + } + + BlockCommand getLeaseRecoveryCommand(int maxTransfers) { + List blocktargetlist = recoverBlocks.poll(maxTransfers); + return blocktargetlist == null? null: + new BlockCommand(DatanodeProtocol.DNA_RECOVERBLOCK, blocktargetlist); + } + + /** + * Remove the specified number of blocks to be invalidated + */ + BlockCommand getInvalidateBlocks(int maxblocks) { + Block[] deleteList = getBlockArray(invalidateBlocks, maxblocks); + return deleteList == null? + null: new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, deleteList); + } + + static private Block[] getBlockArray(Collection blocks, int max) { + Block[] blockarray = null; + synchronized(blocks) { + int available = blocks.size(); + int n = available; + if (max > 0 && n > 0) { + if (max < n) { + n = max; + } + // allocate the properly sized block array ... 
+ blockarray = new Block[n]; + + // iterate tree collecting n blocks... + Iterator e = blocks.iterator(); + int blockCount = 0; + + while (blockCount < n && e.hasNext()) { + // insert into array ... + blockarray[blockCount++] = e.next(); + + // remove from tree via iterator, if we are removing + // less than total available blocks + if (n < available){ + e.remove(); + } + } + assert(blockarray.length == n); + + // now if the number of blocks removed equals available blocks, + // them remove all blocks in one fell swoop via clear + if (n == available) { + blocks.clear(); + } + } + } + return blockarray; + } + + void reportDiff(BlocksMap blocksMap, + BlockListAsLongs newReport, + Collection toAdd, + Collection toRemove, + Collection toInvalidate) { + // place a deilimiter in the list which separates blocks + // that have been reported from those that have not + BlockInfo delimiter = new BlockInfo(new Block(), 1); + boolean added = this.addBlock(delimiter); + assert added : "Delimiting block cannot be present in the node"; + if(newReport == null) + newReport = new BlockListAsLongs( new long[0]); + // scan the report and collect newly reported blocks + // Note we are taking special precaution to limit tmp blocks allocated + // as part this block report - which why block list is stored as longs + Block iblk = new Block(); // a fixed new'ed block to be reused with index i + for (int i = 0; i < newReport.getNumberOfBlocks(); ++i) { + iblk.set(newReport.getBlockId(i), newReport.getBlockLen(i), + newReport.getBlockGenStamp(i)); + BlockInfo storedBlock = blocksMap.getStoredBlock(iblk); + if(storedBlock == null) { + // If block is not in blocksMap it does not belong to any file + toInvalidate.add(new Block(iblk)); + continue; + } + if(storedBlock.findDatanode(this) < 0) {// Known block, but not on the DN + // if the size differs from what is in the blockmap, then return + // the new block. addStoredBlock will then pick up the right size of this + // block and will update the block object in the BlocksMap + if (storedBlock.getNumBytes() != iblk.getNumBytes()) { + toAdd.add(new Block(iblk)); + } else { + toAdd.add(storedBlock); + } + continue; + } + // move block to the head of the list + this.moveBlockToHead(storedBlock); + } + // collect blocks that have not been reported + // all of them are next to the delimiter + Iterator it = new BlockIterator(delimiter.getNext(0), this); + while(it.hasNext()) + toRemove.add(it.next()); + this.removeBlock(delimiter); + } + + /** Serialization for FSEditLog */ + void readFieldsFromFSEditLog(DataInput in) throws IOException { + this.name = UTF8.readString(in); + this.storageID = UTF8.readString(in); + this.infoPort = in.readShort() & 0x0000ffff; + + this.capacity = in.readLong(); + this.dfsUsed = in.readLong(); + this.remaining = in.readLong(); + this.lastUpdate = in.readLong(); + this.xceiverCount = in.readInt(); + this.location = Text.readString(in); + this.hostName = Text.readString(in); + setAdminState(WritableUtils.readEnum(in, AdminStates.class)); + } + + /** + * @return Approximate number of blocks currently scheduled to be written + * to this datanode. + */ + public int getBlocksScheduled() { + return currApproxBlocksScheduled + prevApproxBlocksScheduled; + } + + /** + * Increments counter for number of blocks scheduled. + */ + void incBlocksScheduled() { + currApproxBlocksScheduled++; + } + + /** + * Decrements counter for number of blocks scheduled. 
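reportDiff() above relies on the per-datanode block list plus a temporary delimiter: every block named in the report is moved to the head of the list, so whatever is still behind the delimiter afterwards was not reported and goes to toRemove. A simplified sketch of the same sentinel trick over a LinkedList, with strings standing in for blocks:

    import java.util.Arrays;
    import java.util.LinkedList;
    import java.util.List;

    public class ReportDiffSketch {
      public static void main(String[] args) {
        // blocks the namenode currently thinks this datanode has
        LinkedList<String> stored = new LinkedList<String>(
            Arrays.asList("blk_1", "blk_2", "blk_3"));
        List<String> reported = Arrays.asList("blk_3", "blk_1");   // what the datanode sent

        String delimiter = "DELIMITER";
        stored.addFirst(delimiter);                  // everything after it is "not yet seen"

        for (String blk : reported) {
          if (stored.remove(blk)) {                  // known block: move it ahead of the delimiter
            stored.addFirst(blk);
          }                                          // unknown blocks would go to toInvalidate
        }

        int cut = stored.indexOf(delimiter);
        List<String> toRemove = stored.subList(cut + 1, stored.size());
        System.out.println(toRemove);                // [blk_2] - stored but no longer reported
      }
    }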
+ */ + void decBlocksScheduled() { + if (prevApproxBlocksScheduled > 0) { + prevApproxBlocksScheduled--; + } else if (currApproxBlocksScheduled > 0) { + currApproxBlocksScheduled--; + } + // its ok if both counters are zero. + } + + /** + * Adjusts curr and prev number of blocks scheduled every few minutes. + */ + private void rollBlocksScheduled(long now) { + if ((now - lastBlocksScheduledRollTime) > + BLOCKS_SCHEDULED_ROLL_INTERVAL) { + prevApproxBlocksScheduled = currApproxBlocksScheduled; + currApproxBlocksScheduled = 0; + lastBlocksScheduledRollTime = now; + } + } + + class DecommissioningStatus { + int underReplicatedBlocks; + int decommissionOnlyReplicas; + int underReplicatedInOpenFiles; + long startTime; + + synchronized void set(int underRep, int onlyRep, int underConstruction) { + if (isDecommissionInProgress() == false) { + return; + } + underReplicatedBlocks = underRep; + decommissionOnlyReplicas = onlyRep; + underReplicatedInOpenFiles = underConstruction; + } + + synchronized int getUnderReplicatedBlocks() { + if (isDecommissionInProgress() == false) { + return 0; + } + return underReplicatedBlocks; + } + + synchronized int getDecommissionOnlyReplicas() { + if (isDecommissionInProgress() == false) { + return 0; + } + return decommissionOnlyReplicas; + } + + synchronized int getUnderReplicatedInOpenFiles() { + if (isDecommissionInProgress() == false) { + return 0; + } + return underReplicatedInOpenFiles; + } + + synchronized void setStartTime(long time) { + startTime = time; + } + + synchronized long getStartTime() { + if (isDecommissionInProgress() == false) { + return 0; + } + return startTime; + } + } // End of class DecommissioningStatus + +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DecommissionManager.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DecommissionManager.java new file mode 100644 index 0000000..4e78d4a --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DecommissionManager.java @@ -0,0 +1,311 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.util.GSet; +import org.apache.hadoop.hdfs.util.LightWeightGSet; + +/** + * Manage node decommissioning. 
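The blocks-scheduled bookkeeping above is an approximate counter split into a current and a previous bucket that roll every BLOCKS_SCHEDULED_ROLL_INTERVAL, so increments that never see a matching decrement (for example, a datanode that fails mid-write and never reports) age out within roughly two intervals. A compact sketch of the same idea:

    public class RollingCounterSketch {
      static final long ROLL_INTERVAL = 600 * 1000;   // 10 minutes, as in DatanodeDescriptor

      private int curr, prev;
      private long lastRollTime;

      void inc()            { curr++; }
      void dec()            { if (prev > 0) prev--; else if (curr > 0) curr--; }
      int  approximateGet() { return curr + prev; }

      /** Called periodically: forget anything older than about two intervals. */
      void maybeRoll(long now) {
        if (now - lastRollTime > ROLL_INTERVAL) {
          prev = curr;          // the current bucket becomes the aging one
          curr = 0;
          lastRollTime = now;
        }
      }

      public static void main(String[] args) {
        RollingCounterSketch c = new RollingCounterSketch();
        c.inc(); c.inc();                        // two blocks scheduled, never acknowledged
        c.maybeRoll(ROLL_INTERVAL + 1);          // first roll: they move to 'prev'
        c.maybeRoll(2 * ROLL_INTERVAL + 2);      // second roll: they are dropped
        System.out.println(c.approximateGet());  // 0
      }
    }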
+ */ +class DecommissionManager { + static final Log LOG = LogFactory.getLog(DecommissionManager.class); + + private final FSNamesystem fsnamesystem; + + DecommissionManager(FSNamesystem namesystem) { + this.fsnamesystem = namesystem; + } + + /** Periodically check decommission status. */ + class Monitor implements Runnable { + /** recheckInterval is how often namenode checks + * if a node has finished decommission + */ + private final long recheckInterval; + /** The number of decommission nodes to check for each interval */ + private final int numNodesPerCheck; + + // datanodes that just started decomission, + // which has higher priority to be checked next + private final LinkedList newlyStarted = + new LinkedList(); + // datanodes that needs to be checked next + private LinkedList toBeChecked = + new LinkedList(); + // datanodes that just finished check + private LinkedList checked = + new LinkedList(); + + // the node is under check + private volatile DatanodeDescriptor nodeBeingCheck; + // if there was an attempt to stop nodeBeingCheck from decommission + private volatile boolean pendingToStopDecommission = false; + + Monitor(int recheckIntervalInSecond, int numNodesPerCheck) { + this.recheckInterval = recheckIntervalInSecond * 1000L; + this.numNodesPerCheck = numNodesPerCheck; + } + + /** + * Add a datanode that is just marked to start decommission + * @param datanode a newly marked decommissioned node + * @return true if the node is added + */ + synchronized boolean startDecommision(DatanodeDescriptor datanode) { + if (datanode == null) { + throw new IllegalArgumentException( + "datanode to be decomissioned can not be null"); + } + if (nodeBeingCheck == datanode) { + pendingToStopDecommission = false; + return false; + } + if (!newlyStarted.contains(datanode) && + !toBeChecked.contains(datanode) && !checked.contains(datanode)) { + newlyStarted.offer(datanode); + notifyAll(); + return true; + } + return false; + } + + /** + * Stop a node from decommission by removing it from the queue + * @param datanode a datanode + * @return true if decommission is stopped; false if it is pending + */ + synchronized boolean stopDecommission(DatanodeDescriptor datanode) + throws IOException { + if (datanode == null) { + throw new IllegalArgumentException( + "datanode to be removed can not be null"); + } + if (datanode == nodeBeingCheck) { + // the node to be stopped decommission is under check + // so waiting for it to be done + pendingToStopDecommission = true; + return false; + } + if (newlyStarted.remove(datanode) || + toBeChecked.remove(datanode)) { + checked.remove(datanode); + } + return true; + } + + /** + * Return a list of unchecked blocks on srcNode + * + * @param srcNode a datanode + * @param checkedBlocks all blocks that have been checked + * @param numBlocks maximum number of blocks to return + * @return a list of blocks to be checked + */ + private List fetchBlocks( + GSet checkedBlocks, int numBlocks) { + final List blocksToCheck = new ArrayList(numBlocks); + fsnamesystem.readLock(); + try { + final Iterator it = nodeBeingCheck.getBlockIterator(); + while (blocksToCheck.size() checkedBlocks = + new LightWeightGSet(numOfBlocks); + List blocksToCheck; + int numBlocksToCheck; + do { + // get a batch of unchecked blocks + blocksToCheck = fetchBlocks(checkedBlocks, numOfBlocksToFetch); + numBlocksToCheck = blocksToCheck.size(); + for (int i=0; i tmp = toBeChecked; + toBeChecked = checked; + checked = tmp; + } + } + + /** + * Mark the given datanode as just checked + * @param datanode + */ 
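Parts of the Monitor code above lost text in this copy (the fetchBlocks() loop body and the per-node check), but the surviving queue declarations and the final swap outline the scheduling: newly started nodes are examined first, then nodes from toBeChecked, and when toBeChecked drains it trades roles with checked for the next round. A rough sketch of that rotation under those assumptions (all names here are hypothetical, not the original methods):

    import java.util.LinkedList;

    public class DecommissionQueueSketch {
      private final LinkedList<String> newlyStarted = new LinkedList<String>();
      private LinkedList<String> toBeChecked = new LinkedList<String>();
      private LinkedList<String> checked = new LinkedList<String>();

      synchronized void startDecommission(String node) {
        if (!newlyStarted.contains(node) && !toBeChecked.contains(node)
            && !checked.contains(node)) {
          newlyStarted.offer(node);          // newly marked nodes get priority
        }
      }

      /** Pick the next node to examine, or null if nothing is decommissioning. */
      synchronized String pickNext() {
        if (!newlyStarted.isEmpty()) {
          return newlyStarted.poll();
        }
        if (toBeChecked.isEmpty()) {         // this round is done: start over with the checked ones
          LinkedList<String> tmp = toBeChecked;
          toBeChecked = checked;
          checked = tmp;
        }
        return toBeChecked.poll();
      }

      synchronized void doneCheck(String node, boolean finished) {
        if (!finished) {
          checked.add(node);                 // look at it again in a later round
        }
      }

      public static void main(String[] args) {
        DecommissionQueueSketch m = new DecommissionQueueSketch();
        m.startDecommission("dn-a");
        m.startDecommission("dn-b");
        System.out.println(m.pickNext());    // dn-a
      }
    }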
+ synchronized private void doneCheck(final boolean isDecommissioned) { + if (!isDecommissioned) { + // put to checked for next iteration of check + checked.add(nodeBeingCheck); + } + nodeBeingCheck = null; + } + + /** + * Check up to numNodesPerCheck decommissioning in progress datanodes to + * see if all their blocks are replicated. + */ + private void check() { + for (int i=0; iblockset mapping always-current + * and logged to disk. + * + *************************************************/ +class FSDirectory implements FSConstants, Closeable { + + final FSNamesystem namesystem; + final INodeDirectoryWithQuota rootDir; + FSImage fsImage; + private boolean ready = false; + // Metrics record + private MetricsRecord directoryMetrics = null; + private final int lsLimit; // max list limit + /** + * Caches frequently used file names used in {@link INode} to reuse + * byte[] objects and reduce heap usage. + */ + private final NameCache nameCache; + + // lock to protect BlockMap. + private ReentrantReadWriteLock bLock; + private Condition cond; + private boolean hasRwLock; + + // utility methods to acquire and release read lock and write lock + // if hasRwLock is false, then readLocks morph into writeLocks. + void readLock() { + if (hasRwLock) { + this.bLock.readLock().lock(); + } else { + writeLock(); + } + } + + void readUnlock() { + if (hasRwLock) { + this.bLock.readLock().unlock(); + } else { + writeUnlock(); + } + } + + void writeLock() { + this.bLock.writeLock().lock(); + } + + void writeUnlock() { + this.bLock.writeLock().unlock(); + } + + boolean hasWriteLock() { + return this.bLock.isWriteLockedByCurrentThread(); + } + + /** Access an existing dfs name directory. */ + FSDirectory(FSNamesystem ns, Configuration conf) throws IOException { + this(new FSImage(conf), ns, conf); + } + + FSDirectory(FSImage fsImage, FSNamesystem ns, Configuration conf) { + rootDir = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME, + ns.createFsOwnerPermissions(new FsPermission((short)0755)), + Integer.MAX_VALUE, -1); + this.fsImage = fsImage; + int configuredLimit = conf.getInt( + "dfs.ls.limit", 1000); + this.lsLimit = configuredLimit>0 ? 
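The readLock()/writeLock() helpers above let FSDirectory fall back to purely exclusive locking: when hasRwLock is false, every readLock() call simply morphs into a writeLock(). A self-contained sketch of that pattern with java.util.concurrent.locks (class and field names are hypothetical):

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class MorphingLockSketch {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true);  // fair, as above
      private final boolean hasRwLock;

      MorphingLockSketch(boolean hasRwLock) {
        this.hasRwLock = hasRwLock;
      }

      void readLock()    { if (hasRwLock) lock.readLock().lock();    else writeLock(); }
      void readUnlock()  { if (hasRwLock) lock.readLock().unlock();  else writeUnlock(); }
      void writeLock()   { lock.writeLock().lock(); }
      void writeUnlock() { lock.writeLock().unlock(); }

      public static void main(String[] args) {
        MorphingLockSketch dir = new MorphingLockSketch(false);   // read locks degrade to write locks
        dir.readLock();
        try {
          System.out.println("holding write lock: "
              + dir.lock.isWriteLockedByCurrentThread());         // true
        } finally {
          dir.readUnlock();
        }
      }
    }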
+ configuredLimit : 1000; + namesystem = ns; + int threshold = conf.getInt( + "dfs.namenode.name.cache.threshold", + 10); + NameNode.LOG.info("Caching file names occuring more than " + threshold + + " times "); + nameCache = new NameCache(threshold); + initialize(conf); + } + + private void initialize(Configuration conf) { + MetricsContext metricsContext = MetricsUtil.getContext("dfs"); + directoryMetrics = MetricsUtil.createRecord(metricsContext, "FSDirectory"); + directoryMetrics.setTag("sessionId", conf.get("session.id")); + this.bLock = new ReentrantReadWriteLock(true); // fair + this.cond = bLock.writeLock().newCondition(); + this.hasRwLock = namesystem.hasRwLock; + } + + void loadFSImage(Collection dataDirs, + Collection editsDirs, + StartupOption startOpt) throws IOException { + // format before starting up if requested + if (startOpt == StartupOption.FORMAT) { + fsImage.setStorageDirectories(dataDirs, editsDirs); + fsImage.format(); + startOpt = StartupOption.REGULAR; + } + try { + if (fsImage.recoverTransitionRead(dataDirs, editsDirs, startOpt)) { + fsImage.saveFSImage(); + } + FSEditLog editLog = fsImage.getEditLog(); + assert editLog != null : "editLog must be initialized"; + if (!editLog.isOpen()) + editLog.open(); + fsImage.setCheckpointDirectories(null, null); + } catch(IOException e) { + fsImage.close(); + throw e; + } + writeLock(); + try { + this.ready = true; + this.nameCache.initialized(); + cond.signalAll(); + } finally { + writeUnlock(); + } + } + + private void incrDeletedFileCount(int count) { + directoryMetrics.incrMetric("files_deleted", count); + directoryMetrics.update(); + } + + /** + * Shutdown the filestore + */ + public void close() throws IOException { + fsImage.close(); + } + + /** + * Block until the object is ready to be used. + */ + void waitForReady() { + if (!ready) { + writeLock(); + try { + while (!ready) { + try { + cond.await(5000, TimeUnit.MILLISECONDS); + } catch (InterruptedException ie) { + } + } + } finally { + writeUnlock(); + } + } + } + + /** + * Add the given filename to the fs. + */ + INodeFileUnderConstruction addFile(String path, + PermissionStatus permissions, + short replication, + long preferredBlockSize, + String clientName, + String clientMachine, + DatanodeDescriptor clientNode, + long generationStamp) + throws IOException { + waitForReady(); + + // Always do an implicit mkdirs for parent directory tree. 
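waitForReady() above parks callers on a Condition tied to the write lock until loadFSImage() flips the ready flag and calls signalAll(), with a 5 second re-check so a missed signal cannot hang a caller forever. A minimal sketch of that start-up gate, under the assumption that the rest of the class is stripped away (names here are illustrative):

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.locks.Condition;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class ReadySketch {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true);
      private final Condition cond = lock.writeLock().newCondition();
      private volatile boolean ready = false;

      /** Block callers until start-up work has finished. */
      void waitForReady() throws InterruptedException {
        if (ready) return;
        lock.writeLock().lock();
        try {
          while (!ready) {
            cond.await(5000, TimeUnit.MILLISECONDS);   // re-check periodically
          }
        } finally {
          lock.writeLock().unlock();
        }
      }

      /** Called once start-up work is finished. */
      void markReady() {
        lock.writeLock().lock();
        try {
          ready = true;
          cond.signalAll();                            // wake every waiter
        } finally {
          lock.writeLock().unlock();
        }
      }

      public static void main(String[] args) throws InterruptedException {
        final ReadySketch r = new ReadySketch();
        new Thread(new Runnable() { public void run() { r.markReady(); } }).start();
        r.waitForReady();
        System.out.println("ready");
      }
    }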
+ long modTime = FSNamesystem.now(); + if (!mkdirs(new Path(path).getParent().toString(), permissions, true, + modTime)) { + return null; + } + INodeFileUnderConstruction newNode = new INodeFileUnderConstruction( + permissions,replication, + preferredBlockSize, modTime, clientName, + clientMachine, clientNode); + writeLock(); + try { + newNode = addNode(path, newNode, -1, false); + } finally { + writeUnlock(); + } + if (newNode == null) { + NameNode.stateChangeLog.info("DIR* FSDirectory.addFile: " + +"failed to add "+path + +" to the file system"); + return null; + } + // add create file record to log, record new generation stamp + fsImage.getEditLog().logOpenFile(path, newNode); + + NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: " + +path+" is added to the file system"); + return newNode; + } + + /** + */ + INode unprotectedAddFile( String path, + PermissionStatus permissions, + Block[] blocks, + short replication, + long modificationTime, + long atime, + long preferredBlockSize) { + INode newNode; + long diskspace = -1; // unknown + if (blocks == null) + newNode = new INodeDirectory(permissions, modificationTime); + else { + newNode = new INodeFile(permissions, blocks.length, replication, + modificationTime, atime, preferredBlockSize); + diskspace = ((INodeFile)newNode).diskspaceConsumed(blocks); + } + writeLock(); + try { + try { + newNode = addNode(path, newNode, diskspace, false); + if(newNode != null && blocks != null) { + int nrBlocks = blocks.length; + // Add file->block mapping + INodeFile newF = (INodeFile)newNode; + for (int i = 0; i < nrBlocks; i++) { + newF.setBlock(i, namesystem.blocksMap.addINode(blocks[i], newF)); + } + } + } catch (IOException e) { + return null; + } + return newNode; + } finally { + writeUnlock(); + } + } + + INodeDirectory addToParent( byte[][] src, + INodeDirectory parentINode, + PermissionStatus permissions, + Block[] blocks, + short replication, + long modificationTime, + long atime, + long nsQuota, + long dsQuota, + long preferredBlockSize, + boolean propagateModTime) { + // NOTE: This does not update space counts for parents + // create new inode + INode newNode; + if (blocks == null) { + if (nsQuota >= 0 || dsQuota >= 0) { + newNode = new INodeDirectoryWithQuota( + permissions, modificationTime, nsQuota, dsQuota); + } else { + newNode = new INodeDirectory(permissions, modificationTime); + } + } else + newNode = new INodeFile(permissions, blocks.length, replication, + modificationTime, atime, preferredBlockSize); + // add new node to the parent + INodeDirectory newParent = null; + writeLock(); + try { + try { + newParent = rootDir.addToParent(src, newNode, parentINode, + false, propagateModTime); + cacheName(newNode); + } catch (FileNotFoundException e) { + return null; + } + if(newParent == null) + return null; + if(blocks != null) { + int nrBlocks = blocks.length; + // Add file->block mapping + INodeFile newF = (INodeFile)newNode; + for (int i = 0; i < nrBlocks; i++) { + newF.setBlock(i, namesystem.blocksMap.addINode(blocks[i], newF)); + } + } + } finally { + writeUnlock(); + } + return newParent; + } + + /** + * Add a block to the file. Returns a reference to the added block. 
+ */ + Block addBlock(String path, INode[] inodes, Block block) throws IOException { + waitForReady(); + + writeLock(); + try { + INodeFile fileNode = (INodeFile) inodes[inodes.length-1]; + + // check quota limits and updated space consumed + updateCount(inodes, inodes.length-1, 0, + fileNode.getPreferredBlockSize()*fileNode.getReplication(), true); + + // associate the new list of blocks with this file + namesystem.blocksMap.addINode(block, fileNode); + BlockInfo blockInfo = namesystem.blocksMap.getStoredBlock(block); + fileNode.addBlock(blockInfo); + + NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: " + + path + " with " + block + + " block is added to the in-memory " + + "file system"); + } finally { + writeUnlock(); + } + return block; + } + + /** + * Persist the block list for the inode. + */ + void persistBlocks(String path, INodeFileUnderConstruction file) + throws IOException { + waitForReady(); + + writeLock(); + try { + fsImage.getEditLog().logOpenFile(path, file); + NameNode.stateChangeLog.debug("DIR* FSDirectory.persistBlocks: " + +path+" with "+ file.getBlocks().length + +" blocks is persisted to the file system"); + } finally { + writeUnlock(); + } + } + + /** + * Close file. + */ + void closeFile(String path, INodeFile file) throws IOException { + waitForReady(); + writeLock(); + try { + long now = FSNamesystem.now(); + // file is closed + file.setModificationTimeForce(now); + fsImage.getEditLog().logCloseFile(path, file); + if (NameNode.stateChangeLog.isDebugEnabled()) { + NameNode.stateChangeLog.debug("DIR* FSDirectory.closeFile: " + +path+" with "+ file.getBlocks().length + +" blocks is persisted to the file system"); + } + } finally { + writeUnlock(); + } + } + + /** + * Remove a block to the file. + */ + boolean removeBlock(String path, INodeFileUnderConstruction fileNode, + Block block) throws IOException { + waitForReady(); + + writeLock(); + try { + // modify file-> block and blocksMap + fileNode.removeBlock(block); + namesystem.blocksMap.removeINode(block); + // If block is removed from blocksMap remove it from corruptReplicasMap + namesystem.corruptReplicas.removeFromCorruptReplicasMap(block); + + // write modified block locations to log + fsImage.getEditLog().logOpenFile(path, fileNode); + NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: " + +path+" with "+block + +" block is added to the file system"); + } finally { + writeUnlock(); + } + return true; + } + + /** + * @see #unprotectedRenameTo(String, String, long) + */ + boolean renameTo(String src, String dst) throws QuotaExceededException { + if (NameNode.stateChangeLog.isDebugEnabled()) { + NameNode.stateChangeLog.debug("DIR* FSDirectory.renameTo: " + +src+" to "+dst); + } + waitForReady(); + long now = FSNamesystem.now(); + if (!unprotectedRenameTo(src, dst, now)) + return false; + fsImage.getEditLog().logRename(src, dst, now); + return true; + } + + /** Change a path name + * + * @param src source path + * @param dst destination path + * @return true if rename succeeds; false otherwise + * @throws QuotaExceededException if the operation violates any quota limit + */ + boolean unprotectedRenameTo(String src, String dst, long timestamp) + throws QuotaExceededException { + writeLock(); + try { + INode[] srcInodes = rootDir.getExistingPathINodes(src); + + // check the validation of the source + if (srcInodes[srcInodes.length-1] == null) { + NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + + "failed to rename " + src + " to " + dst + + " because source does not exist"); + 
return false; + } + if (srcInodes.length == 1) { + NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + +"failed to rename "+src+" to "+dst+ " because source is the root"); + return false; + } + if (isDir(dst)) { + dst += Path.SEPARATOR + new Path(src).getName(); + } + + // check the validity of the destination + if (dst.equals(src)) { + return true; + } + // dst cannot be directory or a file under src + if (dst.startsWith(src) && + dst.charAt(src.length()) == Path.SEPARATOR_CHAR) { + NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + + "failed to rename " + src + " to " + dst + + " because destination starts with src"); + return false; + } + + byte[][] dstComponents = INode.getPathComponents(dst); + INode[] dstInodes = new INode[dstComponents.length]; + rootDir.getExistingPathINodes(dstComponents, dstInodes); + if (dstInodes[dstInodes.length-1] != null) { + NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + +"failed to rename "+src+" to "+dst+ + " because destination exists"); + return false; + } + if (dstInodes[dstInodes.length-2] == null) { + NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + +"failed to rename "+src+" to "+dst+ + " because destination's parent does not exist"); + return false; + } + + // Ensure dst has quota to accommodate rename + verifyQuotaForRename(srcInodes,dstInodes); + + INode dstChild = null; + INode srcChild = null; + String srcChildName = null; + try { + // remove src + srcChild = removeChild(srcInodes, srcInodes.length-1); + if (srcChild == null) { + NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + + "failed to rename " + src + " to " + dst + + " because the source can not be removed"); + return false; + } + srcChildName = srcChild.getLocalName(); + srcChild.setLocalName(dstComponents[dstInodes.length-1]); + + // add src to the destination + dstChild = addChildNoQuotaCheck(dstInodes, dstInodes.length - 1, + srcChild, -1, false); + if (dstChild != null) { + srcChild = null; + if (NameNode.stateChangeLog.isDebugEnabled()) { + NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedRenameTo: " + src + + " is renamed to " + dst); + } + // update modification time of dst and the parent of src + srcInodes[srcInodes.length-2].setModificationTime(timestamp); + dstInodes[dstInodes.length-2].setModificationTime(timestamp); + return true; + } + } finally { + if (dstChild == null && srcChild != null) { + // put it back + srcChild.setLocalName(srcChildName); + addChildNoQuotaCheck(srcInodes, srcInodes.length - 1, srcChild, -1, + false); + } + } + NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + +"failed to rename "+src+" to "+dst); + return false; + } finally { + writeUnlock(); + } + } + + /** + * Set file replication + * + * @param src file name + * @param replication new replication + * @param oldReplication old replication - output parameter + * @return array of file blocks + * @throws IOException + */ + Block[] setReplication(String src, + short replication, + int[] oldReplication + ) throws IOException { + waitForReady(); + Block[] fileBlocks = unprotectedSetReplication(src, replication, oldReplication); + if (fileBlocks != null) // log replication change + fsImage.getEditLog().logSetReplication(src, replication); + return fileBlocks; + } + + Block[] unprotectedSetReplication( String src, + short replication, + int[] oldReplication + ) throws IOException { + if (oldReplication == null) + oldReplication = new int[1]; + oldReplication[0] = 
-1; + Block[] fileBlocks = null; + writeLock(); + try { + INode[] inodes = rootDir.getExistingPathINodes(src); + INode inode = inodes[inodes.length - 1]; + if (inode == null) + return null; + if (inode.isDirectory()) + return null; + INodeFile fileNode = (INodeFile)inode; + oldReplication[0] = fileNode.getReplication(); + + // check disk quota + long dsDelta = (replication - oldReplication[0]) * + (fileNode.diskspaceConsumed()/oldReplication[0]); + updateCount(inodes, inodes.length-1, 0, dsDelta, true); + + fileNode.setReplication(replication); + fileBlocks = fileNode.getBlocks(); + } finally { + writeUnlock(); + } + return fileBlocks; + } + + /** + * Get the blocksize of a file + * @param filename the filename + * @return the number of bytes + * @throws IOException if it is a directory or does not exist. + */ + long getPreferredBlockSize(String filename) throws IOException { + readLock(); + try { + INode fileNode = rootDir.getNode(filename); + if (fileNode == null) { + throw new IOException("Unknown file: " + filename); + } + if (fileNode.isDirectory()) { + throw new IOException("Getting block size of a directory: " + + filename); + } + return ((INodeFile)fileNode).getPreferredBlockSize(); + } finally { + readUnlock(); + } + } + + boolean exists(String src) { + src = normalizePath(src); + readLock(); + try { + INode inode = rootDir.getNode(src); + if (inode == null) { + return false; + } + return inode.isDirectory()? true: ((INodeFile)inode).getBlocks() != null; + } finally { + readUnlock(); + } + } + + void setPermission(String src, FsPermission permission + ) throws IOException { + unprotectedSetPermission(src, permission); + fsImage.getEditLog().logSetPermissions(src, permission); + } + + void unprotectedSetPermission(String src, FsPermission permissions) throws FileNotFoundException { + writeLock(); + try { + INode inode = rootDir.getNode(src); + if(inode == null) + throw new FileNotFoundException("File does not exist: " + src); + inode.setPermission(permissions); + } finally { + writeUnlock(); + } + } + + void setOwner(String src, String username, String groupname + ) throws IOException { + unprotectedSetOwner(src, username, groupname); + fsImage.getEditLog().logSetOwner(src, username, groupname); + } + + void unprotectedSetOwner(String src, String username, String groupname) throws FileNotFoundException { + writeLock(); + try { + INode inode = rootDir.getNode(src); + if(inode == null) + throw new FileNotFoundException("File does not exist: " + src); + if (username != null) { + inode.setUser(username); + } + if (groupname != null) { + inode.setGroup(groupname); + } + } finally { + writeUnlock(); + } + } + + /** + * Concat all the blocks from srcs to trg and delete the srcs files + */ + public void concatInternal(String target, String [] srcs) { + // actual move + waitForReady(); + + long now = FSNamesystem.now(); + unprotectedConcat(target, srcs, now); + fsImage.getEditLog().logConcat(target, srcs, now); + } + + /** + * Concat all the blocks from srcs to trg and delete the srcs files + * @param target target file to move the blocks to + * @param srcs list of file to move the blocks from + * Must be public because also called from EditLogs + * NOTE: - it does not update quota (not needed for concat) + */ + public void unprotectedConcat(String target, String [] srcs, long now) { + if (NameNode.stateChangeLog.isDebugEnabled()) { + NameNode.stateChangeLog.debug("DIR* FSNamesystem.concat to "+target); + } + // do the move + + INode [] trgINodes = getExistingPathINodes(target); + INodeFile 
trgInode = (INodeFile) trgINodes[trgINodes.length-1]; + INodeDirectory trgParent = (INodeDirectory)trgINodes[trgINodes.length-2]; + + INodeFile [] allSrcInodes = new INodeFile[srcs.length]; + int i = 0; + int totalBlocks = 0; + + writeLock(); + try { + for(String src : srcs) { + INodeFile srcInode = getFileINode(src); + allSrcInodes[i++] = srcInode; + totalBlocks += srcInode.blocks.length; + } + trgInode.appendBlocks(allSrcInodes, totalBlocks); // copy the blocks + + // since we are in the same dir - we can use same parent to remove files + int count = 0; + for(INodeFile nodeToRemove: allSrcInodes) { + if(nodeToRemove == null) continue; + + nodeToRemove.blocks = null; + trgParent.removeChild(nodeToRemove); + count++; + } + trgInode.setModificationTime(now); + trgParent.setModificationTime(now); + // update quota on the parent directory ('count' files removed, 0 space) + unprotectedUpdateCount(trgINodes, trgINodes.length-1, - count, 0); + } finally { + writeUnlock(); + } + } + + /** + * Remove the file from management, return blocks + */ + INode delete(String src, List collectedBlocks) { + if (NameNode.stateChangeLog.isDebugEnabled()) { + NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: "+src); + } + waitForReady(); + long now = FSNamesystem.now(); + INode deletedNode = unprotectedDelete(src, collectedBlocks, now); + if (deletedNode != null) { + fsImage.getEditLog().logDelete(src, now); + } + return deletedNode; + } + + /** Return if a directory is empty or not **/ + boolean isDirEmpty(String src) { + boolean dirNotEmpty = true; + if (!isDir(src)) { + return true; + } + readLock(); + try { + INode targetNode = rootDir.getNode(src); + assert targetNode != null : "should be taken care in isDir() above"; + if (((INodeDirectory)targetNode).getChildren().size() != 0) { + dirNotEmpty = false; + } + } finally { + readUnlock(); + } + return dirNotEmpty; + } + + /** + * Delete a path from the name space + * Update the count at each ancestor directory with quota + * @param src a string representation of a path to an inode + * @param modificationTime the time the inode is removed + * @return the deleted target inode, null if deletion failed + */ + INode unprotectedDelete(String src, long modificationTime) { + return unprotectedDelete(src, null, modificationTime); + } + + /** + * Delete a path from the name space + * Update the count at each ancestor directory with quota + * The blocks will be put in toBeDeletedBlocks to be removed later + * @param src a string representation of a path to an inode + * @param toBeDeletedBlocks the place holder for the blocks to be removed + * @param modificationTime the time the inode is removed + * @return the deleted target inode, null if deletion failed + */ + INode unprotectedDelete(String src, List toBeDeletedBlocks, + long modificationTime) { + src = normalizePath(src); + + writeLock(); + try { + INode[] inodes = rootDir.getExistingPathINodes(src); + INode targetNode = inodes[inodes.length-1]; + + if (targetNode == null) { // non-existent src + NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: " + +"failed to remove "+src+" because it does not exist"); + return null; + } else if (inodes.length == 1) { // src is the root + NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: " + + "failed to remove " + src + + " because the root is not allowed to be deleted"); + return null; + } else { + try { + // Remove the node from the namespace + removeChild(inodes, inodes.length-1); + // set the parent's modification time + 
inodes[inodes.length-2].setModificationTime(modificationTime); + // GC all the blocks underneath the node. + if (toBeDeletedBlocks == null) { + ArrayList v = new ArrayList(); + int filesRemoved = + targetNode.collectSubtreeBlocksAndClear(v); + incrDeletedFileCount(filesRemoved); + // remove the blocks right away if toBeDeletedBlocks is null + namesystem.removePathAndBlocks(src, v); + } else { + int filesRemoved = + targetNode.collectSubtreeBlocksAndClear(toBeDeletedBlocks); + incrDeletedFileCount(filesRemoved); + // pass null for the blocks because the blocks will be deleted later + namesystem.removePathAndBlocks(src, null); + } + if (NameNode.stateChangeLog.isDebugEnabled()) { + NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: " + +src+" is removed"); + } + return targetNode; + } catch (IOException e) { + NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: " + + "failed to remove " + src + " because " + e.getMessage()); + return null; + } + } + } finally { + writeUnlock(); + } + } + + /** + * Replaces the specified inode with the specified one. + */ + void replaceNode(String path, INodeFile oldnode, INodeFile newnode) + throws IOException { + replaceNode(path, oldnode, newnode, true); + } + + /** + * @see #replaceNode(String, INodeFile, INodeFile) + */ + private void replaceNode(String path, INodeFile oldnode, INodeFile newnode, + boolean updateDiskspace) throws IOException { + writeLock(); + try { + long dsOld = oldnode.diskspaceConsumed(); + + // + // Remove the node from the namespace + // + if (!oldnode.removeNode()) { + NameNode.stateChangeLog.warn("DIR* FSDirectory.replaceNode: " + + "failed to remove " + path); + throw new IOException("FSDirectory.replaceNode: " + + "failed to remove " + path); + } + + /* Currently oldnode and newnode are assumed to contain the same + * blocks. Otherwise, blocks need to be removed from the blocksMap. + */ + + rootDir.addNode(path, newnode); + + //check if disk space needs to be updated. + long dsNew = 0; + if (updateDiskspace && (dsNew = newnode.diskspaceConsumed()) != dsOld) { + try { + updateSpaceConsumed(path, 0, dsNew-dsOld); + } catch (QuotaExceededException e) { + // undo + replaceNode(path, newnode, oldnode, false); + throw e; + } + } + + int index = 0; + for (Block b : newnode.getBlocks()) { + BlockInfo info = namesystem.blocksMap.addINode(b, newnode); + newnode.setBlock(index, info); // inode refers to the block in BlocksMap + index++; + } + } finally { + writeUnlock(); + } + } + + /** + * Get a listing of files given path 'src' + * + * This function is admittedly very inefficient right now. We'll + * make it better later. + */ + FileStatus[] getListing(String src) { + String srcs = normalizePath(src); + + readLock(); + try { + INode targetNode = rootDir.getNode(srcs); + if (targetNode == null) + return null; + if (!targetNode.isDirectory()) { + return new FileStatus[]{createFileStatus( + src, targetNode)}; + } + List contents = ((INodeDirectory)targetNode).getChildren(); + if(! srcs.endsWith(Path.SEPARATOR)) + srcs += Path.SEPARATOR; + FileStatus listing[] = new FileStatus[contents.size()]; + int i = 0; + for (INode cur : contents) { + listing[i] = createFileStatus(srcs+cur.getLocalName(), cur); + i++; + } + return listing; + } finally { + readUnlock(); + } + } + + /** + * Get a listing of files given path 'src' + * + * This function is admittedly very inefficient right now. We'll + * make it better later. 
+ */ + HdfsFileStatus[] getHdfsListing(String src) { + String srcs = normalizePath(src); + + readLock(); + try { + INode targetNode = rootDir.getNode(srcs); + if (targetNode == null) + return null; + if (!targetNode.isDirectory()) { + return new HdfsFileStatus[]{createHdfsFileStatus( + HdfsFileStatus.EMPTY_NAME, targetNode)}; + } + List contents = ((INodeDirectory)targetNode).getChildren(); + HdfsFileStatus listing[] = new HdfsFileStatus[contents.size()]; + int i = 0; + for (INode cur : contents) { + listing[i] = createHdfsFileStatus(cur.name, cur); + i++; + } + return listing; + } finally { + readUnlock(); + } + } + + /** + * Get a partial listing of the indicated directory + * + * @param src the directory name + * @param startAfter the name to start listing after + * @param needLocation if block locations are returned + * @return a partial listing starting after startAfter + */ + DirectoryListing getPartialListing(String src, byte[] startAfter, + boolean needLocation) throws IOException { + String srcs = normalizePath(src); + + readLock(); + try { + INode targetNode = rootDir.getNode(srcs); + if (targetNode == null) + return null; + + if (!targetNode.isDirectory()) { + HdfsFileStatus[] partialListing = new HdfsFileStatus[]{ + createHdfsFileStatus( + HdfsFileStatus.EMPTY_NAME, targetNode)}; + if (needLocation) { + return new LocatedDirectoryListing(partialListing, + new LocatedBlocks[] {createLocatedBlocks(targetNode)}, 0); + } else { + return new DirectoryListing(partialListing, 0); + } + } + INodeDirectory dirInode = (INodeDirectory)targetNode; + List contents = dirInode.getChildren(); + // find the first child whose name is greater than startAfter + int startChild = dirInode.nextChild(startAfter); + int totalNumChildren = contents.size(); + int numOfListing = Math.min(totalNumChildren-startChild, this.lsLimit); + HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing]; + LocatedBlocks [] blockLocations = new LocatedBlocks[numOfListing]; + for (int i=0; iinodes.length) { + numOfINodes = inodes.length; + } + if (checkQuota) { + verifyQuota(inodes, numOfINodes, nsDelta, dsDelta, null); + } + for(int i = 0; i < numOfINodes; i++) { + if (inodes[i].isQuotaSet()) { // a directory with quota + INodeDirectoryWithQuota node =(INodeDirectoryWithQuota)inodes[i]; + node.updateNumItemsInTree(nsDelta, dsDelta); + } + } + } + + /** + * update quota of each inode and check to see if quota is exceeded. 
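getListing, getHdfsListing and getPartialListing above serve directory listings under the read lock; getPartialListing additionally pages through large directories at most lsLimit entries at a time. A small client-side sketch of what ends up calling this code, with an illustrative path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListingSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        FileStatus[] entries = fs.listStatus(new Path("/user"));  // answered from FSDirectory.getListing(...)
        if (entries != null) {                                    // null when the path does not exist
          for (FileStatus st : entries) {
            System.out.println((st.isDir() ? "d " : "- ") + st.getPath() + " " + st.getLen());
          }
        }
        fs.close();
      }
    }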
+ * See {@link #updateCount(INode[], int, long, long, boolean)} + */ + private void updateCountNoQuotaCheck(INode[] inodes, int numOfINodes, + long nsDelta, long dsDelta) { + try { + updateCount(inodes, numOfINodes, nsDelta, dsDelta, false); + } catch (QuotaExceededException e) { + NameNode.LOG.warn("FSDirectory.updateCountNoQuotaCheck - unexpected ", e); + } + } + + /** + * updates quota without verification + * callers responsibility is to make sure quota is not exceeded + * @param inodes + * @param numOfINodes + * @param nsDelta + * @param dsDelta + */ + private void unprotectedUpdateCount(INode[] inodes, int numOfINodes, + long nsDelta, long dsDelta) { + for(int i=0; i < numOfINodes; i++) { + if (inodes[i].isQuotaSet()) { // a directory with quota + INodeDirectoryWithQuota node =(INodeDirectoryWithQuota)inodes[i]; + node.unprotectedUpdateNumItemsInTree(nsDelta, dsDelta); + } + } + } + + + /** Return the name of the path represented by inodes at [0, pos] */ + private static String getFullPathName(INode[] inodes, int pos) { + StringBuilder fullPathName = new StringBuilder(); + for (int i=1; i<=pos; i++) { + fullPathName.append(Path.SEPARATOR_CHAR).append(inodes[i].getLocalName()); + } + return fullPathName.toString(); + } + + /** Return the full path name of the specified inode */ + static String getFullPathName(INode inode) { + // calculate the depth of this inode from root + int depth = 0; + for (INode i = inode; i != null; i = i.parent) { + depth++; + } + INode[] inodes = new INode[depth]; + + // fill up the inodes in the path from this inode to root + for (int i = 0; i < depth; i++) { + inodes[depth-i-1] = inode; + inode = inode.parent; + } + return getFullPathName(inodes, depth-1); + } + + /** + * Create a directory + * If ancestor directories do not exist, automatically create them. + + * @param src string representation of the path to the directory + * @param permissions the permission of the directory + * @param inheritPermission if the permission of the directory should inherit + * from its parent or not. 
The automatically created + * ones always inherit its permission from its parent + * @param now creation time + * @return true if the operation succeeds false otherwise + * @throws FileNotFoundException if an ancestor or itself is a file + * @throws QuotaExceededException if directory creation violates + * any quota limit + */ + boolean mkdirs(String src, PermissionStatus permissions, + boolean inheritPermission, long now) + throws FileNotFoundException, QuotaExceededException { + src = normalizePath(src); + String[] names = INode.getPathNames(src); + byte[][] components = INode.getPathComponents(names); + INode[] inodes = new INode[components.length]; + + writeLock(); + try { + rootDir.getExistingPathINodes(components, inodes); + + // find the index of the first null in inodes[] + StringBuilder pathbuilder = new StringBuilder(); + int i = 1; + for(; i < inodes.length && inodes[i] != null; i++) { + pathbuilder.append(Path.SEPARATOR + names[i]); + if (!inodes[i].isDirectory()) { + throw new FileNotFoundException("Parent path is not a directory: " + + pathbuilder); + } + } + + // create directories beginning from the first null index + for(; i < inodes.length; i++) { + pathbuilder.append(Path.SEPARATOR + names[i]); + String cur = pathbuilder.toString(); + unprotectedMkdir(inodes, i, components[i], permissions, + inheritPermission || i != components.length-1, now); + if (inodes[i] == null) { + return false; + } + // Directory creation also count towards FilesCreated + // to match count of files_deleted metric. + if (namesystem != null) + NameNode.getNameNodeMetrics().numFilesCreated.inc(); + fsImage.getEditLog().logMkDir(cur, inodes[i]); + NameNode.stateChangeLog.debug( + "DIR* FSDirectory.mkdirs: created directory " + cur); + } + } finally { + writeUnlock(); + } + return true; + } + + /** + */ + INode unprotectedMkdir(String src, PermissionStatus permissions, + long timestamp) throws QuotaExceededException { + byte[][] components = INode.getPathComponents(src); + INode[] inodes = new INode[components.length]; + writeLock(); + try { + rootDir.getExistingPathINodes(components, inodes); + unprotectedMkdir(inodes, inodes.length-1, components[inodes.length-1], + permissions, false, timestamp); + return inodes[inodes.length-1]; + } finally { + writeUnlock(); + } + } + + /** create a directory at index pos. + * The parent path to the directory is at [0, pos-1]. + * All ancestors exist. Newly created one stored at index pos. + */ + private void unprotectedMkdir(INode[] inodes, int pos, + byte[] name, PermissionStatus permission, boolean inheritPermission, + long timestamp) throws QuotaExceededException { + inodes[pos] = addChild(inodes, pos, + new INodeDirectory(name, permission, timestamp), + -1, inheritPermission ); + } + + /** Add a node child to the namespace. The full path name of the node is src. + * childDiskspace should be -1, if unknown. 
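The mkdirs implementation above walks the path once, creates every missing ancestor, and logs an OP_MKDIR record per directory it creates. From a client this is reached through FileSystem#mkdirs; a minimal sketch with an illustrative path and permission:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class MkdirsSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // creates /projects, /projects/demo and /projects/demo/input as needed
        boolean created = fs.mkdirs(new Path("/projects/demo/input"), new FsPermission((short) 0755));
        System.out.println("mkdirs returned " + created);
        fs.close();
      }
    }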
+   * QuotaExceededException is thrown if it violates quota limit */
+   private <T extends INode> T addNode(String src, T child,
+       long childDiskspace, boolean inheritPermission)
+       throws QuotaExceededException {
+     byte[][] components = INode.getPathComponents(src);
+     byte[] path = components[components.length - 1];
+     child.setLocalName(path);
+     cacheName(child);
+     INode[] inodes = new INode[components.length];
+     writeLock();
+     try {
+       rootDir.getExistingPathINodes(components, inodes);
+       return addChild(inodes, inodes.length-1, child, childDiskspace,
+           inheritPermission);
+     } finally {
+       writeUnlock();
+     }
+   }
+
+   /**
+    * Verify quota for adding or moving a new INode with required
+    * namespace and diskspace to a given position.
+    *
+    * @param inodes INodes corresponding to a path
+    * @param pos position where a new INode will be added
+    * @param nsDelta needed namespace
+    * @param dsDelta needed diskspace
+    * @param commonAncestor Last node in inodes array that is a common ancestor
+    *          for a INode that is being moved from one location to the other.
+    *          Pass null if a node is not being moved.
+    * @throws QuotaExceededException if quota limit is exceeded.
+    */
+   private void verifyQuota(INode[] inodes, int pos, long nsDelta, long dsDelta,
+       INode commonAncestor) throws QuotaExceededException {
+     if (!ready) {
+       // Do not check quota if edits log is still being processed
+       return;
+     }
+     if (pos>inodes.length) {
+       pos = inodes.length;
+     }
+     int i = pos - 1;
+     try {
+       // check existing components in the path
+       for(; i >= 0; i--) {
+         if (commonAncestor == inodes[i]) {
+           // Moving an existing node. Stop checking for quota when common
+           // ancestor is reached
+           return;
+         }
+         if (inodes[i].isQuotaSet()) { // a directory with quota
+           INodeDirectoryWithQuota node =(INodeDirectoryWithQuota)inodes[i];
+           node.verifyQuota(nsDelta, dsDelta);
+         }
+       }
+     } catch (QuotaExceededException e) {
+       e.setPathName(getFullPathName(inodes, i));
+       throw e;
+     }
+   }
+
+   /**
+    * Verify quota for rename operation where srcInodes[srcInodes.length-1] moves
+    * dstInodes[dstInodes.length-1]
+    *
+    * @param srcInodes directory from where node is being moved.
+    * @param dstInodes directory to where node is moved to.
+    * @throws QuotaExceededException if quota limit is exceeded.
+    */
+   private void verifyQuotaForRename(INode[] srcInodes, INode[] dstInodes)
+       throws QuotaExceededException {
+     if (!ready) {
+       // Do not check quota if edits log is still being processed
+       return;
+     }
+     INode srcInode = srcInodes[srcInodes.length - 1];
+     INode commonAncestor = null;
+     for(int i =0;srcInodes[i] == dstInodes[i]; i++) {
+       commonAncestor = srcInodes[i];
+     }
+     INode.DirCounts counts = new INode.DirCounts();
+     srcInode.spaceConsumedInTree(counts);
+     verifyQuota(dstInodes, dstInodes.length - 1, counts.getNsCount(),
+         counts.getDsCount(), commonAncestor);
+   }
+
+   /** Add a node child to the inodes at index pos.
+    * Its ancestors are stored at [0, pos-1].
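verifyQuotaForRename above stops charging quota at the deepest directory that the source and destination paths share, because that common ancestor (and everything above it) already accounts for the subtree being moved. A standalone sketch of just that common-prefix idea on plain path components; this is a simplified model, not the INode code:

    public class CommonAncestorSketch {
      /** Number of leading path components shared by src and dst. */
      static int commonDepth(String[] src, String[] dst) {
        int i = 0;
        while (i < src.length && i < dst.length && src[i].equals(dst[i])) {
          i++;
        }
        return i;
      }

      public static void main(String[] args) {
        String[] src = { "", "a", "b", "file" };   // /a/b/file
        String[] dst = { "", "a", "c", "file" };   // /a/c/file
        // quota deltas only need to be verified on dst ancestors deeper than the shared prefix (/a here)
        System.out.println("common ancestor depth = " + commonDepth(src, dst));
      }
    }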
+    * QuotaExceededException is thrown if it violates quota limit */
+   private <T extends INode> T addChild(INode[] pathComponents, int pos,
+       T child, long childDiskspace, boolean inheritPermission,
+       boolean checkQuota) throws QuotaExceededException {
+     INode.DirCounts counts = new INode.DirCounts();
+     child.spaceConsumedInTree(counts);
+     if (childDiskspace < 0) {
+       childDiskspace = counts.getDsCount();
+     }
+     updateCount(pathComponents, pos, counts.getNsCount(), childDiskspace,
+         checkQuota);
+     T addedNode = ((INodeDirectory)pathComponents[pos-1]).addChild(
+         child, inheritPermission);
+     if (addedNode == null) {
+       updateCount(pathComponents, pos, -counts.getNsCount(),
+           -childDiskspace, true);
+     }
+     return addedNode;
+   }
+
+   private <T extends INode> T addChild(INode[] pathComponents, int pos,
+       T child, long childDiskspace, boolean inheritPermission)
+       throws QuotaExceededException {
+     return addChild(pathComponents, pos, child, childDiskspace,
+         inheritPermission, true);
+   }
+
+   private <T extends INode> T addChildNoQuotaCheck(INode[] pathComponents,
+       int pos, T child, long childDiskspace, boolean inheritPermission) {
+     T inode = null;
+     try {
+       inode = addChild(pathComponents, pos, child, childDiskspace,
+           inheritPermission, false);
+     } catch (QuotaExceededException e) {
+       NameNode.LOG.warn("FSDirectory.addChildNoQuotaCheck - unexpected", e);
+     }
+     return inode;
+   }
+
+   /** Remove an inode at index pos from the namespace.
+    * Its ancestors are stored at [0, pos-1].
+    * Count of each ancestor with quota is also updated.
+    * Return the removed node; null if the removal fails.
+    */
+   private INode removeChild(INode[] pathComponents, int pos) {
+     INode removedNode =
+       ((INodeDirectory)pathComponents[pos-1]).removeChild(pathComponents[pos]);
+     if (removedNode != null) {
+       INode.DirCounts counts = new INode.DirCounts();
+       removedNode.spaceConsumedInTree(counts);
+       updateCountNoQuotaCheck(pathComponents, pos,
+           -counts.getNsCount(), -counts.getDsCount());
+     }
+     return removedNode;
+   }
+
+   /**
+    */
+   String normalizePath(String src) {
+     if (src.length() > 1 && src.endsWith("/")) {
+       src = src.substring(0, src.length() - 1);
+     }
+     return src;
+   }
+
+   ContentSummary getContentSummary(String src) throws IOException {
+     String srcs = normalizePath(src);
+     readLock();
+     try {
+       INode targetNode = rootDir.getNode(srcs);
+       if (targetNode == null) {
+         throw new FileNotFoundException("File does not exist: " + srcs);
+       }
+       else {
+         return targetNode.computeContentSummary();
+       }
+     } finally {
+       readUnlock();
+     }
+   }
+
+   /** Update the count of each directory with quota in the namespace
+    * A directory's count is defined as the total number of inodes in the tree
+    * rooted at the directory.
+    *
+    * This is an update of existing state of the filesystem and does not
+    * throw QuotaExceededException.
+    */
+   void updateCountForINodeWithQuota() {
+     updateCountForINodeWithQuota(rootDir, new INode.DirCounts(),
+         new ArrayList<INodeDirectory>(50));
+   }
+
+   /**
+    * Update the count of the directory if it has a quota and return the count
+    *
+    * This does not throw a QuotaExceededException. This is just an update
+    * of existing state and throwing QuotaExceededException does not help
+    * with fixing the state, if there is a problem.
+    *
+    * @param dir the root of the tree that represents the directory
+    * @param counts counters for name space and disk space
+    * @param nodesInPath INodes for each of the components in the path.
+    * @return the size of the tree
+    */
+   private static void updateCountForINodeWithQuota(INodeDirectory dir,
+       INode.DirCounts counts,
+       ArrayList<INodeDirectory> nodesInPath) {
+     long parentNamespace = counts.nsCount;
+     long parentDiskspace = counts.dsCount;
+
+     counts.nsCount = 1L;//for self. should not call node.spaceConsumedInTree()
+     counts.dsCount = 0L;
+
+     /* We don't need nodesInPath if we could use 'parent' field in
+      * INode. using 'parent' is not currently recommended. */
+     nodesInPath.add(dir);
+
+     for (INode child : dir.getChildren()) {
+       if (child.isDirectory()) {
+         updateCountForINodeWithQuota((INodeDirectory)child,
+             counts, nodesInPath);
+       } else { // reduce recursive calls
+         counts.nsCount += 1;
+         counts.dsCount += ((INodeFile)child).diskspaceConsumed();
+       }
+     }
+
+     if (dir.isQuotaSet()) {
+       ((INodeDirectoryWithQuota)dir).setSpaceConsumed(counts.nsCount,
+           counts.dsCount);
+
+       // check if quota is violated for some reason.
+       if ((dir.getNsQuota() >= 0 && counts.nsCount > dir.getNsQuota()) ||
+           (dir.getDsQuota() >= 0 && counts.dsCount > dir.getDsQuota())) {
+
+         // can only happen because of a software bug. the bug should be fixed.
+         StringBuilder path = new StringBuilder(512);
+         for (INode n : nodesInPath) {
+           path.append('/');
+           path.append(n.getLocalName());
+         }
+
+         NameNode.LOG.warn("Quota violation in image for " + path +
+             " (Namespace quota : " + dir.getNsQuota() +
+             " consumed : " + counts.nsCount + ")" +
+             " (Diskspace quota : " + dir.getDsQuota() +
+             " consumed : " + counts.dsCount + ").");
+       }
+     }
+
+     // pop
+     nodesInPath.remove(nodesInPath.size()-1);
+
+     counts.nsCount += parentNamespace;
+     counts.dsCount += parentDiskspace;
+   }
+
+   /**
+    * See {@link ClientProtocol#setQuota(String, long, long)} for the contract.
+    * Sets quota for a directory.
+    * @return INodeDirectory if any of the quotas have changed. null otherwise.
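updateCountForINodeWithQuota above makes a single recursive pass over the namespace at startup, accumulating an inode count and a diskspace total and stamping them into every directory that carries a quota. The same accumulation pattern on a local directory tree, as a self-contained analogue (bytes here are raw file lengths, with no replication factor):

    import java.io.File;

    public class SubtreeCountSketch {
      /** Returns { number of entries including dir itself, total bytes under dir }. */
      static long[] count(File dir) {
        long ns = 1;
        long ds = 0;
        File[] children = dir.listFiles();
        if (children != null) {
          for (File child : children) {
            if (child.isDirectory()) {
              long[] sub = count(child);
              ns += sub[0];
              ds += sub[1];
            } else {
              ns += 1;
              ds += child.length();
            }
          }
        }
        return new long[] { ns, ds };
      }

      public static void main(String[] args) {
        long[] totals = count(new File(args.length > 0 ? args[0] : "."));
        System.out.println("entries=" + totals[0] + " bytes=" + totals[1]);
      }
    }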
+ * @throws FileNotFoundException if the path does not exist or is a file + * @throws QuotaExceededException if the directory tree size is + * greater than the given quota + */ + INodeDirectory unprotectedSetQuota(String src, long nsQuota, long dsQuota) + throws FileNotFoundException, QuotaExceededException { + // sanity check + if ((nsQuota < 0 && nsQuota != FSConstants.QUOTA_DONT_SET && + nsQuota < FSConstants.QUOTA_RESET) || + (dsQuota < 0 && dsQuota != FSConstants.QUOTA_DONT_SET && + dsQuota < FSConstants.QUOTA_RESET)) { + throw new IllegalArgumentException("Illegal value for nsQuota or " + + "dsQuota : " + nsQuota + " and " + + dsQuota); + } + + String srcs = normalizePath(src); + INode[] inodes = rootDir.getExistingPathINodes(src); + INode targetNode = inodes[inodes.length-1]; + if (targetNode == null) { + throw new FileNotFoundException("Directory does not exist: " + srcs); + } else if (!targetNode.isDirectory()) { + throw new FileNotFoundException("Cannot set quota on a file: " + srcs); + } else { // a directory inode + INodeDirectory dirNode = (INodeDirectory)targetNode; + long oldNsQuota = dirNode.getNsQuota(); + long oldDsQuota = dirNode.getDsQuota(); + if (nsQuota == FSConstants.QUOTA_DONT_SET) { + nsQuota = oldNsQuota; + } + if (dsQuota == FSConstants.QUOTA_DONT_SET) { + dsQuota = oldDsQuota; + } + + if (dirNode instanceof INodeDirectoryWithQuota) { + // a directory with quota; so set the quota to the new value + ((INodeDirectoryWithQuota)dirNode).setQuota(nsQuota, dsQuota); + } else { + // a non-quota directory; so replace it with a directory with quota + INodeDirectoryWithQuota newNode = + new INodeDirectoryWithQuota(nsQuota, dsQuota, dirNode); + // non-root directory node; parent != null + INodeDirectory parent = (INodeDirectory)inodes[inodes.length-2]; + dirNode = newNode; + parent.replaceChild(newNode); + } + return (oldNsQuota != nsQuota || oldDsQuota != dsQuota) ? dirNode : null; + } + } + + /** + * See {@link ClientProtocol#setQuota(String, long, long)} for the + * contract. + * @see #unprotectedSetQuota(String, long, long) + */ + void setQuota(String src, long nsQuota, long dsQuota) + throws FileNotFoundException, QuotaExceededException { + writeLock(); + try { + INodeDirectory dir = unprotectedSetQuota(src, nsQuota, dsQuota); + if (dir != null) { + fsImage.getEditLog().logSetQuota(src, dir.getNsQuota(), + dir.getDsQuota()); + } + } finally { + writeUnlock(); + } + } + + long totalInodes() { + readLock(); + try { + return rootDir.numItemsInTree(); + } finally { + readUnlock(); + } + } + + /** + * Sets the access time on the file. 
Logs it in the transaction log + */ + void setTimes(String src, INodeFile inode, long mtime, long atime, boolean force) + throws IOException { + if (unprotectedSetTimes(src, inode, mtime, atime, force)) { + fsImage.getEditLog().logTimes(src, mtime, atime); + } + } + + boolean unprotectedSetTimes(String src, long mtime, long atime, boolean force) + throws IOException { + INodeFile inode = getFileINode(src); + return unprotectedSetTimes(src, inode, mtime, atime, force); + } + + private boolean unprotectedSetTimes(String src, INodeFile inode, long mtime, + long atime, boolean force) throws IOException { + boolean status = false; + if (mtime != -1) { + inode.setModificationTimeForce(mtime); + status = true; + } + if (atime != -1) { + long inodeTime = inode.getAccessTime(); + + // if the last access time update was within the last precision interval, then + // no need to store access time + if (atime <= inodeTime + namesystem.getAccessTimePrecision() && !force) { + status = false; + } else { + inode.setAccessTime(atime); + status = true; + } + } + return status; + } + + /** + * Create HdfsFileStatus by file INode + */ + static FileStatus createFileStatus(String path, INode node) { + // length is zero for directories + return new FileStatus(node.isDirectory() ? 0 : node.computeContentSummary().getLength(), + node.isDirectory(), + node.isDirectory() ? 0 : ((INodeFile)node).getReplication(), + node.isDirectory() ? 0 : ((INodeFile)node).getPreferredBlockSize(), + node.getModificationTime(), + node.getAccessTime(), + node.getFsPermission(), + node.getUserName(), + node.getGroupName(), + new Path(path)); + } + + /** + * Create HdfsFileStatus by file INode + */ + private HdfsFileStatus createHdfsFileStatus(byte[] path, INode node) { + long size = 0; // length is zero for directories + short replication = 0; + long blocksize = 0; + if (node instanceof INodeFile) { + INodeFile fileNode = (INodeFile)node; + size = fileNode.computeContentSummary().getLength(); + replication = fileNode.getReplication(); + blocksize = fileNode.getPreferredBlockSize(); + } + return new HdfsFileStatus( + size, + node.isDirectory(), + replication, + blocksize, + node.getModificationTime(), + node.getAccessTime(), + node.getFsPermission(), + node.getUserName(), + node.getGroupName(), + path); + } + + /** a default LocatedBlocks object, its content should not be changed */ + private final static LocatedBlocks EMPTY_BLOCK_LOCS = new LocatedBlocks(); + /** + * Create FileStatus with location info by file INode + */ + private LocatedBlocks createLocatedBlocks(INode node) throws IOException { + LocatedBlocks loc = null; + if (node instanceof INodeFile) { + loc = namesystem.getBlockLocationsInternal( + (INodeFile)node, 0L, Integer.MAX_VALUE, Integer.MAX_VALUE); + } + if (loc==null) { + loc = EMPTY_BLOCK_LOCS; + } + return loc; + } + + /** + * Caches frequently used file names to reuse file name objects and + * reduce heap size. 
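cacheName below reuses one byte[] for every file that shares a popular local name (for example part-00000), which trims NameNode heap when millions of such files exist. A much-simplified, unbounded sketch of that interning idea; the real nameCache may additionally limit what it keeps:

    import java.nio.ByteBuffer;
    import java.util.HashMap;

    public class NameInternerSketch {
      private final HashMap<ByteBuffer, byte[]> cache = new HashMap<ByteBuffer, byte[]>();

      /** Return a canonical byte[] with the same contents as name. */
      public synchronized byte[] intern(byte[] name) {
        ByteBuffer key = ByteBuffer.wrap(name);   // equals/hashCode are content based
        byte[] canonical = cache.get(key);
        if (canonical == null) {
          cache.put(key, name);
          return name;
        }
        return canonical;
      }
    }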
+ */ + void cacheName(INode inode) { + // Name is cached only for files + if (inode.isDirectory()) { + return; + } + ByteArray name = new ByteArray(inode.getLocalNameBytes()); + name = nameCache.put(name); + if (name != null) { + inode.setLocalName(name.getBytes()); + } + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java new file mode 100644 index 0000000..fc156cd --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -0,0 +1,1383 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.BufferedInputStream; +import java.io.DataInput; +import java.io.DataInputStream; +import java.io.DataOutput; +import java.io.EOFException; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.util.ArrayList; +import java.util.Iterator; +import java.lang.Math; +import java.nio.channels.FileChannel; +import java.nio.ByteBuffer; + +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; +import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.Storage; +import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; +import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType; +import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; +import org.apache.hadoop.io.*; +import org.apache.hadoop.fs.permission.*; + +/** + * FSEditLog maintains a log of the namespace modifications. + * + */ +public class FSEditLog { + private static final byte OP_INVALID = -1; + private static final byte OP_ADD = 0; + private static final byte OP_RENAME = 1; // rename + private static final byte OP_DELETE = 2; // delete + private static final byte OP_MKDIR = 3; // create directory + private static final byte OP_SET_REPLICATION = 4; // set replication + //the following two are used only for backward compatibility : + @Deprecated private static final byte OP_DATANODE_ADD = 5; + @Deprecated private static final byte OP_DATANODE_REMOVE = 6; + private static final byte OP_SET_PERMISSIONS = 7; + private static final byte OP_SET_OWNER = 8; + private static final byte OP_CLOSE = 9; // close after write + private static final byte OP_SET_GENSTAMP = 10; // store genstamp + /* The following two are not used any more. Should be removed once + * LAST_UPGRADABLE_LAYOUT_VERSION is -17 or newer. 
*/ + private static final byte OP_SET_NS_QUOTA = 11; // set namespace quota + private static final byte OP_CLEAR_NS_QUOTA = 12; // clear namespace quota + private static final byte OP_TIMES = 13; // sets mod & access time on a file + private static final byte OP_SET_QUOTA = 14; // sets name and disk quotas. + private static final byte OP_CONCAT_DELETE = 16; // concat files. + private static int sizeFlushBuffer = HdfsConstants.DEFAULT_EDIT_BUFFER_SIZE; + private static long preallocateSize= HdfsConstants.DEFAULT_EDIT_PREALLOCATE_SIZE; + private static long maxBufferedTransactions= HdfsConstants.DEFAULT_MAX_BUFFERED_TRANSACTIONS; + + private ArrayList editStreams = null; + private FSImage fsimage = null; + + // a monotonically increasing counter that represents transactionIds. + private long txid = 0; + + // stores the last synced transactionId. + private long synctxid = 0; + + // the time of printing the statistics to the log file. + private long lastPrintTime; + + // is a sync currently running? + private boolean isSyncRunning; + + // these are statistics counters. + private long numTransactions; // number of transactions + private long numTransactionsBatchedInSync; + private long totalTimeTransactions; // total time for all transactions + private NameNodeMetrics metrics; + + private static class TransactionId { + public long txid; + + TransactionId(long value) { + this.txid = value; + } + } + + // stores the most current transactionId of this thread. + private static final ThreadLocal myTransactionId = new ThreadLocal() { + protected synchronized TransactionId initialValue() { + return new TransactionId(Long.MAX_VALUE); + } + }; + + /** + * An implementation of the abstract class {@link EditLogOutputStream}, + * which stores edits in a local file. + */ + static private class EditLogFileOutputStream extends EditLogOutputStream { + private File file; + private FileOutputStream fp; // file stream for storing edit logs + private FileChannel fc; // channel of the file stream for sync + private DataOutputBuffer bufCurrent; // current buffer for writing + private DataOutputBuffer bufReady; // buffer ready for flushing + static ByteBuffer fill = ByteBuffer.allocateDirect(512); // preallocation + + EditLogFileOutputStream(File name) throws IOException { + super(); + FSNamesystem.LOG.info("Edit Log preallocate size for " + name + + " is " + preallocateSize + " bytes " + + " and initial size of edits buffer is " + + sizeFlushBuffer + " bytes. " + + "Max number of buffered transactions is " + + maxBufferedTransactions); + file = name; + bufCurrent = new DataOutputBuffer(sizeFlushBuffer); + bufReady = new DataOutputBuffer(sizeFlushBuffer); + RandomAccessFile rp = new RandomAccessFile(name, "rw"); + fp = new FileOutputStream(rp.getFD()); // open for append + fc = rp.getChannel(); + fc.position(fc.size()); + } + + @Override + String getName() { + return file.getPath(); + } + + /** {@inheritDoc} */ + @Override + public void write(int b) throws IOException { + bufCurrent.write(b); + } + + /** {@inheritDoc} */ + @Override + void write(byte op, Writable ... writables) throws IOException { + write(op); + for(Writable w : writables) { + w.write(bufCurrent); + } + } + + /** + * Create empty edits logs file. 
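EditLogFileOutputStream keeps a pair of buffers: edits are appended to bufCurrent while bufReady is being flushed and synced, and the two are swapped in setReadyToFlush/flushAndSync further down. A toy version of that double-buffering pattern, assuming a single syncing thread as in FSEditLog; the file name and class are illustrative:

    import java.io.ByteArrayOutputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;

    public class DoubleBufferSketch {
      private ByteArrayOutputStream bufCurrent = new ByteArrayOutputStream();
      private ByteArrayOutputStream bufReady = new ByteArrayOutputStream();
      private final FileOutputStream out;

      public DoubleBufferSketch(String path) throws IOException {
        out = new FileOutputStream(path, true);    // append to the log file
      }

      public synchronized void write(byte[] record) throws IOException {
        bufCurrent.write(record);                  // cheap in-memory append
      }

      /** Swap under the lock; new writes keep landing in the (now empty) current buffer. */
      public synchronized void setReadyToFlush() {
        ByteArrayOutputStream tmp = bufReady;
        bufReady = bufCurrent;
        bufCurrent = tmp;
      }

      /** Flush the ready buffer; only the single syncing thread touches bufReady here. */
      public void flushAndSync() throws IOException {
        bufReady.writeTo(out);
        bufReady.reset();
        out.getFD().sync();                        // force to disk
      }
    }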
+ */ + @Override + void create() throws IOException { + fc.truncate(0); + fc.position(0); + bufCurrent.writeInt(FSConstants.LAYOUT_VERSION); + setReadyToFlush(); + flush(); + } + + @Override + public void close() throws IOException { + // close should have been called after all pending transactions + // have been flushed & synced. + int bufSize = bufCurrent.size(); + if (bufSize != 0) { + throw new IOException("FSEditStream has " + bufSize + + " bytes still to be flushed and cannot " + + "be closed."); + } + bufCurrent.close(); + bufReady.close(); + + // remove the last INVALID marker from transaction log. + fc.truncate(fc.position()); + fp.close(); + + bufCurrent = bufReady = null; + } + + /** + * All data that has been written to the stream so far will be flushed. + * New data can be still written to the stream while flushing is performed. + */ + @Override + void setReadyToFlush() throws IOException { + assert bufReady.size() == 0 : "previous data is not flushed yet"; + write(OP_INVALID); // insert end-of-file marker + DataOutputBuffer tmp = bufReady; + bufReady = bufCurrent; + bufCurrent = tmp; + } + + /** + * Flush ready buffer to persistent store. + * currentBuffer is not flushed as it accumulates new log records + * while readyBuffer will be flushed and synced. + */ + @Override + protected void flushAndSync() throws IOException { + preallocate(); // preallocate file if necessary + bufReady.writeTo(fp); // write data to file + bufReady.reset(); // erase all data in the buffer + fc.force(false); // metadata updates not needed because of preallocation + fc.position(fc.position()-1); // skip back the end-of-file marker + } + + /** + * Return the size of the current edit log including buffered data. + */ + @Override + long length() throws IOException { + // file size + size of both buffers + return fc.size() + bufReady.size() + bufCurrent.size(); + } + + // allocate a big chunk of data + private void preallocate() throws IOException { + long position = fc.position(); + // if freespace is less than 1% of the preallocationSize or less + // than 4K, then trigger preallocation + long triggerSize = Math.max(preallocateSize / 100, 4096); + if (position + triggerSize >= fc.size()) { + FSNamesystem.LOG.debug("Preallocating Edit log, current size " + + fc.size()); + long newsize = position + preallocateSize; + fill.position(0); + int written = fc.write(fill, newsize); + FSNamesystem.LOG.debug("Edit log size is now " + fc.size() + + " written " + written + " bytes " + + " at offset " + newsize); + } + } + + /** + * Returns the file associated with this stream + */ + File getFile() { + return file; + } + } + + static class EditLogFileInputStream extends EditLogInputStream { + private File file; + private FileInputStream fStream; + + EditLogFileInputStream(File name) throws IOException { + file = name; + fStream = new FileInputStream(name); + } + + @Override + String getName() { + return file.getPath(); + } + + @Override + public int available() throws IOException { + return fStream.available(); + } + + @Override + public int read() throws IOException { + return fStream.read(); + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + return fStream.read(b, off, len); + } + + @Override + public void close() throws IOException { + fStream.close(); + } + + @Override + long length() throws IOException { + // file size + size of both buffers + return file.length(); + } + } + + FSEditLog(FSImage image) { + fsimage = image; + isSyncRunning = false; + metrics = 
NameNode.getNameNodeMetrics(); + lastPrintTime = FSNamesystem.now(); + } + + private File getEditFile(StorageDirectory sd) { + return fsimage.getEditFile(sd); + } + + private File getEditNewFile(StorageDirectory sd) { + return fsimage.getEditNewFile(sd); + } + + private int getNumStorageDirs() { + int numStorageDirs = 0; + for (Iterator it = + fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext(); it.next()) + numStorageDirs++; + return numStorageDirs; + } + + synchronized int getNumEditStreams() { + return editStreams == null ? 0 : editStreams.size(); + } + + boolean isOpen() { + return getNumEditStreams() > 0; + } + + /** + * Create empty edit log files. + * Initialize the output stream for logging. + * + * @throws IOException + */ + public synchronized void open() throws IOException { + numTransactions = totalTimeTransactions = numTransactionsBatchedInSync = 0; + if (editStreams == null) + editStreams = new ArrayList(); + for (Iterator it = + fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) { + StorageDirectory sd = it.next(); + File eFile = getEditFile(sd); + try { + EditLogOutputStream eStream = new EditLogFileOutputStream(eFile); + editStreams.add(eStream); + } catch (IOException e) { + FSNamesystem.LOG.warn("Unable to open edit log file " + eFile); + // Remove the directory from list of storage directories + it.remove(); + } + } + } + + public synchronized void createEditLogFile(File name) throws IOException { + EditLogOutputStream eStream = new EditLogFileOutputStream(name); + eStream.create(); + eStream.close(); + } + + /** + * Create edits.new if non existent. + */ + synchronized void createNewIfMissing() throws IOException { + for (Iterator it = + fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) { + File newFile = getEditNewFile(it.next()); + if (!newFile.exists()) + createEditLogFile(newFile); + } + } + + /** + * Shutdown the file store. + */ + public synchronized void close() throws IOException { + while (isSyncRunning) { + try { + wait(1000); + } catch (InterruptedException ie) { + } + } + if (editStreams == null) { + return; + } + printStatistics(true); + numTransactions = totalTimeTransactions = numTransactionsBatchedInSync = 0; + + for (int idx = 0; idx < editStreams.size(); idx++) { + EditLogOutputStream eStream = editStreams.get(idx); + try { + eStream.setReadyToFlush(); + eStream.flush(); + eStream.close(); + } catch (IOException e) { + processIOError(idx); + idx--; + } + } + editStreams.clear(); + } + + /** + * If there is an IO Error on any log operations, remove that + * directory from the list of directories. + * If no more directories remain, then exit. 
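open() above attaches one output stream per configured edits directory, and the processIOError overloads just below drop a failing directory rather than failing the edit, giving up only when no directory is left. A standalone sketch of that write-to-all, drop-on-error pattern; this sketch throws where the real code exits the process:

    import java.io.IOException;
    import java.io.OutputStream;
    import java.util.ArrayList;
    import java.util.List;

    public class ReplicatedLogSketch {
      private final List<OutputStream> streams = new ArrayList<OutputStream>();

      public ReplicatedLogSketch(List<OutputStream> storageStreams) {
        streams.addAll(storageStreams);
      }

      public synchronized void logEdit(byte[] record) throws IOException {
        for (int idx = 0; idx < streams.size(); idx++) {
          try {
            streams.get(idx).write(record);
          } catch (IOException e) {
            streams.remove(idx);       // stop using the failed directory
            idx--;
          }
        }
        if (streams.isEmpty()) {
          throw new IOException("All storage directories are inaccessible");
        }
      }
    }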
+ */ + synchronized void processIOError(int index) { + if (editStreams == null || editStreams.size() <= 1) { + FSNamesystem.LOG.fatal( + "Fatal Error : All storage directories are inaccessible."); + Runtime.getRuntime().exit(-1); + } + assert(index < getNumStorageDirs()); + assert(getNumStorageDirs() == editStreams.size()); + + File parentStorageDir = ((EditLogFileOutputStream)editStreams + .get(index)).getFile() + .getParentFile().getParentFile(); + editStreams.remove(index); + // + // Invoke the ioerror routine of the fsimage + // + fsimage.processIOError(parentStorageDir); + } + + /** + * If there is an IO Error on any log operations on storage directory, + * remove any stream associated with that directory + */ + synchronized void processIOError(StorageDirectory sd) { + // Try to remove stream only if one should exist + if (!sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) + return; + if (editStreams == null || editStreams.size() <= 1) { + FSNamesystem.LOG.fatal( + "Fatal Error : All storage directories are inaccessible."); + Runtime.getRuntime().exit(-1); + } + for (int idx = 0; idx < editStreams.size(); idx++) { + File parentStorageDir = ((EditLogFileOutputStream)editStreams + .get(idx)).getFile() + .getParentFile().getParentFile(); + if (parentStorageDir.getName().equals(sd.getRoot().getName())) + editStreams.remove(idx); + } + } + + /** + * The specified streams have IO errors. Remove them from logging + * new transactions. + */ + private void processIOError(ArrayList errorStreams) { + if (errorStreams == null) { + return; // nothing to do + } + for (int idx = 0; idx < errorStreams.size(); idx++) { + EditLogOutputStream eStream = errorStreams.get(idx); + int j = 0; + int numEditStreams = editStreams.size(); + for (j = 0; j < numEditStreams; j++) { + if (editStreams.get(j) == eStream) { + break; + } + } + if (j == numEditStreams) { + FSNamesystem.LOG.error("Unable to find sync log on which " + + " IO error occured. " + + "Fatal Error."); + Runtime.getRuntime().exit(-1); + } + processIOError(j); + } + fsimage.incrementCheckpointTime(); + } + + /** + * check if ANY edits.new log exists + */ + boolean existsNew() throws IOException { + for (Iterator it = + fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) { + if (getEditNewFile(it.next()).exists()) { + return true; + } + } + return false; + } + + /** + * Load an edit log, and apply the changes to the in-memory structure + * This is where we apply edits that we've been writing to disk all + * along. + */ + static int loadFSEdits(EditLogInputStream edits) throws IOException { + FSNamesystem fsNamesys = FSNamesystem.getFSNamesystem(); + FSDirectory fsDir = fsNamesys.dir; + int numEdits = 0; + int logVersion = 0; + String clientName = null; + String clientMachine = null; + String path = null; + int numOpAdd = 0, numOpClose = 0, numOpDelete = 0, + numOpRename = 0, numOpSetRepl = 0, numOpMkDir = 0, + numOpSetPerm = 0, numOpSetOwner = 0, numOpSetGenStamp = 0, + numOpTimes = 0, numOpOther = 0, numOpConcatDelete = 0; + long startTime = FSNamesystem.now(); + + DataInputStream in = new DataInputStream(new BufferedInputStream(edits)); + try { + // Read log file version. Could be missing. 
+ in.mark(4); + // If edits log is greater than 2G, available method will return negative + // numbers, so we avoid having to call available + boolean available = true; + try { + logVersion = in.readByte(); + } catch (EOFException e) { + available = false; + } + if (available) { + in.reset(); + logVersion = in.readInt(); + if (logVersion < FSConstants.LAYOUT_VERSION) // future version + throw new IOException( + "Unexpected version of the file system log file: " + + logVersion + ". Current version = " + + FSConstants.LAYOUT_VERSION + "."); + } + assert logVersion <= Storage.LAST_UPGRADABLE_LAYOUT_VERSION : + "Unsupported version " + logVersion; + + while (true) { + long timestamp = 0; + long mtime = 0; + long atime = 0; + long blockSize = 0; + byte opcode = -1; + try { + opcode = in.readByte(); + if (opcode == OP_INVALID) { + FSNamesystem.LOG.info("Invalid opcode, reached end of edit log " + + "Number of transactions found " + numEdits); + break; // no more transactions + } + } catch (EOFException e) { + break; // no more transactions + } + numEdits++; + switch (opcode) { + case OP_ADD: + case OP_CLOSE: { + // versions > 0 support per file replication + // get name and replication + int length = in.readInt(); + if (-7 == logVersion && length != 3|| + -17 < logVersion && logVersion < -7 && length != 4 || + logVersion <= -17 && length != 5) { + throw new IOException("Incorrect data format." + + " logVersion is " + logVersion + + " but writables.length is " + + length + ". "); + } + path = FSImage.readString(in); + short replication = adjustReplication(readShort(in)); + mtime = readLong(in); + if (logVersion <= -17) { + atime = readLong(in); + } + if (logVersion < -7) { + blockSize = readLong(in); + } + // get blocks + Block blocks[] = null; + if (logVersion <= -14) { + blocks = readBlocks(in); + } else { + BlockTwo oldblk = new BlockTwo(); + int num = in.readInt(); + blocks = new Block[num]; + for (int i = 0; i < num; i++) { + oldblk.readFields(in); + blocks[i] = new Block(oldblk.blkid, oldblk.len, + Block.GRANDFATHER_GENERATION_STAMP); + } + } + + // Older versions of HDFS does not store the block size in inode. + // If the file has more than one block, use the size of the + // first block as the blocksize. Otherwise use the default + // block size. + if (-8 <= logVersion && blockSize == 0) { + if (blocks.length > 1) { + blockSize = blocks[0].getNumBytes(); + } else { + long first = ((blocks.length == 1)? blocks[0].getNumBytes(): 0); + blockSize = Math.max(fsNamesys.getDefaultBlockSize(), first); + } + } + + PermissionStatus permissions = fsNamesys.getUpgradePermission(); + if (logVersion <= -11) { + permissions = PermissionStatus.read(in); + } + + // clientname, clientMachine and block locations of last block. + if (opcode == OP_ADD && logVersion <= -12) { + clientName = FSImage.readString(in); + clientMachine = FSImage.readString(in); + if (-13 <= logVersion) { + readDatanodeDescriptorArray(in); + } + } else { + clientName = ""; + clientMachine = ""; + } + + // The open lease transaction re-creates a file if necessary. + // Delete the file if it already exists. 
+ if (FSNamesystem.LOG.isDebugEnabled()) { + FSNamesystem.LOG.debug(opcode + ": " + path + + " numblocks : " + blocks.length + + " clientHolder " + clientName + + " clientMachine " + clientMachine); + } + + fsDir.unprotectedDelete(path, mtime); + + // add to the file tree + INodeFile node = (INodeFile)fsDir.unprotectedAddFile( + path, permissions, + blocks, replication, + mtime, atime, blockSize); + if (opcode == OP_ADD) { + numOpAdd++; + // + // Replace current node with a INodeUnderConstruction. + // Recreate in-memory lease record. + // + INodeFileUnderConstruction cons = new INodeFileUnderConstruction( + node.getLocalNameBytes(), + node.getReplication(), + node.getModificationTime(), + node.getPreferredBlockSize(), + node.getBlocks(), + node.getPermissionStatus(), + clientName, + clientMachine, + null); + fsDir.replaceNode(path, node, cons); + fsNamesys.leaseManager.addLease(cons.clientName, path); + } + break; + } + case OP_SET_REPLICATION: { + numOpSetRepl++; + path = FSImage.readString(in); + short replication = adjustReplication(readShort(in)); + fsDir.unprotectedSetReplication(path, replication, null); + break; + } + case OP_CONCAT_DELETE: { + if (logVersion > -22) { + throw new IOException("Unexpected opcode " + opcode + + " for version " + logVersion); + } + numOpConcatDelete++; + int length = in.readInt(); + if (length < 3) { // trg, srcs.., timestam + throw new IOException("Incorrect data format. " + + "Concat operation."); + } + String trg = FSImage.readString(in); + int srcSize = length - 1 - 1; //trg and timestamp + String [] srcs = new String [srcSize]; + for(int i=0; i -11) + throw new IOException("Unexpected opcode " + opcode + + " for version " + logVersion); + fsDir.unprotectedSetPermission( + FSImage.readString(in), FsPermission.read(in)); + break; + } + case OP_SET_OWNER: { + numOpSetOwner++; + if (logVersion > -11) + throw new IOException("Unexpected opcode " + opcode + + " for version " + logVersion); + fsDir.unprotectedSetOwner(FSImage.readString(in), + FSImage.readString_EmptyAsNull(in), + FSImage.readString_EmptyAsNull(in)); + break; + } + case OP_SET_NS_QUOTA: { + if (logVersion > -16) { + throw new IOException("Unexpected opcode " + opcode + + " for version " + logVersion); + } + fsDir.unprotectedSetQuota(FSImage.readString(in), + readLongWritable(in), + FSConstants.QUOTA_DONT_SET); + break; + } + case OP_CLEAR_NS_QUOTA: { + if (logVersion > -16) { + throw new IOException("Unexpected opcode " + opcode + + " for version " + logVersion); + } + fsDir.unprotectedSetQuota(FSImage.readString(in), + FSConstants.QUOTA_RESET, + FSConstants.QUOTA_DONT_SET); + break; + } + + case OP_SET_QUOTA: + fsDir.unprotectedSetQuota(FSImage.readString(in), + readLongWritable(in), + readLongWritable(in)); + + break; + + case OP_TIMES: { + numOpTimes++; + int length = in.readInt(); + if (length != 3) { + throw new IOException("Incorrect data format. 
" + + "times operation."); + } + path = FSImage.readString(in); + mtime = readLong(in); + atime = readLong(in); + fsDir.unprotectedSetTimes(path, mtime, atime, true); + break; + } + default: { + throw new IOException("Never seen opcode " + opcode); + } + } + } + } finally { + in.close(); + } + FSImage.LOG.info("Edits file " + edits.getName() + + " of size " + edits.length() + " edits # " + numEdits + + " loaded in " + (FSNamesystem.now()-startTime)/1000 + " seconds."); + + if (FSImage.LOG.isDebugEnabled()) { + FSImage.LOG.debug("numOpAdd = " + numOpAdd + " numOpClose = " + numOpClose + + " numOpDelete = " + numOpDelete + " numOpRename = " + numOpRename + + " numOpSetRepl = " + numOpSetRepl + " numOpMkDir = " + numOpMkDir + + " numOpSetPerm = " + numOpSetPerm + + " numOpSetOwner = " + numOpSetOwner + + " numOpSetGenStamp = " + numOpSetGenStamp + + " numOpTimes = " + numOpTimes + + " numOpConcatDelete = " + numOpConcatDelete + + " numOpOther = " + numOpOther); + } + + if (logVersion != FSConstants.LAYOUT_VERSION) // other version + numEdits++; // save this image asap + return numEdits; + } + + // a place holder for reading a long + private static final LongWritable longWritable = new LongWritable(); + + /** Read an integer from an input stream */ + private static long readLongWritable(DataInputStream in) throws IOException { + synchronized (longWritable) { + longWritable.readFields(in); + return longWritable.get(); + } + } + + static short adjustReplication(short replication) { + FSNamesystem fsNamesys = FSNamesystem.getFSNamesystem(); + short minReplication = fsNamesys.getMinReplication(); + if (replicationmaxReplication) { + replication = maxReplication; + } + return replication; + } + + /** + * Write an operation to the edit log. Do not sync to persistent + * store yet. + */ + synchronized void logEdit(byte op, Writable ... writables) { + assert this.getNumEditStreams() > 0 : "no editlog streams"; + long start = FSNamesystem.now(); + for (int idx = 0; idx < editStreams.size(); idx++) { + EditLogOutputStream eStream = editStreams.get(idx); + try { + eStream.write(op, writables); + } catch (IOException ie) { + processIOError(idx); + // processIOError will remove the idx's stream + // from the editStreams collection, so we need to update idx + idx--; + } + } + // get a new transactionId + txid++; + + // + // record the transactionId when new data was written to the edits log + // + TransactionId id = myTransactionId.get(); + id.txid = txid; + + // update statistics + long end = FSNamesystem.now(); + numTransactions++; + totalTimeTransactions += (end-start); + if (metrics != null) { // Metrics is non-null only when used inside name node + metrics.transactions.inc((end-start)); + metrics.numBufferedTransactions.set((int)(txid-synctxid)); + } + } + + /** + * Syncs all pending transactions from all threads. + */ + synchronized void logSyncAll() throws IOException { + // stores in the Thread local variable of current threads + TransactionId id = myTransactionId.get(); + id.txid = txid; + logSync(); + } + + /** + * if there are too many transactions that are yet to be synced, + * then sync them. Otherwise, the in-memory buffer that keeps + * the transactions would grow to be very very big. This can happen + * when there are a large number of listStatus calls which update + * the access time of files. 
+ */ + public void logSyncIfNeeded() throws IOException { + boolean doSync = false; + synchronized (this) { + if (txid > synctxid + maxBufferedTransactions) { + FSNamesystem.LOG.info("Out of band log sync triggered " + + " because there are " + + (txid-synctxid) + + " buffered transactions which " + + " is more than the configured limit of " + + maxBufferedTransactions); + doSync = true; + } + } + if (doSync) { + logSync(); + } + } + + // + // Sync all modifications done by this thread. + // + public void logSync() throws IOException { + ArrayList errorStreams = null; + long syncStart = 0; + + // Fetch the transactionId of this thread. + long mytxid = myTransactionId.get().txid; + + final int numEditStreams; + synchronized (this) { + numEditStreams = editStreams.size(); + assert numEditStreams > 0 : "no editlog streams"; + printStatistics(false); + + // if somebody is already syncing, then wait + while (mytxid > synctxid && isSyncRunning) { + try { + wait(1000); + } catch (InterruptedException ie) { + } + } + + // + // If this transaction was already flushed, then nothing to do + // + if (mytxid <= synctxid) { + numTransactionsBatchedInSync++; + if (metrics != null) // Metrics is non-null only when used inside name node + metrics.transactionsBatchedInSync.inc(); + return; + } + + // now, this thread will do the sync + syncStart = txid; + isSyncRunning = true; + + // swap buffers + for (int idx = 0; idx < numEditStreams; idx++) { + editStreams.get(idx).setReadyToFlush(); + } + } + + // do the sync + long start = FSNamesystem.now(); + for (int idx = 0; idx < numEditStreams; idx++) { + EditLogOutputStream eStream = editStreams.get(idx); + try { + eStream.flush(); + } catch (IOException ie) { + // + // remember the streams that encountered an error. + // + if (errorStreams == null) { + errorStreams = new ArrayList(1); + } + errorStreams.add(eStream); + FSNamesystem.LOG.error("Unable to sync edit log. " + + "Fatal Error."); + } + } + long elapsed = FSNamesystem.now() - start; + + synchronized (this) { + processIOError(errorStreams); + synctxid = syncStart; + isSyncRunning = false; + this.notifyAll(); + } + + if (metrics != null) // Metrics is non-null only when used inside name node + metrics.syncs.inc(elapsed); + } + + // + // print statistics every 1 minute. + // + private void printStatistics(boolean force) { + long now = FSNamesystem.now(); + if (lastPrintTime + 60000 > now && !force) { + return; + } + if (editStreams == null || editStreams.size()==0) { + return; + } + lastPrintTime = now; + StringBuilder buf = new StringBuilder(); + buf.append("Number of transactions: "); + buf.append(numTransactions); + buf.append(" Total time for transactions(ms): "); + buf.append(totalTimeTransactions); + buf.append(" Number of transactions batched in Syncs: "); + buf.append(numTransactionsBatchedInSync); + buf.append(" Number of syncs: "); + buf.append(editStreams.get(0).getNumSync()); + buf.append(" SyncTimes(ms): "); + + int numEditStreams = editStreams.size(); + for (int idx = 0; idx < numEditStreams; idx++) { + EditLogOutputStream eStream = editStreams.get(idx); + buf.append(" " + eStream.getName() + ":"); + buf.append(eStream.getTotalSyncTime()); + buf.append(" "); + } + FSNamesystem.LOG.info(buf); + } + + /** + * Add open lease record to edit log. + * Records the block locations of the last block. 
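+
+  // A minimal, self-contained sketch of the group-commit pattern that
+  // logSync() above implements (illustrative only; this class and its field
+  // names are not part of the patch). Writers take a transaction id under the
+  // lock; whichever thread performs the sync flushes everything up to the id
+  // it observed, so concurrent callers whose transaction is already covered
+  // return without issuing a sync of their own.
+  static class GroupCommitSketch {
+    private long txid = 0;          // last transaction handed out
+    private long synctxid = 0;      // last transaction known to be durable
+    private boolean syncRunning = false;
+
+    synchronized long write() {     // stand-in for logEdit()
+      return ++txid;
+    }
+
+    void sync(long mytxid) throws InterruptedException {
+      long syncStart;
+      synchronized (this) {
+        while (mytxid > synctxid && syncRunning) {
+          wait(1000);               // someone else is syncing; wait for them
+        }
+        if (mytxid <= synctxid) {
+          return;                   // our transaction was batched into that sync
+        }
+        syncStart = txid;           // become the syncer for everything so far
+        syncRunning = true;
+      }
+      flush();                      // the expensive part, done outside the lock
+      synchronized (this) {
+        synctxid = syncStart;
+        syncRunning = false;
+        notifyAll();
+      }
+    }
+
+    private void flush() { /* stands in for EditLogOutputStream.flush() */ }
+  }
+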
+ */ + public void logOpenFile(String path, INodeFileUnderConstruction newNode) + throws IOException { + + UTF8 nameReplicationPair[] = new UTF8[] { + new UTF8(path), + FSEditLog.toLogReplication(newNode.getReplication()), + FSEditLog.toLogLong(newNode.getModificationTime()), + FSEditLog.toLogLong(newNode.getAccessTime()), + FSEditLog.toLogLong(newNode.getPreferredBlockSize())}; + logEdit(OP_ADD, + new ArrayWritable(UTF8.class, nameReplicationPair), + new ArrayWritable(Block.class, newNode.getBlocks()), + newNode.getPermissionStatus(), + new UTF8(newNode.getClientName()), + new UTF8(newNode.getClientMachine())); + } + + /** + * Add close lease record to edit log. + */ + public void logCloseFile(String path, INodeFile newNode) { + UTF8 nameReplicationPair[] = new UTF8[] { + new UTF8(path), + FSEditLog.toLogReplication(newNode.getReplication()), + FSEditLog.toLogLong(newNode.getModificationTime()), + FSEditLog.toLogLong(newNode.getAccessTime()), + FSEditLog.toLogLong(newNode.getPreferredBlockSize())}; + logEdit(OP_CLOSE, + new ArrayWritable(UTF8.class, nameReplicationPair), + new ArrayWritable(Block.class, newNode.getBlocks()), + newNode.getPermissionStatus()); + } + + /** + * Add create directory record to edit log + */ + public void logMkDir(String path, INode newNode) { + UTF8 info[] = new UTF8[] { + new UTF8(path), + FSEditLog.toLogLong(newNode.getModificationTime()), + FSEditLog.toLogLong(newNode.getAccessTime()) + }; + logEdit(OP_MKDIR, new ArrayWritable(UTF8.class, info), + newNode.getPermissionStatus()); + } + + /** + * Add rename record to edit log + * TODO: use String parameters until just before writing to disk + */ + void logRename(String src, String dst, long timestamp) { + UTF8 info[] = new UTF8[] { + new UTF8(src), + new UTF8(dst), + FSEditLog.toLogLong(timestamp)}; + logEdit(OP_RENAME, new ArrayWritable(UTF8.class, info)); + } + + /** + * Add set replication record to edit log + */ + void logSetReplication(String src, short replication) { + logEdit(OP_SET_REPLICATION, + new UTF8(src), + FSEditLog.toLogReplication(replication)); + } + + /** Add set namespace quota record to edit log + * + * @param src the string representation of the path to a directory + * @param quota the directory size limit + */ + void logSetQuota(String src, long nsQuota, long dsQuota) { + logEdit(OP_SET_QUOTA, new UTF8(src), + new LongWritable(nsQuota), new LongWritable(dsQuota)); + } + + /** Add set permissions record to edit log */ + void logSetPermissions(String src, FsPermission permissions) { + logEdit(OP_SET_PERMISSIONS, new UTF8(src), permissions); + } + + /** Add set owner record to edit log */ + void logSetOwner(String src, String username, String groupname) { + UTF8 u = new UTF8(username == null? "": username); + UTF8 g = new UTF8(groupname == null? "": groupname); + logEdit(OP_SET_OWNER, new UTF8(src), u, g); + } + + /** + * concat(trg,src..) log + */ + void logConcat(String trg, String [] srcs, long timestamp) { + int size = 1 + srcs.length + 1; // trg, srcs, timestamp + UTF8 info[] = new UTF8[size]; + int idx = 0; + info[idx++] = new UTF8(trg); + for(int i=0; i it = + fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) { + File editsNew = getEditNewFile(it.next()); + if (!editsNew.exists()) { + throw new IOException("Inconsistent existance of edits.new " + + editsNew); + } + } + return; // nothing to do, edits.new exists! 
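+
+  // Note the encoding convention the writers above rely on: replication
+  // factors and times go through toLogReplication/toLogLong, i.e. they are
+  // logged as decimal strings wrapped in UTF8 rather than as binary numbers,
+  // which is why the readShort/readLong helpers in this file parse them back
+  // with Short.parseShort/Long.parseLong. A hedged round-trip sketch of that
+  // idea using plain JDK streams (illustrative only; the real records use
+  // Hadoop's UTF8/ArrayWritable encoding, not writeUTF):
+  static void roundTripLongAsText() throws java.io.IOException {
+    java.io.ByteArrayOutputStream bytes = new java.io.ByteArrayOutputStream();
+    java.io.DataOutputStream out = new java.io.DataOutputStream(bytes);
+    out.writeUTF(Long.toString(1291248717000L));   // write the long as text
+    out.flush();
+    java.io.DataInputStream in = new java.io.DataInputStream(
+        new java.io.ByteArrayInputStream(bytes.toByteArray()));
+    long mtime = Long.parseLong(in.readUTF());     // parse it back out
+    assert mtime == 1291248717000L;
+  }
+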
+ } + + close(); // close existing edit log + + // + // Open edits.new + // + for (Iterator it = + fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) { + StorageDirectory sd = it.next(); + try { + EditLogFileOutputStream eStream = + new EditLogFileOutputStream(getEditNewFile(sd)); + eStream.create(); + editStreams.add(eStream); + } catch (IOException e) { + // remove stream and this storage directory from list + processIOError(sd); + it.remove(); + } + } + } + + /** + * Removes the old edit log and renamed edits.new as edits. + * Reopens the edits file. + */ + synchronized void purgeEditLog() throws IOException { + // + // If edits.new does not exists, then return error. + // + if (!existsNew()) { + throw new IOException("Attempt to purge edit log " + + "but edits.new does not exist."); + } + close(); + + // + // Delete edits and rename edits.new to edits. + // + for (Iterator it = + fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) { + StorageDirectory sd = it.next(); + if (!getEditNewFile(sd).renameTo(getEditFile(sd))) { + // + // renameTo() fails on Windows if the destination + // file exists. + // + getEditFile(sd).delete(); + if (!getEditNewFile(sd).renameTo(getEditFile(sd))) { + // Should we also remove from edits + it.remove(); + } + } + } + // + // Reopen all the edits logs. + // + open(); + } + + /** + * Return the name of the edit file + */ + synchronized File getFsEditName() throws IOException { + StorageDirectory sd = null; + for (Iterator it = + fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) + sd = it.next(); + return getEditFile(sd); + } + + /** + * Returns the timestamp of the edit log + */ + synchronized long getFsEditTime() { + Iterator it = fsimage.dirIterator(NameNodeDirType.EDITS); + if(it.hasNext()) + return getEditFile(it.next()).lastModified(); + return 0; + } + + // sets the initial capacity of the flush buffer. + static void setBufferCapacity(int size) { + sizeFlushBuffer = size; + } + // + // maximum number of transactions to be buffered in memory + static void setMaxBufferedTransactions(int num) { + maxBufferedTransactions = num; + } + + // sets the preallocate trigger of the edits log. + static void setPreallocateSize(long size) { + preallocateSize = size; + } + + /** + * A class to read in blocks stored in the old format. The only two + * fields in the block were blockid and length. + */ + static class BlockTwo implements Writable { + long blkid; + long len; + + static { // register a ctor + WritableFactories.setFactory + (BlockTwo.class, + new WritableFactory() { + public Writable newInstance() { return new BlockTwo(); } + }); + } + + + BlockTwo() { + blkid = 0; + len = 0; + } + ///////////////////////////////////// + // Writable + ///////////////////////////////////// + public void write(DataOutput out) throws IOException { + out.writeLong(blkid); + out.writeLong(len); + } + + public void readFields(DataInput in) throws IOException { + this.blkid = in.readLong(); + this.len = in.readLong(); + } + } + + /** This method is defined for compatibility reason. 
*/ + static private DatanodeDescriptor[] readDatanodeDescriptorArray(DataInput in + ) throws IOException { + DatanodeDescriptor[] locations = new DatanodeDescriptor[in.readInt()]; + for (int i = 0; i < locations.length; i++) { + locations[i] = new DatanodeDescriptor(); + locations[i].readFieldsFromFSEditLog(in); + } + return locations; + } + + static private short readShort(DataInputStream in) throws IOException { + return Short.parseShort(FSImage.readString(in)); + } + + static private long readLong(DataInputStream in) throws IOException { + return Long.parseLong(FSImage.readString(in)); + } + + static private Block[] readBlocks(DataInputStream in) throws IOException { + int numBlocks = in.readInt(); + Block[] blocks = new Block[numBlocks]; + for (int i = 0; i < numBlocks; i++) { + blocks[i] = new Block(); + blocks[i].readFields(in); + } + return blocks; + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java new file mode 100644 index 0000000..ac3280c --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -0,0 +1,1827 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.DataInput; +import java.io.DataInputStream; +import java.io.DataOutput; +import java.io.DataOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; +import java.security.DigestInputStream; +import java.security.DigestOutputStream; +import java.security.MessageDigest; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Date; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Random; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; +import org.apache.hadoop.hdfs.server.common.Storage; +import org.apache.hadoop.hdfs.server.common.StorageInfo; +import org.apache.hadoop.hdfs.server.common.UpgradeManager; +import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; +import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; +import org.apache.hadoop.hdfs.server.namenode.FSEditLog.EditLogFileInputStream; +import org.apache.hadoop.hdfs.util.DataTransferThrottler; +import org.apache.hadoop.io.MD5Hash; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.UTF8; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.compress.CompressionCodec; +import org.apache.hadoop.io.compress.CompressionCodecFactory; + +/** + * FSImage handles checkpointing and logging of the namespace edits. + * + */ +public class FSImage extends Storage { + + private static final SimpleDateFormat DATE_FORM = + new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + + static final String MESSAGE_DIGEST_PROPERTY = "imageMD5Digest"; + + // + // The filenames used for storing the images + // + enum NameNodeFile { + IMAGE ("fsimage"), + TIME ("fstime"), + EDITS ("edits"), + IMAGE_NEW ("fsimage.ckpt"), + EDITS_NEW ("edits.new"); + + private String fileName = null; + private NameNodeFile(String name) {this.fileName = name;} + String getName() {return fileName;} + } + + // checkpoint states + enum CheckpointStates{START, ROLLED_EDITS, UPLOAD_START, UPLOAD_DONE; } + /** + * Implementation of StorageDirType specific to namenode storage + * A Storage directory could be of type IMAGE which stores only fsimage, + * or of type EDITS which stores edits or of type IMAGE_AND_EDITS which + * stores both fsimage and edits. 
+ */ + static enum NameNodeDirType implements StorageDirType { + UNDEFINED, + IMAGE, + EDITS, + IMAGE_AND_EDITS; + + public StorageDirType getStorageDirType() { + return this; + } + + public boolean isOfType(StorageDirType type) { + if ((this == IMAGE_AND_EDITS) && (type == IMAGE || type == EDITS)) + return true; + return this == type; + } + } + + protected long checkpointTime = -1L; + private FSEditLog editLog = null; + private boolean isUpgradeFinalized = false; + MD5Hash imageDigest = null; + MD5Hash checkpointImageDigest = null; + + /** + * list of failed (and thus removed) storages + */ + protected List removedStorageDirs = new ArrayList(); + + /** + * Directories for importing an image from a checkpoint. + */ + private Collection checkpointDirs; + private Collection checkpointEditsDirs; + + /** + * Image compression related fields + */ + private boolean compressImage = false; // if image should be compressed + private CompressionCodec saveCodec; // the compression codec + private CompressionCodecFactory codecFac; // all the supported codecs + + DataTransferThrottler imageTransferThrottler = null; // throttle image transfer + + /** + * Can fs-image be rolled? + */ + volatile private CheckpointStates ckptState = FSImage.CheckpointStates.START; + + /** + * Used for saving the image to disk + */ + static private final ThreadLocal FILE_PERM = + new ThreadLocal() { + @Override + protected FsPermission initialValue() { + return new FsPermission((short) 0); + } + }; + static private final byte[] PATH_SEPARATOR = DFSUtil.string2Bytes(Path.SEPARATOR); + + /** + */ + FSImage() { + super(NodeType.NAME_NODE); + this.editLog = new FSEditLog(this); + } + + /** + * Constructor + * @param conf Configuration + */ + FSImage(Configuration conf) throws IOException { + this(); + setCheckpointDirectories(FSImage.getCheckpointDirs(conf, null), + FSImage.getCheckpointEditsDirs(conf, null)); + this.compressImage = conf.getBoolean( + HdfsConstants.DFS_IMAGE_COMPRESS_KEY, + HdfsConstants.DFS_IMAGE_COMPRESS_DEFAULT); + this.codecFac = new CompressionCodecFactory(conf); + if (this.compressImage) { + String codecClassName = conf.get( + HdfsConstants.DFS_IMAGE_COMPRESSION_CODEC_KEY, + HdfsConstants.DFS_IMAGE_COMPRESSION_CODEC_DEFAULT); + this.saveCodec = codecFac.getCodecByClassName(codecClassName); + if (this.saveCodec == null) { + throw new IOException("Not supported codec: " + codecClassName); + } + } + long transferBandwidth = conf.getLong( + HdfsConstants.DFS_IMAGE_TRANSFER_RATE_KEY, + HdfsConstants.DFS_IMAGE_TRANSFER_RATE_DEFAULT); + + if (transferBandwidth > 0) { + this.imageTransferThrottler = new DataTransferThrottler(transferBandwidth); + } + } + + /** + */ + FSImage(Collection fsDirs, Collection fsEditsDirs) + throws IOException { + this(); + setStorageDirectories(fsDirs, fsEditsDirs); + } + + public FSImage(StorageInfo storageInfo) { + super(NodeType.NAME_NODE, storageInfo); + } + + /** + * Represents an Image (image and edit file). 
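+
+  // The directory-type check above is containment, not equality: a directory
+  // configured to hold both the image and the edits answers true for either
+  // role. A small illustration (hypothetical helper, not part of this patch):
+  static void isOfTypeExample() {
+    NameNodeDirType both = NameNodeDirType.IMAGE_AND_EDITS;
+    boolean storesImage = both.isOfType(NameNodeDirType.IMAGE);    // true
+    boolean storesEdits = both.isOfType(NameNodeDirType.EDITS);    // true
+    boolean imageAlsoEdits =
+        NameNodeDirType.IMAGE.isOfType(NameNodeDirType.EDITS);     // false
+    assert storesImage && storesEdits && !imageAlsoEdits;
+  }
+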
+ */ + public FSImage(File imageDir) throws IOException { + this(); + ArrayList dirs = new ArrayList(1); + ArrayList editsDirs = new ArrayList(1); + dirs.add(imageDir); + editsDirs.add(imageDir); + setStorageDirectories(dirs, editsDirs); + } + + void setStorageDirectories(Collection fsNameDirs, + Collection fsEditsDirs + ) throws IOException { + this.storageDirs = new ArrayList(); + this.removedStorageDirs = new ArrayList(); + // Add all name dirs with appropriate NameNodeDirType + for (File dirName : fsNameDirs) { + boolean isAlsoEdits = false; + for (File editsDirName : fsEditsDirs) { + if (editsDirName.compareTo(dirName) == 0) { + isAlsoEdits = true; + fsEditsDirs.remove(editsDirName); + break; + } + } + NameNodeDirType dirType = (isAlsoEdits) ? + NameNodeDirType.IMAGE_AND_EDITS : + NameNodeDirType.IMAGE; + this.addStorageDir(new StorageDirectory(dirName, dirType)); + } + + // Add edits dirs if they are different from name dirs + for (File dirName : fsEditsDirs) { + this.addStorageDir(new StorageDirectory(dirName, + NameNodeDirType.EDITS)); + } + } + + void setCheckpointDirectories(Collection dirs, + Collection editsDirs) { + checkpointDirs = dirs; + checkpointEditsDirs = editsDirs; + } + + static File getImageFile(StorageDirectory sd, NameNodeFile type) { + return new File(sd.getCurrentDir(), type.getName()); + } + + List getRemovedStorageDirs() { + return this.removedStorageDirs; + } + + File getEditFile(StorageDirectory sd) { + return getImageFile(sd, NameNodeFile.EDITS); + } + + File getEditNewFile(StorageDirectory sd) { + return getImageFile(sd, NameNodeFile.EDITS_NEW); + } + + File[] getFileNames(NameNodeFile type, NameNodeDirType dirType) { + ArrayList list = new ArrayList(); + Iterator it = (dirType == null) ? dirIterator() : + dirIterator(dirType); + for ( ;it.hasNext(); ) { + list.add(getImageFile(it.next(), type)); + } + return list.toArray(new File[list.size()]); + } + + File[] getImageFiles() { + return getFileNames(NameNodeFile.IMAGE, NameNodeDirType.IMAGE); + } + + File[] getEditsFiles() { + return getFileNames(NameNodeFile.EDITS, NameNodeDirType.EDITS); + } + + File[] getTimeFiles() { + return getFileNames(NameNodeFile.TIME, null); + } + + /** + * Get the MD5 digest of the current image + * @return the MD5 digest of the current image + */ + MD5Hash getImageDigest() { + return imageDigest; + } + + /** + * Analyze storage directories. + * Recover from previous transitions if required. + * Perform fs state transition if necessary depending on the namespace info. + * Read storage info. + * + * @param dataDirs + * @param startOpt startup option + * @throws IOException + * @return true if the image needs to be saved or false otherwise + */ + boolean recoverTransitionRead(Collection dataDirs, + Collection editsDirs, + StartupOption startOpt + ) throws IOException { + assert startOpt != StartupOption.FORMAT : + "NameNode formatting should be performed before reading the image"; + + // none of the data dirs exist + if (dataDirs.size() == 0 || editsDirs.size() == 0) + throw new IOException( + "All specified directories are not accessible or do not exist."); + + if(startOpt == StartupOption.IMPORT + && (checkpointDirs == null || checkpointDirs.isEmpty())) + throw new IOException("Cannot import image from a checkpoint. " + + "\"fs.checkpoint.dir\" is not set." ); + + if(startOpt == StartupOption.IMPORT + && (checkpointEditsDirs == null || checkpointEditsDirs.isEmpty())) + throw new IOException("Cannot import image from a checkpoint. " + + "\"fs.checkpoint.edits.dir\" is not set." 
); + + setStorageDirectories(dataDirs, editsDirs); + // 1. For each data directory calculate its state and + // check whether all is consistent before transitioning. + Map dataDirStates = + new HashMap(); + boolean isFormatted = false; + for (Iterator it = + dirIterator(); it.hasNext();) { + StorageDirectory sd = it.next(); + StorageState curState; + try { + curState = sd.analyzeStorage(startOpt); + // sd is locked but not opened + switch(curState) { + case NON_EXISTENT: + // name-node fails if any of the configured storage dirs are missing + throw new InconsistentFSStateException(sd.getRoot(), + "storage directory does not exist or is not accessible."); + case NOT_FORMATTED: + break; + case NORMAL: + break; + default: // recovery is possible + sd.doRecover(curState); + } + if (curState != StorageState.NOT_FORMATTED + && startOpt != StartupOption.ROLLBACK) { + sd.read(); // read and verify consistency with other directories + isFormatted = true; + } + if (startOpt == StartupOption.IMPORT && isFormatted) + // import of a checkpoint is allowed only into empty image directories + throw new IOException("Cannot import image from a checkpoint. " + + " NameNode already contains an image in " + sd.getRoot()); + } catch (IOException ioe) { + sd.unlock(); + throw ioe; + } + dataDirStates.put(sd,curState); + } + + if (!isFormatted && startOpt != StartupOption.ROLLBACK + && startOpt != StartupOption.IMPORT) + throw new IOException("NameNode is not formatted."); + if (layoutVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION) { + checkVersionUpgradable(layoutVersion); + } + if (startOpt != StartupOption.UPGRADE + && layoutVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION + && layoutVersion != FSConstants.LAYOUT_VERSION) + throw new IOException( + "\nFile system image contains an old layout version " + layoutVersion + + ".\nAn upgrade to version " + FSConstants.LAYOUT_VERSION + + " is required.\nPlease restart NameNode with -upgrade option."); + // check whether distributed upgrade is reguired and/or should be continued + verifyDistributedUpgradeProgress(startOpt); + + // 2. Format unformatted dirs. + this.checkpointTime = 0L; + for (Iterator it = + dirIterator(); it.hasNext();) { + StorageDirectory sd = it.next(); + StorageState curState = dataDirStates.get(sd); + switch(curState) { + case NON_EXISTENT: + assert false : StorageState.NON_EXISTENT + " state cannot be here"; + case NOT_FORMATTED: + LOG.info("Storage directory " + sd.getRoot() + " is not formatted."); + LOG.info("Formatting ..."); + sd.clearDirectory(); // create empty currrent dir + break; + default: + break; + } + } + + // 3. Do transitions + switch(startOpt) { + case UPGRADE: + doUpgrade(); + return false; // upgrade saved image already + case IMPORT: + doImportCheckpoint(); + return true; + case ROLLBACK: + doRollback(); + break; + case REGULAR: + // just load the image + } + return loadFSImage(); + } + + private void doUpgrade() throws IOException { + if(getDistributedUpgradeState()) { + // only distributed upgrade need to continue + // don't do version upgrade + this.loadFSImage(); + initializeDistributedUpgrade(); + return; + } + // Upgrade is allowed only if there are + // no previous fs states in any of the directories + for (Iterator it = + dirIterator(); it.hasNext();) { + StorageDirectory sd = it.next(); + if (sd.getPreviousDir().exists()) + throw new InconsistentFSStateException(sd.getRoot(), + "previous fs state should not exist during upgrade. 
" + + "Finalize or rollback first."); + } + + // load the latest image + this.loadFSImage(); + + // Do upgrade for each directory + long oldCTime = this.getCTime(); + this.cTime = FSNamesystem.now(); // generate new cTime for the state + int oldLV = this.getLayoutVersion(); + this.layoutVersion = FSConstants.LAYOUT_VERSION; + this.checkpointTime = FSNamesystem.now(); + for (Iterator it = + dirIterator(); it.hasNext();) { + StorageDirectory sd = it.next(); + LOG.info("Upgrading image directory " + sd.getRoot() + + ".\n old LV = " + oldLV + + "; old CTime = " + oldCTime + + ".\n new LV = " + this.getLayoutVersion() + + "; new CTime = " + this.getCTime()); + File curDir = sd.getCurrentDir(); + File prevDir = sd.getPreviousDir(); + File tmpDir = sd.getPreviousTmp(); + assert curDir.exists() : "Current directory must exist."; + assert !prevDir.exists() : "prvious directory must not exist."; + assert !tmpDir.exists() : "prvious.tmp directory must not exist."; + // rename current to tmp + rename(curDir, tmpDir); + // save new image + if (!curDir.mkdir()) + throw new IOException("Cannot create directory " + curDir); + saveFSImage(getImageFile(sd, NameNodeFile.IMAGE)); + editLog.createEditLogFile(getImageFile(sd, NameNodeFile.EDITS)); + // write version and time files + sd.write(); + // rename tmp to previous + rename(tmpDir, prevDir); + isUpgradeFinalized = false; + LOG.info("Upgrade of " + sd.getRoot() + " is complete."); + } + initializeDistributedUpgrade(); + editLog.open(); + } + + private void doRollback() throws IOException { + // Rollback is allowed only if there is + // a previous fs states in at least one of the storage directories. + // Directories that don't have previous state do not rollback + boolean canRollback = false; + FSImage prevState = new FSImage(); + prevState.layoutVersion = FSConstants.LAYOUT_VERSION; + for (Iterator it = + dirIterator(); it.hasNext();) { + StorageDirectory sd = it.next(); + File prevDir = sd.getPreviousDir(); + if (!prevDir.exists()) { // use current directory then + LOG.info("Storage directory " + sd.getRoot() + + " does not contain previous fs state."); + sd.read(); // read and verify consistency with other directories + continue; + } + StorageDirectory sdPrev = prevState.new StorageDirectory(sd.getRoot()); + sdPrev.read(sdPrev.getPreviousVersionFile()); // read and verify consistency of the prev dir + canRollback = true; + } + if (!canRollback) + throw new IOException("Cannot rollback. 
" + + "None of the storage directories contain previous fs state."); + + // Now that we know all directories are going to be consistent + // Do rollback for each directory containing previous state + for (Iterator it = + dirIterator(); it.hasNext();) { + StorageDirectory sd = it.next(); + File prevDir = sd.getPreviousDir(); + if (!prevDir.exists()) + continue; + + LOG.info("Rolling back storage directory " + sd.getRoot() + + ".\n new LV = " + prevState.getLayoutVersion() + + "; new CTime = " + prevState.getCTime()); + File tmpDir = sd.getRemovedTmp(); + assert !tmpDir.exists() : "removed.tmp directory must not exist."; + // rename current to tmp + File curDir = sd.getCurrentDir(); + assert curDir.exists() : "Current directory must exist."; + rename(curDir, tmpDir); + // rename previous to current + rename(prevDir, curDir); + + // delete tmp dir + deleteDir(tmpDir); + LOG.info("Rollback of " + sd.getRoot()+ " is complete."); + } + isUpgradeFinalized = true; + // check whether name-node can start in regular mode + verifyDistributedUpgradeProgress(StartupOption.REGULAR); + } + + private void doFinalize(StorageDirectory sd) throws IOException { + File prevDir = sd.getPreviousDir(); + if (!prevDir.exists()) { // already discarded + LOG.info("Directory " + prevDir + " does not exist."); + LOG.info("Finalize upgrade for " + sd.getRoot()+ " is not required."); + return; + } + LOG.info("Finalizing upgrade for storage directory " + + sd.getRoot() + "." + + (getLayoutVersion()==0 ? "" : + "\n cur LV = " + this.getLayoutVersion() + + "; cur CTime = " + this.getCTime())); + assert sd.getCurrentDir().exists() : "Current directory must exist."; + final File tmpDir = sd.getFinalizedTmp(); + // rename previous to tmp and remove + rename(prevDir, tmpDir); + deleteDir(tmpDir); + isUpgradeFinalized = true; + LOG.info("Finalize upgrade for " + sd.getRoot()+ " is complete."); + } + + /** + * Load image from a checkpoint directory and save it into the current one. + * @throws IOException + */ + void doImportCheckpoint() throws IOException { + FSImage ckptImage = new FSImage(); + FSNamesystem fsNamesys = FSNamesystem.getFSNamesystem(); + // replace real image with the checkpoint image + FSImage realImage = fsNamesys.getFSImage(); + assert realImage == this; + ckptImage.codecFac = realImage.codecFac; + fsNamesys.dir.fsImage = ckptImage; + // load from the checkpoint dirs + try { + ckptImage.recoverTransitionRead(checkpointDirs, checkpointEditsDirs, + StartupOption.REGULAR); + } finally { + ckptImage.close(); + } + // return back the real image + realImage.setStorageInfo(ckptImage); + fsNamesys.dir.fsImage = realImage; + // and save it + saveFSImage(); + } + + void finalizeUpgrade() throws IOException { + for (Iterator it = + dirIterator(); it.hasNext();) { + doFinalize(it.next()); + } + } + + boolean isUpgradeFinalized() { + return isUpgradeFinalized; + } + + protected void getFields(Properties props, + StorageDirectory sd + ) throws IOException { + super.getFields(props, sd); + if (layoutVersion == 0) + throw new IOException("NameNode directory " + + sd.getRoot() + " is not formatted."); + String sDUS, sDUV; + sDUS = props.getProperty("distributedUpgradeState"); + sDUV = props.getProperty("distributedUpgradeVersion"); + setDistributedUpgradeState( + sDUS == null? false : Boolean.parseBoolean(sDUS), + sDUV == null? 
getLayoutVersion() : Integer.parseInt(sDUV)); + String sMd5 = props.getProperty(MESSAGE_DIGEST_PROPERTY); + if (layoutVersion <= -26) { + if (sMd5 == null) { + throw new InconsistentFSStateException(sd.getRoot(), + "file " + STORAGE_FILE_VERSION + " does not have MD5 image digest."); + } + this.imageDigest = new MD5Hash(sMd5); + } else if (sMd5 != null) { + throw new InconsistentFSStateException(sd.getRoot(), + "file " + STORAGE_FILE_VERSION + + " has image MD5 digest when version is " + layoutVersion); + } + this.checkpointTime = readCheckpointTime(sd); + } + + long readCheckpointTime(StorageDirectory sd) throws IOException { + File timeFile = getImageFile(sd, NameNodeFile.TIME); + long timeStamp = 0L; + if (timeFile.exists() && timeFile.canRead()) { + DataInputStream in = new DataInputStream(new FileInputStream(timeFile)); + try { + timeStamp = in.readLong(); + } finally { + in.close(); + } + } + return timeStamp; + } + + /** + * Write last checkpoint time and version file into the storage directory. + * + * The version file should always be written last. + * Missing or corrupted version file indicates that + * the checkpoint is not valid. + * + * @param sd storage directory + * @throws IOException + */ + protected void setFields(Properties props, + StorageDirectory sd + ) throws IOException { + super.setFields(props, sd); + boolean uState = getDistributedUpgradeState(); + int uVersion = getDistributedUpgradeVersion(); + if(uState && uVersion != getLayoutVersion()) { + props.setProperty("distributedUpgradeState", Boolean.toString(uState)); + props.setProperty("distributedUpgradeVersion", Integer.toString(uVersion)); + } + if (imageDigest == null) { + imageDigest = MD5Hash.digest( + new FileInputStream(getImageFile(sd, NameNodeFile.IMAGE))); + } + props.setProperty(MESSAGE_DIGEST_PROPERTY, imageDigest.toString()); + writeCheckpointTime(sd); + } + + /** + * Write last checkpoint time into a separate file. + * + * @param sd + * @throws IOException + */ + void writeCheckpointTime(StorageDirectory sd) throws IOException { + if (checkpointTime < 0L) + return; // do not write negative time + File timeFile = getImageFile(sd, NameNodeFile.TIME); + if (timeFile.exists()) { timeFile.delete(); } + DataOutputStream out = new DataOutputStream( + new FileOutputStream(timeFile)); + try { + out.writeLong(checkpointTime); + } finally { + out.close(); + } + } + + /** + * Record new checkpoint time in order to + * distinguish healthy directories from the removed ones. + * If there is an error writing new checkpoint time, the corresponding + * storage directory is removed from the list. 
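+
+  // The fstime file holds nothing but a single binary long. A self-contained
+  // sketch of the write/read round trip used by writeCheckpointTime and
+  // readCheckpointTime above (illustrative only; the temp file stands in for
+  // an fstime file in a storage directory):
+  static void fstimeRoundTrip() throws java.io.IOException {
+    java.io.File timeFile = java.io.File.createTempFile("fstime", null);
+    java.io.DataOutputStream out =
+        new java.io.DataOutputStream(new java.io.FileOutputStream(timeFile));
+    try {
+      out.writeLong(42L);           // the checkpoint time
+    } finally {
+      out.close();
+    }
+    java.io.DataInputStream in =
+        new java.io.DataInputStream(new java.io.FileInputStream(timeFile));
+    try {
+      assert in.readLong() == 42L;  // the newest fstime wins at load time
+    } finally {
+      in.close();
+    }
+  }
+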
+ */ + void incrementCheckpointTime() { + this.checkpointTime++; + + // Write new checkpoint time in all storage directories + for(Iterator it = + dirIterator(); it.hasNext();) { + StorageDirectory sd = it.next(); + try { + writeCheckpointTime(sd); + } catch(IOException e) { + // Close any edits stream associated with this dir and remove directory + if (sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) + editLog.processIOError(sd); + + //add storage to the removed list + removedStorageDirs.add(sd); + it.remove(); + } + } + } + + /** + * Remove storage directory given directory + */ + + void processIOError(File dirName) { + for (Iterator it = + dirIterator(); it.hasNext();) { + StorageDirectory sd = it.next(); + if (sd.getRoot().getPath().equals(dirName.getPath())) { + //add storage to the removed list + LOG.info(" removing " + dirName.getPath()); + removedStorageDirs.add(sd); + it.remove(); + } + } + } + + public FSEditLog getEditLog() { + return editLog; + } + + public boolean isConversionNeeded(StorageDirectory sd) throws IOException { + File oldImageDir = new File(sd.getRoot(), "image"); + if (!oldImageDir.exists()) { + if(sd.getVersionFile().exists()) + throw new InconsistentFSStateException(sd.getRoot(), + oldImageDir + " does not exist."); + return false; + } + // check the layout version inside the image file + File oldF = new File(oldImageDir, "fsimage"); + RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws"); + try { + oldFile.seek(0); + int odlVersion = oldFile.readInt(); + if (odlVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION) + return false; + } finally { + oldFile.close(); + } + return true; + } + + // + // Atomic move sequence, to recover from interrupted checkpoint + // + boolean recoverInterruptedCheckpoint(StorageDirectory nameSD, + StorageDirectory editsSD) + throws IOException { + boolean needToSave = false; + File curFile = getImageFile(nameSD, NameNodeFile.IMAGE); + File ckptFile = getImageFile(nameSD, NameNodeFile.IMAGE_NEW); + + // + // If we were in the midst of a checkpoint + // + if (ckptFile.exists()) { + needToSave = true; + if (getImageFile(editsSD, NameNodeFile.EDITS_NEW).exists()) { + // + // checkpointing migth have uploaded a new + // merged image, but we discard it here because we are + // not sure whether the entire merged image was uploaded + // before the namenode crashed. + // + if (!ckptFile.delete()) { + throw new IOException("Unable to delete " + ckptFile); + } + } else { + // + // checkpointing was in progress when the namenode + // shutdown. The fsimage.ckpt was created and the edits.new + // file was moved to edits. We complete that checkpoint by + // moving fsimage.new to fsimage. There is no need to + // update the fstime file here. renameTo fails on Windows + // if the destination file already exists. + // + if (!ckptFile.renameTo(curFile)) { + if (!curFile.delete()) + LOG.warn("Unable to delete dir " + curFile + " before rename"); + if (!ckptFile.renameTo(curFile)) { + throw new IOException("Unable to rename " + ckptFile + + " to " + curFile); + } + } + } + } + return needToSave; + } + + /** + * Choose latest image from one of the directories, + * load it and merge with the edits from that directory. 
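+
+  // renameTo() refuses to overwrite an existing file on some platforms
+  // (notably Windows), which is why recoverInterruptedCheckpoint above always
+  // falls back to deleting the destination and renaming again. A standalone
+  // sketch of that idiom (illustrative helper, not part of this patch):
+  static boolean renameOverwriting(java.io.File from, java.io.File to)
+      throws java.io.IOException {
+    if (from.renameTo(to)) {
+      return true;                  // the plain rename worked
+    }
+    if (to.exists() && !to.delete()) {
+      throw new java.io.IOException("Unable to delete " + to);
+    }
+    return from.renameTo(to);       // retry once the destination is gone
+  }
+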
+ * + * @return whether the image should be saved + * @throws IOException + */ + boolean loadFSImage() throws IOException { + // Now check all curFiles and see which is the newest + long latestNameCheckpointTime = Long.MIN_VALUE; + long latestEditsCheckpointTime = Long.MIN_VALUE; + StorageDirectory latestNameSD = null; + StorageDirectory latestEditsSD = null; + boolean needToSave = false; + isUpgradeFinalized = true; + Collection imageDirs = new ArrayList(); + Collection editsDirs = new ArrayList(); + for (Iterator it = dirIterator(); it.hasNext();) { + StorageDirectory sd = it.next(); + if (!sd.getVersionFile().exists()) { + needToSave |= true; + continue; // some of them might have just been formatted + } + boolean imageExists = false, editsExists = false; + if (sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) { + imageExists = getImageFile(sd, NameNodeFile.IMAGE).exists(); + imageDirs.add(sd.getRoot().getCanonicalPath()); + } + if (sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) { + editsExists = getImageFile(sd, NameNodeFile.EDITS).exists(); + editsDirs.add(sd.getRoot().getCanonicalPath()); + } + + checkpointTime = readCheckpointTime(sd); + if ((checkpointTime != Long.MIN_VALUE) && + ((checkpointTime != latestNameCheckpointTime) || + (checkpointTime != latestEditsCheckpointTime))) { + // Force saving of new image if checkpoint time + // is not same in all of the storage directories. + needToSave |= true; + } + if (sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE) && + (latestNameCheckpointTime < checkpointTime) && imageExists) { + latestNameCheckpointTime = checkpointTime; + latestNameSD = sd; + } + if (sd.getStorageDirType().isOfType(NameNodeDirType.EDITS) && + (latestEditsCheckpointTime < checkpointTime) && editsExists) { + latestEditsCheckpointTime = checkpointTime; + latestEditsSD = sd; + } + if (checkpointTime <= 0L) + needToSave |= true; + // set finalized flag + isUpgradeFinalized = isUpgradeFinalized && !sd.getPreviousDir().exists(); + } + + // We should have at least one image and one edits dirs + if (latestNameSD == null) + throw new IOException("Image file is not found in " + imageDirs); + if (latestEditsSD == null) + throw new IOException("Edits file is not found in " + editsDirs); + + // Make sure we are loading image and edits from same checkpoint + if (latestNameCheckpointTime != latestEditsCheckpointTime) + throw new IOException("Inconsitent storage detected, " + + "name and edits storage do not match"); + + // Recover from previous interrrupted checkpoint if any + needToSave |= recoverInterruptedCheckpoint(latestNameSD, latestEditsSD); + + // + // Load in bits + // + latestNameSD.read(); + needToSave |= loadFSImage(getImageFile(latestNameSD, NameNodeFile.IMAGE)); + + // Load latest edits + needToSave |= (loadFSEdits(latestEditsSD) > 0); + + return needToSave; + } + + /** + * Load in the filesystem imagefrom file. It's a big list of + * filenames and blocks. 
Return whether we should + * "re-save" and consolidate the edit-logs + */ + boolean loadFSImage(File curFile) throws IOException { + assert this.getLayoutVersion() < 0 : "Negative layout version is expected."; + assert curFile != null : "curFile is null"; + + long startTime = FSNamesystem.now(); + FSNamesystem fsNamesys = FSNamesystem.getFSNamesystem(); + FSDirectory fsDir = fsNamesys.dir; + + // + // Load in bits + // + boolean needToSave = true; + MessageDigest digester = MD5Hash.getDigester(); + DigestInputStream fin = new DigestInputStream( + new FileInputStream(curFile), digester); + DataInputStream in = new DataInputStream(fin); + try { + /* + * Note: Remove any checks for version earlier than + * Storage.LAST_UPGRADABLE_LAYOUT_VERSION since we should never get + * to here with older images. + */ + + /* + * TODO we need to change format of the image file + * it should not contain version and namespace fields + */ + // read image version: first appeared in version -1 + int imgVersion = in.readInt(); + needToSave = (imgVersion != FSConstants.LAYOUT_VERSION); + + // read namespaceID: first appeared in version -2 + this.namespaceID = in.readInt(); + + // read number of files + long numFiles; + if (imgVersion <= -16) { + numFiles = in.readLong(); + } else { + numFiles = in.readInt(); + } + + this.layoutVersion = imgVersion; + // read in the last generation stamp. + if (imgVersion <= -12) { + long genstamp = in.readLong(); + fsNamesys.setGenerationStamp(genstamp); + } + + // read compression related info + boolean isCompressed = false; + if (imgVersion <= -25) { // -25: 1st version providing compression option + isCompressed = in.readBoolean(); + if (isCompressed) { + String codecClassName = Text.readString(in); + CompressionCodec loadCodec = codecFac.getCodecByClassName(codecClassName); + if (loadCodec == null) { + throw new IOException("Image compression codec not supported: " + + codecClassName); + } + in = new DataInputStream(loadCodec.createInputStream(fin)); + LOG.info("Loading image file " + curFile + + " compressed using codec " + codecClassName); + } + } + if (!isCompressed) { + in = new DataInputStream(new BufferedInputStream(fin)); + } + + // read file info + short replication = FSNamesystem.getFSNamesystem().getDefaultReplication(); + + LOG.info("Number of files = " + numFiles); + + byte[][] pathComponents; + byte[][] parentPath = {{}}; + INodeDirectory parentINode = fsDir.rootDir; + for (long i = 0; i < numFiles; i++) { + long modificationTime = 0; + long atime = 0; + long blockSize = 0; + pathComponents = readPathComponents(in); + replication = in.readShort(); + replication = FSEditLog.adjustReplication(replication); + modificationTime = in.readLong(); + if (imgVersion <= -17) { + atime = in.readLong(); + } + if (imgVersion <= -8) { + blockSize = in.readLong(); + } + int numBlocks = in.readInt(); + Block blocks[] = null; + + // for older versions, a blocklist of size 0 + // indicates a directory. + if ((-9 <= imgVersion && numBlocks > 0) || + (imgVersion < -9 && numBlocks >= 0)) { + blocks = new Block[numBlocks]; + for (int j = 0; j < numBlocks; j++) { + blocks[j] = new Block(); + if (-14 < imgVersion) { + blocks[j].set(in.readLong(), in.readLong(), + Block.GRANDFATHER_GENERATION_STAMP); + } else { + blocks[j].readFields(in); + } + } + } + // Older versions of HDFS does not store the block size in inode. + // If the file has more than one block, use the size of the + // first block as the blocksize. Otherwise use the default block size. 
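+
+      // For images written at the current layout version, the header read
+      // above is, in order: layout version (int), namespace id (int), number
+      // of inodes (long), last generation stamp (long), and a boolean that
+      // says whether the rest of the stream is compressed (followed by the
+      // codec class name, written with Text.writeString, when it is). A
+      // hedged sketch of just that header round trip with plain JDK streams
+      // (illustrative only; the values are made up):
+      static void imageHeaderSketch() throws java.io.IOException {
+        java.io.ByteArrayOutputStream buf = new java.io.ByteArrayOutputStream();
+        java.io.DataOutputStream out = new java.io.DataOutputStream(buf);
+        out.writeInt(-37);                  // hypothetical layout version
+        out.writeInt(1234567);              // namespace id
+        out.writeLong(1000L);               // number of inodes to follow
+        out.writeLong(5000L);               // last generation stamp
+        out.writeBoolean(false);            // image body not compressed
+        out.flush();
+        java.io.DataInputStream in = new java.io.DataInputStream(
+            new java.io.ByteArrayInputStream(buf.toByteArray()));
+        int layoutVersion = in.readInt();
+        int namespaceId = in.readInt();
+        long numFiles = in.readLong();
+        long genStamp = in.readLong();
+        boolean compressed = in.readBoolean();
+        assert layoutVersion == -37 && namespaceId == 1234567
+            && numFiles == 1000L && genStamp == 5000L && !compressed;
+      }
+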
+ // + if (-8 <= imgVersion && blockSize == 0) { + if (numBlocks > 1) { + blockSize = blocks[0].getNumBytes(); + } else { + long first = ((numBlocks == 1) ? blocks[0].getNumBytes(): 0); + blockSize = Math.max(fsNamesys.getDefaultBlockSize(), first); + } + } + + // get quota only when the node is a directory + long nsQuota = -1L; + if (imgVersion <= -16 && blocks == null) { + nsQuota = in.readLong(); + } + long dsQuota = -1L; + if (imgVersion <= -18 && blocks == null) { + dsQuota = in.readLong(); + } + + PermissionStatus permissions = fsNamesys.getUpgradePermission(); + if (imgVersion <= -11) { + permissions = PermissionStatus.read(in); + } + if (isRoot(pathComponents)) { // it is the root + // update the root's attributes + if (nsQuota != -1 || dsQuota != -1) { + fsDir.rootDir.setQuota(nsQuota, dsQuota); + } + fsDir.rootDir.setModificationTime(modificationTime); + fsDir.rootDir.setPermissionStatus(permissions); + continue; + } + // check if the new inode belongs to the same parent + if(!isParent(pathComponents, parentPath)) { + parentINode = null; + parentPath = getParent(pathComponents); + } + // add new inode + parentINode = fsDir.addToParent(pathComponents, parentINode, permissions, + blocks, replication, modificationTime, + atime, nsQuota, dsQuota, blockSize, false); + } + + // load datanode info + this.loadDatanodes(imgVersion, in); + + // load Files Under Construction + this.loadFilesUnderConstruction(imgVersion, in, fsNamesys); + + } finally { + in.close(); + } + + // verify checksum + MD5Hash readImageMd5 = new MD5Hash(digester.digest()); + if (imageDigest == null) { + imageDigest = readImageMd5; // set this fsimage's checksum + } else if (!imageDigest.equals(readImageMd5)) { + throw new IOException("Image file " + curFile + "is corrupt!"); + } + + LOG.info("Image file of size " + curFile.length() + " loaded in " + + (FSNamesystem.now() - startTime)/1000 + " seconds."); + + return needToSave; + } + + /** + * Return string representing the parent of the given path. + */ + String getParent(String path) { + return path.substring(0, path.lastIndexOf(Path.SEPARATOR)); + } + + byte[][] getParent(byte[][] path) { + byte[][] result = new byte[path.length - 1][]; + for (int i = 0; i < result.length; i++) { + result[i] = new byte[path[i].length]; + System.arraycopy(path[i], 0, result[i], 0, path[i].length); + } + return result; + } + + private boolean isRoot(byte[][] path) { + return path.length == 1 && + path[0] == null; + } + + private boolean isParent(byte[][] path, byte[][] parent) { + if (path == null || parent == null) + return false; + if (parent.length == 0 || path.length != parent.length + 1) + return false; + boolean isParent = true; + for (int i = 0; i < parent.length; i++) { + isParent = isParent && Arrays.equals(path[i], parent[i]); + } + return isParent; + } + + /** + * Load and merge edits from two edits files + * + * @param sd storage directory + * @return number of edits loaded + * @throws IOException + */ + int loadFSEdits(StorageDirectory sd) throws IOException { + int numEdits = 0; + EditLogFileInputStream edits = + new EditLogFileInputStream(getImageFile(sd, NameNodeFile.EDITS)); + numEdits = FSEditLog.loadFSEdits(edits); + edits.close(); + File editsNew = getImageFile(sd, NameNodeFile.EDITS_NEW); + if (editsNew.exists() && editsNew.length() > 0) { + edits = new EditLogFileInputStream(editsNew); + numEdits += FSEditLog.loadFSEdits(edits); + edits.close(); + } + // update the counts. 
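+
+  // The corruption check above works because every byte of the image passes
+  // through a DigestInputStream while it is being parsed, so the MD5 is
+  // available for free once loading finishes. A minimal standalone sketch of
+  // that technique (illustrative; file and buffer size are arbitrary):
+  static byte[] md5OfFile(java.io.File f) throws Exception {
+    java.security.MessageDigest digester =
+        java.security.MessageDigest.getInstance("MD5");
+    java.io.InputStream in = new java.security.DigestInputStream(
+        new java.io.FileInputStream(f), digester);
+    try {
+      byte[] buf = new byte[8192];
+      while (in.read(buf) != -1) {
+        // reading is all that is required; the digest updates as a side effect
+      }
+    } finally {
+      in.close();
+    }
+    return digester.digest();
+  }
+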
+ FSNamesystem.getFSNamesystem().dir.updateCountForINodeWithQuota(); + return numEdits; + } + + /** + * Save the contents of the FS image to the file. + */ + void saveFSImage(File newFile) throws IOException { + FSNamesystem fsNamesys = FSNamesystem.getFSNamesystem(); + FSDirectory fsDir = fsNamesys.dir; + long startTime = FSNamesystem.now(); + // + // Write out data + // + FileOutputStream fstream = new FileOutputStream(newFile); + MessageDigest digester = MD5Hash.getDigester(); + DigestOutputStream fout = new DigestOutputStream(fstream, digester); + DataOutputStream out = new DataOutputStream(fout); + long numOfBytesWritten = 0; + try { + out.writeInt(FSConstants.LAYOUT_VERSION); + out.writeInt(namespaceID); + out.writeLong(fsDir.rootDir.numItemsInTree()); + out.writeLong(fsNamesys.getGenerationStamp()); + + out.writeBoolean(compressImage); + if (compressImage) { + String codecClassName = saveCodec.getClass().getCanonicalName(); + Text.writeString(out, codecClassName); + out = new DataOutputStream(saveCodec.createOutputStream(fout)); + LOG.info("Saving image file " + newFile + + " compressed using codec " + codecClassName); + } else { + out = new DataOutputStream(new BufferedOutputStream(fout)); + } + + byte[] byteStore = new byte[4*FSConstants.MAX_PATH_LENGTH]; + ByteBuffer strbuf = ByteBuffer.wrap(byteStore); + // save the root + saveINode2Image(strbuf, fsDir.rootDir, out); + // save the rest of the nodes + saveImage(strbuf, 0, fsDir.rootDir, out); + fsNamesys.saveFilesUnderConstruction(out); + strbuf = null; + + out.flush(); + fstream.getChannel().force(true); + numOfBytesWritten = fstream.getChannel().position(); + } finally { + out.close(); + } + // set md5 of the saved image + imageDigest = new MD5Hash(digester.digest()); + + long imageFileLen = newFile.length(); + if (numOfBytesWritten != imageFileLen) { + throw new IOException("Something is wrong: write " + numOfBytesWritten + + " bytes but the image file length is " + imageFileLen); + } + LOG.info("Image file of size " + imageFileLen + " saved in " + + (FSNamesystem.now() - startTime)/1000 + " seconds."); + } + + private class FSImageSaver implements Runnable { + private File imageFile; + + FSImageSaver(File imageFile) { + this.imageFile = imageFile; + } + + public String toString() { + return "FSImage saver for " + imageFile.getAbsolutePath(); + } + + public void run() { + try { + saveFSImage(imageFile); + } catch (IOException ex) { + LOG.error("Unable to write image to " + imageFile.getAbsolutePath()); + } + } + } + + /** + * Save the contents of the FS image + * and create empty edits. 
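+
+  // FSImageSaver above exists so that each image directory can be written by
+  // its own thread and the caller can then join them all, keeping one slow
+  // disk from serializing the whole checkpoint. A self-contained sketch of
+  // that fan-out/join pattern (illustrative only; the Runnable body stands in
+  // for saveFSImage(File)):
+  static void saveInParallel(java.util.List<Runnable> savers)
+      throws InterruptedException {
+    java.util.List<Thread> threads = new java.util.ArrayList<Thread>();
+    for (Runnable saver : savers) {
+      Thread t = new Thread(saver, saver.toString());
+      threads.add(t);
+      t.start();                    // one writer thread per storage directory
+    }
+    for (Thread t : threads) {
+      t.join();                     // wait until every copy has been written
+    }
+  }
+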
+ */ + public void saveFSImage() throws IOException { + editLog.createNewIfMissing(); + List savers = new ArrayList(); + for (Iterator it = + dirIterator(); it.hasNext();) { + StorageDirectory sd = it.next(); + NameNodeDirType dirType = (NameNodeDirType)sd.getStorageDirType(); + if (dirType.isOfType(NameNodeDirType.IMAGE)) { + FSImageSaver saver = new FSImageSaver( + getImageFile(sd, NameNodeFile.IMAGE_NEW)); + Thread saverThread = new Thread(saver, saver.toString()); + savers.add(saverThread); + saverThread.start(); + } + + if (dirType.isOfType(NameNodeDirType.EDITS)) { + editLog.createEditLogFile(getImageFile(sd, NameNodeFile.EDITS)); + File editsNew = getImageFile(sd, NameNodeFile.EDITS_NEW); + if (editsNew.exists()) + editLog.createEditLogFile(editsNew); + } + } + for (Thread saver : savers) { + while (saver.isAlive()) { + try { + saver.join(); + } catch (InterruptedException iex) { + LOG.error("Caught exception while waiting for thread " + + saver.getName() + " to finish. Retrying join"); + } + } + } + ckptState = CheckpointStates.UPLOAD_DONE; + rollFSImage(imageDigest); + } + + /** + * Generate new namespaceID. + * + * namespaceID is a persistent attribute of the namespace. + * It is generated when the namenode is formatted and remains the same + * during the life cycle of the namenode. + * When a datanodes register they receive it as the registrationID, + * which is checked every time the datanode is communicating with the + * namenode. Datanodes that do not 'know' the namespaceID are rejected. + * + * @return new namespaceID + */ + private int newNamespaceID() { + Random r = new Random(); + r.setSeed(FSNamesystem.now()); + int newID = 0; + while(newID == 0) + newID = r.nextInt(0x7FFFFFFF); // use 31 bits only + return newID; + } + + /** Create new dfs name directory. Caution: this destroys all files + * in this filesystem. */ + void format(StorageDirectory sd) throws IOException { + sd.clearDirectory(); // create currrent dir + sd.lock(); + try { + NameNodeDirType dirType = (NameNodeDirType)sd.getStorageDirType(); + if (dirType.isOfType(NameNodeDirType.IMAGE)) + saveFSImage(getImageFile(sd, NameNodeFile.IMAGE)); + if (dirType.isOfType(NameNodeDirType.EDITS)) + editLog.createEditLogFile(getImageFile(sd, NameNodeFile.EDITS)); + sd.write(); + } finally { + sd.unlock(); + } + LOG.info("Storage directory " + sd.getRoot() + + " has been successfully formatted."); + } + + public void format() throws IOException { + this.layoutVersion = FSConstants.LAYOUT_VERSION; + this.namespaceID = newNamespaceID(); + this.cTime = 0L; + this.checkpointTime = FSNamesystem.now(); + for (Iterator it = + dirIterator(); it.hasNext();) { + StorageDirectory sd = it.next(); + format(sd); + } + } + + /* + * Save one inode's attributes to the image. 
+ */ + private static void saveINode2Image(ByteBuffer name, + INode node, + DataOutputStream out) throws IOException { + int nameLen = name.position(); + out.writeShort(nameLen); + out.write(name.array(), name.arrayOffset(), nameLen); + FsPermission filePerm = FILE_PERM.get(); + if (!node.isDirectory()) { // write file inode + INodeFile fileINode = (INodeFile)node; + out.writeShort(fileINode.getReplication()); + out.writeLong(fileINode.getModificationTime()); + out.writeLong(fileINode.getAccessTime()); + out.writeLong(fileINode.getPreferredBlockSize()); + Block[] blocks = fileINode.getBlocks(); + out.writeInt(blocks.length); + for (Block blk : blocks) + blk.write(out); + filePerm.fromShort(fileINode.getFsPermissionShort()); + PermissionStatus.write(out, fileINode.getUserName(), + fileINode.getGroupName(), + filePerm); + } else { // write directory inode + out.writeShort(0); // replication + out.writeLong(node.getModificationTime()); + out.writeLong(0); // access time + out.writeLong(0); // preferred block size + out.writeInt(-1); // # of blocks + out.writeLong(node.getNsQuota()); + out.writeLong(node.getDsQuota()); + filePerm.fromShort(node.getFsPermissionShort()); + PermissionStatus.write(out, node.getUserName(), + node.getGroupName(), + filePerm); + } + } + /** + * Save file tree image starting from the given root. + * This is a recursive procedure, which first saves all children of + * a current directory and then moves inside the sub-directories. + */ + private static void saveImage(ByteBuffer parentPrefix, + int prefixLength, + INodeDirectory current, + DataOutputStream out) throws IOException { + int newPrefixLength = prefixLength; + if (current.getChildrenRaw() == null) + return; + for(INode child : current.getChildren()) { + // print all children first + parentPrefix.position(prefixLength); + parentPrefix.put(PATH_SEPARATOR).put(child.getLocalNameBytes()); + saveINode2Image(parentPrefix, child, out); + } + for(INode child : current.getChildren()) { + if(!child.isDirectory()) + continue; + parentPrefix.position(prefixLength); + parentPrefix.put(PATH_SEPARATOR).put(child.getLocalNameBytes()); + newPrefixLength = parentPrefix.position(); + saveImage(parentPrefix, newPrefixLength, (INodeDirectory)child, out); + } + parentPrefix.position(prefixLength); + } + + void loadDatanodes(int version, DataInputStream in) throws IOException { + if (version > -3) // pre datanode image version + return; + if (version <= -12) { + return; // new versions do not store the datanodes any more. + } + int size = in.readInt(); + for(int i = 0; i < size; i++) { + DatanodeImage nodeImage = new DatanodeImage(); + nodeImage.readFields(in); + // We don't need to add these descriptors any more. 
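+
+  // saveImage() above walks the namespace depth-first but writes all of a
+  // directory's children before descending into any of them, extending a
+  // shared path-prefix buffer as it goes. A small sketch of the same two-pass
+  // traversal over a toy tree (illustrative types; a node with children
+  // stands in for a directory):
+  static final class ToyNode {
+    String name;
+    java.util.List<ToyNode> children = new java.util.ArrayList<ToyNode>();
+  }
+
+  static void dumpTree(String parentPath, ToyNode dir, StringBuilder out) {
+    for (ToyNode child : dir.children) {        // pass 1: record every child
+      out.append(parentPath).append('/').append(child.name).append('\n');
+    }
+    for (ToyNode child : dir.children) {        // pass 2: recurse into subtrees
+      if (!child.children.isEmpty()) {
+        dumpTree(parentPath + "/" + child.name, child, out);
+      }
+    }
+  }
+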
+ } + } + + private void loadFilesUnderConstruction(int version, DataInputStream in, + FSNamesystem fs) throws IOException { + + FSDirectory fsDir = fs.dir; + if (version > -13) // pre lease image version + return; + int size = in.readInt(); + + LOG.info("Number of files under construction = " + size); + + for (int i = 0; i < size; i++) { + INodeFileUnderConstruction cons = readINodeUnderConstruction(in); + + // verify that file exists in namespace + String path = cons.getLocalName(); + INode old = fsDir.getFileINode(path); + if (old == null) { + throw new IOException("Found lease for non-existent file " + path); + } + if (old.isDirectory()) { + throw new IOException("Found lease for directory " + path); + } + INodeFile oldnode = (INodeFile) old; + fsDir.replaceNode(path, oldnode, cons); + fs.leaseManager.addLease(cons.clientName, path); + } + } + + // Helper function that reads in an INodeUnderConstruction + // from the input stream + // + static INodeFileUnderConstruction readINodeUnderConstruction( + DataInputStream in) throws IOException { + byte[] name = readBytes(in); + short blockReplication = in.readShort(); + long modificationTime = in.readLong(); + long preferredBlockSize = in.readLong(); + int numBlocks = in.readInt(); + BlockInfo[] blocks = new BlockInfo[numBlocks]; + Block blk = new Block(); + for (int i = 0; i < numBlocks; i++) { + blk.readFields(in); + blocks[i] = new BlockInfo(blk, blockReplication); + } + PermissionStatus perm = PermissionStatus.read(in); + String clientName = readString(in); + String clientMachine = readString(in); + + // These locations are not used at all + int numLocs = in.readInt(); + DatanodeDescriptor[] locations = new DatanodeDescriptor[numLocs]; + for (int i = 0; i < numLocs; i++) { + locations[i] = new DatanodeDescriptor(); + locations[i].readFields(in); + } + + return new INodeFileUnderConstruction(name, + blockReplication, + modificationTime, + preferredBlockSize, + blocks, + perm, + clientName, + clientMachine, + null); + } + + // Helper function that writes an INodeUnderConstruction + // into the input stream + // + static void writeINodeUnderConstruction(DataOutputStream out, + INodeFileUnderConstruction cons, + String path) + throws IOException { + writeString(path, out); + out.writeShort(cons.getReplication()); + out.writeLong(cons.getModificationTime()); + out.writeLong(cons.getPreferredBlockSize()); + int nrBlocks = cons.getBlocks().length; + out.writeInt(nrBlocks); + for (int i = 0; i < nrBlocks; i++) { + cons.getBlocks()[i].write(out); + } + cons.getPermissionStatus().write(out); + writeString(cons.getClientName(), out); + writeString(cons.getClientMachine(), out); + + out.writeInt(0); // do not store locations of last block + } + + /** + * Moves fsimage.ckpt to fsImage and edits.new to edits + * Reopens the new edits file. 
+ * + * @param newImageSignature the signature of the new image + */ + void rollFSImage(CheckpointSignature newImageSignature) throws IOException { + MD5Hash newImageDigest = newImageSignature.getImageDigest(); + if (!newImageDigest.equals(checkpointImageDigest)) { + throw new IOException( + "Checkpoint image is corrupt: expecting an MD5 checksum of" + + newImageDigest + " but is " + checkpointImageDigest); + } + rollFSImage(newImageSignature.getImageDigest()); + } + + private void rollFSImage(MD5Hash newImageDigest) throws IOException { + if (ckptState != CheckpointStates.UPLOAD_DONE) { + throw new IOException("Cannot roll fsImage before rolling edits log."); + } + // + // First, verify that edits.new and fsimage.ckpt exists in all + // checkpoint directories. + // + if (!editLog.existsNew()) { + throw new IOException("New Edits file does not exist"); + } + for (Iterator it = + dirIterator(NameNodeDirType.IMAGE); it.hasNext();) { + StorageDirectory sd = it.next(); + File ckpt = getImageFile(sd, NameNodeFile.IMAGE_NEW); + if (!ckpt.exists()) { + throw new IOException("Checkpoint file " + ckpt + + " does not exist"); + } + } + editLog.purgeEditLog(); // renamed edits.new to edits + + // + // Renames new image + // + for (Iterator it = + dirIterator(NameNodeDirType.IMAGE); it.hasNext();) { + StorageDirectory sd = it.next(); + File ckpt = getImageFile(sd, NameNodeFile.IMAGE_NEW); + File curFile = getImageFile(sd, NameNodeFile.IMAGE); + // renameTo fails on Windows if the destination file + // already exists. + if (!ckpt.renameTo(curFile)) { + curFile.delete(); + if (!ckpt.renameTo(curFile)) { + // Close edit stream, if this directory is also used for edits + if (sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) + editLog.processIOError(sd); + // add storage to the removed list + removedStorageDirs.add(sd); + it.remove(); + } + } + } + + // + // Updates the fstime file on all directories (fsimage and edits) + // and write version file + // + this.layoutVersion = FSConstants.LAYOUT_VERSION; + this.checkpointTime = FSNamesystem.now(); + this.imageDigest = newImageDigest; + for (Iterator it = + dirIterator(); it.hasNext();) { + StorageDirectory sd = it.next(); + // delete old edits if sd is the image only the directory + if (!sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) { + File editsFile = getImageFile(sd, NameNodeFile.EDITS); + editsFile.delete(); + } + // delete old fsimage if sd is the edits only the directory + if (!sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) { + File imageFile = getImageFile(sd, NameNodeFile.IMAGE); + imageFile.delete(); + } + try { + sd.write(); + } catch (IOException e) { + LOG.error("Cannot write file " + sd.getRoot(), e); + // Close edit stream, if this directory is also used for edits + if (sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) + editLog.processIOError(sd); + //add storage to the removed list + removedStorageDirs.add(sd); + it.remove(); + } + } + ckptState = FSImage.CheckpointStates.START; + } + + CheckpointSignature rollEditLog() throws IOException { + getEditLog().rollEditLog(); + ckptState = CheckpointStates.ROLLED_EDITS; + return new CheckpointSignature(this); + } + + /** + * This is called just before a new checkpoint is uploaded to the + * namenode. 
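The image promotion in rollFSImage above deletes the destination and retries when File.renameTo fails, since renameTo does not overwrite an existing file on Windows. A minimal standalone sketch of that fallback; the class and method names are illustrative.

    import java.io.File;
    import java.io.IOException;

    // Sketch of the delete-then-retry rename used when promoting the
    // checkpoint image; File.renameTo is not guaranteed to overwrite an
    // existing destination on every platform.
    public final class RenameSketch {
      static void promote(File checkpoint, File current) throws IOException {
        if (!checkpoint.renameTo(current)) {
          current.delete();                    // clear the old image first
          if (!checkpoint.renameTo(current)) { // second attempt
            throw new IOException("Unable to promote " + checkpoint
                + " to " + current);
          }
        }
      }
    }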
+ */ + void validateCheckpointUpload(CheckpointSignature sig) throws IOException { + if (ckptState != CheckpointStates.ROLLED_EDITS) { + throw new IOException("Namenode is not expecting an new image " + + ckptState); + } + // verify token + long modtime = getEditLog().getFsEditTime(); + if (sig.editsTime != modtime) { + throw new IOException("Namenode has an edit log with timestamp of " + + DATE_FORM.format(new Date(modtime)) + + " but new checkpoint was created using editlog " + + " with timestamp " + + DATE_FORM.format(new Date(sig.editsTime)) + + ". Checkpoint Aborted."); + } + sig.validateStorageInfo(this); + ckptState = FSImage.CheckpointStates.UPLOAD_START; + } + + /** + * This is called when a checkpoint upload finishes successfully. + */ + synchronized void checkpointUploadDone(MD5Hash checkpointImageMd5) { + checkpointImageDigest = checkpointImageMd5; + ckptState = CheckpointStates.UPLOAD_DONE; + } + + void close() throws IOException { + getEditLog().close(); + unlockAll(); + } + + /** + * Return the name of the image file. + */ + File getFsImageName() { + StorageDirectory sd = null; + for (Iterator it = + dirIterator(NameNodeDirType.IMAGE); it.hasNext();) + sd = it.next(); + return getImageFile(sd, NameNodeFile.IMAGE); + } + + public File getFsEditName() throws IOException { + return getEditLog().getFsEditName(); + } + + File getFsTimeName() { + StorageDirectory sd = null; + // NameNodeFile.TIME shoul be same on all directories + for (Iterator it = + dirIterator(); it.hasNext();) + sd = it.next(); + return getImageFile(sd, NameNodeFile.TIME); + } + + /** + * Return the name of the image file that is uploaded by periodic + * checkpointing. + */ + File[] getFsImageNameCheckpoint() { + ArrayList list = new ArrayList(); + for (Iterator it = + dirIterator(NameNodeDirType.IMAGE); it.hasNext();) { + list.add(getImageFile(it.next(), NameNodeFile.IMAGE_NEW)); + } + return list.toArray(new File[list.size()]); + } + + /** + * DatanodeImage is used to store persistent information + * about datanodes into the fsImage. + */ + static class DatanodeImage implements Writable { + DatanodeDescriptor node = new DatanodeDescriptor(); + + ///////////////////////////////////////////////// + // Writable + ///////////////////////////////////////////////// + /** + * Public method that serializes the information about a + * Datanode to be stored in the fsImage. + */ + public void write(DataOutput out) throws IOException { + new DatanodeID(node).write(out); + out.writeLong(node.getCapacity()); + out.writeLong(node.getRemaining()); + out.writeLong(node.getLastUpdate()); + out.writeInt(node.getXceiverCount()); + } + + /** + * Public method that reads a serialized Datanode + * from the fsImage. 
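DatanodeImage follows the org.apache.hadoop.io.Writable contract: write(DataOutput) and the readFields(DataInput) shown next must serialize the same fields in the same order. A minimal Writable along those lines, with an illustrative type name:

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;
    import org.apache.hadoop.io.Writable;

    // Minimal Writable: the field order in write() must match readFields().
    public class NodeStatsSketch implements Writable {
      private long capacity;
      private long remaining;
      private int xceiverCount;

      public void write(DataOutput out) throws IOException {
        out.writeLong(capacity);
        out.writeLong(remaining);
        out.writeInt(xceiverCount);
      }

      public void readFields(DataInput in) throws IOException {
        capacity = in.readLong();
        remaining = in.readLong();
        xceiverCount = in.readInt();
      }
    }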
+ */ + public void readFields(DataInput in) throws IOException { + DatanodeID id = new DatanodeID(); + id.readFields(in); + long capacity = in.readLong(); + long remaining = in.readLong(); + long lastUpdate = in.readLong(); + int xceiverCount = in.readInt(); + + // update the DatanodeDescriptor with the data we read in + node.updateRegInfo(id); + node.setStorageID(id.getStorageID()); + node.setCapacity(capacity); + node.setRemaining(remaining); + node.setLastUpdate(lastUpdate); + node.setXceiverCount(xceiverCount); + } + } + + protected void corruptPreUpgradeStorage(File rootDir) throws IOException { + File oldImageDir = new File(rootDir, "image"); + if (!oldImageDir.exists()) + if (!oldImageDir.mkdir()) + throw new IOException("Cannot create directory " + oldImageDir); + File oldImage = new File(oldImageDir, "fsimage"); + if (!oldImage.exists()) + // recreate old image file to let pre-upgrade versions fail + if (!oldImage.createNewFile()) + throw new IOException("Cannot create file " + oldImage); + RandomAccessFile oldFile = new RandomAccessFile(oldImage, "rws"); + // write new version into old image file + try { + writeCorruptedData(oldFile); + } finally { + oldFile.close(); + } + } + + private boolean getDistributedUpgradeState() { + return FSNamesystem.getFSNamesystem().getDistributedUpgradeState(); + } + + private int getDistributedUpgradeVersion() { + return FSNamesystem.getFSNamesystem().getDistributedUpgradeVersion(); + } + + private void setDistributedUpgradeState(boolean uState, int uVersion) { + FSNamesystem.getFSNamesystem().upgradeManager.setUpgradeState(uState, uVersion); + } + + private void verifyDistributedUpgradeProgress(StartupOption startOpt + ) throws IOException { + if(startOpt == StartupOption.ROLLBACK || startOpt == StartupOption.IMPORT) + return; + UpgradeManager um = FSNamesystem.getFSNamesystem().upgradeManager; + assert um != null : "FSNameSystem.upgradeManager is null."; + if(startOpt != StartupOption.UPGRADE) { + if(um.getUpgradeState()) + throw new IOException( + "\n Previous distributed upgrade was not completed. " + + "\n Please restart NameNode with -upgrade option."); + if(um.getDistributedUpgrades() != null) + throw new IOException("\n Distributed upgrade for NameNode version " + + um.getUpgradeVersion() + " to current LV " + FSConstants.LAYOUT_VERSION + + " is required.\n Please restart NameNode with -upgrade option."); + } + } + + private void initializeDistributedUpgrade() throws IOException { + UpgradeManagerNamenode um = FSNamesystem.getFSNamesystem().upgradeManager; + if(! 
um.initializeUpgrade()) + return; + // write new upgrade state into disk + FSNamesystem.getFSNamesystem().getFSImage().writeAll(); + NameNode.LOG.info("\n Distributed upgrade for NameNode version " + + um.getUpgradeVersion() + " to current LV " + + FSConstants.LAYOUT_VERSION + " is initialized."); + } + + static Collection getCheckpointDirs(Configuration conf, + String defaultName) { + Collection dirNames = conf.getStringCollection("fs.checkpoint.dir"); + if (dirNames.size() == 0 && defaultName != null) { + dirNames.add(defaultName); + } + Collection dirs = new ArrayList(dirNames.size()); + for(String name : dirNames) { + dirs.add(new File(name)); + } + return dirs; + } + + static Collection getCheckpointEditsDirs(Configuration conf, + String defaultName) { + Collection dirNames = + conf.getStringCollection("fs.checkpoint.edits.dir"); + if (dirNames.size() == 0 && defaultName != null) { + dirNames.add(defaultName); + } + Collection dirs = new ArrayList(dirNames.size()); + for(String name : dirNames) { + dirs.add(new File(name)); + } + return dirs; + } + + static private final UTF8 U_STR = new UTF8(); + static String readString(DataInputStream in) throws IOException { + U_STR.readFields(in); + return U_STR.toString(); + } + + + /** + * Reading the path from the image and converting it to byte[][] directly this + * saves us an array copy and conversions to and from String + * + * @param in + * @return the array each element of which is a byte[] representation of a + * path component + * @throws IOException + */ + public static byte[][] readPathComponents(DataInputStream in) + throws IOException { + U_STR.readFields(in); + return DFSUtil.bytes2byteArray(U_STR.getBytes(), U_STR.getLength(), + (byte) Path.SEPARATOR_CHAR); + + } + + static String readString_EmptyAsNull(DataInputStream in) throws IOException { + final String s = readString(in); + return s.isEmpty()? null: s; + } + + static byte[] readBytes(DataInputStream in) throws IOException { + U_STR.readFields(in); + int len = U_STR.getLength(); + byte[] bytes = new byte[len]; + System.arraycopy(U_STR.getBytes(), 0, bytes, 0, len); + return bytes; + } + + static void writeString(String str, DataOutputStream out) throws IOException { + U_STR.set(str); + U_STR.write(out); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSInodeInfo.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSInodeInfo.java new file mode 100644 index 0000000..c24a21c --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSInodeInfo.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +/** + * This interface is used used the pluggable block placement policy + * to expose a few characteristics of an Inode. 
+ */ +public interface FSInodeInfo { + + /** + * a string representation of an inode + * + * @return the full pathname (from root) that this inode represents + */ + + public String getFullPathName() ; +} + + diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java new file mode 100644 index 0000000..806f667 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -0,0 +1,6129 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import org.apache.commons.logging.*; + +import org.apache.hadoop.conf.*; +import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.protocol.*; +import org.apache.hadoop.hdfs.server.common.GenerationStamp; +import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.Storage; +import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; +import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; +import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.DecommissioningStatus; +import org.apache.hadoop.hdfs.server.namenode.DecommissionManager.Monitor; +import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean; +import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.PermissionChecker; +import org.apache.hadoop.security.UnixUserGroupInformation; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.*; +import org.apache.hadoop.metrics.util.MBeanUtil; +import org.apache.hadoop.net.CachedDNSToSwitchMapping; +import org.apache.hadoop.net.DNSToSwitchMapping; +import org.apache.hadoop.net.NetworkTopology; +import org.apache.hadoop.net.ScriptBasedMapping; +import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease; +import org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator; +import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; +import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; +import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; +import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException; +import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand; +import org.apache.hadoop.hdfs.util.GSet; +import org.apache.hadoop.hdfs.util.LightWeightGSet; +import org.apache.hadoop.fs.ContentSummary; +import 
org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.*; +import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.io.IOUtils; + +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileWriter; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.PrintWriter; +import java.io.DataOutputStream; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.*; +import java.util.Map.Entry; + +import javax.management.NotCompliantMBeanException; +import javax.management.ObjectName; +import javax.management.StandardMBean; +import javax.security.auth.login.LoginException; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +/** + * ************************************************ + * FSNamesystem does the actual bookkeeping work for the + * DataNode. + *

+ * It tracks several important tables. + *

+ * 1) valid fsname --> blocklist (kept on disk, logged) + * 2) Set of all valid blocks (inverted #1) + * 3) block --> machinelist (kept in memory, rebuilt dynamically from reports) + * 4) machine --> blocklist (inverted #2) + * 5) LRU cache of updated-heartbeat machines + * ************************************************* + */ +public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterStats { + public static final Log LOG = LogFactory.getLog(FSNamesystem.class); + public static int BLOCK_DELETION_INCREMENT = 1000; + public static final String AUDIT_FORMAT = + "ugi=%s\t" + // ugi + "ip=%s\t" + // remote IP + "cmd=%s\t" + // command + "src=%s\t" + // src path + "dst=%s\t" + // dst path (optional) + "perm=%s"; // permissions (optional) + + private static final ThreadLocal auditFormatter = + new ThreadLocal() { + protected Formatter initialValue() { + return new Formatter(new StringBuilder(AUDIT_FORMAT.length() * 4)); + } + }; + + private static final void logAuditEvent(UserGroupInformation ugi, + InetAddress addr, String cmd, String src, String dst, + HdfsFileStatus stat) { + final Formatter fmt = auditFormatter.get(); + ((StringBuilder) fmt.out()).setLength(0); + auditLog.info(fmt.format(AUDIT_FORMAT, ugi, addr, cmd, src, dst, + (stat == null) + ? null + : stat.getOwner() + ':' + stat.getGroup() + ':' + + stat.getPermission() + ).toString()); + + } + + public static final Log auditLog = LogFactory.getLog( + FSNamesystem.class.getName() + ".audit"); + + // Default initial capacity and load factor of map + public static final int DEFAULT_INITIAL_MAP_CAPACITY = 16; + public static final float DEFAULT_MAP_LOAD_FACTOR = 0.75f; + public static final int DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED = 500; + + private boolean isPermissionEnabled; + private boolean persistBlocks; + private UserGroupInformation fsOwner; + private String supergroup; + private PermissionStatus defaultPermission; + // FSNamesystemMetrics counter variables + private FSNamesystemMetrics myFSMetrics; + private long capacityTotal = 0L, capacityUsed = 0L, capacityRemaining = 0L; + private int totalLoad = 0; + + // number of datanodes that have reported (used during safemode only) + private int dnReporting = 0; + + volatile long pendingReplicationBlocksCount = 0L; + volatile long corruptReplicaBlocksCount = 0L; + volatile long underReplicatedBlocksCount = 0L; + volatile long scheduledReplicationBlocksCount = 0L; + volatile long excessBlocksCount = 0L; + volatile long pendingDeletionBlocksCount = 0L; + // + // Stores the correct file name hierarchy + // + public FSDirectory dir; + + // + // Mapping: Block -> { INode, datanodes, self ref } + // Updated only in response to client-sent information. + // + final BlocksMap blocksMap = new BlocksMap(DEFAULT_INITIAL_MAP_CAPACITY, + DEFAULT_MAP_LOAD_FACTOR); + + // + // Store blocks-->datanodedescriptor(s) map of corrupt replicas + // + public CorruptReplicasMap corruptReplicas = new CorruptReplicasMap(); + + /** + * Stores the datanode -> block map. + *

+ * Done by storing a set of {@link DatanodeDescriptor} objects, sorted by
+ * storage id. In order to keep the storage map consistent it tracks
+ * all storages ever registered with the namenode.
+ * A descriptor corresponding to a specific storage id can be
+ * <ul>
+ * <li>added to the map if it is a new storage id;</li>
+ * <li>updated with a new datanode started as a replacement for the old one
+ * with the same storage id; and</li>
+ * <li>removed if and only if an existing datanode is restarted to serve a
+ * different storage id.</li>
+ * </ul>
+ * The list of the {@link DatanodeDescriptor}s in the map is checkpointed
+ * in the namespace image file. Only the {@link DatanodeInfo} part is
+ * persistent, the list of blocks is restored from the datanode block
+ * reports.
+ *
+ * Mapping: StorageID -> DatanodeDescriptor + */ + NavigableMap datanodeMap = + new TreeMap(); + + // + // Keeps a Collection for every named machine containing + // blocks that have recently been invalidated and are thought to live + // on the machine in question. + // Mapping: StorageID -> ArrayList + // + private Map> recentInvalidateSets = + new TreeMap>(); + + // + // Keeps a TreeSet for every named node. Each treeset contains + // a list of the blocks that are "extra" at that location. We'll + // eventually remove these extras. + // Mapping: StorageID -> TreeSet + // + Map> excessReplicateMap = + new TreeMap>(); + + Random r = new Random(); + + /** + * Stores a set of DatanodeDescriptor objects. + * This is a subset of {@link #datanodeMap}, containing nodes that are + * considered alive. + * The {@link HeartbeatMonitor} periodically checks for outdated entries, + * and removes them from the list. + */ + ArrayList heartbeats = new ArrayList(); + + // + // Store set of Blocks that need to be replicated 1 or more times. + // We also store pending replication-orders. + // Set of: Block + // + private UnderReplicatedBlocks neededReplications = new UnderReplicatedBlocks(); + private PendingReplicationBlocks pendingReplications; + + // list of blocks that need to be checked for possible overreplication + private TreeSet overReplicatedBlocks = new TreeSet(); + + public LeaseManager leaseManager = new LeaseManager(this); + + // + // Threaded object that checks to see if we have been + // getting heartbeats from all clients. + // + Daemon hbthread = null; // HeartbeatMonitor thread + public Daemon lmthread = null; // LeaseMonitor thread + Daemon smmthread = null; // SafeModeMonitor thread + public Daemon replthread = null; // Replication thread + + private volatile boolean fsRunning = true; + long systemStart = 0; + + // The maximum number of replicates we should allow for a single block + private int maxReplication; + // How many outgoing replication streams a given node should have at one time + private int maxReplicationStreams; + // MIN_REPLICATION is how many copies we need in place or else we disallow the write + private int minReplication; + // Default replication + private int defaultReplication; + // How many entries are returned by getCorruptInodes() + int maxCorruptFilesReturned; + // heartbeatRecheckInterval is how often namenode checks for expired datanodes + private long heartbeatRecheckInterval; + // heartbeatExpireInterval is how long namenode waits for datanode to report + // heartbeat + private long heartbeatExpireInterval; + //replicationRecheckInterval is how often namenode checks for new replication work + private long replicationRecheckInterval; + // default block size of a file + private long defaultBlockSize = 0; + // allow appending to hdfs files + private boolean supportAppends = true; + + /** + * Last block index used for replication work. 
+ */ + private int replIndex = 0; + volatile private long missingBlocksInCurIter = 0; + volatile private long missingBlocksInPrevIter = 0; + + public static FSNamesystem fsNamesystemObject; + /** + * NameNode RPC address + */ + private InetSocketAddress nameNodeAddress = null; // TODO: name-node has this field, it should be removed here + private NameNode nameNode = null; + private SafeModeInfo safeMode; // safe mode information + private Host2NodesMap host2DataNodeMap = new Host2NodesMap(); + + // datanode networktoplogy + NetworkTopology clusterMap = new NetworkTopology(); + private DNSToSwitchMapping dnsToSwitchMapping; + + // for block replicas placement + BlockPlacementPolicy replicator; + + private HostsFileReader hostsReader; + private Daemon dnthread = null; + + private long maxFsObjects = 0; // maximum number of fs objects + + /** + * The global generation stamp for this file system. + */ + private final GenerationStamp generationStamp = new GenerationStamp(); + + // Ask Datanode only up to this many blocks to delete. + int blockInvalidateLimit = FSConstants.BLOCK_INVALIDATE_CHUNK; + + // precision of access times. + private long accessTimePrecision = 0; + + // lock to protect FSNamesystem. + private ReentrantReadWriteLock fsLock; + boolean hasRwLock = false; // shall we use read/write locks? + + // do not use manual override to exit safemode + volatile boolean manualOverrideSafeMode = false; + + // Permission violations only result in entries in the namenode log. + // The operation does not actually fail. + private boolean permissionAuditOnly = false; + + // set of absolute path names that cannot be deleted + Set neverDeletePaths = new TreeSet(); + + // dynamic loading of config files + private ConfigManager configManager; + + /** + * FSNamesystem constructor. + */ + FSNamesystem(NameNode nn, Configuration conf) throws IOException { + try { + initialize(nn, conf); + } catch (IOException e) { + LOG.error(getClass().getSimpleName() + " initialization failed.", e); + close(); + throw e; + } + } + + /** + * Initialize FSNamesystem. + */ + private void initialize(NameNode nn, Configuration conf) throws IOException { + this.systemStart = now(); + this.fsLock = new ReentrantReadWriteLock(true); // fair + configManager = new ConfigManager(this, conf); + setConfigurationParameters(conf); + + // This can be null if two ports are running. Should not rely on the value. + // The getter for this is deprecated + this.nameNodeAddress = nn.getNameNodeAddress(); + this.nameNode = nn; + this.registerMBean(conf); // register the MBean for the FSNamesystemStutus + this.dir = new FSDirectory(this, conf); + StartupOption startOpt = NameNode.getStartupOption(conf); + this.dir.loadFSImage(getNamespaceDirs(conf), + getNamespaceEditsDirs(conf), startOpt); + long timeTakenToLoadFSImage = now() - systemStart; + LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs"); + NameNode.getNameNodeMetrics().fsImageLoadTime.set( + (int) timeTakenToLoadFSImage); + this.safeMode = new SafeModeInfo(conf); + setBlockTotal(); + if ("true".equals(conf.get("dfs.namenode.initialize.counting"))) { + LOG.info("Start counting items in the file tree"); + dir.rootDir.countItems(); + LOG.info("Finish counting items in the file tree"); + INodeDirectory.ItemCounts counts = dir.rootDir.getItemCounts(); + LOG.info("Counting result: " + + counts.numDirectories + " directories, " + + counts.numFiles + " files, and " + + counts.numBlocks + " blocks in file tree. 
" + + blocksMap.size() + " blocks in block map."); + } else { + LOG.info("Skip counting items in the file tree"); + } + pendingReplications = new PendingReplicationBlocks( + conf.getInt("dfs.replication.pending.timeout.sec", + -1) * 1000L); + this.hbthread = new Daemon(new HeartbeatMonitor()); + this.lmthread = new Daemon(leaseManager.new Monitor()); + this.replthread = new Daemon(new ReplicationMonitor()); + hbthread.start(); + lmthread.start(); + replthread.start(); + + this.hostsReader = new HostsFileReader(conf.get("dfs.hosts", ""), + conf.get("dfs.hosts.exclude", "")); + this.dnthread = new Daemon(new DecommissionManager(this).new Monitor( + conf.getInt("dfs.namenode.decommission.interval", 30), + conf.getInt("dfs.namenode.decommission.nodes.per.interval", 5))); + dnthread.start(); + + this.dnsToSwitchMapping = ReflectionUtils.newInstance( + conf.getClass("topology.node.switch.mapping.impl", ScriptBasedMapping.class, + DNSToSwitchMapping.class), conf); + + /* If the dns to swith mapping supports cache, resolve network + * locations of those hosts in the include list, + * and store the mapping in the cache; so future calls to resolve + * will be fast. + */ + if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) { + dnsToSwitchMapping.resolve(new ArrayList(hostsReader.getHosts())); + } + } + + public static Collection getNamespaceDirs(Configuration conf) { + Collection dirNames = conf.getStringCollection("dfs.name.dir"); + if (dirNames.isEmpty()) { + dirNames.add("/tmp/hadoop/dfs/name"); + } + Collection dirs = new ArrayList(dirNames.size()); + for (String name : dirNames) { + dirs.add(new File(name)); + } + return dirs; + } + + public static Collection getNamespaceEditsDirs(Configuration conf) { + Collection editsDirNames = + conf.getStringCollection("dfs.name.edits.dir"); + if (editsDirNames.isEmpty()) { + editsDirNames.add("/tmp/hadoop/dfs/name"); + } + Collection dirs = new ArrayList(editsDirNames.size()); + for (String name : editsDirNames) { + dirs.add(new File(name)); + } + return dirs; + } + + /** + * dirs is a list of directories where the filesystem directory state + * is stored + */ + FSNamesystem(FSImage fsImage, Configuration conf) throws IOException { + this.fsLock = new ReentrantReadWriteLock(); + setConfigurationParameters(conf); + this.dir = new FSDirectory(fsImage, this, conf); + } + + // utility methods to acquire and release read lock and write lock + // If hasRwLock is false, then a readLock actually turns into write lock. 
+ + void readLock() { + if (this.hasRwLock) { + this.fsLock.readLock().lock(); + } else { + writeLock(); + } + } + + void readUnlock() { + if (this.hasRwLock) { + this.fsLock.readLock().unlock(); + } else { + writeUnlock(); + } + } + + void writeLock() { + this.fsLock.writeLock().lock(); + } + + void writeUnlock() { + this.fsLock.writeLock().unlock(); + } + + boolean hasWriteLock() { + return this.fsLock.isWriteLockedByCurrentThread(); + } + + /** + * Initializes some of the members from configuration + */ + private void setConfigurationParameters(Configuration conf) + throws IOException { + fsNamesystemObject = this; + + if (conf.getBoolean("hadoop.disable.shell", false)) { + conf.setStrings(UnixUserGroupInformation.UGI_PROPERTY_NAME, new String[]{"hadoop", "hadoop"}); + Shell.setDisabled(true); + } + + try { + fsOwner = UnixUserGroupInformation.login(conf); + } catch (LoginException e) { + throw new IOException(StringUtils.stringifyException(e)); + } + + LOG.info("fsOwner=" + fsOwner); + + this.hasRwLock = conf.getBoolean("dfs.rwlock", false); + this.supergroup = conf.get("dfs.permissions.supergroup", "supergroup"); + this.isPermissionEnabled = conf.getBoolean("dfs.permissions", true); + this.setPersistBlocks(conf.getBoolean("dfs.persist.blocks", false)); + LOG.info("supergroup=" + supergroup); + LOG.info("isPermissionEnabled=" + isPermissionEnabled); + short filePermission = (short) conf.getInt("dfs.upgrade.permission", 0777); + this.defaultPermission = PermissionStatus.createImmutable( + fsOwner.getUserName(), supergroup, new FsPermission(filePermission)); + + + this.replicator = BlockPlacementPolicy.getInstance( + conf, + this, + clusterMap); + this.maxCorruptFilesReturned = conf.getInt("dfs.corruptfilesreturned.max", + DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED); + this.defaultReplication = conf.getInt("dfs.replication", 3); + this.maxReplication = conf.getInt("dfs.replication.max", 512); + this.minReplication = conf.getInt("dfs.replication.min", 1); + if (minReplication <= 0) { + throw new IOException( + "Unexpected configuration parameters: dfs.replication.min = " + + minReplication + + " must be greater than 0"); + } + if (maxReplication >= (int) Short.MAX_VALUE) { + throw new IOException( + "Unexpected configuration parameters: dfs.replication.max = " + + maxReplication + " must be less than " + (Short.MAX_VALUE)); + } + if (maxReplication < minReplication) { + throw new IOException( + "Unexpected configuration parameters: dfs.replication.min = " + + minReplication + + " must be less than dfs.replication.max = " + + maxReplication); + } + this.maxReplicationStreams = conf.getInt("dfs.max-repl-streams", 2); + long heartbeatInterval = conf.getLong("dfs.heartbeat.interval", 3) * 1000; + this.heartbeatRecheckInterval = conf.getInt( + "heartbeat.recheck.interval", 5 * 60 * 1000); // 5 minutes + this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval + + 10 * heartbeatInterval; + this.replicationRecheckInterval = + conf.getInt("dfs.replication.interval", 3) * 1000L; + this.defaultBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE); + this.maxFsObjects = conf.getLong("dfs.max.objects", 0); + this.blockInvalidateLimit = Math.max(this.blockInvalidateLimit, + 20 * (int) (heartbeatInterval / 1000)); + this.accessTimePrecision = conf.getLong("dfs.access.time.precision", 0); + this.supportAppends = conf.getBoolean("dfs.support.append", false); + + long editPreallocateSize = conf.getLong("dfs.edit.preallocate.size", + HdfsConstants.DEFAULT_EDIT_PREALLOCATE_SIZE); + 
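For the heartbeat settings read above, the expiry window is 2 * heartbeat.recheck.interval + 10 * dfs.heartbeat.interval, so with the defaults in this code (5 minutes and 3 seconds) a datanode is treated as dead after 630000 ms, about 10.5 minutes. A small worked sketch with an illustrative class name:

    // Worked example of the heartbeat expiry formula used above:
    // heartbeatExpireInterval = 2 * recheckInterval + 10 * heartbeatInterval.
    public class HeartbeatExpirySketch {
      static long expireIntervalMs(long recheckMs, long heartbeatMs) {
        return 2 * recheckMs + 10 * heartbeatMs;
      }

      public static void main(String[] args) {
        long recheckMs = 5 * 60 * 1000;  // heartbeat.recheck.interval default
        long heartbeatMs = 3 * 1000;     // dfs.heartbeat.interval default (3 s)
        System.out.println(expireIntervalMs(recheckMs, heartbeatMs) + " ms"); // 630000 ms
      }
    }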
FSEditLog.setPreallocateSize(editPreallocateSize); + int editBufferSize = conf.getInt("dfs.edit.buffer.size", + HdfsConstants.DEFAULT_EDIT_BUFFER_SIZE); + FSEditLog.setBufferCapacity(editBufferSize); + int maxBufferedTransactions = conf.getInt("dfs.max.buffered.transactions", + HdfsConstants.DEFAULT_MAX_BUFFERED_TRANSACTIONS); + FSEditLog.setMaxBufferedTransactions(maxBufferedTransactions); + + // Permission violations are logged in the namenode logs. The operation + // does not fail. + this.permissionAuditOnly = conf.getBoolean("dfs.permissions.audit.log", false); + + // set soft and hard lease period + long hardLeaseLimit = conf.getLong(FSConstants.DFS_HARD_LEASE_KEY, + FSConstants.LEASE_HARDLIMIT_PERIOD); + long softLeaseLimit = conf.getLong(FSConstants.DFS_SOFT_LEASE_KEY, + FSConstants.LEASE_SOFTLIMIT_PERIOD); + this.leaseManager.setLeasePeriod( + Math.min(hardLeaseLimit, softLeaseLimit), hardLeaseLimit); + } + + /** + * Return the default path permission when upgrading from releases with no + * permissions (<=0.15) to releases with permissions (>=0.16) + */ + protected PermissionStatus getUpgradePermission() { + return defaultPermission; + } + + /** + * Return the FSNamesystem object + */ + public static FSNamesystem getFSNamesystem() { + return fsNamesystemObject; + } + + NamespaceInfo getNamespaceInfo() { + writeLock(); + try { + return new NamespaceInfo(dir.fsImage.getNamespaceID(), + dir.fsImage.getCTime(), + getDistributedUpgradeVersion()); + } finally { + writeUnlock(); + } + } + + /** + * Close down this file system manager. + * Causes heartbeat and lease daemons to stop; waits briefly for + * them to finish, but a short timeout returns control back to caller. + */ + public void close() { + fsRunning = false; + try { + if (pendingReplications != null) { + pendingReplications.stop(); + } + if (hbthread != null) { + hbthread.interrupt(); + } + if (replthread != null) { + replthread.interrupt(); + } + if (dnthread != null) { + dnthread.interrupt(); + } + if (smmthread != null) { + smmthread.interrupt(); + } + } catch (Exception e) { + LOG.warn("Exception shutting down FSNamesystem", e); + } finally { + // using finally to ensure we also wait for lease daemon + try { + if (lmthread != null) { + lmthread.interrupt(); + lmthread.join(3000); + } + dir.close(); + blocksMap.close(); + } catch (InterruptedException ie) { + } catch (IOException ie) { + LOG.error("Error closing FSDirectory", ie); + IOUtils.cleanup(LOG, dir); + } + } + } + + /** + * Is this name system running? + */ + boolean isRunning() { + return fsRunning; + } + + /** + * Dump all metadata into specified file + */ + void metaSave(String filename) throws IOException { + readLock(); + try { + checkSuperuserPrivilege(); + File file = new File(System.getProperty("hadoop.log.dir"), + filename); + PrintWriter out = new PrintWriter(new BufferedWriter( + new FileWriter(file, true))); + + + // + // Dump contents of neededReplication + // + synchronized (neededReplications) { + out.println("Metasave: Blocks waiting for replication: " + + neededReplications.size()); + for (Block block : neededReplications) { + List containingNodes = + new ArrayList(); + NumberReplicas numReplicas = new NumberReplicas(); + // source node returned is not used + chooseSourceDatanode(block, containingNodes, numReplicas); + int usableReplicas = numReplicas.liveReplicas() + + numReplicas.decommissionedReplicas(); + // l: == live:, d: == decommissioned c: == corrupt e: == excess + out.print(block + ((usableReplicas > 0) ? 
"" : " MISSING") + + " (replicas:" + + " l: " + numReplicas.liveReplicas() + + " d: " + numReplicas.decommissionedReplicas() + + " c: " + numReplicas.corruptReplicas() + + " e: " + numReplicas.excessReplicas() + ") "); + + Collection corruptNodes = + corruptReplicas.getNodes(block); + + for (Iterator jt = blocksMap.nodeIterator(block); + jt.hasNext();) { + DatanodeDescriptor node = jt.next(); + String state = ""; + if (corruptNodes != null && corruptNodes.contains(node)) { + state = "(corrupt)"; + } else if (node.isDecommissioned() || + node.isDecommissionInProgress()) { + state = "(decommissioned)"; + } + out.print(" " + node + state + " : "); + } + out.println(""); + } + } + + // + // Dump blocks from pendingReplication + // + pendingReplications.metaSave(out); + + // + // Dump blocks that are waiting to be deleted + // + dumpRecentInvalidateSets(out); + + // + // Dump all datanodes + // + datanodeDump(out); + + out.flush(); + out.close(); + } finally { + readUnlock(); + } + } + + long getDefaultBlockSize() { + return defaultBlockSize; + } + + long getAccessTimePrecision() { + return accessTimePrecision; + } + + private boolean isAccessTimeSupported() { + return accessTimePrecision > 0; + } + + /* get replication factor of a block */ + + private int getReplication(Block block) { + INodeFile fileINode = blocksMap.getINode(block); + if (fileINode == null) { // block does not belong to any file + return 0; + } + assert !fileINode.isDirectory() : "Block cannot belong to a directory."; + return fileINode.getReplication(); + } + + /* updates a block in under replication queue */ + + void updateNeededReplications(Block block, + int curReplicasDelta, int expectedReplicasDelta) { + writeLock(); + try { + NumberReplicas repl = countNodes(block); + int curExpectedReplicas = getReplication(block); + neededReplications.update(block, + repl.liveReplicas(), + repl.decommissionedReplicas(), + curExpectedReplicas, + curReplicasDelta, expectedReplicasDelta); + } finally { + writeUnlock(); + } + } + + ///////////////////////////////////////////////////////// + // + // These methods are called by secondary namenodes + // + ///////////////////////////////////////////////////////// + + /** + * return a list of blocks & their locations on datanode whose + * total size is size + * + * @param datanode on which blocks are located + * @param size total size of blocks + */ + BlocksWithLocations getBlocks(DatanodeID datanode, long size) + throws IOException { + readLock(); + try { + checkSuperuserPrivilege(); + + DatanodeDescriptor node = getDatanode(datanode); + if (node == null) { + NameNode.stateChangeLog.warn("BLOCK* NameSystem.getBlocks: " + + "Asking for blocks from an unrecorded node " + datanode.getName()); + throw new IllegalArgumentException( + "Unexpected exception. 
Got getBlocks message for datanode " + + datanode.getName() + ", but there is no info for it"); + } + + int numBlocks = node.numBlocks(); + if (numBlocks == 0) { + return new BlocksWithLocations(new BlockWithLocations[0]); + } + Iterator iter = node.getBlockIterator(); + int startBlock = r.nextInt(numBlocks); // starting from a random block + // skip blocks + for (int i = 0; i < startBlock; i++) { + iter.next(); + } + List results = new ArrayList(); + long totalSize = 0; + while (totalSize < size && iter.hasNext()) { + totalSize += addBlock(iter.next(), results); + } + if (totalSize < size) { + iter = node.getBlockIterator(); // start from the beginning + for (int i = 0; i < startBlock && totalSize < size; i++) { + totalSize += addBlock(iter.next(), results); + } + } + + return new BlocksWithLocations( + results.toArray(new BlockWithLocations[results.size()])); + } finally { + readUnlock(); + } + } + + /** + * Get all valid locations of the block & add the block to results + * return the length of the added block; 0 if the block is not added + */ + private long addBlock(Block block, List results) { + ArrayList machineSet = + new ArrayList(blocksMap.numNodes(block)); + for (Iterator it = + blocksMap.nodeIterator(block); it.hasNext();) { + String storageID = it.next().getStorageID(); + // filter invalidate replicas + Collection blocks = recentInvalidateSets.get(storageID); + if (blocks == null || !blocks.contains(block)) { + machineSet.add(storageID); + } + } + if (machineSet.size() == 0) { + return 0; + } else { + results.add(new BlockWithLocations(block, + machineSet.toArray(new String[machineSet.size()]))); + return block.getNumBytes(); + } + } + + ///////////////////////////////////////////////////////// + // + // These methods are called by HadoopFS clients + // + ///////////////////////////////////////////////////////// + + /** + * Set permissions for an existing file. + * + * @throws IOException + */ + public void setPermission(String src, FsPermission permission + ) throws IOException { + writeLock(); + try { + if (isInSafeMode()) { + throw new SafeModeException("Cannot set permission for " + src, safeMode); + } + checkOwner(src); + dir.setPermission(src, permission); + } finally { + writeUnlock(); + } + getEditLog().logSync(); + if (auditLog.isInfoEnabled()) { + final HdfsFileStatus stat = dir.getHdfsFileInfo(src); + logAuditEvent(UserGroupInformation.getCurrentUGI(), + Server.getRemoteIp(), + "setPermission", src, null, stat); + } + } + + /** + * Set owner for an existing file. + * + * @throws IOException + */ + public void setOwner(String src, String username, String group + ) throws IOException { + writeLock(); + try { + if (isInSafeMode()) { + throw new SafeModeException("Cannot set permission for " + src, safeMode); + } + FSPermissionChecker pc = checkOwner(src); + if (!pc.isSuper) { + if (username != null && !pc.user.equals(username)) { + throw new AccessControlException("Non-super user cannot change owner."); + } + if (group != null && !pc.containsGroup(group)) { + throw new AccessControlException("User does not belong to " + group + + " ."); + } + } + dir.setOwner(src, username, group); + } finally { + writeUnlock(); + } + getEditLog().logSync(); + if (auditLog.isInfoEnabled()) { + final HdfsFileStatus stat = dir.getHdfsFileInfo(src); + logAuditEvent(UserGroupInformation.getCurrentUGI(), + Server.getRemoteIp(), + "setOwner", src, null, stat); + } + } + + /** + * Get block locations within the specified range. 
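getBlocks above returns a size-bounded list of a datanode's blocks by starting at a random block index and wrapping around to the beginning if the target size is not reached, so repeated calls do not keep returning the same prefix. A generic sketch of that traversal over an arbitrary list; the names and the count-based budget are illustrative simplifications.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Random;

    // Sketch of the random-start, wrap-around scan used by getBlocks:
    // begin at a random index and take items until the budget is met.
    final class RandomStartScanSketch {
      static <T> List<T> pick(List<T> items, int maxItems, Random rnd) {
        List<T> picked = new ArrayList<T>();
        if (items.isEmpty() || maxItems <= 0) {
          return picked;
        }
        int start = rnd.nextInt(items.size());
        for (int i = 0; i < items.size() && picked.size() < maxItems; i++) {
          picked.add(items.get((start + i) % items.size()));
        }
        return picked;
      }
    }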
+ * + * @see #getBlockLocations(String, long, long) + */ + LocatedBlocks getBlockLocations(String clientMachine, String src, + long offset, long length) throws IOException { + if (isPermissionEnabled) { + checkPathAccess(src, FsAction.READ); + } + + LocatedBlocks blocks = getBlockLocations(src, offset, length, true); + if (blocks != null) { + //sort the blocks + DatanodeDescriptor client = host2DataNodeMap.getDatanodeByHost( + clientMachine); + for (LocatedBlock b : blocks.getLocatedBlocks()) { + clusterMap.pseudoSortByDistance(client, b.getLocations()); + } + } + return blocks; + } + + /** + * Get block locations within the specified range. + * + * @see ClientProtocol#getBlockLocations(String, long, long) + */ + public LocatedBlocks getBlockLocations(String src, long offset, long length + ) throws IOException { + return getBlockLocations(src, offset, length, false); + } + + /** + * Get block locations within the specified range. + * + * @see ClientProtocol#getBlockLocations(String, long, long) + */ + public LocatedBlocks getBlockLocations(String src, long offset, long length, + boolean doAccessTime) + throws IOException { + if (offset < 0) { + throw new IOException("Negative offset is not supported. File: " + src); + } + if (length < 0) { + throw new IOException("Negative length is not supported. File: " + src); + } + final LocatedBlocks ret = getBlockLocationsInternal(src, + offset, length, Integer.MAX_VALUE, doAccessTime); + if (auditLog.isInfoEnabled()) { + logAuditEvent(UserGroupInformation.getCurrentUGI(), + Server.getRemoteIp(), + "open", src, null, null); + } + return ret; + } + + private LocatedBlocks getBlockLocationsInternal(String src, + long offset, + long length, + int nrBlocksToReturn, + boolean doAccessTime) + throws IOException { + boolean doneSetTimes = false; + + for (int attempt = 0; attempt < 2; attempt++) { + if (attempt == 0) { // first attempt is with readlock + readLock(); + } else { // second attempt is with write lock + writeLock(); // writelock is needed to set accesstime + } + try { + long now = now(); + INodeFile inode = dir.getFileINode(src); + if (inode == null) { + return null; + } + if (doAccessTime && isAccessTimeSupported()) { + if (now <= inode.getAccessTime() + getAccessTimePrecision()) { + // if we have to set access time but we only have the readlock, then + // restart this entire operation with the writeLock. + if (attempt == 0) { + continue; + } + } + if (isInSafeMode()) { + throw new SafeModeException("Cannot set accesstimes for " + src, safeMode); + } + dir.setTimes(src, inode, -1, now, false); + doneSetTimes = true; // successful setTime call + } + return getBlockLocationsInternal(inode, offset, length, nrBlocksToReturn); + } finally { + if (attempt == 0) { + readUnlock(); + } else { + writeUnlock(); + } + if (doneSetTimes) { + getEditLog().logSyncIfNeeded(); // sync if too many transactions in buffer + } + } + } + return null; // can never reach here + } + + LocatedBlocks getBlockLocationsInternal(INodeFile inode, + long offset, long length, int nrBlocksToReturn) + throws IOException { + readLock(); + try { + Block[] blocks = inode.getBlocks(); + if (blocks == null) { + return null; + } + if (blocks.length == 0) { + return inode.createLocatedBlocks(new ArrayList(blocks.length)); + } + List results; + results = new ArrayList(blocks.length); + + int curBlk = 0; + long curPos = 0, blkSize = 0; + int nrBlocks = (blocks[0].getNumBytes() == 0) ? 
0 : blocks.length; + for (curBlk = 0; curBlk < nrBlocks; curBlk++) { + blkSize = blocks[curBlk].getNumBytes(); + assert blkSize > 0 : "Block of size 0"; + if (curPos + blkSize > offset) { + break; + } + curPos += blkSize; + } + + if (nrBlocks > 0 && curBlk == nrBlocks) // offset >= end of file + { + return null; + } + + long endOff = offset + length; + + do { + // get block locations + int numNodes = blocksMap.numNodes(blocks[curBlk]); + int numCorruptNodes = countNodes(blocks[curBlk]).corruptReplicas(); + int numCorruptReplicas = corruptReplicas.numCorruptReplicas(blocks[curBlk]); + if (numCorruptNodes != numCorruptReplicas) { + LOG.warn("Inconsistent number of corrupt replicas for " + + blocks[curBlk] + "blockMap has " + numCorruptNodes + + " but corrupt replicas map has " + numCorruptReplicas); + } + boolean blockCorrupt = (numCorruptNodes == numNodes); + int numMachineSet = blockCorrupt ? numNodes : + (numNodes - numCorruptNodes); + DatanodeDescriptor[] machineSet = new DatanodeDescriptor[numMachineSet]; + if (numMachineSet > 0) { + numNodes = 0; + for (Iterator it = + blocksMap.nodeIterator(blocks[curBlk]); it.hasNext();) { + DatanodeDescriptor dn = it.next(); + boolean replicaCorrupt = corruptReplicas.isReplicaCorrupt(blocks[curBlk], dn); + if (blockCorrupt || (!blockCorrupt && !replicaCorrupt)) { + machineSet[numNodes++] = dn; + } + } + } + results.add(new LocatedBlock(blocks[curBlk], machineSet, curPos, + blockCorrupt)); + curPos += blocks[curBlk].getNumBytes(); + curBlk++; + } while (curPos < endOff + && curBlk < blocks.length + && results.size() < nrBlocksToReturn); + + return inode.createLocatedBlocks(results); + } finally { + readUnlock(); + } + } + + /** + * Moves all the blocks from srcs and appends them to trg + * To avoid rollbacks we will verify validitity of ALL of the args + * before we start actual move. + * @param target + * @param srcs + * @throws IOException + */ + public void concat(String target, String [] srcs) + throws IOException { + if(FSNamesystem.LOG.isDebugEnabled()) { + FSNamesystem.LOG.debug("concat " + Arrays.toString(srcs) + + " to " + target); + } + // check safe mode + if (isInSafeMode()) { + throw new SafeModeException("concat: cannot concat " + target, safeMode); + } + + // verify args + if(target.isEmpty()) { + throw new IllegalArgumentException("concat: trg file name is empty"); + } + if(srcs == null || srcs.length == 0) { + throw new IllegalArgumentException("concat: srcs list is empty or null"); + } + + // currently we require all the files to be in the same dir + String trgParent = + target.substring(0, target.lastIndexOf(Path.SEPARATOR_CHAR)); + for(String s : srcs) { + String srcParent = s.substring(0, s.lastIndexOf(Path.SEPARATOR_CHAR)); + if(! 
srcParent.equals(trgParent)) { + throw new IllegalArgumentException + ("concat: srcs and target shoould be in same dir"); + } + } + + writeLock(); + try { + // write permission for the target + if (isPermissionEnabled) { + checkPathAccess(target, FsAction.WRITE); + + // and srcs + for(String aSrc: srcs) { + checkPathAccess(aSrc, FsAction.READ); // read the file + checkParentAccess(aSrc, FsAction.WRITE); // for delete + } + } + + + // to make sure no two files are the same + Set si = new HashSet(); + + // we put the following prerequisite for the operation + // replication and blocks sizes should be the same for ALL the blocks + // check the target + INode inode = dir.getFileINode(target); + + if(inode == null) { + throw new IllegalArgumentException("concat: trg file doesn't exist"); + } + if(inode.isUnderConstruction()) { + throw new IllegalArgumentException("concat: trg file is uner construction"); + } + + INodeFile trgInode = (INodeFile) inode; + + // per design trg shouldn't be empty and all the blocks same size + if(trgInode.blocks.length == 0) { + throw new IllegalArgumentException("concat: "+ target + " file is empty"); + } + + long blockSize = trgInode.getPreferredBlockSize(); + + // check the end block to be full + if(blockSize != trgInode.blocks[trgInode.blocks.length-1].getNumBytes()) { + throw new IllegalArgumentException(target + " file " + target + + " blocks size should be the same. PreferredBlockSize is " + + blockSize + " " + trgInode.blocks[trgInode.blocks.length-1].getNumBytes()); + } + + si.add(trgInode); + short repl = trgInode.getReplication(); + + // now check the srcs + boolean endSrc = false; // final src file doesn't have to have full end block + for(int i=0; i= 0 && srcInode.blocks[idx].getNumBytes() != blockSize) { + throw new IllegalArgumentException("concat: blocks sizes of " + + src + " and " + target + " should all be the same"); + } + + si.add(srcInode); + } + + // make sure no two files are the same + if(si.size() < srcs.length+1) { // trg + srcs + // it means at least two files are the same + throw new IllegalArgumentException("at least two files are the same"); + } + + if(NameNode.stateChangeLog.isDebugEnabled()) { + NameNode.stateChangeLog.debug("DIR* NameSystem.concat: " + + Arrays.toString(srcs) + " to " + target); + } + + dir.concatInternal(target,srcs); + } finally { + writeUnlock(); + } + getEditLog().logSync(); + + + if (auditLog.isInfoEnabled()) { + final HdfsFileStatus stat = dir.getHdfsFileInfo(target); + logAuditEvent(UserGroupInformation.getCurrentUGI(), + Server.getRemoteIp(), + "concat", Arrays.toString(srcs), target, stat); + } + + } + + /** + * stores the modification and access time for this inode. + * The access time is precise upto an hour. The transaction, if needed, is + * written to the edits log but is not flushed. + */ + public void setTimes(String src, long mtime, long atime) throws IOException { + setTimesInternal(src, mtime, atime); + getEditLog().logSync(); + } + + private void setTimesInternal(String src, long mtime, long atime) + throws IOException { + writeLock(); + try { + if (!isAccessTimeSupported() && atime != -1) { + throw new IOException("Access time for hdfs is not configured. " + + " Please set dfs.support.accessTime configuration parameter."); + } + if (isInSafeMode()) { + throw new SafeModeException("Cannot set accesstimes for " + src, safeMode); + } + // + // The caller needs to have write access to set access & modification times. 
+ if (isPermissionEnabled) { + checkPathAccess(src, FsAction.WRITE); + } + INodeFile inode = dir.getFileINode(src); + if (inode != null) { + dir.setTimes(src, inode, mtime, atime, true); + if (auditLog.isInfoEnabled()) { + final HdfsFileStatus stat = dir.getHdfsFileInfo(src); + logAuditEvent(UserGroupInformation.getCurrentUGI(), + Server.getRemoteIp(), + "setTimes", src, null, stat); + } + } else { + throw new FileNotFoundException("File " + src + " does not exist."); + } + } finally { + writeUnlock(); + } + } + + /** + * Set replication for an existing file. + *

+ * The NameNode sets new replication and schedules either replication of + * under-replicated data blocks or removal of the eccessive block copies + * if the blocks are over-replicated. + * + * @param src file name + * @param replication new replication + * @return true if successful; + * false if file does not exist or is a directory + * @see ClientProtocol#setReplication(String, short) + */ + public boolean setReplication(String src, short replication) + throws IOException { + boolean status = setReplicationInternal(src, replication); + getEditLog().logSync(); + if (status && auditLog.isInfoEnabled()) { + logAuditEvent(UserGroupInformation.getCurrentUGI(), + Server.getRemoteIp(), + "setReplication", src, null, null); + } + return status; + } + + private boolean setReplicationInternal(String src, + short replication + ) throws IOException { + writeLock(); + try { + if (isInSafeMode()) { + throw new SafeModeException("Cannot set replication for " + src, safeMode); + } + verifyReplication(src, replication, null); + if (isPermissionEnabled) { + checkPathAccess(src, FsAction.WRITE); + } + + int[] oldReplication = new int[1]; + Block[] fileBlocks; + fileBlocks = dir.setReplication(src, replication, oldReplication); + if (fileBlocks == null) // file not found or is a directory + { + return false; + } + int oldRepl = oldReplication[0]; + if (oldRepl == replication) // the same replication + { + return true; + } + + // update needReplication priority queues + for (int idx = 0; idx < fileBlocks.length; idx++) { + updateNeededReplications(fileBlocks[idx], 0, replication - oldRepl); + } + + if (oldRepl > replication) { + // old replication > the new one; need to remove copies + LOG.info("Reducing replication for file " + src + + ". New replication is " + replication); + for (int idx = 0; idx < fileBlocks.length; idx++) { + overReplicatedBlocks.add(fileBlocks[idx]); + } + } else { // replication factor is increased + LOG.info("Increasing replication for file " + src + + ". New replication is " + replication); + } + return true; + } finally { + writeUnlock(); + } + } + + long getPreferredBlockSize(String filename) throws IOException { + if (isPermissionEnabled) { + checkTraverse(filename); + } + return dir.getPreferredBlockSize(filename); + } + + /** + * Check whether the replication parameter is within the range + * determined by system configuration. + */ + private void verifyReplication(String src, + short replication, + String clientName + ) throws IOException { + String text = "file " + src + + ((clientName != null) ? " on client " + clientName : "") + + ".\n" + + "Requested replication " + replication; + + if (replication > maxReplication) { + throw new IOException(text + " exceeds maximum " + maxReplication); + } + + if (replication < minReplication) { + throw new IOException( + text + " is less than the required minimum " + minReplication); + } + } + + /** + * Create a new file entry in the namespace. + * + * @throws IOException if file name is invalid + * {@link FSDirectory#isValidToCreate(String)}. 
+ * @see ClientProtocol#create(String, FsPermission, String, boolean, short, long) + */ + void startFile(String src, PermissionStatus permissions, + String holder, String clientMachine, + boolean overwrite, short replication, long blockSize + ) throws IOException { + startFileInternal(src, permissions, holder, clientMachine, overwrite, false, + replication, blockSize); + getEditLog().logSync(); + if (auditLog.isInfoEnabled()) { + final HdfsFileStatus stat = dir.getHdfsFileInfo(src); + logAuditEvent(UserGroupInformation.getCurrentUGI(), + Server.getRemoteIp(), + "create", src, null, stat); + } + } + + private void startFileInternal(String src, + PermissionStatus permissions, + String holder, + String clientMachine, + boolean overwrite, + boolean append, + short replication, + long blockSize + ) throws IOException { + writeLock(); + try { + if (NameNode.stateChangeLog.isDebugEnabled()) { + NameNode.stateChangeLog.debug("DIR* NameSystem.startFile: src=" + src + + ", holder=" + holder + + ", clientMachine=" + clientMachine + + ", replication=" + replication + + ", overwrite=" + overwrite + + ", append=" + append); + } + + if (isInSafeMode()) { + throw new SafeModeException("Cannot create file" + src, safeMode); + } + if (!DFSUtil.isValidName(src)) { + throw new IOException("Invalid file name: " + src); + } + + // Verify that the destination does not exist as a directory already. + boolean pathExists = dir.exists(src); + if (pathExists && dir.isDir(src)) { + throw new IOException("Cannot create file " + src + "; already exists as a directory."); + } + + if (isPermissionEnabled) { + if (append || (overwrite && pathExists)) { + checkPathAccess(src, FsAction.WRITE); + } else { + checkAncestorAccess(src, FsAction.WRITE); + } + } + + try { + INode myFile = dir.getFileINode(src); + if (myFile != null && myFile.isUnderConstruction()) { + INodeFileUnderConstruction pendingFile = (INodeFileUnderConstruction) myFile; + // + // If the file is under construction , then it must be in our + // leases. Find the appropriate lease record. + // + Lease lease = leaseManager.getLease(holder); + // + // We found the lease for this file. And surprisingly the original + // holder is trying to recreate this file. This should never occur. + // + if (lease != null) { + throw new AlreadyBeingCreatedException( + "failed to create file " + src + " for " + holder + + " on client " + clientMachine + + " because current leaseholder is trying to recreate file."); + } + // + // Find the original holder. + // + lease = leaseManager.getLease(pendingFile.clientName); + if (lease == null) { + throw new AlreadyBeingCreatedException( + "failed to create file " + src + " for " + holder + + " on client " + clientMachine + + " because pendingCreates is non-null but no leases found."); + } + // + // If the original holder has not renewed in the last SOFTLIMIT + // period, then start lease recovery. 
+ // + if (lease.expiredSoftLimit()) { + LOG.info("startFile: recover lease " + lease + ", src=" + src); + internalReleaseLease(lease, src); + } + throw new AlreadyBeingCreatedException("failed to create file " + src + " for " + holder + + " on client " + clientMachine + + ", because this file is already being created by " + + pendingFile.getClientName() + + " on " + pendingFile.getClientMachine()); + } + + try { + verifyReplication(src, replication, clientMachine); + } catch (IOException e) { + throw new IOException("failed to create " + e.getMessage()); + } + if (append) { + if (myFile == null) { + throw new FileNotFoundException("failed to append to non-existent file " + + src + " on client " + clientMachine); + } else if (myFile.isDirectory()) { + throw new IOException("failed to append to directory " + src + + " on client " + clientMachine); + } + } else if (!dir.isValidToCreate(src)) { + if (overwrite) { + delete(src, true); + } else { + throw new IOException("failed to create file " + src + + " on client " + clientMachine + + " either because the filename is invalid or the file exists"); + } + } + + DatanodeDescriptor clientNode = + host2DataNodeMap.getDatanodeByHost(clientMachine); + + if (append) { + // + // Replace current node with a INodeUnderConstruction. + // Recreate in-memory lease record. + // + INodeFile node = (INodeFile) myFile; + INodeFileUnderConstruction cons = new INodeFileUnderConstruction( + node.getLocalNameBytes(), + node.getReplication(), + node.getModificationTime(), + node.getPreferredBlockSize(), + node.getBlocks(), + node.getPermissionStatus(), + holder, + clientMachine, + clientNode); + dir.replaceNode(src, node, cons); + leaseManager.addLease(cons.clientName, src); + + } else { + // Now we can add the name to the filesystem. This file has no + // blocks associated with it. + // + checkFsObjectLimit(); + + // increment global generation stamp + long genstamp = nextGenerationStamp(); + INodeFileUnderConstruction newNode = dir.addFile(src, permissions, + replication, blockSize, holder, clientMachine, clientNode, genstamp); + if (newNode == null) { + throw new IOException("DIR* NameSystem.startFile: " + + "Unable to add file to namespace."); + } + leaseManager.addLease(newNode.clientName, src); + if (NameNode.stateChangeLog.isDebugEnabled()) { + NameNode.stateChangeLog.debug("DIR* NameSystem.startFile: " + + "add " + src + " to namespace for " + holder); + } + } + } catch (IOException ie) { + NameNode.stateChangeLog.warn("DIR* NameSystem.startFile: " + + ie.getMessage()); + throw ie; + } + } finally { + writeUnlock(); + } + } + + /** + * Append to an existing file in the namespace. + */ + LocatedBlock appendFile(String src, String holder, String clientMachine + ) throws IOException { + if (supportAppends == false) { + throw new IOException("Append to hdfs not supported." + + " Please refer to dfs.support.append configuration parameter."); + } + startFileInternal(src, null, holder, clientMachine, false, true, + (short) maxReplication, (long) 0); + getEditLog().logSync(); + + // + // Create a LocatedBlock object for the last block of the file + // to be returned to the client. Return null if the file does not + // have a partial block at the end. 
+ // + LocatedBlock lb = null; + writeLock(); + try { + INodeFileUnderConstruction file = (INodeFileUnderConstruction) dir.getFileINode(src); + + Block[] blocks = file.getBlocks(); + if (blocks != null && blocks.length > 0) { + Block last = blocks[blocks.length - 1]; + BlockInfo storedBlock = blocksMap.getStoredBlock(last); + if (file.getPreferredBlockSize() > storedBlock.getNumBytes()) { + long fileLength = file.computeContentSummary().getLength(); + DatanodeDescriptor[] targets = new DatanodeDescriptor[blocksMap.numNodes(last)]; + Iterator it = blocksMap.nodeIterator(last); + for (int i = 0; it != null && it.hasNext(); i++) { + targets[i] = it.next(); + } + // remove the replica locations of this block from the blocksMap + for (int i = 0; i < targets.length; i++) { + targets[i].removeBlock(storedBlock); + } + // set the locations of the last block in the lease record + file.setLastBlock(storedBlock, targets); + + lb = new LocatedBlock(last, targets, + fileLength - storedBlock.getNumBytes()); + + // Remove block from replication queue. + updateNeededReplications(last, 0, 0); + + // remove this block from the list of pending blocks to be deleted. + // This reduces the possibility of triggering HADOOP-1349. + // + for (DatanodeDescriptor dd : targets) { + String datanodeId = dd.getStorageID(); + Collection v = recentInvalidateSets.get(datanodeId); + if (v != null && v.remove(last)) { + if (v.isEmpty()) { + recentInvalidateSets.remove(datanodeId); + } + pendingDeletionBlocksCount--; + } + } + } + } + } finally { + writeUnlock(); + } + if (lb != null) { + if (NameNode.stateChangeLog.isDebugEnabled()) { + NameNode.stateChangeLog.debug("DIR* NameSystem.appendFile: file " + + src + " for " + holder + " at " + clientMachine + + " block " + lb.getBlock() + + " block size " + lb.getBlock().getNumBytes()); + } + } + + if (auditLog.isInfoEnabled()) { + logAuditEvent(UserGroupInformation.getCurrentUGI(), + Server.getRemoteIp(), + "append", src, null, null); + } + return lb; + } + + /** + * The client would like to obtain an additional block for the indicated + * filename (which is being written-to). Return an array that consists + * of the block, plus a set of machines. The first on this list should + * be where the client writes data. Subsequent items in the list must + * be provided in the connection to the first datanode. + *
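// ---------------------------------------------------------------------------
// [Editor's illustrative sketch -- not part of this patch.] Client-side append,
// which ends up in appendFile() above. As the code notes, the NameNode rejects
// appends unless dfs.support.append is enabled in its configuration.
// ---------------------------------------------------------------------------
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AppendExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    FSDataOutputStream out = fs.append(new Path("/tmp/example.log"));
    out.write("one more line\n".getBytes("UTF-8"));
    out.close();  // completeFile() finalizes the last, possibly partial, block
  }
}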

+ * Make sure the previous blocks have been reported by datanodes and + * are replicated. Will return an empty 2-elt array if we want the + * client to "try again later". + */ + public LocatedBlock getAdditionalBlock(String src, + String clientName + ) throws IOException { + long fileLength, blockSize; + int replication; + DatanodeDescriptor clientNode = null; + Block newBlock = null; + + NameNode.stateChangeLog.debug("BLOCK* NameSystem.getAdditionalBlock: file " + + src + " for " + clientName); + + readLock(); + try { + // have we exceeded the configured limit of fs objects. + checkFsObjectLimit(); + + INodeFileUnderConstruction pendingFile = checkLease(src, clientName); + + // + // If we fail this, bad things happen! + // + if (!checkFileProgress(pendingFile, false)) { + throw new NotReplicatedYetException("Not replicated yet:" + src); + } + fileLength = pendingFile.computeContentSummary().getLength(); + blockSize = pendingFile.getPreferredBlockSize(); + clientNode = pendingFile.getClientNode(); + replication = (int) pendingFile.getReplication(); + } finally { + readUnlock(); + } + + // choose targets for the new block to be allocated. + DatanodeDescriptor targets[] = replicator.chooseTarget(src, replication, + clientNode, + blockSize); + if (targets.length < this.minReplication) { + throw new IOException("File " + src + " could only be replicated to " + + targets.length + " nodes, instead of " + + minReplication); + } + + // Allocate a new block and record it in the INode. + writeLock(); + try { + if (isInSafeMode()) { + throw new SafeModeException("Cannot add block to " + src, safeMode); + } + INode[] pathINodes = dir.getExistingPathINodes(src); + int inodesLen = pathINodes.length; + checkLease(src, clientName, pathINodes[inodesLen - 1]); + INodeFileUnderConstruction pendingFile = (INodeFileUnderConstruction) + pathINodes[inodesLen - 1]; + + if (!checkFileProgress(pendingFile, false)) { + throw new NotReplicatedYetException("Not replicated yet:" + src); + } + + // allocate new block, record block locations in INode. + newBlock = allocateBlock(src, pathINodes); + pendingFile.setTargets(targets); + + for (DatanodeDescriptor dn : targets) { + dn.incBlocksScheduled(); + } + if (getPersistBlocks()) { + dir.persistBlocks(src, pendingFile); + } + } finally { + writeUnlock(); + } + + // Create next block + return new LocatedBlock(newBlock, targets, fileLength); + } + + /** + * The client would like to let go of the given block + */ + public boolean abandonBlock(Block b, String src, String holder + ) throws IOException { + writeLock(); + try { + // + // Remove the block from the pending creates list + // + NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: " + + b + " of file " + src); + if (isInSafeMode()) { + throw new SafeModeException("Cannot abandon block " + b + + " for file " + src, safeMode); + } + INodeFileUnderConstruction file = checkLease(src, holder); + dir.removeBlock(src, file, b); + NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: " + + b + + " is removed from pendingCreates"); + return true; + } finally { + writeUnlock(); + } + } + + // make sure that we still have the lease on this file.
+ + private INodeFileUnderConstruction checkLease(String src, String holder) + throws IOException { + INodeFile file = dir.getFileINode(src); + checkLease(src, holder, file); + return (INodeFileUnderConstruction) file; + } + + private void checkLease(String src, String holder, INode file) + throws IOException { + + if (file == null || file.isDirectory()) { + Lease lease = leaseManager.getLease(holder); + throw new LeaseExpiredException("No lease on " + src + + " File does not exist. " + + (lease != null ? lease.toString() : + "Holder " + holder + + " does not have any open files.")); + } + if (!file.isUnderConstruction()) { + Lease lease = leaseManager.getLease(holder); + throw new LeaseExpiredException("No lease on " + src + + " File is not open for writing. " + + (lease != null ? lease.toString() : + "Holder " + holder + + " does not have any open files.")); + } + INodeFileUnderConstruction pendingFile = (INodeFileUnderConstruction) file; + if (holder != null && !pendingFile.getClientName().equals(holder)) { + throw new LeaseExpiredException("Lease mismatch on " + src + " owned by " + + pendingFile.getClientName() + " but is accessed by " + holder); + } + } + + /** + * The FSNamesystem will already know the blocks that make up the file. + * Before we return, we make sure that all the file's blocks have + * been reported by datanodes and are replicated correctly. + */ + + enum CompleteFileStatus { + OPERATION_FAILED, + STILL_WAITING, + COMPLETE_SUCCESS + } + + public CompleteFileStatus completeFile(String src, String holder) + throws IOException { + CompleteFileStatus status = completeFileInternal(src, holder); + getEditLog().logSync(); + return status; + } + + + private CompleteFileStatus completeFileInternal(String src, + String holder) + throws IOException { + writeLock(); + try { + NameNode.stateChangeLog.debug("DIR* NameSystem.completeFile: " + src + " for " + holder); + if (isInSafeMode()) { + throw new SafeModeException("Cannot complete file " + src, safeMode); + } + INode iFile = dir.getFileINode(src); + INodeFileUnderConstruction pendingFile = null; + Block[] fileBlocks = null; + + if (iFile != null && iFile.isUnderConstruction()) { + pendingFile = (INodeFileUnderConstruction) iFile; + fileBlocks = dir.getFileBlocks(src); + } + if (fileBlocks == null) { + NameNode.stateChangeLog.warn("DIR* NameSystem.completeFile: " + + "failed to complete " + src + + " because dir.getFileBlocks() is null " + + " and pendingFile is " + + ((pendingFile == null) ? "null" : + ("from " + pendingFile.getClientMachine())) + ); + return CompleteFileStatus.OPERATION_FAILED; + } else if (!checkFileProgress(pendingFile, true)) { + return CompleteFileStatus.STILL_WAITING; + } + + finalizeINodeFileUnderConstruction(src, pendingFile); + + NameNode.stateChangeLog.info("DIR* NameSystem.completeFile: file " + src + + " is closed by " + holder); + return CompleteFileStatus.COMPLETE_SUCCESS; + } finally { + writeUnlock(); + } + } + + /** + * Check all blocks of a file. If any blocks are lower than their intended + * replication factor, then insert them into neededReplication + */ + private void checkReplicationFactor(INodeFile file) { + int numExpectedReplicas = file.getReplication(); + Block[] pendingBlocks = file.getBlocks(); + int nrBlocks = pendingBlocks.length; + for (int i = 0; i < nrBlocks; i++) { + // filter out containingNodes that are marked for decommission. 
+ NumberReplicas number = countNodes(pendingBlocks[i]); + if (number.liveReplicas() < numExpectedReplicas) { + neededReplications.add(pendingBlocks[i], + number.liveReplicas(), + number.decommissionedReplicas, + numExpectedReplicas); + } + } + } + + static Random randBlockId = new Random(); + + /** + * Allocate a block at the given pending filename + * + * @param src path to the file + * @param inodes INode representing each of the components of src. + * inodes[inodes.length-1] is the INode for the file. + */ + private Block allocateBlock(String src, INode[] inodes) throws IOException { + Block b = new Block(FSNamesystem.randBlockId.nextLong(), 0, 0); + while (isValidBlock(b)) { + b.setBlockId(FSNamesystem.randBlockId.nextLong()); + } + b.setGenerationStamp(getGenerationStamp()); + b = dir.addBlock(src, inodes, b); + NameNode.stateChangeLog.info("BLOCK* NameSystem.allocateBlock: " + + src + ". " + b); + return b; + } + + /** + * Check that the indicated file's blocks are present and + * replicated. If not, return false. If checkall is true, then check + * all blocks, otherwise check only penultimate block. + */ + boolean checkFileProgress(INodeFile v, boolean checkall) { + if (checkall) { + // + // check all blocks of the file. + // + for (Block block : v.getBlocks()) { + if (blocksMap.numNodes(block) < this.minReplication) { + LOG.info( + "INodeFile " + v + " block " + block + " has replication " + + blocksMap.numNodes(block) + " and requires " + this.minReplication + ); + + return false; + } + } + } else { + // + // check the penultimate block of this file + // + Block b = v.getPenultimateBlock(); + if (b != null) { + if (blocksMap.numNodes(b) < this.minReplication) { + LOG.info( + "INodeFile " + v + " block " + b + " has replication " + + blocksMap.numNodes(b) + " and requires " + this.minReplication + ); + + return false; + } + } + } + return true; + } + + /** + * Remove a datanode from the invalidatesSet + * + * @param n datanode + */ + void removeFromInvalidates(String storageID) { + Collection blocks = recentInvalidateSets.remove(storageID); + if (blocks != null) { + pendingDeletionBlocksCount -= blocks.size(); + } + } + + /** + * Adds block to list of blocks which will be invalidated on + * specified datanode and log the move + * + * @param b block + * @param n datanode + */ + void addToInvalidates(Block b, DatanodeInfo n) { + addToInvalidatesNoLog(b, n); + NameNode.stateChangeLog.info("BLOCK* NameSystem.addToInvalidates: " + + b.getBlockName() + " is added to invalidSet of " + n.getName()); + } + + /** + * Adds block to list of blocks which will be invalidated on + * specified datanode + * + * @param b block + * @param n datanode + */ + void addToInvalidatesNoLog(Block b, DatanodeInfo n) { + Collection invalidateSet = recentInvalidateSets.get(n.getStorageID()); + if (invalidateSet == null) { + invalidateSet = new HashSet(); + recentInvalidateSets.put(n.getStorageID(), invalidateSet); + } + if (invalidateSet.add(b)) { + pendingDeletionBlocksCount++; + } + } + + /** + * Adds block to list of blocks which will be invalidated on + * all its datanodes. 
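// ---------------------------------------------------------------------------
// [Editor's illustrative sketch -- not part of this patch.] The shape of the
// allocateBlock() loop above: keep drawing random ids until one is unused.
// A toy, self-contained version that uses a plain Set in place of blocksMap.
// ---------------------------------------------------------------------------
import java.util.HashSet;
import java.util.Random;
import java.util.Set;

public class RandomIdAllocator {
  private final Set<Long> inUse = new HashSet<Long>();
  private final Random rand = new Random();

  /** Keep sampling until an id is found that is not already allocated. */
  public synchronized long allocate() {
    long id = rand.nextLong();
    while (!inUse.add(id)) {   // add() returns false if the id already exists
      id = rand.nextLong();
    }
    return id;
  }

  public static void main(String[] args) {
    RandomIdAllocator alloc = new RandomIdAllocator();
    System.out.println("new block id: " + alloc.allocate());
  }
}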
+ */ + private void addToInvalidates(Block b) { + for (Iterator<DatanodeDescriptor> it = + blocksMap.nodeIterator(b); it.hasNext();) { + DatanodeDescriptor node = it.next(); + addToInvalidates(b, node); + } + } + + /** + * Dumps the contents of recentInvalidateSets + */ + private void dumpRecentInvalidateSets(PrintWriter out) { + int size = recentInvalidateSets.values().size(); + out.println("Metasave: Blocks " + pendingDeletionBlocksCount + + " waiting deletion from " + size + " datanodes."); + if (size == 0) { + return; + } + for (Map.Entry<String, Collection<Block>> entry : recentInvalidateSets.entrySet()) { + Collection<Block> blocks = entry.getValue(); + if (blocks.size() > 0) { + out.println(datanodeMap.get(entry.getKey()).getName() + blocks); + } + } + } + + /** + * Mark the block belonging to datanode as corrupt + * + * @param blk Block to be marked as corrupt + * @param dn Datanode which holds the corrupt replica + */ + public void markBlockAsCorrupt(Block blk, DatanodeInfo dn) + throws IOException { + writeLock(); + try { + DatanodeDescriptor node = getDatanode(dn); + if (node == null) { + throw new IOException("Cannot mark block " + blk.getBlockName() + + " as corrupt because datanode " + dn.getName() + + " does not exist. "); + } + + final BlockInfo storedBlockInfo = blocksMap.getStoredBlock(blk); + if (storedBlockInfo == null) { + // Check if the replica is in the blockMap, if not + // ignore the request for now. This could happen when BlockScanner + // thread of Datanode reports bad block before Block reports are sent + // by the Datanode on startup + NameNode.stateChangeLog.info("BLOCK NameSystem.markBlockAsCorrupt: " + + "block " + blk + " could not be marked " + + "as corrupt as it does not exist in " + + "blocksMap"); + } else { + INodeFile inode = storedBlockInfo.getINode(); + if (inode == null) { + NameNode.stateChangeLog.info("BLOCK NameSystem.markBlockAsCorrupt: " + + "block " + blk + " could not be marked " + + "as corrupt as it does not belong to " + + "any file"); + addToInvalidates(storedBlockInfo, node); + return; + } + // Add this replica to corruptReplicas Map + corruptReplicas.addToCorruptReplicasMap(storedBlockInfo, node); + if (countNodes(storedBlockInfo).liveReplicas() > inode.getReplication()) { + // the block is over-replicated so invalidate the replicas immediately + invalidateBlock(storedBlockInfo, node); + } else { + // add the block to neededReplication + updateNeededReplications(storedBlockInfo, -1, 0); + } + } + } finally { + writeUnlock(); + } + } + + /** + * Invalidates the given block on the given datanode. + */ + private void invalidateBlock(Block blk, DatanodeInfo dn) + throws IOException { + assert (hasWriteLock()); + NameNode.stateChangeLog.info("DIR* NameSystem.invalidateBlock: " + + blk + " on " + + dn.getName()); + DatanodeDescriptor node = getDatanode(dn); + if (node == null) { + throw new IOException("Cannot invalidate block " + blk + + " because datanode " + dn.getName() + + " does not exist."); + } + + // Check how many copies we have of the block. If we have at least one + // copy on a live node, then we can delete it.
+ int count = countNodes(blk).liveReplicas(); + if (count > 1) { + addToInvalidates(blk, dn); + removeStoredBlock(blk, node); + NameNode.stateChangeLog.debug("BLOCK* NameSystem.invalidateBlocks: " + + blk + " on " + + dn.getName() + " listed for deletion."); + } else { + NameNode.stateChangeLog.info("BLOCK* NameSystem.invalidateBlocks: " + + blk + " on " + + dn.getName() + " is the only copy and was not deleted."); + } + } + + //////////////////////////////////////////////////////////////// + // Here's how to handle block-copy failure during client write: + // -- As usual, the client's write should result in a streaming + // backup write to a k-machine sequence. + // -- If one of the backup machines fails, no worries. Fail silently. + // -- Before client is allowed to close and finalize file, make sure + // that the blocks are backed up. Namenode may have to issue specific backup + // commands to make up for earlier datanode failures. Once all copies + // are made, edit namespace and return to client. + //////////////////////////////////////////////////////////////// + + /** + * Change the indicated filename. + */ + public boolean renameTo(String src, String dst) throws IOException { + boolean status = renameToInternal(src, dst); + getEditLog().logSync(); + if (status && auditLog.isInfoEnabled()) { + final HdfsFileStatus stat = dir.getHdfsFileInfo(dst); + logAuditEvent(UserGroupInformation.getCurrentUGI(), + Server.getRemoteIp(), + "rename", src, dst, stat); + } + return status; + } + + private boolean renameToInternal(String src, String dst + ) throws IOException { + writeLock(); + try { + NameNode.stateChangeLog.debug("DIR* NameSystem.renameTo: " + src + " to " + dst); + if (isInSafeMode()) { + throw new SafeModeException("Cannot rename " + src, safeMode); + } + if (!DFSUtil.isValidName(dst)) { + throw new IOException("Invalid name: " + dst); + } + + if (isPermissionEnabled) { + //We should not be doing this. This is move() not renameTo(). + //but for now, + String actualdst = dir.isDir(dst) ? + dst + Path.SEPARATOR + new Path(src).getName() : dst; + checkParentAccess(src, FsAction.WRITE); + checkAncestorAccess(actualdst, FsAction.WRITE); + } + if (neverDeletePaths.contains(src)) { + NameNode.stateChangeLog.warn("DIR* NameSystem.delete: " + + " Trying to rename a whitelisted path " + src + + " by user " + UserGroupInformation.getCurrentUGI() + + " from server " + Server.getRemoteIp()); + throw new IOException("Rename a whitelisted directory is not allowed " + src); + } + + HdfsFileStatus dinfo = dir.getHdfsFileInfo(dst); + if (dir.renameTo(src, dst)) { + changeLease(src, dst, dinfo); // update lease with new filename + return true; + } + return false; + } finally { + writeUnlock(); + } + } + + /** + * Remove the indicated filename from namespace. If the filename + * is a directory (non empty) and recursive is set to false then throw exception. + */ + public boolean delete(String src, boolean recursive) throws IOException { + if ((!recursive) && (!dir.isDirEmpty(src))) { + throw new IOException(src + " is non empty"); + } + boolean status = deleteInternal(src, true); + getEditLog().logSync(); + if (status && auditLog.isInfoEnabled()) { + logAuditEvent(UserGroupInformation.getCurrentUGI(), + Server.getRemoteIp(), + "delete", src, null, null); + } + return status; + } + + /** + * Remove the indicated filename from the namespace. This may + * invalidate some blocks that make up the file. 
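// ---------------------------------------------------------------------------
// [Editor's illustrative sketch -- not part of this patch.] Client calls that
// reach renameTo() and delete() above. delete() with recursive=false refuses
// to remove a non-empty directory, mirroring the check in the patch.
// ---------------------------------------------------------------------------
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RenameDeleteExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path src = new Path("/tmp/staging/part-00000");   // hypothetical paths
    Path dst = new Path("/tmp/final/part-00000");
    if (fs.rename(src, dst)) {
      System.out.println("renamed " + src + " -> " + dst);
    }
    fs.delete(new Path("/tmp/staging"), true);  // recursive delete of the old dir
  }
}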
+ */ + boolean deleteInternal(String src, + boolean enforcePermission) throws IOException { + ArrayList collectedBlocks = new ArrayList(); + boolean deleteNow = false; + writeLock(); + try { + if (NameNode.stateChangeLog.isDebugEnabled()) { + NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + src); + } + if (isInSafeMode()) { + throw new SafeModeException("Cannot delete " + src, safeMode); + } + if (enforcePermission && isPermissionEnabled) { + checkPermission(src, false, null, FsAction.WRITE, null, FsAction.ALL); + } + if (neverDeletePaths.contains(src)) { + NameNode.stateChangeLog.warn("DIR* NameSystem.delete: " + + " Trying to delete a whitelisted path " + src + + " by user " + UserGroupInformation.getCurrentUGI() + + " from server " + Server.getRemoteIp()); + throw new IOException("Deleting a whitelisted directory is not allowed. " + src); + } + + if (dir.delete(src, collectedBlocks) == null) { + return false; + } + deleteNow = collectedBlocks.size() <= BLOCK_DELETION_INCREMENT; + if (deleteNow) { + removeBlocks(collectedBlocks); + } + } finally { + writeUnlock(); + } + if (!deleteNow) { + removeBlocks(collectedBlocks); + } + collectedBlocks.clear(); + return true; + } + + /** + * From the given list, incrementally remove the blocks from blockManager + */ + private void removeBlocks(List blocks) { + int start = 0; + int end = 0; + while (start < blocks.size()) { + end = BLOCK_DELETION_INCREMENT + start; + end = end > blocks.size() ? blocks.size() : end; + writeLock(); + try { + for (int i = start; i < end; i++) { + Block b = blocks.get(i); + blocksMap.removeINode(b); + corruptReplicas.removeFromCorruptReplicasMap(b); + addToInvalidates(b); + } + } finally { + writeUnlock(); + } + start = end; + } + } + + void removePathAndBlocks(String src, List blocks) throws IOException { + leaseManager.removeLeaseWithPrefixPath(src); + if (blocks == null) { + return; + } + for (Block b : blocks) { + blocksMap.removeINode(b); + corruptReplicas.removeFromCorruptReplicasMap(b); + addToInvalidates(b); + } + } + + /** + * Get the file info for a specific file. + * + * @param src The string representation of the path to the file + * @return object containing information regarding the file + * or null if file not found + * @throws IOException if permission to access file is denied by the system + */ + FileStatus getFileInfo(String src) throws IOException { + if (isPermissionEnabled) { + checkTraverse(src); + } + return dir.getFileInfo(src); + } + + /** + * Get the file info for a specific file. 
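// ---------------------------------------------------------------------------
// [Editor's illustrative sketch -- not part of this patch.] The pattern used by
// removeBlocks() above: process a large list in fixed-size increments and hold
// the lock only per chunk, so other namesystem operations can interleave. The
// increment size here is arbitrary; the patch uses BLOCK_DELETION_INCREMENT.
// ---------------------------------------------------------------------------
import java.util.ArrayList;
import java.util.List;

public class ChunkedWork {
  private static final int INCREMENT = 1000;
  private final Object lock = new Object();
  private long removed;

  void removeAll(List<Long> ids) {
    for (int start = 0; start < ids.size(); start += INCREMENT) {
      int end = Math.min(start + INCREMENT, ids.size());
      synchronized (lock) {               // reacquire the lock for each chunk
        for (int i = start; i < end; i++) {
          removed++;                      // stand-in for the per-block cleanup
        }
      }
    }
  }

  public static void main(String[] args) {
    List<Long> ids = new ArrayList<Long>();
    for (long i = 0; i < 5000; i++) ids.add(i);
    ChunkedWork work = new ChunkedWork();
    work.removeAll(ids);
    System.out.println("removed " + work.removed + " ids in chunks of " + INCREMENT);
  }
}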
+ * + * @param src The string representation of the path to the file + * @return object containing information regarding the file + * or null if file not found + * @throws IOException if permission to access file is denied by the system + */ + HdfsFileStatus getHdfsFileInfo(String src) throws IOException { + if (isPermissionEnabled) { + checkTraverse(src); + } + return dir.getHdfsFileInfo(src); + } + + /** + * Create all the necessary directories + */ + public boolean mkdirs(String src, PermissionStatus permissions + ) throws IOException { + boolean status = mkdirsInternal(src, permissions); + getEditLog().logSync(); + if (status && auditLog.isInfoEnabled()) { + final HdfsFileStatus stat = dir.getHdfsFileInfo(src); + logAuditEvent(UserGroupInformation.getCurrentUGI(), + Server.getRemoteIp(), + "mkdirs", src, null, stat); + } + return status; + } + + /** + * Create all the necessary directories + */ + private boolean mkdirsInternal(String src, + PermissionStatus permissions) + throws IOException { + writeLock(); + try { + NameNode.stateChangeLog.debug("DIR* NameSystem.mkdirs: " + src); + if (isPermissionEnabled) { + checkTraverse(src); + } + if (dir.isDir(src)) { + // all the users of mkdirs() are used to expect 'true' even if + // a new directory is not created. + return true; + } + if (isInSafeMode()) { + throw new SafeModeException("Cannot create directory " + src, safeMode); + } + if (!DFSUtil.isValidName(src)) { + throw new IOException("Invalid directory name: " + src); + } + if (isPermissionEnabled) { + checkAncestorAccess(src, FsAction.WRITE); + } + + // validate that we have enough inodes. This is, at best, a + // heuristic because the mkdirs() operation migth need to + // create multiple inodes. + checkFsObjectLimit(); + + if (!dir.mkdirs(src, permissions, false, now())) { + throw new IOException("Invalid directory name: " + src); + } + return true; + } finally { + writeUnlock(); + } + } + + ContentSummary getContentSummary(String src) throws IOException { + if (isPermissionEnabled) { + checkPermission(src, false, null, null, null, FsAction.READ_EXECUTE); + } + return dir.getContentSummary(src); + } + + /** + * Set the namespace quota and diskspace quota for a directory. + * See {@link ClientProtocol#setQuota(String, long, long)} for the + * contract. + */ + void setQuota(String path, long nsQuota, long dsQuota) throws IOException { + writeLock(); + try { + if (isInSafeMode()) { + throw new SafeModeException("Cannot setQuota " + path, safeMode); + } + if (isPermissionEnabled) { + checkSuperuserPrivilege(); + } + + dir.setQuota(path, nsQuota, dsQuota); + } finally { + writeUnlock(); + } + getEditLog().logSync(); + } + + /** + * Persist all metadata about this file. + * + * @param src The string representation of the path + * @param clientName The string representation of the client + * @throws IOException if path does not exist + */ + void fsync(String src, String clientName) throws IOException { + + NameNode.stateChangeLog.info("BLOCK* NameSystem.fsync: file " + + src + " for " + clientName); + writeLock(); + try { + if (isInSafeMode()) { + throw new SafeModeException("Cannot fsync file " + src, safeMode); + } + INodeFileUnderConstruction pendingFile = checkLease(src, clientName); + dir.persistBlocks(src, pendingFile); + } finally { + writeUnlock(); + } + getEditLog().logSync(); + } + + /** + * Move a file that is being written to be immutable. 
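// ---------------------------------------------------------------------------
// [Editor's illustrative sketch -- not part of this patch.] Client calls that
// land in mkdirs() and getContentSummary() above. Quotas themselves are an
// admin operation (setQuota() requires superuser privilege) and are normally
// driven through the dfsadmin tool rather than this API.
// ---------------------------------------------------------------------------
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MkdirsExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path dir = new Path("/tmp/example-dir");
    if (fs.mkdirs(dir)) {                 // true even if the directory exists
      ContentSummary cs = fs.getContentSummary(dir);
      System.out.println(dir + ": " + cs.getFileCount() + " files, "
          + cs.getLength() + " bytes");
    }
  }
}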
+ * + * @param src The filename + * @param lease The lease for the client creating the file + */ + void internalReleaseLease(Lease lease, String src) throws IOException { + LOG.info("Recovering lease=" + lease + ", src=" + src); + + INodeFile iFile = dir.getFileINode(src); + if (iFile == null) { + final String message = "DIR* NameSystem.internalReleaseCreate: " + + "attempt to release a create lock on " + + src + " file does not exist."; + NameNode.stateChangeLog.warn(message); + throw new IOException(message); + } + if (!iFile.isUnderConstruction()) { + final String message = "DIR* NameSystem.internalReleaseCreate: " + + "attempt to release a create lock on " + + src + " but file is already closed."; + NameNode.stateChangeLog.warn(message); + throw new IOException(message); + } + + INodeFileUnderConstruction pendingFile = (INodeFileUnderConstruction) iFile; + + // Initialize lease recovery for pendingFile. If there are no blocks + // associated with this file, then reap lease immediately. Otherwise + // renew the lease and trigger lease recovery. + if (pendingFile.getTargets() == null || + pendingFile.getTargets().length == 0) { + if (pendingFile.getBlocks().length == 0) { + finalizeINodeFileUnderConstruction(src, pendingFile); + NameNode.stateChangeLog.warn("BLOCK*" + + " internalReleaseLease: No blocks found, lease removed."); + return; + } + // setup the Inode.targets for the last block from the blocksMap + // + Block[] blocks = pendingFile.getBlocks(); + Block last = blocks[blocks.length - 1]; + DatanodeDescriptor[] targets = + new DatanodeDescriptor[blocksMap.numNodes(last)]; + Iterator it = blocksMap.nodeIterator(last); + for (int i = 0; it != null && it.hasNext(); i++) { + targets[i] = it.next(); + } + pendingFile.setTargets(targets); + } + // start lease recovery of the last block for this file. + pendingFile.assignPrimaryDatanode(); + leaseManager.renewLease(lease); + } + + private void finalizeINodeFileUnderConstruction(String src, + INodeFileUnderConstruction pendingFile) + throws IOException { + leaseManager.removeLease(pendingFile.clientName, src); + + // The file is no longer pending. + // Create permanent INode, update blockmap + INodeFile newFile = pendingFile.convertToInodeFile(); + dir.replaceNode(src, pendingFile, newFile); + + // close file and persist block allocations for this file + dir.closeFile(src, newFile); + + checkReplicationFactor(newFile); + } + + /** + * corrupts a file by: + * 1. 
removing all targets of the last block + */ + void corruptFileForTesting(String src) throws IOException { + INodeFile inode = dir.getFileINode(src); + + if (inode.isUnderConstruction()) { + INodeFileUnderConstruction pendingFile = + (INodeFileUnderConstruction) inode; + BlockInfo[] blocks = pendingFile.getBlocks(); + if (blocks != null && blocks.length >= 1) { + BlockInfo lastBlockInfo = blocks[blocks.length - 1]; + + pendingFile.setLastBlock( + lastBlockInfo, + new DatanodeDescriptor[0] + ); + } + } + } + + // public only for testing + + void commitBlockSynchronization(Block lastblock, + long newgenerationstamp, long newlength, + boolean closeFile, boolean deleteblock, DatanodeID[] newtargets + ) throws IOException { + LOG.info("commitBlockSynchronization(lastblock=" + lastblock + + ", newgenerationstamp=" + newgenerationstamp + + ", newlength=" + newlength + + ", newtargets=" + Arrays.asList(newtargets) + + ", closeFile=" + closeFile + + ", deleteBlock=" + deleteblock + + ")"); + String src = null; + writeLock(); + try { + if (isInSafeMode()) { + throw new SafeModeException("Cannot commitBlockSynchronization " + + lastblock, safeMode); + } + final BlockInfo oldblockinfo = blocksMap.getStoredBlock(lastblock); + if (oldblockinfo == null) { + throw new IOException("Block (=" + lastblock + ") not found"); + } + INodeFile iFile = oldblockinfo.getINode(); + if (!iFile.isUnderConstruction()) { + throw new IOException("Unexpected block (=" + lastblock + + ") since the file (=" + iFile.getLocalName() + + ") is not under construction"); + } + INodeFileUnderConstruction pendingFile = (INodeFileUnderConstruction) iFile; + + + // Remove old block from blocks map. This always have to be done + // because the generation stamp of this block is changing. + blocksMap.removeBlock(oldblockinfo); + + if (deleteblock) { + pendingFile.removeBlock(lastblock); + } else { + // update last block, construct newblockinfo and add it to the blocks map + lastblock.set(lastblock.getBlockId(), newlength, newgenerationstamp); + final BlockInfo newblockinfo = blocksMap.addINode(lastblock, pendingFile); + + // find the DatanodeDescriptor objects + // There should be no locations in the blocksMap till now because the + // file is underConstruction + DatanodeDescriptor[] descriptors = null; + if (newtargets.length > 0) { + descriptors = new DatanodeDescriptor[newtargets.length]; + for (int i = 0; i < newtargets.length; i++) { + descriptors[i] = getDatanode(newtargets[i]); + } + } + if (closeFile) { + // the file is getting closed. Insert block locations into blocksMap. + // Otherwise fsck will report these blocks as MISSING, especially if the + // blocksReceived from Datanodes take a long time to arrive. 
+ for (int i = 0; i < descriptors.length; i++) { + descriptors[i].addBlock(newblockinfo); + } + pendingFile.setLastBlock(newblockinfo, null); + } else { + // add locations into the INodeUnderConstruction + pendingFile.setLastBlock(newblockinfo, descriptors); + } + } + + // If this commit does not want to close the file, persist + // blocks only if append is supported and return + src = leaseManager.findPath(pendingFile); + if (!closeFile) { + if (supportAppends) { + dir.persistBlocks(src, pendingFile); + getEditLog().logSync(); + } + LOG.info("commitBlockSynchronization(" + lastblock + ") successful"); + return; + } + + //remove lease, close file + finalizeINodeFileUnderConstruction(src, pendingFile); + } finally {// end of synchronized section + writeUnlock(); + } + + getEditLog().logSync(); + LOG.info("commitBlockSynchronization(newblock=" + lastblock + + ", file=" + src + + ", newgenerationstamp=" + newgenerationstamp + + ", newlength=" + newlength + + ", newtargets=" + Arrays.asList(newtargets) + ") successful"); + } + + + /** + * Renew the lease(s) held by the given client + */ + void renewLease(String holder) throws IOException { + readLock(); + try { + if (isInSafeMode()) { + throw new SafeModeException("Cannot renew lease for " + holder, safeMode); + } + + leaseManager.renewLease(holder); + } finally { + readUnlock(); + } + leaseManager.renewLease(holder); + } + + private void getListingCheck(String src) throws IOException { + if (isPermissionEnabled) { + if (dir.isDir(src)) { + checkPathAccess(src, FsAction.READ_EXECUTE); + } else { + checkTraverse(src); + } + } + if (auditLog.isInfoEnabled()) { + logAuditEvent(UserGroupInformation.getCurrentUGI(), + Server.getRemoteIp(), + "listStatus", src, null, null); + } + } + + /** + * Get a listing of all files at 'src'. The Object[] array + * exists so we can return file attributes (soon to be implemented) + */ + public FileStatus[] getListing(String src) throws IOException { + getListingCheck(src); + return dir.getListing(src); + } + + /** + * Get a listing of all files at 'src'. The Object[] array + * exists so we can return file attributes (soon to be implemented) + */ + public HdfsFileStatus[] getHdfsListing(String src) throws IOException { + getListingCheck(src); + return dir.getHdfsListing(src); + } + + /** + * Get a partial listing of the indicated directory + * + * @param src the directory name + * @param startAfter the name to start after + * @param needLocation if blockLocations need to be returned + * @return a partial listing starting after startAfter + */ + public DirectoryListing getPartialListing(String src, byte[] startAfter, + boolean needLocation) + throws IOException { + readLock(); + try { + getListingCheck(src); + return dir.getPartialListing(src, startAfter, needLocation); + } finally { + readUnlock(); + } + } + + ///////////////////////////////////////////////////////// + // + // These methods are called by datanodes + // + ///////////////////////////////////////////////////////// + + /** + * Register Datanode. + *

+ * The purpose of registration is to identify whether the new datanode + * serves a new data storage, and will report new data block copies, + * which the namenode was not aware of; or the datanode is a replacement + * node for the data storage that was previously served by a different + * or the same (in terms of host:port) datanode. + * The data storages are distinguished by their storageIDs. When a new + * data storage is reported the namenode issues a new unique storageID. + *

+ * Finally, the namenode returns its namespaceID as the registrationID + * for the datanodes. + * namespaceID is a persistent attribute of the name space. + * The registrationID is checked every time the datanode is communicating + * with the namenode. + * Datanodes with inappropriate registrationID are rejected. + * If the namenode stops, and then restarts it can restore its + * namespaceID and will continue serving the datanodes that has previously + * registered with the namenode without restarting the whole cluster. + * + * @see org.apache.hadoop.hdfs.server.datanode.DataNode#register() + */ + public void registerDatanode(DatanodeRegistration nodeReg + ) throws IOException { + writeLock(); + try { + String dnAddress = Server.getRemoteAddress(); + if (dnAddress == null) { + // Mostly called inside an RPC. + // But if not, use address passed by the data-node. + dnAddress = nodeReg.getHost(); + } + + // check if the datanode is allowed to be connect to the namenode + if (!verifyNodeRegistration(nodeReg, dnAddress)) { + throw new DisallowedDatanodeException(nodeReg); + } + + String hostName = nodeReg.getHost(); + + // update the datanode's name with ip:port + DatanodeID dnReg = new DatanodeID(dnAddress + ":" + nodeReg.getPort(), + nodeReg.getStorageID(), + nodeReg.getInfoPort(), + nodeReg.getIpcPort()); + nodeReg.updateRegInfo(dnReg); + + NameNode.stateChangeLog.info( + "BLOCK* NameSystem.registerDatanode: " + + "node registration from " + nodeReg.getName() + + " storage " + nodeReg.getStorageID()); + + DatanodeDescriptor nodeS = datanodeMap.get(nodeReg.getStorageID()); + DatanodeDescriptor nodeN = host2DataNodeMap.getDatanodeByName(nodeReg.getName()); + + if (nodeN != null && nodeN != nodeS) { + NameNode.LOG.info("BLOCK* NameSystem.registerDatanode: " + + "node from name: " + nodeN.getName()); + // nodeN previously served a different data storage, + // which is not served by anybody anymore. + removeDatanode(nodeN); + // physically remove node from datanodeMap + wipeDatanode(nodeN); + nodeN = null; + } + + if (nodeS != null) { + if (nodeN == nodeS) { + // The same datanode has been just restarted to serve the same data + // storage. We do not need to remove old data blocks, the delta will + // be calculated on the next block report from the datanode + NameNode.stateChangeLog.debug("BLOCK* NameSystem.registerDatanode: " + + "node restarted."); + } else { + // nodeS is found + /* The registering datanode is a replacement node for the existing + data storage, which from now on will be served by a new node. + If this message repeats, both nodes might have same storageID + by (insanely rare) random chance. 
User needs to restart one of the + nodes with its data cleared (or user can just remove the StorageID + value in "VERSION" file under the data directory of the datanode, + but this might not work if the VERSION file format has changed) + */ + NameNode.stateChangeLog.info("BLOCK* NameSystem.registerDatanode: " + + "node " + nodeS.getName() + + " is replaced by " + nodeReg.getName() + + " with the same storageID " + + nodeReg.getStorageID()); + } + // update cluster map + clusterMap.remove(nodeS); + nodeS.updateRegInfo(nodeReg); + nodeS.setHostName(hostName); + + // resolve network location + resolveNetworkLocation(nodeS); + clusterMap.add(nodeS); + + // also treat the registration message as a heartbeat + synchronized (heartbeats) { + if (!heartbeats.contains(nodeS)) { + heartbeats.add(nodeS); + //update its timestamp + nodeS.updateHeartbeat(0L, 0L, 0L, 0); + nodeS.isAlive = true; + } + } + return; + } + + // this is a new datanode serving a new data storage + if (nodeReg.getStorageID().equals("")) { + // this data storage has never been registered + // it is either empty or was created by pre-storageID version of DFS + nodeReg.storageID = newStorageID(); + NameNode.stateChangeLog.debug( + "BLOCK* NameSystem.registerDatanode: " + + "new storageID " + nodeReg.getStorageID() + " assigned."); + } + // register new datanode + DatanodeDescriptor nodeDescr + = new DatanodeDescriptor(nodeReg, NetworkTopology.DEFAULT_RACK, hostName); + resolveNetworkLocation(nodeDescr); + unprotectedAddDatanode(nodeDescr); + clusterMap.add(nodeDescr); + + // also treat the registration message as a heartbeat + synchronized (heartbeats) { + heartbeats.add(nodeDescr); + nodeDescr.isAlive = true; + // no need to update its timestamp + // because it is done when the descriptor is created + } + return; + } finally { + writeUnlock(); + } + } + + /* Resolve a node's network location */ + + private void resolveNetworkLocation(DatanodeDescriptor node) { + List<String> names = new ArrayList<String>(1); + if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) { + // get the node's IP address + names.add(node.getHost()); + } else { + // get the node's host name + String hostName = node.getHostName(); + int colon = hostName.indexOf(":"); + hostName = (colon == -1) ? hostName : hostName.substring(0, colon); + names.add(hostName); + } + + // resolve its network location + List<String> rName = dnsToSwitchMapping.resolve(names); + String networkLocation; + if (rName == null) { + LOG.error("The resolve call returned null! Using " + + NetworkTopology.DEFAULT_RACK + " for host " + names); + networkLocation = NetworkTopology.DEFAULT_RACK; + } else { + networkLocation = rName.get(0); + } + node.setNetworkLocation(networkLocation); + } + + /** + * Get registrationID for datanodes based on the namespaceID. + * + * @return registration ID + * @see #registerDatanode(DatanodeRegistration) + * @see FSImage#newNamespaceID() + */ + public String getRegistrationID() { + return Storage.getRegistrationID(dir.fsImage); + } + + /** + * Generate new storage ID. + * + * @return unique storage ID + *

+ * Note: that collisions are still possible if somebody will try + * to bring in a data storage from a different cluster. + */ + private String newStorageID() { + String newID = null; + while (newID == null) { + newID = "DS" + Integer.toString(r.nextInt()); + if (datanodeMap.get(newID) != null) { + newID = null; + } + } + return newID; + } + + private boolean isDatanodeDead(DatanodeDescriptor node) { + return (node.getLastUpdate() < + (now() - heartbeatExpireInterval)); + } + + private void setDatanodeDead(DatanodeDescriptor node) throws IOException { + node.setLastUpdate(0); + } + + /** + * The given node has reported in. This method should: + * 1) Record the heartbeat, so the datanode isn't timed out + * 2) Adjust usage stats for future block allocation + *
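// ---------------------------------------------------------------------------
// [Editor's illustrative sketch -- not part of this patch.] The liveness test
// behind isDatanodeDead() above: a datanode is considered dead once its last
// heartbeat is older than the expiry interval. The interval value here is made
// up for the example; the patch derives heartbeatExpireInterval from its
// heartbeat configuration.
// ---------------------------------------------------------------------------
public class HeartbeatCheck {
  static final long EXPIRE_INTERVAL_MS = 10 * 60 * 1000L;  // hypothetical value

  static boolean isDead(long lastUpdateMs, long nowMs) {
    return lastUpdateMs < nowMs - EXPIRE_INTERVAL_MS;
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    System.out.println(isDead(now - 5 * 60 * 1000L, now));   // false: recent
    System.out.println(isDead(now - 30 * 60 * 1000L, now));  // true: stale
  }
}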

+ * If a substantial amount of time passed since the last datanode + * heartbeat then request an immediate block report. + * + * @return an array of datanode commands + * @throws IOException + */ + DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg, + long capacity, long dfsUsed, long remaining, + int xceiverCount, int xmitsInProgress) + throws IOException { + DatanodeCommand cmd = null; + synchronized (heartbeats) { + synchronized (datanodeMap) { + DatanodeDescriptor nodeinfo = null; + try { + nodeinfo = getDatanode(nodeReg); + } catch (UnregisteredDatanodeException e) { + return new DatanodeCommand[]{DatanodeCommand.REGISTER}; + } + + // Check if this datanode should actually be shutdown instead. + if (nodeinfo != null && shouldNodeShutdown(nodeinfo)) { + setDatanodeDead(nodeinfo); + throw new DisallowedDatanodeException(nodeinfo); + } + + if (nodeinfo == null || !nodeinfo.isAlive) { + return new DatanodeCommand[]{DatanodeCommand.REGISTER}; + } + + updateStats(nodeinfo, false); + nodeinfo.updateHeartbeat(capacity, dfsUsed, remaining, xceiverCount); + updateStats(nodeinfo, true); + + //check lease recovery + cmd = nodeinfo.getLeaseRecoveryCommand(Integer.MAX_VALUE); + if (cmd != null) { + return new DatanodeCommand[]{cmd}; + } + + ArrayList cmds = new ArrayList(2); + //check pending replication + cmd = nodeinfo.getReplicationCommand( + maxReplicationStreams - xmitsInProgress); + if (cmd != null) { + cmds.add(cmd); + } + //check block invalidation + cmd = nodeinfo.getInvalidateBlocks(blockInvalidateLimit); + if (cmd != null) { + cmds.add(cmd); + } + if (!cmds.isEmpty()) { + return cmds.toArray(new DatanodeCommand[cmds.size()]); + } + } + } + + //check distributed upgrade + cmd = getDistributedUpgradeCommand(); + if (cmd != null) { + return new DatanodeCommand[]{cmd}; + } + return null; + } + + private void updateStats(DatanodeDescriptor node, boolean isAdded) { + // + // The statistics are protected by the heartbeat lock + // + assert (Thread.holdsLock(heartbeats)); + if (isAdded) { + capacityTotal += node.getCapacity(); + capacityUsed += node.getDfsUsed(); + capacityRemaining += node.getRemaining(); + totalLoad += node.getXceiverCount(); + } else { + capacityTotal -= node.getCapacity(); + capacityUsed -= node.getDfsUsed(); + capacityRemaining -= node.getRemaining(); + totalLoad -= node.getXceiverCount(); + } + } + + /** + * Periodically calls heartbeatCheck(). + */ + class HeartbeatMonitor implements Runnable { + /** + */ + public void run() { + while (fsRunning) { + try { + heartbeatCheck(); + } catch (Exception e) { + FSNamesystem.LOG.error(StringUtils.stringifyException(e)); + } + try { + Thread.sleep(heartbeatRecheckInterval); + } catch (InterruptedException ie) { + } + } + } + } + + /** + * Periodically calls computeReplicationWork(). + */ + class ReplicationMonitor implements Runnable { + static final int INVALIDATE_WORK_PCT_PER_ITERATION = 32; + static final float REPLICATION_WORK_MULTIPLIER_PER_ITERATION = 2; + + public void run() { + while (fsRunning) { + try { + computeDatanodeWork(); + processPendingReplications(); + processOverReplicatedBlocksAsync(); + configManager.reloadConfigIfNecessary(); + Thread.sleep(replicationRecheckInterval); + } catch (InterruptedException ie) { + LOG.warn("ReplicationMonitor thread received InterruptedException." + ie); + break; + } catch (IOException ie) { + LOG.warn("ReplicationMonitor thread received exception. " + ie); + } catch (Throwable t) { + LOG.warn("ReplicationMonitor thread received Runtime exception. 
" + t); + Runtime.getRuntime().exit(-1); + } + } + } + } + + ///////////////////////////////////////////////////////// + // + // These methods are called by the Namenode system, to see + // if there is any work for registered datanodes. + // + ///////////////////////////////////////////////////////// + + /** + * Compute block replication and block invalidation work + * that can be scheduled on data-nodes. + * The datanode will be informed of this work at the next heartbeat. + * + * @return number of blocks scheduled for replication or removal. + */ + public int computeDatanodeWork() throws IOException { + int workFound = 0; + int blocksToProcess = 0; + int nodesToProcess = 0; + // blocks should not be replicated or removed if safe mode is on + if (isInSafeMode()) { + return workFound; + } + synchronized (heartbeats) { + blocksToProcess = (int) (heartbeats.size() + * ReplicationMonitor.REPLICATION_WORK_MULTIPLIER_PER_ITERATION); + nodesToProcess = (int) Math.ceil((double) heartbeats.size() + * ReplicationMonitor.INVALIDATE_WORK_PCT_PER_ITERATION / 100); + } + + workFound = computeReplicationWork(blocksToProcess); + + // Update FSNamesystemMetrics counters + writeLock(); + try { + pendingReplicationBlocksCount = pendingReplications.size(); + underReplicatedBlocksCount = neededReplications.size(); + scheduledReplicationBlocksCount = workFound; + corruptReplicaBlocksCount = corruptReplicas.size(); + } finally { + writeUnlock(); + } + + workFound += computeInvalidateWork(nodesToProcess); + return workFound; + } + + /** + * Schedule blocks for deletion at datanodes + * + * @param nodesToProcess number of datanodes to schedule deletion work + * @return total number of block for deletion + */ + int computeInvalidateWork(int nodesToProcess) { + int numOfNodes = 0; + ArrayList keyArray = null; + + readLock(); + try { + numOfNodes = recentInvalidateSets.size(); + // get an array of the keys + keyArray = new ArrayList(recentInvalidateSets.keySet()); + } finally { + readUnlock(); + } + + nodesToProcess = Math.min(numOfNodes, nodesToProcess); + + // randomly pick up nodesToProcess nodes + // and put them at [0, nodesToProcess) + int remainingNodes = numOfNodes - nodesToProcess; + if (nodesToProcess < remainingNodes) { + for (int i = 0; i < nodesToProcess; i++) { + int keyIndex = r.nextInt(numOfNodes - i) + i; + Collections.swap(keyArray, keyIndex, i); // swap to front + } + } else { + for (int i = 0; i < remainingNodes; i++) { + int keyIndex = r.nextInt(numOfNodes - i); + Collections.swap(keyArray, keyIndex, numOfNodes - i - 1); // swap to end + } + } + + int blockCnt = 0; + for (int nodeCnt = 0; nodeCnt < nodesToProcess; nodeCnt++) { + blockCnt += invalidateWorkForOneNode(keyArray.get(nodeCnt)); + } + return blockCnt; + } + + /** + * Scan blocks in {@link #neededReplications} and assign replication + * work to data-nodes they belong to. + *

+ * The number of processed blocks equals either twice the number of live + * data-nodes or the number of under-replicated blocks, whichever is less. + * + * @return number of blocks scheduled for replication during this iteration. + */ + private int computeReplicationWork( + int blocksToProcess) throws IOException { + // Choose the blocks to be replicated + List<List<Block>> blocksToReplicate = + chooseUnderReplicatedBlocks(blocksToProcess); + + // replicate blocks + int scheduledReplicationCount = 0; + for (int i = 0; i < blocksToReplicate.size(); i++) { + for (Block block : blocksToReplicate.get(i)) { + if (computeReplicationWorkForBlock(block, i)) { + scheduledReplicationCount++; + } + } + } + return scheduledReplicationCount; + } + + /** + * Get a list of block lists to be replicated. + * The index of a block list represents its replication priority. + * + * @param blocksToProcess + * @return a list of block lists to be replicated. + * The block list index represents its replication priority. + */ + List<List<Block>> chooseUnderReplicatedBlocks(int blocksToProcess) { + writeLock(); + try { + // initialize data structure for the return value + List<List<Block>> blocksToReplicate = + new ArrayList<List<Block>>(UnderReplicatedBlocks.LEVEL); + for (int i = 0; i < UnderReplicatedBlocks.LEVEL; i++) { + blocksToReplicate.add(new ArrayList<Block>()); + } + + synchronized (neededReplications) { + if (neededReplications.size() == 0) { + missingBlocksInCurIter = 0; + missingBlocksInPrevIter = 0; + return blocksToReplicate; + } + + // Go through all blocks that need replications. + BlockIterator neededReplicationsIterator = neededReplications.iterator(); + // skip to the first unprocessed block, which is at replIndex + for (int i = 0; i < replIndex && neededReplicationsIterator.hasNext(); i++) { + neededReplicationsIterator.next(); + } + // # of blocks to process equals either twice the number of live + // data-nodes or the number of under-replicated blocks whichever is less + blocksToProcess = Math.min(blocksToProcess, neededReplications.size()); + + for (int blkCnt = 0; blkCnt < blocksToProcess; blkCnt++, replIndex++) { + if (!neededReplicationsIterator.hasNext()) { + // start from the beginning + replIndex = 0; + missingBlocksInPrevIter = missingBlocksInCurIter; + missingBlocksInCurIter = 0; + blocksToProcess = Math.min(blocksToProcess, neededReplications.size()); + if (blkCnt >= blocksToProcess) { + break; + } + neededReplicationsIterator = neededReplications.iterator(); + assert neededReplicationsIterator.hasNext() : + "neededReplications should not be empty."; + } + + Block block = neededReplicationsIterator.next(); + int priority = neededReplicationsIterator.getPriority(); + if (priority < 0 || priority >= blocksToReplicate.size()) { + LOG.warn("Unexpected replication priority: " + priority + " " + block); + } else { + blocksToReplicate.get(priority).add(block); + } + } // end for + } // end synchronized + return blocksToReplicate; + } finally { + writeUnlock(); + } + } + + /** + * Replicate a block + * + * @param block block to be replicated + * @param priority a hint of its priority in the neededReplication queue + * @return if the block gets replicated or not + */ + boolean computeReplicationWorkForBlock(Block block, int priority) { + int requiredReplication, numEffectiveReplicas; + List<DatanodeDescriptor> containingNodes; + DatanodeDescriptor srcNode; + INodeFile fileINode = null; + + writeLock(); + try { + synchronized (neededReplications) { + // block should belong to a file + fileINode = blocksMap.getINode(block); + // abandoned block or block reopened for append
+ if (fileINode == null || fileINode.isUnderConstruction()) { + neededReplications.remove(block, priority); // remove from neededReplications + replIndex--; + return false; + } + requiredReplication = fileINode.getReplication(); + + // get a source data-node + containingNodes = new ArrayList<DatanodeDescriptor>(); + NumberReplicas numReplicas = new NumberReplicas(); + srcNode = chooseSourceDatanode(block, containingNodes, numReplicas); + if ((numReplicas.liveReplicas() + numReplicas.decommissionedReplicas()) + <= 0) { + missingBlocksInCurIter++; + } + if (srcNode == null) // block cannot be replicated from any node + { + return false; + } + + // do not schedule more if enough replicas are already pending + numEffectiveReplicas = numReplicas.liveReplicas() + + pendingReplications.getNumReplicas(block); + if (numEffectiveReplicas >= requiredReplication) { + neededReplications.remove(block, priority); // remove from neededReplications + replIndex--; + NameNode.stateChangeLog.info("BLOCK* " + + "Removing block " + block + + " from neededReplications as it has enough replicas."); + return false; + } + } + } finally { + writeUnlock(); + } + + // choose replication targets: NOT HOLDING THE GLOBAL LOCK + DatanodeDescriptor targets[] = replicator.chooseTarget( + fileINode, + requiredReplication - numEffectiveReplicas, + srcNode, containingNodes, null, block.getNumBytes()); + if (targets.length == 0) { + return false; + } + + writeLock(); + try { + synchronized (neededReplications) { + // Recheck since global lock was released + // block should belong to a file + fileINode = blocksMap.getINode(block); + // abandoned block or block reopened for append + if (fileINode == null || fileINode.isUnderConstruction()) { + neededReplications.remove(block, priority); // remove from neededReplications + replIndex--; + return false; + } + requiredReplication = fileINode.getReplication(); + + // do not schedule more if enough replicas are already pending + NumberReplicas numReplicas = countNodes(block); + numEffectiveReplicas = numReplicas.liveReplicas() + + pendingReplications.getNumReplicas(block); + if (numEffectiveReplicas >= requiredReplication) { + neededReplications.remove(block, priority); // remove from neededReplications + replIndex--; + NameNode.stateChangeLog.info("BLOCK* " + + "Removing block " + block + + " from neededReplications as it has enough replicas."); + return false; + } + + // Add block to the to be replicated list + srcNode.addBlockToBeReplicated(block, targets); + + for (DatanodeDescriptor dn : targets) { + dn.incBlocksScheduled(); + } + + // Move the block-replication into a "pending" state. + // The reason we use 'pending' is so we can retry + // replications that fail after an appropriate amount of time.
+ pendingReplications.add(block, targets.length); + NameNode.stateChangeLog.debug( + "BLOCK* block " + block + + " is moved from neededReplications to pendingReplications"); + + // remove from neededReplications + if (numEffectiveReplicas + targets.length >= requiredReplication) { + neededReplications.remove(block, priority); // remove from neededReplications + replIndex--; + } + if (NameNode.stateChangeLog.isInfoEnabled()) { + StringBuffer targetList = new StringBuffer("datanode(s)"); + for (int k = 0; k < targets.length; k++) { + targetList.append(' '); + targetList.append(targets[k].getName()); + } + NameNode.stateChangeLog.info( + "BLOCK* ask " + + srcNode.getName() + " to replicate " + + block + " to " + targetList); + NameNode.stateChangeLog.debug( + "BLOCK* neededReplications = " + neededReplications.size() + + " pendingReplications = " + pendingReplications.size()); + } + } + } finally { + writeUnlock(); + } + + return true; + } + + /** + * Parse the data-nodes the block belongs to and choose one, + * which will be the replication source. + *
+ * We prefer nodes that are in DECOMMISSION_INPROGRESS state to other nodes + * since the former do not have write traffic and hence are less busy. + * We do not use already decommissioned nodes as a source. + * Otherwise we choose a random node among those that did not reach their + * replication limit. + *
+ * In addition form a list of all nodes containing the block + * and calculate its replication numbers. + */ + private DatanodeDescriptor chooseSourceDatanode( + Block block, + List containingNodes, + NumberReplicas numReplicas) { + containingNodes.clear(); + DatanodeDescriptor srcNode = null; + int live = 0; + int decommissioned = 0; + int corrupt = 0; + int excess = 0; + Iterator it = blocksMap.nodeIterator(block); + Collection nodesCorrupt = corruptReplicas.getNodes(block); + while (it.hasNext()) { + DatanodeDescriptor node = it.next(); + Collection excessBlocks = + excessReplicateMap.get(node.getStorageID()); + if ((nodesCorrupt != null) && (nodesCorrupt.contains(node))) { + corrupt++; + } else if (node.isDecommissionInProgress() || node.isDecommissioned()) { + decommissioned++; + } else if (excessBlocks != null && excessBlocks.contains(block)) { + excess++; + } else { + live++; + } + containingNodes.add(node); + // Check if this replica is corrupt + // If so, do not select the node as src node + if ((nodesCorrupt != null) && nodesCorrupt.contains(node)) { + continue; + } + if (node.getNumberOfBlocksToBeReplicated() >= maxReplicationStreams) { + continue; + } // already reached replication limit + // the block must not be scheduled for removal on srcNode + if (excessBlocks != null && excessBlocks.contains(block)) { + continue; + } + // never use already decommissioned nodes + if (node.isDecommissioned()) { + continue; + } + // we prefer nodes that are in DECOMMISSION_INPROGRESS state + if (node.isDecommissionInProgress() || srcNode == null) { + srcNode = node; + continue; + } + if (srcNode.isDecommissionInProgress()) { + continue; + } + // switch to a different node randomly + // this to prevent from deterministically selecting the same node even + // if the node failed to replicate the block on previous iterations + if (r.nextBoolean()) { + srcNode = node; + } + } + if (numReplicas != null) { + numReplicas.initialize(live, decommissioned, corrupt, excess); + } + return srcNode; + } + + /** + * Get blocks to invalidate for nodeId + * in {@link #recentInvalidateSets}. + * + * @return number of blocks scheduled for removal during this iteration. 
+ */ + private int invalidateWorkForOneNode(String nodeId) { + writeLock(); + try { + // blocks should not be replicated or removed if safe mode is on + if (isInSafeMode()) { + return 0; + } + // get blocks to invalidate for the nodeId + assert nodeId != null; + DatanodeDescriptor dn = datanodeMap.get(nodeId); + if (dn == null) { + recentInvalidateSets.remove(nodeId); + return 0; + } + + Collection invalidateSet = recentInvalidateSets.get(nodeId); + if (invalidateSet == null) { + return 0; + } + + ArrayList blocksToInvalidate = + new ArrayList(blockInvalidateLimit); + + // # blocks that can be sent in one message is limited + Iterator it = invalidateSet.iterator(); + for (int blkCount = 0; blkCount < blockInvalidateLimit && it.hasNext(); + blkCount++) { + blocksToInvalidate.add(it.next()); + it.remove(); + } + + // If we send everything in this message, remove this node entry + if (!it.hasNext()) { + recentInvalidateSets.remove(nodeId); + } + + dn.addBlocksToBeInvalidated(blocksToInvalidate); + + if (NameNode.stateChangeLog.isInfoEnabled()) { + StringBuffer blockList = new StringBuffer(); + for (Block blk : blocksToInvalidate) { + blockList.append(' '); + blockList.append(blk); + } + NameNode.stateChangeLog.info("BLOCK* ask " + + dn.getName() + " to delete " + blockList); + } + pendingDeletionBlocksCount -= blocksToInvalidate.size(); + return blocksToInvalidate.size(); + } finally { + writeUnlock(); + } + } + + public void setNodeReplicationLimit(int limit) { + this.maxReplicationStreams = limit; + } + + /** + * If there were any replication requests that timed out, reap them + * and put them back into the neededReplication queue + */ + void processPendingReplications() { + Block[] timedOutItems = pendingReplications.getTimedOutBlocks(); + if (timedOutItems != null) { + writeLock(); + try { + for (int i = 0; i < timedOutItems.length; i++) { + NumberReplicas num = countNodes(timedOutItems[i]); + neededReplications.add(timedOutItems[i], + num.liveReplicas(), + num.decommissionedReplicas(), + getReplication(timedOutItems[i])); + } + } finally { + writeUnlock(); + } + /* If we know the target datanodes where the replication timedout, + * we could invoke decBlocksScheduled() on it. Its ok for now. 
+ */ + } + } + + /** + * remove a datanode descriptor + * + * @param nodeID datanode ID + */ + public void removeDatanode(DatanodeID nodeID) + throws IOException { + writeLock(); + try { + DatanodeDescriptor nodeInfo = getDatanode(nodeID); + if (nodeInfo != null) { + removeDatanode(nodeInfo); + } else { + NameNode.stateChangeLog.warn("BLOCK* NameSystem.removeDatanode: " + + nodeID.getName() + " does not exist"); + } + } finally { + writeUnlock(); + } + } + + /** + * remove a datanode descriptor + * + * @param nodeInfo datanode descriptor + */ + private void removeDatanode(DatanodeDescriptor nodeInfo) { + synchronized (heartbeats) { + if (nodeInfo.isAlive) { + updateStats(nodeInfo, false); + heartbeats.remove(nodeInfo); + nodeInfo.isAlive = false; + } + } + + for (Iterator it = nodeInfo.getBlockIterator(); it.hasNext();) { + removeStoredBlock(it.next(), nodeInfo); + } + unprotectedRemoveDatanode(nodeInfo); + clusterMap.remove(nodeInfo); + } + + void unprotectedRemoveDatanode(DatanodeDescriptor nodeDescr) { + nodeDescr.resetBlocks(); + removeFromInvalidates(nodeDescr.getStorageID()); + NameNode.stateChangeLog.debug( + "BLOCK* NameSystem.unprotectedRemoveDatanode: " + + nodeDescr.getName() + " is out of service now."); + } + + void unprotectedAddDatanode(DatanodeDescriptor nodeDescr) { + /* To keep host2DataNodeMap consistent with datanodeMap, + remove from host2DataNodeMap the datanodeDescriptor removed + from datanodeMap before adding nodeDescr to host2DataNodeMap. + */ + host2DataNodeMap.remove( + datanodeMap.put(nodeDescr.getStorageID(), nodeDescr)); + host2DataNodeMap.add(nodeDescr); + + NameNode.stateChangeLog.debug( + "BLOCK* NameSystem.unprotectedAddDatanode: " + + "node " + nodeDescr.getName() + " is added to datanodeMap."); + } + + /** + * Physically remove node from datanodeMap. + * + * @param nodeID node + */ + void wipeDatanode(DatanodeID nodeID) throws IOException { + String key = nodeID.getStorageID(); + host2DataNodeMap.remove(datanodeMap.remove(key)); + NameNode.stateChangeLog.debug( + "BLOCK* NameSystem.wipeDatanode: " + + nodeID.getName() + " storage " + key + + " is removed from datanodeMap."); + } + + FSImage getFSImage() { + return dir.fsImage; + } + + FSEditLog getEditLog() { + return getFSImage().getEditLog(); + } + + /** + * Check if there are any expired heartbeats, and if so, + * whether any blocks have to be re-replicated. + * While removing dead datanodes, make sure that only one datanode is marked + * dead at a time within the synchronized section. Otherwise, a cascading + * effect causes more datanodes to be declared dead. + */ + void heartbeatCheck() { + boolean allAlive = false; + while (!allAlive) { + boolean foundDead = false; + DatanodeID nodeID = null; + + // locate the first dead node. + synchronized (heartbeats) { + for (Iterator it = heartbeats.iterator(); + it.hasNext();) { + DatanodeDescriptor nodeInfo = it.next(); + if (isDatanodeDead(nodeInfo)) { + foundDead = true; + nodeID = nodeInfo; + break; + } + } + } + + // acquire the fsnamesystem lock, and then remove the dead node. 
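+ // Remove at most one dead node per pass; the outer loop repeats until no dead nodes remain.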
+ if (foundDead) { + writeLock(); + try { + synchronized (heartbeats) { + synchronized (datanodeMap) { + DatanodeDescriptor nodeInfo = null; + try { + nodeInfo = getDatanode(nodeID); + } catch (IOException e) { + nodeInfo = null; + } + if (nodeInfo != null && isDatanodeDead(nodeInfo)) { + NameNode.stateChangeLog.info("BLOCK* NameSystem.heartbeatCheck: " + + "lost heartbeat from " + nodeInfo.getName()); + removeDatanode(nodeInfo); + } + } + } + } finally { + writeUnlock(); + } + } + allAlive = !foundDead; + } + } + + /** + * The given node is reporting all its blocks. Use this info to + * update the (machine-->blocklist) and (block-->machinelist) tables. + */ + public void processReport(DatanodeID nodeID, + BlockListAsLongs newReport + ) throws IOException { + writeLock(); + try { + long startTime = now(); + if (NameNode.stateChangeLog.isDebugEnabled()) { + NameNode.stateChangeLog.debug("BLOCK* NameSystem.processReport: " + + "from " + nodeID.getName() + " " + + newReport.getNumberOfBlocks() + " blocks"); + } + DatanodeDescriptor node = getDatanode(nodeID); + if (node == null || !node.isAlive) { + throw new IOException("ProcessReport from dead or unregisterted node: " + + nodeID.getName()); + } + + // Check if this datanode should actually be shutdown instead. + if (shouldNodeShutdown(node)) { + setDatanodeDead(node); + throw new DisallowedDatanodeException(node); + } + + // check the case when the NN does not know of any replicas on this + // datanode. This typically happens when the NN restarts and the first + // report from this datanode is being processed. Short-circuit the + // processing in this case: just add all these replicas to this + // datanode. This helps NN restart times tremendously. + if (node.numBlocks() == 0) { + NameNode.stateChangeLog.info("BLOCK* NameSystem.processReport: " + + "from " + nodeID.getName() + " " + + newReport.getNumberOfBlocks() + " blocks" + + " shortCircuit first report."); + Block iblk = new Block(); // a fixed new'ed block to be reused with index i + for (int i = 0; i < newReport.getNumberOfBlocks(); ++i) { + iblk.set(newReport.getBlockId(i), newReport.getBlockLen(i), + newReport.getBlockGenStamp(i)); + addStoredBlock(iblk, node, null); + } + + dnReporting++; + + if(isInSafeMode()) { + LOG.info("BLOCK* NameSystem.processReport:" + dnReporting + + " data nodes reporting, " + + safeMode.blockSafe + "/" + safeMode.blockTotal + + " blocks safe (" + safeMode.getSafeBlockRatio() + ")"); + } + } else { + + // + // Modify the (block-->datanode) map, according to the difference + // between the old and new block report. 
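+ // reportDiff() below sorts every reported replica into toAdd, toRemove or toInvalidate.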
+ // + Collection toAdd = new LinkedList(); + Collection toRemove = new LinkedList(); + Collection toInvalidate = new LinkedList(); + node.reportDiff(blocksMap, newReport, toAdd, toRemove, toInvalidate); + + for (Block b : toRemove) { + removeStoredBlock(b, node); + } + for (Block b : toAdd) { + addStoredBlock(b, node, null); + } + for (Block b : toInvalidate) { + NameNode.stateChangeLog.info("BLOCK* NameSystem.processReport: block " + + b + " on " + node.getName() + " size " + b.getNumBytes() + + " does not belong to any file."); + addToInvalidates(b, node); + } + } + NameNode.getNameNodeMetrics().blockReport.inc((int) (now() - startTime)); + } finally { + writeUnlock(); + } + } + + /** + * Return true if the block size number is valid + */ + private boolean checkBlockSize(Block block, INodeFile inode) { + if (block.getNumBytes() < 0) { + return false; + } + BlockInfo[] blocks = inode.getBlocks(); + if (blocks.length == 0) { + return false; + } + long maxBlockSize = inode.getPreferredBlockSize(); + if (block.getBlockId() == blocks[blocks.length - 1].getBlockId()) { + // the last block must be less or equal than maxBlockSize + return block.getNumBytes() <= maxBlockSize; + } + // the rest of blocks should be exactly at maxBlockSize + return block.getNumBytes() == maxBlockSize; + } + + /** + * Modify (block-->datanode) map. Remove block from set of + * needed replications if this takes care of the problem. + * + * @return the block that is stored in blockMap. + */ + private Block addStoredBlock(Block block, + DatanodeDescriptor node, + DatanodeDescriptor delNodeHint) { + assert (hasWriteLock()); + BlockInfo storedBlock = blocksMap.getStoredBlock(block); + if (storedBlock == null || storedBlock.getINode() == null) { + // If this block does not belong to anyfile, then we are done. + if (!isInSafeMode()) { + NameNode.stateChangeLog.info("BLOCK* NameSystem.addStoredBlock: " + + "addStoredBlock request received for " + + block + " on " + node.getName() + + " size " + block.getNumBytes() + + " But it does not belong to any file."); + } + // we could add this block to invalidate set of this datanode. + // it will happen in next block report otherwise. + return block; + } + + // add block to the data-node + boolean added = node.addBlock(storedBlock); + + assert storedBlock != null : "Block must be stored by now"; + + if (block != storedBlock) { + if (!checkBlockSize(block, storedBlock.getINode())) { + try { + // New replica has an invalid block size. Mark it as corrupted. + LOG.warn("Mark new replica " + block + " from " + node.getName() + + "as corrupt because its length " + block.getNumBytes() + + " is not valid"); + markBlockAsCorrupt(block, node); + } catch (IOException e) { + LOG.warn("Error in deleting bad block " + block + e); + } + } else { + long cursize = storedBlock.getNumBytes(); + if (cursize == 0) { + storedBlock.setNumBytes(block.getNumBytes()); + } else if (cursize != block.getNumBytes()) { + LOG.warn("Inconsistent size for block " + block + + " reported from " + node.getName() + + " current size is " + cursize + + " reported size is " + block.getNumBytes()); + try { + if (cursize > block.getNumBytes()) { + // new replica is smaller in size than existing block. + // Mark the new replica as corrupt. + LOG.warn("Mark new replica " + block + " from " + node.getName() + + "as corrupt because its length is shorter than existing ones"); + markBlockAsCorrupt(block, node); + } else { + // new replica is larger in size than existing block. + // Mark pre-existing replicas as corrupt. 
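+ // Collect every replica other than the newly reported one and mark it as corrupt.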
+ int numNodes = blocksMap.numNodes(block); + int count = 0; + DatanodeDescriptor nodes[] = new DatanodeDescriptor[numNodes]; + Iterator it = blocksMap.nodeIterator(block); + for (; it != null && it.hasNext();) { + DatanodeDescriptor dd = it.next(); + if (!dd.equals(node)) { + nodes[count++] = dd; + } + } + for (int j = 0; j < count; j++) { + LOG.warn("Mark existing replica " + block + " from " + node.getName() + + " as corrupt because its length is shorter than the new one"); + markBlockAsCorrupt(block, nodes[j]); + } + // + // change the size of block in blocksMap + // + storedBlock = blocksMap.getStoredBlock(block); //extra look up! + if (storedBlock == null) { + LOG.warn("Block " + block + + " reported from " + node.getName() + + " does not exist in blockMap. Surprise! Surprise!"); + } else { + storedBlock.setNumBytes(block.getNumBytes()); + } + } + } catch (IOException e) { + LOG.warn("Error in deleting bad block " + block + e); + } + } + + //Updated space consumed if required. + INodeFile file = (storedBlock != null) ? storedBlock.getINode() : null; + long diff = (file == null) ? 0 : + (file.getPreferredBlockSize() - storedBlock.getNumBytes()); + + if (diff > 0 && file.isUnderConstruction() && + cursize < storedBlock.getNumBytes()) { + try { + String path = /* For finding parents */ + leaseManager.findPath((INodeFileUnderConstruction) file); + dir.updateSpaceConsumed(path, 0, -diff * file.getReplication()); + } catch (IOException e) { + LOG.warn("Unexpected exception while updating disk space : " + + e.getMessage()); + } + } + } + block = storedBlock; + } + assert storedBlock == block : "Block must be stored by now"; + + int curReplicaDelta = 0; + + if (added) { + curReplicaDelta = 1; + // + // At startup time, because too many new blocks come in + // they take up lots of space in the log file. + // So, we log only when namenode is out of safemode. + // + if (!isInSafeMode()) { + NameNode.stateChangeLog.info("BLOCK* NameSystem.addStoredBlock: " + + "blockMap updated: " + node.getName() + " is added to " + block + " size " + block.getNumBytes()); + } + } else { + NameNode.stateChangeLog.warn("BLOCK* NameSystem.addStoredBlock: " + + "Redundant addStoredBlock request received for " + + block + " on " + node.getName() + + " size " + block.getNumBytes()); + } + + // filter out containingNodes that are marked for decommission. + NumberReplicas num = null; + int numCurrentReplica = 0; + int numLiveReplicas = 0; + + if (isInSafeMode()) { + // if we are in safemode, then use a cheaper method to count + // only live replicas, that's all we need. + numLiveReplicas = countLiveNodes(storedBlock); + numCurrentReplica = numLiveReplicas; + } else { + // count live & decommissioned replicas + num = countNodes(storedBlock); + numLiveReplicas = num.liveReplicas(); + numCurrentReplica = numLiveReplicas + pendingReplications.getNumReplicas(block); + } + + // check whether safe replication is reached for the block + incrementSafeBlockCount(numCurrentReplica); + + // + // if file is being actively written to, then do not check + // replication-factor here. It will be checked when the file is closed. 
+ // + INodeFile fileINode = null; + fileINode = storedBlock.getINode(); + if (fileINode.isUnderConstruction()) { + return block; + } + + // do not handle mis-replicated blocks during start up + if (!isPopulatingReplQueues()) { + return block; + } + + // handle underReplication/overReplication + short fileReplication = fileINode.getReplication(); + if (numCurrentReplica >= fileReplication) { + neededReplications.remove(block, numCurrentReplica, + num.decommissionedReplicas, fileReplication); + } else { + updateNeededReplications(block, curReplicaDelta, 0); + } + if (numCurrentReplica > fileReplication) { + processOverReplicatedBlock(block, fileReplication, node, delNodeHint); + } + // If the file replication has reached desired value + // we can remove any corrupt replicas the block may have + int corruptReplicasCount = corruptReplicas.numCorruptReplicas(block); + int numCorruptNodes = num.corruptReplicas(); + if (numCorruptNodes != corruptReplicasCount) { + LOG.warn("Inconsistent number of corrupt replicas for " + + block + "blockMap has " + numCorruptNodes + + " but corrupt replicas map has " + corruptReplicasCount); + } + if ((corruptReplicasCount > 0) && (numLiveReplicas >= fileReplication)) { + invalidateCorruptReplicas(block); + } + return block; + } + + /** + * Invalidate corrupt replicas. + *
+ * This will remove the replicas from the block's location list, + * add them to {@link #recentInvalidateSets} so that they could be further + * deleted from the respective data-nodes, + * and remove the block from corruptReplicasMap. + *
+ * This method should be called when the block has sufficient + * number of live replicas. + * + * @param blk Block whose corrupt replicas need to be invalidated + */ + void invalidateCorruptReplicas(Block blk) { + Collection nodes = corruptReplicas.getNodes(blk); + boolean gotException = false; + if (nodes == null) { + return; + } + for (Iterator it = nodes.iterator(); it.hasNext();) { + DatanodeDescriptor node = it.next(); + try { + invalidateBlock(blk, node); + } catch (IOException e) { + NameNode.stateChangeLog.info("NameNode.invalidateCorruptReplicas " + + "error in deleting bad block " + blk + + " on " + node + e); + gotException = true; + } + } + // Remove the block from corruptReplicasMap + if (!gotException) { + corruptReplicas.removeFromCorruptReplicasMap(blk); + } + } + + /** + * For each block in the name-node verify whether it belongs to any file, + * over or under replicated. Place it into the respective queue. + */ + private void processMisReplicatedBlocks() { + writeLock(); + try { + long nrInvalid = 0, nrOverReplicated = 0, nrUnderReplicated = 0; + neededReplications.clear(); + for (BlocksMap.BlockInfo block : blocksMap.getBlocks()) { + INodeFile fileINode = block.getINode(); + if (fileINode == null) { + // block does not belong to any file + nrInvalid++; + addToInvalidates(block); + continue; + } + // calculate current replication + short expectedReplication = fileINode.getReplication(); + NumberReplicas num = countNodes(block); + int numCurrentReplica = num.liveReplicas(); + // add to under-replicated queue if need to be + if (neededReplications.add(block, + numCurrentReplica, + num.decommissionedReplicas(), + expectedReplication)) { + nrUnderReplicated++; + } + + if (numCurrentReplica > expectedReplication) { + // over-replicated block + nrOverReplicated++; + overReplicatedBlocks.add(block); + } + } + LOG.info("Total number of blocks = " + blocksMap.size()); + LOG.info("Number of invalid blocks = " + nrInvalid); + LOG.info("Number of under-replicated blocks = " + nrUnderReplicated); + LOG.info("Number of over-replicated blocks = " + nrOverReplicated); + } finally { + writeUnlock(); + } + } + + /** + * This is called from the ReplicationMonitor to process over + * replicated blocks. + */ + private void processOverReplicatedBlocksAsync() { + + while (true) { + Block block; + writeLock(); + try { + block = overReplicatedBlocks.pollFirst(); + if (block == null) { // no entries + return; + } + } finally { + writeUnlock(); + } + // + // process one entry at a time + NameNode.stateChangeLog.debug("BLOCK* NameSystem.processOverReplicatedBlocksAsync: " + + block); + processOverReplicatedBlock(block, (short) -1, null, null); + } + } + + /** + * Find how many of the containing nodes are "extra", if any. + * If there are any extras, call findOverReplicatedReplicas() to + * insert them into excessReplicateTmp. + */ + private void processOverReplicatedBlock(Block block, short replication, + DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint) { + + List excessReplicateTmp = new ArrayList(); + List originalDatanodes = new ArrayList(); + + // find all replicas that can possibly be deleted. + // The results are returned in excessReplicateTmp. 
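+ // originalDatanodes records the replicas that were valid at selection time so the decision can be re-checked under the write lock below.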
+ findOverReplicatedReplicas(block, replication, + addedNode, delNodeHint, + excessReplicateTmp, originalDatanodes); + if (excessReplicateTmp.size() <= 0) { + return; + } + + writeLock(); // acquire write lock, + try { + + INodeFile inode = blocksMap.getINode(block); + if (inode == null) { + return; // file has been deleted already, nothing to do. + } + + // + // if the state of replicas of this block has changed since the time + // when we released and reacquired the lock, then all the decisions + // that we have made so far might not be correct. Do not delete excess + // replicas in this case. + + int live = 0; + Collection nodesCorrupt = corruptReplicas.getNodes(block); + for (Iterator it = blocksMap.nodeIterator(block); + it.hasNext();) { + DatanodeDescriptor node = it.next(); + if (((nodesCorrupt != null) && (nodesCorrupt.contains(node))) || + node.isDecommissionInProgress() || node.isDecommissioned()) { + // do nothing + } else { + live++; // number of live nodes + originalDatanodes.remove(node); + } + } + + if (originalDatanodes.size() > 0) { + NameNode.stateChangeLog.info("Unable to delete excess replicas for block " + + block + + " because the state of the original replicas have changed." + + " Will retry later."); + overReplicatedBlocks.add(block); + return; + } + + // loop through datanodes that have excess-replicas of this block + for (ListIterator iter = excessReplicateTmp.listIterator(); + iter.hasNext();) { + DatanodeID datanodeId = iter.next(); + + // re-check that block still has excess replicas. + // If not, then there is nothing more to do. + if (live <= inode.getReplication()) { + break; + } + + // find the DatanodeDescriptor for this datanode + DatanodeDescriptor datanode = null; + try { + datanode = getDatanode(datanodeId); + } catch (IOException e) { + } + if (datanode == null) { + NameNode.stateChangeLog.info("No datanode found while processing " + + "overreplicated block " + block); + continue; // dead datanode? + } + + // insert into excessReplicateMap + Collection excessBlocks = excessReplicateMap.get(datanodeId.getStorageID()); + if (excessBlocks == null) { + excessBlocks = new TreeSet(); + excessReplicateMap.put(datanodeId.getStorageID(), excessBlocks); + } + + if (excessBlocks.add(block)) { + excessBlocksCount++; + NameNode.stateChangeLog.info("BLOCK* NameSystem.chooseExcessReplicates: " + + "(" + datanodeId.getName() + ", " + block + + ") is added to excessReplicateMap"); + } + // + // The 'excessblocks' tracks blocks until we get confirmation + // that the datanode has deleted them; the only way we remove them + // is when we get a "removeBlock" message. + // + // The 'invalidate' list is used to inform the datanode the block + // should be deleted. Items are removed from the invalidate list + // upon giving instructions to the namenode. + // + addToInvalidatesNoLog(block, datanode); + live--; + NameNode.stateChangeLog.info("BLOCK* NameSystem.chooseExcessReplicates: " + + "(" + datanode.getName() + ", " + block + ") is added to recentInvalidateSets"); + } + } finally { + writeUnlock(); + } + } + + /** + * Find how many of the containing nodes are "extra", if any. + * If there are any extras, call chooseExcessReplicates() to + * mark them in the excessReplicateMap. 
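+ * The final choice of replicas is made by the pluggable block placement policy, outside the FSNamesystem lock.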
+ * + * @param excessReplicateMapTmp replicas that can possibly be in excess + * @param originalDatanodes all currently valid replicas of this block + */ + private void findOverReplicatedReplicas(Block block, short replication, + DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint, + List excessReplicateMapTmp, + List originalDatanodes) { + + Collection nonExcess; + INodeFile inode; + + readLock(); + try { + inode = blocksMap.getINode(block); + if (inode == null) { + return; // file has been deleted already, nothing to do. + } + + // if the caller did not specify what the target replication factor + // of the file, then fetch it from the inode. This happens when invoked + // by the ReplicationMonitor thread. + if (replication < 0) { + replication = inode.getReplication(); + } + + if (addedNode == delNodeHint) { + delNodeHint = null; + } + nonExcess = new ArrayList(); + Collection corruptNodes = corruptReplicas.getNodes(block); + for (Iterator it = blocksMap.nodeIterator(block); + it.hasNext();) { + DatanodeDescriptor cur = it.next(); + Collection excessBlocks = excessReplicateMap.get(cur.getStorageID()); + if (excessBlocks == null || !excessBlocks.contains(block)) { + if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) { + // exclude corrupt replicas + if (corruptNodes == null || !corruptNodes.contains(cur)) { + nonExcess.add(cur); + originalDatanodes.add(cur); + } + } + } + } + } finally { + readUnlock(); + } + + // this can be called without the FSnamesystem lock because it does not + // use any global data structures. Also, the inode is passed as it is to + // the Pluggable blockplacement policy. + chooseExcessReplicates(nonExcess, block, replication, + addedNode, delNodeHint, inode, excessReplicateMapTmp); + } + + /** + * We want "replication" replicates for the block, but we now have too many. + * In this method, copy enough nodes from 'srcNodes' into 'dstNodes' such that: + *
+ * srcNodes.size() - dstNodes.size() == replication + *
+ * We pick node that make sure that replicas are spread across racks and + * also try hard to pick one with least free space. + * The algorithm is first to pick a node with least free space from nodes + * that are on a rack holding more than one replicas of the block. + * So removing such a replica won't remove a rack. + * If no such a node is available, + * then pick a node with least free space + */ + void chooseExcessReplicates(Collection nonExcess, + Block b, short replication, + DatanodeDescriptor addedNode, + DatanodeDescriptor delNodeHint, + INodeFile inode, + List excessReplicateMapTmp) { + // first form a rack to datanodes map and + HashMap> rackMap = + new HashMap>(); + for (Iterator iter = nonExcess.iterator(); + iter.hasNext();) { + DatanodeDescriptor node = iter.next(); + String rackName = node.getNetworkLocation(); + ArrayList datanodeList = rackMap.get(rackName); + if (datanodeList == null) { + datanodeList = new ArrayList(); + } + datanodeList.add(node); + rackMap.put(rackName, datanodeList); + } + + // split nodes into two sets + // priSet contains nodes on rack with more than one replica + // remains contains the remaining nodes + ArrayList priSet = new ArrayList(); + ArrayList remains = new ArrayList(); + for (Iterator>> iter = + rackMap.entrySet().iterator(); iter.hasNext();) { + Entry> rackEntry = iter.next(); + ArrayList datanodeList = rackEntry.getValue(); + if (datanodeList.size() == 1) { + remains.add(datanodeList.get(0)); + } else { + priSet.addAll(datanodeList); + } + } + + // pick one node to delete that favors the delete hint + // otherwise pick one with least space from priSet if it is not empty + // otherwise one node with least space from remains + boolean firstOne = true; + while (nonExcess.size() - replication > 0) { + DatanodeInfo cur = null; + long minSpace = Long.MAX_VALUE; + + // check if we can del delNodeHint + if (firstOne && delNodeHint != null && nonExcess.contains(delNodeHint) && + (priSet.contains(delNodeHint) || (addedNode != null && !priSet.contains(addedNode)))) { + cur = delNodeHint; + } else { // regular excessive replica removal + cur = replicator.chooseReplicaToDelete(inode, b, replication, priSet, remains); + } + + firstOne = false; + // adjust rackmap, priSet, and remains + String rack = cur.getNetworkLocation(); + ArrayList datanodes = rackMap.get(rack); + datanodes.remove(cur); + if (datanodes.isEmpty()) { + rackMap.remove(rack); + } + if (priSet.remove(cur)) { + if (datanodes.size() == 1) { + priSet.remove(datanodes.get(0)); + remains.add(datanodes.get(0)); + } + } else { + remains.remove(cur); + } + nonExcess.remove(cur); + + excessReplicateMapTmp.add(cur); + NameNode.stateChangeLog.info("BLOCK* NameSystem.chooseExcessReplicates: " + + "(" + cur.getName() + ", " + b + + ") is added to excessReplicateMapTmp"); + } + } + + /** + * Modify (block-->datanode) map. Possibly generate + * replication tasks, if the removed block is still valid. + */ + private void removeStoredBlock(Block block, DatanodeDescriptor node) { + assert (hasWriteLock()); + NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: " + + block + " from " + node.getName()); + if (!blocksMap.removeNode(block, node)) { + NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: " + + block + " has already been removed from node " + node); + return; + } + + // + // It's possible that the block was removed because of a datanode + // failure. If the block is still valid, check if replication is + // necessary. 
In that case, put block on a possibly-will- + // be-replicated list. + // + INode fileINode = blocksMap.getINode(block); + if (fileINode != null) { + decrementSafeBlockCount(block); + updateNeededReplications(block, -1, 0); + } + + // + // We've removed a block from a node, so it's definitely no longer + // in "excess" there. + // + Collection excessBlocks = excessReplicateMap.get(node.getStorageID()); + if (excessBlocks != null) { + if (excessBlocks.remove(block)) { + excessBlocksCount--; + NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: " + + block + " is removed from excessBlocks"); + if (excessBlocks.size() == 0) { + excessReplicateMap.remove(node.getStorageID()); + } + } + } + + // Remove the replica from corruptReplicas + corruptReplicas.removeFromCorruptReplicasMap(block, node); + } + + /** + * The given node is reporting that it received a certain block. + */ + public void blockReceived(DatanodeID nodeID, + Block block, + String delHint + ) throws IOException { + writeLock(); + try { + DatanodeDescriptor node = getDatanode(nodeID); + if (node == null || !node.isAlive) { + NameNode.stateChangeLog.warn("BLOCK* NameSystem.blockReceived: " + block + + " is received from dead or unregistered node " + nodeID.getName()); + throw new IOException( + "Got blockReceived message from unregistered or dead node " + block); + } + + if (NameNode.stateChangeLog.isDebugEnabled()) { + NameNode.stateChangeLog.debug("BLOCK* NameSystem.blockReceived: " + + block + " is received from " + nodeID.getName()); + } + + // Check if this datanode should actually be shutdown instead. + if (shouldNodeShutdown(node)) { + setDatanodeDead(node); + throw new DisallowedDatanodeException(node); + } + + // decrement number of blocks scheduled to this datanode. + node.decBlocksScheduled(); + + // get the deletion hint node + DatanodeDescriptor delHintNode = null; + if (delHint != null && delHint.length() != 0) { + delHintNode = datanodeMap.get(delHint); + if (delHintNode == null) { + NameNode.stateChangeLog.warn("BLOCK* NameSystem.blockReceived: " + + block + + " is expected to be removed from an unrecorded node " + + delHint); + } + } + + // + // Modify the blocks->datanode map and node's map. + // + pendingReplications.remove(block); + addStoredBlock(block, node, delHintNode); + } finally { + writeUnlock(); + } + } + + public long getMissingBlocksCount() { + // not locking + return Math.max(missingBlocksInPrevIter, missingBlocksInCurIter); + } + + long[] getStats() throws IOException { + checkSuperuserPrivilege(); + synchronized (heartbeats) { + return new long[]{this.capacityTotal, this.capacityUsed, + this.capacityRemaining, + this.underReplicatedBlocksCount, + this.corruptReplicaBlocksCount, + getMissingBlocksCount()}; + } + } + + /** + * Total raw bytes including non-dfs used space. 
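+ * The value is maintained under the heartbeats lock together with the other capacity counters.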
+ */ + public long getCapacityTotal() { + synchronized (heartbeats) { + return this.capacityTotal; + } + } + + /** + * Total used space by data nodes + */ + public long getCapacityUsed() { + synchronized (heartbeats) { + return this.capacityUsed; + } + } + + /** + * Total used space by data nodes as percentage of total capacity + */ + public float getCapacityUsedPercent() { + synchronized (heartbeats) { + if (capacityTotal <= 0) { + return 100; + } + + return ((float) capacityUsed * 100.0f) / (float) capacityTotal; + } + } + + /** + * Total used space by data nodes for non DFS purposes such + * as storing temporary files on the local file system + */ + public long getCapacityUsedNonDFS() { + long nonDFSUsed = 0; + synchronized (heartbeats) { + nonDFSUsed = capacityTotal - capacityRemaining - capacityUsed; + } + return nonDFSUsed < 0 ? 0 : nonDFSUsed; + } + + /** + * Total non-used raw bytes. + */ + public long getCapacityRemaining() { + synchronized (heartbeats) { + return this.capacityRemaining; + } + } + + /** + * Total remaining space by data nodes as percentage of total capacity + */ + public float getCapacityRemainingPercent() { + synchronized (heartbeats) { + if (capacityTotal <= 0) { + return 0; + } + + return ((float) capacityRemaining * 100.0f) / (float) capacityTotal; + } + } + + /** + * Total number of connections. + */ + public int getTotalLoad() { + synchronized (heartbeats) { + return this.totalLoad; + } + } + + int getNumberOfDatanodes(DatanodeReportType type) { + return getDatanodeListForReport(type).size(); + } + + private ArrayList getDatanodeListForReport( + DatanodeReportType type) { + readLock(); + try { + + boolean listLiveNodes = type == DatanodeReportType.ALL || + type == DatanodeReportType.LIVE; + boolean listDeadNodes = type == DatanodeReportType.ALL || + type == DatanodeReportType.DEAD; + + HashSet mustList = new HashSet(); + + if (listDeadNodes) { + //first load all the nodes listed in include and exclude files. + for (Iterator it = hostsReader.getHosts().iterator(); + it.hasNext();) { + mustList.add(it.next()); + } + for (Iterator it = hostsReader.getExcludedHosts().iterator(); + it.hasNext();) { + mustList.add(it.next()); + } + } + + ArrayList nodes = null; + + synchronized (datanodeMap) { + nodes = new ArrayList(datanodeMap.size() + + mustList.size()); + + for (Iterator it = datanodeMap.values().iterator(); + it.hasNext();) { + DatanodeDescriptor dn = it.next(); + boolean isDead = isDatanodeDead(dn); + if ((isDead && listDeadNodes) || (!isDead && listLiveNodes)) { + nodes.add(dn); + } + //Remove any form of the this datanode in include/exclude lists. + mustList.remove(dn.getName()); + mustList.remove(dn.getHost()); + mustList.remove(dn.getHostName()); + } + } + + if (listDeadNodes) { + for (Iterator it = mustList.iterator(); it.hasNext();) { + DatanodeDescriptor dn = + new DatanodeDescriptor(new DatanodeID(it.next())); + dn.setLastUpdate(0); + nodes.add(dn); + } + } + + return nodes; + } finally { + readUnlock(); + } + } + + public DatanodeInfo[] datanodeReport(DatanodeReportType type + ) throws AccessControlException { + readLock(); + try { + checkSuperuserPrivilege(); + + ArrayList results = getDatanodeListForReport(type); + DatanodeInfo[] arr = new DatanodeInfo[results.size()]; + for (int i = 0; i < arr.length; i++) { + arr[i] = new DatanodeInfo(results.get(i)); + } + return arr; + } finally { + readUnlock(); + } + } + + /** + * Save namespace image. + * This will save current namespace into fsimage file and empty edits file. 
+ * Requires superuser privilege and safe mode. + * + * @throws AccessControlException if superuser privilege is violated. + * @throws IOException if + */ + void saveNamespace() throws AccessControlException, IOException { + writeLock(); + try { + checkSuperuserPrivilege(); + if (!isInSafeMode()) { + throw new IOException("Safe mode should be turned ON " + + "in order to create namespace image."); + } + getFSImage().saveFSImage(); + LOG.info("New namespace image has been created."); + } finally { + writeUnlock(); + } + } + + /** + */ + public void DFSNodesStatus(ArrayList live, + ArrayList dead, + ArrayList excluded) { + readLock(); + try { + Set excludedHosts = hostsReader.getExcludedHosts(); + ArrayList results = + getDatanodeListForReport(DatanodeReportType.ALL); + for (Iterator it = results.iterator(); it.hasNext();) { + DatanodeDescriptor node = it.next(); + if (isDatanodeDead(node)) { + String hostName = node.getHostName(); + int colon = hostName.indexOf(":"); + String name = ""; + if (colon < 0) { + name = hostName; + } else { + name = hostName.substring(0, colon); + } + if (excludedHosts.contains(name)) { + excluded.add(node); + } else { + dead.add(node); + } + } else { + live.add(node); + } + } + } finally { + readUnlock(); + } + } + + /** + * Prints information about all datanodes. + */ + private void datanodeDump(PrintWriter out) { + readLock(); + try { + synchronized (datanodeMap) { + out.println("Metasave: Number of datanodes: " + datanodeMap.size()); + for (Iterator it = datanodeMap.values().iterator(); it.hasNext();) { + DatanodeDescriptor node = it.next(); + out.println(node.dumpDatanode()); + } + } + } finally { + readUnlock(); + } + } + + /** + * Start decommissioning the specified datanode. + */ + private void startDecommission(DatanodeDescriptor node) + throws IOException { + if (!node.isDecommissioned()) { + LOG.info("Start Decommissioning node " + node.getName()); + node.startDecommission(); + // put the node into the decommission manager's queue + if (((Monitor) dnthread.getRunnable()).startDecommision(node)) { + node.decommissioningStatus.setStartTime(now()); + } + } + } + + /** + * Stop decommissioning the specified datanodes. + */ + private void stopDecommission(DatanodeDescriptor node) + throws IOException { + if ((node.isDecommissionInProgress() && + ((Monitor) dnthread.getRunnable()).stopDecommission(node)) || + node.isDecommissioned()) { + LOG.info("Stop Decommissioning node " + node.getName()); + node.stopDecommission(); + } + } + + /** + */ + public DatanodeInfo getDataNodeInfo(String name) { + return datanodeMap.get(name); + } + + /** + * returns the namenode object that was used to initialize this namesystem + */ + public NameNode getNameNode() { + return this.nameNode; + } + + /** + * @deprecated use {@link NameNode#getNameNodeAddress()} instead. + */ + @Deprecated + public InetSocketAddress getDFSNameNodeAddress() { + return nameNodeAddress; + } + + /** + */ + public Date getStartTime() { + return new Date(systemStart); + } + + short getMaxReplication() { + return (short) maxReplication; + } + + short getMinReplication() { + return (short) minReplication; + } + + short getDefaultReplication() { + return (short) defaultReplication; + } + + /** + * A immutable object that stores the number of live replicas and + * the number of decommissined Replicas. 
+ */ + static class NumberReplicas { + private int liveReplicas; + private int decommissionedReplicas; + private int corruptReplicas; + private int excessReplicas; + + NumberReplicas() { + initialize(0, 0, 0, 0); + } + + NumberReplicas(int live, int decommissioned, int corrupt, int excess) { + initialize(live, decommissioned, corrupt, excess); + } + + void initialize(int live, int decommissioned, int corrupt, int excess) { + liveReplicas = live; + decommissionedReplicas = decommissioned; + corruptReplicas = corrupt; + excessReplicas = excess; + } + + int liveReplicas() { + return liveReplicas; + } + + int decommissionedReplicas() { + return decommissionedReplicas; + } + + int corruptReplicas() { + return corruptReplicas; + } + + int excessReplicas() { + return excessReplicas; + } + } + + /** + * Counts the number of nodes in the given list into active and + * decommissioned counters. + */ + private NumberReplicas countNodes(Block b, + Iterator nodeIter) { + int count = 0; + int live = 0; + int corrupt = 0; + int excess = 0; + Collection nodesCorrupt = corruptReplicas.getNodes(b); + while (nodeIter.hasNext()) { + DatanodeDescriptor node = nodeIter.next(); + if ((nodesCorrupt != null) && (nodesCorrupt.contains(node))) { + corrupt++; + } else if (node.isDecommissionInProgress() || node.isDecommissioned()) { + count++; + } else { + Collection blocksExcess = + excessReplicateMap.get(node.getStorageID()); + if (blocksExcess != null && blocksExcess.contains(b)) { + excess++; + } else { + live++; + } + } + } + return new NumberReplicas(live, count, corrupt, excess); + } + + /** + * Counts the number of live nodes in the given list + */ + private int countLiveNodes(Block b, Iterator nodeIter) { + int live = 0; + Collection nodesCorrupt = null; + if (corruptReplicas.size() != 0) { + nodesCorrupt = corruptReplicas.getNodes(b); + } + while (nodeIter.hasNext()) { + DatanodeDescriptor node = nodeIter.next(); + if (((nodesCorrupt != null) && (nodesCorrupt.contains(node))) || + node.isDecommissionInProgress() || node.isDecommissioned()) { + // do nothing + } else { + live++; + } + } + return live; + } + + /** + * Return the number of nodes that are live + */ + int countLiveNodes(Block b) { + return countLiveNodes(b, blocksMap.nodeIterator(b)); + } + + /** + * Return the number of nodes that are live and decommissioned. 
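+ * Corrupt and excess replicas are counted separately and are not included in the live count.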
+ */ + NumberReplicas countNodes(Block b) { + return countNodes(b, blocksMap.nodeIterator(b)); + } + + private void logBlockReplicationInfo(Block block, DatanodeDescriptor srcNode, + NumberReplicas num) { + int curReplicas = num.liveReplicas(); + int curExpectedReplicas = getReplication(block); + INode fileINode = blocksMap.getINode(block); + Iterator nodeIter = blocksMap.nodeIterator(block); + StringBuffer nodeList = new StringBuffer(); + while (nodeIter.hasNext()) { + DatanodeDescriptor node = nodeIter.next(); + nodeList.append(node.name); + nodeList.append(" "); + } + FSNamesystem.LOG.info("Block: " + block + ", Expected Replicas: " + + curExpectedReplicas + ", live replicas: " + curReplicas + + ", corrupt replicas: " + num.corruptReplicas() + + ", decommissioned replicas: " + num.decommissionedReplicas() + + ", excess replicas: " + num.excessReplicas() + ", Is Open File: " + + fileINode.isUnderConstruction() + ", Datanodes having this block: " + + nodeList + ", Current Datanode: " + srcNode.name + + ", Is current datanode decommissioning: " + + srcNode.isDecommissionInProgress()); + } + + /** + * Check if a block on srcNode has reached its replication factor or not + * + * @param srcNode a datanode + * @param block a block + */ + void isReplicationInProgress(DatanodeDescriptor srcNode, Block block) { + final DecommissioningStatus status = srcNode.decommissioningStatus; + + INode fileINode = blocksMap.getINode(block); + if (fileINode == null) { + return; + } + NumberReplicas num = countNodes(block); + int curReplicas = num.liveReplicas(); + int curExpectedReplicas = getReplication(block); + if (curExpectedReplicas > curReplicas) { + //Log info about one block for this node which needs replication + if (status.underReplicatedBlocks == 0) { + logBlockReplicationInfo(block, srcNode, num); + } + status.underReplicatedBlocks++; + if ((curReplicas == 0) && (num.decommissionedReplicas() > 0)) { + status.decommissionOnlyReplicas++; + } + if (fileINode.isUnderConstruction()) { + status.underReplicatedInOpenFiles++; + } + if (!neededReplications.contains(block) && + pendingReplications.getNumReplicas(block) == 0) { + // + // These blocks have been reported from the datanode + // after the startDecommission method has been executed. These + // blocks were in flight when the decommissioning was started. + // + neededReplications.add(block, + curReplicas, + num.decommissionedReplicas(), + curExpectedReplicas); + } + } + } + + /** + * Keeps track of which datanodes/ipaddress are allowed to connect to the namenode. + */ + private boolean inHostsList(DatanodeID node, String ipAddr) { + Set hostsList = hostsReader.getHosts(); + return (hostsList.isEmpty() || + (ipAddr != null && hostsList.contains(ipAddr)) || + hostsList.contains(node.getHost()) || + hostsList.contains(node.getName()) || + ((node instanceof DatanodeInfo) && + hostsList.contains(((DatanodeInfo) node).getHostName()))); + } + + private boolean inExcludedHostsList(DatanodeID node, String ipAddr) { + Set excludeList = hostsReader.getExcludedHosts(); + return ((ipAddr != null && excludeList.contains(ipAddr)) || + excludeList.contains(node.getHost()) || + excludeList.contains(node.getName()) || + ((node instanceof DatanodeInfo) && + excludeList.contains(((DatanodeInfo) node).getHostName()))); + } + + /** + * Rereads the config to get hosts and exclude list file names. + * Rereads the files to update the hosts and exclude lists. It + * checks if any of the hosts have changed states: + * 1. Added to hosts --> no further work needed here. + * 2. 
Removed from hosts --> mark AdminState as decommissioned. + * 3. Added to exclude --> start decommission. + * 4. Removed from exclude --> stop decommission. + */ + public void refreshNodes(Configuration conf) throws IOException { + checkSuperuserPrivilege(); + // Reread the config to get dfs.hosts and dfs.hosts.exclude filenames. + // Update the file names and refresh internal includes and excludes list + if (conf == null) { + conf = new Configuration(); + } + hostsReader.updateFileNames(conf.get("dfs.hosts", ""), + conf.get("dfs.hosts.exclude", "")); + hostsReader.refresh(); + writeLock(); + try { + for (Iterator it = datanodeMap.values().iterator(); + it.hasNext();) { + DatanodeDescriptor node = it.next(); + // Check if not include. + if (!inHostsList(node, null)) { + node.setDecommissioned(); // case 2. + } else { + if (inExcludedHostsList(node, null)) { + startDecommission(node); // case 3. + } else { + stopDecommission(node); // case 4. + } + } + } + } finally { + writeUnlock(); + } + + } + + void finalizeUpgrade() throws IOException { + checkSuperuserPrivilege(); + getFSImage().finalizeUpgrade(); + } + + /** + * Checks if the node is not on the hosts list. If it is not, then + * it will be ignored. If the node is in the hosts list, but is also + * on the exclude list, then it will be decommissioned. + * Returns FALSE if node is rejected for registration. + * Returns TRUE if node is registered (including when it is on the + * exclude list and is being decommissioned). + */ + private boolean verifyNodeRegistration(DatanodeRegistration nodeReg, String ipAddr) + throws IOException { + assert (hasWriteLock()); + if (!inHostsList(nodeReg, ipAddr)) { + return false; + } + if (inExcludedHostsList(nodeReg, ipAddr)) { + DatanodeDescriptor node = getDatanode(nodeReg); + if (node == null) { + throw new IOException("verifyNodeRegistration: unknown datanode " + + nodeReg.getName()); + } + startDecommission(node); + } + return true; + } + + /** + * Checks if the Admin state bit is DECOMMISSIONED. If so, then + * we should shut it down. + *
+ * Returns true if the node should be shutdown. + */ + private boolean shouldNodeShutdown(DatanodeDescriptor node) { + return (node.isDecommissioned()); + } + + /** + * Get data node by storage ID. + * + * @param nodeID + * @return DatanodeDescriptor or null if the node is not found. + * @throws IOException + */ + public DatanodeDescriptor getDatanode(DatanodeID nodeID) throws IOException { + UnregisteredDatanodeException e = null; + DatanodeDescriptor node = datanodeMap.get(nodeID.getStorageID()); + if (node == null) { + return null; + } + if (!node.getName().equals(nodeID.getName())) { + e = new UnregisteredDatanodeException(nodeID, node); + NameNode.stateChangeLog.fatal("BLOCK* NameSystem.getDatanode: " + + e.getLocalizedMessage()); + throw e; + } + return node; + } + + /** + * Stop at and return the datanode at index (used for content browsing) + */ + @Deprecated + private DatanodeDescriptor getDatanodeByIndex(int index) { + int i = 0; + for (DatanodeDescriptor node : datanodeMap.values()) { + if (i == index) { + return node; + } + i++; + } + return null; + } + + @Deprecated + public String randomDataNode() { + int size = datanodeMap.size(); + int index = 0; + if (size != 0) { + index = r.nextInt(size); + for (int i = 0; i < size; i++) { + DatanodeDescriptor d = getDatanodeByIndex(index); + if (d != null && !d.isDecommissioned() && !isDatanodeDead(d) && + !d.isDecommissionInProgress()) { + return d.getHost() + ":" + d.getInfoPort(); + } + index = (index + 1) % size; + } + } + return null; + } + + public DatanodeDescriptor getRandomDatanode() { + return replicator.chooseTarget("", 1, null, + new ArrayList(), 0)[0]; + } + + /** + * SafeModeInfo contains information related to the safe mode. + *
+ * An instance of {@link SafeModeInfo} is created when the name node + * enters safe mode. + *
+ * During name node startup {@link SafeModeInfo} counts the number of + * safe blocks, those that have at least the minimal number of + * replicas, and calculates the ratio of safe blocks to the total number + * of blocks in the system, which is the size of + * {@link FSNamesystem#blocksMap}. When the ratio reaches the + * {@link #threshold} it starts the {@link SafeModeMonitor} daemon in order + * to monitor whether the safe mode {@link #extension} is passed. + * Then it leaves safe mode and destroys itself. + *
+ * If safe mode is turned on manually then the number of safe blocks is + * not tracked because the name node is not intended to leave safe mode + * automatically in the case. + * + * @see ClientProtocol#setSafeMode(FSConstants.SafeModeAction) + * @see SafeModeMonitor + */ + class SafeModeInfo { + // configuration fields + /** + * Safe mode threshold condition %. + */ + private double threshold; + /** + * Safe mode extension after the threshold. + */ + private long extension; + /** + * Min replication required by safe mode. + */ + private int safeReplication; + /** threshold for populating needed replication queues */ + private double replQueueThreshold; + + // internal fields + /** + * Time when threshold was reached. + *
+ *
-1 safe mode is off + *
0 safe mode is on, but threshold is not reached yet + */ + private long reached = -1; + /** + * Total number of blocks. + */ + int blockTotal; + /** + * Number of safe blocks. + */ + private int blockSafe; + /** Number of blocks needed before populating replication queues */ + private int blockReplQueueThreshold; + /** + * time of the last status printout + */ + private long lastStatusReport = 0; + /** flag indicating whether replication queues have been initialized */ + private boolean initializedReplQueues = false; + + /** + * Creates SafeModeInfo when the name node enters + * automatic safe mode at startup. + * + * @param conf configuration + */ + SafeModeInfo(Configuration conf) { + this.threshold = conf.getFloat("dfs.safemode.threshold.pct", 0.95f); + this.extension = conf.getLong("dfs.safemode.extension", 0); + // default to safe mode threshold + // (i.e., don't populate queues before leaving safe mode) + this.replQueueThreshold = + conf.getFloat("dfs.namenode.replqueue.threshold-pct", + (float) threshold); + this.safeReplication = conf.getInt("dfs.replication.min", 1); + this.blockTotal = 0; + this.blockSafe = 0; + } + + /** + * Creates SafeModeInfo when safe mode is entered manually. + *
+ * The {@link #threshold} is set to 1.5 so that it could never be reached. + * {@link #blockTotal} is set to -1 to indicate that safe mode is manual. + * + * @see SafeModeInfo + */ + private SafeModeInfo() { + this.threshold = 1.5f; // this threshold can never be reached + this.extension = Long.MAX_VALUE; + this.safeReplication = Short.MAX_VALUE + 1; // more than maxReplication + this.replQueueThreshold = 1.5f; // can never be reached + this.blockTotal = -1; + this.blockSafe = -1; + this.reached = -1; + enter(); + reportStatus("STATE* Safe mode is ON.", true); + } + + /** + * Check if safe mode is on. + * + * @return true if in safe mode + */ + synchronized boolean isOn() { + try { + assert isConsistent() : " SafeMode: Inconsistent filesystem state: " + + "Total num of blocks, active blocks, or " + + "total safe blocks don't match."; + } catch (IOException e) { + System.err.print(StringUtils.stringifyException(e)); + } + return this.reached >= 0; + } + + /** + * Check if we are populating replication queues. + */ + synchronized boolean isPopulatingReplQueues() { + return initializedReplQueues; + } + + /** + * Enter safe mode. + */ + void enter() { + this.reached = 0; + } + + /** + * Leave safe mode. + *
+ * Switch to manual safe mode if distributed upgrade is required.
+ * Check for invalid, under- & over-replicated blocks in the end of startup. + */ + synchronized void leave(boolean checkForUpgrades) { + if (checkForUpgrades) { + // verify whether a distributed upgrade needs to be started + boolean needUpgrade = false; + try { + needUpgrade = startDistributedUpgradeIfNeeded(); + } catch (IOException e) { + FSNamesystem.LOG.error(StringUtils.stringifyException(e)); + } + if (needUpgrade) { + // switch to manual safe mode + safeMode = new SafeModeInfo(); + return; + } + } + // if not done yet, initialize replication queues + if (!isPopulatingReplQueues()) { + initializeReplQueues(); + } + long timeInSafemode = now() - systemStart; + NameNode.stateChangeLog.info("STATE* Leaving safe mode after " + + timeInSafemode / 1000 + " secs."); + NameNode.getNameNodeMetrics().safeModeTime.set((int) timeInSafemode); + + if (reached >= 0) { + NameNode.stateChangeLog.info("STATE* Safe mode is OFF."); + } + reached = -1; + safeMode = null; + try { + nameNode.startServerForClientRequests(); + } catch (IOException ex) { + nameNode.stop(); + } + NameNode.stateChangeLog.info("STATE* Network topology has " + + clusterMap.getNumOfRacks() + " racks and " + + clusterMap.getNumOfLeaves() + " datanodes"); + NameNode.stateChangeLog.info("STATE* UnderReplicatedBlocks has " + + neededReplications.size() + " blocks"); + } + + /** + * Initialize replication queues. + */ + synchronized void initializeReplQueues() { + LOG.info("initializing replication queues"); + if (isPopulatingReplQueues()) { + LOG.warn("Replication queues already initialized."); + } + processMisReplicatedBlocks(); + initializedReplQueues = true; + } + + /** + * Check whether we have reached the threshold for + * initializing replication queues. + */ + synchronized boolean canInitializeReplQueues() { + return blockSafe >= blockReplQueueThreshold; + } + + /** + * Safe mode can be turned off iff + * the threshold is reached and + * the extension time have passed. + * + * @return true if can leave or false otherwise. + */ + synchronized boolean canLeave() { + if (reached == 0) { + return false; + } + if (now() - reached < extension) { + reportStatus("STATE* Safe mode ON.", false); + return false; + } + return !needEnter(); + } + + /** + * There is no need to enter safe mode + * if DFS is empty or {@link #threshold} == 0 + */ + boolean needEnter() { + return getSafeBlockRatio() < threshold; + } + + /** + * Ratio of the number of safe blocks to the total number of blocks + * to be compared with the threshold. + */ + private double getSafeBlockRatio() { + return (blockTotal == blockSafe ? 1 : (double) blockSafe / (double) blockTotal); + } + + /** + * Check and trigger safe mode if needed. 
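+ * Replication queues are initialized once blockSafe reaches blockReplQueueThreshold; for example, with the default dfs.safemode.threshold.pct of 0.95 and 1000 total blocks, safe mode can only be left after at least 950 blocks are reported safe (and the configured extension has passed).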
+ */ + private void checkMode() { + if (needEnter()) { + enter(); + // check if we are ready to initialize replication queues + if (canInitializeReplQueues() && !isPopulatingReplQueues()) { + initializeReplQueues(); + } + reportStatus("STATE* Safe mode ON.", false); + return; + } + // the threshold is reached + if (!isOn() || // safe mode is off + extension <= 0 || threshold <= 0) { // don't need to wait + this.leave(true); // leave safe mode + return; + } + if (reached > 0) { // threshold has already been reached before + reportStatus("STATE* Safe mode ON.", false); + return; + } + // start monitor + reached = now(); + smmthread = new Daemon(new SafeModeMonitor()); + smmthread.start(); + reportStatus("STATE* Safe mode extension entered.", true); + + // check if we are ready to initialize replication queues + if (canInitializeReplQueues() && !isPopulatingReplQueues()) { + initializeReplQueues(); + } + } + + /** + * Set total number of blocks. + */ + synchronized void setBlockTotal(int total) { + this.blockTotal = total; + this.blockReplQueueThreshold = + (int) (((double) blockTotal) * replQueueThreshold); + checkMode(); + } + + /** + * Increment number of safe blocks if current block has + * reached minimal replication. + * + * @param replication current replication + */ + synchronized void incrementSafeBlockCount(short replication) { + if ((int) replication == safeReplication) { + this.blockSafe++; + checkMode(); + } + } + + /** + * Decrement number of safe blocks if current block has + * fallen below minimal replication. + * + * @param replication current replication + */ + synchronized void decrementSafeBlockCount(short replication) { + if (replication == safeReplication - 1) { + this.blockSafe--; + checkMode(); + } + } + + /** + * Check if safe mode was entered manually or at startup. + */ + boolean isManual() { + return extension == Long.MAX_VALUE; + } + + /** + * Set manual safe mode. + */ + void setManual() { + extension = Long.MAX_VALUE; + } + + /** + * A tip on how safe mode is to be turned off: manually or automatically. + */ + String getTurnOffTip() { + String leaveMsg = "Safe mode will be turned off automatically"; + if (reached < 0) { + return "Safe mode is OFF."; + } + if (isManual()) { + if (getDistributedUpgradeState()) { + return leaveMsg + " upon completion of " + + "the distributed upgrade: upgrade progress = " + + getDistributedUpgradeStatus() + "%"; + } + leaveMsg = "Use \"hadoop dfsadmin -safemode leave\" to turn safe mode off"; + } + if (blockTotal < 0) { + return leaveMsg + "."; + } + String safeBlockRatioMsg = + String.format("The ratio of reported blocks %.8f has " + + (reached == 0 ? "not " : "") + "reached the threshold %.8f. ", + getSafeBlockRatio(), threshold) + + "Safe blocks = " + blockSafe + + ", Total blocks = " + blockTotal + "." + + leaveMsg; + if (reached == 0 || isManual()) // threshold is not reached or manual + { + return safeBlockRatioMsg + "."; + } + // extension period is in progress + return safeBlockRatioMsg + " in " + + Math.abs(reached + extension - now()) / 1000 + " seconds."; + } + + /** + * Print status every 20 seconds. + */ + private void reportStatus(String msg, boolean rightNow) { + long curTime = now(); + if (!rightNow && (curTime - lastStatusReport < 20 * 1000)) { + return; + } + NameNode.stateChangeLog.info(msg + " \n" + getTurnOffTip()); + lastStatusReport = curTime; + } + + /** + * Returns printable state of the class. + */ + public String toString() { + String resText = "Current safe block ratio = " + + getSafeBlockRatio() + + ". 
Safe blocks = " + blockSafe + + ". Total blocks = " + blockTotal + + ". Target threshold = " + threshold + + ". Minimal replication = " + safeReplication + "."; + if (reached > 0) { + resText += " Threshold was reached " + new Date(reached) + "."; + } + return resText; + } + + /** + * Checks consistency of the class state. + * This is costly and currently called only in assert. + */ + boolean isConsistent() throws IOException { + if (blockTotal == -1 && blockSafe == -1) { + return true; // manual safe mode + } + int activeBlocks = blocksMap.size() - (int) pendingDeletionBlocksCount; + return (blockTotal == activeBlocks) || + (blockSafe >= 0 && blockSafe <= blockTotal); + } + } + + /** + * Periodically check whether it is time to leave safe mode. + * This thread starts when the threshold level is reached. + */ + class SafeModeMonitor implements Runnable { + /** + * interval in msec for checking safe mode: {@value} + */ + private static final long recheckInterval = 1000; + + /** + */ + public void run() { + while (fsRunning && (safeMode != null && !safeMode.canLeave())) { + try { + Thread.sleep(recheckInterval); + } catch (InterruptedException ie) { + } + } + // if we stopped namenode while still in safemode, then exit here + if (!fsRunning) { + LOG.info("Quitting SafeModeMonitor thread. "); + return; + } + + // leave safe mode and stop the monitor + try { + leaveSafeMode(true); + } catch (SafeModeException es) { // should never happen + String msg = "SafeModeMonitor may not run during distributed upgrade."; + assert false : msg; + throw new RuntimeException(msg, es); + } + smmthread = null; + } + } + + /** + * Current system time. + * + * @return current time in msec. + */ + static long now() { + return System.currentTimeMillis(); + } + + boolean setSafeMode(SafeModeAction action) throws IOException { + if (action != SafeModeAction.SAFEMODE_GET) { + checkSuperuserPrivilege(); + switch (action) { + case SAFEMODE_LEAVE: // leave safe mode + if (!manualOverrideSafeMode) { + leaveSafeMode(false); + } else { + LOG.warn("Leaving safemode is not allowed. " + manualOverrideSafeMode); + } + break; + case SAFEMODE_ENTER: // enter safe mode + enterSafeMode(); + break; + } + } + return isInSafeMode(); + } + + /** + * Allow the ability to let an external API manually override exiting safemode + */ + void setSafeModeManualOverride(boolean flag) { + this.manualOverrideSafeMode = flag; + } + + /** + * Check whether the name node is in safe mode. + * + * @return true if safe mode is ON, false otherwise + */ + synchronized boolean isInSafeMode() { + if (safeMode == null) { + return false; + } + return safeMode.isOn(); + } + + /** + * Check whether replication queues are populated. + */ + synchronized boolean isPopulatingReplQueues() { + return (!isInSafeMode() || + safeMode.isPopulatingReplQueues()); + } + + /** + * Increment number of blocks that reached minimal replication. + * + * @param replication current replication + */ + void incrementSafeBlockCount(int replication) { + if (safeMode == null) { + return; + } + safeMode.incrementSafeBlockCount((short) replication); + } + + /** + * Decrement number of blocks that reached minimal replication. + */ + void decrementSafeBlockCount(Block b) { + if (safeMode == null) // mostly true + { + return; + } + safeMode.decrementSafeBlockCount((short) countNodes(b).liveReplicas()); + } + + /** + * Set the total number of blocks in the system. 
+ */ + void setBlockTotal() { + if (safeMode == null) { + return; + } + safeMode.setBlockTotal(blocksMap.size()); + } + + /** + * Get the total number of blocks in the system. + */ + public long getBlocksTotal() { + return blocksMap.size(); + } + + /** + * Enter safe mode manually. + * + * @throws IOException + */ + void enterSafeMode() throws IOException { + writeLock(); + try { + if (!isInSafeMode()) { + safeMode = new SafeModeInfo(); + return; + } + safeMode.setManual(); + getEditLog().logSyncAll(); + NameNode.stateChangeLog.info("STATE* Safe mode is ON. " + + safeMode.getTurnOffTip()); + } finally { + writeUnlock(); + } + } + + /** + * Leave safe mode. + * + * @throws IOException + */ + void leaveSafeMode(boolean checkForUpgrades) throws SafeModeException { + writeLock(); + try { + if (!isInSafeMode()) { + NameNode.stateChangeLog.info("STATE* Safe mode is already OFF."); + return; + } + if (getDistributedUpgradeState()) { + throw new SafeModeException("Distributed upgrade is in progress", + safeMode); + } + safeMode.leave(checkForUpgrades); + } finally { + writeUnlock(); + } + } + + String getSafeModeTip() { + if (!isInSafeMode()) { + return ""; + } + return safeMode.getTurnOffTip(); + } + + long getEditLogSize() throws IOException { + return getEditLog().getEditLogSize(); + } + + CheckpointSignature rollEditLog() throws IOException { + writeLock(); + try { + if (isInSafeMode()) { + throw new SafeModeException("Checkpoint not created", + safeMode); + } + LOG.info("Roll Edit Log from " + Server.getRemoteAddress() + + " editlog file " + getFSImage().getEditLog().getFsEditName() + + " editlog timestamp " + getFSImage().getEditLog().getFsEditTime()); + return getFSImage().rollEditLog(); + } finally { + writeUnlock(); + } + } + + /** + * Moves fsimage.ckpt to fsImage and edits.new to edits + * Reopens the new edits file. + * + * @param newImageSignature the signature of the new image + */ + void rollFSImage(CheckpointSignature newImageSignature) throws IOException { + writeLock(); + try { + if (isInSafeMode()) { + throw new SafeModeException("Checkpoint not created", + safeMode); + } + LOG.info("Roll FSImage from " + Server.getRemoteAddress()); + getFSImage().rollFSImage(newImageSignature); + } finally { + writeUnlock(); + } + } + + /** + * Returns whether the given block is one pointed-to by a file. 
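Every mutation above is wrapped in writeLock()/writeUnlock() inside a try/finally, and reads use the matching read lock. A minimal sketch of that idiom, with a hypothetical map standing in for the real namesystem state, is:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Minimal illustration of the readLock()/writeLock() try-finally pattern
    // used by FSNamesystem; "namespace" is only a stand-in for the real state.
    public class LockedNamespace {
      private final ReentrantReadWriteLock fsLock = new ReentrantReadWriteLock();
      private final Map<String, Long> namespace = new HashMap<String, Long>();

      void writeLock()   { fsLock.writeLock().lock(); }
      void writeUnlock() { fsLock.writeLock().unlock(); }
      void readLock()    { fsLock.readLock().lock(); }
      void readUnlock()  { fsLock.readLock().unlock(); }

      void create(String path, long len) {
        writeLock();
        try {
          namespace.put(path, len);      // mutation only under the write lock
        } finally {
          writeUnlock();                 // always released, even on exceptions
        }
      }

      Long length(String path) {
        readLock();
        try {
          return namespace.get(path);    // many readers may hold this concurrently
        } finally {
          readUnlock();
        }
      }
    }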
+ */ + private boolean isValidBlock(Block b) { + return (blocksMap.getINode(b) != null); + } + + // Distributed upgrade manager + UpgradeManagerNamenode upgradeManager = new UpgradeManagerNamenode(); + + UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action + ) throws IOException { + return upgradeManager.distributedUpgradeProgress(action); + } + + UpgradeCommand processDistributedUpgradeCommand(UpgradeCommand comm) + throws IOException { + return upgradeManager.processUpgradeCommand(comm); + } + + int getDistributedUpgradeVersion() { + return upgradeManager.getUpgradeVersion(); + } + + UpgradeCommand getDistributedUpgradeCommand() throws IOException { + return upgradeManager.getBroadcastCommand(); + } + + boolean getDistributedUpgradeState() { + return upgradeManager.getUpgradeState(); + } + + short getDistributedUpgradeStatus() { + return upgradeManager.getUpgradeStatus(); + } + + boolean startDistributedUpgradeIfNeeded() throws IOException { + return upgradeManager.startUpgrade(); + } + + PermissionStatus createFsOwnerPermissions(FsPermission permission) { + return new PermissionStatus(fsOwner.getUserName(), supergroup, permission); + } + + private FSPermissionChecker checkOwner(String path) + throws AccessControlException { + return checkPermission(path, true, null, null, null, null); + } + + private FSPermissionChecker checkPathAccess(String path, FsAction access + ) throws AccessControlException { + return checkPermission(path, false, null, null, access, null); + } + + private FSPermissionChecker checkParentAccess(String path, FsAction access + ) throws AccessControlException { + return checkPermission(path, false, null, access, null, null); + } + + private FSPermissionChecker checkAncestorAccess(String path, FsAction access + ) throws AccessControlException { + return checkPermission(path, false, access, null, null, null); + } + + private FSPermissionChecker checkTraverse(String path + ) throws AccessControlException { + return checkPermission(path, false, null, null, null, null); + } + + private void checkSuperuserPrivilege() throws AccessControlException { + if (isPermissionEnabled) { + try { + PermissionChecker.checkSuperuserPrivilege(fsOwner, supergroup); + } catch (AccessControlException e) { + if (this.permissionAuditOnly) { + // do not throw the exception, we would like to only log. + LOG.warn("PermissionAudit superuser failed for user: " + fsOwner + + " group:" + supergroup); + } else { + throw e; + } + } + } + } + + /** + * Check whether current user have permissions to access the path. + * For more details of the parameters, see + * {@link FSPermissionChecker#checkPermission(String, INodeDirectory, boolean, FsAction, FsAction, FsAction, FsAction)}. + */ + private FSPermissionChecker checkPermission(String path, boolean doCheckOwner, + FsAction ancestorAccess, FsAction parentAccess, FsAction access, + FsAction subAccess) + throws AccessControlException { + FSPermissionChecker pc = new FSPermissionChecker( + fsOwner.getUserName(), supergroup); + if (!pc.isSuper) { + dir.waitForReady(); + readLock(); + try { + pc.checkPermission(path, dir.rootDir, doCheckOwner, + ancestorAccess, parentAccess, access, subAccess); + } catch (AccessControlException e) { + if (this.permissionAuditOnly) { + // do not throw the exception, we would like to only log. 
+ LOG.warn("PermissionAudit failed on " + path + + " checkOwner:" + doCheckOwner + + " ancestor:" + ancestorAccess + + " parent:" + parentAccess + + " access:" + access + + " subaccess:" + subAccess); + } else { + throw e; + } + } finally { + readUnlock(); + } + } + return pc; + } + + /** + * Check to see if we have exceeded the limit on the number + * of inodes. + */ + void checkFsObjectLimit() throws IOException { + if (maxFsObjects != 0 && + maxFsObjects <= dir.totalInodes() + getBlocksTotal()) { + throw new IOException("Exceeded the configured number of objects " + + maxFsObjects + " in the filesystem."); + } + } + + /** + * Get the total number of objects in the system. + */ + long getMaxObjects() { + return maxFsObjects; + } + + public long getFilesTotal() { + return this.dir.totalInodes(); + } + + public long getPendingReplicationBlocks() { + return pendingReplicationBlocksCount; + } + + public long getUnderReplicatedBlocks() { + return underReplicatedBlocksCount; + } + + /** + * Returns number of blocks with corrupt replicas + */ + public long getCorruptReplicaBlocks() { + return corruptReplicaBlocksCount; + } + + public long getScheduledReplicationBlocks() { + return scheduledReplicationBlocksCount; + } + + public long getPendingDeletionBlocks() { + return pendingDeletionBlocksCount; + } + + public long getExcessBlocks() { + return excessBlocksCount; + } + + public int getBlockCapacity() { + readLock(); + try { + return blocksMap.getCapacity(); + } finally { + readUnlock(); + } + } + + public String getFSState() { + return isInSafeMode() ? "safeMode" : "Operational"; + } + + private ObjectName mbeanName; + + /** + * Register the FSNamesystem MBean using the name + * "hadoop:service=NameNode,name=FSNamesystemState" + */ + void registerMBean(Configuration conf) { + // We wrap to bypass standard mbean naming convention. + // This wraping can be removed in java 6 as it is more flexible in + // package naming for mbeans and their impl. 
+ StandardMBean bean; + try { + myFSMetrics = new FSNamesystemMetrics(conf); + bean = new StandardMBean(this, FSNamesystemMBean.class); + mbeanName = MBeanUtil.registerMBean("NameNode", "FSNamesystemState", bean); + } catch (NotCompliantMBeanException e) { + e.printStackTrace(); + } + + LOG.info("Registered FSNamesystemStatusMBean"); + } + + /** + * get FSNamesystemMetrics + */ + public FSNamesystemMetrics getFSNamesystemMetrics() { + return myFSMetrics; + } + + /** + * shutdown FSNamesystem + */ + public void shutdown() { + if (mbeanName != null) { + MBeanUtil.unregisterMBean(mbeanName); + } + } + + + /** + * Number of live data nodes + * + * @return Number of live data nodes + */ + public int getNumLiveDataNodes() { + int numLive = 0; + synchronized (datanodeMap) { + for (Iterator it = datanodeMap.values().iterator(); + it.hasNext();) { + DatanodeDescriptor dn = it.next(); + if (!isDatanodeDead(dn)) { + numLive++; + } + } + } + return numLive; + } + + + /** + * Number of dead data nodes + * + * @return Number of dead data nodes + */ + public int getNumDeadDataNodes() { + int numDead = 0; + synchronized (datanodeMap) { + for (Iterator it = datanodeMap.values().iterator(); + it.hasNext();) { + DatanodeDescriptor dn = it.next(); + if (isDatanodeDead(dn)) { + numDead++; + } + } + } + return numDead; + } + + /** + * Sets the generation stamp for this filesystem + */ + public void setGenerationStamp(long stamp) { + generationStamp.setStamp(stamp); + } + + /** + * Gets the generation stamp for this filesystem + */ + public long getGenerationStamp() { + return generationStamp.getStamp(); + } + + /** + * Increments, logs and then returns the stamp + */ + private long nextGenerationStamp() { + long gs = generationStamp.nextStamp(); + getEditLog().logGenerationStamp(gs); + return gs; + } + + /** + * Verifies that the block is associated with a file that has a lease. + * Increments, logs and then returns the stamp + */ + long nextGenerationStampForBlock(Block block) throws IOException { + writeLock(); + try { + if (isInSafeMode()) { + throw new SafeModeException("Cannot get nextGenStamp for " + block, safeMode); + } + BlockInfo storedBlock = blocksMap.getStoredBlock(block); + if (storedBlock == null) { + String msg = block + " is already commited, storedBlock == null."; + LOG.info(msg); + throw new IOException(msg); + } + INodeFile fileINode = storedBlock.getINode(); + if (!fileINode.isUnderConstruction()) { + String msg = block + " is already commited, !fileINode.isUnderConstruction()."; + LOG.info(msg); + throw new IOException(msg); + } + if (!((INodeFileUnderConstruction) fileINode).setLastRecoveryTime(now())) { + String msg = block + " is beening recovered, ignoring this request."; + LOG.info(msg); + throw new IOException(msg); + } + return nextGenerationStamp(); + } finally { + writeUnlock(); + } + } + + // rename was successful. If any part of the renamed subtree had + // files that were being written to, update with new filename. + // + + void changeLease(String src, String dst, HdfsFileStatus dinfo) + throws IOException { + String overwrite; + String replaceBy; + + boolean destinationExisted = true; + if (dinfo == null) { + destinationExisted = false; + } + + if (destinationExisted && dinfo.isDir()) { + Path spath = new Path(src); + overwrite = spath.getParent().toString() + Path.SEPARATOR; + replaceBy = dst + Path.SEPARATOR; + } else { + overwrite = src; + replaceBy = dst; + } + + leaseManager.changeLease(src, dst, overwrite, replaceBy); + } + + /** + * Serializes leases. 
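MBeanUtil.registerMBean is a thin helper around the platform MBean server. As a hedged illustration only (the Status interface and names below are invented, not the real FSNamesystemMBean), an equivalent registration with plain JMX looks like:

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;
    import javax.management.StandardMBean;

    public class JmxRegistrationSketch {
      // A trivial MBean interface; FSNamesystem exposes FSNamesystemMBean instead.
      public interface StatusMBean { String getFSState(); }

      public static class Status implements StatusMBean {
        public String getFSState() { return "Operational"; }
      }

      public static void main(String[] args) throws Exception {
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        // Same naming shape as "hadoop:service=NameNode,name=FSNamesystemState".
        ObjectName name = new ObjectName("hadoop:service=NameNode,name=FSNamesystemState");
        StandardMBean bean = new StandardMBean(new Status(), StatusMBean.class);
        server.registerMBean(bean, name);
        System.out.println("registered " + name);
        server.unregisterMBean(name);   // mirrors shutdown()/unregisterMBean above
      }
    }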
+ */ + void saveFilesUnderConstruction(DataOutputStream out) throws IOException { + synchronized (leaseManager) { + out.writeInt(leaseManager.countPath()); // write the size + + for (Lease lease : leaseManager.getSortedLeases()) { + for (String path : lease.getPaths()) { + // verify that path exists in namespace + INode node = dir.getFileINode(path); + if (node == null) { + throw new IOException("saveLeases found path " + path + + " but no matching entry in namespace."); + } + if (!node.isUnderConstruction()) { + throw new IOException("saveLeases found path " + path + + " but is not under construction."); + } + INodeFileUnderConstruction cons = (INodeFileUnderConstruction) node; + FSImage.writeINodeUnderConstruction(out, cons, path); + } + } + } + } + + public ArrayList getDecommissioningNodes() { + readLock(); + try { + ArrayList decommissioningNodes = new ArrayList(); + ArrayList results = getDatanodeListForReport(DatanodeReportType.LIVE); + for (Iterator it = results.iterator(); it.hasNext();) { + DatanodeDescriptor node = it.next(); + if (node.isDecommissionInProgress()) { + decommissioningNodes.add(node); + } + } + return decommissioningNodes; + } finally { + readUnlock(); + } + } + + /** + * Return an iterator over the set of blocks for which there are no replicas. + */ + BlockIterator getCorruptReplicaBlockIterator() { + return neededReplications + .iterator(UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS); + } + + public static class CorruptFileBlockInfo { + String path; + Block block; + + public CorruptFileBlockInfo(String p, Block b) { + path = p; + block = b; + } + + public String toString() { + return block.getBlockName() + "\t" + path; + } + + public String getPath() { + return path; + } + } + + /** + * @param path Restrict corrupt files to this portion of namespace. 
+ * @param startBlockAfter Support for continuation; the set of files we return + * back is ordered by blockid; startBlockAfter tells where to start from + * @return a list in which each entry describes a corrupt file/block + * @throws AccessControlException + * @throws IOException + */ + synchronized Collection + listCorruptFileBlocks(String path, + String startBlockAfter) + throws AccessControlException, IOException { + + checkSuperuserPrivilege(); + long startBlockId = 0; + // print a limited # of corrupt files per call + int count = 0; + ArrayList corruptFiles = + new ArrayList(); + + if (startBlockAfter != null) { + startBlockId = Block.filename2id(startBlockAfter); + } + BlockIterator blkIterator = getCorruptReplicaBlockIterator(); + while (blkIterator.hasNext()) { + Block blk = blkIterator.next(); + INode inode = blocksMap.getINode(blk); + if (inode != null && countNodes(blk).liveReplicas() == 0) { + String src = FSDirectory.getFullPathName(inode); + if (((startBlockAfter == null) || (blk.getBlockId() > startBlockId)) + && (src.startsWith(path))) { + corruptFiles.add(new CorruptFileBlockInfo(src, blk)); + count++; + if (count >= maxCorruptFilesReturned) + break; + } + } + } + LOG.info("list corrupt file blocks returned: " + count); + return corruptFiles; + } + + void setPersistBlocks(boolean persistBlocks) { + this.persistBlocks = persistBlocks; + } + + boolean getPersistBlocks() { + return persistBlocks; + } + + void setPermissionAuditLog(boolean permissionAuditOnly) { + this.permissionAuditOnly = permissionAuditOnly; + } + + boolean getPermissionAuditLog() { + return permissionAuditOnly; + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java new file mode 100644 index 0000000..b562d71 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java @@ -0,0 +1,160 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.util.*; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.permission.*; +import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.PermissionChecker; +import org.apache.hadoop.security.UserGroupInformation; + +/** Perform permission checking in {@link FSNamesystem}. 
*/ +class FSPermissionChecker extends PermissionChecker { + static final Log LOG = LogFactory.getLog(UserGroupInformation.class); + + FSPermissionChecker(String fsOwner, String supergroup + ) throws AccessControlException{ + super(fsOwner, supergroup); + } + + /** + * Check whether current user have permissions to access the path. + * Traverse is always checked. + * + * Parent path means the parent directory for the path. + * Ancestor path means the last (the closest) existing ancestor directory + * of the path. + * Note that if the parent path exists, + * then the parent path and the ancestor path are the same. + * + * For example, suppose the path is "/foo/bar/baz". + * No matter baz is a file or a directory, + * the parent path is "/foo/bar". + * If bar exists, then the ancestor path is also "/foo/bar". + * If bar does not exist and foo exists, + * then the ancestor path is "/foo". + * Further, if both foo and bar do not exist, + * then the ancestor path is "/". + * + * @param doCheckOwner Require user to be the owner of the path? + * @param ancestorAccess The access required by the ancestor of the path. + * @param parentAccess The access required by the parent of the path. + * @param access The access required by the path. + * @param subAccess If path is a directory, + * it is the access required of the path and all the sub-directories. + * If path is not a directory, there is no effect. + * @return a PermissionChecker object which caches data for later use. + * @throws AccessControlException + */ + void checkPermission(String path, INodeDirectory root, boolean doCheckOwner, + FsAction ancestorAccess, FsAction parentAccess, FsAction access, + FsAction subAccess) throws AccessControlException { + if (LOG.isDebugEnabled()) { + LOG.debug("ACCESS CHECK: " + this + + ", doCheckOwner=" + doCheckOwner + + ", ancestorAccess=" + ancestorAccess + + ", parentAccess=" + parentAccess + + ", access=" + access + + ", subAccess=" + subAccess); + } + + INode[] inodes = root.getExistingPathINodes(path); + int ancestorIndex = inodes.length - 2; + for(; ancestorIndex >= 0 && inodes[ancestorIndex] == null; + ancestorIndex--); + checkTraverse(inodes, ancestorIndex); + + if (ancestorAccess != null && inodes.length > 1) { + check(inodes, ancestorIndex, ancestorAccess); + } + if (parentAccess != null && inodes.length > 1) { + check(inodes, inodes.length - 2, parentAccess); + } + if (access != null) { + check(inodes[inodes.length - 1], access); + } + if (subAccess != null) { + checkSubAccess(inodes[inodes.length - 1], subAccess); + } + if (doCheckOwner) { + checkOwner(inodes[inodes.length - 1]); + } + } + + private void checkOwner(INode inode) throws AccessControlException { + if (inode != null && user.equals(inode.getUserName())) { + return; + } + throw new AccessControlException("Permission denied"); + } + + private void checkTraverse(INode[] inodes, int last + ) throws AccessControlException { + for(int j = 0; j <= last; j++) { + check(inodes[j], FsAction.EXECUTE); + } + } + + private void checkSubAccess(INode inode, FsAction access + ) throws AccessControlException { + if (inode == null || !inode.isDirectory()) { + return; + } + + Stack directories = new Stack(); + for(directories.push((INodeDirectory)inode); !directories.isEmpty(); ) { + INodeDirectory d = directories.pop(); + check(d, access); + + for(INode child : d.getChildren()) { + if (child.isDirectory()) { + directories.push((INodeDirectory)child); + } + } + } + } + + private void check(INode[] inodes, int i, FsAction access + ) throws 
AccessControlException { + check(i >= 0? inodes[i]: null, access); + } + + private void check(INode inode, FsAction access + ) throws AccessControlException { + if (inode == null) { + return; + } + FsPermission mode = inode.getFsPermission(); + + if (user.equals(inode.getUserName())) { //user class + if (mode.getUserAction().implies(access)) { return; } + } + else if (groups.contains(inode.getGroupName())) { //group class + if (mode.getGroupAction().implies(access)) { return; } + } + else { //other class + if (mode.getOtherAction().implies(access)) { return; } + } + throw new AccessControlException("Permission denied: user=" + user + + ", access=" + access + ", inode=" + inode); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java new file mode 100644 index 0000000..0c0c922 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java @@ -0,0 +1,103 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; +import java.io.PrintWriter; +import java.net.URI; +import java.net.URISyntaxException; + +import javax.net.SocketFactory; +import javax.servlet.ServletContext; +import javax.servlet.ServletException; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum; +import org.apache.hadoop.hdfs.DFSClient; +import org.apache.hadoop.hdfs.protocol.ClientProtocol; +import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.UnixUserGroupInformation; +import org.apache.hadoop.security.UserGroupInformation; +import org.znerd.xmlenc.XMLOutputter; + +/** Servlets for file checksum */ +public class FileChecksumServlets { + /** Redirect file checksum queries to an appropriate datanode. 
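The check(INode, FsAction) logic above selects exactly one permission class (user, group, or other) and asks whether its FsAction implies the requested access. A self-contained sketch using the public FsPermission/FsAction types, with plain strings in place of INodes and made-up principals, is:

    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class PermissionCheckSketch {
      // Same class-selection order as FSPermissionChecker.check(): user, then group, then other.
      static boolean permits(FsPermission mode, String owner, String group,
                             String user, java.util.Collection<String> userGroups,
                             FsAction access) {
        if (user.equals(owner)) {
          return mode.getUserAction().implies(access);
        } else if (userGroups.contains(group)) {
          return mode.getGroupAction().implies(access);
        } else {
          return mode.getOtherAction().implies(access);
        }
      }

      public static void main(String[] args) {
        FsPermission mode = new FsPermission((short) 0640);   // rw-r-----
        java.util.List<String> groups = java.util.Arrays.asList("hadoop");
        System.out.println(permits(mode, "alice", "hadoop", "alice", groups, FsAction.WRITE)); // true
        System.out.println(permits(mode, "alice", "hadoop", "bob",   groups, FsAction.READ));  // true (group)
        System.out.println(permits(mode, "alice", "hadoop", "eve",
            java.util.Collections.<String>emptyList(), FsAction.READ));                        // false (other)
      }
    }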
*/ + public static class RedirectServlet extends DfsServlet { + /** For java.io.Serializable */ + private static final long serialVersionUID = 1L; + + /** {@inheritDoc} */ + public void doGet(HttpServletRequest request, HttpServletResponse response + ) throws ServletException, IOException { + final UserGroupInformation ugi = getUGI(request); + final ServletContext context = getServletContext(); + final NameNode namenode = (NameNode)context.getAttribute("name.node"); + final DatanodeID datanode = namenode.namesystem.getRandomDatanode(); + try { + final URI uri = createRedirectUri("/getFileChecksum", ugi, datanode, request); + response.sendRedirect(uri.toURL().toString()); + } catch(URISyntaxException e) { + throw new ServletException(e); + //response.getWriter().println(e.toString()); + } catch (IOException e) { + response.sendError(400, e.getMessage()); + } + } + } + + /** Get FileChecksum */ + public static class GetServlet extends DfsServlet { + /** For java.io.Serializable */ + private static final long serialVersionUID = 1L; + + /** {@inheritDoc} */ + public void doGet(HttpServletRequest request, HttpServletResponse response + ) throws ServletException, IOException { + final UnixUserGroupInformation ugi = getUGI(request); + final PrintWriter out = response.getWriter(); + final String filename = getFilename(request, response); + final XMLOutputter xml = new XMLOutputter(out, "UTF-8"); + xml.declaration(); + + final Configuration conf = new Configuration(DataNode.getDataNode().getConf()); + final int socketTimeout = conf.getInt("dfs.socket.timeout", HdfsConstants.READ_TIMEOUT); + final SocketFactory socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class); + UnixUserGroupInformation.saveToConf(conf, + UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi); + final ClientProtocol nnproxy = DFSClient.createNamenode(conf); + + try { + final MD5MD5CRC32FileChecksum checksum = DFSClient.getFileChecksum( + filename, nnproxy, socketFactory, socketTimeout); + MD5MD5CRC32FileChecksum.write(xml, checksum); + } catch(IOException ioe) { + new RemoteException(ioe.getClass().getName(), ioe.getMessage() + ).writeXml(filename, xml); + } + xml.endDocument(); + } + } +} \ No newline at end of file diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java new file mode 100644 index 0000000..18348d8 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java @@ -0,0 +1,128 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.Enumeration; +import java.util.Iterator; +import java.util.Map; + +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.hdfs.protocol.ClientProtocol; +import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.security.UnixUserGroupInformation; + +/** Redirect queries about the hosted filesystem to an appropriate datanode. + * @see org.apache.hadoop.hdfs.HftpFileSystem + */ +public class FileDataServlet extends DfsServlet { + + /** Create a redirection URI */ + protected URI createUri(FileStatus i, UnixUserGroupInformation ugi, + ClientProtocol nnproxy, HttpServletRequest request) + throws IOException, URISyntaxException { + String scheme = request.getScheme(); + final DatanodeID host = pickSrcDatanode(i, nnproxy); + final String hostname; + if (host instanceof DatanodeInfo) { + hostname = ((DatanodeInfo)host).getHostName(); + } else { + hostname = host.getHost(); + } + + // Construct query. + StringBuilder builder = new StringBuilder(); + builder.append("ugi=" + ugi); + + // Populate the rest of parameters. + Enumeration it = request.getParameterNames(); + while (it.hasMoreElements()) { + String key = it.nextElement().toString(); + String value = request.getParameter(key); + builder.append("&" + key + "=" + value); + } + + return new URI(scheme, null, hostname, + "https".equals(scheme) + ? (Integer)getServletContext().getAttribute("datanode.https.port") + : host.getInfoPort(), + "/streamFile" + i.getPath(), builder.toString(), null); + } + + private static JspHelper jspHelper = null; + + /** Select a datanode to service this request. + * Currently, this looks at no more than the first five blocks of a file, + * selecting a datanode randomly from the most represented. + */ + private static DatanodeID pickSrcDatanode(FileStatus i, + ClientProtocol nnproxy) throws IOException { + // a race condition can happen by initializing a static member this way. + // A proper fix should make JspHelper a singleton. Since it doesn't affect + // correctness, we leave it as is for now. + if (jspHelper == null) + jspHelper = new JspHelper(); + final LocatedBlocks blks = nnproxy.getBlockLocations( + i.getPath().toUri().getPath(), 0, 1); + if (i.getLen() == 0 || blks.getLocatedBlocks().size() <= 0) { + // pick a random datanode + return jspHelper.randomNode(); + } + return jspHelper.bestNode(blks); + } + + /** + * Service a GET request as described below. + * Request: + * {@code + * GET http://:/data[/] HTTP/1.1 + * } + */ + public void doGet(HttpServletRequest request, HttpServletResponse response) + throws IOException { + final UnixUserGroupInformation ugi = getUGI(request); + final ClientProtocol nnproxy = createNameNodeProxy(ugi); + + try { + final String path = request.getPathInfo() != null + ? 
request.getPathInfo() : "/"; + FileStatus info = nnproxy.getFileInfo(path); + if ((info != null) && !info.isDir()) { + response.sendRedirect(createUri(info, ugi, nnproxy, + request).toURL().toString()); + } else if (info == null){ + response.sendError(400, "cat: File not found " + path); + } else { + response.sendError(400, "cat: " + path + ": is a directory"); + } + } catch (URISyntaxException e) { + response.getWriter().println(e.toString()); + } catch (IOException e) { + response.sendError(400, e.getMessage()); + } + } + +} + diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java new file mode 100644 index 0000000..220ef8f --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.util.*; +import java.io.*; +import org.apache.hadoop.conf.*; +import org.apache.commons.logging.*; +import javax.servlet.ServletContext; +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +/** + * This class is used in Namesystem's jetty to do fsck on namenode. + */ +public class FsckServlet extends HttpServlet { + @SuppressWarnings("unchecked") + public void doGet(HttpServletRequest request, + HttpServletResponse response + ) throws ServletException, IOException { + Map pmap = request.getParameterMap(); + ServletContext context = getServletContext(); + NameNode nn = (NameNode) context.getAttribute("name.node"); + Configuration conf = (Configuration) context.getAttribute("name.conf"); + NamenodeFsck fscker = new NamenodeFsck(conf, nn, pmap, response); + fscker.fsck(); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java new file mode 100644 index 0000000..4a37b15 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.util.*; +import java.io.*; +import javax.servlet.ServletContext; +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.hadoop.util.StringUtils; + +/** + * This class is used in Namesystem's jetty to retrieve a file. + * Typically used by the Secondary NameNode to retrieve image and + * edit file for periodic checkpointing. + */ +public class GetImageServlet extends HttpServlet { + private static final long serialVersionUID = -7669068179452648952L; + + @SuppressWarnings("unchecked") + public void doGet(HttpServletRequest request, + HttpServletResponse response + ) throws ServletException, IOException { + Map pmap = request.getParameterMap(); + try { + ServletContext context = getServletContext(); + FSImage nnImage = (FSImage)context.getAttribute("name.system.image"); + TransferFsImage ff = new TransferFsImage(pmap, request, response); + if (ff.getImage()) { + response.setHeader(TransferFsImage.CONTENT_LENGTH, + String.valueOf(nnImage.getFsImageName().length())); + // send fsImage + TransferFsImage.getFileServer(response.getOutputStream(), + nnImage.getFsImageName(), + nnImage.imageTransferThrottler); + } else if (ff.getEdit()) { + response.setHeader(TransferFsImage.CONTENT_LENGTH, + String.valueOf(nnImage.getFsEditName().length())); + // send edits + TransferFsImage.getFileServer(response.getOutputStream(), + nnImage.getFsEditName(), + nnImage.imageTransferThrottler); + } else if (ff.putImage()) { + // issue a HTTP get request to download the new fsimage + nnImage.validateCheckpointUpload(ff.getToken()); + nnImage.checkpointUploadDone( + TransferFsImage.getFileClient(ff.getInfoServer(), "getimage=1", + nnImage.getFsImageNameCheckpoint(), true)); + } + } catch (Exception ie) { + String errMsg = "GetImage failed. " + StringUtils.stringifyException(ie); + response.sendError(HttpServletResponse.SC_GONE, errMsg); + throw new IOException(errMsg); + } finally { + response.getOutputStream().close(); + } + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/Host2NodesMap.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/Host2NodesMap.java new file mode 100644 index 0000000..a00022d --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/Host2NodesMap.java @@ -0,0 +1,188 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.util.*; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +class Host2NodesMap { + private HashMap map + = new HashMap(); + private Random r = new Random(); + private ReadWriteLock hostmapLock = new ReentrantReadWriteLock(); + + /** Check if node is already in the map. */ + boolean contains(DatanodeDescriptor node) { + if (node==null) { + return false; + } + + String host = node.getHost(); + hostmapLock.readLock().lock(); + try { + DatanodeDescriptor[] nodes = map.get(host); + if (nodes != null) { + for(DatanodeDescriptor containedNode:nodes) { + if (node==containedNode) { + return true; + } + } + } + } finally { + hostmapLock.readLock().unlock(); + } + return false; + } + + /** add node to the map + * return true if the node is added; false otherwise. + */ + boolean add(DatanodeDescriptor node) { + hostmapLock.writeLock().lock(); + try { + if (node==null || contains(node)) { + return false; + } + + String host = node.getHost(); + DatanodeDescriptor[] nodes = map.get(host); + DatanodeDescriptor[] newNodes; + if (nodes==null) { + newNodes = new DatanodeDescriptor[1]; + newNodes[0]=node; + } else { // rare case: more than one datanode on the host + newNodes = new DatanodeDescriptor[nodes.length+1]; + System.arraycopy(nodes, 0, newNodes, 0, nodes.length); + newNodes[nodes.length] = node; + } + map.put(host, newNodes); + return true; + } finally { + hostmapLock.writeLock().unlock(); + } + } + + /** remove node from the map + * return true if the node is removed; false otherwise. + */ + boolean remove(DatanodeDescriptor node) { + if (node==null) { + return false; + } + + String host = node.getHost(); + hostmapLock.writeLock().lock(); + try { + + DatanodeDescriptor[] nodes = map.get(host); + if (nodes==null) { + return false; + } + if (nodes.length==1) { + if (nodes[0]==node) { + map.remove(host); + return true; + } else { + return false; + } + } + //rare case + int i=0; + for(; i, FSInodeInfo { + protected byte[] name; + protected INodeDirectory parent; + protected long modificationTime; + protected volatile long accessTime; + + /** Simple wrapper for two counters : + * nsCount (namespace consumed) and dsCount (diskspace consumed). + */ + static class DirCounts { + long nsCount = 0; + long dsCount = 0; + + /** returns namespace count */ + long getNsCount() { + return nsCount; + } + /** returns diskspace count */ + long getDsCount() { + return dsCount; + } + } + + //Only updated by updatePermissionStatus(...). + //Other codes should not modify it. 
+ private long permission; + + private static enum PermissionStatusFormat { + MODE(0, 16), + GROUP(MODE.OFFSET + MODE.LENGTH, 25), + USER(GROUP.OFFSET + GROUP.LENGTH, 23); + + final int OFFSET; + final int LENGTH; //bit length + final long MASK; + + PermissionStatusFormat(int offset, int length) { + OFFSET = offset; + LENGTH = length; + MASK = ((-1L) >>> (64 - LENGTH)) << OFFSET; + } + + long retrieve(long record) { + return (record & MASK) >>> OFFSET; + } + + long combine(long bits, long record) { + return (record & ~MASK) | (bits << OFFSET); + } + } + + protected INode() { + name = null; + parent = null; + modificationTime = 0; + accessTime = 0; + } + + INode(PermissionStatus permissions, long mTime, long atime) { + this.name = null; + this.parent = null; + this.modificationTime = mTime; + setAccessTime(atime); + setPermissionStatus(permissions); + } + + protected INode(String name, PermissionStatus permissions) { + this(permissions, 0L, 0L); + setLocalName(name); + } + + /** copy constructor + * + * @param other Other node to be copied + */ + INode(INode other) { + setLocalName(other.getLocalName()); + this.parent = other.getParent(); + setPermissionStatus(other.getPermissionStatus()); + setModificationTime(other.getModificationTime()); + setAccessTime(other.getAccessTime()); + } + + /** + * Check whether this is the root inode. + */ + boolean isRoot() { + return name.length == 0; + } + + /** Set the {@link PermissionStatus} */ + protected void setPermissionStatus(PermissionStatus ps) { + setUser(ps.getUserName()); + setGroup(ps.getGroupName()); + setPermission(ps.getPermission()); + } + /** Get the {@link PermissionStatus} */ + protected PermissionStatus getPermissionStatus() { + return new PermissionStatus(getUserName(),getGroupName(),getFsPermission()); + } + private synchronized void updatePermissionStatus( + PermissionStatusFormat f, long n) { + permission = f.combine(n, permission); + } + /** Get user name */ + public String getUserName() { + int n = (int)PermissionStatusFormat.USER.retrieve(permission); + return SerialNumberManager.INSTANCE.getUser(n); + } + /** Set user */ + protected void setUser(String user) { + int n = SerialNumberManager.INSTANCE.getUserSerialNumber(user); + updatePermissionStatus(PermissionStatusFormat.USER, n); + } + /** Get group name */ + public String getGroupName() { + int n = (int)PermissionStatusFormat.GROUP.retrieve(permission); + return SerialNumberManager.INSTANCE.getGroup(n); + } + /** Set group */ + protected void setGroup(String group) { + int n = SerialNumberManager.INSTANCE.getGroupSerialNumber(group); + updatePermissionStatus(PermissionStatusFormat.GROUP, n); + } + /** Get the {@link FsPermission} */ + public FsPermission getFsPermission() { + return new FsPermission( + (short)PermissionStatusFormat.MODE.retrieve(permission)); + } + protected short getFsPermissionShort() { + return (short)PermissionStatusFormat.MODE.retrieve(permission); + } + /** Set the {@link FsPermission} of this {@link INode} */ + protected void setPermission(FsPermission permission) { + updatePermissionStatus(PermissionStatusFormat.MODE, permission.toShort()); + } + + /** + * Check whether it's a directory + */ + public abstract boolean isDirectory(); + /** + * Collect all the blocks in all children of this INode. + * Count and return the number of files in the sub tree. + * Also clears references since this INode is deleted. + */ + abstract int collectSubtreeBlocksAndClear(List v); + + /** Compute {@link ContentSummary}. 
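PermissionStatusFormat packs the 16 mode bits plus a 25-bit group serial number and a 23-bit user serial number into a single long per inode (MODE in bits 0-15, GROUP in bits 16-40, USER in bits 41-63). The retrieve/combine arithmetic can be exercised in isolation with a sketch like this (same offsets and masks; the serial numbers are hypothetical):

    public class PermissionPackingSketch {
      static long mask(int offset, int length) {
        return ((-1L) >>> (64 - length)) << offset;
      }
      static long combine(long bits, long record, int offset, int length) {
        return (record & ~mask(offset, length)) | (bits << offset);
      }
      static long retrieve(long record, int offset, int length) {
        return (record & mask(offset, length)) >>> offset;
      }

      public static void main(String[] args) {
        long record = 0L;
        record = combine(0755, record, 0, 16);   // mode (octal)
        record = combine(7,    record, 16, 25);  // hypothetical group serial number
        record = combine(42,   record, 41, 23);  // hypothetical user serial number

        System.out.println(Long.toOctalString(retrieve(record, 0, 16))); // 755
        System.out.println(retrieve(record, 16, 25));                    // 7
        System.out.println(retrieve(record, 41, 23));                    // 42
      }
    }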
*/ + public final ContentSummary computeContentSummary() { + long[] a = computeContentSummary(new long[]{0,0,0,0}); + return new ContentSummary(a[0], a[1], a[2], getNsQuota(), + a[3], getDsQuota()); + } + /** + * @return an array of three longs. + * 0: length, 1: file count, 2: directory count 3: disk space + */ + abstract long[] computeContentSummary(long[] summary); + + /** + * Get the quota set for this inode + * @return the quota if it is set; -1 otherwise + */ + long getNsQuota() { + return -1; + } + + long getDsQuota() { + return -1; + } + + boolean isQuotaSet() { + return getNsQuota() >= 0 || getDsQuota() >= 0; + } + + /** + * Adds total nubmer of names and total disk space taken under + * this tree to counts. + * Returns updated counts object. + */ + abstract DirCounts spaceConsumedInTree(DirCounts counts); + + /** + * Get local file name + * @return local file name + */ + String getLocalName() { + return DFSUtil.bytes2String(name); + } + + /** + * Get local file name + * @return local file name + */ + byte[] getLocalNameBytes() { + return name; + } + + /** + * Set local file name + */ + void setLocalName(String name) { + this.name = DFSUtil.string2Bytes(name); + } + + /** + * Set local file name + */ + void setLocalName(byte[] name) { + this.name = name; + } + + /** {@inheritDoc} */ + public String getFullPathName() { + // Get the full path name of this inode. + return FSDirectory.getFullPathName(this); + } + + /** {@inheritDoc} */ + public String toString() { + return "\"" + getLocalName() + "\":" + getPermissionStatus(); + } + + /** + * Get parent directory + * @return parent INode + */ + INodeDirectory getParent() { + return this.parent; + } + + /** + * Get last modification time of inode. + * @return access time + */ + public long getModificationTime() { + return this.modificationTime; + } + + /** + * Set last modification time of inode. + */ + void setModificationTime(long modtime) { + assert isDirectory(); + if (this.modificationTime <= modtime) { + this.modificationTime = modtime; + } + } + + /** + * Always set the last modification time of inode. + */ + void setModificationTimeForce(long modtime) { + assert !isDirectory(); + this.modificationTime = modtime; + } + + /** + * Get access time of inode. + * @return access time + */ + public long getAccessTime() { + return accessTime; + } + + /** + * Set last access time of inode. + */ + void setAccessTime(long atime) { + accessTime = atime; + } + + /** + * Is this inode being constructed? + */ + boolean isUnderConstruction() { + return false; + } + + /** + * Breaks file path into components. + * @param path + * @return array of byte arrays each of which represents + * a single path component. + */ + static byte[][] getPathComponents(String path) { + return getPathComponents(getPathNames(path)); + } + + /** Convert strings to byte arrays for path components. */ + static byte[][] getPathComponents(String[] strings) { + if (strings.length == 0) { + return new byte[][]{null}; + } + byte[][] bytes = new byte[strings.length][]; + for (int i = 0; i < strings.length; i++) + bytes[i] = DFSUtil.string2Bytes(strings[i]); + return bytes; + } + + /** + * Breaks file path into names. 
+ * @param path + * @return array of names + */ + static String[] getPathNames(String path) { + if (path == null || !path.startsWith(Path.SEPARATOR)) { + return null; + } + return StringUtils.split(path, Path.SEPARATOR_CHAR); + } + + boolean removeNode() { + if (parent == null) { + return false; + } else { + + parent.removeChild(this); + parent = null; + return true; + } + } + + // + // Comparable interface + // + public int compareTo(byte[] o) { + return compareBytes(name, o); + } + + public boolean equals(Object o) { + if (!(o instanceof INode)) { + return false; + } + return Arrays.equals(this.name, ((INode)o).name); + } + + public int hashCode() { + return Arrays.hashCode(this.name); + } + + // + // static methods + // + /** + * Compare two byte arrays. + * + * @return a negative integer, zero, or a positive integer + * as defined by {@link #compareTo(byte[])}. + */ + static int compareBytes(byte[] a1, byte[] a2) { + if (a1==a2) + return 0; + int len1 = (a1==null ? 0 : a1.length); + int len2 = (a2==null ? 0 : a2.length); + int n = Math.min(len1, len2); + byte b1, b2; + for (int i=0; i blocks) { + return new LocatedBlocks(computeContentSummary().getLength(), blocks, + isUnderConstruction()); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java new file mode 100644 index 0000000..c29a9c2 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java @@ -0,0 +1,456 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.FileNotFoundException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.protocol.Block; + +/** + * Directory INode class. 
+ */ +class INodeDirectory extends INode { + protected static final int DEFAULT_FILES_PER_DIRECTORY = 5; + final static String ROOT_NAME = ""; + + private List children; + + INodeDirectory(String name, PermissionStatus permissions) { + super(name, permissions); + this.children = null; + } + + public INodeDirectory(PermissionStatus permissions, long mTime) { + super(permissions, mTime, 0); + this.children = null; + } + + /** constructor */ + INodeDirectory(byte[] localName, PermissionStatus permissions, long mTime) { + this(permissions, mTime); + this.name = localName; + } + + /** copy constructor + * + * @param other + */ + INodeDirectory(INodeDirectory other) { + super(other); + this.children = other.getChildren(); + } + + /** + * Check whether it's a directory + */ + public boolean isDirectory() { + return true; + } + + INode removeChild(INode node) { + assert children != null; + int low = Collections.binarySearch(children, node.name); + if (low >= 0) { + return children.remove(low); + } else { + return null; + } + } + + /** Replace a child that has the same name as newChild by newChild. + * + * @param newChild Child node to be added + */ + void replaceChild(INode newChild) { + if ( children == null ) { + throw new IllegalArgumentException("The directory is empty"); + } + int low = Collections.binarySearch(children, newChild.name); + if (low>=0) { // an old child exists so replace by the newChild + children.set(low, newChild); + } else { + throw new IllegalArgumentException("No child exists to be replaced"); + } + } + + INode getChild(String name) { + return getChildINode(DFSUtil.string2Bytes(name)); + } + + private INode getChildINode(byte[] name) { + if (children == null) { + return null; + } + int low = Collections.binarySearch(children, name); + if (low >= 0) { + return children.get(low); + } + return null; + } + + /** + */ + private INode getNode(byte[][] components) { + INode[] inode = new INode[1]; + getExistingPathINodes(components, inode); + return inode[0]; + } + + /** + * This is the external interface + */ + INode getNode(String path) { + return getNode(getPathComponents(path)); + } + + /** + * Retrieve existing INodes from a path. If existing is big enough to store + * all path components (existing and non-existing), then existing INodes + * will be stored starting from the root INode into existing[0]; if + * existing is not big enough to store all path components, then only the + * last existing and non existing INodes will be stored so that + * existing[existing.length-1] refers to the target INode. + * + *

+   * Example:
+   * Given the path /c1/c2/c3 where only /c1/c2 exists, resulting in the
+   * following path components: ["","c1","c2","c3"],
+   *
+   * getExistingPathINodes(["","c1","c2"], [?]) should fill the
+   * array with [c2]
+   * getExistingPathINodes(["","c1","c2","c3"], [?]) should fill the
+   * array with [null]
+   *
+   * getExistingPathINodes(["","c1","c2"], [?,?]) should fill the
+   * array with [c1,c2]
+   * getExistingPathINodes(["","c1","c2","c3"], [?,?]) should fill
+   * the array with [c2,null]
+   *
+   * getExistingPathINodes(["","c1","c2"], [?,?,?,?]) should fill
+   * the array with [rootINode,c1,c2,null],
+ * getExistingPathINodes(["","c1","c2","c3"], [?,?,?,?]) should + * fill the array with [rootINode,c1,c2,null] + * @param components array of path component name + * @param existing INode array to fill with existing INodes + * @return number of existing INodes in the path + */ + int getExistingPathINodes(byte[][] components, INode[] existing) { + assert compareBytes(this.name, components[0]) == 0 : + "Incorrect name " + getLocalName() + " expected " + components[0]; + + INode curNode = this; + int count = 0; + int index = existing.length - components.length; + if (index > 0) + index = 0; + while ((count < components.length) && (curNode != null)) { + if (index >= 0) + existing[index] = curNode; + if (!curNode.isDirectory() || (count == components.length - 1)) + break; // no more child, stop here + INodeDirectory parentDir = (INodeDirectory)curNode; + curNode = parentDir.getChildINode(components[count + 1]); + count += 1; + index += 1; + } + return count; + } + + /** + * Retrieve the existing INodes along the given path. The first INode + * always exist and is this INode. + * + * @param path the path to explore + * @return INodes array containing the existing INodes in the order they + * appear when following the path from the root INode to the + * deepest INodes. The array size will be the number of expected + * components in the path, and non existing components will be + * filled with null + * + * @see #getExistingPathINodes(byte[][], INode[]) + */ + INode[] getExistingPathINodes(String path) { + byte[][] components = getPathComponents(path); + INode[] inodes = new INode[components.length]; + + this.getExistingPathINodes(components, inodes); + + return inodes; + } + + /** + * Add a child inode to the directory. + * + * @param node INode to insert + * @param inheritPermission inherit permission from parent? + * @return null if the child with this name already exists; + * node, otherwise + */ + T addChild(final T node, boolean inheritPermission) { + return addChild(node, inheritPermission, true); + } + /** + * Add a child inode to the directory. + * + * @param node INode to insert + * @param inheritPermission inherit permission from parent? + * @param propagateModTime set parent's mod time to that of a child? + * @return null if the child with this name already exists; + * node, otherwise + */ + T addChild(final T node, boolean inheritPermission, + boolean propagateModTime) { + if (inheritPermission) { + FsPermission p = getFsPermission(); + //make sure the permission has wx for the user + if (!p.getUserAction().implies(FsAction.WRITE_EXECUTE)) { + p = new FsPermission(p.getUserAction().or(FsAction.WRITE_EXECUTE), + p.getGroupAction(), p.getOtherAction()); + } + node.setPermission(p); + } + + if (children == null) { + children = new ArrayList(DEFAULT_FILES_PER_DIRECTORY); + } + int low = Collections.binarySearch(children, node.name); + if(low >= 0) + return null; + node.parent = this; + children.add(-low - 1, node); + if (propagateModTime) { + // update modification time of the parent directory + setModificationTime(node.getModificationTime()); + } + if (node.getGroupName() == null) { + node.setGroup(getGroupName()); + } + return node; + } + + /** + * Search all children for the first child whose name is greater than + * the given name. + * + * If the given name is one of children's name, the next child's index + * is returned; Otherwise, return the insertion point: the index of the + * first child whose name's greater than the given name. 
+ * + * @param name a name + * @return the index of the next child + */ + int nextChild(byte[] name) { + if (name.length == 0) { // empty name + return 0; + } + int nextPos = Collections.binarySearch(children, name) + 1; + if (nextPos >= 0) { // the name is in the list of children + return nextPos; + } + return -nextPos; // insert point + } + + /** + * Equivalent to addNode(path, newNode, false). + * @see #addNode(String, INode, boolean) + */ + T addNode(String path, T newNode) throws FileNotFoundException { + return addNode(path, newNode, false); + } + /** + * Add new INode to the file tree. + * Find the parent and insert + * + * @param path file path + * @param newNode INode to be added + * @param inheritPermission If true, copy the parent's permission to newNode. + * @return null if the node already exists; inserted INode, otherwise + * @throws FileNotFoundException if parent does not exist or + * is not a directory. + */ + T addNode(String path, T newNode, boolean inheritPermission + ) throws FileNotFoundException { + byte[][] pathComponents = getPathComponents(path); + if(addToParent(pathComponents, newNode, null, inheritPermission) == null) + return null; + return newNode; + } + + /** + * Add new inode to the parent if specified. + * Optimized version of addNode() if parent is not null. + * + * @return parent INode if new inode is inserted + * or null if it already exists. + * @throws FileNotFoundException if parent does not exist or + * is not a directory. + */ + INodeDirectory addToParent( + byte[][] pathComponents, + T newNode, + INodeDirectory parent, + boolean inheritPermission + ) throws FileNotFoundException { + return addToParent(pathComponents, newNode, parent, inheritPermission, true); + } + INodeDirectory addToParent( + byte[][] pathComponents, + T newNode, + INodeDirectory parent, + boolean inheritPermission, + boolean propagateModTime + ) throws FileNotFoundException { + + int pathLen = pathComponents.length; + if (pathLen < 2) // add root + return null; + if(parent == null) { + // Gets the parent INode + INode[] inodes = new INode[2]; + getExistingPathINodes(pathComponents, inodes); + INode inode = inodes[0]; + if (inode == null) { + throw new FileNotFoundException("Parent path does not exist: "+ + DFSUtil.byteArray2String(pathComponents)); + } + if (!inode.isDirectory()) { + throw new FileNotFoundException("Parent path is not a directory: "+ + DFSUtil.byteArray2String(pathComponents)); + } + parent = (INodeDirectory)inode; + } + // insert into the parent children list + newNode.name = pathComponents[pathLen-1]; + if(parent.addChild(newNode, inheritPermission, propagateModTime) == null) + return null; + return parent; + } + + /** {@inheritDoc} */ + DirCounts spaceConsumedInTree(DirCounts counts) { + counts.nsCount += 1; + if (children != null) { + for (INode child : children) { + child.spaceConsumedInTree(counts); + } + } + return counts; + } + + /** {@inheritDoc} */ + long[] computeContentSummary(long[] summary) { + if (children != null) { + for (INode child : children) { + child.computeContentSummary(summary); + } + } + summary[2]++; + return summary; + } + + /** + */ + List getChildren() { + return children==null ? 
new ArrayList() : children; + } + List getChildrenRaw() { + return children; + } + + int collectSubtreeBlocksAndClear(List v) { + int total = 1; + if (children == null) { + return total; + } + for (INode child : children) { + total += child.collectSubtreeBlocksAndClear(v); + } + parent = null; + children = null; + return total; + } + + /** + * Numbers of all blocks, files and directories under this directory. + * Current directory will be counted as well. + */ + public static class ItemCounts { + int numBlocks; + int numDirectories; + int numFiles; + long startTime; // time stamp when counting started + long finishTime; // time stamp when counting finished + } + + private ItemCounts itemCounts = null; + + /** + * Get item counts of the current directory. Need to do countItems() first + * if you need updated item counts. + * @return numbers of blocks, files and directories + */ + public ItemCounts getItemCounts() { + return itemCounts; + } + + /** + * Count items under the current directory + */ + public void countItems() { + itemCounts = new ItemCounts(); + itemCounts.startTime = System.currentTimeMillis(); + itemCounts.numDirectories = 1; // count the current directory + itemCounts.numFiles = 0; + itemCounts.numBlocks = 0; + for (INode child : children) { + countItemsRecursively(child); + } + itemCounts.finishTime = System.currentTimeMillis(); + } + + private void countItemsRecursively(INode curr) { + if (curr == null) { + return; + } + itemCounts.numDirectories++; + if (curr instanceof INodeDirectory) { + itemCounts.numDirectories++; + if (((INodeDirectory) curr).children != null) { + for (INode child : ((INodeDirectory) curr).children) { + countItemsRecursively(child); + } + } + } else { + itemCounts.numFiles++; + if (((INodeFile) curr).getBlocks() != null) { + itemCounts.numBlocks += ((INodeFile) curr).getBlocks().length; + } + } + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java new file mode 100644 index 0000000..9a6409a --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java @@ -0,0 +1,163 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.namenode; + +import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException; +import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException; +import org.apache.hadoop.hdfs.protocol.QuotaExceededException; + +/** + * Directory INode class that has a quota restriction + */ +class INodeDirectoryWithQuota extends INodeDirectory { + private long nsQuota; /// NameSpace quota + private long nsCount; + private long dsQuota; /// disk space quota + private long diskspace; + + /** Convert an existing directory inode to one with the given quota + * + * @param nsQuota Namespace quota to be assigned to this inode + * @param dsQuota Diskspace quota to be assigned to this indoe + * @param other The other inode from which all other properties are copied + */ + INodeDirectoryWithQuota(long nsQuota, long dsQuota, INodeDirectory other) + throws QuotaExceededException { + super(other); + INode.DirCounts counts = new INode.DirCounts(); + other.spaceConsumedInTree(counts); + this.nsCount= counts.getNsCount(); + this.diskspace = counts.getDsCount(); + setQuota(nsQuota, dsQuota); + } + + /** constructor with no quota verification */ + INodeDirectoryWithQuota( + PermissionStatus permissions, long modificationTime, + long nsQuota, long dsQuota) + { + super(permissions, modificationTime); + this.nsQuota = nsQuota; + this.dsQuota = dsQuota; + this.nsCount = 1; + } + + /** constructor with no quota verification */ + INodeDirectoryWithQuota(String name, PermissionStatus permissions, + long nsQuota, long dsQuota) + { + super(name, permissions); + this.nsQuota = nsQuota; + this.dsQuota = dsQuota; + this.nsCount = 1; + } + + /** Get this directory's namespace quota + * @return this directory's namespace quota + */ + long getNsQuota() { + return nsQuota; + } + + /** Get this directory's diskspace quota + * @return this directory's diskspace quota + */ + long getDsQuota() { + return dsQuota; + } + + /** Set this directory's quota + * + * @param nsQuota Namespace quota to be set + * @param dsQuota diskspace quota to be set + * + */ + void setQuota(long newNsQuota, long newDsQuota) throws QuotaExceededException { + nsQuota = newNsQuota; + dsQuota = newDsQuota; + } + + + @Override + DirCounts spaceConsumedInTree(DirCounts counts) { + counts.nsCount += nsCount; + counts.dsCount += diskspace; + return counts; + } + + /** Get the number of names in the subtree rooted at this directory + * @return the size of the subtree rooted at this directory + */ + long numItemsInTree() { + return nsCount; + } + + long diskspaceConsumed() { + return diskspace; + } + + /** Update the size of the tree + * + * @param nsDelta the change of the tree size + * @param dsDelta change to disk space occupied + */ + void updateNumItemsInTree(long nsDelta, long dsDelta) { + nsCount += nsDelta; + diskspace += dsDelta; + } + + /** Update the size of the tree + * + * @param nsDelta the change of the tree size + * @param dsDelta change to disk space occupied + **/ + void unprotectedUpdateNumItemsInTree(long nsDelta, long dsDelta) { + nsCount = nsCount + nsDelta; + diskspace = diskspace + dsDelta; + } + + /** + * Sets namespace and diskspace take by the directory rooted + * at this INode. This should be used carefully. It does not check + * for quota violations. 
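+   * (Quota enforcement happens separately in verifyQuota, which treats a
+   * negative quota as "no limit" and only runs its checks when the
+   * namespace count or disk space usage is increasing.)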
+ * + * @param namespace size of the directory to be set + * @param diskspace disk space take by all the nodes under this directory + */ + void setSpaceConsumed(long namespace, long diskspace) { + this.nsCount = namespace; + this.diskspace = diskspace; + } + + /** Verify if the namespace count disk space satisfies the quota restriction + * @throws QuotaExceededException if the given quota is less than the count + */ + void verifyQuota(long nsDelta, long dsDelta) throws QuotaExceededException { + long newCount = nsCount + nsDelta; + long newDiskspace = diskspace + dsDelta; + if (nsDelta>0 || dsDelta>0) { + if (nsQuota >= 0 && nsQuota < newCount) { + throw new NSQuotaExceededException(nsQuota, newCount); + } + if (dsQuota >= 0 && dsQuota < newDiskspace) { + throw new DSQuotaExceededException(dsQuota, newDiskspace); + } + } + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFile.java new file mode 100644 index 0000000..d776869 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -0,0 +1,234 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; +import java.util.List; + +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; + +class INodeFile extends INode { + static final FsPermission UMASK = FsPermission.createImmutable((short)0111); + + //Number of bits for Block size + static final short BLOCKBITS = 48; + + //Header mask 64-bit representation + //Format: [16 bits for replication][48 bits for PreferredBlockSize] + static final long HEADERMASK = 0xffffL << BLOCKBITS; + + protected long header; + + protected BlockInfo blocks[] = null; + + INodeFile(PermissionStatus permissions, + int nrBlocks, short replication, long modificationTime, + long atime, long preferredBlockSize) { + this(permissions, new BlockInfo[nrBlocks], replication, + modificationTime, atime, preferredBlockSize); + } + + protected INodeFile() { + blocks = null; + header = 0; + } + + protected INodeFile(PermissionStatus permissions, BlockInfo[] blklist, + short replication, long modificationTime, + long atime, long preferredBlockSize) { + super(permissions, modificationTime, atime); + this.setReplication(replication); + this.setPreferredBlockSize(preferredBlockSize); + blocks = blklist; + } + + /** + * Set the {@link FsPermission} of this {@link INodeFile}. + * Since this is a file, + * the {@link FsAction#EXECUTE} action, if any, is ignored. 
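+   *
+   * Concretely, UMASK is 0111 and applyUMask clears those bits: a
+   * requested permission of 0755 is stored as 0644, and 0777 as 0666
+   * (a worked example, reading the mode bits as octal).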
+ */ + protected void setPermission(FsPermission permission) { + super.setPermission(permission.applyUMask(UMASK)); + } + + public boolean isDirectory() { + return false; + } + + /** + * Get block replication for the file + * @return block replication value + */ + public short getReplication() { + return (short) ((header & HEADERMASK) >> BLOCKBITS); + } + + public void setReplication(short replication) { + if(replication <= 0) + throw new IllegalArgumentException("Unexpected value for the replication"); + header = ((long)replication << BLOCKBITS) | (header & ~HEADERMASK); + } + + /** + * Get preferred block size for the file + * @return preferred block size in bytes + */ + public long getPreferredBlockSize() { + return header & ~HEADERMASK; + } + + public void setPreferredBlockSize(long preferredBlkSize) + { + if((preferredBlkSize < 0) || (preferredBlkSize > ~HEADERMASK )) + throw new IllegalArgumentException("Unexpected value for the block size"); + header = (header & HEADERMASK) | (preferredBlkSize & ~HEADERMASK); + } + + /** + * Get file blocks + * @return file blocks + */ + BlockInfo[] getBlocks() { + return this.blocks; + } + + /** + * append array of blocks to this.blocks + */ + void appendBlocks(INodeFile [] inodes, int totalAddedBlocks) { + int size = this.blocks.length; + + BlockInfo[] newlist = new BlockInfo[size + totalAddedBlocks]; + System.arraycopy(this.blocks, 0, newlist, 0, size); + + for(INodeFile in: inodes) { + System.arraycopy(in.blocks, 0, newlist, size, in.blocks.length); + size += in.blocks.length; + } + + for(BlockInfo bi: this.blocks) { + bi.setINode(this); + } + this.blocks = newlist; + } + + /** + * add a block to the block list + */ + void addBlock(BlockInfo newblock) { + if (this.blocks == null) { + this.blocks = new BlockInfo[1]; + this.blocks[0] = newblock; + } else { + int size = this.blocks.length; + BlockInfo[] newlist = new BlockInfo[size + 1]; + System.arraycopy(this.blocks, 0, newlist, 0, size); + newlist[size] = newblock; + this.blocks = newlist; + } + } + + /** + * Set file block + */ + void setBlock(int idx, BlockInfo blk) { + this.blocks[idx] = blk; + } + + int collectSubtreeBlocksAndClear(List v) { + parent = null; + if(blocks != null && v != null) { + for (Block blk : blocks) { + v.add(blk); + } + } + blocks = null; + return 1; + } + + /** {@inheritDoc} */ + long[] computeContentSummary(long[] summary) { + long bytes = 0; + for(Block blk : blocks) { + bytes += blk.getNumBytes(); + } + summary[0] += bytes; + summary[1]++; + summary[3] += diskspaceConsumed(); + return summary; + } + + + + @Override + DirCounts spaceConsumedInTree(DirCounts counts) { + counts.nsCount += 1; + counts.dsCount += diskspaceConsumed(); + return counts; + } + + long diskspaceConsumed() { + return diskspaceConsumed(blocks); + } + + long diskspaceConsumed(Block[] blkArr) { + long size = 0; + if(blkArr == null) { + return 0; + } + for (Block blk : blkArr) { + if (blk != null) { + size += blk.getNumBytes(); + } + } + /* If the last block is being written to, use prefferedBlockSize + * rather than the actual block size. + */ + if (blkArr.length > 0 && blkArr[blkArr.length-1] != null && + isUnderConstruction()) { + size += getPreferredBlockSize() - blocks[blocks.length-1].getNumBytes(); + } + return size * getReplication(); + } + + /** + * Return the penultimate allocated block for this file. 
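+   * For a file whose block list is [b0, b1, b2] this returns b1; for a
+   * file with fewer than two blocks it returns null (a small illustrative
+   * example of the behaviour implemented below).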
+ */ + Block getPenultimateBlock() { + if (blocks == null || blocks.length <= 1) { + return null; + } + return blocks[blocks.length - 2]; + } + + INodeFileUnderConstruction toINodeFileUnderConstruction( + String clientName, String clientMachine, DatanodeDescriptor clientNode + ) throws IOException { + if (isUnderConstruction()) { + return (INodeFileUnderConstruction)this; + } + return new INodeFileUnderConstruction(name, + getReplication(), modificationTime, getPreferredBlockSize(), + blocks, getPermissionStatus(), + clientName, clientMachine, clientNode); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java new file mode 100644 index 0000000..8a516df --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java @@ -0,0 +1,181 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; + +import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; + + +class INodeFileUnderConstruction extends INodeFile { + final String clientName; // lease holder + private final String clientMachine; + private final DatanodeDescriptor clientNode; // if client is a cluster node too. + + private int primaryNodeIndex = -1; //the node working on lease recovery + private DatanodeDescriptor[] targets = null; //locations for last block + private long lastRecoveryTime = 0; + + INodeFileUnderConstruction(PermissionStatus permissions, + short replication, + long preferredBlockSize, + long modTime, + String clientName, + String clientMachine, + DatanodeDescriptor clientNode) { + super(permissions.applyUMask(UMASK), 0, replication, modTime, modTime, + preferredBlockSize); + this.clientName = clientName; + this.clientMachine = clientMachine; + this.clientNode = clientNode; + } + + public INodeFileUnderConstruction(byte[] name, + short blockReplication, + long modificationTime, + long preferredBlockSize, + BlockInfo[] blocks, + PermissionStatus perm, + String clientName, + String clientMachine, + DatanodeDescriptor clientNode) { + super(perm, blocks, blockReplication, modificationTime, modificationTime, + preferredBlockSize); + setLocalName(name); + this.clientName = clientName; + this.clientMachine = clientMachine; + this.clientNode = clientNode; + } + + String getClientName() { + return clientName; + } + + String getClientMachine() { + return clientMachine; + } + + DatanodeDescriptor getClientNode() { + return clientNode; + } + + /** + * Is this inode being constructed? 
+ */ + @Override + boolean isUnderConstruction() { + return true; + } + + DatanodeDescriptor[] getTargets() { + return targets; + } + + void setTargets(DatanodeDescriptor[] targets) { + this.targets = targets; + this.primaryNodeIndex = -1; + } + + // + // converts a INodeFileUnderConstruction into a INodeFile + // use the modification time as the access time + // + INodeFile convertToInodeFile() { + INodeFile obj = new INodeFile(getPermissionStatus(), + getBlocks(), + getReplication(), + getModificationTime(), + getModificationTime(), + getPreferredBlockSize()); + return obj; + + } + + /** + * remove a block from the block list. This block should be + * the last one on the list. + */ + void removeBlock(Block oldblock) throws IOException { + if (blocks == null) { + throw new IOException("Trying to delete non-existant block " + oldblock); + } + int size_1 = blocks.length - 1; + if (!blocks[size_1].equals(oldblock)) { + throw new IOException("Trying to delete non-last block " + oldblock); + } + + //copy to a new list + BlockInfo[] newlist = new BlockInfo[size_1]; + System.arraycopy(blocks, 0, newlist, 0, size_1); + blocks = newlist; + + // Remove the block locations for the last block. + targets = null; + } + + synchronized void setLastBlock(BlockInfo newblock, DatanodeDescriptor[] newtargets + ) throws IOException { + if (blocks == null) { + throw new IOException("Trying to update non-existant block (newblock=" + + newblock + ")"); + } + blocks[blocks.length - 1] = newblock; + setTargets(newtargets); + lastRecoveryTime = 0; + } + + /** + * Initialize lease recovery for this object + */ + void assignPrimaryDatanode() { + //assign the first alive datanode as the primary datanode + + if (targets.length == 0) { + NameNode.stateChangeLog.warn("BLOCK*" + + " INodeFileUnderConstruction.initLeaseRecovery:" + + " No blocks found, lease removed."); + } + + int previous = primaryNodeIndex; + //find an alive datanode beginning from previous + for(int i = 1; i <= targets.length; i++) { + int j = (previous + i)%targets.length; + if (targets[j].isAlive) { + DatanodeDescriptor primary = targets[primaryNodeIndex = j]; + primary.addBlockToBeRecovered(blocks[blocks.length - 1], targets); + NameNode.stateChangeLog.info("BLOCK* " + blocks[blocks.length - 1] + + " recovery started, primary=" + primary); + return; + } + } + } + + /** + * Update lastRecoveryTime if expired. + * @return true if lastRecoveryTimeis updated. + */ + synchronized boolean setLastRecoveryTime(long now) { + boolean expired = now - lastRecoveryTime > NameNode.LEASE_RECOVER_PERIOD; + if (expired) { + lastRecoveryTime = now; + } + return expired; + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java new file mode 100644 index 0000000..6743a06 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java @@ -0,0 +1,482 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryMXBean; +import java.lang.management.MemoryUsage; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.Random; +import java.util.TreeMap; +import java.util.TreeSet; +import java.util.Arrays; + +import javax.servlet.http.HttpServletRequest; +import javax.servlet.jsp.JspWriter; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSClient; +import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction; +import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; +import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.*; + +public class JspHelper { + final static public String WEB_UGI_PROPERTY_NAME = "dfs.web.ugi"; + + static FSNamesystem fsn = null; + public static InetSocketAddress nameNodeAddr; + public static final Configuration conf = new Configuration(); + public static final UnixUserGroupInformation webUGI + = UnixUserGroupInformation.createImmutable( + conf.getStrings(WEB_UGI_PROPERTY_NAME)); + + // data structure to count number of blocks on datanodes. 
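+  // Each record pairs a datanode with the number of requested blocks it
+  // holds. bestNode(LocatedBlocks) below builds one record per replica
+  // location, sorts the records by this frequency, and then walks the
+  // sorted list looking for a reachable node.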
+ private static class NodeRecord extends DatanodeInfo { + int frequency; + + public NodeRecord() { + frequency = -1; + } + public NodeRecord(DatanodeInfo info, int count) { + super(info); + this.frequency = count; + } + } + + // compare two records based on their frequency + private static class NodeRecordComparator implements Comparator { + + public int compare(NodeRecord o1, NodeRecord o2) { + if (o1.frequency < o2.frequency) { + return -1; + } else if (o1.frequency > o2.frequency) { + return 1; + } + return 0; + } + } + + public static final int defaultChunkSizeToView = + conf.getInt("dfs.default.chunk.view.size", 32 * 1024); + static Random rand = new Random(); + + public JspHelper() { + fsn = FSNamesystem.getFSNamesystem(); + if (DataNode.getDataNode() != null) { + nameNodeAddr = NameNode.getAddress(DataNode.getDataNode().getConf()); + } + else { + Configuration runningConf = fsn.getNameNode().getConf(); + nameNodeAddr = NameNode.getAddress(runningConf); + } + + UnixUserGroupInformation.saveToConf(conf, + UnixUserGroupInformation.UGI_PROPERTY_NAME, webUGI); + } + + public DatanodeID randomNode() throws IOException { + return fsn.getRandomDatanode(); + } + + /** + * prefer the node which has maximum local copies of all blocks + */ + public DatanodeInfo bestNode(LocatedBlocks blks) throws IOException { + // insert all known replica locations into a tree map where the + // key is the DatanodeInfo + TreeMap map = + new TreeMap(); + for (int i = 0; i < blks.getLocatedBlocks().size(); i++) { + DatanodeInfo [] nodes = blks.get(i).getLocations(); + for (int j = 0; j < nodes.length; j++) { + NodeRecord obj = map.get(nodes[j]); + if (obj != null) { + obj.frequency++; + } else { + map.put(nodes[j], new NodeRecord(nodes[j], 1)); + } + } + } + // sort all locations by their frequency of occurance + Collection values = map.values(); + NodeRecord[] nodes = (NodeRecord[]) + values.toArray(new NodeRecord[values.size()]); + Arrays.sort(nodes, new NodeRecordComparator()); + return bestNode(nodes, false); + } + + /** + * return a random node from the replicas of this block + */ + public DatanodeInfo bestNode(LocatedBlock blk) throws IOException { + DatanodeInfo [] nodes = blk.getLocations(); + return bestNode(nodes, true); + } + + /** + * Choose a datanode from the specified list. If doRamdom is true, then + * a random datanode is selected. Otherwise, a node that appears earlier + * in the list has more probability of being selected + */ + private DatanodeInfo bestNode(DatanodeInfo[] nodes, boolean doRandom) + throws IOException { + TreeSet deadNodes = new TreeSet(); + DatanodeInfo chosenNode = null; + int failures = 0; + Socket s = null; + int index = -1; + if (nodes == null || nodes.length == 0) { + throw new IOException("No nodes contain this block"); + } + while (s == null) { + if (chosenNode == null) { + do { + if (doRandom) { + index = rand.nextInt(nodes.length); + } else { + index++; + } + chosenNode = nodes[index]; + } while (deadNodes.contains(chosenNode)); + } + chosenNode = nodes[index]; + + //just ping to check whether the node is alive + InetSocketAddress targetAddr = NetUtils.createSocketAddr( + chosenNode.getHost() + ":" + chosenNode.getInfoPort()); + + try { + s = new Socket(); + s.connect(targetAddr, HdfsConstants.READ_TIMEOUT); + s.setSoTimeout(HdfsConstants.READ_TIMEOUT); + } catch (IOException e) { + deadNodes.add(chosenNode); + s.close(); + s = null; + failures++; + } + if (failures == nodes.length) + throw new IOException("Could not reach the block containing the data. 
Please try again"); + + } + s.close(); + return chosenNode; + } + public void streamBlockInAscii(InetSocketAddress addr, long blockId, + long genStamp, long blockSize, + long offsetIntoBlock, long chunkSizeToView, JspWriter out) + throws IOException { + if (chunkSizeToView == 0) return; + Socket s = new Socket(); + s.connect(addr, HdfsConstants.READ_TIMEOUT); + s.setSoTimeout(HdfsConstants.READ_TIMEOUT); + + long amtToRead = Math.min(chunkSizeToView, blockSize - offsetIntoBlock); + + // Use the block name for file name. + DFSClient.BlockReader blockReader = + DFSClient.BlockReader.newBlockReader(s, addr.toString() + ":" + blockId, + blockId, genStamp ,offsetIntoBlock, + amtToRead, + conf.getInt("io.file.buffer.size", + 4096)); + + byte[] buf = new byte[(int)amtToRead]; + int readOffset = 0; + int retries = 2; + while ( amtToRead > 0 ) { + int numRead; + try { + numRead = blockReader.readAll(buf, readOffset, (int)amtToRead); + } + catch (IOException e) { + retries--; + if (retries == 0) + throw new IOException("Could not read data from datanode"); + continue; + } + amtToRead -= numRead; + readOffset += numRead; + } + blockReader = null; + s.close(); + out.print(new String(buf)); + } + public void DFSNodesStatus(ArrayList live, + ArrayList dead, + ArrayList excluded) { + if (fsn != null) + fsn.DFSNodesStatus(live, dead, excluded); + } + public void addTableHeader(JspWriter out) throws IOException { + out.print(""); + out.print(""); + } + public void addTableRow(JspWriter out, String[] columns) throws IOException { + out.print(""); + for (int i = 0; i < columns.length; i++) { + out.print(""); + } + out.print(""); + } + public void addTableRow(JspWriter out, String[] columns, int row) throws IOException { + out.print(""); + + for (int i = 0; i < columns.length; i++) { + if (row/2*2 == row) {//even + out.print(""); + } else { + out.print(""); + + } + } + out.print(""); + } + public void addTableFooter(JspWriter out) throws IOException { + out.print("
"+columns[i]+"
"+columns[i]+"
"+columns[i]+"
"); + } + + public String getSafeModeText() { + if (!fsn.isInSafeMode()) + return ""; + return "Safe mode is ON. " + fsn.getSafeModeTip() + "
"; + } + + public static String getWarningText(FSNamesystem fsn) { + // Ideally this should be displayed in RED + long missingBlocks = fsn.getMissingBlocksCount(); + if (missingBlocks > 0) { + return "
WARNING :" + + " There are about " + missingBlocks + + " missing blocks. Please check the log or run fsck.

"; + } + return ""; + } + + public String getInodeLimitText() { + long inodes = fsn.dir.totalInodes(); + long blocks = fsn.getBlocksTotal(); + long maxobjects = fsn.getMaxObjects(); + MemoryMXBean mem = ManagementFactory.getMemoryMXBean(); + + MemoryUsage heap = mem.getHeapMemoryUsage(); + MemoryUsage nonHeap = mem.getNonHeapMemoryUsage(); + + long totalHeap = heap.getUsed(); + long maxHeap = heap.getMax(); + long commitedHeap = heap.getCommitted(); + long initHeap = heap.getInit(); + + long totalNonHeap = nonHeap.getUsed(); + long maxNonHeap = nonHeap.getMax(); + long commitedNonHeap = nonHeap.getCommitted(); + + long used = (totalHeap * 100)/maxHeap; + long usedNonHeap = (totalNonHeap * 100) / maxNonHeap; + + + String str = inodes + " files and directories, " + + blocks + " blocks = " + + (inodes + blocks) + " total"; + if (maxobjects != 0) { + long pct = ((inodes + blocks) * 100)/maxobjects; + str += " / " + maxobjects + " (" + pct + "%)"; + } + str += ". Heap Size is " + StringUtils.byteDesc(totalHeap) + " / " + + StringUtils.byteDesc(maxHeap) + " (" + used + "%). Commited Heap: " + + StringUtils.byteDesc(commitedHeap) + ". Init Heap: " + + StringUtils.byteDesc(initHeap) + ".
"; + str += " Non Heap Memory Size is " + StringUtils.byteDesc(totalNonHeap) + + " / " + StringUtils.byteDesc(maxNonHeap) + + " (" + usedNonHeap + "%). Commited Non Heap: " + + StringUtils.byteDesc(commitedNonHeap) + ".
"; + return str; + } + + public String getUpgradeStatusText() { + String statusText = ""; + try { + UpgradeStatusReport status = + fsn.distributedUpgradeProgress(UpgradeAction.GET_STATUS); + statusText = (status == null ? + "There are no upgrades in progress." : + status.getStatusText(false)); + } catch(IOException e) { + statusText = "Upgrade status unknown."; + } + return statusText; + } + + public void sortNodeList(ArrayList nodes, + String field, String order) { + + class NodeComapare implements Comparator { + static final int + FIELD_NAME = 1, + FIELD_LAST_CONTACT = 2, + FIELD_BLOCKS = 3, + FIELD_CAPACITY = 4, + FIELD_USED = 5, + FIELD_PERCENT_USED = 6, + FIELD_NONDFS_USED = 7, + FIELD_REMAINING = 8, + FIELD_PERCENT_REMAINING = 9, + SORT_ORDER_ASC = 1, + SORT_ORDER_DSC = 2; + + int sortField = FIELD_NAME; + int sortOrder = SORT_ORDER_ASC; + + public NodeComapare(String field, String order) { + if (field.equals("lastcontact")) { + sortField = FIELD_LAST_CONTACT; + } else if (field.equals("capacity")) { + sortField = FIELD_CAPACITY; + } else if (field.equals("used")) { + sortField = FIELD_USED; + } else if (field.equals("nondfsused")) { + sortField = FIELD_NONDFS_USED; + } else if (field.equals("remaining")) { + sortField = FIELD_REMAINING; + } else if (field.equals("pcused")) { + sortField = FIELD_PERCENT_USED; + } else if (field.equals("pcremaining")) { + sortField = FIELD_PERCENT_REMAINING; + } else if (field.equals("blocks")) { + sortField = FIELD_BLOCKS; + } else { + sortField = FIELD_NAME; + } + + if (order.equals("DSC")) { + sortOrder = SORT_ORDER_DSC; + } else { + sortOrder = SORT_ORDER_ASC; + } + } + + public int compare(DatanodeDescriptor d1, + DatanodeDescriptor d2) { + int ret = 0; + switch (sortField) { + case FIELD_LAST_CONTACT: + ret = (int) (d2.getLastUpdate() - d1.getLastUpdate()); + break; + case FIELD_CAPACITY: + long dlong = d1.getCapacity() - d2.getCapacity(); + ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0); + break; + case FIELD_USED: + dlong = d1.getDfsUsed() - d2.getDfsUsed(); + ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0); + break; + case FIELD_NONDFS_USED: + dlong = d1.getNonDfsUsed() - d2.getNonDfsUsed(); + ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0); + break; + case FIELD_REMAINING: + dlong = d1.getRemaining() - d2.getRemaining(); + ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0); + break; + case FIELD_PERCENT_USED: + double ddbl =((d1.getDfsUsedPercent())- + (d2.getDfsUsedPercent())); + ret = (ddbl < 0) ? -1 : ((ddbl > 0) ? 1 : 0); + break; + case FIELD_PERCENT_REMAINING: + ddbl =((d1.getRemainingPercent())- + (d2.getRemainingPercent())); + ret = (ddbl < 0) ? -1 : ((ddbl > 0) ? 1 : 0); + break; + case FIELD_BLOCKS: + ret = d1.numBlocks() - d2.numBlocks(); + break; + case FIELD_NAME: + ret = d1.getHostName().compareTo(d2.getHostName()); + break; + } + return (sortOrder == SORT_ORDER_DSC) ? 
-ret : ret; + } + } + + Collections.sort(nodes, new NodeComapare(field, order)); + } + + public static void printPathWithLinks(String dir, JspWriter out, int namenodeInfoPort ) throws IOException { + try { + String[] parts = dir.split(Path.SEPARATOR); + StringBuilder tempPath = new StringBuilder(dir.length()); + out.print("" + Path.SEPARATOR + ""); + tempPath.append(Path.SEPARATOR); + for (int i = 0; i < parts.length-1; i++) { + if (!parts[i].equals("")) { + tempPath.append(parts[i]); + out.print("" + parts[i] + "" + Path.SEPARATOR); + tempPath.append(Path.SEPARATOR); + } + } + if(parts.length > 0) { + out.print(parts[parts.length-1]); + } + } + catch (UnsupportedEncodingException ex) { + ex.printStackTrace(); + } + } + + public static void printGotoForm(JspWriter out, int namenodeInfoPort, String file) throws IOException { + out.print("

"); + out.print("Goto : "); + out.print(""); + out.print(""); + out.print(""); + out.print("
"); + } + + public static void createTitle(JspWriter out, + HttpServletRequest req, String file) throws IOException{ + if(file == null) file = ""; + int start = Math.max(0,file.length() - 100); + if(start != 0) + file = "..." + file.substring(start, file.length()); + out.print("HDFS:" + file + ""); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/LeaseExpiredException.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/LeaseExpiredException.java new file mode 100644 index 0000000..bc47689 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/LeaseExpiredException.java @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; + +/** + * The lease that was being used to create this file has expired. + */ +public class LeaseExpiredException extends IOException { + public LeaseExpiredException(String msg) { + super(msg); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java new file mode 100644 index 0000000..52ffd15 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java @@ -0,0 +1,421 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; +import java.util.SortedSet; +import java.util.TreeMap; +import java.util.TreeSet; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.protocol.FSConstants; + +/** + * LeaseManager does the lease housekeeping for writing on files. + * This class also provides useful static methods for lease recovery. 
+ * + * Lease Recovery Algorithm + * 1) Namenode retrieves lease information + * 2) For each file f in the lease, consider the last block b of f + * 2.1) Get the datanodes which contains b + * 2.2) Assign one of the datanodes as the primary datanode p + + * 2.3) p obtains a new generation stamp form the namenode + * 2.4) p get the block info from each datanode + * 2.5) p computes the minimum block length + * 2.6) p updates the datanodes, which have a valid generation stamp, + * with the new generation stamp and the minimum block length + * 2.7) p acknowledges the namenode the update results + + * 2.8) Namenode updates the BlockInfo + * 2.9) Namenode removes f from the lease + * and removes the lease once all files have been removed + * 2.10) Namenode commit changes to edit log + */ +public class LeaseManager { + public static final Log LOG = LogFactory.getLog(LeaseManager.class); + + private final FSNamesystem fsnamesystem; + + private long softLimit = FSConstants.LEASE_SOFTLIMIT_PERIOD; + private long hardLimit = FSConstants.LEASE_HARDLIMIT_PERIOD; + + // + // Used for handling lock-leases + // Mapping: leaseHolder -> Lease + // + private SortedMap leases = new TreeMap(); + // Set of: Lease + private SortedSet sortedLeases = new TreeSet(); + + // + // Map path names to leases. It is protected by the sortedLeases lock. + // The map stores pathnames in lexicographical order. + // + private SortedMap sortedLeasesByPath = new TreeMap(); + + LeaseManager(FSNamesystem fsnamesystem) {this.fsnamesystem = fsnamesystem;} + + Lease getLease(String holder) { + return leases.get(holder); + } + + SortedSet getSortedLeases() {return sortedLeases;} + + /** @return the lease containing src */ + public Lease getLeaseByPath(String src) {return sortedLeasesByPath.get(src);} + + /** @return the number of leases currently in the system */ + public synchronized int countLease() {return sortedLeases.size();} + + /** @return the number of paths contained in all leases */ + synchronized int countPath() { + int count = 0; + for(Lease lease : sortedLeases) { + count += lease.getPaths().size(); + } + return count; + } + + /** + * Adds (or re-adds) the lease for the specified file. + */ + synchronized void addLease(String holder, String src) { + Lease lease = getLease(holder); + if (lease == null) { + lease = new Lease(holder); + leases.put(holder, lease); + sortedLeases.add(lease); + } else { + renewLease(lease); + } + sortedLeasesByPath.put(src, lease); + lease.paths.add(src); + } + + /** + * Remove the specified lease and src. + */ + synchronized void removeLease(Lease lease, String src) { + sortedLeasesByPath.remove(src); + if (!lease.removePath(src)) { + LOG.error(src + " not found in lease.paths (=" + lease.paths + ")"); + } + + if (!lease.hasPath()) { + leases.remove(lease.holder); + if (!sortedLeases.remove(lease)) { + LOG.error(lease + " not found in sortedLeases"); + } + } + } + + /** + * Remove the lease for the specified holder and src + */ + synchronized void removeLease(String holder, String src) { + Lease lease = getLease(holder); + if (lease != null) { + removeLease(lease, src); + } + } + + /** + * Finds the pathname for the specified pendingFile + */ + synchronized String findPath(INodeFileUnderConstruction pendingFile + ) throws IOException { + Lease lease = getLease(pendingFile.clientName); + if (lease != null) { + String src = lease.findPath(pendingFile); + if (src != null) { + return src; + } + } + throw new IOException("pendingFile (=" + pendingFile + ") not found." 
+ + "(lease=" + lease + ")"); + } + + /** + * Renew the lease(s) held by the given client + */ + synchronized void renewLease(String holder) { + renewLease(getLease(holder)); + } + synchronized void renewLease(Lease lease) { + if (lease != null) { + sortedLeases.remove(lease); + lease.renew(); + sortedLeases.add(lease); + } + } + + /** + * for testing only + */ + synchronized void replaceLease(Lease newLease) { + leases.put(newLease.getHolder(), newLease); + sortedLeases.remove(newLease); + sortedLeases.add(newLease); + + for (String path : newLease.paths) { + sortedLeasesByPath.put(path, newLease); + } + } + /************************************************************ + * A Lease governs all the locks held by a single client. + * For each client there's a corresponding lease, whose + * timestamp is updated when the client periodically + * checks in. If the client dies and allows its lease to + * expire, all the corresponding locks can be released. + *************************************************************/ + class Lease implements Comparable { + private final String holder; + private long lastUpdate; + private final Collection paths = new TreeSet(); + + /** Only LeaseManager object can create a lease */ + private Lease(String holder) { + this.holder = holder; + renew(); + } + /** Only LeaseManager object can renew a lease */ + private void renew() { + this.lastUpdate = FSNamesystem.now(); + } + + /** @return true if the Hard Limit Timer has expired */ + public boolean expiredHardLimit() { + return FSNamesystem.now() - lastUpdate > hardLimit; + } + + /** @return true if the Soft Limit Timer has expired */ + public boolean expiredSoftLimit() { + return FSNamesystem.now() - lastUpdate > softLimit; + } + + /** + * @return the path associated with the pendingFile and null if not found. + */ + private String findPath(INodeFileUnderConstruction pendingFile) { + for(String src : paths) { + if (fsnamesystem.dir.getFileINode(src) == pendingFile) { + return src; + } + } + return null; + } + + /** Does this lease contain any path? */ + boolean hasPath() {return !paths.isEmpty();} + + boolean removePath(String src) { + return paths.remove(src); + } + + /** {@inheritDoc} */ + public String toString() { + return "[Lease. 
Holder: " + holder + + ", pendingcreates: " + paths.size() + "]"; + } + + /** {@inheritDoc} */ + public int compareTo(Lease o) { + Lease l1 = this; + Lease l2 = o; + long lu1 = l1.lastUpdate; + long lu2 = l2.lastUpdate; + if (lu1 < lu2) { + return -1; + } else if (lu1 > lu2) { + return 1; + } else { + return l1.holder.compareTo(l2.holder); + } + } + + /** {@inheritDoc} */ + public boolean equals(Object o) { + if (!(o instanceof Lease)) { + return false; + } + Lease obj = (Lease) o; + if (lastUpdate == obj.lastUpdate && + holder.equals(obj.holder)) { + return true; + } + return false; + } + + /** {@inheritDoc} */ + public int hashCode() { + return holder.hashCode(); + } + + Collection getPaths() { + return paths; + } + + void replacePath(String oldpath, String newpath) { + paths.remove(oldpath); + paths.add(newpath); + } + + String getHolder() { + return holder; + } + } + + synchronized void changeLease(String src, String dst, + String overwrite, String replaceBy) { + if (LOG.isDebugEnabled()) { + LOG.debug(getClass().getSimpleName() + ".changelease: " + + " src=" + src + ", dest=" + dst + + ", overwrite=" + overwrite + + ", replaceBy=" + replaceBy); + } + + final int len = overwrite.length(); + for(Map.Entry entry : findLeaseWithPrefixPath(src, sortedLeasesByPath)) { + final String oldpath = entry.getKey(); + final Lease lease = entry.getValue(); + //overwrite must be a prefix of oldpath + final String newpath = replaceBy + oldpath.substring(len); + if (LOG.isDebugEnabled()) { + LOG.debug("changeLease: replacing " + oldpath + " with " + newpath); + } + lease.replacePath(oldpath, newpath); + sortedLeasesByPath.remove(oldpath); + sortedLeasesByPath.put(newpath, lease); + } + } + + synchronized void removeLeaseWithPrefixPath(String prefix) { + for(Map.Entry entry : findLeaseWithPrefixPath(prefix, sortedLeasesByPath)) { + if (LOG.isDebugEnabled()) { + LOG.debug(LeaseManager.class.getSimpleName() + + ".removeLeaseWithPrefixPath: entry=" + entry); + } + removeLease(entry.getValue(), entry.getKey()); + } + } + + static private List> findLeaseWithPrefixPath( + String prefix, SortedMap path2lease) { + if (LOG.isDebugEnabled()) { + LOG.debug(LeaseManager.class.getSimpleName() + ".findLease: prefix=" + prefix); + } + + List> entries = new ArrayList>(); + final int srclen = prefix.length(); + + for(Map.Entry entry : path2lease.tailMap(prefix).entrySet()) { + final String p = entry.getKey(); + if (!p.startsWith(prefix)) { + return entries; + } + if (p.length() == srclen || p.charAt(srclen) == Path.SEPARATOR_CHAR) { + entries.add(entry); + } + } + return entries; + } + + public void setLeasePeriod(long softLimit, long hardLimit) { + this.softLimit = softLimit; + this.hardLimit = hardLimit; + } + + /****************************************************** + * Monitor checks for leases that have expired, + * and disposes of them. + ******************************************************/ + class Monitor implements Runnable { + final String name = getClass().getSimpleName(); + + /** Check leases periodically. */ + public void run() { + for(; fsnamesystem.isRunning(); ) { + fsnamesystem.writeLock(); + try { + if (!fsnamesystem.isInSafeMode()) { + checkLeases(); + } + } finally { + fsnamesystem.writeUnlock(); + } + + try { + Thread.sleep(2000); + } catch(InterruptedException ie) { + if (LOG.isDebugEnabled()) { + LOG.debug(name + " is interrupted", ie); + } + } + } + } + } + + /** Check the leases beginning from the oldest. 
*/ + private synchronized void checkLeases() { + for(; sortedLeases.size() > 0; ) { + final Lease oldest = sortedLeases.first(); + if (!oldest.expiredHardLimit()) { + return; + } + + LOG.info("Lease " + oldest + " has expired hard limit"); + + final List removing = new ArrayList(); + // need to create a copy of the oldest lease paths, becuase + // internalReleaseLease() removes paths corresponding to empty files, + // i.e. it needs to modify the collection being iterated over + // causing ConcurrentModificationException + String[] leasePaths = new String[oldest.getPaths().size()]; + oldest.getPaths().toArray(leasePaths); + for(String p : leasePaths) { + try { + fsnamesystem.internalReleaseLease(oldest, p); + } catch (IOException e) { + LOG.error("Cannot release the path "+p+" in the lease "+oldest, e); + removing.add(p); + } + } + + for(String p : removing) { + removeLease(oldest, p); + } + } + } + + /** {@inheritDoc} */ + public synchronized String toString() { + return getClass().getSimpleName() + "= {" + + "\n leases=" + leases + + "\n sortedLeases=" + sortedLeases + + "\n sortedLeasesByPath=" + sortedLeasesByPath + + "\n}"; + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java new file mode 100644 index 0000000..35e00dd --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java @@ -0,0 +1,193 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.HftpFileSystem; +import org.apache.hadoop.hdfs.protocol.ClientProtocol; +import org.apache.hadoop.hdfs.protocol.DirectoryListing; +import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.security.UnixUserGroupInformation; +import org.apache.hadoop.util.VersionInfo; + +import org.znerd.xmlenc.*; + +import java.io.IOException; +import java.io.PrintWriter; +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.HashMap; +import java.util.Map; +import java.util.Stack; +import java.util.regex.Pattern; +import javax.servlet.ServletException; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +/** + * Obtain meta-information about a filesystem. 
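+ *
+ * A typical request (the host, port and path here are hypothetical)
+ * looks like:
+ * {@code http://namenode.example.com:50070/listPaths/user/alice?recursive=yes}
+ *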
+ * @see org.apache.hadoop.hdfs.HftpFileSystem + */ +public class ListPathsServlet extends DfsServlet { + /** For java.io.Serializable */ + private static final long serialVersionUID = 1L; + + public static final ThreadLocal df = + new ThreadLocal() { + protected SimpleDateFormat initialValue() { + return HftpFileSystem.getDateFormat(); + } + }; + + /** + * Write a node to output. + * Node information includes path, modification, permission, owner and group. + * For files, it also includes size, replication and block-size. + */ + static void writeInfo(String parent, HdfsFileStatus i, XMLOutputter doc) throws IOException { + final SimpleDateFormat ldf = df.get(); + doc.startTag(i.isDir() ? "directory" : "file"); + doc.attribute("path", i.getFullPath(new Path(parent)).toUri().getPath()); + doc.attribute("modified", ldf.format(new Date(i.getModificationTime()))); + doc.attribute("accesstime", ldf.format(new Date(i.getAccessTime()))); + if (!i.isDir()) { + doc.attribute("size", String.valueOf(i.getLen())); + doc.attribute("replication", String.valueOf(i.getReplication())); + doc.attribute("blocksize", String.valueOf(i.getBlockSize())); + } + doc.attribute("permission", (i.isDir()? "d": "-") + i.getPermission()); + doc.attribute("owner", i.getOwner()); + doc.attribute("group", i.getGroup()); + doc.endTag(); + } + + /** + * Build a map from the query string, setting values and defaults. + */ + protected Map buildRoot(HttpServletRequest request, + XMLOutputter doc) { + final String path = request.getPathInfo() != null + ? request.getPathInfo() : "/"; + final String exclude = request.getParameter("exclude") != null + ? request.getParameter("exclude") : "\\..*\\.crc"; + final String filter = request.getParameter("filter") != null + ? request.getParameter("filter") : ".*"; + final boolean recur = request.getParameter("recursive") != null + && "yes".equals(request.getParameter("recursive")); + + Map root = new HashMap(); + root.put("path", path); + root.put("recursive", recur ? "yes" : "no"); + root.put("filter", filter); + root.put("exclude", exclude); + root.put("time", df.get().format(new Date())); + root.put("version", VersionInfo.getVersion()); + return root; + } + + /** + * Service a GET request as described below. 
+ * Request: + * {@code + * GET http://:/listPaths[/][[&option]*] HTTP/1.1 + * } + * + * Where option (default) in: + * recursive ("no") + * filter (".*") + * exclude ("\..*\.crc") + * + * Response: A flat list of files/directories in the following format: + * {@code + * + * + * + * + * } + */ + public void doGet(HttpServletRequest request, HttpServletResponse response) + throws ServletException, IOException { + final UnixUserGroupInformation ugi = getUGI(request); + final PrintWriter out = response.getWriter(); + final XMLOutputter doc = new XMLOutputter(out, "UTF-8"); + try { + final Map root = buildRoot(request, doc); + final String path = root.get("path"); + final boolean recur = "yes".equals(root.get("recursive")); + final Pattern filter = Pattern.compile(root.get("filter")); + final Pattern exclude = Pattern.compile(root.get("exclude")); + ClientProtocol nnproxy = createNameNodeProxy(ugi); + + doc.declaration(); + doc.startTag("listing"); + for (Map.Entry m : root.entrySet()) { + doc.attribute(m.getKey(), m.getValue()); + } + + HdfsFileStatus base = nnproxy.getHdfsFileInfo(path); + if ((base != null) && base.isDir()) { + writeInfo(path, base, doc); + } + + Stack pathstack = new Stack(); + pathstack.push(path); + while (!pathstack.empty()) { + String p = pathstack.pop(); + try { + byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME; + DirectoryListing thisListing; + do { + assert lastReturnedName != null; + thisListing = nnproxy.getPartialListing(p, lastReturnedName); + if (thisListing == null) { + if (lastReturnedName.length == 0) { + LOG.warn("ListPathsServlet - Path " + p + " does not exist"); + } + break; + } + HdfsFileStatus[] listing = thisListing.getPartialListing(); + for (HdfsFileStatus i : listing) { + String localName = i.getLocalName(); + if (exclude.matcher(localName).matches() + || !filter.matcher(localName).matches()) { + continue; + } + if (recur && i.isDir()) { + pathstack.push(new Path(p, localName).toUri().getPath()); + } + writeInfo(p, i, doc); + } + lastReturnedName = thisListing.getLastName(); + } while (thisListing.hasMore()); + } catch(RemoteException re) {re.writeXml(p, doc);} + } + if (doc != null) { + doc.endDocument(); + } + } finally { + if (out != null) { + out.close(); + } + } + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameCache.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameCache.java new file mode 100644 index 0000000..59c0a30 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameCache.java @@ -0,0 +1,155 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +/** + * Caches frequently used names to facilitate reuse. + * (example: byte[] representation of the file name in {@link INode}). + * + * This class is used by initially adding all the file names. Cache + * tracks the number of times a name is used in a transient map. It promotes + * a name used more than {@code useThreshold} to the cache. + * + * Once all the names are added, {@link #initialized()} should be called to + * finish initialization. The transient map where use count is tracked is + * discarded and cache is ready for use. + * + *
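To make the promotion scheme described above concrete, here is a minimal usage sketch. It assumes the class is generic in the name type (String is used purely for illustration), and the names and threshold are invented; only put(), initialized(), size() and getLookupCount() come from the class itself.

    // Illustrative only: a tiny name set and a low threshold.
    java.util.List<String> names =
        java.util.Arrays.asList("part-00000", "part-00001", "part-00000");
    NameCache<String> cache = new NameCache<String>(2);  // promote names used at least twice
    for (String n : names) {
      cache.put(n);                        // tracks use counts until initialized() is called
    }
    cache.initialized();                   // discards the transient use-count map
    String canonical = cache.put("part-00000");  // cached instance, or null if never promoted
    System.out.println(cache.size() + " cached, " + cache.getLookupCount() + " lookups");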

+ * This class must be synchronized externally. + * + * @param name to be added to the cache + */ +class NameCache { + /** + * Class for tracking use count of a name + */ + private class UseCount { + int count; + final K value; // Internal value for the name + + UseCount(final K value) { + count = 1; + this.value = value; + } + + void increment() { + count++; + } + + int get() { + return count; + } + } + + static final Log LOG = LogFactory.getLog(NameCache.class.getName()); + + /** indicates initialization is in progress */ + private boolean initialized = false; + + /** names used more than {@code useThreshold} is added to the cache */ + private final int useThreshold; + + /** of times a cache look up was successful */ + private int lookups = 0; + + /** Cached names */ + final HashMap cache = new HashMap(); + + /** Names and with number of occurrences tracked during initialization */ + Map transientMap = new HashMap(); + + /** + * Constructor + * @param useThreshold names occurring more than this is promoted to the + * cache + */ + NameCache(int useThreshold) { + this.useThreshold = useThreshold; + } + + /** + * Add a given name to the cache or track use count. + * exist. If the name already exists, then the internal value is returned. + * + * @param name name to be looked up + * @return internal value for the name if found; otherwise null + */ + K put(final K name) { + K internal = cache.get(name); + if (internal != null) { + lookups++; + return internal; + } + + // Track the usage count only during initialization + if (!initialized) { + UseCount useCount = transientMap.get(name); + if (useCount != null) { + useCount.increment(); + if (useCount.get() >= useThreshold) { + promote(name); + } + return useCount.value; + } + useCount = new UseCount(name); + transientMap.put(name, useCount); + } + return null; + } + + /** + * Lookup count when a lookup for a name returned cached object + * @return number of successful lookups + */ + int getLookupCount() { + return lookups; + } + + /** + * Size of the cache + * @return Number of names stored in the cache + */ + int size() { + return cache.size(); + } + + /** + * Mark the name cache as initialized. The use count is no longer tracked + * and the transient map used for initializing the cache is discarded to + * save heap space. + */ + void initialized() { + LOG.info("initialized with " + size() + " entries " + lookups + " lookups"); + this.initialized = true; + transientMap.clear(); + transientMap = null; + } + + /** Promote a frequently used name to the cache */ + private void promote(final K name) { + transientMap.remove(name); + cache.put(name, name); + lookups += useThreshold; + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java new file mode 100644 index 0000000..caf161f --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -0,0 +1,1238 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import org.apache.commons.logging.*; + +import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.Trash; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.CorruptFileBlocks; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.hdfs.HDFSPolicyProvider; +import org.apache.hadoop.hdfs.protocol.*; +import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; +import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.CompleteFileStatus; +import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; +import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; +import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; +import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; +import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; +import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand; +import org.apache.hadoop.http.HttpServer; +import org.apache.hadoop.ipc.*; +import org.apache.hadoop.conf.*; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.net.NetworkTopology; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.security.authorize.ConfiguredPolicy; +import org.apache.hadoop.security.authorize.PolicyProvider; +import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol; +import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; + +import java.io.*; +import java.net.*; +import java.util.Collection; +import java.util.Iterator; +import java.util.Set; +import java.util.HashSet; +import java.util.List; +import java.util.ArrayList; + +/********************************************************** + * NameNode serves as both directory namespace manager and + * "inode table" for the Hadoop DFS. There is a single NameNode + * running in any DFS deployment. (Well, except when there + * is a second backup/failover NameNode.) + * + * The NameNode controls two critical tables: + * 1) filename->blocksequence (namespace) + * 2) block->machinelist ("inodes") + * + * The first table is stored on disk and is very precious. + * The second table is rebuilt every time the NameNode comes + * up. + * + * 'NameNode' refers to both this class as well as the 'NameNode server'. + * The 'FSNamesystem' class actually performs most of the filesystem + * management. 
The majority of the 'NameNode' class itself is concerned + * with exposing the IPC interface and the http server to the outside world, + * plus some configuration management. + * + * NameNode implements the ClientProtocol interface, which allows + * clients to ask for DFS services. ClientProtocol is not + * designed for direct use by authors of DFS client code. End-users + * should instead use the org.apache.nutch.hadoop.fs.FileSystem class. + * + * NameNode also implements the DatanodeProtocol interface, used by + * DataNode programs that actually store DFS data blocks. These + * methods are invoked repeatedly and automatically by all the + * DataNodes in a DFS deployment. + * + * NameNode also implements the NamenodeProtocol interface, used by + * secondary namenodes or rebalancing processes to get partial namenode's + * state, for example partial blocksMap etc. + **********************************************************/ +public class NameNode implements ClientProtocol, DatanodeProtocol, + NamenodeProtocol, FSConstants, + RefreshAuthorizationPolicyProtocol { + static{ + Configuration.addDefaultResource("hdfs-default.xml"); + Configuration.addDefaultResource("hdfs-site.xml"); + } + + public long getProtocolVersion(String protocol, + long clientVersion) throws IOException { + InetSocketAddress requestAddress = Server.get().getListenerAddress(); + boolean dnRequest = false, clientRequest = false; + // If dnProtocolAddress is null - there is only one server running + // otherwise check the address of the incoming request. + if (dnProtocolAddress == null || + dnProtocolAddress.equals(requestAddress)) { + dnRequest = true; + } + if (dnProtocolAddress == null || requestAddress.equals(serverAddress)) { + clientRequest = true; + } + if (protocol.equals(ClientProtocol.class.getName())) { + long namenodeVersion = ClientProtocol.versionID; + if (namenodeVersion > clientVersion && + !ProtocolCompatible.isCompatibleClientProtocol( + clientVersion, namenodeVersion)) { + throw new RPC.VersionIncompatible( + protocol, clientVersion, namenodeVersion); + } + return namenodeVersion; + } else if (protocol.equals(DatanodeProtocol.class.getName()) && dnRequest){ + return DatanodeProtocol.versionID; + } else if (protocol.equals(NamenodeProtocol.class.getName()) && clientRequest){ + return NamenodeProtocol.versionID; + } else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName()) && clientRequest){ + return RefreshAuthorizationPolicyProtocol.versionID; + } else { + throw new IOException("Unknown protocol to name node: " + protocol); + } + } + + // The number of handlers to be used to process datanode requests + public static final String DATANODE_PROTOCOL_HANDLERS = + "dfs.namenode.dn-handlers"; + // The address where the server processing datanode requests is running + public static final String DATANODE_PROTOCOL_ADDRESS = + "dfs.namenode.dn-address"; + public static final int DEFAULT_PORT = 8020; + + public static final Log LOG = LogFactory.getLog(NameNode.class.getName()); + public static final Log stateChangeLog = LogFactory.getLog("org.apache.hadoop.hdfs.StateChange"); + public FSNamesystem namesystem; // TODO: This should private. Use getNamesystem() instead. 
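The two properties above allow datanode traffic to be served on its own RPC endpoint. A minimal configuration sketch follows; the host names, port numbers and handler count are placeholders, while the property constants and the NameNode constructor are the ones defined in this file.

    Configuration conf = new Configuration();
    conf.set("fs.default.name", "hdfs://nn.example.com:8020");            // client-facing RPC address
    conf.set(NameNode.DATANODE_PROTOCOL_ADDRESS, "nn.example.com:8021");  // datanode-facing RPC address
    conf.setInt(NameNode.DATANODE_PROTOCOL_HANDLERS, 20);                 // handlers for datanode requests
    NameNode nn = new NameNode(conf);  // getProtocolVersion() then gates each protocol by endpoint
    nn.join();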
+ /** RPC server */ + private Server server; + /** RPC server for datanodes */ + private Server dnProtocolServer; + /** RPC server address */ + private InetSocketAddress serverAddress = null; + /** RPC server for datanodes address */ + private InetSocketAddress dnProtocolAddress = null; + /** httpServer */ + private HttpServer httpServer; + /** HTTP server address */ + private InetSocketAddress httpAddress = null; + private Thread emptier; + /** only used for testing purposes */ + private boolean stopRequested = false; + /** Is service level authorization enabled? */ + private boolean serviceAuthEnabled = false; + /** Active configuration of the namenode */ + private Configuration conf; + + /** Format a new filesystem. Destroys any filesystem that may already + * exist at this location. **/ + public static void format(Configuration conf) throws IOException { + format(conf, false); + } + + static NameNodeMetrics myMetrics; + + public FSNamesystem getNamesystem() { + return namesystem; + } + + public static NameNodeMetrics getNameNodeMetrics() { + return myMetrics; + } + + public static InetSocketAddress getAddress(String address) { + return NetUtils.createSocketAddr(address, DEFAULT_PORT); + } + + /** + * Set the datanode server address property in the configuration + * @param conf Configuration to modify + * @param address the address in the form "hostname:port" + */ + public static void setDNProtocolAddress(Configuration conf, String address) { + conf.set(DATANODE_PROTOCOL_ADDRESS, address); + } + + /** + * Get the configured address of the datanode to run the RPC server + * processing requests from datanodes. Returns the address if it is + * configured, otherwise will return null. + * + * @param conf + * @return the address object or null if it is not configured + */ + public static InetSocketAddress getDNProtocolAddress(Configuration conf) { + String dnAddressString = conf.get(DATANODE_PROTOCOL_ADDRESS); + if (dnAddressString == null || dnAddressString.isEmpty()) + return null; + return getAddress(dnAddressString); + } + + public static InetSocketAddress getAddress(Configuration conf) { + return getAddress(FileSystem.getDefaultUri(conf).getAuthority()); + } + + public static URI getUri(InetSocketAddress namenode) { + int port = namenode.getPort(); + String portString = port == DEFAULT_PORT ? "" : (":"+port); + return URI.create("hdfs://"+ namenode.getHostName()+portString); + } + + /** + * Initialize name-node. 
+ * + * @param conf the configuration + */ + private void initialize(Configuration conf) throws IOException { + this.conf = conf; + + // set service-level authorization security policy + if (serviceAuthEnabled = + conf.getBoolean( + ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) { + PolicyProvider policyProvider = + (PolicyProvider)(ReflectionUtils.newInstance( + conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG, + HDFSPolicyProvider.class, PolicyProvider.class), + conf)); + SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider)); + } + + // This is a check that the port is free + // create a socket and bind to it, throw exception if port is busy + // This has to be done before we are reading Namesystem not to waste time and fail fast + InetSocketAddress clientSocket = NameNode.getAddress(conf); + ServerSocket socket = new ServerSocket(); + socket.bind(clientSocket); + socket.close(); + InetSocketAddress dnSocket = NameNode.getDNProtocolAddress(conf); + if (dnSocket != null) { + socket = new ServerSocket(); + socket.bind(dnSocket); + socket.close(); + //System.err.println("Tested " + dnSocket); + } + + + myMetrics = new NameNodeMetrics(conf, this); + + this.namesystem = new FSNamesystem(this, conf); + this.startDNServer(); + startHttpServer(conf); + } + + private void startTrashEmptier(Configuration conf) throws IOException { + this.emptier = new Thread(new Trash(conf).getEmptier(), "Trash Emptier"); + this.emptier.setDaemon(true); + this.emptier.start(); + } + + private void startHttpServer(Configuration conf) throws IOException { + String infoAddr = + NetUtils.getServerAddress(conf, "dfs.info.bindAddress", + "dfs.info.port", "dfs.http.address"); + InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr); + String infoHost = infoSocAddr.getHostName(); + int infoPort = infoSocAddr.getPort(); + this.httpServer = new HttpServer("hdfs", infoHost, infoPort, + infoPort == 0, conf); + if (conf.getBoolean("dfs.https.enable", false)) { + boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false); + InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get( + "dfs.https.address", infoHost + ":" + 0)); + Configuration sslConf = new Configuration(false); + sslConf.addResource(conf.get("dfs.https.server.keystore.resource", + "ssl-server.xml")); + this.httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth); + // assume same ssl port for all datanodes + InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.get( + "dfs.datanode.https.address", infoHost + ":" + 50475)); + this.httpServer.setAttribute("datanode.https.port", datanodeSslPort + .getPort()); + } + this.httpServer.setAttribute("name.node", this); + this.httpServer.setAttribute("name.node.address", getNameNodeAddress()); + this.httpServer.setAttribute("name.system.image", getFSImage()); + this.httpServer.setAttribute("name.conf", conf); + this.httpServer.addInternalServlet("nnconf", "/nnconf", NameNodeConfServlet.class); + this.httpServer.addInternalServlet("fsck", "/fsck", FsckServlet.class); + this.httpServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class); + this.httpServer.addInternalServlet("listPaths", "/listPaths/*", ListPathsServlet.class); + this.httpServer.addInternalServlet("data", "/data/*", FileDataServlet.class); + this.httpServer.addInternalServlet("checksum", "/fileChecksum/*", + FileChecksumServlets.RedirectServlet.class); + this.httpServer.start(); + + // The web-server port can be ephemeral... 
ensure we have the correct info + infoPort = this.httpServer.getPort(); + this.httpAddress = new InetSocketAddress(infoHost, infoPort); + conf.set("dfs.http.address", infoHost + ":" + infoPort); + LOG.info("Web-server up at: " + infoHost + ":" + infoPort); + } + + /** + * Start NameNode. + *

+ * The name-node can be started with one of the following startup options:
+ * <ul>
+ * <li>{@link StartupOption#REGULAR REGULAR} - normal name node startup</li>
+ * <li>{@link StartupOption#FORMAT FORMAT} - format name node</li>
+ * <li>{@link StartupOption#UPGRADE UPGRADE} - start the cluster
+ * upgrade and create a snapshot of the current file system state</li>
+ * <li>{@link StartupOption#ROLLBACK ROLLBACK} - roll the
+ * cluster back to the previous state</li>
+ * </ul>
+ * The option is passed via configuration field: + * dfs.namenode.startup + * + * The conf will be modified to reflect the actual ports on which + * the NameNode is up and running if the user passes the port as + * zero in the conf. + * + * @param conf confirguration + * @throws IOException + */ + public NameNode(Configuration conf) throws IOException { + try { + initialize(conf); + } catch (IOException e) { + this.stop(); + throw e; + } + } + + /** + * Wait for service to finish. + * (Normally, it runs forever.) + */ + public void join() { + try { + if (this.dnProtocolServer != null) { + this.dnProtocolServer.join(); + } + if (this.server != null) { + this.server.join(); + } + } catch (InterruptedException ie) { + } + } + + public void startServerForClientRequests() throws IOException { + if (this.server == null) { + InetSocketAddress socAddr = NameNode.getAddress(conf); + int handlerCount = conf.getInt("dfs.namenode.handler.count", 10); + + // create rpc server + this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(), + handlerCount, false, conf); + // The rpc-server port can be ephemeral... ensure we have the correct info + this.serverAddress = this.server.getListenerAddress(); + FileSystem.setDefaultUri(conf, getUri(serverAddress)); + if (this.httpServer != null) { + // This means the server is being started once out of safemode + // and jetty is initialized already + this.httpServer.setAttribute("name.node.address", getNameNodeAddress()); + } + LOG.info("Namenode up at: " + this.serverAddress); + + + this.server.start(); + startTrashEmptier(conf); + } + } + + public void startDNServer() throws IOException { + InetSocketAddress dnAddr = NameNode.getDNProtocolAddress(conf); + int handlerCount = conf.getInt("dfs.namenode.handler.count", 10); + + if (dnAddr != null) { + int dnHandlerCount = conf.getInt(DATANODE_PROTOCOL_HANDLERS, handlerCount); + this.dnProtocolServer = RPC.getServer(this, dnAddr.getHostName(), + dnAddr.getPort(), dnHandlerCount, false, conf); + this.dnProtocolAddress = dnProtocolServer.getListenerAddress(); + NameNode.setDNProtocolAddress(conf, + dnProtocolAddress.getHostName() + ":" + dnProtocolAddress.getPort()); + LOG.info("Datanodes endpoint is up at: " + this.dnProtocolAddress); + } + + if (this.dnProtocolServer != null) { + this.dnProtocolServer.start(); + + } else { + this.startServerForClientRequests(); + } + } + + /** + * Stop all NameNode threads and wait for all to finish. 
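Before the shutdown path below, a rough sketch of how a caller drives this lifecycle end to end; the startup option chosen is arbitrary, and createNameNode, join and stop are the methods of this class.

    Configuration conf = new Configuration();
    String[] argv = { StartupOption.UPGRADE.getName() };  // recorded under dfs.namenode.startup
    NameNode nn = NameNode.createNameNode(argv, conf);    // null means the usage message was printed
    if (nn != null) {
      try {
        nn.join();   // normally blocks for the lifetime of the RPC servers
      } finally {
        nn.stop();   // safe to call more than once; guarded by stopRequested
      }
    }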
+ */ + public void stop() { + if (stopRequested) + return; + stopRequested = true; + try { + if (httpServer != null) httpServer.stop(); + } catch (Exception e) { + LOG.error(StringUtils.stringifyException(e)); + } + if(namesystem != null) namesystem.close(); + if(emptier != null) emptier.interrupt(); + if(server != null) server.stop(); + if (dnProtocolServer != null) dnProtocolServer.stop(); + if (myMetrics != null) { + myMetrics.shutdown(); + } + if (namesystem != null) { + namesystem.shutdown(); + } + } + + ///////////////////////////////////////////////////// + // NamenodeProtocol + ///////////////////////////////////////////////////// + /** + * return a list of blocks & their locations on datanode whose + * total size is size + * + * @param datanode on which blocks are located + * @param size total size of blocks + */ + public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size) + throws IOException { + if(size <= 0) { + throw new IllegalArgumentException( + "Unexpected not positive size: "+size); + } + + return namesystem.getBlocks(datanode, size); + } + + ///////////////////////////////////////////////////// + // ClientProtocol + ///////////////////////////////////////////////////// + /** {@inheritDoc} */ + public LocatedBlocks getBlockLocations(String src, + long offset, + long length) throws IOException { + myMetrics.numGetBlockLocations.inc(); + return namesystem.getBlockLocations(getClientMachine(), + src, offset, length); + } + + private static String getClientMachine() { + String clientMachine = Server.getRemoteAddress(); + if (clientMachine == null) { + clientMachine = ""; + } + return clientMachine; + } + + /** {@inheritDoc} */ + public void create(String src, + FsPermission masked, + String clientName, + boolean overwrite, + short replication, + long blockSize + ) throws IOException { + String clientMachine = getClientMachine(); + if (stateChangeLog.isDebugEnabled()) { + stateChangeLog.debug("*DIR* NameNode.create: file " + +src+" for "+clientName+" at "+clientMachine); + } + if (!checkPathLength(src)) { + throw new IOException("create: Pathname too long. 
Limit " + + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels."); + } + namesystem.startFile(src, + new PermissionStatus(UserGroupInformation.getCurrentUGI().getUserName(), + null, masked), + clientName, clientMachine, overwrite, replication, blockSize); + myMetrics.numFilesCreated.inc(); + myMetrics.numCreateFileOps.inc(); + } + + /** {@inheritDoc} */ + public LocatedBlock append(String src, String clientName) throws IOException { + String clientMachine = getClientMachine(); + if (stateChangeLog.isDebugEnabled()) { + stateChangeLog.debug("*DIR* NameNode.append: file " + +src+" for "+clientName+" at "+clientMachine); + } + LocatedBlock info = namesystem.appendFile(src, clientName, clientMachine); + myMetrics.numFilesAppended.inc(); + return info; + } + + /** {@inheritDoc} */ + public boolean setReplication(String src, + short replication + ) throws IOException { + boolean value = namesystem.setReplication(src, replication); + if (value) { + myMetrics.numSetReplication.inc(); + } + return value; + } + + /** {@inheritDoc} */ + public void setPermission(String src, FsPermission permissions + ) throws IOException { + namesystem.setPermission(src, permissions); + myMetrics.numSetPermission.inc(); + } + + /** {@inheritDoc} */ + public void setOwner(String src, String username, String groupname + ) throws IOException { + namesystem.setOwner(src, username, groupname); + myMetrics.numSetOwner.inc(); + } + + /** + */ + public LocatedBlock addBlock(String src, + String clientName) throws IOException { + stateChangeLog.debug("*BLOCK* NameNode.addBlock: file " + +src+" for "+clientName); + LocatedBlock locatedBlock = namesystem.getAdditionalBlock(src, clientName); + if (locatedBlock != null) + myMetrics.numAddBlockOps.inc(); + return locatedBlock; + } + + @Override + public LocatedBlock addBlock(String src, String clientName, + DatanodeInfo[] excludedNodes) + throws IOException { + return addBlock(src, clientName); + } + + /** + * The client needs to give up on the block. + */ + public void abandonBlock(Block b, String src, String holder + ) throws IOException { + stateChangeLog.debug("*BLOCK* NameNode.abandonBlock: " + +b+" of file "+src); + if (!namesystem.abandonBlock(b, src, holder)) { + throw new IOException("Cannot abandon block during write to " + src); + } + myMetrics.numAbandonBlock.inc(); + } + + /** {@inheritDoc} */ + public boolean complete(String src, String clientName) throws IOException { + stateChangeLog.debug("*DIR* NameNode.complete: " + src + " for " + clientName); + CompleteFileStatus returnCode = namesystem.completeFile(src, clientName); + if (returnCode == CompleteFileStatus.STILL_WAITING) { + return false; + } else if (returnCode == CompleteFileStatus.COMPLETE_SUCCESS) { + myMetrics.numCompleteFile.inc(); + return true; + } else { + throw new IOException("Could not complete write to file " + src + " by " + clientName); + } + } + + /** + * The client has detected an error on the specified located blocks + * and is reporting them to the server. For now, the namenode will + * mark the block as corrupt. In the future we might + * check the blocks are actually corrupt. 
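A client-side sketch of the reporting call just described; the path and the namenode proxy variable are illustrative, and the block picked is simply the first one returned.

    // "namenode" stands in for a ClientProtocol proxy such as the one DFSClient holds.
    LocatedBlocks lbs = namenode.getBlockLocations("/user/test/demo.txt", 0, Long.MAX_VALUE);
    LocatedBlock suspect = lbs.getLocatedBlocks().get(0);  // e.g. a replica that failed checksum checks
    namenode.reportBadBlocks(new LocatedBlock[] { suspect });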
+ */ + public void reportBadBlocks(LocatedBlock[] blocks) throws IOException { + stateChangeLog.info("*DIR* NameNode.reportBadBlocks"); + myMetrics.numReportBadBlocks.inc(); + for (int i = 0; i < blocks.length; i++) { + Block blk = blocks[i].getBlock(); + DatanodeInfo[] nodes = blocks[i].getLocations(); + for (int j = 0; j < nodes.length; j++) { + DatanodeInfo dn = nodes[j]; + namesystem.markBlockAsCorrupt(blk, dn); + } + } + } + + /** {@inheritDoc} */ + public long nextGenerationStamp(Block block) throws IOException{ + myMetrics.numNextGenerationStamp.inc(); + return namesystem.nextGenerationStampForBlock(block); + } + + /** {@inheritDoc} */ + public void commitBlockSynchronization(Block block, + long newgenerationstamp, long newlength, + boolean closeFile, boolean deleteblock, DatanodeID[] newtargets + ) throws IOException { + namesystem.commitBlockSynchronization(block, + newgenerationstamp, newlength, closeFile, deleteblock, newtargets); + } + + public long getPreferredBlockSize(String filename) throws IOException { + return namesystem.getPreferredBlockSize(filename); + } + + /** + * {@inheritDoc} + */ + public void concat(String trg, String[] src) throws IOException { + namesystem.concat(trg, src); + } + + /** + */ + public boolean rename(String src, String dst) throws IOException { + stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst); + if (!checkPathLength(dst)) { + throw new IOException("rename: Pathname too long. Limit " + + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels."); + } + boolean ret = namesystem.renameTo(src, dst); + if (ret) { + myMetrics.numFilesRenamed.inc(); + } + return ret; + } + + /** + */ + @Deprecated + public boolean delete(String src) throws IOException { + return delete(src, true); + } + + /** {@inheritDoc} */ + public boolean delete(String src, boolean recursive) throws IOException { + if (stateChangeLog.isDebugEnabled()) { + stateChangeLog.debug("*DIR* Namenode.delete: src=" + src + + ", recursive=" + recursive); + } + boolean ret = namesystem.delete(src, recursive); + if (ret) + myMetrics.numDeleteFileOps.inc(); + return ret; + } + + /** + * Check path length does not exceed maximum. Returns true if + * length and depth are okay. Returns false if length is too long + * or depth is too great. + * + */ + private boolean checkPathLength(String src) { + Path srcPath = new Path(src); + return (src.length() <= MAX_PATH_LENGTH && + srcPath.depth() <= MAX_PATH_DEPTH); + } + + /** {@inheritDoc} */ + public boolean mkdirs(String src, FsPermission masked) throws IOException { + stateChangeLog.debug("*DIR* NameNode.mkdirs: " + src); + if (!checkPathLength(src)) { + throw new IOException("mkdirs: Pathname too long. 
Limit " + + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels."); + } + boolean value = namesystem.mkdirs(src, + new PermissionStatus(UserGroupInformation.getCurrentUGI().getUserName(), + null, masked)); + if (value) { + myMetrics.numMkdirs.inc(); + } + return value; + } + + /** + */ + public void renewLease(String clientName) throws IOException { + myMetrics.numRenewLease.inc(); + namesystem.renewLease(clientName); + } + + /** + */ + public FileStatus[] getListing(String src) throws IOException { + FileStatus[] files = namesystem.getListing(src); + if (files != null) { + myMetrics.numGetListingOps.inc(); + } + return files; + } + + @Override + public HdfsFileStatus[] getHdfsListing(String src) throws IOException { + HdfsFileStatus[] files = namesystem.getHdfsListing(src); + if (files != null) { + myMetrics.numGetListingOps.inc(); + } + return files; + } + + @Override + public DirectoryListing getPartialListing(String src, byte[] startAfter) + throws IOException { + DirectoryListing files = namesystem.getPartialListing( + src, startAfter, false); + if (files != null) { + myMetrics.numGetListingOps.inc(); + } + return files; + } + + @Override + public LocatedDirectoryListing getLocatedPartialListing( + String src, byte[] startAfter) + throws IOException { + DirectoryListing files = namesystem.getPartialListing( + src, startAfter, true); + if (files != null) { + myMetrics.numGetListingOps.inc(); + } + return (LocatedDirectoryListing)files; + } + + /** + * Get the file info for a specific file. + * @param src The string representation of the path to the file + * @throws IOException if permission to access file is denied by the system + * @return object containing information regarding the file + * or null if file not found + */ + public FileStatus getFileInfo(String src) throws IOException { + myMetrics.numFileInfoOps.inc(); + return namesystem.getFileInfo(src); + } + + @Override + public HdfsFileStatus getHdfsFileInfo(String src) throws IOException { + HdfsFileStatus value = namesystem.getHdfsFileInfo(src); + myMetrics.numFileInfoOps.inc(); + return value; + } + + /** @inheritDoc */ + public long[] getStats() throws IOException { + return namesystem.getStats(); + } + + /** + */ + public DatanodeInfo[] getDatanodeReport(DatanodeReportType type) + throws IOException { + DatanodeInfo results[] = namesystem.datanodeReport(type); + if (results == null ) { + throw new IOException("Cannot find datanode report"); + } + return results; + } + + /** + * @inheritDoc + */ + public boolean setSafeMode(SafeModeAction action) throws IOException { + return namesystem.setSafeMode(action); + } + + /** + * Is the cluster currently in safe mode? + */ + public boolean isInSafeMode() { + return namesystem.isInSafeMode(); + } + + /** + * @inheritDoc + */ + public void saveNamespace() throws IOException { + namesystem.saveNamespace(); + myMetrics.numSaveNamespace.inc(); + } + + /** + * Refresh the list of datanodes that the namenode should allow to + * connect. Re-reads conf by creating new Configuration object and + * uses the files list in the configuration to update the list. + */ + public void refreshNodes() throws IOException { + namesystem.refreshNodes(new Configuration()); + myMetrics.numRefreshNodes.inc(); + } + + /** + * Returns the size of the current edit log. + */ + public long getEditLogSize() throws IOException { + return namesystem.getEditLogSize(); + } + + /** + * Roll the edit log. 
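For context, a sketch of where this call sits in a checkpoint handshake as a secondary namenode would drive it; the proxy variable is illustrative and the image transfer over /getimage is reduced to a comment.

    long editsSize = namenode.getEditLogSize();        // decide whether a checkpoint is worthwhile
    CheckpointSignature sig = namenode.rollEditLog();  // namenode starts writing a fresh edits file
    // ... fetch fsimage and edits through the /getimage servlet and merge them locally ...
    namenode.rollFsImage(sig);                         // namenode installs the merged image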
+ */ + public CheckpointSignature rollEditLog() throws IOException { + return namesystem.rollEditLog(); + } + + /** + * Roll the image + */ + @Override + public void rollFsImage(CheckpointSignature newImageSignature) throws IOException { + namesystem.rollFSImage(newImageSignature); + } + + public void finalizeUpgrade() throws IOException { + namesystem.finalizeUpgrade(); + } + + public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action + ) throws IOException { + return namesystem.distributedUpgradeProgress(action); + } + + /** + * Dumps namenode state into specified file + */ + public void metaSave(String filename) throws IOException { + namesystem.metaSave(filename); + } + + /** + * {@inheritDoc} + * + * implement old API for backwards compatibility + */ + @Override @Deprecated + public FileStatus[] getCorruptFiles() throws IOException { + CorruptFileBlocks corruptFileBlocks = listCorruptFileBlocks("/", null); + Set filePaths = new HashSet(); + for (String file : corruptFileBlocks.getFiles()) { + filePaths.add(file); + } + + List fileStatuses = new ArrayList(filePaths.size()); + for (String f: filePaths) { + FileStatus fs = getFileInfo(f); + if (fs != null) + LOG.info("found fs for " + f); + else + LOG.info("found no fs for " + f); + fileStatuses.add(fs); + } + return fileStatuses.toArray(new FileStatus[fileStatuses.size()]); + } + + /** + * {@inheritDoc} + */ + @Override + public CorruptFileBlocks + listCorruptFileBlocks(String path, + String cookie) + throws IOException { + Collection fbs = + namesystem.listCorruptFileBlocks(path, cookie); + + String[] files = new String[fbs.size()]; + String lastCookie = ""; + int i = 0; + for(FSNamesystem.CorruptFileBlockInfo fb: fbs) { + files[i++] = fb.path; + lastCookie = fb.block.getBlockName(); + } + return new CorruptFileBlocks(files, lastCookie); + } + +/** {@inheritDoc} */ + public ContentSummary getContentSummary(String path) throws IOException { + return namesystem.getContentSummary(path); + } + + /** {@inheritDoc} */ + public void setQuota(String path, long namespaceQuota, long diskspaceQuota) + throws IOException { + namesystem.setQuota(path, namespaceQuota, diskspaceQuota); + myMetrics.numSetQuota.inc(); + } + + /** {@inheritDoc} */ + public void fsync(String src, String clientName) throws IOException { + namesystem.fsync(src, clientName); + myMetrics.numFsync.inc(); + } + + /** @inheritDoc */ + public void setTimes(String src, long mtime, long atime) throws IOException { + namesystem.setTimes(src, mtime, atime); + myMetrics.numSetTimes.inc(); + } + + //////////////////////////////////////////////////////////////// + // DatanodeProtocol + //////////////////////////////////////////////////////////////// + /** + */ + public DatanodeRegistration register(DatanodeRegistration nodeReg + ) throws IOException { + verifyVersion(nodeReg.getVersion()); + namesystem.registerDatanode(nodeReg); + myMetrics.numRegister.inc(); + + return nodeReg; + } + + /** + * Data node notify the name node that it is alive + * Return an array of block-oriented commands for the datanode to execute. + * This will be either a transfer or a delete operation. 
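A datanode-side sketch of the heartbeat loop this method serves; the registration argument, the capacity figures and the loop flag are placeholders, and error handling is omitted.

    DatanodeRegistration reg = namenode.register(initialRegistration);  // one-time handshake
    while (shouldRun) {
      DatanodeCommand[] cmds = namenode.sendHeartbeat(reg,
          capacityBytes, dfsUsedBytes, remainingBytes,
          xmitsInProgress, xceiverCount);
      // cmds, when non-null, ask this datanode to transfer (replicate) or delete blocks
    }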
+ */ + public DatanodeCommand[] sendHeartbeat(DatanodeRegistration nodeReg, + long capacity, + long dfsUsed, + long remaining, + int xmitsInProgress, + int xceiverCount) throws IOException { + verifyRequest(nodeReg); + myMetrics.numHeartbeat.inc(); + return namesystem.handleHeartbeat(nodeReg, capacity, dfsUsed, remaining, + xceiverCount, xmitsInProgress); + } + + public DatanodeCommand blockReport(DatanodeRegistration nodeReg, + long[] blocks) throws IOException { + verifyRequest(nodeReg); + myMetrics.numBlockReport.inc(); + BlockListAsLongs blist = new BlockListAsLongs(blocks); + stateChangeLog.debug("*BLOCK* NameNode.blockReport: " + +"from "+nodeReg.getName()+" "+blist.getNumberOfBlocks() +" blocks"); + + namesystem.processReport(nodeReg, blist); + if (getFSImage().isUpgradeFinalized()) + return DatanodeCommand.FINALIZE; + return null; + } + + public void blockReceived(DatanodeRegistration nodeReg, + Block blocks[], + String delHints[]) throws IOException { + verifyRequest(nodeReg); + myMetrics.numBlockReceived.inc(); + stateChangeLog.debug("*BLOCK* NameNode.blockReceived: " + +"from "+nodeReg.getName()+" "+blocks.length+" blocks."); + for (int i = 0; i < blocks.length; i++) { + namesystem.blockReceived(nodeReg, blocks[i], delHints[i]); + } + } + + /** + */ + public void errorReport(DatanodeRegistration nodeReg, + int errorCode, + String msg) throws IOException { + // Log error message from datanode + String dnName = (nodeReg == null ? "unknown DataNode" : nodeReg.getName()); + LOG.info("Error report from " + dnName + ": " + msg); + if (errorCode == DatanodeProtocol.NOTIFY) { + return; + } + verifyRequest(nodeReg); + if (errorCode == DatanodeProtocol.DISK_ERROR) { + LOG.warn("Volume failed on " + dnName); + } else if (errorCode == DatanodeProtocol.FATAL_DISK_ERROR) { + namesystem.removeDatanode(nodeReg); + } + } + + public NamespaceInfo versionRequest() throws IOException { + myMetrics.numVersionRequest.inc(); + return namesystem.getNamespaceInfo(); + } + + public UpgradeCommand processUpgradeCommand(UpgradeCommand comm) throws IOException { + return namesystem.processDistributedUpgradeCommand(comm); + } + + /** + * Verify request. + * + * Verifies correctness of the datanode version, registration ID, and + * if the datanode does not need to be shutdown. + * + * @param nodeReg data node registration + * @throws IOException + */ + public void verifyRequest(DatanodeRegistration nodeReg) throws IOException { + verifyVersion(nodeReg.getVersion()); + if (!namesystem.getRegistrationID().equals(nodeReg.getRegistrationID())) + throw new UnregisteredDatanodeException(nodeReg); + myMetrics.numVersionRequest.inc(); + } + + /** + * Verify version. + * + * @param version + * @throws IOException + */ + public void verifyVersion(int version) throws IOException { + if (version != LAYOUT_VERSION) + throw new IncorrectVersionException(version, "data node"); + } + + /** + * Returns the name of the fsImage file + */ + public File getFsImageName() throws IOException { + return getFSImage().getFsImageName(); + } + + public FSImage getFSImage() { + return namesystem.dir.fsImage; + } + + public Configuration getConf() { + return conf; + } + /** + * Returns the name of the fsImage file uploaded by periodic + * checkpointing + */ + public File[] getFsImageNameCheckpoint() throws IOException { + return getFSImage().getFsImageNameCheckpoint(); + } + + /** + * Returns the address on which the NameNodes is listening to. + * @return the address on which the NameNodes is listening to. 
+ */ + public InetSocketAddress getNameNodeAddress() { + return serverAddress; + } + + public InetSocketAddress getNameNodeDNAddress() { + if (dnProtocolAddress == null) { + return serverAddress; + } + return dnProtocolAddress; + } + + /** + * Returns the address of the NameNodes http server, + * which is used to access the name-node web UI. + * + * @return the http address. + */ + public InetSocketAddress getHttpAddress() { + return httpAddress; + } + + NetworkTopology getNetworkTopology() { + return this.namesystem.clusterMap; + } + + /** + * Verify that configured directories exist, then + * Interactively confirm that formatting is desired + * for each existing directory and format them. + * + * @param conf + * @param isConfirmationNeeded + * @return true if formatting was aborted, false otherwise + * @throws IOException + */ + private static boolean format(Configuration conf, + boolean isConfirmationNeeded + ) throws IOException { + boolean allowFormat = conf.getBoolean("dfs.namenode.support.allowformat", + true); + if (!allowFormat) { + throw new IOException("The option dfs.namenode.support.allowformat is " + + "set to false for this filesystem, so it " + + "cannot be formatted. You will need to set " + + "dfs.namenode.support.allowformat parameter " + + "to true in order to format this filesystem"); + } + Collection dirsToFormat = FSNamesystem.getNamespaceDirs(conf); + Collection editDirsToFormat = + FSNamesystem.getNamespaceEditsDirs(conf); + for(Iterator it = dirsToFormat.iterator(); it.hasNext();) { + File curDir = it.next(); + if (!curDir.exists()) + continue; + if (isConfirmationNeeded) { + System.err.print("Re-format filesystem in " + curDir +" ? (Y or N) "); + if (!(System.in.read() == 'Y')) { + System.err.println("Format aborted in "+ curDir); + return true; + } + while(System.in.read() != '\n'); // discard the enter-key + } + } + + FSNamesystem nsys = new FSNamesystem(new FSImage(dirsToFormat, + editDirsToFormat), conf); + nsys.dir.fsImage.format(); + return false; + } + + private static boolean finalize(Configuration conf, + boolean isConfirmationNeeded + ) throws IOException { + Collection dirsToFormat = FSNamesystem.getNamespaceDirs(conf); + Collection editDirsToFormat = + FSNamesystem.getNamespaceEditsDirs(conf); + FSNamesystem nsys = new FSNamesystem(new FSImage(dirsToFormat, + editDirsToFormat), conf); + System.err.print( + "\"finalize\" will remove the previous state of the files system.\n" + + "Recent upgrade will become permanent.\n" + + "Rollback option will not be available anymore.\n"); + if (isConfirmationNeeded) { + System.err.print("Finalize filesystem state ? (Y or N) "); + if (!(System.in.read() == 'Y')) { + System.err.println("Finalize aborted."); + return true; + } + while(System.in.read() != '\n'); // discard the enter-key + } + nsys.dir.fsImage.finalizeUpgrade(); + return false; + } + + @Override + public void refreshServiceAcl() throws IOException { + if (!serviceAuthEnabled) { + throw new AuthorizationException("Service Level Authorization not enabled!"); + } + + SecurityUtil.getPolicy().refresh(); + } + + private static void printUsage() { + System.err.println( + "Usage: java NameNode [" + + StartupOption.FORMAT.getName() + "] | [" + + StartupOption.UPGRADE.getName() + "] | [" + + StartupOption.ROLLBACK.getName() + "] | [" + + StartupOption.FINALIZE.getName() + "] | [" + + StartupOption.IMPORT.getName() + "]"); + } + + private static StartupOption parseArguments(String args[]) { + int argsLen = (args == null) ? 
0 : args.length; + StartupOption startOpt = StartupOption.REGULAR; + for(int i=0; i < argsLen; i++) { + String cmd = args[i]; + if (StartupOption.FORMAT.getName().equalsIgnoreCase(cmd)) { + startOpt = StartupOption.FORMAT; + } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) { + startOpt = StartupOption.REGULAR; + } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) { + startOpt = StartupOption.UPGRADE; + } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) { + startOpt = StartupOption.ROLLBACK; + } else if (StartupOption.FINALIZE.getName().equalsIgnoreCase(cmd)) { + startOpt = StartupOption.FINALIZE; + } else if (StartupOption.IMPORT.getName().equalsIgnoreCase(cmd)) { + startOpt = StartupOption.IMPORT; + } else + return null; + } + return startOpt; + } + + private static void setStartupOption(Configuration conf, StartupOption opt) { + conf.set("dfs.namenode.startup", opt.toString()); + } + + static StartupOption getStartupOption(Configuration conf) { + return StartupOption.valueOf(conf.get("dfs.namenode.startup", + StartupOption.REGULAR.toString())); + } + + public static NameNode createNameNode(String argv[], + Configuration conf) throws IOException { + if (conf == null) + conf = new Configuration(); + StartupOption startOpt = parseArguments(argv); + if (startOpt == null) { + printUsage(); + return null; + } + setStartupOption(conf, startOpt); + + switch (startOpt) { + case FORMAT: + boolean aborted = format(conf, true); + System.exit(aborted ? 1 : 0); + case FINALIZE: + aborted = finalize(conf, true); + System.exit(aborted ? 1 : 0); + default: + } + + NameNode namenode = new NameNode(conf); + return namenode; + } + + /** + */ + public static void main(String argv[]) throws Exception { + try { + StringUtils.startupShutdownMessage(NameNode.class, argv, LOG); + NameNode namenode = createNameNode(argv, null); + if (namenode != null) + namenode.join(); + } catch (Throwable e) { + LOG.error(StringUtils.stringifyException(e)); + System.exit(-1); + } + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeConfServlet.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeConfServlet.java new file mode 100644 index 0000000..2793bae --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeConfServlet.java @@ -0,0 +1,117 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; +import java.io.PrintWriter; +import java.util.Arrays; + +import javax.servlet.ServletContext; +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.hadoop.conf.Configuration; + +public class NameNodeConfServlet extends HttpServlet { + + NameNode nn = null; + Configuration nnConf = null; + private static long lastId = 0; // Used to generate unique element IDs + + @Override + public void init() throws ServletException { + super.init(); + ServletContext context = this.getServletContext(); + nnConf = (Configuration) context.getAttribute("name.conf"); + nn = (NameNode) context.getAttribute("name.node"); + } + + @Override + protected void doGet(HttpServletRequest req, HttpServletResponse resp) + throws ServletException, IOException { + if (req.getParameter("setPersistBlocks") != null) { + this.nn.getNamesystem().setPersistBlocks( + req.getParameter("setPersistBlocks").equals("ON") ? true : false); + resp.sendRedirect("/nnconf"); + return; + } + if (req.getParameter("setPermissionAuditLog") != null) { + this.nn.getNamesystem().setPermissionAuditLog( + req.getParameter("setPermissionAuditLog").equals("ON") ? true : false); + resp.sendRedirect("/nnconf"); + return; + } + PrintWriter out = resp.getWriter(); + String hostname = this.nn.getNameNodeAddress().toString(); + out.print(""); + out.printf("%s NameNode Admininstration\n", hostname); + out.print("\n"); + out.print("\n"); + out.printf("

%s " +
+ "NameNode Configuration Admin\n", hostname);
+ showOptions(out);
+ out.print("\n");
+ }
+
+ @Override
+ protected void doPost(HttpServletRequest req, HttpServletResponse resp)
+ throws ServletException, IOException {
+ doGet(req, resp);
+ }
+
+ private void showOptions(PrintWriter out) {
+ out.print("Persist blocks to edits log on allocation\n");
+ out.printf("Persisting Blocks:%s",
+ generateSelect(Arrays.asList("ON,OFF".split(",")), this.nn
+ .getNamesystem().getPersistBlocks() ? "ON" : "OFF",
+ "/nnconf?setPersistBlocks=<CHOICE>"));
+ out.print("Enable permission audit log\n");
+ out.printf("

Permission Audit Log Blocks:%s", + generateSelect(Arrays.asList("ON,OFF".split(",")), this.nn + .getNamesystem().getPermissionAuditLog() ? "ON" : "OFF", + "/nnconf?setPermissionAuditLog=")); + } + + /** + * Generate a HTML select control with a given list of choices and a given + * option selected. When the selection is changed, take the user to the + * submitUrl. The submitUrl can be made to include + * the option selected -- the first occurrence of the substring + * <CHOICE> will be replaced by the option chosen. + */ + private String generateSelect(Iterable choices, + String selectedChoice, String submitUrl) { + StringBuilder html = new StringBuilder(); + String id = "select" + lastId++; + html.append("\n"); + return html.toString(); + } + +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java new file mode 100644 index 0000000..0587b0a --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java @@ -0,0 +1,868 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; +import java.io.OutputStream; +import java.io.PrintWriter; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.TreeSet; +import java.util.Collection; +import javax.servlet.http.HttpServletResponse; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.net.NodeBase; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.hdfs.DFSClient; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.fs.permission.PermissionStatus; + +/** + * This class provides rudimentary checking of DFS volumes for errors and + * sub-optimal conditions. + *

<p>The tool scans all files and directories, starting from an indicated
+ * root path. The following abnormal conditions are detected and handled:</p>
+ * <ul>
+ * <li>files with blocks that are completely missing from all datanodes.
+ * In this case the tool can perform one of the following actions:
+ * <ul>
+ * <li>none ({@link #FIXING_NONE})</li>
+ * <li>move corrupted files to /lost+found directory on DFS
+ * ({@link #FIXING_MOVE}). Remaining data blocks are saved as
+ * block chains, representing longest consecutive series of valid blocks.</li>
+ * <li>delete corrupted files ({@link #FIXING_DELETE})</li>
+ * </ul>
+ * </li>
+ * <li>detect files with under-replicated or over-replicated blocks</li>
+ * </ul>
+ * Additionally, the tool collects a detailed overall DFS statistics, and + * optionally can print detailed statistics on block locations and replication + * factors of each file. + */ +public class NamenodeFsck { + public static final Log LOG = LogFactory.getLog(NameNode.class.getName()); + + // return string marking fsck status + public static final String CORRUPT_STATUS = "is CORRUPT"; + public static final String HEALTHY_STATUS = "is HEALTHY"; + public static final String NONEXISTENT_STATUS = "does not exist"; + public static final String FAILURE_STATUS = "FAILED"; + + /** Don't attempt any fixing . */ + public static final int FIXING_NONE = 0; + /** Move corrupted files to /lost+found . */ + public static final int FIXING_MOVE = 1; + /** Delete corrupted files. */ + public static final int FIXING_DELETE = 2; + + private NameNode nn; + private String lostFound = null; + private boolean lfInited = false; + private boolean lfInitedOk = false; + private boolean showFiles = false; + private boolean showOpenFiles = false; + private boolean showBlocks = false; + private boolean showLocations = false; + private boolean showRacks = false; + private boolean showCorruptFileBlocks = false; + private int fixing = FIXING_NONE; + private String path = "/"; + + // We return back N files that are corrupt; the list of files returned is + // ordered by block id; to allow continuation support, pass in the last block + // # from previous call + private String startBlockAfter = null; + + private Configuration conf; + private PrintWriter out; + + /** + * Filesystem checker. + * @param conf configuration (namenode config) + * @param nn namenode that this fsck is going to use + * @param pmap key=value[] map that is passed to the http servlet as url parameters + * @param response the object into which this servelet writes the url contents + * @throws IOException + */ + public NamenodeFsck(Configuration conf, + NameNode nn, + Map pmap, + HttpServletResponse response) throws IOException { + this.conf = conf; + this.nn = nn; + this.out = response.getWriter(); + for (Iterator it = pmap.keySet().iterator(); it.hasNext();) { + String key = it.next(); + if (key.equals("path")) { this.path = pmap.get("path")[0]; } + else if (key.equals("move")) { this.fixing = FIXING_MOVE; } + else if (key.equals("delete")) { this.fixing = FIXING_DELETE; } + else if (key.equals("files")) { this.showFiles = true; } + else if (key.equals("blocks")) { this.showBlocks = true; } + else if (key.equals("locations")) { this.showLocations = true; } + else if (key.equals("racks")) { this.showRacks = true; } + else if (key.equals("openforwrite")) {this.showOpenFiles = true; } + else if (key.equals("listcorruptfileblocks")) { + this.showCorruptFileBlocks = true; + } + else if (key.equals("startblockafter")) { + this.startBlockAfter = pmap.get("startblockafter")[0]; + } + } + } + + /** + * Check files on DFS, starting from the indicated path. 
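This checker is normally reached over HTTP through the /fsck servlet registered on the namenode web server, so a minimal client sketch looks like the following; the host, port and path are placeholders, and the query keys mirror the ones parsed in the constructor above.

    java.net.URL fsckUrl = new java.net.URL(
        "http://nn.example.com:50070/fsck?path=/user/test&files=1&blocks=1&locations=1");
    java.io.BufferedReader in = new java.io.BufferedReader(
        new java.io.InputStreamReader(fsckUrl.openStream()));
    for (String line; (line = in.readLine()) != null; ) {
      System.out.println(line);  // the report ends with "... is HEALTHY" or "... is CORRUPT"
    }
    in.close();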
+ * @throws Exception + */ + public void fsck() throws IOException { + try { + FileStatus[] files = nn.namesystem.dir.getListing(path); + FsckResult res = new FsckResult(); + res.totalRacks = nn.getNetworkTopology().getNumOfRacks(); + res.totalDatanodes = nn.namesystem.getNumberOfDatanodes( + DatanodeReportType.LIVE); + res.setReplication((short) conf.getInt("dfs.replication", 3)); + if (files != null) { + if (showCorruptFileBlocks && showOpenFiles) { + listCorruptOpenFiles(); + + return; + } + + if (showCorruptFileBlocks) { + listCorruptFileBlocks(); + return; + } + + for (int i = 0; i < files.length; i++) { + check(files[i], res); + } + out.println(res); + // DFSck client scans for the string HEALTHY/CORRUPT to check the status + // of file system and return appropriate code. Changing the output + // string might break testcases. + if (res.isHealthy()) { + out.print("\n\nThe filesystem under path '" + path + "' " + HEALTHY_STATUS); + } else { + out.print("\n\nThe filesystem under path '" + path + "' " + CORRUPT_STATUS); + } + } else { + out.print("\n\nPath '" + path + "' " + NONEXISTENT_STATUS); + } + } catch (Exception e) { + String errMsg = "Fsck on path '" + path + "' " + FAILURE_STATUS; + LOG.warn(errMsg, e); + out.println(e.getMessage()); + out.print("\n\n" + errMsg); + } finally { + out.close(); + } + } + + static String buildSummaryResultForListCorruptFiles(int corruptFilesCount, + String pathName) { + + String summary = ""; + + if (corruptFilesCount == 0) { + summary = "Unable to locate any corrupt files under '" + pathName + + "'.\n\nPlease run a complete fsck to confirm if '" + pathName + + "' " + HEALTHY_STATUS; + } else if (corruptFilesCount == 1) { + summary = "There is at least 1 corrupt file under '" + pathName + + "', which " + CORRUPT_STATUS; + } else if (corruptFilesCount > 1) { + summary = "There are at least " + corruptFilesCount + + " corrupt files under '" + pathName + "', which " + CORRUPT_STATUS; + } else { + throw new IllegalArgumentException("corruptFilesCount must be positive"); + } + + return summary; + } + + private void listCorruptFileBlocks() throws AccessControlException, + IOException { + Collection corruptFiles = nn + .getNamesystem().listCorruptFileBlocks(path, startBlockAfter); + int numCorruptFiles = corruptFiles.size(); + String filler; + if (numCorruptFiles > 0) { + filler = Integer.toString(numCorruptFiles); + } else if (startBlockAfter == null) { + filler = "no"; + } else { + filler = "no more"; + } + for (FSNamesystem.CorruptFileBlockInfo c : corruptFiles) { + out.println(c.toString()); + } + out.println("\n\nThe filesystem under path '" + path + "' has " + filler + + " CORRUPT files"); + out.println(); + } + + + private void listCorruptOpenFiles() throws IOException { + int matchedCorruptFilesCount = 0; + // directory representation of path + String pathdir = path.endsWith(Path.SEPARATOR) ? 
path : path + Path.SEPARATOR; + FileStatus pathFileStatus = nn.getNamesystem().getFileInfo(pathdir); + List corruptFileStatusList = new ArrayList(); + checkForCorruptOpenFiles(pathFileStatus, corruptFileStatusList); + + for (FileStatus fileStatus : corruptFileStatusList) { + String currentPath = fileStatus.getPath().toString(); + if (currentPath.startsWith(pathdir) || currentPath.equals(path)) { + matchedCorruptFilesCount++; + + // print the header before listing first item + if (matchedCorruptFilesCount == 1 ) { + out.println("Here are a few files that may be corrupted:"); + out.println("==========================================="); + } + + out.println(currentPath); + } + } + + out.println(); + out.println(buildSummaryResultForListCorruptFiles(matchedCorruptFilesCount, + path)); + + } + + private void checkForCorruptOpenFiles( + FileStatus file, List corruptFiles + ) throws IOException { + String filePath = file.getPath().toUri().getPath(); + + if (file.isDir()) { + for (FileStatus fileStatus : nn.namesystem.dir.getListing(filePath)) { + checkForCorruptOpenFiles(fileStatus, corruptFiles); + } + + } else { + LeaseManager.Lease lease = + nn.getNamesystem().leaseManager.getLeaseByPath(filePath); + // Condition: + // 1. lease has expired hard limit + // 2. the file is open for write + // 3. the last block has 0 locations + if (lease != null && lease.expiredHardLimit()) { + LocatedBlocks blocks = + nn.getNamesystem().getBlockLocations(filePath, 0, file.getLen()); + List locatedBlockList = blocks.getLocatedBlocks(); + LocatedBlock lastBlock = + locatedBlockList.get(locatedBlockList.size() - 1); + + if (blocks.isUnderConstruction() && lastBlock.getLocations().length == 0) { + corruptFiles.add(file); + } + } + } + } + + private void check(FileStatus file, FsckResult res) throws IOException { + int minReplication = nn.namesystem.getMinReplication(); + String path = file.getPath().toString(); + boolean isOpen = false; + + if (file.isDir()) { + FileStatus[] files = nn.namesystem.dir.getListing(path); + if (files == null) { + return; + } + if (showFiles) { + out.println(path + " "); + } + res.totalDirs++; + for (int i = 0; i < files.length; i++) { + check(files[i], res); + } + return; + } + long fileLen = file.getLen(); + LocatedBlocks blocks = nn.namesystem.getBlockLocations(path, 0, fileLen); + if (blocks == null) { // the file is deleted + return; + } + isOpen = blocks.isUnderConstruction(); + if (isOpen && !showOpenFiles) { + // We collect these stats about open files to report with default options + res.totalOpenFilesSize += fileLen; + res.totalOpenFilesBlocks += blocks.locatedBlockCount(); + res.totalOpenFiles++; + return; + } + res.totalFiles++; + res.totalSize += fileLen; + res.totalBlocks += blocks.locatedBlockCount(); + if (showOpenFiles && isOpen) { + out.print(path + " " + fileLen + " bytes, " + + blocks.locatedBlockCount() + " block(s), OPENFORWRITE: "); + } else if (showFiles) { + out.print(path + " " + fileLen + " bytes, " + + blocks.locatedBlockCount() + " block(s): "); + } else { + out.print('.'); + } + if (res.totalFiles % 100 == 0) { out.println(); out.flush(); } + int missing = 0; + int corrupt = 0; + long missize = 0; + int underReplicatedPerFile = 0; + int misReplicatedPerFile = 0; + StringBuffer report = new StringBuffer(); + int i = 0; + for (LocatedBlock lBlk : blocks.getLocatedBlocks()) { + Block block = lBlk.getBlock(); + boolean isCorrupt = lBlk.isCorrupt(); + String blkName = block.toString(); + DatanodeInfo[] locs = lBlk.getLocations(); + res.totalReplicas += 
locs.length; + short targetFileReplication = file.getReplication(); + if (locs.length > targetFileReplication) { + res.excessiveReplicas += (locs.length - targetFileReplication); + res.numOverReplicatedBlocks += 1; + } + // Check if block is Corrupt + if (isCorrupt) { + corrupt++; + res.corruptBlocks++; + out.print("\n" + path + ": CORRUPT block " + block.getBlockName()+"\n"); + } + if (locs.length >= minReplication) + res.numMinReplicatedBlocks++; + if (locs.length < targetFileReplication && locs.length > 0) { + res.missingReplicas += (targetFileReplication - locs.length); + res.numUnderReplicatedBlocks += 1; + underReplicatedPerFile++; + if (!showFiles) { + out.print("\n" + path + ": "); + } + out.println(" Under replicated " + block + + ". Target Replicas is " + + targetFileReplication + " but found " + + locs.length + " replica(s)."); + } + // verify block placement policy + int missingRacks = BlockPlacementPolicy.getInstance(conf, null, nn.getNetworkTopology()). + verifyBlockPlacement(path, lBlk, Math.min(2,targetFileReplication)); + + if (missingRacks > 0) { + res.numMisReplicatedBlocks++; + misReplicatedPerFile++; + if (!showFiles) { + if(underReplicatedPerFile == 0) + out.println(); + out.print(path + ": "); + } + out.println(" Replica placement policy is violated for " + + block + + ". Block should be additionally replicated on " + + missingRacks + " more rack(s)."); + } + report.append(i + ". " + blkName + " len=" + block.getNumBytes()); + if (locs.length == 0) { + report.append(" MISSING!"); + res.addMissing(block.toString(), block.getNumBytes()); + missing++; + missize += block.getNumBytes(); + } else { + report.append(" repl=" + locs.length); + if (showLocations || showRacks) { + StringBuffer sb = new StringBuffer("["); + for (int j = 0; j < locs.length; j++) { + if (j > 0) { sb.append(", "); } + if (showRacks) + sb.append(NodeBase.getPath(locs[j])); + else + sb.append(locs[j]); + } + sb.append(']'); + report.append(" " + sb.toString()); + } + } + report.append('\n'); + i++; + } + if ((missing > 0) || (corrupt > 0)) { + if (!showFiles && (missing > 0)) { + out.print("\n" + path + ": MISSING " + missing + + " blocks of total size " + missize + " B."); + } + res.corruptFiles++; + switch(fixing) { + case FIXING_NONE: + break; + case FIXING_MOVE: + if (!isOpen) + lostFoundMove(file, blocks); + break; + case FIXING_DELETE: + if (!isOpen) + nn.namesystem.deleteInternal(path, false); + } + } + if (showFiles) { + if (missing > 0) { + out.print(" MISSING " + missing + " blocks of total size " + missize + " B\n"); + } else if (underReplicatedPerFile == 0 && misReplicatedPerFile == 0) { + out.print(" OK\n"); + } + if (showBlocks) { + out.print(report.toString() + "\n"); + } + } + } + + private void lostFoundMove(FileStatus file, LocatedBlocks blocks) + throws IOException { + final DFSClient dfs = new DFSClient(NameNode.getAddress(conf), conf); + try { + if (!lfInited) { + lostFoundInit(dfs); + } + if (!lfInitedOk) { + return; + } + String target = lostFound + file.getPath(); + String errmsg = "Failed to move " + file.getPath() + " to /lost+found"; + try { + PermissionStatus ps = new PermissionStatus( + file.getOwner(), file.getGroup(), file.getPermission()); + if (!nn.namesystem.dir.mkdirs(target, ps, false, FSNamesystem.now())) { + LOG.warn(errmsg); + return; + } + // create chains + int chain = 0; + OutputStream fos = null; + for (LocatedBlock lBlk : blocks.getLocatedBlocks()) { + LocatedBlock lblock = lBlk; + DatanodeInfo[] locs = lblock.getLocations(); + if (locs == null || 
locs.length == 0) { + if (fos != null) { + fos.flush(); + fos.close(); + fos = null; + } + continue; + } + if (fos == null) { + fos = dfs.create(target + "/" + chain, true); + if (fos != null) chain++; + else { + LOG.warn(errmsg + ": could not store chain " + chain); + // perhaps we should bail out here... + // return; + continue; + } + } + + // copy the block. It's a pity it's not abstracted from DFSInputStream ... + try { + copyBlock(dfs, lblock, fos); + } catch (Exception e) { + e.printStackTrace(); + // something went wrong copying this block... + LOG.warn(" - could not copy block " + lblock.getBlock() + " to " + target); + fos.flush(); + fos.close(); + fos = null; + } + } + if (fos != null) fos.close(); + LOG.warn("\n - moved corrupted file " + file.getPath() + " to /lost+found"); + dfs.delete(file.getPath().toString(), true); + } catch (Exception e) { + e.printStackTrace(); + LOG.warn(errmsg + ": " + e.getMessage()); + } + } finally { + dfs.close(); + } + } + + /* + * XXX (ab) Bulk of this method is copied verbatim from {@link DFSClient}, which is + * bad. Both places should be refactored to provide a method to copy blocks + * around. + */ + private void copyBlock(DFSClient dfs, LocatedBlock lblock, + OutputStream fos) throws Exception { + int failures = 0; + InetSocketAddress targetAddr = null; + TreeSet deadNodes = new TreeSet(); + Socket s = null; + DFSClient.BlockReader blockReader = null; + Block block = lblock.getBlock(); + + while (s == null) { + DatanodeInfo chosenNode; + + try { + chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes); + targetAddr = NetUtils.createSocketAddr(chosenNode.getName()); + } catch (IOException ie) { + if (failures >= DFSClient.MAX_BLOCK_ACQUIRE_FAILURES) { + throw new IOException("Could not obtain block " + lblock); + } + LOG.info("Could not obtain block from any node: " + ie); + try { + Thread.sleep(10000); + } catch (InterruptedException iex) { + } + deadNodes.clear(); + failures++; + continue; + } + try { + s = new Socket(); + s.connect(targetAddr, HdfsConstants.READ_TIMEOUT); + s.setSoTimeout(HdfsConstants.READ_TIMEOUT); + + blockReader = + DFSClient.BlockReader.newBlockReader(s, targetAddr.toString() + ":" + + block.getBlockId(), + block.getBlockId(), + block.getGenerationStamp(), + 0, -1, + conf.getInt("io.file.buffer.size", 4096)); + + } catch (IOException ex) { + // Put chosen node into dead list, continue + LOG.info("Failed to connect to " + targetAddr + ":" + ex); + deadNodes.add(chosenNode); + if (s != null) { + try { + s.close(); + } catch (IOException iex) { + } + } + s = null; + } + } + if (blockReader == null) { + throw new Exception("Could not open data stream for " + lblock.getBlock()); + } + byte[] buf = new byte[1024]; + int cnt = 0; + boolean success = true; + long bytesRead = 0; + try { + while ((cnt = blockReader.read(buf, 0, buf.length)) > 0) { + fos.write(buf, 0, cnt); + bytesRead += cnt; + } + if ( bytesRead != block.getNumBytes() ) { + throw new IOException("Recorded block size is " + block.getNumBytes() + + ", but datanode returned " +bytesRead+" bytes"); + } + } catch (Exception e) { + e.printStackTrace(); + success = false; + } finally { + try {s.close(); } catch (Exception e1) {} + } + if (!success) + throw new Exception("Could not copy block data for " + lblock.getBlock()); + } + + /* + * XXX (ab) See comment above for copyBlock(). + * + * Pick the best node from which to stream the data. + * That's the local one, if available. 
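+ *
+ * As implemented below, "best" simply means a randomly chosen replica
+ * location that is not in the dead-node set; locality is not actually
+ * consulted.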
+ */ + Random r = new Random(); + private DatanodeInfo bestNode(DFSClient dfs, DatanodeInfo[] nodes, + TreeSet deadNodes) throws IOException { + if ((nodes == null) || + (nodes.length - deadNodes.size() < 1)) { + throw new IOException("No live nodes contain current block"); + } + DatanodeInfo chosenNode; + do { + chosenNode = nodes[r.nextInt(nodes.length)]; + } while (deadNodes.contains(chosenNode)); + return chosenNode; + } + + private void lostFoundInit(DFSClient dfs) { + lfInited = true; + try { + String lfName = "/lost+found"; + // check that /lost+found exists + if (!dfs.exists(lfName)) { + lfInitedOk = dfs.mkdirs(lfName); + lostFound = lfName; + } else if (!dfs.isDirectory(lfName)) { + LOG.warn("Cannot use /lost+found : a regular file with this name exists."); + lfInitedOk = false; + } else { // exists and isDirectory + lostFound = lfName; + lfInitedOk = true; + } + } catch (Exception e) { + e.printStackTrace(); + lfInitedOk = false; + } + if (lostFound == null) { + LOG.warn("Cannot initialize /lost+found ."); + lfInitedOk = false; + } + } + + /** + * @param args + */ + public int run(String[] args) throws Exception { + + return 0; + } + + /** + * FsckResult of checking, plus overall DFS statistics. + * + */ + public static class FsckResult { + private ArrayList missingIds = new ArrayList(); + private long missingSize = 0L; + private long corruptFiles = 0L; + private long corruptBlocks = 0L; + private long excessiveReplicas = 0L; + private long missingReplicas = 0L; + private long numOverReplicatedBlocks = 0L; + private long numUnderReplicatedBlocks = 0L; + private long numMisReplicatedBlocks = 0L; // blocks that do not satisfy block placement policy + private long numMinReplicatedBlocks = 0L; // minimally replicatedblocks + private int replication = 0; + private long totalBlocks = 0L; + private long totalOpenFilesBlocks = 0L; + private long totalFiles = 0L; + private long totalOpenFiles = 0L; + private long totalDirs = 0L; + private long totalSize = 0L; + private long totalOpenFilesSize = 0L; + private long totalReplicas = 0L; + private int totalDatanodes = 0; + private int totalRacks = 0; + + /** + * DFS is considered healthy if there are no missing blocks. + */ + public boolean isHealthy() { + return ((missingIds.size() == 0) && (corruptBlocks == 0)); + } + + /** Add a missing block name, plus its size. */ + public void addMissing(String id, long size) { + missingIds.add(id); + missingSize += size; + } + + /** Return a list of missing block names (as list of Strings). */ + public ArrayList getMissingIds() { + return missingIds; + } + + /** Return total size of missing data, in bytes. */ + public long getMissingSize() { + return missingSize; + } + + public void setMissingSize(long missingSize) { + this.missingSize = missingSize; + } + + /** Return the number of over-replicated blocks. */ + public long getExcessiveReplicas() { + return excessiveReplicas; + } + + public void setExcessiveReplicas(long overReplicatedBlocks) { + this.excessiveReplicas = overReplicatedBlocks; + } + + /** Return the actual replication factor. */ + public float getReplicationFactor() { + if (totalBlocks == 0) + return 0.0f; + return (float) (totalReplicas) / (float) totalBlocks; + } + + /** Return the number of under-replicated blocks. 
Note: missing blocks are not counted here.*/ + public long getMissingReplicas() { + return missingReplicas; + } + + public void setMissingReplicas(long underReplicatedBlocks) { + this.missingReplicas = underReplicatedBlocks; + } + + /** Return total number of directories encountered during this scan. */ + public long getTotalDirs() { + return totalDirs; + } + + public void setTotalDirs(long totalDirs) { + this.totalDirs = totalDirs; + } + + /** Return total number of files encountered during this scan. */ + public long getTotalFiles() { + return totalFiles; + } + + public void setTotalFiles(long totalFiles) { + this.totalFiles = totalFiles; + } + + /** Return total number of files opened for write encountered during this scan. */ + public long getTotalOpenFiles() { + return totalOpenFiles; + } + + /** Set total number of open files encountered during this scan. */ + public void setTotalOpenFiles(long totalOpenFiles) { + this.totalOpenFiles = totalOpenFiles; + } + + /** Return total size of scanned data, in bytes. */ + public long getTotalSize() { + return totalSize; + } + + public void setTotalSize(long totalSize) { + this.totalSize = totalSize; + } + + /** Return total size of open files data, in bytes. */ + public long getTotalOpenFilesSize() { + return totalOpenFilesSize; + } + + public void setTotalOpenFilesSize(long totalOpenFilesSize) { + this.totalOpenFilesSize = totalOpenFilesSize; + } + + /** Return the intended replication factor, against which the over/under- + * replicated blocks are counted. Note: this values comes from the current + * Configuration supplied for the tool, so it may be different from the + * value in DFS Configuration. + */ + public int getReplication() { + return replication; + } + + public void setReplication(int replication) { + this.replication = replication; + } + + /** Return the total number of blocks in the scanned area. */ + public long getTotalBlocks() { + return totalBlocks; + } + + public void setTotalBlocks(long totalBlocks) { + this.totalBlocks = totalBlocks; + } + + /** Return the total number of blocks held by open files. */ + public long getTotalOpenFilesBlocks() { + return totalOpenFilesBlocks; + } + + public void setTotalOpenFilesBlocks(long totalOpenFilesBlocks) { + this.totalOpenFilesBlocks = totalOpenFilesBlocks; + } + + public String toString() { + StringBuffer res = new StringBuffer(); + res.append("Status: " + (isHealthy() ? "HEALTHY" : "CORRUPT")); + res.append("\n Total size:\t" + totalSize + " B"); + if (totalOpenFilesSize != 0) + res.append(" (Total open files size: " + totalOpenFilesSize + " B)"); + res.append("\n Total dirs:\t" + totalDirs); + res.append("\n Total files:\t" + totalFiles); + if (totalOpenFiles != 0) + res.append(" (Files currently being written: " + + totalOpenFiles + ")"); + res.append("\n Total blocks (validated):\t" + totalBlocks); + if (totalBlocks > 0) res.append(" (avg. 
block size " + + (totalSize / totalBlocks) + " B)"); + if (totalOpenFilesBlocks != 0) + res.append(" (Total open file blocks (not validated): " + + totalOpenFilesBlocks + ")"); + if (corruptFiles > 0) { + res.append("\n ********************************"); + res.append("\n CORRUPT FILES:\t" + corruptFiles); + if (missingSize > 0) { + res.append("\n MISSING BLOCKS:\t" + missingIds.size()); + res.append("\n MISSING SIZE:\t\t" + missingSize + " B"); + } + if (corruptBlocks > 0) { + res.append("\n CORRUPT BLOCKS: \t" + corruptBlocks); + } + res.append("\n ********************************"); + } + res.append("\n Minimally replicated blocks:\t" + numMinReplicatedBlocks); + if (totalBlocks > 0) res.append(" (" + ((float) (numMinReplicatedBlocks * 100) / (float) totalBlocks) + " %)"); + res.append("\n Over-replicated blocks:\t" + numOverReplicatedBlocks); + if (totalBlocks > 0) res.append(" (" + ((float) (numOverReplicatedBlocks * 100) / (float) totalBlocks) + " %)"); + res.append("\n Under-replicated blocks:\t" + numUnderReplicatedBlocks); + if (totalBlocks > 0) res.append(" (" + ((float) (numUnderReplicatedBlocks * 100) / (float) totalBlocks) + " %)"); + res.append("\n Mis-replicated blocks:\t\t" + numMisReplicatedBlocks); + if (totalBlocks > 0) res.append(" (" + ((float) (numMisReplicatedBlocks * 100) / (float) totalBlocks) + " %)"); + res.append("\n Default replication factor:\t" + replication); + res.append("\n Average block replication:\t" + getReplicationFactor()); + res.append("\n Corrupt blocks:\t\t" + corruptBlocks); + res.append("\n Missing replicas:\t\t" + missingReplicas); + if (totalReplicas > 0) res.append(" (" + ((float) (missingReplicas * 100) / (float) totalReplicas) + " %)"); + res.append("\n Number of data-nodes:\t\t" + totalDatanodes); + res.append("\n Number of racks:\t\t" + totalRacks); + return res.toString(); + } + + /** Return the number of currupted files. */ + public long getCorruptFiles() { + return corruptFiles; + } + + public void setCorruptFiles(long corruptFiles) { + this.corruptFiles = corruptFiles; + } + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NotReplicatedYetException.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NotReplicatedYetException.java new file mode 100644 index 0000000..910cdf2 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NotReplicatedYetException.java @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; + +/** + * The file has not finished being written to enough datanodes yet. 
+ */ +public class NotReplicatedYetException extends IOException { + public NotReplicatedYetException(String msg) { + super(msg); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/PendingReplicationBlocks.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/PendingReplicationBlocks.java new file mode 100644 index 0000000..dc1f066 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/PendingReplicationBlocks.java @@ -0,0 +1,251 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.util.*; +import java.io.*; +import java.util.*; +import java.sql.Time; + +/*************************************************** + * PendingReplicationBlocks does the bookkeeping of all + * blocks that are getting replicated. + * + * It does the following: + * 1) record blocks that are getting replicated at this instant. + * 2) a coarse grain timer to track age of replication request + * 3) a thread that periodically identifies replication-requests + * that never made it. + * + ***************************************************/ +class PendingReplicationBlocks { + private Map pendingReplications; + private ArrayList timedOutItems; + Daemon timerThread = null; + private volatile boolean fsRunning = true; + + // + // It might take anywhere between 5 to 10 minutes before + // a request is timed out. + // + private long timeout = 5 * 60 * 1000; + private long defaultRecheckInterval = 5 * 60 * 1000; + + PendingReplicationBlocks(long timeoutPeriod) { + if ( timeoutPeriod > 0 ) { + this.timeout = timeoutPeriod; + } + init(); + } + + PendingReplicationBlocks() { + init(); + } + + void init() { + pendingReplications = new HashMap(); + timedOutItems = new ArrayList(); + this.timerThread = new Daemon(new PendingReplicationMonitor()); + timerThread.start(); + } + + /** + * Add a block to the list of pending Replications + */ + void add(Block block, int numReplicas) { + synchronized (pendingReplications) { + PendingBlockInfo found = pendingReplications.get(block); + if (found == null) { + pendingReplications.put(block, new PendingBlockInfo(numReplicas)); + } else { + found.incrementReplicas(numReplicas); + found.setTimeStamp(); + } + } + } + + /** + * One replication request for this block has finished. + * Decrement the number of pending replication requests + * for this block. 
+ */ + void remove(Block block) { + synchronized (pendingReplications) { + PendingBlockInfo found = pendingReplications.get(block); + if (found != null) { + FSNamesystem.LOG.debug("Removing pending replication for block" + block); + found.decrementReplicas(); + if (found.getNumReplicas() <= 0) { + pendingReplications.remove(block); + } + } + } + } + + /** + * The total number of blocks that are undergoing replication + */ + int size() { + return pendingReplications.size(); + } + + /** + * How many copies of this block is pending replication? + */ + int getNumReplicas(Block block) { + synchronized (pendingReplications) { + PendingBlockInfo found = pendingReplications.get(block); + if (found != null) { + return found.getNumReplicas(); + } + } + return 0; + } + + /** + * Returns a list of blocks that have timed out their + * replication requests. Returns null if no blocks have + * timed out. + */ + Block[] getTimedOutBlocks() { + synchronized (timedOutItems) { + if (timedOutItems.size() <= 0) { + return null; + } + Block[] blockList = timedOutItems.toArray( + new Block[timedOutItems.size()]); + timedOutItems.clear(); + return blockList; + } + } + + /** + * An object that contains information about a block that + * is being replicated. It records the timestamp when the + * system started replicating the most recent copy of this + * block. It also records the number of replication + * requests that are in progress. + */ + static class PendingBlockInfo { + private long timeStamp; + private int numReplicasInProgress; + + PendingBlockInfo(int numReplicas) { + this.timeStamp = FSNamesystem.now(); + this.numReplicasInProgress = numReplicas; + } + + long getTimeStamp() { + return timeStamp; + } + + void setTimeStamp() { + timeStamp = FSNamesystem.now(); + } + + void incrementReplicas(int increment) { + numReplicasInProgress += increment; + } + + void decrementReplicas() { + numReplicasInProgress--; + assert(numReplicasInProgress >= 0); + } + + int getNumReplicas() { + return numReplicasInProgress; + } + } + + /* + * A periodic thread that scans for blocks that never finished + * their replication request. + */ + class PendingReplicationMonitor implements Runnable { + public void run() { + while (fsRunning) { + long period = Math.min(defaultRecheckInterval, timeout); + try { + pendingReplicationCheck(); + Thread.sleep(period); + } catch (InterruptedException ie) { + FSNamesystem.LOG.debug( + "PendingReplicationMonitor thread received exception. " + ie); + } + } + } + + /** + * Iterate through all items and detect timed-out items + */ + void pendingReplicationCheck() { + synchronized (pendingReplications) { + Iterator iter = pendingReplications.entrySet().iterator(); + long now = FSNamesystem.now(); + FSNamesystem.LOG.debug("PendingReplicationMonitor checking Q"); + while (iter.hasNext()) { + Map.Entry entry = (Map.Entry) iter.next(); + PendingBlockInfo pendingBlock = (PendingBlockInfo) entry.getValue(); + if (now > pendingBlock.getTimeStamp() + timeout) { + Block block = (Block) entry.getKey(); + synchronized (timedOutItems) { + timedOutItems.add(block); + } + FSNamesystem.LOG.warn( + "PendingReplicationMonitor timed out block " + block); + iter.remove(); + } + } + } + } + } + + /* + * Shuts down the pending replication monitor thread. + * Waits for the thread to exit. + */ + void stop() { + fsRunning = false; + timerThread.interrupt(); + try { + timerThread.join(3000); + } catch (InterruptedException ie) { + } + } + + /** + * Iterate through all items and print them. 
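+ *
+ * For illustration, the output is a header line followed by one line per
+ * pending block, roughly:
+ * <pre>
+ *   Metasave: Blocks being replicated: &lt;count&gt;
+ *   &lt;block&gt; StartTime: &lt;time&gt; NumReplicaInProgress: &lt;replicas&gt;
+ * </pre>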
+ */ + void metaSave(PrintWriter out) { + synchronized (pendingReplications) { + out.println("Metasave: Blocks being replicated: " + + pendingReplications.size()); + Iterator iter = pendingReplications.entrySet().iterator(); + while (iter.hasNext()) { + Map.Entry entry = (Map.Entry) iter.next(); + PendingBlockInfo pendingBlock = (PendingBlockInfo) entry.getValue(); + Block block = (Block) entry.getKey(); + out.println(block + + " StartTime: " + new Time(pendingBlock.timeStamp) + + " NumReplicaInProgress: " + + pendingBlock.numReplicasInProgress); + } + } + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/PermissionChecker.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/PermissionChecker.java new file mode 100644 index 0000000..768d5b9 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/PermissionChecker.java @@ -0,0 +1,179 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.util.*; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.permission.*; +import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; + +/** Perform permission checking in {@link FSNamesystem}. */ +class PermissionChecker { + static final Log LOG = LogFactory.getLog(UserGroupInformation.class); + + final String user; + private final Set groups = new HashSet(); + final boolean isSuper; + + PermissionChecker(String fsOwner, String supergroup + ) throws AccessControlException{ + UserGroupInformation ugi = UserGroupInformation.getCurrentUGI(); + if (LOG.isDebugEnabled()) { + LOG.debug("ugi=" + ugi); + } + + if (ugi != null) { + user = ugi.getUserName(); + groups.addAll(Arrays.asList(ugi.getGroupNames())); + isSuper = user.equals(fsOwner) || groups.contains(supergroup); + } + else { + throw new AccessControlException("ugi = null"); + } + } + + boolean containsGroup(String group) {return groups.contains(group);} + + /** + * Check whether current user have permissions to access the path. + * Traverse is always checked. + * + * Parent path means the parent directory for the path. + * Ancestor path means the last (the closest) existing ancestor directory + * of the path. + * Note that if the parent path exists, + * then the parent path and the ancestor path are the same. + * + * For example, suppose the path is "/foo/bar/baz". + * No matter baz is a file or a directory, + * the parent path is "/foo/bar". + * If bar exists, then the ancestor path is also "/foo/bar". + * If bar does not exist and foo exists, + * then the ancestor path is "/foo". 
+ * Further, if both foo and bar do not exist, + * then the ancestor path is "/". + * + * @param doCheckOwner Require user to be the owner of the path? + * @param ancestorAccess The access required by the ancestor of the path. + * @param parentAccess The access required by the parent of the path. + * @param access The access required by the path. + * @param subAccess If path is a directory, + * it is the access required of the path and all the sub-directories. + * If path is not a directory, there is no effect. + * @return a PermissionChecker object which caches data for later use. + * @throws AccessControlException + */ + void checkPermission(String path, INodeDirectory root, boolean doCheckOwner, + FsAction ancestorAccess, FsAction parentAccess, FsAction access, + FsAction subAccess) throws AccessControlException { + if (LOG.isDebugEnabled()) { + LOG.debug("ACCESS CHECK: " + this + + ", doCheckOwner=" + doCheckOwner + + ", ancestorAccess=" + ancestorAccess + + ", parentAccess=" + parentAccess + + ", access=" + access + + ", subAccess=" + subAccess); + } + + synchronized(root) { + INode[] inodes = root.getExistingPathINodes(path); + int ancestorIndex = inodes.length - 2; + for(; ancestorIndex >= 0 && inodes[ancestorIndex] == null; + ancestorIndex--); + checkTraverse(inodes, ancestorIndex); + + if (ancestorAccess != null && inodes.length > 1) { + check(inodes, ancestorIndex, ancestorAccess); + } + if (parentAccess != null && inodes.length > 1) { + check(inodes, inodes.length - 2, parentAccess); + } + if (access != null) { + check(inodes[inodes.length - 1], access); + } + if (subAccess != null) { + checkSubAccess(inodes[inodes.length - 1], subAccess); + } + if (doCheckOwner) { + checkOwner(inodes[inodes.length - 1]); + } + } + } + + private void checkOwner(INode inode) throws AccessControlException { + if (inode != null && user.equals(inode.getUserName())) { + return; + } + throw new AccessControlException("Permission denied"); + } + + private void checkTraverse(INode[] inodes, int last + ) throws AccessControlException { + for(int j = 0; j <= last; j++) { + check(inodes[j], FsAction.EXECUTE); + } + } + + private void checkSubAccess(INode inode, FsAction access + ) throws AccessControlException { + if (inode == null || !inode.isDirectory()) { + return; + } + + Stack directories = new Stack(); + for(directories.push((INodeDirectory)inode); !directories.isEmpty(); ) { + INodeDirectory d = directories.pop(); + check(d, access); + + for(INode child : d.getChildren()) { + if (child.isDirectory()) { + directories.push((INodeDirectory)child); + } + } + } + } + + private void check(INode[] inodes, int i, FsAction access + ) throws AccessControlException { + check(i >= 0? 
inodes[i]: null, access); + } + + private void check(INode inode, FsAction access + ) throws AccessControlException { + if (inode == null) { + return; + } + FsPermission mode = inode.getFsPermission(); + + if (user.equals(inode.getUserName())) { //user class + if (mode.getUserAction().implies(access)) { return; } + } + else if (groups.contains(inode.getGroupName())) { //group class + if (mode.getGroupAction().implies(access)) { return; } + } + else { //other class + if (mode.getOtherAction().implies(access)) { return; } + } + throw new AccessControlException("Permission denied: user=" + user + + ", access=" + access + ", inode=" + inode); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SafeModeException.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SafeModeException.java new file mode 100644 index 0000000..71bcdfb --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SafeModeException.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; + +/** + * This exception is thrown when the name node is in safe mode. + * Client cannot modified namespace until the safe mode is off. + * + */ +public class SafeModeException extends IOException { + + public SafeModeException(String text, FSNamesystem.SafeModeInfo mode ) { + super(text + ". Name node is in safe mode.\n" + mode.getTurnOffTip()); + } + +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java new file mode 100644 index 0000000..3316a4d --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java @@ -0,0 +1,614 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.namenode; + +import org.apache.commons.logging.*; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; +import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; +import org.apache.hadoop.ipc.*; +import org.apache.hadoop.conf.*; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.Daemon; +import org.apache.hadoop.http.HttpServer; +import org.apache.hadoop.net.NetUtils; + +import java.io.*; +import java.net.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; + +import org.apache.hadoop.metrics.jvm.JvmMetrics; + +/********************************************************** + * The Secondary NameNode is a helper to the primary NameNode. + * The Secondary is responsible for supporting periodic checkpoints + * of the HDFS metadata. The current design allows only one Secondary + * NameNode per HDFs cluster. + * + * The Secondary NameNode is a daemon that periodically wakes + * up (determined by the schedule specified in the configuration), + * triggers a periodic checkpoint and then goes back to sleep. + * The Secondary NameNode uses the ClientProtocol to talk to the + * primary NameNode. + * + **********************************************************/ +public class SecondaryNameNode implements Runnable { + + public static final Log LOG = + LogFactory.getLog(SecondaryNameNode.class.getName()); + + private String fsName; + private CheckpointStorage checkpointImage; + + private NamenodeProtocol namenode; + private Configuration conf; + private InetSocketAddress nameNodeAddr; + private volatile boolean shouldRun; + private HttpServer infoServer; + private int infoPort; + private String infoBindAddress; + + private Collection checkpointDirs; + private Collection checkpointEditsDirs; + private long checkpointPeriod; // in seconds + private long checkpointSize; // size (in MB) of current Edit Log + + /** + * Utility class to facilitate junit test error simulation. + */ + static class ErrorSimulator { + private static boolean[] simulation = null; // error simulation events + static void initializeErrorSimulationEvent(int numberOfEvents) { + simulation = new boolean[numberOfEvents]; + for (int i = 0; i < numberOfEvents; i++) { + simulation[i] = false; + } + } + + static boolean getErrorSimulation(int index) { + if(simulation == null) + return false; + assert(index < simulation.length); + return simulation[index]; + } + + static void setErrorSimulation(int index) { + assert(index < simulation.length); + simulation[index] = true; + } + + static void clearErrorSimulation(int index) { + assert(index < simulation.length); + simulation[index] = false; + } + } + + FSImage getFSImage() { + return checkpointImage; + } + + /** + * Create a connection to the primary namenode. + */ + public SecondaryNameNode(Configuration conf) throws IOException { + try { + initialize(conf); + } catch(IOException e) { + shutdown(); + throw e; + } + } + + /** + * Initialize SecondaryNameNode. + */ + private void initialize(Configuration conf) throws IOException { + // initiate Java VM metrics + JvmMetrics.init("SecondaryNameNode", conf.get("session.id")); + + // Create connection to the namenode. 
+ shouldRun = true; + nameNodeAddr = NameNode.getAddress(conf); + + this.conf = conf; + this.namenode = + (NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class, + NamenodeProtocol.versionID, nameNodeAddr, conf); + + // initialize checkpoint directories + fsName = getInfoServer(); + checkpointDirs = FSImage.getCheckpointDirs(conf, + "/tmp/hadoop/dfs/namesecondary"); + checkpointEditsDirs = FSImage.getCheckpointEditsDirs(conf, + "/tmp/hadoop/dfs/namesecondary"); + checkpointImage = new CheckpointStorage(conf); + checkpointImage.recoverCreate(checkpointDirs, checkpointEditsDirs); + + // Initialize other scheduling parameters from the configuration + checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600); + checkpointSize = conf.getLong("fs.checkpoint.size", 4194304); + + // initialize the webserver for uploading files. + String infoAddr = + NetUtils.getServerAddress(conf, + "dfs.secondary.info.bindAddress", + "dfs.secondary.info.port", + "dfs.secondary.http.address"); + InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr); + infoBindAddress = infoSocAddr.getHostName(); + int tmpInfoPort = infoSocAddr.getPort(); + infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort, + tmpInfoPort == 0, conf); + infoServer.setAttribute("name.system.image", checkpointImage); + this.infoServer.setAttribute("name.conf", conf); + infoServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class); + infoServer.start(); + + // The web-server port can be ephemeral... ensure we have the correct info + infoPort = infoServer.getPort(); + conf.set("dfs.secondary.http.address", infoBindAddress + ":" +infoPort); + LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" +infoPort); + LOG.warn("Checkpoint Period :" + checkpointPeriod + " secs " + + "(" + checkpointPeriod/60 + " min)"); + LOG.warn("Log Size Trigger :" + checkpointSize + " bytes " + + "(" + checkpointSize/1024 + " KB)"); + } + + /** + * Shut down this instance of the datanode. + * Returns only after shutdown is complete. + */ + public void shutdown() { + shouldRun = false; + try { + if (infoServer != null) infoServer.stop(); + } catch (Exception e) { + LOG.warn("Exception shutting down SecondaryNameNode", e); + } + try { + if (checkpointImage != null) checkpointImage.close(); + } catch(IOException e) { + LOG.warn(StringUtils.stringifyException(e)); + } + } + + // + // The main work loop + // + public void run() { + + // + // Poll the Namenode (once every 5 minutes) to find the size of the + // pending edit log. + // + long period = 5 * 60; // 5 minutes + long lastCheckpointTime = 0; + if (checkpointPeriod < period) { + period = checkpointPeriod; + } + + while (shouldRun) { + try { + Thread.sleep(1000 * period); + } catch (InterruptedException ie) { + // do nothing + } + if (!shouldRun) { + break; + } + try { + long now = System.currentTimeMillis(); + + long size = namenode.getEditLogSize(); + if (size >= checkpointSize || + now >= lastCheckpointTime + 1000 * checkpointPeriod) { + doCheckpoint(); + lastCheckpointTime = now; + } + } catch (IOException e) { + LOG.error("Exception in doCheckpoint: "); + LOG.error(StringUtils.stringifyException(e)); + e.printStackTrace(); + checkpointImage.imageDigest = null; + } catch (Throwable e) { + LOG.error("Throwable Exception in doCheckpoint: "); + LOG.error(StringUtils.stringifyException(e)); + e.printStackTrace(); + Runtime.getRuntime().exit(-1); + } + } + } + + /** + * Download fsimage and edits + * files from the name-node. 
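+ * The image is only downloaded when the digest in the checkpoint signature
+ * differs from the digest of the image this checkpointer already holds;
+ * the edits file is always downloaded.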
+ * @return true if a new image has been downloaded and needs to be loaded + * @throws IOException + */ + private boolean downloadCheckpointFiles(CheckpointSignature sig + ) throws IOException { + + checkpointImage.cTime = sig.cTime; + checkpointImage.checkpointTime = sig.checkpointTime; + + boolean downloadImage = true; + String fileid; + File[] srcNames; + if (sig.imageDigest.equals(checkpointImage.imageDigest)) { + downloadImage = false; + LOG.info("Image has not changed. Will not download image."); + } else { + // get fsimage + srcNames = checkpointImage.getImageFiles(); + assert srcNames.length > 0 : "No checkpoint targets."; + fileid = "getimage=1"; + TransferFsImage.getFileClient(fsName, fileid, srcNames, false); + checkpointImage.imageDigest = sig.imageDigest; + LOG.info("Downloaded file " + srcNames[0].getName() + " size " + + srcNames[0].length() + " bytes."); + } + // get edits file + fileid = "getedit=1"; + srcNames = checkpointImage.getEditsFiles(); + assert srcNames.length > 0 : "No checkpoint targets."; + TransferFsImage.getFileClient(fsName, fileid, srcNames, false); + LOG.info("Downloaded file " + srcNames[0].getName() + " size " + + srcNames[0].length() + " bytes."); + + checkpointImage.checkpointUploadDone(null); + + return downloadImage; + } + + /** + * Copy the new fsimage into the NameNode + */ + private void putFSImage(CheckpointSignature sig) throws IOException { + String fileid = "putimage=1&port=" + infoPort + + "&machine=" + + InetAddress.getLocalHost().getHostAddress() + + "&token=" + sig.toString(); + LOG.info("Posted URL " + fsName + fileid); + TransferFsImage.getFileClient(fsName, fileid, (File[])null, false); + } + + /** + * Returns the Jetty server that the Namenode is listening on. + */ + private String getInfoServer() throws IOException { + URI fsName = FileSystem.getDefaultUri(conf); + if (!"hdfs".equals(fsName.getScheme())) { + throw new IOException("This is not a DFS"); + } + return NetUtils.getServerAddress(conf, "dfs.info.bindAddress", + "dfs.info.port", "dfs.http.address"); + } + + /** + * Create a new checkpoint + */ + void doCheckpoint() throws IOException { + + LOG.info("Checkpoint starting"); + + // Do the required initialization of the merge work area. + startCheckpoint(); + + // Tell the namenode to start logging transactions in a new edit file + // Retuns a token that would be used to upload the merged image. + CheckpointSignature sig = (CheckpointSignature)namenode.rollEditLog(); + + // error simulation code for junit test + if (ErrorSimulator.getErrorSimulation(0)) { + throw new IOException("Simulating error0 " + + "after creating edits.new"); + } + + boolean loadImage = downloadCheckpointFiles(sig); // Fetch fsimage and edits + doMerge(sig, loadImage); // Do the merge + + // + // Upload the new image into the NameNode. Then tell the Namenode + // to make this new uploaded image as the most current image. + // + putFSImage(sig); + + // error simulation code for junit test + if (ErrorSimulator.getErrorSimulation(1)) { + throw new IOException("Simulating error1 " + + "after uploading new image to NameNode"); + } + + namenode.rollFsImage(new CheckpointSignature(checkpointImage)); + checkpointImage.endCheckpoint(); + + LOG.info("Checkpoint done. 
New Image Size: " + + checkpointImage.getFsImageName().length()); + } + + private void startCheckpoint() throws IOException { + checkpointImage.unlockAll(); + checkpointImage.getEditLog().close(); + checkpointImage.recoverCreate(checkpointDirs, checkpointEditsDirs); + checkpointImage.startCheckpoint(); + } + + /** + * Merge downloaded image and edits and write the new image into + * current storage directory. + */ + private void doMerge(CheckpointSignature sig, boolean loadImage) throws IOException { + FSNamesystem namesystem = + new FSNamesystem(checkpointImage, conf); + assert namesystem.dir.fsImage == checkpointImage; + checkpointImage.doMerge(sig, loadImage); + } + + /** + * @param argv The parameters passed to this program. + * @exception Exception if the filesystem does not exist. + * @return 0 on success, non zero on error. + */ + private int processArgs(String[] argv) throws Exception { + + if (argv.length < 1) { + printUsage(""); + return -1; + } + + int exitCode = -1; + int i = 0; + String cmd = argv[i++]; + + // + // verify that we have enough command line parameters + // + if ("-geteditsize".equals(cmd)) { + if (argv.length != 1) { + printUsage(cmd); + return exitCode; + } + } else if ("-checkpoint".equals(cmd)) { + if (argv.length != 1 && argv.length != 2) { + printUsage(cmd); + return exitCode; + } + if (argv.length == 2 && !"force".equals(argv[i])) { + printUsage(cmd); + return exitCode; + } + } + + exitCode = 0; + try { + if ("-checkpoint".equals(cmd)) { + long size = namenode.getEditLogSize(); + if (size >= checkpointSize || + argv.length == 2 && "force".equals(argv[i])) { + doCheckpoint(); + } else { + System.err.println("EditLog size " + size + " bytes is " + + "smaller than configured checkpoint " + + "size " + checkpointSize + " bytes."); + System.err.println("Skipping checkpoint."); + } + } else if ("-geteditsize".equals(cmd)) { + long size = namenode.getEditLogSize(); + System.out.println("EditLog size is " + size + " bytes"); + } else { + exitCode = -1; + LOG.error(cmd.substring(1) + ": Unknown command"); + printUsage(""); + } + } catch (RemoteException e) { + // + // This is a error returned by hadoop server. Print + // out the first line of the error mesage, ignore the stack trace. + exitCode = -1; + try { + String[] content; + content = e.getLocalizedMessage().split("\n"); + LOG.error(cmd.substring(1) + ": " + + content[0]); + } catch (Exception ex) { + LOG.error(cmd.substring(1) + ": " + + ex.getLocalizedMessage()); + } + } catch (IOException e) { + // + // IO exception encountered locally. + // + exitCode = -1; + LOG.error(cmd.substring(1) + ": " + + e.getLocalizedMessage()); + } finally { + // Does the RPC connection need to be closed? + } + return exitCode; + } + + /** + * Displays format of commands. + * @param cmd The command that is being executed. + */ + private void printUsage(String cmd) { + if ("-geteditsize".equals(cmd)) { + System.err.println("Usage: java SecondaryNameNode" + + " [-geteditsize]"); + } else if ("-checkpoint".equals(cmd)) { + System.err.println("Usage: java SecondaryNameNode" + + " [-checkpoint [force]]"); + } else { + System.err.println("Usage: java SecondaryNameNode " + + "[-checkpoint [force]] " + + "[-geteditsize] "); + } + } + + /** + * main() has some simple utility methods. + * @param argv Command line parameters. + * @exception Exception if the filesystem does not exist. 
+ */ + public static void main(String[] argv) throws Exception { + StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG); + Configuration tconf = new Configuration(); + if (argv.length >= 1) { + SecondaryNameNode secondary = new SecondaryNameNode(tconf); + int ret = secondary.processArgs(argv); + System.exit(ret); + } + + // Create a never ending deamon + Daemon checkpointThread = new Daemon(new SecondaryNameNode(tconf)); + checkpointThread.start(); + } + + static class CheckpointStorage extends FSImage { + /** + */ + CheckpointStorage(Configuration conf) throws IOException { + super(conf); + } + + @Override + public + boolean isConversionNeeded(StorageDirectory sd) { + return false; + } + + /** + * Analyze checkpoint directories. + * Create directories if they do not exist. + * Recover from an unsuccessful checkpoint is necessary. + * + * @param dataDirs + * @param editsDirs + * @throws IOException + */ + void recoverCreate(Collection dataDirs, + Collection editsDirs) throws IOException { + Collection tempDataDirs = new ArrayList(dataDirs); + Collection tempEditsDirs = new ArrayList(editsDirs); + this.storageDirs = new ArrayList(); + setStorageDirectories(tempDataDirs, tempEditsDirs); + for (Iterator it = + dirIterator(); it.hasNext();) { + StorageDirectory sd = it.next(); + boolean isAccessible = true; + try { // create directories if don't exist yet + if(!sd.getRoot().mkdirs()) { + // do nothing, directory is already created + } + } catch(SecurityException se) { + isAccessible = false; + } + if(!isAccessible) + throw new InconsistentFSStateException(sd.getRoot(), + "cannot access checkpoint directory."); + StorageState curState; + try { + curState = sd.analyzeStorage(HdfsConstants.StartupOption.REGULAR); + // sd is locked but not opened + switch(curState) { + case NON_EXISTENT: + // fail if any of the configured checkpoint dirs are inaccessible + throw new InconsistentFSStateException(sd.getRoot(), + "checkpoint directory does not exist or is not accessible."); + case NOT_FORMATTED: + break; // it's ok since initially there is no current and VERSION + case NORMAL: + break; + default: // recovery is possible + sd.doRecover(curState); + } + } catch (IOException ioe) { + sd.unlock(); + throw ioe; + } + } + } + + /** + * Prepare directories for a new checkpoint. + *

+ * Rename current to lastcheckpoint.tmp + * and recreate current. + * @throws IOException + */ + void startCheckpoint() throws IOException { + for(StorageDirectory sd : storageDirs) { + File curDir = sd.getCurrentDir(); + File tmpCkptDir = sd.getLastCheckpointTmp(); + assert !tmpCkptDir.exists() : + tmpCkptDir.getName() + " directory must not exist."; + if(curDir.exists()) { + // rename current to tmp + rename(curDir, tmpCkptDir); + } + if (!curDir.mkdir()) + throw new IOException("Cannot create directory " + curDir); + } + } + + void endCheckpoint() throws IOException { + for(StorageDirectory sd : storageDirs) { + File tmpCkptDir = sd.getLastCheckpointTmp(); + File prevCkptDir = sd.getPreviousCheckpoint(); + // delete previous dir + if (prevCkptDir.exists()) + deleteDir(prevCkptDir); + // rename tmp to previous + if (tmpCkptDir.exists()) + rename(tmpCkptDir, prevCkptDir); + } + } + + /** + * Merge image and edits, and verify consistency with the signature. + */ + private void doMerge(CheckpointSignature sig, boolean loadImage) throws IOException { + getEditLog().open(); + StorageDirectory sdName = null; + StorageDirectory sdEdits = null; + Iterator it = null; + if (loadImage) { + it = dirIterator(NameNodeDirType.IMAGE); + if (it.hasNext()) + sdName = it.next(); + if (sdName == null) + throw new IOException("Could not locate checkpoint fsimage"); + } + it = dirIterator(NameNodeDirType.EDITS); + if (it.hasNext()) + sdEdits = it.next(); + if (sdEdits == null) + throw new IOException("Could not locate checkpoint edits"); + if (loadImage) { + loadFSImage(FSImage.getImageFile(sdName, NameNodeFile.IMAGE)); + } + loadFSEdits(sdEdits); + sig.validateStorageInfo(this); + saveFSImage(); + } + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java new file mode 100644 index 0000000..d042e15 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java @@ -0,0 +1,72 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.util.*; + +/** Manage name-to-serial-number maps for users and groups. 
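+ * For illustration (the user name is made up): the first call to
+ * getUserSerialNumber("alice") assigns a fresh serial number, every later
+ * call with "alice" returns the same number, and getUser(n) maps the number
+ * back to the name, so callers can store compact ints in place of repeated
+ * user/group strings.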
*/ +class SerialNumberManager { + /** This is the only instance of {@link SerialNumberManager}.*/ + static final SerialNumberManager INSTANCE = new SerialNumberManager(); + + private SerialNumberMap usermap = new SerialNumberMap(); + private SerialNumberMap groupmap = new SerialNumberMap(); + + private SerialNumberManager() {} + + int getUserSerialNumber(String u) {return usermap.get(u);} + int getGroupSerialNumber(String g) {return groupmap.get(g);} + String getUser(int n) {return usermap.get(n);} + String getGroup(int n) {return groupmap.get(n);} + + { + getUserSerialNumber(null); + getGroupSerialNumber(null); + } + + private static class SerialNumberMap { + private int max = 0; + private int nextSerialNumber() {return max++;} + + private Map t2i = new HashMap(); + private Map i2t = new HashMap(); + + synchronized int get(T t) { + Integer sn = t2i.get(t); + if (sn == null) { + sn = nextSerialNumber(); + t2i.put(t, sn); + i2t.put(sn, t); + } + return sn; + } + + synchronized T get(int i) { + if (!i2t.containsKey(i)) { + throw new IllegalStateException("!i2t.containsKey(" + i + + "), this=" + this); + } + return i2t.get(i); + } + + /** {@inheritDoc} */ + public String toString() { + return "max=" + max + ",\n t2i=" + t2i + ",\n i2t=" + i2t; + } + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/StreamFile.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/StreamFile.java new file mode 100644 index 0000000..59138b8 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/StreamFile.java @@ -0,0 +1,93 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
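SerialNumberManager above interns user and group names: each distinct string is assigned a small integer once, and both directions of the mapping are kept so callers can store compact ids and translate them back on demand. The class itself is package-private, so the following is a stand-alone sketch of the same interning pattern using plain JDK collections rather than a call into the patch's code.

import java.util.HashMap;
import java.util.Map;

public class NameInternSketch {
  private int next = 0;
  private final Map<String, Integer> toId = new HashMap<String, Integer>();
  private final Map<Integer, String> toName = new HashMap<Integer, String>();

  // Return the existing id for a name, or assign the next free one.
  public synchronized int getId(String name) {
    Integer id = toId.get(name);
    if (id == null) {
      id = next++;
      toId.put(name, id);
      toName.put(id, name);
    }
    return id;
  }

  // Reverse lookup; an unknown id would indicate a bookkeeping bug.
  public synchronized String getName(int id) {
    String name = toName.get(id);
    if (name == null) {
      throw new IllegalStateException("Unknown serial number " + id);
    }
    return name;
  }

  public static void main(String[] args) {
    NameInternSketch users = new NameInternSketch();
    int first = users.getId("hdfs");
    int second = users.getId("hdfs");          // repeated lookups return the same id
    System.out.println(first == second);       // true
    System.out.println(users.getName(first));  // hdfs
  }
}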
+ */ +package org.apache.hadoop.hdfs.server.namenode; + +import javax.servlet.*; +import javax.servlet.http.*; +import java.io.*; +import java.net.*; +import org.apache.hadoop.hdfs.DFSClient; +import org.apache.hadoop.hdfs.HftpFileSystem; +import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.security.UnixUserGroupInformation; +import org.apache.hadoop.conf.*; + +public class StreamFile extends DfsServlet { + static InetSocketAddress nameNodeAddr; + static DataNode datanode = null; + private static final Configuration masterConf = new Configuration(); + static { + if ((datanode = DataNode.getDataNode()) != null) { + nameNodeAddr = datanode.getNameNodeAddr(); + } + } + + /** getting a client for connecting to dfs */ + protected DFSClient getDFSClient(HttpServletRequest request) + throws IOException { + Configuration conf = new Configuration(masterConf); + UnixUserGroupInformation.saveToConf(conf, + UnixUserGroupInformation.UGI_PROPERTY_NAME, getUGI(request)); + return new DFSClient(nameNodeAddr, conf); + } + + public void doGet(HttpServletRequest request, HttpServletResponse response) + throws ServletException, IOException { + final String filename = request.getPathInfo() != null ? + request.getPathInfo() : "/"; + + String posStr = request.getParameter("seek"); + Long pos = posStr == null ? null : Long.valueOf(posStr); + if (filename == null || filename.length() == 0) { + response.setContentType("text/plain"); + PrintWriter out = response.getWriter(); + out.print("Invalid input"); + return; + } + DFSClient dfs = getDFSClient(request); + long contentLength = dfs.getFileInfo(filename).getLen(); + DFSClient.DFSInputStream in = dfs.open(filename); + if (pos != null) { + contentLength -= pos; + in.seek(pos); + } + + OutputStream os = response.getOutputStream(); + response.setHeader("Content-Disposition", "attachment; filename=\"" + + filename + "\""); + response.setHeader("isUnderConstruction", + in.isUnderConstruction() ? "true" : "false"); + response.setContentType("application/octet-stream"); + response.setHeader( + HftpFileSystem.CONTENT_LENGTH_FIELD, + String.valueOf(contentLength) + ); + + byte buf[] = new byte[4096]; + try { + int bytesRead; + while ((bytesRead = in.read(buf)) != -1) { + os.write(buf, 0, bytesRead); + } + } finally { + in.close(); + os.close(); + dfs.close(); + } + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java new file mode 100644 index 0000000..cb01808 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java @@ -0,0 +1,230 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
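The StreamFile servlet above streams a DFS file back over HTTP, honouring an optional seek query parameter and advertising the remaining byte count in the Content-Length header. The following client-side sketch shows how such an endpoint could be read with plain java.net; the host, port, and /streamFile mount path are assumptions for illustration and are not defined in this excerpt.

import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class StreamFileClientSketch {
  public static void main(String[] args) throws Exception {
    // Assumed datanode HTTP address and servlet path; seek=1024 skips the first 1 KB.
    URL url = new URL(
        "http://datanode.example.com:50075/streamFile/user/foo/part-00000?seek=1024");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    long advertised = Long.parseLong(conn.getHeaderField("Content-Length"));

    byte[] buf = new byte[4096];
    long received = 0;
    InputStream in = conn.getInputStream();
    try {
      int n;
      while ((n = in.read(buf)) != -1) {
        received += n;   // a real caller would write the bytes somewhere useful
      }
    } finally {
      in.close();
    }
    System.out.println("advertised=" + advertised + ", received=" + received);
  }
}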
+ */ +package org.apache.hadoop.hdfs.server.namenode; + +import org.apache.commons.logging.*; + +import java.io.*; +import java.net.*; +import java.security.DigestInputStream; +import java.security.DigestOutputStream; +import java.security.MessageDigest; +import java.util.Iterator; +import java.util.Map; +import java.lang.Math; +import javax.servlet.http.HttpServletResponse; +import javax.servlet.http.HttpServletRequest; + +import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.util.DataTransferThrottler; +import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.ErrorSimulator; +import org.apache.hadoop.io.MD5Hash; + +/** + * This class provides fetching a specified file from the NameNode. + */ +class TransferFsImage implements FSConstants { + public final static String CONTENT_LENGTH = "Content-Length"; + public static final Log LOG = LogFactory.getLog(TransferFsImage.class.getName()); + + private boolean isGetImage; + private boolean isGetEdit; + private boolean isPutImage; + private int remoteport; + private String machineName; + private CheckpointSignature token; + + /** + * File downloader. + * @param pmap key=value[] map that is passed to the http servlet as + * url parameters + * @param request the object from which this servelet reads the url contents + * @param response the object into which this servelet writes the url contents + * @throws IOException + */ + public TransferFsImage(Map pmap, + HttpServletRequest request, + HttpServletResponse response + ) throws IOException { + isGetImage = isGetEdit = isPutImage = false; + remoteport = 0; + machineName = null; + token = null; + + for (Iterator it = pmap.keySet().iterator(); it.hasNext();) { + String key = it.next(); + if (key.equals("getimage")) { + isGetImage = true; + } else if (key.equals("getedit")) { + isGetEdit = true; + } else if (key.equals("putimage")) { + isPutImage = true; + } else if (key.equals("port")) { + remoteport = new Integer(pmap.get("port")[0]).intValue(); + } else if (key.equals("machine")) { + machineName = pmap.get("machine")[0]; + } else if (key.equals("token")) { + token = new CheckpointSignature(pmap.get("token")[0]); + } + } + + int numGets = (isGetImage?1:0) + (isGetEdit?1:0); + if ((numGets > 1) || (numGets == 0) && !isPutImage) { + throw new IOException("Illegal parameters to TransferFsImage"); + } + } + + boolean getEdit() { + return isGetEdit; + } + + boolean getImage() { + return isGetImage; + } + + boolean putImage() { + return isPutImage; + } + + CheckpointSignature getToken() { + return token; + } + + String getInfoServer() throws IOException{ + if (machineName == null || remoteport == 0) { + throw new IOException ("MachineName and port undefined"); + } + return machineName + ":" + remoteport; + } + + /** + * A server-side method to respond to a getfile http request + * Copies the contents of the local file into the output stream. 
+ */ + static void getFileServer(OutputStream outstream, File localfile, DataTransferThrottler throttler) + throws IOException { + byte buf[] = new byte[BUFFER_SIZE]; + FileInputStream infile = null; + long totalReads = 0, totalSends = 0; + try { + infile = new FileInputStream(localfile); + if (ErrorSimulator.getErrorSimulation(2) + && localfile.getAbsolutePath().contains("secondary")) { + // throw exception only when the secondary sends its image + throw new IOException("If this exception is not caught by the " + + "name-node fs image will be truncated."); + } + + if (ErrorSimulator.getErrorSimulation(3) + && localfile.getAbsolutePath().contains("fsimage")) { + // Test sending image shorter than localfile + long len = localfile.length(); + buf = new byte[(int)Math.min(len/2, BUFFER_SIZE)]; + // This will read at most half of the image + // and the rest of the image will be sent over the wire + infile.read(buf); + } + int num = 1; + while (num > 0) { + long startRead = System.currentTimeMillis(); + num = infile.read(buf); + if (num <= 0) { + break; + } + outstream.write(buf, 0, num); + if (throttler != null) { + throttler.throttle(num); + } + } + } finally { + if (infile != null) { + infile.close(); + } + } + } + + /** + * Client-side Method to fetch file from a server + * Copies the response from the URL to a list of local files. + * + * @Return a digest of the received file if getChecksum is true + */ + static MD5Hash getFileClient(String fsName, String id, File[] localPath, + boolean getChecksum) + throws IOException { + byte[] buf = new byte[BUFFER_SIZE]; + StringBuffer str = new StringBuffer("http://"+fsName+"/getimage?"); + str.append(id); + + // + // open connection to remote server + // + URL url = new URL(str.toString()); + URLConnection connection = url.openConnection(); + long advertisedSize; + String contentLength = connection.getHeaderField(CONTENT_LENGTH); + if (contentLength != null) { + advertisedSize = Long.parseLong(contentLength); + } else { + throw new IOException(CONTENT_LENGTH + " header is not provided " + + "by the namenode when trying to fetch " + str); + } + long received = 0; + InputStream stream = connection.getInputStream(); + MessageDigest digester = null; + if (getChecksum) { + digester = MD5Hash.getDigester(); + stream = new DigestInputStream(stream, digester); + } + FileOutputStream[] output = null; + + try { + if (localPath != null) { + output = new FileOutputStream[localPath.length]; + for (int i = 0; i < output.length; i++) { + output[i] = new FileOutputStream(localPath[i]); + } + } + int num = 1; + while (num > 0) { + num = stream.read(buf); + if (num > 0 && localPath != null) { + received += num; + for (int i = 0; i < output.length; i++) { + output[i].write(buf, 0, num); + } + } + } + } finally { + stream.close(); + if (output != null) { + for (int i = 0; i < output.length; i++) { + if (output[i] != null) { + output[i].close(); + } + } + } + if (received != advertisedSize) { + throw new IOException("File " + str + " received length " + received + + " is not of the advertised size " + + advertisedSize); + } + } + return digester==null ? 
null : new MD5Hash(digester.digest()); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java new file mode 100644 index 0000000..29978dd --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java @@ -0,0 +1,250 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.util.*; + +import org.apache.hadoop.hdfs.protocol.Block; + +/* Class for keeping track of under replication blocks + * Blocks have replication priority, with priority 0 indicating the highest + * Blocks have only one replicas has the highest + */ +class UnderReplicatedBlocks implements Iterable { + static final int LEVEL = 3; + static public final int QUEUE_WITH_CORRUPT_BLOCKS = 2; + private List> priorityQueues = new ArrayList>(); + + /* constructor */ + UnderReplicatedBlocks() { + for(int i=0; i()); + } + } + + /** + * Empty the queues. + */ + void clear() { + for(int i=0; i set:priorityQueues) { + if(set.contains(block)) { return true; } + } + return false; + } + + /* Return the priority of a block + * @param block a under replication block + * @param curReplicas current number of replicas of the block + * @param expectedReplicas expected number of replicas of the block + */ + private int getPriority(Block block, + int curReplicas, + int decommissionedReplicas, + int expectedReplicas) { + if (curReplicas<0 || curReplicas>=expectedReplicas) { + return LEVEL; // no need to replicate + } else if(curReplicas==0) { + // If there are zero non-decommissioned replica but there are + // some decommissioned replicas, then assign them highest priority + if (decommissionedReplicas > 0) { + return 0; + } + return QUEUE_WITH_CORRUPT_BLOCKS; // keep these blocks in needed replication. 
+ } else if(curReplicas==1) { + return 0; // highest priority + } else if(curReplicas*3= 0 && priLevel < LEVEL + && priorityQueues.get(priLevel).remove(block)) { + NameNode.stateChangeLog.debug( + "BLOCK* NameSystem.UnderReplicationBlock.remove: " + + "Removing block " + block + + " from priority queue "+ priLevel); + return true; + } else { + for(int i=0; i { + private int level; + private boolean isIteratorForLevel = false; + private List> iterators = new ArrayList>(); + + BlockIterator() { + level=0; + for(int i=0; i uos; + uos = UpgradeObjectCollection.getDistributedUpgrades(-4, + HdfsConstants.NodeType.NAME_NODE); + System.out.println(uos.size()); + um.startUpgrade(); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java new file mode 100644 index 0000000..1562069 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; + +import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.UpgradeObject; +import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand; + +/** + * Base class for name-node upgrade objects. + * Data-node upgrades are run in separate threads. + */ +public abstract class UpgradeObjectNamenode extends UpgradeObject { + + /** + * Process an upgrade command. + * RPC has only one very generic command for all upgrade related inter + * component communications. + * The actual command recognition and execution should be handled here. + * The reply is sent back also as an UpgradeCommand. + * + * @param command + * @return the reply command which is analyzed on the client side. + */ + public abstract UpgradeCommand processUpgradeCommand(UpgradeCommand command + ) throws IOException; + + public HdfsConstants.NodeType getType() { + return HdfsConstants.NodeType.NAME_NODE; + } + + /** + */ + public UpgradeCommand startUpgrade() throws IOException { + // broadcast that data-nodes must start the upgrade + return new UpgradeCommand(UpgradeCommand.UC_ACTION_START_UPGRADE, + getVersion(), (short)0); + } + + protected FSNamesystem getFSNamesystem() { + return FSNamesystem.getFSNamesystem(); + } + + public void forceProceed() throws IOException { + // do nothing by default + NameNode.LOG.info("forceProceed() is not defined for the upgrade. 
" + + getDescription()); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java new file mode 100644 index 0000000..1ce4036 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java @@ -0,0 +1,112 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode.metrics; + +/** + * + * This Interface defines the methods to get the status of a the FSNamesystem of + * a name node. + * It is also used for publishing via JMX (hence we follow the JMX naming + * convention.) + * + * Note we have not used the MetricsDynamicMBeanBase to implement this + * because the interface for the NameNodeStateMBean is stable and should + * be published as an interface. + * + *

+ * Name Node runtime activity statistic info is report in another MBean + * @see org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeActivityMBean + * + */ +public interface FSNamesystemMBean { + + /** + * The state of the file system: Safemode or Operational + * @return the state + */ + public String getFSState(); + + + /** + * Number of allocated blocks in the system + * @return - number of allocated blocks + */ + public long getBlocksTotal(); + + /** + * Total storage capacity + * @return - total capacity in bytes + */ + public long getCapacityTotal(); + + + /** + * Free (unused) storage capacity + * @return - free capacity in bytes + */ + public long getCapacityRemaining(); + + /** + * Used storage capacity + * @return - used capacity in bytes + */ + public long getCapacityUsed(); + + + /** + * Total number of files and directories + * @return - num of files and directories + */ + public long getFilesTotal(); + + /** + * Blocks pending to be replicated + * @return - num of blocks to be replicated + */ + public long getPendingReplicationBlocks(); + + /** + * Blocks under replicated + * @return - num of blocks under replicated + */ + public long getUnderReplicatedBlocks(); + + /** + * Blocks scheduled for replication + * @return - num of blocks scheduled for replication + */ + public long getScheduledReplicationBlocks(); + + /** + * Total Load on the FSNamesystem + * @return - total load of FSNamesystem + */ + public int getTotalLoad(); + + /** + * Number of Live data nodes + * @return number of live data nodes + */ + public int getNumLiveDataNodes(); + + /** + * Number of dead data nodes + * @return number of dead data nodes + */ + public int getNumDeadDataNodes(); +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java new file mode 100644 index 0000000..64fdcfc --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java @@ -0,0 +1,124 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode.metrics; + + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.metrics.*; +import org.apache.hadoop.metrics.util.MetricsBase; +import org.apache.hadoop.metrics.util.MetricsIntValue; +import org.apache.hadoop.metrics.util.MetricsLongValue; +import org.apache.hadoop.metrics.util.MetricsRegistry; + +/** + * + * This class is for maintaining the various FSNamesystem status metrics + * and publishing them through the metrics interfaces. 
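Because FSNamesystemMBean above is a standard JMX interface, its attributes can be read from inside the name-node JVM through the platform MBeanServer once the bean has been registered. A hedged sketch follows; the ObjectName used here is an assumption about how the bean is registered and should be checked against the actual registration code before being relied on.

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class FsNamesystemStateJmxSketch {
  public static void main(String[] args) throws Exception {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    // Assumed registration name; the real one comes from the MBean registration call.
    ObjectName name = new ObjectName("hadoop:service=NameNode,name=FSNamesystemState");

    // Attribute names follow from the getters in the interface above.
    String state = (String) server.getAttribute(name, "FSState");
    Long blocksTotal = (Long) server.getAttribute(name, "BlocksTotal");
    Long underReplicated = (Long) server.getAttribute(name, "UnderReplicatedBlocks");

    System.out.println("state=" + state + ", blocks=" + blocksTotal
        + ", underReplicated=" + underReplicated);
  }
}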
+ * The FSNamesystem creates and registers the JMX MBean. + *

+ * This class has a number of metrics variables that are publicly accessible; + * these variables (objects) have methods to update their values; + * for example: + *

{@link #filesTotal}.set() + * + */ +public class FSNamesystemMetrics implements Updater { + private static Log log = LogFactory.getLog(FSNamesystemMetrics.class); + final MetricsRecord metricsRecord; + public MetricsRegistry registry = new MetricsRegistry(); + + final MetricsIntValue filesTotal = new MetricsIntValue("FilesTotal", registry); + final MetricsLongValue blocksTotal = new MetricsLongValue("BlocksTotal", registry); + final MetricsIntValue capacityTotalGB = new MetricsIntValue("CapacityTotalGB", registry); + final MetricsIntValue capacityUsedGB = new MetricsIntValue("CapacityUsedGB", registry); + final MetricsIntValue capacityRemainingGB = new MetricsIntValue("CapacityRemainingGB", registry); + final MetricsIntValue totalLoad = new MetricsIntValue("TotalLoad", registry); + final MetricsIntValue pendingDeletionBlocks = new MetricsIntValue("PendingDeletionBlocks", registry); + final MetricsIntValue corruptBlocks = new MetricsIntValue("CorruptBlocks", registry); + final MetricsIntValue excessBlocks = new MetricsIntValue("ExcessBlocks", registry); + final MetricsIntValue pendingReplicationBlocks = new MetricsIntValue("PendingReplicationBlocks", registry); + final MetricsIntValue underReplicatedBlocks = new MetricsIntValue("UnderReplicatedBlocks", registry); + final MetricsIntValue scheduledReplicationBlocks = new MetricsIntValue("ScheduledReplicationBlocks", registry); + final MetricsIntValue missingBlocks = new MetricsIntValue("MissingBlocks", registry); + final MetricsIntValue blockCapacity = new MetricsIntValue("BlockCapacity", registry); + + public FSNamesystemMetrics(Configuration conf) { + String sessionId = conf.get("session.id"); + + // Create a record for FSNamesystem metrics + MetricsContext metricsContext = MetricsUtil.getContext("dfs"); + metricsRecord = MetricsUtil.createRecord(metricsContext, "FSNamesystem"); + metricsRecord.setTag("sessionId", sessionId); + metricsContext.registerUpdater(this); + log.info("Initializing FSNamesystemMetrics using context object:" + + metricsContext.getClass().getName()); + } + + private int roundBytesToGBytes(long bytes) { + return Math.round(((float)bytes/(1024 * 1024 * 1024))); + } + + /** + * Since this object is a registered updater, this method will be called + * periodically, e.g. every 5 seconds. + * We set the metrics value within this function before pushing it out. + * FSNamesystem updates its own local variables which are + * light weight compared to Metrics counters. + * + * Some of the metrics are explicity casted to int. Few metrics collectors + * do not handle long values. It is safe to cast to int for now as all these + * values fit in int value. + * Metrics related to DFS capacity are stored in bytes which do not fit in + * int, so they are rounded to GB + */ + public void doUpdates(MetricsContext unused) { + /** + * ToFix + * If the metrics counter were instead stored in the metrics objects themselves + * we could avoid copying the values on each update. + */ + synchronized (this) { + FSNamesystem fsNameSystem = FSNamesystem.getFSNamesystem(); + filesTotal.set((int)fsNameSystem.getFilesTotal()); + blocksTotal.set((int)fsNameSystem.getBlocksTotal()); + capacityTotalGB.set(roundBytesToGBytes(fsNameSystem.getCapacityTotal())); + capacityUsedGB.set(roundBytesToGBytes(fsNameSystem.getCapacityUsed())); + capacityRemainingGB.set(roundBytesToGBytes(fsNameSystem. 
+ getCapacityRemaining())); + totalLoad.set(fsNameSystem.getTotalLoad()); + corruptBlocks.set((int)fsNameSystem.getCorruptReplicaBlocks()); + excessBlocks.set((int)fsNameSystem.getExcessBlocks()); + pendingDeletionBlocks.set((int)fsNameSystem.getPendingDeletionBlocks()); + pendingReplicationBlocks.set((int)fsNameSystem. + getPendingReplicationBlocks()); + underReplicatedBlocks.set((int)fsNameSystem.getUnderReplicatedBlocks()); + scheduledReplicationBlocks.set((int)fsNameSystem. + getScheduledReplicationBlocks()); + missingBlocks.set((int)fsNameSystem.getMissingBlocksCount()); + blockCapacity.set(fsNameSystem.getBlockCapacity()); + + for (MetricsBase m : registry.getMetricsList()) { + m.pushMetric(metricsRecord); + } + } + metricsRecord.update(); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeActivtyMBean.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeActivtyMBean.java new file mode 100644 index 0000000..d502d2d --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeActivtyMBean.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode.metrics; + +import javax.management.ObjectName; + +import org.apache.hadoop.metrics.util.MBeanUtil; +import org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase; +import org.apache.hadoop.metrics.util.MetricsRegistry; + +/** + * + * This is the JMX MBean for reporting the NameNode Activity. + * The MBean is register using the name + * "hadoop:service=NameNode,name=NameNodeActivity" + * + * Many of the activity metrics are sampled and averaged on an interval + * which can be specified in the metrics config file. + *

+ * For the metrics that are sampled and averaged, one must specify + * a metrics context that does periodic update calls. Most metrics contexts do. + * The default Null metrics context, however, does NOT. So if you aren't + * using any other metrics context, then you can turn on the viewing and averaging + * of sampled metrics by specifying the following two lines + * in the hadoop-metrics.properties file: + *

+ *        dfs.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
+ *        dfs.period=10
+ *  
+ *

+ * Note that the metrics are collected regardless of the context used. + * The context with the update thread is used to average the data periodically + * + * + * + * Impl details: We use a dynamic mbean that gets the list of the metrics + * from the metrics registry passed as an argument to the constructor + */ + +public class NameNodeActivtyMBean extends MetricsDynamicMBeanBase { + final private ObjectName mbeanName; + + protected NameNodeActivtyMBean(final MetricsRegistry mr) { + super(mr, "Activity statistics at the NameNode"); + mbeanName = MBeanUtil.registerMBean("NameNode", "NameNodeActivity", this); + } + + public void shutdown() { + if (mbeanName != null) + MBeanUtil.unregisterMBean(mbeanName); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java new file mode 100644 index 0000000..9336857 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java @@ -0,0 +1,169 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode.metrics; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.metrics.*; +import org.apache.hadoop.metrics.jvm.JvmMetrics; +import org.apache.hadoop.metrics.util.MetricsBase; +import org.apache.hadoop.metrics.util.MetricsIntValue; +import org.apache.hadoop.metrics.util.MetricsRegistry; +import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong; +import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate; + +/** + * + * This class is for maintaining the various NameNode activity statistics + * and publishing them through the metrics interfaces. + * This also registers the JMX MBean for RPC. + *
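NameNodeActivtyMBean above is a dynamic MBean whose attribute set comes from the metrics registry, and its javadoc gives the registration name hadoop:service=NameNode,name=NameNodeActivity. Since the attributes are not fixed at compile time, a monitoring sketch can simply enumerate them via MBeanInfo; this is an in-JVM illustration, not part of the patch.

import java.lang.management.ManagementFactory;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class NameNodeActivityJmxSketch {
  public static void main(String[] args) throws Exception {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    // Name taken from the NameNodeActivtyMBean javadoc above.
    ObjectName activity = new ObjectName("hadoop:service=NameNode,name=NameNodeActivity");

    // The bean is dynamic, so discover the attribute list before reading values.
    for (MBeanAttributeInfo attr : server.getMBeanInfo(activity).getAttributes()) {
      System.out.println(attr.getName() + " = "
          + server.getAttribute(activity, attr.getName()));
    }
  }
}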

+ * This class has a number of metrics variables that are publicly accessible; + * these variables (objects) have methods to update their values; + * for example: + *

{@link #syncs}.inc() + * + */ +public class NameNodeMetrics implements Updater { + private static Log log = LogFactory.getLog(NameNodeMetrics.class); + private final MetricsRecord metricsRecord; + public MetricsRegistry registry = new MetricsRegistry(); + + private NameNodeActivtyMBean namenodeActivityMBean; + + public MetricsTimeVaryingLong numFilesCreated = + new MetricsTimeVaryingLong("FilesCreated", registry); + public MetricsTimeVaryingLong numFilesAppended = + new MetricsTimeVaryingLong("FilesAppended", registry); + public MetricsTimeVaryingLong numGetBlockLocations = + new MetricsTimeVaryingLong("GetBlockLocations", registry); + public MetricsTimeVaryingLong numFilesRenamed = + new MetricsTimeVaryingLong("FilesRenamed", registry); + public MetricsTimeVaryingLong numGetListingOps = + new MetricsTimeVaryingLong("GetListingOps", registry); + public MetricsTimeVaryingLong numCreateFileOps = + new MetricsTimeVaryingLong("CreateFileOps", registry); + public MetricsTimeVaryingLong numDeleteFileOps = + new MetricsTimeVaryingLong("DeleteFileOps", registry); + public MetricsTimeVaryingLong numFileInfoOps = + new MetricsTimeVaryingLong("FileInfoOps", registry); + public MetricsTimeVaryingLong numAddBlockOps = + new MetricsTimeVaryingLong("AddBlockOps", registry); + public MetricsTimeVaryingLong numSetReplication = + new MetricsTimeVaryingLong("SetReplication", registry); + public MetricsTimeVaryingLong numSetPermission = + new MetricsTimeVaryingLong("SetPermission", registry); + public MetricsTimeVaryingLong numSetOwner = + new MetricsTimeVaryingLong("SetOwner", registry); + public MetricsTimeVaryingLong numAbandonBlock = + new MetricsTimeVaryingLong("numAbandonBlock", registry); + public MetricsTimeVaryingLong numCompleteFile = + new MetricsTimeVaryingLong("numCompleteFile", registry); + public MetricsTimeVaryingLong numReportBadBlocks = + new MetricsTimeVaryingLong("numReportBadBlocks", registry); + public MetricsTimeVaryingLong numNextGenerationStamp = + new MetricsTimeVaryingLong("numNextGenerationStamp", registry); + public MetricsTimeVaryingLong numMkdirs = + new MetricsTimeVaryingLong("numMkdirs", registry); + public MetricsTimeVaryingLong numRenewLease = + new MetricsTimeVaryingLong("numRenewLease", registry); + public MetricsTimeVaryingLong numSaveNamespace = + new MetricsTimeVaryingLong("numSaveNamespace", registry); + public MetricsTimeVaryingLong numRefreshNodes = + new MetricsTimeVaryingLong("numRefreshNodes", registry); + public MetricsTimeVaryingLong numSetQuota = + new MetricsTimeVaryingLong("numSetQuota", registry); + public MetricsTimeVaryingLong numFsync = + new MetricsTimeVaryingLong("numFsync", registry); + public MetricsTimeVaryingLong numSetTimes = + new MetricsTimeVaryingLong("numSetTimes", registry); + public MetricsTimeVaryingLong numRegister = + new MetricsTimeVaryingLong("numRegister", registry); + public MetricsTimeVaryingLong numHeartbeat = + new MetricsTimeVaryingLong("numHeartbeat", registry); + public MetricsTimeVaryingLong numBlockReport = + new MetricsTimeVaryingLong("numBlockReport", registry); + public MetricsTimeVaryingLong numBlockReceived = + new MetricsTimeVaryingLong("numBlockReceived", registry); + public MetricsTimeVaryingLong numVersionRequest = + new MetricsTimeVaryingLong("numVersionRequest", registry); + + public MetricsTimeVaryingRate transactions = + new MetricsTimeVaryingRate("Transactions", registry, "Journal Transaction"); + public MetricsTimeVaryingRate syncs = + new MetricsTimeVaryingRate("Syncs", registry, "Journal Sync"); + public 
MetricsTimeVaryingLong transactionsBatchedInSync = + new MetricsTimeVaryingLong("JournalTransactionsBatchedInSync", registry, "Journal Transactions Batched In Sync"); + public MetricsTimeVaryingRate blockReport = + new MetricsTimeVaryingRate("blockReport", registry, "Block Report"); + public MetricsIntValue safeModeTime = + new MetricsIntValue("SafemodeTime", registry, "Duration in SafeMode at Startup"); + public MetricsIntValue fsImageLoadTime = + new MetricsIntValue("fsImageLoadTime", registry, "Time loading FS Image at Startup"); + public MetricsIntValue numBlocksCorrupted = + new MetricsIntValue("BlocksCorrupted", registry); + public MetricsIntValue numBufferedTransactions = + new MetricsIntValue("numBufferedTransactions", registry); + + + public NameNodeMetrics(Configuration conf, NameNode nameNode) { + String sessionId = conf.get("session.id"); + // Initiate Java VM metrics + JvmMetrics.init("NameNode", sessionId); + + + // Now the Mbean for the name node - this alos registers the MBean + namenodeActivityMBean = new NameNodeActivtyMBean(registry); + + // Create a record for NameNode metrics + MetricsContext metricsContext = MetricsUtil.getContext("dfs"); + metricsRecord = MetricsUtil.createRecord(metricsContext, "namenode"); + metricsRecord.setTag("sessionId", sessionId); + metricsContext.registerUpdater(this); + log.info("Initializing NameNodeMeterics using context object:" + + metricsContext.getClass().getName()); + } + + + + public void shutdown() { + if (namenodeActivityMBean != null) + namenodeActivityMBean.shutdown(); + } + + /** + * Since this object is a registered updater, this method will be called + * periodically, e.g. every 5 seconds. + */ + public void doUpdates(MetricsContext unused) { + synchronized (this) { + for (MetricsBase m : registry.getMetricsList()) { + m.pushMetric(metricsRecord); + } + } + metricsRecord.update(); + } + + public void resetAllMinMax() { + transactions.resetMinMax(); + syncs.resetMinMax(); + blockReport.resetMinMax(); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java b/src/hdfs/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java new file mode 100644 index 0000000..e1c0645 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java @@ -0,0 +1,122 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
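NameNodeMetrics above works like FSNamesystemMetrics: the name-node bumps the MetricsTimeVaryingLong counters as it serves RPCs, and doUpdates() pushes every registered metric into the metrics record once per period. Below is a short sketch of the producing side, using only fields declared in the class above; how the metrics object is actually wired into the RPC handlers is not shown in this excerpt, so treat the call site as illustrative.

import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;

// Illustrative call site for the counters defined in NameNodeMetrics above.
class NameNodeMetricsUsageSketch {
  static void recordCreate(NameNodeMetrics metrics) {
    metrics.numFilesCreated.inc();    // one more file created
    metrics.numCreateFileOps.inc();   // one more create RPC served
  }
}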
+ */ +package org.apache.hadoop.hdfs.server.protocol; + +import java.io.*; +import java.util.List; + +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair; +import org.apache.hadoop.io.*; + + +/**************************************************** + * A BlockCommand is an instruction to a datanode + * regarding some blocks under its control. It tells + * the DataNode to either invalidate a set of indicated + * blocks, or to copy a set of indicated blocks to + * another DataNode. + * + ****************************************************/ +public class BlockCommand extends DatanodeCommand { + Block blocks[]; + DatanodeInfo targets[][]; + + public BlockCommand() {} + + /** + * Create BlockCommand for transferring blocks to another datanode + * @param blocktargetlist blocks to be transferred + */ + public BlockCommand(int action, List blocktargetlist) { + super(action); + + blocks = new Block[blocktargetlist.size()]; + targets = new DatanodeInfo[blocks.length][]; + for(int i = 0; i < blocks.length; i++) { + BlockTargetPair p = blocktargetlist.get(i); + blocks[i] = p.block; + targets[i] = p.targets; + } + } + + private static final DatanodeInfo[][] EMPTY_TARGET = {}; + + /** + * Create BlockCommand for the given action + * @param blocks blocks related to the action + */ + public BlockCommand(int action, Block blocks[]) { + super(action); + this.blocks = blocks; + this.targets = EMPTY_TARGET; + } + + public Block[] getBlocks() { + return blocks; + } + + public DatanodeInfo[][] getTargets() { + return targets; + } + + /////////////////////////////////////////// + // Writable + /////////////////////////////////////////// + static { // register a ctor + WritableFactories.setFactory + (BlockCommand.class, + new WritableFactory() { + public Writable newInstance() { return new BlockCommand(); } + }); + } + + public void write(DataOutput out) throws IOException { + super.write(out); + out.writeInt(blocks.length); + for (int i = 0; i < blocks.length; i++) { + blocks[i].write(out); + } + out.writeInt(targets.length); + for (int i = 0; i < targets.length; i++) { + out.writeInt(targets[i].length); + for (int j = 0; j < targets[i].length; j++) { + targets[i][j].write(out); + } + } + } + + public void readFields(DataInput in) throws IOException { + super.readFields(in); + this.blocks = new Block[in.readInt()]; + for (int i = 0; i < blocks.length; i++) { + blocks[i] = new Block(); + blocks[i].readFields(in); + } + + this.targets = new DatanodeInfo[in.readInt()][]; + for (int i = 0; i < targets.length; i++) { + this.targets[i] = new DatanodeInfo[in.readInt()]; + for (int j = 0; j < targets[i].length; j++) { + targets[i][j] = new DatanodeInfo(); + targets[i][j].readFields(in); + } + } + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/protocol/BlockMetaDataInfo.java b/src/hdfs/org/apache/hadoop/hdfs/server/protocol/BlockMetaDataInfo.java new file mode 100644 index 0000000..e9e47d4 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/protocol/BlockMetaDataInfo.java @@ -0,0 +1,58 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.protocol; + +import java.io.*; + +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.io.*; + +/** + * Meta data information for a block + */ +public class BlockMetaDataInfo extends Block { + static final WritableFactory FACTORY = new WritableFactory() { + public Writable newInstance() { return new BlockMetaDataInfo(); } + }; + static { // register a ctor + WritableFactories.setFactory(BlockMetaDataInfo.class, FACTORY); + } + + private long lastScanTime; + + public BlockMetaDataInfo() {} + + public BlockMetaDataInfo(Block b, long lastScanTime) { + super(b); + this.lastScanTime = lastScanTime; + } + + public long getLastScanTime() {return lastScanTime;} + + /** {@inheritDoc} */ + public void write(DataOutput out) throws IOException { + super.write(out); + out.writeLong(lastScanTime); + } + + /** {@inheritDoc} */ + public void readFields(DataInput in) throws IOException { + super.readFields(in); + lastScanTime = in.readLong(); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java b/src/hdfs/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java new file mode 100644 index 0000000..fed9e6c --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java @@ -0,0 +1,117 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
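BlockCommand and BlockMetaDataInfo above are Hadoop Writables, so write() and readFields() must serialize exactly the same fields in the same order; an in-memory round trip is a quick way to check that. The sketch below uses the Writable buffer classes from org.apache.hadoop.io with made-up block values; the three-argument Block constructor is assumed from the hdfs protocol classes elsewhere in the tree.

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class WritableRoundTripSketch {
  public static void main(String[] args) throws Exception {
    // Block(blockId, numBytes, generationStamp) with made-up values.
    BlockMetaDataInfo original =
        new BlockMetaDataInfo(new Block(42L, 1024L, 100L), System.currentTimeMillis());

    DataOutputBuffer out = new DataOutputBuffer();
    original.write(out);                       // serialize

    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());

    BlockMetaDataInfo copy = new BlockMetaDataInfo();
    copy.readFields(in);                       // deserialize into a fresh instance

    System.out.println(copy.getBlockId() + " scanned at " + copy.getLastScanTime());
  }
}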
+ */ +package org.apache.hadoop.hdfs.server.protocol; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableUtils; + +/** A class to implement an array of BlockLocations + * It provide efficient customized serialization/deserialization methods + * in stead of using the default array (de)serialization provided by RPC + */ +public class BlocksWithLocations implements Writable { + + /** + * A class to keep track of a block and its locations + */ + public static class BlockWithLocations implements Writable { + Block block; + String datanodeIDs[]; + + /** default constructor */ + public BlockWithLocations() { + block = new Block(); + datanodeIDs = null; + } + + /** constructor */ + public BlockWithLocations(Block b, String[] datanodes) { + block = b; + datanodeIDs = datanodes; + } + + /** get the block */ + public Block getBlock() { + return block; + } + + /** get the block's locations */ + public String[] getDatanodes() { + return datanodeIDs; + } + + /** deserialization method */ + public void readFields(DataInput in) throws IOException { + block.readFields(in); + int len = WritableUtils.readVInt(in); // variable length integer + datanodeIDs = new String[len]; + for(int i=0; idatanode + * whose total size is equal to size + * @param datanode a data node + * @param size requested size + * @return a list of blocks & their locations + * @throws RemoteException if size is less than or equal to 0 or + datanode does not exist + */ + public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size) + throws IOException; + + /** + * Get the size of the current edit log (in bytes). + * @return The number of bytes in the current edit log. + * @throws IOException + */ + public long getEditLogSize() throws IOException; + + /** + * Closes the current edit log and opens a new one. The + * call fails if the file system is in SafeMode. + * @throws IOException + * @return a unique token to identify this transaction. + */ + public CheckpointSignature rollEditLog() throws IOException; + + /** + * Rolls the fsImage log. It removes the old fsImage, copies the + * new image to fsImage, removes the old edits and renames edits.new + * to edits. The call fails if any of the four files are missing. + * + * @param newImageSignature the signature of the new fsimage + * @throws IOException + */ + public void rollFsImage(CheckpointSignature newImageSignature) + throws IOException; +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java b/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java new file mode 100644 index 0000000..e47bcf3 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java @@ -0,0 +1,87 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
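The getEditLogSize, rollEditLog and rollFsImage methods above are the name-node side of the checkpoint handshake (the enclosing interface is assumed here to be NamenodeProtocol); a checkpointer interleaves them with the HTTP transfer handled by TransferFsImage earlier in this patch. The outline below assumes an already-obtained protocol proxy and summarises the transfer step in comments; it is a sketch of the sequence, not the patch's SecondaryNameNode code.

import java.io.IOException;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;

public class CheckpointSequenceSketch {
  // namenode: an RPC proxy for the checkpoint-related interface shown above.
  static void doCheckpoint(NamenodeProtocol namenode) throws IOException {
    long editsSize = namenode.getEditLogSize();       // how much edit log has accumulated
    if (editsSize == 0) {
      return;                                         // nothing worth checkpointing yet
    }

    CheckpointSignature sig = namenode.rollEditLog(); // freeze edits; name-node opens edits.new

    // ... fetch fsimage and edits over HTTP (TransferFsImage.getFileClient),
    // ... merge them locally, and upload the new image back to the name-node ...

    namenode.rollFsImage(sig);                        // promote the new image, discard old edits
  }
}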
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.protocol; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.server.common.Storage; +import org.apache.hadoop.hdfs.server.common.StorageInfo; +import org.apache.hadoop.io.UTF8; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableFactories; +import org.apache.hadoop.io.WritableFactory; + +/** + * NamespaceInfo is returned by the name-node in reply + * to a data-node handshake. + * + */ +public class NamespaceInfo extends StorageInfo implements Writable { + String buildVersion; + int distributedUpgradeVersion; + + public NamespaceInfo() { + super(); + buildVersion = null; + } + + public NamespaceInfo(int nsID, long cT, int duVersion) { + super(FSConstants.LAYOUT_VERSION, nsID, cT); + buildVersion = Storage.getBuildVersion(); + this.distributedUpgradeVersion = duVersion; + } + + public String getBuildVersion() { + return buildVersion; + } + + public int getDistributedUpgradeVersion() { + return distributedUpgradeVersion; + } + + ///////////////////////////////////////////////// + // Writable + ///////////////////////////////////////////////// + static { // register a ctor + WritableFactories.setFactory + (NamespaceInfo.class, + new WritableFactory() { + public Writable newInstance() { return new NamespaceInfo(); } + }); + } + + public void write(DataOutput out) throws IOException { + UTF8.writeString(out, getBuildVersion()); + out.writeInt(getLayoutVersion()); + out.writeInt(getNamespaceID()); + out.writeLong(getCTime()); + out.writeInt(getDistributedUpgradeVersion()); + } + + public void readFields(DataInput in) throws IOException { + buildVersion = UTF8.readString(in); + layoutVersion = in.readInt(); + namespaceID = in.readInt(); + cTime = in.readLong(); + distributedUpgradeVersion = in.readInt(); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/protocol/UpgradeCommand.java b/src/hdfs/org/apache/hadoop/hdfs/server/protocol/UpgradeCommand.java new file mode 100644 index 0000000..d2660c5 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/server/protocol/UpgradeCommand.java @@ -0,0 +1,92 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
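NamespaceInfo above is the payload of the registration handshake: the name-node reports its layout version, namespace id, creation time and build version, and the connecting node is expected to refuse to join if the versions do not line up. Here is a small sketch of that comparison, using only accessors visible above plus the Storage and FSConstants helpers this code already references; the exact error handling is illustrative.

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;

public class HandshakeCheckSketch {
  // Returns normally only if this build should talk to the name-node that sent nsInfo.
  static void checkVersions(NamespaceInfo nsInfo) throws IOException {
    if (!nsInfo.getBuildVersion().equals(Storage.getBuildVersion())) {
      throw new IOException("Incompatible build versions: name-node has "
          + nsInfo.getBuildVersion() + ", this node has " + Storage.getBuildVersion());
    }
    if (FSConstants.LAYOUT_VERSION != nsInfo.getLayoutVersion()) {
      throw new IOException("Layout versions must match: local "
          + FSConstants.LAYOUT_VERSION + " vs name-node " + nsInfo.getLayoutVersion());
    }
  }
}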
+ */ +package org.apache.hadoop.hdfs.server.protocol; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableFactories; +import org.apache.hadoop.io.WritableFactory; + +/** + * This as a generic distributed upgrade command. + * + * During the upgrade cluster components send upgrade commands to each other + * in order to obtain or share information with them. + * It is supposed that each upgrade defines specific upgrade command by + * deriving them from this class. + * The upgrade command contains version of the upgrade, which is verified + * on the receiving side and current status of the upgrade. + */ +public class UpgradeCommand extends DatanodeCommand { + final static int UC_ACTION_UNKNOWN = DatanodeProtocol.DNA_UNKNOWN; + public final static int UC_ACTION_REPORT_STATUS = 100; // report upgrade status + public final static int UC_ACTION_START_UPGRADE = 101; // start upgrade + + private int version; + private short upgradeStatus; + + public UpgradeCommand() { + super(UC_ACTION_UNKNOWN); + this.version = 0; + this.upgradeStatus = 0; + } + + public UpgradeCommand(int action, int version, short status) { + super(action); + this.version = version; + this.upgradeStatus = status; + } + + public int getVersion() { + return this.version; + } + + public short getCurrentStatus() { + return this.upgradeStatus; + } + + ///////////////////////////////////////////////// + // Writable + ///////////////////////////////////////////////// + static { // register a ctor + WritableFactories.setFactory + (UpgradeCommand.class, + new WritableFactory() { + public Writable newInstance() { return new UpgradeCommand(); } + }); + } + + /** + */ + public void write(DataOutput out) throws IOException { + super.write(out); + out.writeInt(this.version); + out.writeShort(this.upgradeStatus); + } + + /** + */ + public void readFields(DataInput in) throws IOException { + super.readFields(in); + this.version = in.readInt(); + this.upgradeStatus = in.readShort(); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java new file mode 100644 index 0000000..a11766f --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java @@ -0,0 +1,864 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
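UpgradeCommand above is the generic envelope exchanged during a distributed upgrade; the UpgradeObjectNamenode.processUpgradeCommand implementations described earlier answer with a command carrying the upgrade's version and its current progress. The snippet below only constructs such a reply from the pieces defined in this patch; the version and progress figures are invented for illustration.

import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;

public class UpgradeCommandSketch {
  public static void main(String[] args) {
    // A status report a name-node upgrade object might return: made-up upgrade
    // version -42, 75 percent complete.
    UpgradeCommand reply =
        new UpgradeCommand(UpgradeCommand.UC_ACTION_REPORT_STATUS, -42, (short) 75);

    System.out.println("reply carries version " + reply.getVersion()
        + " and status " + reply.getCurrentStatus());
  }
}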
+ */ +package org.apache.hadoop.hdfs.tools; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.URI; +import java.util.List; + +import javax.security.auth.login.LoginException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction; +import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsShell; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.shell.Command; +import org.apache.hadoop.fs.shell.CommandFormat; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.UnixUserGroupInformation; +import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.ToolRunner; + +/** + * This class provides some DFS administrative access. + */ +public class DFSAdmin extends FsShell { + + /** + * An abstract class for the execution of a file system command + */ + abstract private static class DFSAdminCommand extends Command { + final DistributedFileSystem dfs; + /** Constructor */ + public DFSAdminCommand(FileSystem fs) { + super(fs.getConf()); + if (!(fs instanceof DistributedFileSystem)) { + throw new IllegalArgumentException("FileSystem " + fs.getUri() + + " is not a distributed file system"); + } + this.dfs = (DistributedFileSystem)fs; + } + } + + /** A class that supports command clearQuota */ + private static class ClearQuotaCommand extends DFSAdminCommand { + private static final String NAME = "clrQuota"; + private static final String USAGE = "-"+NAME+" ..."; + private static final String DESCRIPTION = USAGE + ": " + + "Clear the quota for each directory .\n" + + "\t\tBest effort for the directory. with fault reported if\n" + + "\t\t1. the directory does not exist or is a file, or\n" + + "\t\t2. 
user is not an administrator.\n" + + "\t\tIt does not fault if the directory has no quota."; + + /** Constructor */ + ClearQuotaCommand(String[] args, int pos, FileSystem fs) { + super(fs); + CommandFormat c = new CommandFormat(NAME, 1, Integer.MAX_VALUE); + List parameters = c.parse(args, pos); + this.args = parameters.toArray(new String[parameters.size()]); + } + + /** Check if a command is the clrQuota command + * + * @param cmd A string representation of a command starting with "-" + * @return true if this is a clrQuota command; false otherwise + */ + public static boolean matches(String cmd) { + return ("-"+NAME).equals(cmd); + } + + @Override + public String getCommandName() { + return NAME; + } + + @Override + public void run(Path path) throws IOException { + dfs.setQuota(path, FSConstants.QUOTA_RESET, FSConstants.QUOTA_DONT_SET); + } + } + + /** A class that supports command setQuota */ + private static class SetQuotaCommand extends DFSAdminCommand { + private static final String NAME = "setQuota"; + private static final String USAGE = + "-"+NAME+" ..."; + private static final String DESCRIPTION = + "-setQuota ...: " + + "Set the quota for each directory .\n" + + "\t\tThe directory quota is a long integer that puts a hard limit\n" + + "\t\ton the number of names in the directory tree\n" + + "\t\tBest effort for the directory, with faults reported if\n" + + "\t\t1. N is not a positive integer, or\n" + + "\t\t2. user is not an administrator, or\n" + + "\t\t3. the directory does not exist or is a file, or\n"; + + private final long quota; // the quota to be set + + /** Constructor */ + SetQuotaCommand(String[] args, int pos, FileSystem fs) { + super(fs); + CommandFormat c = new CommandFormat(NAME, 2, Integer.MAX_VALUE); + List parameters = c.parse(args, pos); + this.quota = Long.parseLong(parameters.remove(0)); + this.args = parameters.toArray(new String[parameters.size()]); + } + + /** Check if a command is the setQuota command + * + * @param cmd A string representation of a command starting with "-" + * @return true if this is a count command; false otherwise + */ + public static boolean matches(String cmd) { + return ("-"+NAME).equals(cmd); + } + + @Override + public String getCommandName() { + return NAME; + } + + @Override + public void run(Path path) throws IOException { + dfs.setQuota(path, quota, FSConstants.QUOTA_DONT_SET); + } + } + + /** A class that supports command clearSpaceQuota */ + private static class ClearSpaceQuotaCommand extends DFSAdminCommand { + private static final String NAME = "clrSpaceQuota"; + private static final String USAGE = "-"+NAME+" ..."; + private static final String DESCRIPTION = USAGE + ": " + + "Clear the disk space quota for each directory .\n" + + "\t\tBest effort for the directory. with fault reported if\n" + + "\t\t1. the directory does not exist or is a file, or\n" + + "\t\t2. 
user is not an administrator.\n" + + "\t\tIt does not fault if the directory has no quota."; + + /** Constructor */ + ClearSpaceQuotaCommand(String[] args, int pos, FileSystem fs) { + super(fs); + CommandFormat c = new CommandFormat(NAME, 1, Integer.MAX_VALUE); + List parameters = c.parse(args, pos); + this.args = parameters.toArray(new String[parameters.size()]); + } + + /** Check if a command is the clrQuota command + * + * @param cmd A string representation of a command starting with "-" + * @return true if this is a clrQuota command; false otherwise + */ + public static boolean matches(String cmd) { + return ("-"+NAME).equals(cmd); + } + + @Override + public String getCommandName() { + return NAME; + } + + @Override + public void run(Path path) throws IOException { + dfs.setQuota(path, FSConstants.QUOTA_DONT_SET, FSConstants.QUOTA_RESET); + } + } + + /** A class that supports command setQuota */ + private static class SetSpaceQuotaCommand extends DFSAdminCommand { + private static final String NAME = "setSpaceQuota"; + private static final String USAGE = + "-"+NAME+" ..."; + private static final String DESCRIPTION = USAGE + ": " + + "Set the disk space quota for each directory .\n" + + "\t\tThe space quota is a long integer that puts a hard limit\n" + + "\t\ton the total size of all the files under the directory tree.\n" + + "\t\tThe extra space required for replication is also counted. E.g.\n" + + "\t\ta 1GB file with replication of 3 consumes 3GB of the quota.\n\n" + + "\t\tQuota can also be speciefied with a binary prefix for terabytes,\n" + + "\t\tpetabytes etc (e.g. 50t is 50TB, 5m is 5MB, 3p is 3PB).\n" + + "\t\tBest effort for the directory, with faults reported if\n" + + "\t\t1. N is not a positive integer, or\n" + + "\t\t2. user is not an administrator, or\n" + + "\t\t3. the directory does not exist or is a file, or\n"; + + private long quota; // the quota to be set + + /** Constructor */ + SetSpaceQuotaCommand(String[] args, int pos, FileSystem fs) { + super(fs); + CommandFormat c = new CommandFormat(NAME, 2, Integer.MAX_VALUE); + List parameters = c.parse(args, pos); + String str = parameters.remove(0).trim(); + quota = StringUtils.TraditionalBinaryPrefix.string2long(str); + this.args = parameters.toArray(new String[parameters.size()]); + } + + /** Check if a command is the setQuota command + * + * @param cmd A string representation of a command starting with "-" + * @return true if this is a count command; false otherwise + */ + public static boolean matches(String cmd) { + return ("-"+NAME).equals(cmd); + } + + @Override + public String getCommandName() { + return NAME; + } + + @Override + public void run(Path path) throws IOException { + dfs.setQuota(path, FSConstants.QUOTA_DONT_SET, quota); + } + } + + /** + * Construct a DFSAdmin object. + */ + public DFSAdmin() { + this(null); + } + + /** + * Construct a DFSAdmin object. + */ + public DFSAdmin(Configuration conf) { + super(conf); + } + + /** + * Gives a report on how the FileSystem is doing. + * @exception IOException if the filesystem does not exist. 
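A side note on the space-quota parsing above: StringUtils.TraditionalBinaryPrefix.string2long is what expands suffixes such as the "50t" mentioned in the help text into a byte count, using traditional binary prefixes (so "t" means 2^40). A tiny hypothetical check of that arithmetic (QuotaPrefixDemo is an invented name for illustration):

    import org.apache.hadoop.util.StringUtils;

    public class QuotaPrefixDemo {
      public static void main(String[] args) {
        // Per the help text above, "50t" should mean 50 TB with binary prefixes,
        // i.e. 50 * 2^40 bytes.
        long parsed = StringUtils.TraditionalBinaryPrefix.string2long("50t");
        long expected = 50L * (1L << 40);
        System.out.println(parsed + " == " + expected);
      }
    }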
+ */ + public void report() throws IOException { + if (fs instanceof DistributedFileSystem) { + DistributedFileSystem dfs = (DistributedFileSystem) fs; + DiskStatus ds = dfs.getDiskStatus(); + long capacity = ds.getCapacity(); + long used = ds.getDfsUsed(); + long remaining = ds.getRemaining(); + long presentCapacity = used + remaining; + boolean mode = dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET); + UpgradeStatusReport status = + dfs.distributedUpgradeProgress(UpgradeAction.GET_STATUS); + + if (mode) { + System.out.println("Safe mode is ON"); + } + if (status != null) { + System.out.println(status.getStatusText(false)); + } + System.out.println("Configured Capacity: " + capacity + + " (" + StringUtils.byteDesc(capacity) + ")"); + System.out.println("Present Capacity: " + presentCapacity + + " (" + StringUtils.byteDesc(presentCapacity) + ")"); + System.out.println("DFS Remaining: " + remaining + + " (" + StringUtils.byteDesc(remaining) + ")"); + System.out.println("DFS Used: " + used + + " (" + StringUtils.byteDesc(used) + ")"); + System.out.println("DFS Used%: " + + StringUtils.limitDecimalTo2(((1.0 * used) / presentCapacity) * 100) + + "%"); + + /* These counts are not always upto date. They are updated after + * iteration of an internal list. Should be updated in a few seconds to + * minutes. Use "-metaSave" to list of all such blocks and accurate + * counts. + */ + System.out.println("Under replicated blocks: " + + dfs.getUnderReplicatedBlocksCount()); + System.out.println("Blocks with corrupt replicas: " + + dfs.getCorruptBlocksCount()); + System.out.println("Missing blocks: " + + dfs.getMissingBlocksCount()); + + System.out.println(); + + System.out.println("-------------------------------------------------"); + + DatanodeInfo[] live = dfs.getClient().datanodeReport( + DatanodeReportType.LIVE); + DatanodeInfo[] dead = dfs.getClient().datanodeReport( + DatanodeReportType.DEAD); + System.out.println("Datanodes available: " + live.length + + " (" + (live.length + dead.length) + " total, " + + dead.length + " dead)\n"); + + for (DatanodeInfo dn : live) { + System.out.println(dn.getDatanodeReport()); + System.out.println(); + } + for (DatanodeInfo dn : dead) { + System.out.println(dn.getDatanodeReport()); + System.out.println(); + } + } + } + + /** + * Safe mode maintenance command. + * Usage: java DFSAdmin -safemode [enter | leave | get] + * @param argv List of of command line parameters. + * @param idx The index of the command that is being processed. + * @exception IOException if the filesystem does not exist. 
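Like every other subcommand in this class, report() is normally reached through run() via ToolRunner, exactly as main() does at the bottom of the file. A minimal, hypothetical driver (ReportDriver is not part of the patch) equivalent to running "hadoop dfsadmin -report" would be:

    import org.apache.hadoop.hdfs.tools.DFSAdmin;
    import org.apache.hadoop.util.ToolRunner;

    public class ReportDriver {
      public static void main(String[] args) throws Exception {
        // ToolRunner parses generic options and supplies a Configuration
        // before run() dispatches to report().
        int rc = ToolRunner.run(new DFSAdmin(), new String[] {"-report"});
        System.exit(rc);
      }
    }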
+ */ + public void setSafeMode(String[] argv, int idx) throws IOException { + if (!(fs instanceof DistributedFileSystem)) { + System.err.println("FileSystem is " + fs.getUri()); + return; + } + if (idx != argv.length - 1) { + printUsage("-safemode"); + return; + } + FSConstants.SafeModeAction action; + Boolean waitExitSafe = false; + + if ("leave".equalsIgnoreCase(argv[idx])) { + action = FSConstants.SafeModeAction.SAFEMODE_LEAVE; + } else if ("enter".equalsIgnoreCase(argv[idx])) { + action = FSConstants.SafeModeAction.SAFEMODE_ENTER; + } else if ("get".equalsIgnoreCase(argv[idx])) { + action = FSConstants.SafeModeAction.SAFEMODE_GET; + } else if ("wait".equalsIgnoreCase(argv[idx])) { + action = FSConstants.SafeModeAction.SAFEMODE_GET; + waitExitSafe = true; + } else { + printUsage("-safemode"); + return; + } + DistributedFileSystem dfs = (DistributedFileSystem) fs; + boolean inSafeMode = dfs.setSafeMode(action); + + // + // If we are waiting for safemode to exit, then poll and + // sleep till we are out of safemode. + // + if (waitExitSafe) { + while (inSafeMode) { + try { + Thread.sleep(5000); + } catch (java.lang.InterruptedException e) { + throw new IOException("Wait Interrupted"); + } + inSafeMode = dfs.setSafeMode(action); + } + } + + System.out.println("Safe mode is " + (inSafeMode ? "ON" : "OFF")); + } + + /** + * Command to ask the namenode to save the namespace. + * Usage: java DFSAdmin -saveNamespace + * @exception IOException + * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace() + */ + public int saveNamespace() throws IOException { + int exitCode = -1; + + if (!(fs instanceof DistributedFileSystem)) { + System.err.println("FileSystem is " + fs.getUri()); + return exitCode; + } + + DistributedFileSystem dfs = (DistributedFileSystem) fs; + dfs.saveNamespace(); + exitCode = 0; + + return exitCode; + } + + /** + * Command to ask the namenode to reread the hosts and excluded hosts + * file. + * Usage: java DFSAdmin -refreshNodes + * @exception IOException + */ + public int refreshNodes() throws IOException { + int exitCode = -1; + + if (!(fs instanceof DistributedFileSystem)) { + System.err.println("FileSystem is " + fs.getUri()); + return exitCode; + } + + DistributedFileSystem dfs = (DistributedFileSystem) fs; + dfs.refreshNodes(); + exitCode = 0; + + return exitCode; + } + + private void printHelp(String cmd) { + String summary = "hadoop dfsadmin is the command to execute DFS administrative commands.\n" + + "The full syntax is: \n\n" + + "hadoop dfsadmin [-report] [-safemode ]\n" + + "\t[-saveNamespace]\n" + + "\t[-refreshNodes]\n" + + "\t[" + SetQuotaCommand.USAGE + "]\n" + + "\t[" + ClearQuotaCommand.USAGE +"]\n" + + "\t[" + SetSpaceQuotaCommand.USAGE + "]\n" + + "\t[" + ClearSpaceQuotaCommand.USAGE +"]\n" + + "\t[-refreshServiceAcl]\n" + + "\t[-help [cmd]]\n"; + + String report ="-report: \tReports basic filesystem information and statistics.\n"; + + String safemode = "-safemode : Safe mode maintenance command.\n" + + "\t\tSafe mode is a Namenode state in which it\n" + + "\t\t\t1. does not accept changes to the name space (read-only)\n" + + "\t\t\t2. does not replicate or delete blocks.\n" + + "\t\tSafe mode is entered automatically at Namenode startup, and\n" + + "\t\tleaves safe mode automatically when the configured minimum\n" + + "\t\tpercentage of blocks satisfies the minimum replication\n" + + "\t\tcondition. 
Safe mode can also be entered manually, but then\n" + + "\t\tit can only be turned off manually as well.\n"; + + String saveNamespace = "-saveNamespace:\t" + + "Save current namespace into storage directories and reset edits log.\n" + + "\t\tRequires superuser permissions and safe mode.\n"; + + String refreshNodes = "-refreshNodes: \tUpdates the set of hosts allowed " + + "to connect to namenode.\n\n" + + "\t\tRe-reads the config file to update values defined by \n" + + "\t\tdfs.hosts and dfs.host.exclude and reads the \n" + + "\t\tentires (hostnames) in those files.\n\n" + + "\t\tEach entry not defined in dfs.hosts but in \n" + + "\t\tdfs.hosts.exclude is decommissioned. Each entry defined \n" + + "\t\tin dfs.hosts and also in dfs.host.exclude is stopped from \n" + + "\t\tdecommissioning if it has aleady been marked for decommission.\n" + + "\t\tEntires not present in both the lists are decommissioned.\n"; + + String finalizeUpgrade = "-finalizeUpgrade: Finalize upgrade of HDFS.\n" + + "\t\tDatanodes delete their previous version working directories,\n" + + "\t\tfollowed by Namenode doing the same.\n" + + "\t\tThis completes the upgrade process.\n"; + + String upgradeProgress = "-upgradeProgress : \n" + + "\t\trequest current distributed upgrade status, \n" + + "\t\ta detailed status or force the upgrade to proceed.\n"; + + String metaSave = "-metasave : \tSave Namenode's primary data structures\n" + + "\t\tto in the directory specified by hadoop.log.dir property.\n" + + "\t\t will contain one line for each of the following\n" + + "\t\t\t1. Datanodes heart beating with Namenode\n" + + "\t\t\t2. Blocks waiting to be replicated\n" + + "\t\t\t3. Blocks currrently being replicated\n" + + "\t\t\t4. Blocks waiting to be deleted\n"; + + String refreshServiceAcl = "-refreshServiceAcl: Reload the service-level authorization policy file\n" + + "\t\tNamenode will reload the authorization policy file.\n"; + + String help = "-help [cmd]: \tDisplays help for the given command or all commands if none\n" + + "\t\tis specified.\n"; + + if ("report".equals(cmd)) { + System.out.println(report); + } else if ("safemode".equals(cmd)) { + System.out.println(safemode); + } else if ("saveNamespace".equals(cmd)) { + System.out.println(saveNamespace); + } else if ("refreshNodes".equals(cmd)) { + System.out.println(refreshNodes); + } else if ("finalizeUpgrade".equals(cmd)) { + System.out.println(finalizeUpgrade); + } else if ("upgradeProgress".equals(cmd)) { + System.out.println(upgradeProgress); + } else if ("metasave".equals(cmd)) { + System.out.println(metaSave); + } else if (SetQuotaCommand.matches("-"+cmd)) { + System.out.println(SetQuotaCommand.DESCRIPTION); + } else if (ClearQuotaCommand.matches("-"+cmd)) { + System.out.println(ClearQuotaCommand.DESCRIPTION); + } else if (SetSpaceQuotaCommand.matches("-"+cmd)) { + System.out.println(SetSpaceQuotaCommand.DESCRIPTION); + } else if (ClearSpaceQuotaCommand.matches("-"+cmd)) { + System.out.println(ClearSpaceQuotaCommand.DESCRIPTION); + } else if ("refreshServiceAcl".equals(cmd)) { + System.out.println(refreshServiceAcl); + } else if ("help".equals(cmd)) { + System.out.println(help); + } else { + System.out.println(summary); + System.out.println(report); + System.out.println(safemode); + System.out.println(saveNamespace); + System.out.println(refreshNodes); + System.out.println(finalizeUpgrade); + System.out.println(upgradeProgress); + System.out.println(metaSave); + System.out.println(SetQuotaCommand.DESCRIPTION); + 
System.out.println(ClearQuotaCommand.DESCRIPTION); + System.out.println(SetSpaceQuotaCommand.DESCRIPTION); + System.out.println(ClearSpaceQuotaCommand.DESCRIPTION); + System.out.println(refreshServiceAcl); + System.out.println(help); + System.out.println(); + ToolRunner.printGenericCommandUsage(System.out); + } + + } + + + /** + * Command to ask the namenode to finalize previously performed upgrade. + * Usage: java DFSAdmin -finalizeUpgrade + * @exception IOException + */ + public int finalizeUpgrade() throws IOException { + int exitCode = -1; + + if (!(fs instanceof DistributedFileSystem)) { + System.out.println("FileSystem is " + fs.getUri()); + return exitCode; + } + + DistributedFileSystem dfs = (DistributedFileSystem) fs; + dfs.finalizeUpgrade(); + exitCode = 0; + + return exitCode; + } + + /** + * Command to request current distributed upgrade status, + * a detailed status, or to force the upgrade to proceed. + * + * Usage: java DFSAdmin -upgradeProgress [status | details | force] + * @exception IOException + */ + public int upgradeProgress(String[] argv, int idx) throws IOException { + if (!(fs instanceof DistributedFileSystem)) { + System.out.println("FileSystem is " + fs.getUri()); + return -1; + } + if (idx != argv.length - 1) { + printUsage("-upgradeProgress"); + return -1; + } + + UpgradeAction action; + if ("status".equalsIgnoreCase(argv[idx])) { + action = UpgradeAction.GET_STATUS; + } else if ("details".equalsIgnoreCase(argv[idx])) { + action = UpgradeAction.DETAILED_STATUS; + } else if ("force".equalsIgnoreCase(argv[idx])) { + action = UpgradeAction.FORCE_PROCEED; + } else { + printUsage("-upgradeProgress"); + return -1; + } + + DistributedFileSystem dfs = (DistributedFileSystem) fs; + UpgradeStatusReport status = dfs.distributedUpgradeProgress(action); + String statusText = (status == null ? + "There are no upgrades in progress." : + status.getStatusText(action == UpgradeAction.DETAILED_STATUS)); + System.out.println(statusText); + return 0; + } + + /** + * Dumps DFS data structures into specified file. + * Usage: java DFSAdmin -metasave filename + * @param argv List of of command line parameters. + * @param idx The index of the command that is being processed. + * @exception IOException if an error accoured wile accessing + * the file or path. + */ + public int metaSave(String[] argv, int idx) throws IOException { + String pathname = argv[idx]; + DistributedFileSystem dfs = (DistributedFileSystem) fs; + dfs.metaSave(pathname); + System.out.println("Created file " + pathname + " on server " + + dfs.getUri()); + return 0; + } + + private static UnixUserGroupInformation getUGI(Configuration conf) + throws IOException { + UnixUserGroupInformation ugi = null; + try { + ugi = UnixUserGroupInformation.login(conf, true); + } catch (LoginException e) { + throw (IOException)(new IOException( + "Failed to get the current user's information.").initCause(e)); + } + return ugi; + } + + /** + * Refresh the authorization policy on the {@link NameNode}. 
+ * @return exitcode 0 on success, non-zero on failure + * @throws IOException + */ + public int refreshServiceAcl() throws IOException { + // Get the current configuration + Configuration conf = getConf(); + + // Create the client + RefreshAuthorizationPolicyProtocol refreshProtocol = + (RefreshAuthorizationPolicyProtocol) + RPC.getProxy(RefreshAuthorizationPolicyProtocol.class, + RefreshAuthorizationPolicyProtocol.versionID, + NameNode.getAddress(conf), getUGI(conf), conf, + NetUtils.getSocketFactory(conf, + RefreshAuthorizationPolicyProtocol.class)); + + // Refresh the authorization policy in-effect + refreshProtocol.refreshServiceAcl(); + + return 0; + } + + /** + * Displays format of commands. + * @param cmd The command that is being executed. + */ + private static void printUsage(String cmd) { + if ("-report".equals(cmd)) { + System.err.println("Usage: java DFSAdmin" + + " [-report]"); + } else if ("-safemode".equals(cmd)) { + System.err.println("Usage: java DFSAdmin" + + " [-safemode enter | leave | get | wait]"); + } else if ("-saveNamespace".equals(cmd)) { + System.err.println("Usage: java DFSAdmin" + + " [-saveNamespace]"); + } else if ("-refreshNodes".equals(cmd)) { + System.err.println("Usage: java DFSAdmin" + + " [-refreshNodes]"); + } else if ("-finalizeUpgrade".equals(cmd)) { + System.err.println("Usage: java DFSAdmin" + + " [-finalizeUpgrade]"); + } else if ("-upgradeProgress".equals(cmd)) { + System.err.println("Usage: java DFSAdmin" + + " [-upgradeProgress status | details | force]"); + } else if ("-metasave".equals(cmd)) { + System.err.println("Usage: java DFSAdmin" + + " [-metasave filename]"); + } else if (SetQuotaCommand.matches(cmd)) { + System.err.println("Usage: java DFSAdmin" + + " [" + SetQuotaCommand.USAGE+"]"); + } else if (ClearQuotaCommand.matches(cmd)) { + System.err.println("Usage: java DFSAdmin" + + " ["+ClearQuotaCommand.USAGE+"]"); + } else if (SetSpaceQuotaCommand.matches(cmd)) { + System.err.println("Usage: java DFSAdmin" + + " [" + SetSpaceQuotaCommand.USAGE+"]"); + } else if (ClearSpaceQuotaCommand.matches(cmd)) { + System.err.println("Usage: java DFSAdmin" + + " ["+ClearSpaceQuotaCommand.USAGE+"]"); + } else if ("-refreshServiceAcl".equals(cmd)) { + System.err.println("Usage: java DFSAdmin" + + " [-refreshServiceAcl]"); + } else { + System.err.println("Usage: java DFSAdmin"); + System.err.println(" [-report]"); + System.err.println(" [-safemode enter | leave | get | wait]"); + System.err.println(" [-saveNamespace]"); + System.err.println(" [-refreshNodes]"); + System.err.println(" [-finalizeUpgrade]"); + System.err.println(" [-upgradeProgress status | details | force]"); + System.err.println(" [-metasave filename]"); + System.err.println(" [-refreshServiceAcl]"); + System.err.println(" ["+SetQuotaCommand.USAGE+"]"); + System.err.println(" ["+ClearQuotaCommand.USAGE+"]"); + System.err.println(" ["+SetSpaceQuotaCommand.USAGE+"]"); + System.err.println(" ["+ClearSpaceQuotaCommand.USAGE+"]"); + System.err.println(" [-help [cmd]]"); + System.err.println(); + ToolRunner.printGenericCommandUsage(System.err); + } + } + + /** + * @param argv The parameters passed to this program. + * @exception Exception if the filesystem does not exist. + * @return 0 on success, non zero on error. 
+ */ + @Override + public int run(String[] argv) throws Exception { + + if (argv.length < 1) { + printUsage(""); + return -1; + } + + int exitCode = -1; + int i = 0; + String cmd = argv[i++]; + + // + // verify that we have enough command line parameters + // + if ("-safemode".equals(cmd)) { + if (argv.length != 2) { + printUsage(cmd); + return exitCode; + } + } else if ("-report".equals(cmd)) { + if (argv.length != 1) { + printUsage(cmd); + return exitCode; + } + } else if ("-saveNamespace".equals(cmd)) { + if (argv.length != 1) { + printUsage(cmd); + return exitCode; + } + } else if ("-refreshNodes".equals(cmd)) { + if (argv.length != 1) { + printUsage(cmd); + return exitCode; + } + } else if ("-finalizeUpgrade".equals(cmd)) { + if (argv.length != 1) { + printUsage(cmd); + return exitCode; + } + } else if ("-upgradeProgress".equals(cmd)) { + if (argv.length != 2) { + printUsage(cmd); + return exitCode; + } + } else if ("-metasave".equals(cmd)) { + if (argv.length != 2) { + printUsage(cmd); + return exitCode; + } + } else if ("-refreshServiceAcl".equals(cmd)) { + if (argv.length != 1) { + printUsage(cmd); + return exitCode; + } + } + + // initialize DFSAdmin + try { + // Substitute client address with dnprotocol address if it is configured + InetSocketAddress address = NameNode.getDNProtocolAddress(getConf()); + if (address != null) { + int dnPort = address.getPort(); + URI fileSystem = FileSystem.getDefaultUri(getConf()); + // Completely rebuilding filesystem URI with a new port + URI dnProtocolURI = new URI(fileSystem.getScheme(), fileSystem + .getUserInfo(), fileSystem.getHost(), dnPort, fileSystem.getPath(), + fileSystem.getQuery(), fileSystem.getFragment()); + FileSystem.setDefaultUri(getConf(), dnProtocolURI); + } + init(); + } catch (RPC.VersionMismatch v) { + System.err.println("Version Mismatch between client and server" + + "... command aborted."); + return exitCode; + } catch (IOException e) { + System.err.println(e.getMessage()); + System.err.println("Bad connection to DFS... 
command aborted."); + return exitCode; + } + + exitCode = 0; + try { + if ("-report".equals(cmd)) { + report(); + } else if ("-safemode".equals(cmd)) { + setSafeMode(argv, i); + } else if ("-saveNamespace".equals(cmd)) { + exitCode = saveNamespace(); + } else if ("-refreshNodes".equals(cmd)) { + exitCode = refreshNodes(); + } else if ("-finalizeUpgrade".equals(cmd)) { + exitCode = finalizeUpgrade(); + } else if ("-upgradeProgress".equals(cmd)) { + exitCode = upgradeProgress(argv, i); + } else if ("-metasave".equals(cmd)) { + exitCode = metaSave(argv, i); + } else if (ClearQuotaCommand.matches(cmd)) { + exitCode = new ClearQuotaCommand(argv, i, fs).runAll(); + } else if (SetQuotaCommand.matches(cmd)) { + exitCode = new SetQuotaCommand(argv, i, fs).runAll(); + } else if (ClearSpaceQuotaCommand.matches(cmd)) { + exitCode = new ClearSpaceQuotaCommand(argv, i, fs).runAll(); + } else if (SetSpaceQuotaCommand.matches(cmd)) { + exitCode = new SetSpaceQuotaCommand(argv, i, fs).runAll(); + } else if ("-refreshServiceAcl".equals(cmd)) { + exitCode = refreshServiceAcl(); + } else if ("-help".equals(cmd)) { + if (i < argv.length) { + printHelp(argv[i]); + } else { + printHelp(""); + } + } else { + exitCode = -1; + System.err.println(cmd.substring(1) + ": Unknown command"); + printUsage(""); + } + } catch (IllegalArgumentException arge) { + exitCode = -1; + System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage()); + printUsage(cmd); + } catch (RemoteException e) { + // + // This is a error returned by hadoop server. Print + // out the first line of the error mesage, ignore the stack trace. + exitCode = -1; + try { + String[] content; + content = e.getLocalizedMessage().split("\n"); + System.err.println(cmd.substring(1) + ": " + + content[0]); + } catch (Exception ex) { + System.err.println(cmd.substring(1) + ": " + + ex.getLocalizedMessage()); + } + } catch (Exception e) { + exitCode = -1; + System.err.println(cmd.substring(1) + ": " + + e.getLocalizedMessage()); + } + return exitCode; + } + + /** + * main() has some simple utility methods. + * @param argv Command line parameters. + * @exception Exception if the filesystem does not exist. + */ + public static void main(String[] argv) throws Exception { + int res = ToolRunner.run(new DFSAdmin(), argv); + System.exit(res); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/tools/DFSck.java b/src/hdfs/org/apache/hadoop/hdfs/tools/DFSck.java new file mode 100644 index 0000000..ea2a232 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/tools/DFSck.java @@ -0,0 +1,248 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.tools; + +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.PrintStream; +import java.io.BufferedReader; +import java.net.URL; +import java.net.URLConnection; +import java.net.URLEncoder; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.security.SecurityUtil; + +/** + * This class provides rudimentary checking of DFS volumes for errors and + * sub-optimal conditions. + *

<p>The tool scans all files and directories, starting from an indicated
+ * root path. The following abnormal conditions are detected and handled:</p>
+ * <ul>
+ * <li>files with blocks that are completely missing from all datanodes.<br/>
+ * In this case the tool can perform one of the following actions:
+ *  <ul>
+ *      <li>none ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_NONE})</li>
+ *      <li>move corrupted files to /lost+found directory on DFS
+ * ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_MOVE}). Remaining data blocks are saved as
+ * block chains, representing the longest consecutive series of valid blocks.</li>
+ *      <li>delete corrupted files ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_DELETE})</li>
+ *  </ul>
+ *  </li>
+ *  <li>detect files with under-replicated or over-replicated blocks</li>
+ * </ul>
+ * Additionally, the tool collects a detailed overall DFS statistics, and + * optionally can print detailed statistics on block locations and replication + * factors of each file. + * The tool also provides and option to filter open files during the scan. + * + */ +public class DFSck extends Configured implements Tool { + + DFSck() { + this.out = System.out; + } + + private final PrintStream out; + + /** + * Filesystem checker. + * @param conf current Configuration + * @throws Exception + */ + public DFSck(Configuration conf) throws Exception { + this(conf, System.out); + } + + public DFSck(Configuration conf, PrintStream out) throws IOException { + super(conf); + this.out = out; + } + + + private String getInfoServer() throws IOException { + return NetUtils.getServerAddress(getConf(), "dfs.info.bindAddress", + "dfs.info.port", "dfs.http.address"); + } + + /** + * Print fsck usage information + */ + static void printUsage() { + System.err.println("Usage: DFSck [-list-corruptfileblocks | " + + "[-move | -delete | -openforwrite ] " + + "[-files [-blocks [-locations | -racks]]]] "); + System.err.println("\t\tstart checking from this path"); + System.err.println("\t-move\tmove corrupted files to /lost+found"); + System.err.println("\t-delete\tdelete corrupted files"); + System.err.println("\t-files\tprint out files being checked"); + System.err.println("\t-openforwrite\tprint out files opened for write"); + System.err.println("\t-list-corruptfileblocks\tprint out list of missing " + + "blocks and files they belong to"); + System.err.println("\t-blocks\tprint out block report"); + System.err.println("\t-locations\tprint out locations for every block"); + System.err.println("\t-racks\tprint out network topology for data-node locations"); + System.err.println("\t\tBy default fsck ignores files opened for write, " + + "use -openforwrite to report such files. They are usually " + + " tagged CORRUPT or HEALTHY depending on their block " + + "allocation status"); + ToolRunner.printGenericCommandUsage(System.err); + } + + /** + * To get the list, we need to call iteratively until the server says + * there is no more left. 
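The checker is itself a Tool, so besides "hadoop fsck <path> ..." it can be driven programmatically the same way main() below does it. A hypothetical example (FsckDriver and the /user/data path are invented) mirroring "hadoop fsck /user/data -files -blocks":

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.tools.DFSck;
    import org.apache.hadoop.util.ToolRunner;

    public class FsckDriver {
      public static void main(String[] args) throws Exception {
        // run() turns the arguments into a query against the namenode's HTTP
        // servlet (resolved by getInfoServer() above) and streams back the report.
        int rc = ToolRunner.run(new DFSck(new Configuration()),
                                new String[] {"/user/data", "-files", "-blocks"});
        System.exit(rc);
      }
    }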
+ */ + private Integer listCorruptFileBlocks(String dir, String baseUrl) + throws IOException { + int errCode = -1; + int numCorrupt = 0; + String lastBlock = null; + final String noCorruptLine = "has no CORRUPT files"; + final String noMoreCorruptLine = "has no more CORRUPT files"; + boolean allDone = false; + while (!allDone) { + final StringBuffer url = new StringBuffer(baseUrl); + if (lastBlock != null) { + url.append("&startblockafter=").append(lastBlock); + } + URL path = new URL(url.toString()); + // SecurityUtil.fetchServiceTicket(path); + URLConnection connection = path.openConnection(); + InputStream stream = connection.getInputStream(); + BufferedReader input = new BufferedReader(new InputStreamReader(stream, + "UTF-8")); + try { + String line = null; + while ((line = input.readLine()) != null) { + if ((line.endsWith(noCorruptLine)) || + (line.endsWith(noMoreCorruptLine)) || + (line.endsWith(NamenodeFsck.NONEXISTENT_STATUS))) { + allDone = true; + break; + } + if ((line.isEmpty()) + || (line.startsWith("FSCK started by")) + || (line.startsWith("The filesystem under path"))) + continue; + numCorrupt++; + if (numCorrupt == 1) { + out.println("The list of corrupt files under path '" + + dir + "' are:"); + } + out.println(line); + try { + // Get the block # that we need to send in next call + lastBlock = line.split("\t")[0]; + } catch (Exception e) { + allDone = true; + break; + } + } + } finally { + input.close(); + } + } + out.println("The filesystem under path '" + dir + "' has " + + numCorrupt + " CORRUPT files"); + if (numCorrupt == 0) + errCode = 0; + return errCode; + } + + /** + * @param args + */ + public int run(String[] args) throws Exception { + String fsName = getInfoServer(); + if (args.length == 0) { + printUsage(); + return -1; + } + StringBuffer url = new StringBuffer("http://"+fsName+"/fsck?path="); + String dir = "/"; + // find top-level dir first + for (int idx = 0; idx < args.length; idx++) { + if (!args[idx].startsWith("-")) { dir = args[idx]; break; } + } + url.append(URLEncoder.encode(dir, "UTF-8")); + boolean doListCorruptFileBlocks = false; + for (int idx = 0; idx < args.length; idx++) { + if (args[idx].equals("-move")) { url.append("&move=1"); } + else if (args[idx].equals("-delete")) { url.append("&delete=1"); } + else if (args[idx].equals("-files")) { url.append("&files=1"); } + else if (args[idx].equals("-openforwrite")) { url.append("&openforwrite=1"); } + else if (args[idx].equals("-blocks")) { url.append("&blocks=1"); } + else if (args[idx].equals("-locations")) { url.append("&locations=1"); } + else if (args[idx].equals("-racks")) { url.append("&racks=1"); } + else if (args[idx].equals("-list-corruptfileblocks")) { + url.append("&listcorruptfileblocks=1"); + doListCorruptFileBlocks = true; + } + } + if (doListCorruptFileBlocks) { + return listCorruptFileBlocks(dir, url.toString()); + } + URL path = new URL(url.toString()); + URLConnection connection = path.openConnection(); + InputStream stream = connection.getInputStream(); + BufferedReader input = new BufferedReader(new InputStreamReader( + stream, "UTF-8")); + String line = null; + String lastLine = null; + int errCode = -1; + try { + while ((line = input.readLine()) != null) { + out.println(line); + lastLine = line; + } + } finally { + input.close(); + } + if (lastLine.endsWith(NamenodeFsck.HEALTHY_STATUS)) { + errCode = 0; + } else if (lastLine.endsWith(NamenodeFsck.CORRUPT_STATUS)) { + errCode = 1; + } else if (lastLine.endsWith(NamenodeFsck.NONEXISTENT_STATUS)) { + errCode = 0; + } + return 
errCode; + } + + static{ + Configuration.addDefaultResource("hdfs-default.xml"); + Configuration.addDefaultResource("hdfs-site.xml"); + } + + public static void main(String[] args) throws Exception { + // -files option is also used by GenericOptionsParser + // Make sure that is not the first argument for fsck + int res = -1; + if ((args.length == 0 ) || ("-files".equals(args[0]))) + printUsage(); + else + res = ToolRunner.run(new DFSck(new Configuration()), args); + System.exit(res); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/tools/HDFSConcat.java b/src/hdfs/org/apache/hadoop/hdfs/tools/HDFSConcat.java new file mode 100644 index 0000000..0c5f528 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/tools/HDFSConcat.java @@ -0,0 +1,54 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.tools; + + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DistributedFileSystem; + + +public class HDFSConcat { + private final static String def_uri = "hdfs://localhost:9000"; + /** + * @param args + */ + public static void main(String... 
args) throws IOException { + + if(args.length < 2) { + System.err.println("Usage HDFSConcat target srcs.."); + System.exit(0); + } + + Configuration conf = new Configuration(); + String uri = conf.get("fs.default.name", def_uri); + Path path = new Path(uri); + DistributedFileSystem dfs = + (DistributedFileSystem)FileSystem.get(path.toUri(), conf); + + Path [] srcs = new Path[args.length-1]; + for(int i=1; i hadoopObjectNames; + private MBeanServerConnection mbsc; + private String service = "NameNode", port ="", server="localhost"; + private String localVMPid = null; + + public JMXGet() { + } + + public void setService(String service) { + this.service = service; + } + + public void setPort(String port) { + this.port = port; + } + + public void setServer(String server) { + this.server = server; + } + + public void setLocalVMPid(String pid) { + this.localVMPid = pid; + } + + /** + * print all attributes' values + */ + public void printAllValues() throws Exception { + err("List of all the available keys:"); + + Object val = null; + + for (ObjectName oname: hadoopObjectNames) { + err(">>>>>>>>jmx name: " + oname.getCanonicalKeyPropertyListString()); + MBeanInfo mbinfo = mbsc.getMBeanInfo(oname); + MBeanAttributeInfo [] mbinfos = mbinfo.getAttributes(); + + for (MBeanAttributeInfo mb: mbinfos) { + val = mbsc.getAttribute(oname, mb.getName()); + System.out.format(format,mb.getName(),val.toString()); + } + } + } + + /** + * get single value by key + */ + public String getValue(String key) throws Exception{ + + Object val = null; + + for (ObjectName oname: hadoopObjectNames) { + try { + val = mbsc.getAttribute(oname, key); + } catch (AttributeNotFoundException anfe) { + /*just go to the next */ + continue; + } catch(ReflectionException re) { + if (re.getCause() instanceof NoSuchMethodException) { + continue; + } + } + err("Info: key = " + key + "; val = " + val); + break; + } + + return (val == null) ? 
null : val.toString(); + } + + + /** + * @param args + * @throws Exception + * initializes MBeanServer + */ + public void init() throws Exception{ + + err("init: server="+server+";port="+port+";service="+ + service+";localVMPid="+localVMPid); + + String url_string = null; + // build connection url + if (localVMPid != null) { + // from the file /tmp/hsperfdata* + url_string = ConnectorAddressLink.importFrom(Integer.parseInt(localVMPid)); + } else if (!port.isEmpty() && !server.isEmpty()) { + // using server and port + url_string = "service:jmx:rmi:///jndi/rmi://"+server+ ":"+port+"/jmxrmi"; + } // else url stays null + + // Create an RMI connector client and + // connect it to the RMI connector server + + if (url_string == null) { //assume local vm (for example for Testing) + mbsc = ManagementFactory.getPlatformMBeanServer(); + } else { + JMXServiceURL url = new JMXServiceURL(url_string); + + err("Create RMI connector and connect to the RMI connector server" + url); + + JMXConnector jmxc = JMXConnectorFactory.connect(url, null); + // Get an MBeanServerConnection + // + err("\nGet an MBeanServerConnection"); + mbsc = jmxc.getMBeanServerConnection(); + } + + // Get domains from MBeanServer + // + err("\nDomains:"); + + String domains[] = mbsc.getDomains(); + Arrays.sort(domains); + for (String domain : domains) { + err("\tDomain = " + domain); + } + + // Get MBeanServer's default domain + // + err("\nMBeanServer default domain = " + mbsc.getDefaultDomain()); + + // Get MBean count + // + err("\nMBean count = " + mbsc.getMBeanCount()); + + // Query MBean names for specific domain "hadoop" and service + ObjectName query = new ObjectName("hadoop:service="+service+",*"); + hadoopObjectNames = new ArrayList(5); + err("\nQuery MBeanServer MBeans:"); + Set names = + new TreeSet(mbsc.queryNames(query, null)); + + for (ObjectName name : names) { + hadoopObjectNames.add(name); + err("hadoop services: " + name); + } + + } + + /** + * Print JMXGet usage information + */ + static void printUsage(Options opts) { + HelpFormatter formatter = new HelpFormatter(); + formatter.printHelp("jmxget options are: ", opts); + } + + + /** + * @param msg + */ + private static void err(String msg) { + System.err.println(msg); + } + + /** + * parse args + */ + private static CommandLine parseArgs(Options opts, String ...args) + throws IllegalArgumentException{ + + OptionBuilder.withArgName("NameNode|DataNode"); + OptionBuilder.hasArg(); + OptionBuilder.withDescription("specify jmx service (NameNode by default)"); + Option jmx_service = OptionBuilder.create("service"); + + OptionBuilder.withArgName("mbean server"); + OptionBuilder.hasArg(); + OptionBuilder.withDescription("specify mbean server (localhost by default)"); + Option jmx_server = OptionBuilder.create("server"); + + OptionBuilder.withDescription("print help"); + Option jmx_help = OptionBuilder.create("help"); + + OptionBuilder.withArgName("mbean server port"); + OptionBuilder.hasArg(); + OptionBuilder.withDescription("specify mbean server port, " + + "if missing - it will try to connect to MBean Server in the same VM"); + Option jmx_port = OptionBuilder.create("port"); + + OptionBuilder.withArgName("VM's pid"); + OptionBuilder.hasArg(); + OptionBuilder.withDescription("connect to the VM on the same machine"); + Option jmx_localVM = OptionBuilder.create("localVM"); + + opts.addOption(jmx_server); + opts.addOption(jmx_help); + opts.addOption(jmx_service); + opts.addOption(jmx_port); + opts.addOption(jmx_localVM); + + CommandLine commandLine=null; + 
CommandLineParser parser = new GnuParser(); + try { + commandLine = parser.parse(opts, args, true); + } catch(ParseException e) { + printUsage(opts); + throw new IllegalArgumentException("invalid args: " + e.getMessage()); + } + return commandLine; + } + + /** + * main + * @param args + */ + public static void main(String[] args) { + + int res = -1; + + // parse arguments + Options opts = new Options(); + CommandLine commandLine = null; + try { + commandLine = parseArgs(opts, args); + } catch (IllegalArgumentException iae) { + commandLine = null; + } + + if (commandLine == null) { + // invalid arguments + err("Invalid args"); + printUsage(opts); + System.exit(-1); + } + + JMXGet jm = new JMXGet(); + + if (commandLine.hasOption("port")) { + jm.setPort(commandLine.getOptionValue("port")); + } + if (commandLine.hasOption("service")) { + jm.setService(commandLine.getOptionValue("service")); + } + if (commandLine.hasOption("server")) { + jm.setServer(commandLine.getOptionValue("server")); + } + + if (commandLine.hasOption("localVM")) { + // from the file /tmp/hsperfdata* + jm.setLocalVMPid(commandLine.getOptionValue("localVM")); + } + + if (commandLine.hasOption("help")) { + printUsage(opts); + System.exit(0); + } + + // rest of args + args = commandLine.getArgs(); + + try { + jm.init(); + + if (args.length == 0) { + jm.printAllValues(); + } else { + for (String key: args) { + err("key = " + key); + String val = jm.getValue(key); + if (val!=null) + System.out.format(JMXGet.format,key,val); + } + } + res = 0; + } catch (Exception re) { + re.printStackTrace(); + res = -1; + } + + System.exit(res); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/util/ByteArray.java b/src/hdfs/org/apache/hadoop/hdfs/util/ByteArray.java new file mode 100644 index 0000000..b52a2b0 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/util/ByteArray.java @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.util; + +import java.util.Arrays; + +/** + * Wrapper for byte[] to use byte[] as key in HashMap + */ +public class ByteArray { + private int hash = 0; // cache the hash code + private final byte[] bytes; + + public ByteArray(byte[] bytes) { + this.bytes = bytes; + } + + public byte[] getBytes() { + return bytes; + } + + @Override + public int hashCode() { + if (hash == 0) { + hash = Arrays.hashCode(bytes); + } + return hash; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ByteArray)) { + return false; + } + return Arrays.equals(bytes, ((ByteArray)o).bytes); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/util/DataTransferThrottler.java b/src/hdfs/org/apache/hadoop/hdfs/util/DataTransferThrottler.java new file mode 100644 index 0000000..9e8bf58 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/util/DataTransferThrottler.java @@ -0,0 +1,111 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.util; + +/** + * a class to throttle the block transfers. + * This class is thread safe. It can be shared by multiple threads. + * The parameter bandwidthPerSec specifies the total bandwidth shared by + * threads. + */ +public class DataTransferThrottler { + private long period; // period over which bw is imposed + private long periodExtension; // Max period over which bw accumulates. + private long bytesPerPeriod; // total number of bytes can be sent in each period + private long curPeriodStart; // current period starting time + private long curReserve; // remaining bytes can be sent in the period + private long bytesAlreadyUsed; + + /** Constructor + * @param bandwidthPerSec bandwidth allowed in bytes per second. + */ + public DataTransferThrottler(long bandwidthPerSec) { + this(500, bandwidthPerSec); // by default throttling period is 500ms + } + + /** + * Constructor + * @param period in milliseconds. Bandwidth is enforced over this + * period. + * @param bandwidthPerSec bandwidth allowed in bytes per second. + */ + public DataTransferThrottler(long period, long bandwidthPerSec) { + this.curPeriodStart = System.currentTimeMillis(); + this.period = period; + this.curReserve = this.bytesPerPeriod = bandwidthPerSec*period/1000; + this.periodExtension = period*3; + } + + /** + * @return current throttle bandwidth in bytes per second. + */ + public synchronized long getBandwidth() { + return bytesPerPeriod*1000/period; + } + + /** + * Sets throttle bandwidth. This takes affect latest by the end of current + * period. 
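The DataTransferThrottler above works by giving callers a byte budget per period (500 ms by default); a caller reports how many bytes it just moved, and the throttle(long) method defined just below puts it to sleep once the budget is exhausted. A minimal hypothetical copy loop using it (ThrottledCopy is an invented name, and 1 MB/s is an arbitrary figure):

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;
    import org.apache.hadoop.hdfs.util.DataTransferThrottler;

    public class ThrottledCopy {
      // Copy a stream while keeping the transfer rate near 1 MB/s.
      static void copy(InputStream in, OutputStream out) throws IOException {
        DataTransferThrottler throttler = new DataTransferThrottler(1024 * 1024); // bytes/sec
        byte[] buf = new byte[64 * 1024];
        int n;
        while ((n = in.read(buf)) != -1) {
          out.write(buf, 0, n);
          // Report the bytes just written; the throttler sleeps if we are ahead of budget.
          throttler.throttle(n);
        }
      }
    }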
+ * + * @param bytesPerSecond + */ + public synchronized void setBandwidth(long bytesPerSecond) { + if ( bytesPerSecond <= 0 ) { + throw new IllegalArgumentException("" + bytesPerSecond); + } + bytesPerPeriod = bytesPerSecond*period/1000; + } + + /** Given the numOfBytes sent/received since last time throttle was called, + * make the current thread sleep if I/O rate is too fast + * compared to the given bandwidth. + * + * @param numOfBytes + * number of bytes sent/received since last time throttle was called + */ + public synchronized void throttle(long numOfBytes) { + if ( numOfBytes <= 0 ) { + return; + } + + curReserve -= numOfBytes; + bytesAlreadyUsed += numOfBytes; + + while (curReserve <= 0) { + long now = System.currentTimeMillis(); + long curPeriodEnd = curPeriodStart + period; + + if ( now < curPeriodEnd ) { + // Wait for next period so that curReserve can be increased. + try { + wait( curPeriodEnd - now ); + } catch (InterruptedException ignored) {} + } else if ( now < (curPeriodStart + periodExtension)) { + curPeriodStart = curPeriodEnd; + curReserve += bytesPerPeriod; + } else { + // discard the prev period. Throttler might not have + // been used for a long time. + curPeriodStart = now; + curReserve = bytesPerPeriod - bytesAlreadyUsed; + } + } + + bytesAlreadyUsed -= numOfBytes; + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/util/GSet.java b/src/hdfs/org/apache/hadoop/hdfs/util/GSet.java new file mode 100644 index 0000000..e56054e --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/util/GSet.java @@ -0,0 +1,81 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.util; + +/** + * A {@link GSet} is set, + * which supports the {@link #get(Object)} operation. + * The {@link #get(Object)} operation uses a key to lookup an element. + * + * Null element is not supported. + * + * @param The type of the keys. + * @param The type of the elements, which must be a subclass of the keys. + */ +public interface GSet extends Iterable { + /** + * @return The size of this set. + */ + int size(); + + /** + * Does this set contain an element corresponding to the given key? + * @param key The given key. + * @return true if the given key equals to a stored element. + * Otherwise, return false. + * @throws NullPointerException if key == null. + */ + boolean contains(K key); + + /** + * Return the stored element which is equal to the given key. + * This operation is similar to {@link java.util.Map#get(Object)}. + * @param key The given key. + * @return The stored element if it exists. + * Otherwise, return null. + * @throws NullPointerException if key == null. + */ + E get(K key); + + /** + * Add/replace an element. + * If the element does not exist, add it to the set. 
+ * Otherwise, replace the existing element. + * + * Note that this operation + * is similar to {@link java.util.Map#put(Object, Object)} + * but is different from {@link java.util.Set#add(Object)} + * which does not replace the existing element if there is any. + * + * @param element The element being put. + * @return the previous stored element if there is any. + * Otherwise, return null. + * @throws NullPointerException if element == null. + */ + E put(E element); + + /** + * Remove the element corresponding to the given key. + * This operation is similar to {@link java.util.Map#remove(Object)}. + * @param key The key of the element being removed. + * @return If such element exists, return it. + * Otherwise, return null. + * @throws NullPointerException if key == null. + */ + E remove(K key); +} \ No newline at end of file diff --git a/src/hdfs/org/apache/hadoop/hdfs/util/GSetByHashMap.java b/src/hdfs/org/apache/hadoop/hdfs/util/GSetByHashMap.java new file mode 100644 index 0000000..6582d5b --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/util/GSetByHashMap.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.util; + +import java.util.HashMap; +import java.util.Iterator; + +/** + * A {@link GSet} implementation by {@link HashMap}. + */ +public class GSetByHashMap implements GSet { + private final HashMap m; + + public GSetByHashMap(int initialCapacity, float loadFactor) { + m = new HashMap(initialCapacity, loadFactor); + } + + @Override + public int size() { + return m.size(); + } + + @Override + public boolean contains(K k) { + return m.containsKey(k); + } + + @Override + public E get(K k) { + return m.get(k); + } + + @Override + public E put(E element) { + if (element == null) { + throw new UnsupportedOperationException("Null element is not supported."); + } + return m.put(element, element); + } + + @Override + public E remove(K k) { + return m.remove(k); + } + + @Override + public Iterator iterator() { + return m.values().iterator(); + } +} diff --git a/src/hdfs/org/apache/hadoop/hdfs/util/LightWeightGSet.java b/src/hdfs/org/apache/hadoop/hdfs/util/LightWeightGSet.java new file mode 100644 index 0000000..3fc1860 --- /dev/null +++ b/src/hdfs/org/apache/hadoop/hdfs/util/LightWeightGSet.java @@ -0,0 +1,283 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
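Before moving on to the array-based implementation below, it may help to see the GSet contract in action with the HashMap-backed version just shown: elements are looked up by a key they embed, and put() replaces (and returns) an existing element with an equal key, unlike java.util.Set#add. A hypothetical sketch (NamedBlock and GSetDemo are invented for illustration; the generic parameters <K, E extends K> follow the key/element description in the GSet javadoc):

    import org.apache.hadoop.hdfs.util.GSet;
    import org.apache.hadoop.hdfs.util.GSetByHashMap;

    public class GSetDemo {
      // Illustrative element type that doubles as its own key:
      // equality and hashing consider only the name.
      static class NamedBlock {
        final String name;
        final long length;
        NamedBlock(String name, long length) { this.name = name; this.length = length; }
        @Override public int hashCode() { return name.hashCode(); }
        @Override public boolean equals(Object o) {
          return o instanceof NamedBlock && name.equals(((NamedBlock) o).name);
        }
      }

      public static void main(String[] args) {
        GSet<NamedBlock, NamedBlock> set = new GSetByHashMap<NamedBlock, NamedBlock>(16, 0.75f);
        set.put(new NamedBlock("blk_1", 64));
        NamedBlock previous = set.put(new NamedBlock("blk_1", 128)); // replaces, returns old
        System.out.println(previous.length);                             // 64
        System.out.println(set.get(new NamedBlock("blk_1", 0)).length);  // 128
      }
    }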
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.util; + +import java.io.PrintStream; +import java.util.ConcurrentModificationException; +import java.util.Iterator; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +/** + * A low memory footprint {@link GSet} implementation, + * which uses an array for storing the elements + * and linked lists for collision resolution. + * + * No rehash will be performed. + * Therefore, the internal array will never be resized. + * + * This class does not support null element. + * + * This class is not thread safe. + * + * @param Key type for looking up the elements + * @param Element type, which must be + * (1) a subclass of K, and + * (2) implementing {@link LinkedElement} interface. + */ +public class LightWeightGSet implements GSet { + /** + * Elements of {@link LightWeightGSet}. + */ + public static interface LinkedElement { + /** Set the next element. */ + public void setNext(LinkedElement next); + + /** Get the next element. */ + public LinkedElement getNext(); + } + + public static final Log LOG = LogFactory.getLog(GSet.class); + static final int MAX_ARRAY_LENGTH = 1 << 30; //prevent int overflow problem + static final int MIN_ARRAY_LENGTH = 1; + + /** + * An internal array of entries, which are the rows of the hash table. + * The size must be a power of two. + */ + private final LinkedElement[] entries; + /** A mask for computing the array index from the hash value of an element. */ + private final int hash_mask; + /** The size of the set (not the entry array). */ + private int size = 0; + /** Modification version for fail-fast. + * @see ConcurrentModificationException + */ + private volatile int modification = 0; + + /** + * @param recommended_length Recommended size of the internal array. + */ + public LightWeightGSet(final int recommended_length) { + final int actual = actualArrayLength(recommended_length); + LOG.info("recommended=" + recommended_length + ", actual=" + actual); + + entries = new LinkedElement[actual]; + hash_mask = entries.length - 1; + } + + //compute actual length + private static int actualArrayLength(int recommended) { + if (recommended > MAX_ARRAY_LENGTH) { + return MAX_ARRAY_LENGTH; + } else if (recommended < MIN_ARRAY_LENGTH) { + return MIN_ARRAY_LENGTH; + } else { + final int a = Integer.highestOneBit(recommended); + return a == recommended? 
a: a << 1; + } + } + + @Override + public int size() { + return size; + } + + private int getIndex(final K key) { + return key.hashCode() & hash_mask; + } + + private E convert(final LinkedElement e){ + @SuppressWarnings("unchecked") + final E r = (E)e; + return r; + } + + @Override + public E get(final K key) { + //validate key + if (key == null) { + throw new NullPointerException("key == null"); + } + + //find element + final int index = getIndex(key); + for(LinkedElement e = entries[index]; e != null; e = e.getNext()) { + if (e.equals(key)) { + return convert(e); + } + } + //element not found + return null; + } + + @Override + public boolean contains(final K key) { + return get(key) != null; + } + + @Override + public E put(final E element) { + //validate element + if (element == null) { + throw new NullPointerException("Null element is not supported."); + } + if (!(element instanceof LinkedElement)) { + throw new IllegalArgumentException( + "!(element instanceof LinkedElement), element.getClass()=" + + element.getClass()); + } + final LinkedElement e = (LinkedElement)element; + + //find index + final int index = getIndex(element); + + //remove if it already exists + final E existing = remove(index, element); + + //insert the element to the head of the linked list + modification++; + size++; + e.setNext(entries[index]); + entries[index] = e; + + return existing; + } + + /** + * Remove the element corresponding to the key, + * given key.hashCode() == index. + * + * @return If such element exists, return it. + * Otherwise, return null. + */ + private E remove(final int index, final K key) { + if (entries[index] == null) { + return null; + } else if (entries[index].equals(key)) { + //remove the head of the linked list + modification++; + size--; + final LinkedElement e = entries[index]; + entries[index] = e.getNext(); + e.setNext(null); + return convert(e); + } else { + //head != null and key is not equal to head + //search the element + LinkedElement prev = entries[index]; + for(LinkedElement curr = prev.getNext(); curr != null; ) { + if (curr.equals(key)) { + //found the element, remove it + modification++; + size--; + prev.setNext(curr.getNext()); + curr.setNext(null); + return convert(curr); + } else { + prev = curr; + curr = curr.getNext(); + } + } + //element not found + return null; + } + } + + @Override + public E remove(final K key) { + //validate key + if (key == null) { + throw new NullPointerException("key == null"); + } + return remove(getIndex(key), key); + } + + @Override + public Iterator iterator() { + return new SetIterator(); + } + + @Override + public String toString() { + final StringBuilder b = new StringBuilder(getClass().getSimpleName()); + b.append("(size=").append(size) + .append(String.format(", %08x", hash_mask)) + .append(", modification=").append(modification) + .append(", entries.length=").append(entries.length) + .append(")"); + return b.toString(); + } + + /** Print detailed information of this object. */ + public void printDetails(final PrintStream out) { + out.print(this + ", entries = ["); + for(int i = 0; i < entries.length; i++) { + if (entries[i] != null) { + LinkedElement e = entries[i]; + out.print("\n " + i + ": " + e); + for(e = e.getNext(); e != null; e = e.getNext()) { + out.print(" -> " + e); + } + } + } + out.println("\n]"); + } + + private class SetIterator implements Iterator { + /** The starting modification for fail-fast. */ + private final int startModification = modification; + /** The current index of the entry array. 
*/ + private int index = -1; + /** The next element to return. */ + private LinkedElement next = nextNonemptyEntry(); + + /** Find the next nonempty entry starting at (index + 1). */ + private LinkedElement nextNonemptyEntry() { + for(index++; index < entries.length && entries[index] == null; index++); + return index < entries.length? entries[index]: null; + } + + @Override + public boolean hasNext() { + return next != null; + } + + @Override + public E next() { + if (modification != startModification) { + throw new ConcurrentModificationException("modification=" + modification + + " != startModification = " + startModification); + } + + final E e = convert(next); + + //find the next element + final LinkedElement n = next.getNext(); + next = n != null? n: nextNonemptyEntry(); + + return e; + } + + @Override + public void remove() { + throw new UnsupportedOperationException("Remove is not supported."); + } + } +} diff --git a/src/mapred/mapred-default.xml b/src/mapred/mapred-default.xml new file mode 100644 index 0000000..e1c0c70 --- /dev/null +++ b/src/mapred/mapred-default.xml @@ -0,0 +1,967 @@ + + + + + + + + + + + hadoop.job.history.location + + If job tracker is static the history files are stored + in this single well known place. If No value is set here, by default, + it is in the local file system at ${hadoop.log.dir}/history. + + + + + hadoop.job.history.user.location + + User can specify a location to store the history files of + a particular job. If nothing is specified, the logs are stored in + output directory. The files are stored in "_logs/history/" in the directory. + User can stop logging by giving the value "none". + + + + + mapred.job.tracker.history.completed.location + + The completed job history files are stored at this single well + known location. If nothing is specified, the files are stored at + ${hadoop.job.history.location}/done. + + + + + mapred.committer.job.setup.cleanup.needed + true + true, if job needs job-setup and job-cleanup. + false, otherwise + + + + + + + io.sort.factor + 10 + The number of streams to merge at once while sorting + files. This determines the number of open file handles. + + + + io.sort.mb + 100 + The total amount of buffer memory to use while sorting + files, in megabytes. By default, gives each merge stream 1MB, which + should minimize seeks. + + + + io.sort.record.percent + 0.05 + The percentage of io.sort.mb dedicated to tracking record + boundaries. Let this value be r, io.sort.mb be x. The maximum number + of records collected before the collection thread must block is equal + to (r * x) / 4 + + + + io.sort.spill.percent + 0.80 + The soft limit in either the buffer or record collection + buffers. Once reached, a thread will begin to spill the contents to disk + in the background. Note that this does not imply any chunking of data to + the spill. A value less than 0.5 is not recommended. + + + + io.map.index.skip + 0 + Number of index entries to skip between each entry. + Zero by default. Setting this to values larger than zero can + facilitate opening large map files using less memory. + + + + mapred.job.tracker + local + The host and port that the MapReduce job tracker runs + at. If "local", then jobs are run in-process as a single map + and reduce task. + + + + + mapred.job.tracker.http.address + 0.0.0.0:50030 + + The job tracker http server address and port the server will listen on. + If the port is 0 then the server will start on a free port. 
+ + + + + mapred.job.tracker.handler.count + 10 + + The number of server threads for the JobTracker. This should be roughly + 4% of the number of tasktracker nodes. + + + + + mapred.task.tracker.report.address + 127.0.0.1:0 + The interface and port that task tracker server listens on. + Since it is only connected to by the tasks, it uses the local interface. + EXPERT ONLY. Should only be changed if your host does not have the loopback + interface. + + + + mapred.local.dir + ${hadoop.tmp.dir}/mapred/local + The local directory where MapReduce stores intermediate + data files. May be a comma-separated list of + directories on different devices in order to spread disk i/o. + Directories that do not exist are ignored. + + + + + mapred.system.dir + ${hadoop.tmp.dir}/mapred/system + The shared directory where MapReduce stores control files. + + + + + mapred.temp.dir + ${hadoop.tmp.dir}/mapred/temp + A shared directory for temporary files. + + + + + mapred.local.dir.minspacestart + 0 + If the space in mapred.local.dir drops under this, + do not ask for more tasks. + Value in bytes. + + + + + mapred.local.dir.minspacekill + 0 + If the space in mapred.local.dir drops under this, + do not ask more tasks until all the current ones have finished and + cleaned up. Also, to save the rest of the tasks we have running, + kill one of them, to clean up some space. Start with the reduce tasks, + then go with the ones that have finished the least. + Value in bytes. + + + + + mapred.tasktracker.expiry.interval + 600000 + Expert: The time-interval, in miliseconds, after which + a tasktracker is declared 'lost' if it doesn't send heartbeats. + + + + + mapred.tasktracker.instrumentation + org.apache.hadoop.mapred.TaskTrackerMetricsInst + Expert: The instrumentation class to associate with each TaskTracker. + + + + + mapred.tasktracker.memory_calculator_plugin + + + Name of the class whose instance will be used to query memory information + on the tasktracker. + + The class must be an instance of + org.apache.hadoop.util.MemoryCalculatorPlugin. If the value is null, the + tasktracker attempts to use a class appropriate to the platform. + Currently, the only platform supported is Linux. + + + + + mapred.tasktracker.taskmemorymanager.monitoring-interval + 5000 + The interval, in milliseconds, for which the tasktracker waits + between two cycles of monitoring its tasks' memory usage. Used only if + tasks' memory management is enabled via mapred.tasktracker.tasks.maxmemory. + + + + + mapred.tasktracker.tasks.sleeptime-before-sigkill + 5000 + The time, in milliseconds, the tasktracker waits for sending a + SIGKILL to a process, after it has been sent a SIGTERM. + + + + mapred.map.tasks + 2 + The default number of map tasks per job. + Ignored when mapred.job.tracker is "local". + + + + + mapred.reduce.tasks + 1 + The default number of reduce tasks per job. Typically set to 99% + of the cluster's reduce capacity, so that if a node fails the reduces can + still be executed in a single wave. + Ignored when mapred.job.tracker is "local". + + + + + mapreduce.tasktracker.outofband.heartbeat + false + Expert: Set this to true to let the tasktracker send an + out-of-band heartbeat on task-completion for better latency. + + + + + mapred.jobtracker.restart.recover + false + "true" to enable (job) recovery upon restart, + "false" to start afresh + + + + + mapred.jobtracker.job.history.block.size + 3145728 + The block size of the job history file. 
Since the job recovery + uses job history, its important to dump job history to disk as + soon as possible. Note that this is an expert level parameter. + The default value is set to 3 MB. + + + + + mapred.jobtracker.taskScheduler + org.apache.hadoop.mapred.JobQueueTaskScheduler + The class responsible for scheduling the tasks. + + + + mapred.jobtracker.taskScheduler.maxRunningTasksPerJob + + The maximum number of running tasks for a job before + it gets preempted. No limits if undefined. + + + + + mapred.map.max.attempts + 4 + Expert: The maximum number of attempts per map task. + In other words, framework will try to execute a map task these many number + of times before giving up on it. + + + + + mapred.reduce.max.attempts + 4 + Expert: The maximum number of attempts per reduce task. + In other words, framework will try to execute a reduce task these many number + of times before giving up on it. + + + + + mapred.reduce.parallel.copies + 5 + The default number of parallel transfers run by reduce + during the copy(shuffle) phase. + + + + + mapred.reduce.copy.backoff + 300 + The maximum amount of time (in seconds) a reducer spends on + fetching one map output before declaring it as failed. + + + + + mapreduce.reduce.shuffle.connect.timeout + 180000 + Expert: The maximum amount of time (in milli seconds) a reduce + task spends in trying to connect to a tasktracker for getting map output. + + + + + mapreduce.reduce.shuffle.read.timeout + 180000 + Expert: The maximum amount of time (in milli seconds) a reduce + task waits for map output data to be available for reading after obtaining + connection. + + + + + mapred.task.timeout + 600000 + The number of milliseconds before a task will be + terminated if it neither reads an input, writes an output, nor + updates its status string. + + + + + mapred.tasktracker.map.tasks.maximum + 2 + The maximum number of map tasks that will be run + simultaneously by a task tracker. + + + + + mapred.tasktracker.reduce.tasks.maximum + 2 + The maximum number of reduce tasks that will be run + simultaneously by a task tracker. + + + + + mapred.jobtracker.completeuserjobs.maximum + 100 + The maximum number of complete jobs per user to keep around + before delegating them to the job history. + + + + mapred.job.tracker.retiredjobs.cache.size + 1000 + The number of retired job status to keep in the cache. + + + + + mapred.job.tracker.jobhistory.lru.cache.size + 5 + The number of job history files loaded in memory. The jobs are + loaded when they are first accessed. The cache is cleared based on LRU. + + + + + mapred.jobtracker.instrumentation + org.apache.hadoop.mapred.JobTrackerMetricsInst + Expert: The instrumentation class to associate with each JobTracker. + + + + + mapred.child.java.opts + -Xmx200m + Java opts for the task tracker child processes. + The following symbol, if present, will be interpolated: @taskid@ is replaced + by current TaskID. Any other occurrences of '@' will go unchanged. + For example, to enable verbose gc logging to a file named for the taskid in + /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of: + -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc + + The configuration variable mapred.child.ulimit can be used to control the + maximum virtual memory of the child processes. + + + + + mapred.child.env + + User added environment variables for the task tracker child + processes. Example : + 1) A=foo This will set the env variable A to foo + 2) B=$B:c This is inherit tasktracker's B env variable. 
+ + + + + mapred.child.ulimit + + The maximum virtual memory, in KB, of a process launched by the + Map-Reduce framework. This can be used to control both the Mapper/Reducer + tasks and applications using Hadoop Pipes, Hadoop Streaming etc. + By default it is left unspecified to let cluster admins control it via + limits.conf and other such relevant mechanisms. + + Note: mapred.child.ulimit must be greater than or equal to the -Xmx passed to + JavaVM, else the VM might not start. + + + + + mapred.child.tmp + ./tmp + To set the value of tmp directory for map and reduce tasks. + If the value is an absolute path, it is directly assigned. Otherwise, it is + prepended with task's working directory. The java tasks are executed with + option -Djava.io.tmpdir='the absolute path of the tmp dir'. Pipes and + streaming are set with environment variable, + TMPDIR='the absolute path of the tmp dir' + + + + + mapred.inmem.merge.threshold + 1000 + The threshold, in terms of the number of files + for the in-memory merge process. When we accumulate threshold number of files + we initiate the in-memory merge and spill to disk. A value of 0 or less than + 0 indicates we want to DON'T have any threshold and instead depend only on + the ramfs's memory consumption to trigger the merge. + + + + + mapred.job.shuffle.merge.percent + 0.66 + The usage threshold at which an in-memory merge will be + initiated, expressed as a percentage of the total memory allocated to + storing in-memory map outputs, as defined by + mapred.job.shuffle.input.buffer.percent. + + + + + mapred.job.shuffle.input.buffer.percent + 0.70 + The percentage of memory to be allocated from the maximum heap + size to storing map outputs during the shuffle. + + + + + mapred.job.reduce.input.buffer.percent + 0.0 + The percentage of memory- relative to the maximum heap size- to + retain map outputs during the reduce. When the shuffle is concluded, any + remaining map outputs in memory must consume less than this threshold before + the reduce can begin. + + + + + mapred.map.tasks.speculative.execution + true + If true, then multiple instances of some map tasks + may be executed in parallel. + + + + mapred.reduce.tasks.speculative.execution + true + If true, then multiple instances of some reduce tasks + may be executed in parallel. + + + + mapred.job.reuse.jvm.num.tasks + 1 + How many tasks to run per jvm. If set to -1, there is + no limit. + + + + + mapred.min.split.size + 0 + The minimum size chunk that map input should be split + into. Note that some file formats may have minimum split sizes that + take priority over this setting. + + + + mapred.jobtracker.maxtasks.per.job + -1 + The maximum number of tasks for a single job. + A value of -1 indicates that there is no maximum. + + + + mapred.submit.replication + 10 + The replication level for submitted job files. This + should be around the square root of the number of nodes. + + + + + + mapred.tasktracker.dns.interface + default + The name of the Network Interface from which a task + tracker should report its IP address. + + + + + mapred.tasktracker.dns.nameserver + default + The host name or IP address of the name server (DNS) + which a TaskTracker should use to determine the host name used by + the JobTracker for communication and display purposes. + + + + + tasktracker.http.threads + 40 + The number of worker threads that for the http server. This is + used for map output fetching + + + + + mapred.task.tracker.http.address + 0.0.0.0:50060 + + The task tracker http server address and port. 
+ If the port is 0 then the server will start on a free port. + + + + + keep.failed.task.files + false + Should the files for failed tasks be kept. This should only be + used on jobs that are failing, because the storage is never + reclaimed. It also prevents the map outputs from being erased + from the reduce directory as they are consumed. + + + + + + + mapred.output.compress + false + Should the job outputs be compressed? + + + + + mapred.output.compression.type + RECORD + If the job outputs are to compressed as SequenceFiles, how should + they be compressed? Should be one of NONE, RECORD or BLOCK. + + + + + mapred.output.compression.codec + org.apache.hadoop.io.compress.DefaultCodec + If the job outputs are compressed, how should they be compressed? + + + + + mapred.compress.map.output + false + Should the outputs of the maps be compressed before being + sent across the network. Uses SequenceFile compression. + + + + + mapred.map.output.compression.codec + org.apache.hadoop.io.compress.DefaultCodec + If the map outputs are compressed, how should they be + compressed? + + + + + map.sort.class + org.apache.hadoop.util.QuickSort + The default sort class for sorting keys. + + + + + mapred.userlog.limit.kb + 0 + The maximum size of user-logs of each task in KB. 0 disables the cap. + + + + + mapred.userlog.retain.hours + 24 + The maximum time, in hours, for which the user-logs are to be + retained. + + + + + mapred.hosts + + Names a file that contains the list of nodes that may + connect to the jobtracker. If the value is empty, all hosts are + permitted. + + + + mapred.hosts.exclude + + Names a file that contains the list of hosts that + should be excluded by the jobtracker. If the value is empty, no + hosts are excluded. + + + + mapred.heartbeats.in.second + 100 + Expert: Approximate number of heart-beats that could arrive + at JobTracker in a second. Assuming each RPC can be processed + in 10msec, the default value is made 100 RPCs in a second. + + + + + mapred.max.tracker.blacklists + 4 + The number of blacklists for a taskTracker by various jobs + after which the task tracker could be blacklisted across + all jobs. The tracker will be given a tasks later + (after a day). The tracker will become a healthy + tracker after a restart. + + + + + mapred.max.tracker.failures + 4 + The number of task-failures on a tasktracker of a given job + after which new tasks of that job aren't assigned to it. + + + + + jobclient.output.filter + FAILED + The filter for controlling the output of the task's userlogs sent + to the console of the JobClient. + The permissible options are: NONE, KILLED, FAILED, SUCCEEDED and + ALL. + + + + + mapred.job.tracker.persist.jobstatus.active + false + Indicates if persistency of job status information is + active or not. + + + + + mapred.job.tracker.persist.jobstatus.hours + 0 + The number of hours job status information is persisted in DFS. + The job status information will be available after it drops of the memory + queue and between jobtracker restarts. With a zero value the job status + information is not persisted at all in DFS. + + + + + mapred.job.tracker.persist.jobstatus.dir + /jobtracker/jobsInfo + The directory where the job status information is persisted + in a file system to be available after it drops of the memory queue and + between jobtracker restarts. + + + + + mapred.task.profile + false + To set whether the system should collect profiler + information for some of the tasks in this job? The information is stored + in the user log directory. 
The value is "true" if task profiling + is enabled. + + + + mapred.task.profile.maps + 0-2 + To set the ranges of map tasks to profile. + mapred.task.profile has to be set to true for the value to be accounted. + + + + + mapred.task.profile.reduces + 0-2 + To set the ranges of reduce tasks to profile. + mapred.task.profile has to be set to true for the value to be accounted. + + + + + mapred.line.input.format.linespermap + 1 + Number of lines per split in NLineInputFormat. + + + + + mapred.skip.attempts.to.start.skipping + 2 + The number of Task attempts AFTER which skip mode + will be kicked off. When skip mode is kicked off, the + tasks reports the range of records which it will process + next, to the TaskTracker. So that on failures, TT knows which + ones are possibly the bad records. On further executions, + those are skipped. + + + + + mapred.skip.map.auto.incr.proc.count + true + The flag which if set to true, + SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS is incremented + by MapRunner after invoking the map function. This value must be set to + false for applications which process the records asynchronously + or buffer the input records. For example streaming. + In such cases applications should increment this counter on their own. + + + + + mapred.skip.reduce.auto.incr.proc.count + true + The flag which if set to true, + SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS is incremented + by framework after invoking the reduce function. This value must be set to + false for applications which process the records asynchronously + or buffer the input records. For example streaming. + In such cases applications should increment this counter on their own. + + + + + mapred.skip.out.dir + + If no value is specified here, the skipped records are + written to the output directory at _logs/skip. + User can stop writing skipped records by giving the value "none". + + + + + mapred.skip.map.max.skip.records + 0 + The number of acceptable skip records surrounding the bad + record PER bad record in mapper. The number includes the bad record as well. + To turn the feature of detection/skipping of bad records off, set the + value to 0. + The framework tries to narrow down the skipped range by retrying + until this threshold is met OR all attempts get exhausted for this task. + Set the value to Long.MAX_VALUE to indicate that framework need not try to + narrow down. Whatever records(depends on application) get skipped are + acceptable. + + + + + mapred.skip.reduce.max.skip.groups + 0 + The number of acceptable skip groups surrounding the bad + group PER bad group in reducer. The number includes the bad group as well. + To turn the feature of detection/skipping of bad groups off, set the + value to 0. + The framework tries to narrow down the skipped range by retrying + until this threshold is met OR all attempts get exhausted for this task. + Set the value to Long.MAX_VALUE to indicate that framework need not try to + narrow down. Whatever groups(depends on application) get skipped are + acceptable. + + + + + + + + + job.end.retry.attempts + 0 + Indicates how many times hadoop should attempt to contact the + notification URL + + + + job.end.retry.interval + 30000 + Indicates time in milliseconds between notification URL retry + calls + + + + + hadoop.rpc.socket.factory.class.JobSubmissionProtocol + + SocketFactory to use to connect to a Map/Reduce master + (JobTracker). If null or empty, then use hadoop.rpc.socket.class.default. 
+ + + + + mapred.task.cache.levels + 2 + This is the max level of the task cache. For example, if + the level is 2, the tasks cached are at the host level and at the rack + level. + + + + + mapred.queue.names + default + Comma separated list of queues configured for this jobtracker. + Jobs are added to queues and schedulers can configure different + scheduling properties for the various queues. To configure a property + for a queue, the name of the queue must match the name specified in this + value. Queue properties that are common to all schedulers are configured + here with the naming convention, mapred.queue.$QUEUE-NAME.$PROPERTY-NAME, + for e.g. mapred.queue.default.submit-job-acl. + The number of queues configured in this parameter could depend on the + type of scheduler being used, as specified in + mapred.jobtracker.taskScheduler. For example, the JobQueueTaskScheduler + supports only a single queue, which is the default configured here. + Before adding more queues, ensure that the scheduler you've configured + supports multiple queues. + + + + + mapred.acls.enabled + false + Specifies whether ACLs are enabled, and should be checked + for various operations. + + + + + mapred.job.queue.name + default + Queue to which a job is submitted. This must match one of the + queues defined in mapred.queue.names for the system. Also, the ACL setup + for the queue must allow the current user to submit a job to the queue. + Before specifying a queue, ensure that the system is configured with + the queue, and access is allowed for submitting jobs to the queue. + + + + + mapred.tasktracker.indexcache.mb + 10 + The maximum memory that a task tracker allows for the + index cache that is used when serving map outputs to reducers. + + + + + mapred.merge.recordsBeforeProgress + 10000 + The number of records to process during merge before + sending a progress notification to the TaskTracker. + + + + + mapred.reduce.slowstart.completed.maps + 0.05 + Fraction of the number of maps in the job which should be + complete before reduces are scheduled for the job. + + + + + mapred.task.tracker.task-controller + org.apache.hadoop.mapred.DefaultTaskController + TaskController which is used to launch and manage task execution + + + + + + + mapred.healthChecker.script.path + + Absolute path to the script which is + periodicallyrun by the node health monitoring service to determine if + the node is healthy or not. If the value of this key is empty or the + file does not exist in the location configured here, the node health + monitoring service is not started. + + + + mapred.healthChecker.interval + 60000 + Frequency of the node health script to be run, + in milliseconds + + + + mapred.healthChecker.script.timeout + 600000 + Time after node health script should be killed if + unresponsive and considered that the script has failed. + + + + mapred.healthChecker.script.args + + List of arguments which are to be passed to + node health script when it is being launched comma seperated. + + + + + + diff --git a/src/mapred/org/apache/hadoop/mapred/AdminOperationsProtocol.java b/src/mapred/org/apache/hadoop/mapred/AdminOperationsProtocol.java new file mode 100644 index 0000000..827495e --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/AdminOperationsProtocol.java @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; + +import org.apache.hadoop.ipc.VersionedProtocol; + +/** + * Protocol for admin operations. This is a framework-public interface and is + * NOT_TO_BE_USED_BY_USERS_DIRECTLY. + */ +public interface AdminOperationsProtocol extends VersionedProtocol { + + /** + * Version 1: Initial version. Added refreshQueueAcls. + * Version 2: Added node refresh facility + */ + public static final long versionID = 2L; + + /** + * Refresh the queue acls in use currently. + */ + void refreshQueueAcls() throws IOException; + + /** + * Refresh the node list at the {@link JobTracker} + */ + void refreshNodes() throws IOException; +} diff --git a/src/mapred/org/apache/hadoop/mapred/BasicTypeSorterBase.java b/src/mapred/org/apache/hadoop/mapred/BasicTypeSorterBase.java new file mode 100644 index 0000000..fe59ce1 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/BasicTypeSorterBase.java @@ -0,0 +1,242 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.DataOutputStream; +import java.io.IOException; + +import org.apache.hadoop.io.DataOutputBuffer; +import org.apache.hadoop.io.OutputBuffer; +import org.apache.hadoop.io.RawComparator; +import org.apache.hadoop.io.SequenceFile.ValueBytes; +import org.apache.hadoop.util.Progress; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator; +import org.apache.hadoop.util.Progressable; + +/** This class implements the sort interface using primitive int arrays as + * the data structures (that is why this class is called 'BasicType'SorterBase) + */ +abstract class BasicTypeSorterBase implements BufferSorter { + + protected OutputBuffer keyValBuffer; //the buffer used for storing + //key/values + protected int[] startOffsets; //the array used to store the start offsets of + //keys in keyValBuffer + protected int[] keyLengths; //the array used to store the lengths of + //keys + protected int[] valueLengths; //the array used to store the value lengths + protected int[] pointers; //the array of startOffsets's indices. 
This will + //be sorted at the end to contain a sorted array of + //indices to offsets + protected RawComparator comparator; //the comparator for the map output + protected int count; //the number of key/values + //the overhead of the arrays in memory + //12 => 4 for keyoffsets, 4 for keylengths, 4 for valueLengths, and + //4 for indices into startOffsets array in the + //pointers array (ignored the partpointers list itself) + static private final int BUFFERED_KEY_VAL_OVERHEAD = 16; + static private final int INITIAL_ARRAY_SIZE = 5; + //we maintain the max lengths of the key/val that we encounter. During + //iteration of the sorted results, we will create a DataOutputBuffer to + //return the keys. The max size of the DataOutputBuffer will be the max + //keylength that we encounter. Expose this value to model memory more + //accurately. + private int maxKeyLength = 0; + private int maxValLength = 0; + + //Reference to the Progressable object for sending KeepAlive + protected Progressable reporter; + + //Implementation of methods of the SorterBase interface + // + public void configure(JobConf conf) { + comparator = conf.getOutputKeyComparator(); + } + + public void setProgressable(Progressable reporter) { + this.reporter = reporter; + } + + public void addKeyValue(int recordOffset, int keyLength, int valLength) { + //Add the start offset of the key in the startOffsets array and the + //length in the keyLengths array. + if (startOffsets == null || count == startOffsets.length) + grow(); + startOffsets[count] = recordOffset; + keyLengths[count] = keyLength; + if (keyLength > maxKeyLength) { + maxKeyLength = keyLength; + } + if (valLength > maxValLength) { + maxValLength = valLength; + } + valueLengths[count] = valLength; + pointers[count] = count; + count++; + } + + public void setInputBuffer(OutputBuffer buffer) { + //store a reference to the keyValBuffer that we need to read during sort + this.keyValBuffer = buffer; + } + + public long getMemoryUtilized() { + //the total length of the arrays + the max{Key,Val}Length (this will be the + //max size of the DataOutputBuffers during the iteration of the sorted + //keys). + if (startOffsets != null) { + return (startOffsets.length) * BUFFERED_KEY_VAL_OVERHEAD + + maxKeyLength + maxValLength; + } + else { //nothing from this yet + return 0; + } + } + + public abstract RawKeyValueIterator sort(); + + public void close() { + //set count to 0; also, we don't reuse the arrays since we want to maintain + //consistency in the memory model + count = 0; + startOffsets = null; + keyLengths = null; + valueLengths = null; + pointers = null; + maxKeyLength = 0; + maxValLength = 0; + + //release the large key-value buffer so that the GC, if necessary, + //can collect it away + keyValBuffer = null; + } + + private void grow() { + int currLength = 0; + if (startOffsets != null) { + currLength = startOffsets.length; + } + int newLength = (int)(currLength * 1.1) + 1; + startOffsets = grow(startOffsets, newLength); + keyLengths = grow(keyLengths, newLength); + valueLengths = grow(valueLengths, newLength); + pointers = grow(pointers, newLength); + } + + private int[] grow(int[] old, int newLength) { + int[] result = new int[newLength]; + if(old != null) { + System.arraycopy(old, 0, result, 0, old.length); + } + return result; + } +} //BasicTypeSorterBase + +//Implementation of methods of the RawKeyValueIterator interface. These +//methods must be invoked to iterate over key/vals after sort is done. 
+// +class MRSortResultIterator implements RawKeyValueIterator { + + private int count; + private int[] pointers; + private int[] startOffsets; + private int[] keyLengths; + private int[] valLengths; + private int currStartOffsetIndex; + private int currIndexInPointers; + private OutputBuffer keyValBuffer; + private DataOutputBuffer key = new DataOutputBuffer(); + private InMemUncompressedBytes value = new InMemUncompressedBytes(); + + public MRSortResultIterator(OutputBuffer keyValBuffer, + int []pointers, int []startOffsets, + int []keyLengths, int []valLengths) { + this.count = pointers.length; + this.pointers = pointers; + this.startOffsets = startOffsets; + this.keyLengths = keyLengths; + this.valLengths = valLengths; + this.keyValBuffer = keyValBuffer; + } + + public Progress getProgress() { + return null; + } + + public DataOutputBuffer getKey() throws IOException { + int currKeyOffset = startOffsets[currStartOffsetIndex]; + int currKeyLength = keyLengths[currStartOffsetIndex]; + //reuse the same key + key.reset(); + key.write(keyValBuffer.getData(), currKeyOffset, currKeyLength); + return key; + } + + public ValueBytes getValue() throws IOException { + //value[i] is stored in the following byte range: + //startOffsets[i] + keyLengths[i] through valLengths[i] + value.reset(keyValBuffer, + startOffsets[currStartOffsetIndex] + keyLengths[currStartOffsetIndex], + valLengths[currStartOffsetIndex]); + return value; + } + + public boolean next() throws IOException { + if (count == currIndexInPointers) + return false; + currStartOffsetIndex = pointers[currIndexInPointers]; + currIndexInPointers++; + return true; + } + + public void close() { + return; + } + + //An implementation of the ValueBytes interface for the in-memory value + //buffers. + private static class InMemUncompressedBytes implements ValueBytes { + private byte[] data; + int start; + int dataSize; + private void reset(OutputBuffer d, int start, int length) + throws IOException { + data = d.getData(); + this.start = start; + dataSize = length; + } + + public int getSize() { + return dataSize; + } + + public void writeUncompressedBytes(DataOutputStream outStream) + throws IOException { + outStream.write(data, start, dataSize); + } + + public void writeCompressedBytes(DataOutputStream outStream) + throws IllegalArgumentException, IOException { + throw + new IllegalArgumentException("UncompressedBytes cannot be compressed!"); + } + + } // InMemUncompressedBytes + +} //MRSortResultIterator diff --git a/src/mapred/org/apache/hadoop/mapred/BufferSorter.java b/src/mapred/org/apache/hadoop/mapred/BufferSorter.java new file mode 100644 index 0000000..5f1220f --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/BufferSorter.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import org.apache.hadoop.io.OutputBuffer; +import org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator; +import org.apache.hadoop.util.Progressable; + +/** This class provides a generic sort interface that should be implemented + * by specific sort algorithms. The use case is the following: + * A user class writes key/value records to a buffer, and finally wants to + * sort the buffer. This interface defines methods by which the user class + * can update the interface implementation with the offsets of the records + * and the lengths of the keys/values. The user class gives a reference to + * the buffer when the latter wishes to sort the records written to the buffer + * so far. Typically, the user class decides the point at which sort should + * happen based on the memory consumed so far by the buffer and the data + * structures maintained by an implementation of this interface. That is why + * a method is provided to get the memory consumed so far by the datastructures + * in the interface implementation. + */ +interface BufferSorter extends JobConfigurable { + + /** Pass the Progressable object so that sort can call progress while it is sorting + * @param reporter the Progressable object reference + */ + public void setProgressable(Progressable reporter); + + /** When a key/value is added at a particular offset in the key/value buffer, + * this method is invoked by the user class so that the impl of this sort + * interface can update its datastructures. + * @param recordOffset the offset of the key in the buffer + * @param keyLength the length of the key + * @param valLength the length of the val in the buffer + */ + public void addKeyValue(int recordoffset, int keyLength, int valLength); + + /** The user class invokes this method to set the buffer that the specific + * sort algorithm should "indirectly" sort (generally, sort algorithm impl + * should access this buffer via comparators and sort offset-indices to the + * buffer). + * @param buffer the map output buffer + */ + public void setInputBuffer(OutputBuffer buffer); + + /** The framework invokes this method to get the memory consumed so far + * by an implementation of this interface. + * @return memoryUsed in bytes + */ + public long getMemoryUtilized(); + + /** Framework decides when to actually sort + */ + public RawKeyValueIterator sort(); + + /** Framework invokes this to signal the sorter to cleanup + */ + public void close(); +} diff --git a/src/mapred/org/apache/hadoop/mapred/Child.java b/src/mapred/org/apache/hadoop/mapred/Child.java new file mode 100644 index 0000000..a595e89 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/Child.java @@ -0,0 +1,206 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.io.PrintStream; +import java.net.InetSocketAddress; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FSError; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.mapred.JvmTask; +import org.apache.hadoop.metrics.MetricsContext; +import org.apache.hadoop.metrics.MetricsUtil; +import org.apache.hadoop.metrics.jvm.JvmMetrics; +import org.apache.log4j.LogManager; +import org.apache.hadoop.util.Shell; +import org.apache.hadoop.util.StringUtils; + +/** + * The main() for child processes. + */ + +class Child { + + public static final Log LOG = + LogFactory.getLog(Child.class); + + static volatile TaskAttemptID taskid = null; + static volatile boolean isCleanup; + + public static void main(String[] args) throws Throwable { + LOG.debug("Child starting"); + + JobConf defaultConf = new JobConf(); + String host = args[0]; + int port = Integer.parseInt(args[1]); + InetSocketAddress address = new InetSocketAddress(host, port); + final TaskAttemptID firstTaskid = TaskAttemptID.forName(args[2]); + final int SLEEP_LONGER_COUNT = 5; + int jvmIdInt = Integer.parseInt(args[3]); + JVMId jvmId = new JVMId(firstTaskid.getJobID(),firstTaskid.isMap(),jvmIdInt); + TaskUmbilicalProtocol umbilical = + (TaskUmbilicalProtocol)RPC.getProxy(TaskUmbilicalProtocol.class, + TaskUmbilicalProtocol.versionID, + address, + defaultConf); + int numTasksToExecute = -1; //-1 signifies "no limit" + int numTasksExecuted = 0; + Runtime.getRuntime().addShutdownHook(new Thread() { + public void run() { + try { + if (taskid != null) { + TaskLog.syncLogs(firstTaskid, taskid, isCleanup); + } + } catch (Throwable throwable) { + } + } + }); + Thread t = new Thread() { + public void run() { + //every so often wake up and syncLogs so that we can track + //logs of the currently running task + while (true) { + try { + Thread.sleep(5000); + if (taskid != null) { + TaskLog.syncLogs(firstTaskid, taskid, isCleanup); + } + } catch (InterruptedException ie) { + } catch (IOException iee) { + LOG.error("Error in syncLogs: " + iee); + System.exit(-1); + } + } + } + }; + t.setName("Thread for syncLogs"); + t.setDaemon(true); + t.start(); + + String pid = ""; + if (!Shell.WINDOWS) { + pid = System.getenv().get("JVM_PID"); + } + JvmContext context = new JvmContext(jvmId, pid); + int idleLoopCount = 0; + Task task = null; + try { + while (true) { + taskid = null; + JvmTask myTask = umbilical.getTask(context); + if (myTask.shouldDie()) { + break; + } else { + if (myTask.getTask() == null) { + taskid = null; + if (++idleLoopCount >= SLEEP_LONGER_COUNT) { + //we sleep for a bigger interval when we don't receive + //tasks for a while + Thread.sleep(1500); + } else { + Thread.sleep(500); + } + continue; + } + } + idleLoopCount = 0; + task = myTask.getTask(); + taskid = task.getTaskID(); + isCleanup = task.isTaskCleanupTask(); + // reset the statistics for the task + FileSystem.clearStatistics(); + + //create the index file so that the log files + //are viewable immediately + TaskLog.syncLogs(firstTaskid, taskid, isCleanup); + JobConf job = new JobConf(task.getJobFile()); + //setupWorkDir actually sets up the symlinks for the 
distributed + //cache. After a task exits we wipe the workdir clean, and hence + //the symlinks have to be rebuilt. + TaskRunner.setupWorkDir(job); + + numTasksToExecute = job.getNumTasksToExecutePerJvm(); + assert(numTasksToExecute != 0); + TaskLog.cleanup(job.getInt("mapred.userlog.retain.hours", 24)); + + task.setConf(job); + + defaultConf.addResource(new Path(task.getJobFile())); + + // Initiate Java VM metrics + JvmMetrics.init(task.getPhase().toString(), job.getSessionId()); + // use job-specified working directory + FileSystem.get(job).setWorkingDirectory(job.getWorkingDirectory()); + try { + task.run(job, umbilical); // run the task + } finally { + TaskLog.syncLogs(firstTaskid, taskid, isCleanup); + } + if (numTasksToExecute > 0 && ++numTasksExecuted == numTasksToExecute) { + break; + } + } + } catch (FSError e) { + LOG.fatal("FSError from child", e); + umbilical.fsError(taskid, e.getMessage()); + } catch (Exception exception) { + LOG.warn("Error running child", exception); + try { + if (task != null) { + // do cleanup for the task + task.taskCleanup(umbilical); + } + } catch (Exception e) { + LOG.info("Error cleaning up" + e); + } + // Report back any failures, for diagnostic purposes + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + exception.printStackTrace(new PrintStream(baos)); + if (taskid != null) { + umbilical.reportDiagnosticInfo(taskid, baos.toString()); + } + } catch (Throwable throwable) { + LOG.fatal("Error running child : " + + StringUtils.stringifyException(throwable)); + if (taskid != null) { + Throwable tCause = throwable.getCause(); + String cause = tCause == null + ? throwable.getMessage() + : StringUtils.stringifyException(tCause); + umbilical.fatalError(taskid, cause); + } + } finally { + RPC.stopProxy(umbilical); + MetricsContext metricsContext = MetricsUtil.getContext("mapred"); + metricsContext.close(); + // Shutting down log4j of the child-vm... + // This assumes that on return from Task.run() + // there is no more logging done. + LogManager.shutdown(); + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/CleanupQueue.java b/src/mapred/org/apache/hadoop/mapred/CleanupQueue.java new file mode 100644 index 0000000..42da577 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/CleanupQueue.java @@ -0,0 +1,141 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.util.concurrent.LinkedBlockingQueue; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; + +class CleanupQueue { + + public static final Log LOG = + LogFactory.getLog(CleanupQueue.class); + + private static PathCleanupThread cleanupThread; + + /** + * Create a singleton path-clean-up queue. It can be used to delete + * paths(directories/files) in a separate thread. This constructor creates a + * clean-up thread and also starts it as a daemon. Callers can instantiate one + * CleanupQueue per JVM and can use it for deleting paths. Use + * {@link CleanupQueue#addToQueue(PathDeletionContext...)} to add paths for + * deletion. + */ + public CleanupQueue() { + synchronized (PathCleanupThread.class) { + if (cleanupThread == null) { + cleanupThread = new PathCleanupThread(); + } + } + } + + /** + * Contains info related to the path of the file/dir to be deleted + */ + static class PathDeletionContext { + String fullPath;// full path of file or dir + FileSystem fs; + + public PathDeletionContext(FileSystem fs, String fullPath) { + this.fs = fs; + this.fullPath = fullPath; + } + + protected String getPathForCleanup() { + return fullPath; + } + + /** + * Makes the path(and its subdirectories recursively) fully deletable + */ + protected void enablePathForCleanup() throws IOException { + // do nothing + } + } + + /** + * Adds the paths to the queue of paths to be deleted by cleanupThread. + */ + void addToQueue(PathDeletionContext... contexts) { + cleanupThread.addToQueue(contexts); + } + + protected static boolean deletePath(PathDeletionContext context) + throws IOException { + context.enablePathForCleanup(); + + if (LOG.isDebugEnabled()) { + LOG.debug("Trying to delete " + context.fullPath); + } + if (context.fs.exists(new Path(context.fullPath))) { + return context.fs.delete(new Path(context.fullPath), true); + } + return true; + } + + private static class PathCleanupThread extends Thread { + + // cleanup queue which deletes files/directories of the paths queued up. + private LinkedBlockingQueue queue = + new LinkedBlockingQueue(); + + public PathCleanupThread() { + setName("Directory/File cleanup thread"); + setDaemon(true); + start(); + } + + void addToQueue(PathDeletionContext[] contexts) { + for (PathDeletionContext context : contexts) { + try { + queue.put(context); + } catch(InterruptedException ie) {} + } + } + + public void run() { + if (LOG.isDebugEnabled()) { + LOG.debug(getName() + " started."); + } + PathDeletionContext context = null; + while (true) { + try { + context = queue.take(); + // delete the path. + if (!deletePath(context)) { + LOG.warn("CleanupThread:Unable to delete path " + context.fullPath); + } + else if (LOG.isDebugEnabled()) { + LOG.debug("DELETED " + context.fullPath); + } + } catch (InterruptedException t) { + LOG.warn("Interrupted deletion of " + context.fullPath); + return; + } catch (Exception e) { + LOG.warn("Error deleting path " + context.fullPath + ": " + e); + } + } + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/Clock.java b/src/mapred/org/apache/hadoop/mapred/Clock.java new file mode 100644 index 0000000..edede0b --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/Clock.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +/** + * A clock class - can be mocked out for testing. + */ +class Clock { + long getTime() { + return System.currentTimeMillis(); + } +} \ No newline at end of file diff --git a/src/mapred/org/apache/hadoop/mapred/ClusterStatus.java b/src/mapred/org/apache/hadoop/mapred/ClusterStatus.java new file mode 100644 index 0000000..6f51841 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/ClusterStatus.java @@ -0,0 +1,488 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryMXBean; +import java.lang.management.MemoryUsage; + + +import java.util.HashMap; +import java.util.Map; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.mapreduce.TaskType; + +/** + * Status information on the current state of the Map-Reduce cluster. + * + *

+ * <p><code>ClusterStatus</code> provides clients with information such as:
+ * <ol>
+ *   <li>Size of the cluster.</li>
+ *   <li>Name of the trackers.</li>
+ *   <li>Task capacity of the cluster.</li>
+ *   <li>The number of currently running map &amp; reduce tasks.</li>
+ *   <li>State of the <code>JobTracker</code>.</li>
+ * </ol></p>
+ *
+ * <p>Clients can query for the latest <code>ClusterStatus</code>, via
+ * {@link JobClient#getClusterStatus()}.</p>
+ * + * @see JobClient + */ +public class ClusterStatus implements Writable { + + private int numActiveTrackers; + private Collection activeTrackers = new ArrayList(); + private Collection blacklistedTrackers = new ArrayList(); + private int numBlacklistedTrackers; + private int numExcludedNodes; + private long ttExpiryInterval; + private int map_tasks; + private int reduce_tasks; + private int max_map_tasks; + private int max_reduce_tasks; + private int total_map_tasks; + private int total_reduce_tasks; + private JobTracker.State state; + private long used_memory; + private long max_memory; + private Collection taskTrackersDetails = + new ArrayList(); + // This is the map between task tracker name and the list of all tasks + // that belong to currently running jobs and are/were executed + // on this tasktracker + private Map> taskTrackerExtendedTasks = + new HashMap>(); + + ClusterStatus() {} + + /** + * Construct a new cluster status. + * + * @param trackers no. of tasktrackers in the cluster + * @param maps no. of currently running map-tasks in the cluster + * @param reduces no. of currently running reduce-tasks in the cluster + * @param maxMaps the maximum no. of map tasks in the cluster + * @param maxReduces the maximum no. of reduce tasks in the cluster + * @param state the {@link JobTracker.State} of the JobTracker + * @deprecated + */ + @Deprecated + ClusterStatus(int trackers, int maps, int reduces, int maxMaps, + int maxReduces, JobTracker.State state) { + this(trackers, 0, JobTracker.TASKTRACKER_EXPIRY_INTERVAL, maps, reduces, + maxMaps, maxReduces, state); + } + + /** + * Construct a new cluster status. + * + * @param trackers no. of tasktrackers in the cluster + * @param blacklists no of blacklisted task trackers in the cluster + * @param ttExpiryInterval the tasktracker expiry interval + * @param maps no. of currently running map-tasks in the cluster + * @param reduces no. of currently running reduce-tasks in the cluster + * @param maxMaps the maximum no. of map tasks in the cluster + * @param maxReduces the maximum no. of reduce tasks in the cluster + * @param state the {@link JobTracker.State} of the JobTracker + */ + ClusterStatus(int trackers, int blacklists, long ttExpiryInterval, + int maps, int reduces, + int maxMaps, int maxReduces, JobTracker.State state) { + this(trackers, blacklists, ttExpiryInterval, maps, reduces, maxMaps, + maxReduces, state, 0); + } + + /** + * @param numDecommissionedNodes number of decommission trackers + */ + ClusterStatus(int trackers, int blacklists, long ttExpiryInterval, + int maps, int reduces, int maxMaps, int maxReduces, + JobTracker.State state, int numDecommissionedNodes) { + numActiveTrackers = trackers; + numBlacklistedTrackers = blacklists; + this.numExcludedNodes = numDecommissionedNodes; + this.ttExpiryInterval = ttExpiryInterval; + map_tasks = maps; + reduce_tasks = reduces; + max_map_tasks = maxMaps; + max_reduce_tasks = maxReduces; + this.state = state; + + MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean(); + MemoryUsage status = memoryMXBean.getHeapMemoryUsage(); + used_memory = status.getUsed(); + max_memory = status.getMax(); + } + + /** + * Construct a new cluster status. + * + * @param activeTrackers active tasktrackers in the cluster + * @param blacklistedTrackers blacklisted tasktrackers in the cluster + * @param ttExpiryInterval the tasktracker expiry interval + * @param maps no. of currently running map-tasks in the cluster + * @param reduces no. 
of currently running reduce-tasks in the cluster + * @param maxMaps the maximum no. of map tasks in the cluster + * @param maxReduces the maximum no. of reduce tasks in the cluster + * @param state the {@link JobTracker.State} of the JobTracker + */ + ClusterStatus(Collection activeTrackers, + Collection blacklistedTrackers, + long ttExpiryInterval, + int maps, int reduces, int maxMaps, int maxReduces, + JobTracker.State state) { + this(activeTrackers, blacklistedTrackers, ttExpiryInterval, maps, reduces, + maxMaps, maxReduces, state, 0); + } + + /** + * @param numDecommissionNodes number of decommission trackers + */ + ClusterStatus(Collection activeTrackers, + Collection blacklistedTrackers, long ttExpiryInterval, + int maps, int reduces, int maxMaps, int maxReduces, + JobTracker.State state, int numDecommissionNodes) { + this(activeTrackers.size(), blacklistedTrackers.size(), ttExpiryInterval, + maps, reduces, maxMaps, maxReduces, state, numDecommissionNodes); + this.activeTrackers = activeTrackers; + this.blacklistedTrackers = blacklistedTrackers; + } + + + /** + * Get the number of task trackers in the cluster. + * + * @return the number of task trackers in the cluster. + */ + public int getTaskTrackers() { + return numActiveTrackers; + } + + /** + * Get the names of task trackers in the cluster. + * + * @return the active task trackers in the cluster. + */ + public Collection getActiveTrackerNames() { + return activeTrackers; + } + + /** + * Get the names of task trackers in the cluster. + * + * @return the blacklisted task trackers in the cluster. + */ + public Collection getBlacklistedTrackerNames() { + return blacklistedTrackers; + } + + /** + * Get the number of blacklisted task trackers in the cluster. + * + * @return the number of blacklisted task trackers in the cluster. + */ + public int getBlacklistedTrackers() { + return numBlacklistedTrackers; + } + + /** + * Get the number of excluded hosts in the cluster. + * @return the number of excluded hosts in the cluster. + */ + public int getNumExcludedNodes() { + return numExcludedNodes; + } + + /** + * Get the tasktracker expiry interval for the cluster + * @return the expiry interval in msec + */ + public long getTTExpiryInterval() { + return ttExpiryInterval; + } + + /** + * Get the number of currently running map tasks in the cluster. + * + * @return the number of currently running map tasks in the cluster. + */ + public int getMapTasks() { + return map_tasks; + } + + /** + * Get the number of currently running reduce tasks in the cluster. + * + * @return the number of currently running reduce tasks in the cluster. + */ + public int getReduceTasks() { + return reduce_tasks; + } + + + public int getTotalMapTasks() { + return total_map_tasks; + } + + public int getTotalReduceTasks() { + return total_reduce_tasks; + } + + /** + * Get the maximum capacity for running map tasks in the cluster. + * + * @return the maximum capacity for running map tasks in the cluster. + */ + public int getMaxMapTasks() { + return max_map_tasks; + } + + /** + * Get the maximum capacity for running reduce tasks in the cluster. + * + * @return the maximum capacity for running reduce tasks in the cluster. + */ + public int getMaxReduceTasks() { + return max_reduce_tasks; + } + + /** + * Get the current state of the JobTracker, + * as {@link JobTracker.State} + * + * @return the current state of the JobTracker. 
+ */ + public JobTracker.State getJobTrackerState() { + return state; + } + + /** + * Get the total heap memory used by the JobTracker + * + * @return the size of heap memory used by the JobTracker + */ + public long getUsedMemory() { + return used_memory; + } + + ClusterStatus(Collection activeTrackers, + Collection blackListedTrackerInfo, + Collection taskTrackersInfo, + Collection runningJobs, + long ttExpiryInterval, + int maps, int reduces, int maxMaps, int maxReduces, + JobTracker.State state, int numDecommissionNodes) { + this(activeTrackers, blackListedTrackerInfo, ttExpiryInterval, + maps, reduces, maxMaps, maxReduces, state, numDecommissionNodes); + this.taskTrackersDetails = taskTrackersInfo; + initTrackersToTasksMap(runningJobs); + } + + + /** + * Get the TaskTrackerStatus for each task tracker in the cluster + * + * @return the collection of all task tracker statuses + */ + public Collection getTaskTrackersDetails() { + return taskTrackersDetails; + } + + /** + * Goes through the list of TaskStatus objects for each of the running jobs + * on the cluster and associates them with the name of the task tracker + * they are or were running on. + */ + private void initTrackersToTasksMap(Collection jobsInProgress) { + for (TaskTrackerStatus tracker : taskTrackersDetails) { + taskTrackerExtendedTasks.put(tracker.getTrackerName(), + new ArrayList()); + } + for (JobInProgress job : jobsInProgress) { + total_map_tasks += job.getTasks(TaskType.MAP).length; + total_reduce_tasks += job.getTasks(TaskType.REDUCE).length; + for (TaskInProgress task : job.getTasks(TaskType.REDUCE)) { + + TaskStatus[] taskStatuses = task.getTaskStatuses(); + for (TaskStatus status : taskStatuses) { + Collection trackerTasks = + taskTrackerExtendedTasks.get(status.getTaskTracker()); + if (trackerTasks == null) { + trackerTasks = new ArrayList(); + taskTrackerExtendedTasks.put(status.getTaskTracker(), + trackerTasks); + } + trackerTasks.add(status); + } + } + for (TaskInProgress task : job.getTasks(TaskType.MAP)) { + TaskStatus[] taskStatuses = task.getTaskStatuses(); + for (TaskStatus status : taskStatuses) { + Collection trackerTasks = + taskTrackerExtendedTasks.get(status.getTaskTracker()); + if (trackerTasks == null) { + trackerTasks = new ArrayList(); + taskTrackerExtendedTasks.put(status.getTaskTracker(), + trackerTasks); + } + trackerTasks.add(status); + } + } + } + } + + /** + * Get the collection of TaskStatus for all tasks that are running on this + * task tracker or have run on this task tracker and are part of the still + * running job. 
+ * + * @param ttName TaskTracker name + * @return Collection of TaskStatus objects + */ + public Collection getTaskTrackerTasksStatuses(String ttName) { + return taskTrackerExtendedTasks.get(ttName); + } + + /** + * Get the maximum configured heap memory that can be used by the JobTracker + * + * @return the configured size of max heap memory that can be used by the JobTracker + */ + public long getMaxMemory() { + return max_memory; + } + + public void write(DataOutput out) throws IOException { + if (activeTrackers.size() == 0) { + out.writeInt(numActiveTrackers); + out.writeInt(0); + } else { + out.writeInt(activeTrackers.size()); + out.writeInt(activeTrackers.size()); + for (String tracker : activeTrackers) { + Text.writeString(out, tracker); + } + } + if (blacklistedTrackers.size() == 0) { + out.writeInt(numBlacklistedTrackers); + out.writeInt(0); + } else { + out.writeInt(blacklistedTrackers.size()); + out.writeInt(blacklistedTrackers.size()); + for (String tracker : blacklistedTrackers) { + Text.writeString(out, tracker); + } + } + out.writeInt(numExcludedNodes); + out.writeLong(ttExpiryInterval); + out.writeInt(map_tasks); + out.writeInt(reduce_tasks); + out.writeInt(max_map_tasks); + out.writeInt(max_reduce_tasks); + out.writeLong(used_memory); + out.writeLong(max_memory); + WritableUtils.writeEnum(out, state); + out.writeInt(total_map_tasks); + out.writeInt(total_reduce_tasks); + out.writeInt(taskTrackersDetails.size()); + for (TaskTrackerStatus status : taskTrackersDetails) { + status.write(out); + } + + out.writeInt(taskTrackerExtendedTasks.size()); + for (Map.Entry> trackerTasks : + taskTrackerExtendedTasks.entrySet()) { + Text.writeString(out, trackerTasks.getKey()); + + Collection tasks = trackerTasks.getValue(); + out.writeInt(tasks.size()); + + for (TaskStatus task : tasks) { + TaskStatus.writeTaskStatus(out, task); + } + } + } + + public void readFields(DataInput in) throws IOException { + numActiveTrackers = in.readInt(); + int numTrackerNames = in.readInt(); + if (numTrackerNames > 0) { + for (int i = 0; i < numTrackerNames; i++) { + String name = Text.readString(in); + activeTrackers.add(name); + } + } + numBlacklistedTrackers = in.readInt(); + numTrackerNames = in.readInt(); + if (numTrackerNames > 0) { + for (int i = 0; i < numTrackerNames; i++) { + String name = Text.readString(in); + blacklistedTrackers.add(name); + } + } + numExcludedNodes = in.readInt(); + ttExpiryInterval = in.readLong(); + map_tasks = in.readInt(); + reduce_tasks = in.readInt(); + max_map_tasks = in.readInt(); + max_reduce_tasks = in.readInt(); + used_memory = in.readLong(); + max_memory = in.readLong(); + state = WritableUtils.readEnum(in, JobTracker.State.class); + + total_map_tasks = in.readInt(); + total_reduce_tasks = in.readInt(); + int taskTrackers = in.readInt(); + for (int i = 0; i < taskTrackers; i++) { + TaskTrackerStatus status = new TaskTrackerStatus(); + status.readFields(in); + taskTrackersDetails.add(status); + } + int mapSize = in.readInt(); + for (int i = 0; i < mapSize; i++) { + String trackerName = Text.readString(in); + int numTasks = in.readInt(); + Collection tasks = new ArrayList(numTasks); + for (int j = 0; j < numTasks; j++) { + TaskStatus status = TaskStatus.readTaskStatus(in); + tasks.add(status); + } + + taskTrackerExtendedTasks.put(trackerName, tasks); + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/CommitTaskAction.java b/src/mapred/org/apache/hadoop/mapred/CommitTaskAction.java new file mode 100644 index 0000000..8ab46df --- /dev/null +++ 
b/src/mapred/org/apache/hadoop/mapred/CommitTaskAction.java @@ -0,0 +1,54 @@ +/* Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * Represents a directive from the {@link org.apache.hadoop.mapred.JobTracker} + * to the {@link org.apache.hadoop.mapred.TaskTracker} to commit the output + * of the task. + * + */ +class CommitTaskAction extends TaskTrackerAction { + private TaskAttemptID taskId; + + public CommitTaskAction() { + super(ActionType.COMMIT_TASK); + taskId = new TaskAttemptID(); + } + + public CommitTaskAction(TaskAttemptID taskId) { + super(ActionType.COMMIT_TASK); + this.taskId = taskId; + } + + public TaskAttemptID getTaskID() { + return taskId; + } + + public void write(DataOutput out) throws IOException { + taskId.write(out); + } + + public void readFields(DataInput in) throws IOException { + taskId.readFields(in); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/CompletedJobStatusStore.java b/src/mapred/org/apache/hadoop/mapred/CompletedJobStatusStore.java new file mode 100644 index 0000000..fa5e060 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/CompletedJobStatusStore.java @@ -0,0 +1,327 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; + +/** + * Persists and retrieves the Job info of a job into/from DFS. + *

+ * If the retain time is zero, jobs are not persisted. *

+ * A daemon thread cleans up job info files older than the retain time. *

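The persistence behaviour is driven by a few JobTracker settings, read in the constructor below; a minimal sketch of setting them programmatically (values are illustrative, and in practice they would normally live in mapred-site.xml):

import org.apache.hadoop.conf.Configuration;

public class JobStatusStoreSettings {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Turn persistence on; it is off by default.
    conf.setBoolean("mapred.job.tracker.persist.jobstatus.active", true);
    // Retain time in hours; 0 means nothing is persisted.
    conf.setInt("mapred.job.tracker.persist.jobstatus.hours", 24);
    // DFS directory that holds the per-job <jobid>.info files.
    conf.set("mapred.job.tracker.persist.jobstatus.dir", "/jobtracker/jobsInfo");
    System.out.println(conf.get("mapred.job.tracker.persist.jobstatus.dir"));
  }
}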
+ * The retain time can be set with the 'persist.jobstatus.hours' + * configuration variable (it is in hours). + */ +class CompletedJobStatusStore implements Runnable { + private boolean active; + private String jobInfoDir; + private long retainTime; + private FileSystem fs; + private static final String JOB_INFO_STORE_DIR = "/jobtracker/jobsInfo"; + + public static final Log LOG = + LogFactory.getLog(CompletedJobStatusStore.class); + + private static long HOUR = 1000 * 60 * 60; + private static long SLEEP_TIME = 1 * HOUR; + + CompletedJobStatusStore(Configuration conf) throws IOException { + active = + conf.getBoolean("mapred.job.tracker.persist.jobstatus.active", false); + + if (active) { + retainTime = + conf.getInt("mapred.job.tracker.persist.jobstatus.hours", 0) * HOUR; + + jobInfoDir = + conf.get("mapred.job.tracker.persist.jobstatus.dir", JOB_INFO_STORE_DIR); + + Path path = new Path(jobInfoDir); + + // set the fs + this.fs = path.getFileSystem(conf); + if (!fs.exists(path)) { + fs.mkdirs(path); + } + + if (retainTime == 0) { + // as retain time is zero, all stored jobstatuses are deleted. + deleteJobStatusDirs(); + } + LOG.info("Completed job store activated/configured with retain-time : " + + retainTime + " , job-info-dir : " + jobInfoDir); + } else { + LOG.info("Completed job store is inactive"); + } + } + + /** + * Indicates if job status persistency is active or not. + * + * @return TRUE if active, FALSE otherwise. + */ + public boolean isActive() { + return active; + } + + public void run() { + if (retainTime > 0) { + while (true) { + deleteJobStatusDirs(); + try { + Thread.sleep(SLEEP_TIME); + } + catch (InterruptedException ex) { + break; + } + } + } + } + + private void deleteJobStatusDirs() { + try { + long currentTime = System.currentTimeMillis(); + FileStatus[] jobInfoFiles = fs.listStatus( + new Path[]{new Path(jobInfoDir)}); + + //noinspection ForLoopReplaceableByForEach + for (FileStatus jobInfo : jobInfoFiles) { + try { + if ((currentTime - jobInfo.getModificationTime()) > retainTime) { + fs.delete(jobInfo.getPath(), true); + } + } + catch (IOException ie) { + LOG.warn("Could not do housekeeping for [ " + + jobInfo.getPath() + "] job info : " + ie.getMessage(), ie); + } + } + } + catch (IOException ie) { + LOG.warn("Could not obtain job info files : " + ie.getMessage(), ie); + } + } + + private Path getInfoFilePath(JobID jobId) { + return new Path(jobInfoDir, jobId + ".info"); + } + + /** + * Persists a job in DFS. + * + * @param job the job about to be 'retired' + */ + public void store(JobInProgress job) { + if (active && retainTime > 0) { + JobID jobId = job.getStatus().getJobID(); + Path jobStatusFile = getInfoFilePath(jobId); + try { + FSDataOutputStream dataOut = fs.create(jobStatusFile); + + job.getStatus().write(dataOut); + + job.getProfile().write(dataOut); + + job.getCounters().write(dataOut); + + TaskCompletionEvent[] events = + job.getTaskCompletionEvents(0, Integer.MAX_VALUE); + dataOut.writeInt(events.length); + for (TaskCompletionEvent event : events) { + event.write(dataOut); + } + + dataOut.close(); + } catch (IOException ex) { + LOG.warn("Could not store [" + jobId + "] job info : " + + ex.getMessage(), ex); + try { + fs.delete(jobStatusFile, true); + } + catch (IOException ex1) { + //ignore + } + } + } + } + + private FSDataInputStream getJobInfoFile(JobID jobId) throws IOException { + Path jobStatusFile = getInfoFilePath(jobId); + return (fs.exists(jobStatusFile)) ? 
fs.open(jobStatusFile) : null; + } + + private JobStatus readJobStatus(FSDataInputStream dataIn) throws IOException { + JobStatus jobStatus = new JobStatus(); + jobStatus.readFields(dataIn); + return jobStatus; + } + + private JobProfile readJobProfile(FSDataInputStream dataIn) + throws IOException { + JobProfile jobProfile = new JobProfile(); + jobProfile.readFields(dataIn); + return jobProfile; + } + + private Counters readCounters(FSDataInputStream dataIn) throws IOException { + Counters counters = new Counters(); + counters.readFields(dataIn); + return counters; + } + + private TaskCompletionEvent[] readEvents(FSDataInputStream dataIn, + int offset, int len) + throws IOException { + int size = dataIn.readInt(); + if (offset > size) { + return TaskCompletionEvent.EMPTY_ARRAY; + } + if (offset + len > size) { + len = size - offset; + } + TaskCompletionEvent[] events = new TaskCompletionEvent[len]; + for (int i = 0; i < (offset + len); i++) { + TaskCompletionEvent event = new TaskCompletionEvent(); + event.readFields(dataIn); + if (i >= offset) { + events[i - offset] = event; + } + } + return events; + } + + /** + * This method retrieves JobStatus information from DFS stored using + * store method. + * + * @param jobId the jobId for which jobStatus is queried + * @return JobStatus object, null if not able to retrieve + */ + public JobStatus readJobStatus(JobID jobId) { + JobStatus jobStatus = null; + + if (null == jobId) { + LOG.warn("Could not read job status for null jobId"); + return null; + } + + if (active) { + try { + FSDataInputStream dataIn = getJobInfoFile(jobId); + if (dataIn != null) { + jobStatus = readJobStatus(dataIn); + dataIn.close(); + } + } catch (IOException ex) { + LOG.warn("Could not read [" + jobId + "] job status : " + ex, ex); + } + } + return jobStatus; + } + + /** + * This method retrieves JobProfile information from DFS stored using + * store method. + * + * @param jobId the jobId for which jobProfile is queried + * @return JobProfile object, null if not able to retrieve + */ + public JobProfile readJobProfile(JobID jobId) { + JobProfile jobProfile = null; + if (active) { + try { + FSDataInputStream dataIn = getJobInfoFile(jobId); + if (dataIn != null) { + readJobStatus(dataIn); + jobProfile = readJobProfile(dataIn); + dataIn.close(); + } + } catch (IOException ex) { + LOG.warn("Could not read [" + jobId + "] job profile : " + ex, ex); + } + } + return jobProfile; + } + + /** + * This method retrieves Counters information from DFS stored using + * store method. + * + * @param jobId the jobId for which Counters is queried + * @return Counters object, null if not able to retrieve + */ + public Counters readCounters(JobID jobId) { + Counters counters = null; + if (active) { + try { + FSDataInputStream dataIn = getJobInfoFile(jobId); + if (dataIn != null) { + readJobStatus(dataIn); + readJobProfile(dataIn); + counters = readCounters(dataIn); + dataIn.close(); + } + } catch (IOException ex) { + LOG.warn("Could not read [" + jobId + "] job counters : " + ex, ex); + } + } + return counters; + } + + /** + * This method retrieves TaskCompletionEvents information from DFS stored + * using store method. 
+ * + * @param jobId the jobId for which TaskCompletionEvents is queried + * @param fromEventId events offset + * @param maxEvents max number of events + * @return TaskCompletionEvent[], empty array if not able to retrieve + */ + public TaskCompletionEvent[] readJobTaskCompletionEvents(JobID jobId, + int fromEventId, + int maxEvents) { + TaskCompletionEvent[] events = TaskCompletionEvent.EMPTY_ARRAY; + if (active) { + try { + FSDataInputStream dataIn = getJobInfoFile(jobId); + if (dataIn != null) { + readJobStatus(dataIn); + readJobProfile(dataIn); + readCounters(dataIn); + events = readEvents(dataIn, fromEventId, maxEvents); + dataIn.close(); + } + } catch (IOException ex) { + LOG.warn("Could not read [" + jobId + "] job events : " + ex, ex); + } + } + return events; + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/CompositeTaskTrackerInstrumentation.java b/src/mapred/org/apache/hadoop/mapred/CompositeTaskTrackerInstrumentation.java new file mode 100644 index 0000000..b994472 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/CompositeTaskTrackerInstrumentation.java @@ -0,0 +1,85 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.File; +import java.util.List; + +/** + * This TaskTrackerInstrumentation subclass forwards all the events it + * receives to a list of instrumentation objects, and can thus be used to + * attack multiple instrumentation objects to a TaskTracker. 
+ */ +class CompositeTaskTrackerInstrumentation extends TaskTrackerInstrumentation { + + private List instrumentations; + + public CompositeTaskTrackerInstrumentation(TaskTracker tt, + List instrumentations) { + super(tt); + this.instrumentations = instrumentations; + } + + // Package-private getter methods for tests + List getInstrumentations() { + return instrumentations; + } + + @Override + public void completeTask(TaskAttemptID t) { + for (TaskTrackerInstrumentation tti: instrumentations) { + tti.completeTask(t); + } + } + + @Override + public void timedoutTask(TaskAttemptID t) { + for (TaskTrackerInstrumentation tti: instrumentations) { + tti.timedoutTask(t); + } + } + + @Override + public void taskFailedPing(TaskAttemptID t) { + for (TaskTrackerInstrumentation tti: instrumentations) { + tti.taskFailedPing(t); + } + } + + @Override + public void reportTaskLaunch(TaskAttemptID t, File stdout, File stderr) { + for (TaskTrackerInstrumentation tti: instrumentations) { + tti.reportTaskLaunch(t, stdout, stderr); + } + } + + @Override + public void reportTaskEnd(TaskAttemptID t) { + for (TaskTrackerInstrumentation tti: instrumentations) { + tti.reportTaskEnd(t); + } + } + + @Override + public void statusUpdate(Task task, TaskStatus taskStatus) { + for (TaskTrackerInstrumentation tti: instrumentations) { + tti.statusUpdate(task, taskStatus); + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/Counters.java b/src/mapred/org/apache/hadoop/mapred/Counters.java new file mode 100644 index 0000000..52a4278 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/Counters.java @@ -0,0 +1,708 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.text.ParseException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.IdentityHashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.MissingResourceException; +import java.util.ResourceBundle; + +import org.apache.commons.logging.*; +import org.apache.hadoop.io.IntWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.util.StringUtils; + +/** + * A set of named counters. + * + *

Counters represent global counters, defined either by the + * Map-Reduce framework or applications. Each Counter can be of + * any {@link Enum} type.

+ * + *

Counters are bunched into {@link Group}s, each comprising of + * counters from a particular Enum class. + * @deprecated Use {@link org.apache.hadoop.mapreduce.Counters} instead. + */ +@Deprecated +public class Counters implements Writable, Iterable { + private static final Log LOG = LogFactory.getLog(Counters.class); + private static final char GROUP_OPEN = '{'; + private static final char GROUP_CLOSE = '}'; + private static final char COUNTER_OPEN = '['; + private static final char COUNTER_CLOSE = ']'; + private static final char UNIT_OPEN = '('; + private static final char UNIT_CLOSE = ')'; + private static char[] charsToEscape = {GROUP_OPEN, GROUP_CLOSE, + COUNTER_OPEN, COUNTER_CLOSE, + UNIT_OPEN, UNIT_CLOSE}; + + //private static Log log = LogFactory.getLog("Counters.class"); + + /** + * A counter record, comprising its name and value. + */ + public static class Counter extends org.apache.hadoop.mapreduce.Counter { + + Counter() { + } + + Counter(String name, String displayName, long value) { + super(name, displayName); + increment(value); + } + + public void setDisplayName(String newName) { + super.setDisplayName(newName); + } + + /** + * Returns the compact stringified version of the counter in the format + * [(actual-name)(display-name)(value)] + */ + public synchronized String makeEscapedCompactString() { + StringBuffer buf = new StringBuffer(); + buf.append(COUNTER_OPEN); + + // Add the counter name + buf.append(UNIT_OPEN); + buf.append(escape(getName())); + buf.append(UNIT_CLOSE); + + // Add the display name + buf.append(UNIT_OPEN); + buf.append(escape(getDisplayName())); + buf.append(UNIT_CLOSE); + + // Add the value + buf.append(UNIT_OPEN); + buf.append(this.getValue()); + buf.append(UNIT_CLOSE); + + buf.append(COUNTER_CLOSE); + + return buf.toString(); + } + + // Checks for (content) equality of two (basic) counters + @Deprecated + synchronized boolean contentEquals(Counter c) { + return this.equals(c); + } + + /** + * What is the current value of this counter? + * @return the current value + */ + public synchronized long getCounter() { + return getValue(); + } + + } + + /** + * Group of counters, comprising of counters from a particular + * counter {@link Enum} class. + * + *

Group handles localization of the class name and the + * counter names.

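A small, self-contained sketch of how the API defined in this file is typically used (the enum, group names and values are made up for the example):

import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.Counters.Counter;
import org.apache.hadoop.mapred.Counters.Group;

public class CountersExample {
  enum MyCounters { GOOD_RECORDS, BAD_RECORDS }

  public static void main(String[] args) throws Exception {
    Counters counters = new Counters();
    counters.incrCounter(MyCounters.GOOD_RECORDS, 98L);
    counters.incrCounter(MyCounters.BAD_RECORDS, 2L);
    counters.incrCounter("custom-group", "my-counter", 7L);

    // Counters created from the same Enum class land in the same Group.
    Group group = counters.getGroup(MyCounters.class.getName());
    for (Counter c : group) {
      System.out.println(c.getDisplayName() + " = " + c.getCounter());
    }

    // The escaped compact string form round-trips through fromEscapedCompactString().
    String compact = counters.makeEscapedCompactString();
    Counters copy = Counters.fromEscapedCompactString(compact);
    System.out.println(copy.getCounter(MyCounters.GOOD_RECORDS)); // 98
  }
}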
+ */ + public static class Group implements Writable, Iterable { + private String groupName; + private String displayName; + private Map subcounters = new HashMap(); + + // Optional ResourceBundle for localization of group and counter names. + private ResourceBundle bundle = null; + + Group(String groupName) { + try { + bundle = getResourceBundle(groupName); + } + catch (MissingResourceException neverMind) { + } + this.groupName = groupName; + this.displayName = localize("CounterGroupName", groupName); + LOG.debug("Creating group " + groupName + " with " + + (bundle == null ? "nothing" : "bundle")); + } + + /** + * Returns the specified resource bundle, or throws an exception. + * @throws MissingResourceException if the bundle isn't found + */ + private static ResourceBundle getResourceBundle(String enumClassName) { + String bundleName = enumClassName.replace('$','_'); + return ResourceBundle.getBundle(bundleName); + } + + /** + * Returns raw name of the group. This is the name of the enum class + * for this group of counters. + */ + public String getName() { + return groupName; + } + + /** + * Returns localized name of the group. This is the same as getName() by + * default, but different if an appropriate ResourceBundle is found. + */ + public String getDisplayName() { + return displayName; + } + + /** + * Set the display name + */ + public void setDisplayName(String displayName) { + this.displayName = displayName; + } + + /** + * Returns the compact stringified version of the group in the format + * {(actual-name)(display-name)(value)[][][]} where [] are compact strings for the + * counters within. + */ + public String makeEscapedCompactString() { + StringBuffer buf = new StringBuffer(); + buf.append(GROUP_OPEN); // group start + + // Add the group name + buf.append(UNIT_OPEN); + buf.append(escape(getName())); + buf.append(UNIT_CLOSE); + + // Add the display name + buf.append(UNIT_OPEN); + buf.append(escape(getDisplayName())); + buf.append(UNIT_CLOSE); + + // write the value + for(Counter counter: subcounters.values()) { + buf.append(counter.makeEscapedCompactString()); + } + + buf.append(GROUP_CLOSE); // group end + return buf.toString(); + } + + @Override + public int hashCode() { + return subcounters.hashCode(); + } + + /** + * Checks for (content) equality of Groups + */ + @Override + public synchronized boolean equals(Object obj) { + boolean isEqual = false; + if (obj != null && obj instanceof Group) { + Group g = (Group) obj; + if (size() == g.size()) { + isEqual = true; + for (Map.Entry entry : subcounters.entrySet()) { + String key = entry.getKey(); + Counter c1 = entry.getValue(); + Counter c2 = g.getCounterForName(key); + if (!c1.contentEquals(c2)) { + isEqual = false; + break; + } + } + } + } + return isEqual; + } + + /** + * Returns the value of the specified counter, or 0 if the counter does + * not exist. + */ + public synchronized long getCounter(String counterName) { + for(Counter counter: subcounters.values()) { + if (counter != null && counter.getDisplayName().equals(counterName)) { + return counter.getValue(); + } + } + return 0L; + } + + /** + * Get the counter for the given id and create it if it doesn't exist. 
+ * @param id the numeric id of the counter within the group + * @param name the internal counter name + * @return the counter + * @deprecated use {@link #getCounter(String)} instead + */ + @Deprecated + public synchronized Counter getCounter(int id, String name) { + return getCounterForName(name); + } + + /** + * Get the counter for the given name and create it if it doesn't exist. + * @param name the internal counter name + * @return the counter + */ + public synchronized Counter getCounterForName(String name) { + name = name.intern(); + Counter result = subcounters.get(name); + if (result == null) { + LOG.debug("Adding " + name); + result = new Counter(name, localize(name + ".name", name), 0L); + subcounters.put(name, result); + } + return result; + } + + /** + * Returns the number of counters in this group. + */ + public synchronized int size() { + return subcounters.size(); + } + + /** + * Looks up key in the ResourceBundle and returns the corresponding value. + * If the bundle or the key doesn't exist, returns the default value. + */ + private String localize(String key, String defaultValue) { + String result = defaultValue; + if (bundle != null) { + try { + result = bundle.getString(key); + } + catch (MissingResourceException mre) { + } + } + return result; + } + + public synchronized void write(DataOutput out) throws IOException { + Text.writeString(out, displayName); + WritableUtils.writeVInt(out, subcounters.size()); + for(Counter counter: subcounters.values()) { + counter.write(out); + } + } + + public synchronized void readFields(DataInput in) throws IOException { + displayName = Text.readString(in).intern(); + subcounters.clear(); + int size = WritableUtils.readVInt(in); + for(int i=0; i < size; i++) { + Counter counter = new Counter(); + counter.readFields(in); + subcounters.put(counter.getName(), counter); + } + } + + public synchronized Iterator iterator() { + return new ArrayList(subcounters.values()).iterator(); + } + } + + // Map from group name (enum class name) to map of int (enum ordinal) to + // counter record (name-value pair). + private Map counters = new HashMap(); + + /** + * A cache from enum values to the associated counter. Dramatically speeds up + * typical usage. + */ + private Map cache = new IdentityHashMap(); + + /** + * Returns the names of all counter classes. + * @return Set of counter names. + */ + public synchronized Collection getGroupNames() { + return counters.keySet(); + } + + public synchronized Iterator iterator() { + return counters.values().iterator(); + } + + /** + * Returns the named counter group, or an empty group if there is none + * with the specified name. + */ + public synchronized Group getGroup(String groupName) { + Group result = counters.get(groupName); + if (result == null) { + result = new Group(groupName); + counters.put(groupName, result); + } + return result; + } + + /** + * Find the counter for the given enum. The same enum will always return the + * same counter. + * @param key the counter key + * @return the matching counter object + */ + public synchronized Counter findCounter(Enum key) { + Counter counter = cache.get(key); + if (counter == null) { + Group group = getGroup(key.getDeclaringClass().getName()); + counter = group.getCounterForName(key.toString()); + cache.put(key, counter); + } + return counter; + } + + /** + * Find a counter given the group and the name. 
+ * @param group the name of the group + * @param name the internal name of the counter + * @return the counter for that name + */ + public synchronized Counter findCounter(String group, String name) { + return getGroup(group).getCounterForName(name); + } + + /** + * Find a counter by using strings + * @param group the name of the group + * @param id the id of the counter within the group (0 to N-1) + * @param name the internal name of the counter + * @return the counter for that name + * @deprecated + */ + @Deprecated + public synchronized Counter findCounter(String group, int id, String name) { + return getGroup(group).getCounterForName(name); + } + + /** + * Increments the specified counter by the specified amount, creating it if + * it didn't already exist. + * @param key identifies a counter + * @param amount amount by which counter is to be incremented + */ + public synchronized void incrCounter(Enum key, long amount) { + findCounter(key).increment(amount); + } + + /** + * Increments the specified counter by the specified amount, creating it if + * it didn't already exist. + * @param group the name of the group + * @param counter the internal name of the counter + * @param amount amount by which counter is to be incremented + */ + public synchronized void incrCounter(String group, String counter, long amount) { + getGroup(group).getCounterForName(counter).increment(amount); + } + + /** + * Returns current value of the specified counter, or 0 if the counter + * does not exist. + */ + public synchronized long getCounter(Enum key) { + return findCounter(key).getValue(); + } + + /** + * Increments multiple counters by their amounts in another Counters + * instance. + * @param other the other Counters instance + */ + public synchronized void incrAllCounters(Counters other) { + for (Group otherGroup: other) { + Group group = getGroup(otherGroup.getName()); + group.displayName = otherGroup.displayName; + for (Counter otherCounter : otherGroup) { + Counter counter = group.getCounterForName(otherCounter.getName()); + counter.setDisplayName(otherCounter.getDisplayName()); + counter.increment(otherCounter.getValue()); + } + } + } + + /** + * Convenience method for computing the sum of two sets of counters. + */ + public static Counters sum(Counters a, Counters b) { + Counters counters = new Counters(); + counters.incrAllCounters(a); + counters.incrAllCounters(b); + return counters; + } + + /** + * Returns the total number of counters, by summing the number of counters + * in each group. + */ + public synchronized int size() { + int result = 0; + for (Group group : this) { + result += group.size(); + } + return result; + } + + /** + * Write the set of groups. + * The external format is: + * #groups (groupName group)* + * + * i.e. the number of groups followed by 0 or more groups, where each + * group is of the form: + * + * groupDisplayName #counters (false | true counter)* + * + * where each counter is of the form: + * + * name (false | true displayName) value + */ + public synchronized void write(DataOutput out) throws IOException { + out.writeInt(counters.size()); + for (Group group: counters.values()) { + Text.writeString(out, group.getName()); + group.write(out); + } + } + + /** + * Read a set of groups. 
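The write()/readFields() pair documented here can be exercised with Hadoop's in-memory buffers; a minimal round-trip sketch (group and counter names are arbitrary):

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.mapred.Counters;

public class CountersWritableRoundTrip {
  public static void main(String[] args) throws Exception {
    Counters original = new Counters();
    original.incrCounter("my-group", "records", 42L);

    // Serialize using the group/counter layout described above.
    DataOutputBuffer out = new DataOutputBuffer();
    original.write(out);

    // Deserialize into a fresh instance.
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    Counters copy = new Counters();
    copy.readFields(in);

    System.out.println(copy.findCounter("my-group", "records").getCounter()); // prints 42
  }
}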
+ */ + public synchronized void readFields(DataInput in) throws IOException { + int numClasses = in.readInt(); + counters.clear(); + while (numClasses-- > 0) { + String groupName = Text.readString(in).intern(); + Group group = new Group(groupName); + group.readFields(in); + counters.put(groupName, group); + } + } + + /** + * Logs the current counter values. + * @param log The log to use. + */ + public void log(Log log) { + log.info("Counters: " + size()); + for(Group group: this) { + log.info(" " + group.getDisplayName()); + for (Counter counter: group) { + log.info(" " + counter.getDisplayName() + "=" + + counter.getCounter()); + } + } + } + + /** + * Return textual representation of the counter values. + */ + public synchronized String toString() { + StringBuilder sb = new StringBuilder("Counters: " + size()); + for (Group group: this) { + sb.append("\n\t" + group.getDisplayName()); + for (Counter counter: group) { + sb.append("\n\t\t" + counter.getDisplayName() + "=" + + counter.getCounter()); + } + } + return sb.toString(); + } + + /** + * Convert a counters object into a single line that is easy to parse. + * @return the string with "name=value" for each counter and separated by "," + */ + public synchronized String makeCompactString() { + StringBuffer buffer = new StringBuffer(); + boolean first = true; + for(Group group: this){ + for(Counter counter: group) { + if (first) { + first = false; + } else { + buffer.append(','); + } + buffer.append(group.getDisplayName()); + buffer.append('.'); + buffer.append(counter.getDisplayName()); + buffer.append(':'); + buffer.append(counter.getCounter()); + } + } + return buffer.toString(); + } + + /** + * Represent the counter in a textual format that can be converted back to + * its object form + * @return the string in the following format + * {(groupname)(group-displayname)[(countername)(displayname)(value)][][]}{}{} + */ + public synchronized String makeEscapedCompactString() { + StringBuffer buffer = new StringBuffer(); + for(Group group: this){ + buffer.append(group.makeEscapedCompactString()); + } + return buffer.toString(); + } + + // Extracts a block (data enclosed within delimeters) ignoring escape + // sequences. Throws ParseException if an incomplete block is found else + // returns null. + private static String getBlock(String str, char open, char close, + IntWritable index) throws ParseException { + StringBuilder split = new StringBuilder(); + int next = StringUtils.findNext(str, open, StringUtils.ESCAPE_CHAR, + index.get(), split); + split.setLength(0); // clear the buffer + if (next >= 0) { + ++next; // move over '(' + + next = StringUtils.findNext(str, close, StringUtils.ESCAPE_CHAR, + next, split); + if (next >= 0) { + ++next; // move over ')' + index.set(next); + return split.toString(); // found a block + } else { + throw new ParseException("Unexpected end of block", next); + } + } + return null; // found nothing + } + + /** + * Convert a stringified counter representation into a counter object. Note + * that the counter can be recovered if its stringified using + * {@link #makeEscapedCompactString()}. 
+ * @return a Counter + */ + public static Counters fromEscapedCompactString(String compactString) + throws ParseException { + Counters counters = new Counters(); + IntWritable index = new IntWritable(0); + + // Get the group to work on + String groupString = + getBlock(compactString, GROUP_OPEN, GROUP_CLOSE, index); + + while (groupString != null) { + IntWritable groupIndex = new IntWritable(0); + + // Get the actual name + String groupName = + getBlock(groupString, UNIT_OPEN, UNIT_CLOSE, groupIndex); + groupName = unescape(groupName); + + // Get the display name + String groupDisplayName = + getBlock(groupString, UNIT_OPEN, UNIT_CLOSE, groupIndex); + groupDisplayName = unescape(groupDisplayName); + + // Get the counters + Group group = counters.getGroup(groupName); + group.setDisplayName(groupDisplayName); + + String counterString = + getBlock(groupString, COUNTER_OPEN, COUNTER_CLOSE, groupIndex); + + while (counterString != null) { + IntWritable counterIndex = new IntWritable(0); + + // Get the actual name + String counterName = + getBlock(counterString, UNIT_OPEN, UNIT_CLOSE, counterIndex); + counterName = unescape(counterName); + + // Get the display name + String counterDisplayName = + getBlock(counterString, UNIT_OPEN, UNIT_CLOSE, counterIndex); + counterDisplayName = unescape(counterDisplayName); + + // Get the value + long value = + Long.parseLong(getBlock(counterString, UNIT_OPEN, UNIT_CLOSE, + counterIndex)); + + // Add the counter + Counter counter = group.getCounterForName(counterName); + counter.setDisplayName(counterDisplayName); + counter.increment(value); + + // Get the next counter + counterString = + getBlock(groupString, COUNTER_OPEN, COUNTER_CLOSE, groupIndex); + } + + groupString = getBlock(compactString, GROUP_OPEN, GROUP_CLOSE, index); + } + return counters; + } + + // Escapes all the delimiters for counters i.e {,[,(,),],} + private static String escape(String string) { + return StringUtils.escapeString(string, StringUtils.ESCAPE_CHAR, + charsToEscape); + } + + // Unescapes all the delimiters for counters i.e {,[,(,),],} + private static String unescape(String string) { + return StringUtils.unEscapeString(string, StringUtils.ESCAPE_CHAR, + charsToEscape); + } + + @Override + public synchronized int hashCode() { + return counters.hashCode(); + } + + @Override + public synchronized boolean equals(Object obj) { + boolean isEqual = false; + if (obj != null && obj instanceof Counters) { + Counters other = (Counters) obj; + if (size() == other.size()) { + isEqual = true; + for (Map.Entry entry : this.counters.entrySet()) { + String key = entry.getKey(); + Group sourceGroup = entry.getValue(); + Group targetGroup = other.getGroup(key); + if (!sourceGroup.equals(targetGroup)) { + isEqual = false; + break; + } + } + } + } + return isEqual; + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/DefaultJobHistoryParser.java b/src/mapred/org/apache/hadoop/mapred/DefaultJobHistoryParser.java new file mode 100644 index 0000000..a70c6f4 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/DefaultJobHistoryParser.java @@ -0,0 +1,176 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.util.*; +import java.io.*; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.mapred.JobHistory.Keys; +import org.apache.hadoop.mapred.JobHistory.Values; + +/** + * Default parser for job history files. It creates object model from + * job history file. + * + */ +public class DefaultJobHistoryParser { + + // This class is required to work around the Java compiler's lack of + // run-time information on generic classes. In particular, we need to be able + // to cast to this type without generating compiler warnings, which is only + // possible if it is a non-generic class. + + /** + * Populates a JobInfo object from the job's history log file. + * @param jobHistoryFile history file for this job. + * @param job a precreated JobInfo object, should be non-null. + * @param fs FileSystem where historyFile is present. + * @throws IOException + */ + public static void parseJobTasks(String jobHistoryFile, + JobHistory.JobInfo job, FileSystem fs) + throws IOException { + JobHistory.parseHistoryFromFS(jobHistoryFile, + new JobTasksParseListener(job), fs); + } + + /** + * Listener for Job's history log file, it populates JobHistory.JobInfo + * object with data from log file. + */ + static class JobTasksParseListener + implements JobHistory.Listener { + JobHistory.JobInfo job; + + JobTasksParseListener(JobHistory.JobInfo job) { + this.job = job; + } + + private JobHistory.Task getTask(String taskId) { + JobHistory.Task task = job.getAllTasks().get(taskId); + if (null == task) { + task = new JobHistory.Task(); + task.set(Keys.TASKID, taskId); + job.getAllTasks().put(taskId, task); + } + return task; + } + + private JobHistory.MapAttempt getMapAttempt( + String jobid, String jobTrackerId, String taskId, String taskAttemptId) { + + JobHistory.Task task = getTask(taskId); + JobHistory.MapAttempt mapAttempt = + (JobHistory.MapAttempt) task.getTaskAttempts().get(taskAttemptId); + if (null == mapAttempt) { + mapAttempt = new JobHistory.MapAttempt(); + mapAttempt.set(Keys.TASK_ATTEMPT_ID, taskAttemptId); + task.getTaskAttempts().put(taskAttemptId, mapAttempt); + } + return mapAttempt; + } + + private JobHistory.ReduceAttempt getReduceAttempt( + String jobid, String jobTrackerId, String taskId, String taskAttemptId) { + + JobHistory.Task task = getTask(taskId); + JobHistory.ReduceAttempt reduceAttempt = + (JobHistory.ReduceAttempt) task.getTaskAttempts().get(taskAttemptId); + if (null == reduceAttempt) { + reduceAttempt = new JobHistory.ReduceAttempt(); + reduceAttempt.set(Keys.TASK_ATTEMPT_ID, taskAttemptId); + task.getTaskAttempts().put(taskAttemptId, reduceAttempt); + } + return reduceAttempt; + } + + // JobHistory.Listener implementation + public void handle(JobHistory.RecordTypes recType, Map values) + throws IOException { + String jobTrackerId = values.get(JobHistory.Keys.JOBTRACKERID); + String jobid = values.get(Keys.JOBID); + + if (recType == JobHistory.RecordTypes.Job) { + job.handle(values); + }if (recType.equals(JobHistory.RecordTypes.Task)) { + String taskid = values.get(JobHistory.Keys.TASKID); + 
getTask(taskid).handle(values); + } else if (recType.equals(JobHistory.RecordTypes.MapAttempt)) { + String taskid = values.get(Keys.TASKID); + String mapAttemptId = values.get(Keys.TASK_ATTEMPT_ID); + + getMapAttempt(jobid, jobTrackerId, taskid, mapAttemptId).handle(values); + } else if (recType.equals(JobHistory.RecordTypes.ReduceAttempt)) { + String taskid = values.get(Keys.TASKID); + String reduceAttemptId = values.get(Keys.TASK_ATTEMPT_ID); + + getReduceAttempt(jobid, jobTrackerId, taskid, reduceAttemptId).handle(values); + } + } + } + + // call this only for jobs that succeeded for better results. + abstract static class NodesFilter implements JobHistory.Listener { + private Map> badNodesToNumFailedTasks = + new HashMap>(); + + Map> getValues(){ + return badNodesToNumFailedTasks; + } + String failureType; + + public void handle(JobHistory.RecordTypes recType, Map values) + throws IOException { + if (recType.equals(JobHistory.RecordTypes.MapAttempt) || + recType.equals(JobHistory.RecordTypes.ReduceAttempt)) { + if (failureType.equals(values.get(Keys.TASK_STATUS)) ) { + String hostName = values.get(Keys.HOSTNAME); + String taskid = values.get(Keys.TASKID); + Set tasks = badNodesToNumFailedTasks.get(hostName); + if (null == tasks ){ + tasks = new TreeSet(); + tasks.add(taskid); + badNodesToNumFailedTasks.put(hostName, tasks); + }else{ + tasks.add(taskid); + } + } + } + } + abstract void setFailureType(); + String getFailureType() { + return failureType; + } + NodesFilter() { + setFailureType(); + } + } + + static class FailedOnNodesFilter extends NodesFilter { + void setFailureType() { + failureType = Values.FAILED.name(); + } + } + static class KilledOnNodesFilter extends NodesFilter { + void setFailureType() { + failureType = Values.KILLED.name(); + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/DefaultTaskController.java b/src/mapred/org/apache/hadoop/mapred/DefaultTaskController.java new file mode 100644 index 0000000..73b3bb7 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/DefaultTaskController.java @@ -0,0 +1,154 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +*/ + +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.util.List; + +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.mapred.JvmManager.JvmEnv; +import org.apache.hadoop.mapred.CleanupQueue.PathDeletionContext; +import org.apache.hadoop.util.ProcessTree; +import org.apache.hadoop.util.Shell; +import org.apache.hadoop.util.Shell.ShellCommandExecutor; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +/** + * The default implementation for controlling tasks. 
+ * + * This class provides an implementation for launching and killing + * tasks that need to be run as the tasktracker itself. Hence, + * many of the initializing or cleanup methods are not required here. + */ +class DefaultTaskController extends TaskController { + + private static final Log LOG = + LogFactory.getLog(DefaultTaskController.class); + /** + * Launch a new JVM for the task. + * + * This method launches the new JVM for the task by executing the + * the JVM command using the {@link Shell.ShellCommandExecutor} + */ + void launchTaskJVM(TaskController.TaskControllerContext context) + throws IOException { + JvmEnv env = context.env; + List wrappedCommand = + TaskLog.captureOutAndError(env.setup, env.vargs, env.stdout, env.stderr, + env.logSize, true); + ShellCommandExecutor shexec = + new ShellCommandExecutor(wrappedCommand.toArray(new String[0]), + env.workDir, env.env); + // set the ShellCommandExecutor for later use. + context.shExec = shexec; + shexec.execute(); + } + + /** + * Initialize the task environment. + * + * Since tasks are launched as the tasktracker user itself, this + * method has no action to perform. + */ + void initializeTask(TaskController.TaskControllerContext context) { + // The default task controller does not need to set up + // any permissions for proper execution. + // So this is a dummy method. + return; + } + + + @Override + void setup() { + // nothing to setup + return; + } + + /* + * No need to do anything as we don't need to do as we dont need anything + * extra from what TaskTracker has done. + */ + @Override + void initializeJob(JobID jobId) { + } + + @Override + void terminateTask(TaskControllerContext context) { + ShellCommandExecutor shexec = context.shExec; + if (shexec != null) { + Process process = shexec.getProcess(); + if (Shell.WINDOWS) { + // Currently we don't use setsid on WINDOWS. + //So kill the process alone. + if (process != null) { + process.destroy(); + } + } + else { // In addition to the task JVM, kill its subprocesses also. 
+ String pid = context.pid; + if (pid != null) { + if(ProcessTree.isSetsidAvailable) { + ProcessTree.terminateProcessGroup(pid); + }else { + ProcessTree.terminateProcess(pid); + } + } + } + } + } + + @Override + void killTask(TaskControllerContext context) { + ShellCommandExecutor shexec = context.shExec; + if (shexec != null) { + if (Shell.WINDOWS) { + //We don't do send kill process signal in case of windows as + //already we have done a process.destroy() in termintateTaskJVM() + return; + } + String pid = context.pid; + if (pid != null) { + if(ProcessTree.isSetsidAvailable) { + ProcessTree.killProcessGroup(pid); + }else { + ProcessTree.killProcess(pid); + } + } + } + } + + /** + * Enables the task for cleanup by changing permissions of the specified path + * in the local filesystem + */ + @Override + void enableTaskForCleanup(PathDeletionContext context) + throws IOException { + try { + FileUtil.chmod(context.fullPath, "a+rwx", true); + } catch(InterruptedException e) { + LOG.warn("Interrupted while setting permissions for " + context.fullPath + + " for deletion."); + } catch(IOException ioe) { + LOG.warn("Unable to change permissions of " + context.fullPath); + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/DisallowedTaskTrackerException.java b/src/mapred/org/apache/hadoop/mapred/DisallowedTaskTrackerException.java new file mode 100644 index 0000000..e591d52 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/DisallowedTaskTrackerException.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; + + +/** + * This exception is thrown when a tasktracker tries to register or communicate + * with the jobtracker when it does not appear on the list of included nodes, + * or has been specifically excluded. + * + */ +class DisallowedTaskTrackerException extends IOException { + + public DisallowedTaskTrackerException(TaskTrackerStatus tracker) { + super("Tasktracker denied communication with jobtracker: " + tracker.getTrackerName()); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/EagerTaskInitializationListener.java b/src/mapred/org/apache/hadoop/mapred/EagerTaskInitializationListener.java new file mode 100644 index 0000000..2ea4fa1 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/EagerTaskInitializationListener.java @@ -0,0 +1,180 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapred.JobStatusChangeEvent.EventType; +import org.apache.hadoop.util.StringUtils; + +/** + * A {@link JobInProgressListener} which initializes the tasks for a job as soon + * as the job is added (using the {@link #jobAdded(JobInProgress)} method). + */ +class EagerTaskInitializationListener extends JobInProgressListener { + + private static final int DEFAULT_NUM_THREADS = 4; + private static final Log LOG = LogFactory.getLog( + EagerTaskInitializationListener.class.getName()); + + ///////////////////////////////////////////////////////////////// + // Used to init new jobs that have just been created + ///////////////////////////////////////////////////////////////// + class JobInitManager implements Runnable { + + public void run() { + JobInProgress job = null; + while (true) { + try { + synchronized (jobInitQueue) { + while (jobInitQueue.isEmpty()) { + jobInitQueue.wait(); + } + job = jobInitQueue.remove(0); + } + threadPool.execute(new InitJob(job)); + } catch (InterruptedException t) { + LOG.info("JobInitManagerThread interrupted."); + break; + } + } + LOG.info("Shutting down thread pool"); + threadPool.shutdownNow(); + } + } + + class InitJob implements Runnable { + + private JobInProgress job; + + public InitJob(JobInProgress job) { + this.job = job; + } + + public void run() { + ttm.initJob(job); + } + } + + private JobInitManager jobInitManager = new JobInitManager(); + private Thread jobInitManagerThread; + private List jobInitQueue = new ArrayList(); + private ExecutorService threadPool; + private int numThreads; + private TaskTrackerManager ttm; + + public EagerTaskInitializationListener(Configuration conf) { + numThreads = conf.getInt("mapred.jobinit.threads", DEFAULT_NUM_THREADS); + threadPool = Executors.newFixedThreadPool(numThreads); + } + + public void setTaskTrackerManager(TaskTrackerManager ttm) { + this.ttm = ttm; + } + + public void start() throws IOException { + this.jobInitManagerThread = new Thread(jobInitManager, "jobInitManager"); + jobInitManagerThread.setDaemon(true); + this.jobInitManagerThread.start(); + } + + public void terminate() throws IOException { + if (jobInitManagerThread != null && jobInitManagerThread.isAlive()) { + LOG.info("Stopping Job Init Manager thread"); + jobInitManagerThread.interrupt(); + try { + jobInitManagerThread.join(); + } catch (InterruptedException ex) { + ex.printStackTrace(); + } + } + } + + /** + * We add the JIP to the jobInitQueue, which is processed + * asynchronously to handle split-computation and build up + * the right TaskTracker/Block mapping. 
+ */ + @Override + public void jobAdded(JobInProgress job) { + synchronized (jobInitQueue) { + jobInitQueue.add(job); + resortInitQueue(); + jobInitQueue.notifyAll(); + } + + } + + /** + * Sort jobs by priority and then by start time. + */ + private synchronized void resortInitQueue() { + Comparator comp = new Comparator() { + public int compare(JobInProgress o1, JobInProgress o2) { + int res = o1.getPriority().compareTo(o2.getPriority()); + if(res == 0) { + if(o1.getStartTime() < o2.getStartTime()) + res = -1; + else + res = (o1.getStartTime()==o2.getStartTime() ? 0 : 1); + } + + return res; + } + }; + + synchronized (jobInitQueue) { + Collections.sort(jobInitQueue, comp); + } + } + + @Override + public void jobRemoved(JobInProgress job) { + synchronized (jobInitQueue) { + jobInitQueue.remove(job); + } + } + + @Override + public void jobUpdated(JobChangeEvent event) { + if (event instanceof JobStatusChangeEvent) { + jobStateChanged((JobStatusChangeEvent)event); + } + } + + // called when the job's status is changed + private void jobStateChanged(JobStatusChangeEvent event) { + // Resort the job queue if the job-start-time or job-priority changes + if (event.getEventType() == EventType.START_TIME_CHANGED + || event.getEventType() == EventType.PRIORITY_CHANGED) { + synchronized (jobInitQueue) { + resortInitQueue(); + } + } + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/FileAlreadyExistsException.java b/src/mapred/org/apache/hadoop/mapred/FileAlreadyExistsException.java new file mode 100644 index 0000000..6babdc0 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/FileAlreadyExistsException.java @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; + +/** + * Used when target file already exists for any operation and + * is not configured to be overwritten. + */ +public class FileAlreadyExistsException + extends IOException { + + public FileAlreadyExistsException() { + super(); + } + + public FileAlreadyExistsException(String msg) { + super(msg); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/FileInputFormat.java b/src/mapred/org/apache/hadoop/mapred/FileInputFormat.java new file mode 100644 index 0000000..8700d70 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/FileInputFormat.java @@ -0,0 +1,730 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashSet; +import java.util.IdentityHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.BlockLocation; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.net.NetworkTopology; +import org.apache.hadoop.net.Node; +import org.apache.hadoop.net.NodeBase; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.StringUtils; + +/** + * A base class for file-based {@link InputFormat}. + * + *

FileInputFormat is the base class for all file-based + * InputFormats. This provides a generic implementation of + * {@link #getSplits(JobConf, int)}. + * Subclasses of FileInputFormat can also override the + * {@link #isSplitable(FileSystem, Path)} method to ensure input-files are + * not split-up and are processed as a whole by {@link Mapper}s. + * @deprecated Use {@link org.apache.hadoop.mapreduce.lib.input.FileInputFormat} + * instead. + */ +@Deprecated +public abstract class FileInputFormat implements InputFormat { + + public static final Log LOG = + LogFactory.getLog(FileInputFormat.class); + + private static final double SPLIT_SLOP = 1.1; // 10% slop + + private long minSplitSize = 1; + private static final PathFilter hiddenFileFilter = new PathFilter(){ + public boolean accept(Path p){ + String name = p.getName(); + return !name.startsWith("_") && !name.startsWith("."); + } + }; + protected void setMinSplitSize(long minSplitSize) { + this.minSplitSize = minSplitSize; + } + + /** + * Proxy PathFilter that accepts a path only if all filters given in the + * constructor do. Used by the listPaths() to apply the built-in + * hiddenFileFilter together with a user provided one (if any). + */ + private static class MultiPathFilter implements PathFilter { + private List filters; + + public MultiPathFilter(List filters) { + this.filters = filters; + } + + public boolean accept(Path path) { + for (PathFilter filter : filters) { + if (!filter.accept(path)) { + return false; + } + } + return true; + } + } + + /** + * Is the given filename splitable? Usually, true, but if the file is + * stream compressed, it will not be. + * + * FileInputFormat implementations can override this and return + * false to ensure that individual input files are never split-up + * so that {@link Mapper}s process entire files. + * + * @param fs the file system that the file is on + * @param filename the file name to check + * @return is this file splitable? + */ + protected boolean isSplitable(FileSystem fs, Path filename) { + return true; + } + + public abstract RecordReader getRecordReader(InputSplit split, + JobConf job, + Reporter reporter) + throws IOException; + + /** + * Set a PathFilter to be applied to the input paths for the map-reduce job. + * + * @param filter the PathFilter class use for filtering the input paths. + */ + public static void setInputPathFilter(JobConf conf, + Class filter) { + conf.setClass("mapred.input.pathFilter.class", filter, PathFilter.class); + } + + /** + * Get a PathFilter instance of the filter set for the input paths. + * + * @return the PathFilter instance set for the job, NULL if none has been set. + */ + public static PathFilter getInputPathFilter(JobConf conf) { + Class filterClass = conf.getClass( + "mapred.input.pathFilter.class", null, PathFilter.class); + return (filterClass != null) ? + ReflectionUtils.newInstance(filterClass, conf) : null; + } + + /** + * Add files in the input path recursively into the results. + * Mark this method as final. + * if a subclass overrides this method, it should instead override + * {@link #addInputPathRecursively(List, FileSystem, Path, PathFilter)}. + * + * @param result + * The List to store all files. + * @param fs + * The FileSystem. + * @param path + * The input path. + * @param inputFilter + * The input filter that can be used to filter files/dirs. 
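// A sketch of the isSplitable() override hook described above: a subclass that
// never splits its inputs, so each file is consumed whole by a single Mapper.
// TextInputFormat is used here only as a convenient concrete base class and is
// an assumption of this example, not part of the surrounding file.
public class WholeFileTextInputFormat extends TextInputFormat {
  @Override
  protected boolean isSplitable(FileSystem fs, Path filename) {
    return false;   // one FileSplit per input file, regardless of block size
  }
}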
+ * @throws IOException + */ + final static protected void addInputPathRecursively(List result, + FileSystem fs, Path path, PathFilter inputFilter) + throws IOException { + for(FileStatus stat: fs.listStatus(path, inputFilter)) { + if (stat.isDir()) { + addInputPathRecursively(result, fs, stat.getPath(), inputFilter); + } else { + result.add(stat); + } + } + } + + /** List input directories. + * Mark this method to be final to make sure this method does not + * get overridden by any subclass. + * If a subclass historically overrides this method, now it needs to override + * {@link #listLocatedStatus(JobConf)} instead. + * + * @param job the job to list input paths for + * @return array of FileStatus objects + * @throws IOException if zero items. + */ + final static protected FileStatus[] listStatus(JobConf job) throws IOException { + Path[] dirs = getInputPaths(job); + if (dirs.length == 0) { + throw new IOException("No input paths specified in job"); + } + + // Whether we need to recursive look into the directory structure + boolean recursive = job.getBoolean("mapred.input.dir.recursive", false); + + List result = new ArrayList(); + List errors = new ArrayList(); + + // creates a MultiPathFilter with the hiddenFileFilter and the + // user provided one (if any). + List filters = new ArrayList(); + filters.add(hiddenFileFilter); + PathFilter jobFilter = getInputPathFilter(job); + if (jobFilter != null) { + filters.add(jobFilter); + } + PathFilter inputFilter = new MultiPathFilter(filters); + + for (Path p: dirs) { + FileSystem fs = p.getFileSystem(job); + FileStatus[] matches = fs.globStatus(p, inputFilter); + if (matches == null) { + errors.add(new IOException("Input path does not exist: " + p)); + } else if (matches.length == 0) { + errors.add(new IOException("Input Pattern " + p + " matches 0 files")); + } else { + for (FileStatus globStat: matches) { + if (globStat.isDir()) { + for(FileStatus stat: fs.listStatus(globStat.getPath(), + inputFilter)) { + if (recursive && stat.isDir()) { + addInputPathRecursively(result, fs, stat.getPath(), inputFilter); + } else { + result.add(stat); + } + } + } else { + result.add(globStat); + } + } + } + } + + if (!errors.isEmpty()) { + throw new InvalidInputException(errors); + } + LOG.info("Total input paths to process : " + result.size()); + return result.toArray(new FileStatus[result.size()]); + } + + /** + * Add files in the input path recursively into the results. + * @param result + * The List to store all files together with their block locations + * @param fs + * The FileSystem. + * @param path + * The input path. + * @param inputFilter + * The input filter that can be used to filter files/dirs. + * @throws IOException + */ + protected void addLocatedInputPathRecursively(List result, + FileSystem fs, Path path, PathFilter inputFilter) + throws IOException { + for(RemoteIterator itor = + fs.listLocatedStatus(path, inputFilter); itor.hasNext();) { + LocatedFileStatus stat = itor.next(); + if (stat.isDir()) { + addLocatedInputPathRecursively(result, fs, stat.getPath(), inputFilter); + } else { + result.add(stat); + } + } + } + + /** List input directories. + * The file locations are also returned together with the file status. + * Subclasses may override to, e.g., select only files matching a regular + * expression. + * + * @param job the job to list input paths for + * @return array of LocatedFileStatus objects + * @throws IOException if zero items. 
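// A hypothetical job-setup sketch combining the input hooks of this class; the
// paths, the JobConf, and the NoTmpFilter class are illustrative, not from the patch.
public class NoTmpFilter implements PathFilter {
  public boolean accept(Path p) {
    return !p.getName().endsWith(".tmp");   // applied on top of the built-in hiddenFileFilter
  }
}

JobConf job = new JobConf();
FileInputFormat.setInputPaths(job, new Path("/data/in"));      // replaces mapred.input.dir
FileInputFormat.addInputPath(job, new Path("/data/extra"));    // appends to mapred.input.dir
FileInputFormat.setInputPathFilter(job, NoTmpFilter.class);    // mapred.input.pathFilter.class
job.setBoolean("mapred.input.dir.recursive", true);            // listStatus() then descends into matched directories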
+ */ + protected LocatedFileStatus[] listLocatedStatus(JobConf job) + throws IOException { + Path[] dirs = getInputPaths(job); + if (dirs.length == 0) { + throw new IOException("No input paths specified in job"); + } + + // Whether we need to recursive look into the directory structure + boolean recursive = job.getBoolean("mapred.input.dir.recursive", false); + + List result = new ArrayList(); + List errors = new ArrayList(); + + // creates a MultiPathFilter with the hiddenFileFilter and the + // user provided one (if any). + List filters = new ArrayList(); + filters.add(hiddenFileFilter); + PathFilter jobFilter = getInputPathFilter(job); + if (jobFilter != null) { + filters.add(jobFilter); + } + PathFilter inputFilter = new MultiPathFilter(filters); + + for (Path p: dirs) { + FileSystem fs = p.getFileSystem(job); + FileStatus[] matches = fs.globStatus(p, inputFilter); + if (matches == null) { + errors.add(new IOException("Input path does not exist: " + p)); + } else if (matches.length == 0) { + errors.add(new IOException("Input Pattern " + p + " matches 0 files")); + } else { + for (FileStatus globStat: matches) { + for(RemoteIterator itor = fs.listLocatedStatus( + globStat.getPath(), inputFilter); itor.hasNext();) { + LocatedFileStatus stat = itor.next(); + if (recursive && stat.isDir()) { + addLocatedInputPathRecursively( + result, fs, stat.getPath(), inputFilter); + } else { + result.add(stat); + } + } + } + } + } + + if (!errors.isEmpty()) { + throw new InvalidInputException(errors); + } + LOG.info("Total input paths to process : " + result.size()); + return result.toArray(new LocatedFileStatus[result.size()]); + } + + /** Splits files returned by {@link #listStatus(JobConf)} when + * they're too big.*/ + @SuppressWarnings("deprecation") + public InputSplit[] getSplits(JobConf job, int numSplits) + throws IOException { + LocatedFileStatus[] files = listLocatedStatus(job); + + long totalSize = 0; // compute total size + for (FileStatus file: files) { // check we have valid files + if (file.isDir()) { + throw new IOException("Not a file: "+ file.getPath()); + } + totalSize += file.getLen(); + } + + long goalSize = totalSize / (numSplits == 0 ? 
1 : numSplits); + long minSize = Math.max(job.getLong("mapred.min.split.size", 1), + minSplitSize); + + // generate splits + ArrayList splits = new ArrayList(numSplits); + NetworkTopology clusterMap = new NetworkTopology(); + for (LocatedFileStatus file: files) { + Path path = file.getPath(); + FileSystem fs = path.getFileSystem(job); + long length = file.getLen(); + BlockLocation[] blkLocations = file.getBlockLocations(); + + if ((length != 0) && isSplitable(fs, path)) { + long blockSize = file.getBlockSize(); + long splitSize = computeSplitSize(goalSize, minSize, blockSize); + + long bytesRemaining = length; + while (((double) bytesRemaining)/splitSize > SPLIT_SLOP) { + String[] splitHosts = getSplitHosts(blkLocations, + length-bytesRemaining, splitSize, clusterMap); + splits.add(new FileSplit(path, length-bytesRemaining, splitSize, + splitHosts)); + bytesRemaining -= splitSize; + } + + if (bytesRemaining != 0) { + splits.add(new FileSplit(path, length-bytesRemaining, bytesRemaining, + blkLocations[blkLocations.length-1].getHosts())); + } + } else if (length != 0) { + String[] splitHosts = getSplitHosts(blkLocations,0,length,clusterMap); + splits.add(new FileSplit(path, 0, length, splitHosts)); + } else { + //Create empty hosts array for zero length files + splits.add(new FileSplit(path, 0, length, new String[0])); + } + } + LOG.debug("Total # of splits: " + splits.size()); + return splits.toArray(new FileSplit[splits.size()]); + } + + protected long computeSplitSize(long goalSize, long minSize, + long blockSize) { + return Math.max(minSize, Math.min(goalSize, blockSize)); + } + + protected int getBlockIndex(BlockLocation[] blkLocations, + long offset) { + for (int i = 0 ; i < blkLocations.length; i++) { + // is the offset inside this block? + if ((blkLocations[i].getOffset() <= offset) && + (offset < blkLocations[i].getOffset() + blkLocations[i].getLength())){ + return i; + } + } + BlockLocation last = blkLocations[blkLocations.length -1]; + long fileLength = last.getOffset() + last.getLength() -1; + throw new IllegalArgumentException("Offset " + offset + + " is outside of file (0.." + + fileLength + ")"); + } + + /** + * Sets the given comma separated paths as the list of inputs + * for the map-reduce job. + * + * @param conf Configuration of the job + * @param commaSeparatedPaths Comma separated paths to be set as + * the list of inputs for the map-reduce job. + */ + public static void setInputPaths(JobConf conf, String commaSeparatedPaths) { + setInputPaths(conf, StringUtils.stringToPath( + getPathStrings(commaSeparatedPaths))); + } + + /** + * Add the given comma separated paths to the list of inputs for + * the map-reduce job. + * + * @param conf The configuration of the job + * @param commaSeparatedPaths Comma separated paths to be added to + * the list of inputs for the map-reduce job. + */ + public static void addInputPaths(JobConf conf, String commaSeparatedPaths) { + for (String str : getPathStrings(commaSeparatedPaths)) { + addInputPath(conf, new Path(str)); + } + } + + /** + * Set the array of {@link Path}s as the list of inputs + * for the map-reduce job. + * + * @param conf Configuration of the job. + * @param inputPaths the {@link Path}s of the input directories/files + * for the map-reduce job. + */ + public static void setInputPaths(JobConf conf, Path... 
inputPaths) { + Path path = new Path(conf.getWorkingDirectory(), inputPaths[0]); + StringBuffer str = new StringBuffer(StringUtils.escapeString(path.toString())); + for(int i = 1; i < inputPaths.length;i++) { + str.append(StringUtils.COMMA_STR); + path = new Path(conf.getWorkingDirectory(), inputPaths[i]); + str.append(StringUtils.escapeString(path.toString())); + } + conf.set("mapred.input.dir", str.toString()); + } + + /** + * Add a {@link Path} to the list of inputs for the map-reduce job. + * + * @param conf The configuration of the job + * @param path {@link Path} to be added to the list of inputs for + * the map-reduce job. + */ + public static void addInputPath(JobConf conf, Path path ) { + path = new Path(conf.getWorkingDirectory(), path); + String dirStr = StringUtils.escapeString(path.toString()); + String dirs = conf.get("mapred.input.dir"); + conf.set("mapred.input.dir", dirs == null ? dirStr : + dirs + StringUtils.COMMA_STR + dirStr); + } + + // This method escapes commas in the glob pattern of the given paths. + private static String[] getPathStrings(String commaSeparatedPaths) { + int length = commaSeparatedPaths.length(); + int curlyOpen = 0; + int pathStart = 0; + boolean globPattern = false; + List pathStrings = new ArrayList(); + + for (int i=0; i mylist) { + Collections.sort(mylist, new Comparator () { + public int compare(NodeInfo obj1, NodeInfo obj2) { + + if (obj1 == null || obj2 == null) + return -1; + + if (obj1.getValue() == obj2.getValue()) { + return 0; + } + else { + return ((obj1.getValue() < obj2.getValue()) ? 1 : -1); + } + } + } + ); + } + + /** + * This function identifies and returns the hosts that contribute + * most for a given split. For calculating the contribution, rack + * locality is treated on par with host locality, so hosts from racks + * that contribute the most are preferred over hosts on racks that + * contribute less + * @param blkLocations The list of block locations + * @param offset + * @param splitSize + * @return array of hosts that contribute most to this split + * @throws IOException + */ + protected String[] getSplitHosts(BlockLocation[] blkLocations, + long offset, long splitSize, NetworkTopology clusterMap) + throws IOException { + + int startIndex = getBlockIndex(blkLocations, offset); + + long bytesInThisBlock = blkLocations[startIndex].getOffset() + + blkLocations[startIndex].getLength() - offset; + + //If this is the only block, just return + if (bytesInThisBlock >= splitSize) { + return blkLocations[startIndex].getHosts(); + } + + long bytesInFirstBlock = bytesInThisBlock; + int index = startIndex + 1; + splitSize -= bytesInThisBlock; + + while (splitSize > 0) { + bytesInThisBlock = + Math.min(splitSize, blkLocations[index++].getLength()); + splitSize -= bytesInThisBlock; + } + + long bytesInLastBlock = bytesInThisBlock; + int endIndex = index - 1; + + Map hostsMap = new IdentityHashMap(); + Map racksMap = new IdentityHashMap(); + String [] allTopos = new String[0]; + + // Build the hierarchy and aggregate the contribution of + // bytes at each level. 
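// A worked example of the split sizing performed by getSplits(...) above
// (all numbers are illustrative):
long totalSize = 10L * 1024 * 1024 * 1024;           // 10 GB of input
long goalSize  = totalSize / 20;                     // numSplits = 20  ->  512 MB goal per split
long minSize   = 1;                                  // default mapred.min.split.size
long blockSize = 128L * 1024 * 1024;                 // HDFS block size
long splitSize = Math.max(minSize, Math.min(goalSize, blockSize));   // = 128 MB: the block size wins
// The SPLIT_SLOP factor of 1.1 then lets the final chunk of a file grow to
// roughly 141 MB instead of emitting a tiny trailing split.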
See TestGetSplitHosts.java + + for (index = startIndex; index <= endIndex; index++) { + + // Establish the bytes in this block + if (index == startIndex) { + bytesInThisBlock = bytesInFirstBlock; + } + else if (index == endIndex) { + bytesInThisBlock = bytesInLastBlock; + } + else { + bytesInThisBlock = blkLocations[index].getLength(); + } + + allTopos = blkLocations[index].getTopologyPaths(); + + // If no topology information is available, just + // prefix a fakeRack + if (allTopos.length == 0) { + allTopos = fakeRacks(blkLocations, index); + } + + // NOTE: This code currently works only for one level of + // hierarchy (rack/host). However, it is relatively easy + // to extend this to support aggregation at different + // levels + + for (String topo: allTopos) { + + Node node, parentNode; + NodeInfo nodeInfo, parentNodeInfo; + + node = clusterMap.getNode(topo); + + if (node == null) { + node = new NodeBase(topo); + clusterMap.add(node); + } + + nodeInfo = hostsMap.get(node); + + if (nodeInfo == null) { + nodeInfo = new NodeInfo(node); + hostsMap.put(node,nodeInfo); + parentNode = node.getParent(); + parentNodeInfo = racksMap.get(parentNode); + if (parentNodeInfo == null) { + parentNodeInfo = new NodeInfo(parentNode); + racksMap.put(parentNode,parentNodeInfo); + } + parentNodeInfo.addLeaf(nodeInfo); + } + else { + nodeInfo = hostsMap.get(node); + parentNode = node.getParent(); + parentNodeInfo = racksMap.get(parentNode); + } + + nodeInfo.addValue(index, bytesInThisBlock); + parentNodeInfo.addValue(index, bytesInThisBlock); + + } // for all topos + + } // for all indices + + return identifyHosts(allTopos.length, racksMap); + } + + private String[] identifyHosts(int replicationFactor, + Map racksMap) { + + String [] retVal = new String[replicationFactor]; + + List rackList = new LinkedList(); + + rackList.addAll(racksMap.values()); + + // Sort the racks based on their contribution to this split + sortInDescendingOrder(rackList); + + boolean done = false; + int index = 0; + + // Get the host list for all our aggregated items, sort + // them and return the top entries + for (NodeInfo ni: rackList) { + + Set hostSet = ni.getLeaves(); + + ListhostList = new LinkedList(); + hostList.addAll(hostSet); + + // Sort the hosts in this rack based on their contribution + sortInDescendingOrder(hostList); + + for (NodeInfo host: hostList) { + // Strip out the port number from the host name + retVal[index++] = host.node.getName().split(":")[0]; + if (index == replicationFactor) { + done = true; + break; + } + } + + if (done == true) { + break; + } + } + return retVal; + } + + private String[] fakeRacks(BlockLocation[] blkLocations, int index) + throws IOException { + String[] allHosts = blkLocations[index].getHosts(); + String[] allTopos = new String[allHosts.length]; + for (int i = 0; i < allHosts.length; i++) { + allTopos[i] = NetworkTopology.DEFAULT_RACK + "/" + allHosts[i]; + } + return allTopos; + } + + + private static class NodeInfo { + final Node node; + final Set blockIds; + final Set leaves; + + private long value; + + NodeInfo(Node node) { + this.node = node; + blockIds = new HashSet(); + leaves = new HashSet(); + } + + long getValue() {return value;} + + void addValue(int blockIndex, long value) { + if (blockIds.add(blockIndex) == true) { + this.value += value; + } + } + + Set getLeaves() { return leaves;} + + void addLeaf(NodeInfo nodeInfo) { + leaves.add(nodeInfo); + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/FileOutputCommitter.java 
b/src/mapred/org/apache/hadoop/mapred/FileOutputCommitter.java new file mode 100644 index 0000000..e5b002e --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/FileOutputCommitter.java @@ -0,0 +1,262 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.net.URI; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.StringUtils; + +/** An {@link OutputCommitter} that commits files specified + * in job output directory i.e. ${mapred.output.dir}. + **/ +public class FileOutputCommitter extends OutputCommitter { + + public static final Log LOG = LogFactory.getLog( + "org.apache.hadoop.mapred.FileOutputCommitter"); +/** + * Temporary directory name + */ + public static final String TEMP_DIR_NAME = "_temporary"; + public static final String SUCCEEDED_FILE_NAME = "_SUCCESS"; + static final String SUCCESSFUL_JOB_OUTPUT_DIR_MARKER = + "mapreduce.fileoutputcommitter.marksuccessfuljobs"; + + public void setupJob(JobContext context) throws IOException { + JobConf conf = context.getJobConf(); + Path outputPath = FileOutputFormat.getOutputPath(conf); + if (outputPath != null) { + Path tmpDir = new Path(outputPath, FileOutputCommitter.TEMP_DIR_NAME); + FileSystem fileSys = tmpDir.getFileSystem(conf); + if (!fileSys.mkdirs(tmpDir)) { + LOG.error("Mkdirs failed to create " + tmpDir.toString()); + } + } + } + + private static boolean getOutputDirMarking(JobConf conf) { + return conf.getBoolean(SUCCESSFUL_JOB_OUTPUT_DIR_MARKER, + true); + } + + // Mark the output dir of the job for which the context is passed. 
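// In brief, the commit flow this class implements: setupJob() creates
// ${mapred.output.dir}/_temporary, each task writes under _temporary/_${taskid},
// commitTask() promotes those files into the job output directory, and
// commitJob() removes _temporary and finally writes an empty _SUCCESS marker.
// A job that wants to suppress the marker can set, for example:
//   conf.setBoolean("mapreduce.fileoutputcommitter.marksuccessfuljobs", false);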
+ private void markSuccessfulOutputDir(JobContext context) + throws IOException { + JobConf conf = context.getJobConf(); + Path outputPath = FileOutputFormat.getOutputPath(conf); + if (outputPath != null) { + FileSystem fileSys = outputPath.getFileSystem(conf); + // create a file in the folder to mark it + if (fileSys.exists(outputPath)) { + Path filePath = new Path(outputPath, SUCCEEDED_FILE_NAME); + fileSys.create(filePath).close(); + } + } + } + + @Override + public void commitJob(JobContext context) throws IOException { + cleanupJob(context); + if (getOutputDirMarking(context.getJobConf())) { + markSuccessfulOutputDir(context); + } + } + + @Override + @Deprecated + public void cleanupJob(JobContext context) throws IOException { + JobConf conf = context.getJobConf(); + // do the clean up of temporary directory + Path outputPath = FileOutputFormat.getOutputPath(conf); + if (outputPath != null) { + Path tmpDir = new Path(outputPath, FileOutputCommitter.TEMP_DIR_NAME); + FileSystem fileSys = tmpDir.getFileSystem(conf); + context.getProgressible().progress(); + if (fileSys.exists(tmpDir)) { + fileSys.delete(tmpDir, true); + } + } else { + LOG.warn("Output path is null in cleanup"); + } + } + + /** + * Delete the temporary directory, including all of the work directories. + * @param context the job's context + * @param runState final run state of the job, should be + * {@link JobStatus#KILLED} or {@link JobStatus#FAILED} + */ + @Override + public void abortJob(JobContext context, int runState) throws IOException { + cleanupJob(context); + } + + public void setupTask(TaskAttemptContext context) throws IOException { + // FileOutputCommitter's setupTask doesn't do anything. Because the + // temporary task directory is created on demand when the + // task is writing. 
+ } + + public void commitTask(TaskAttemptContext context) + throws IOException { + Path taskOutputPath = getTempTaskOutputPath(context); + TaskAttemptID attemptId = context.getTaskAttemptID(); + JobConf job = context.getJobConf(); + if (taskOutputPath != null) { + FileSystem fs = taskOutputPath.getFileSystem(job); + context.getProgressible().progress(); + if (fs.exists(taskOutputPath)) { + Path jobOutputPath = taskOutputPath.getParent().getParent(); + // Move the task outputs to their final place + moveTaskOutputs(context, fs, jobOutputPath, taskOutputPath); + // Delete the temporary task-specific output directory + if (!fs.delete(taskOutputPath, true)) { + LOG.info("Failed to delete the temporary output" + + " directory of task: " + attemptId + " - " + taskOutputPath); + } + LOG.info("Saved output of task '" + attemptId + "' to " + + jobOutputPath); + } + } + } + + private void moveTaskOutputs(TaskAttemptContext context, + FileSystem fs, + Path jobOutputDir, + Path taskOutput) + throws IOException { + TaskAttemptID attemptId = context.getTaskAttemptID(); + context.getProgressible().progress(); + if (fs.isFile(taskOutput)) { + Path finalOutputPath = getFinalPath(jobOutputDir, taskOutput, + getTempTaskOutputPath(context)); + if (!fs.rename(taskOutput, finalOutputPath)) { + if (!fs.delete(finalOutputPath, true)) { + throw new IOException("Failed to delete earlier output of task: " + + attemptId); + } + if (!fs.rename(taskOutput, finalOutputPath)) { + throw new IOException("Failed to save output of task: " + + attemptId); + } + } + LOG.debug("Moved " + taskOutput + " to " + finalOutputPath); + } else if(fs.getFileStatus(taskOutput).isDir()) { + FileStatus[] paths = fs.listStatus(taskOutput); + Path finalOutputPath = getFinalPath(jobOutputDir, taskOutput, + getTempTaskOutputPath(context)); + fs.mkdirs(finalOutputPath); + if (paths != null) { + for (FileStatus path : paths) { + moveTaskOutputs(context, fs, jobOutputDir, path.getPath()); + } + } + } + } + + public void abortTask(TaskAttemptContext context) throws IOException { + Path taskOutputPath = getTempTaskOutputPath(context); + try { + if (taskOutputPath != null) { + FileSystem fs = taskOutputPath.getFileSystem(context.getJobConf()); + context.getProgressible().progress(); + fs.delete(taskOutputPath, true); + } + } catch (IOException ie) { + LOG.warn("Error discarding output" + StringUtils.stringifyException(ie)); + } + } + + private Path getFinalPath(Path jobOutputDir, Path taskOutput, + Path taskOutputPath) throws IOException { + URI taskOutputUri = taskOutput.toUri(); + URI relativePath = taskOutputPath.toUri().relativize(taskOutputUri); + if (taskOutputUri == relativePath) {//taskOutputPath is not a parent of taskOutput + throw new IOException("Can not get the relative path: base = " + + taskOutputPath + " child = " + taskOutput); + } + if (relativePath.getPath().length() > 0) { + return new Path(jobOutputDir, relativePath.getPath()); + } else { + return jobOutputDir; + } + } + + public boolean needsTaskCommit(TaskAttemptContext context) + throws IOException { + try { + Path taskOutputPath = getTempTaskOutputPath(context); + if (taskOutputPath != null) { + context.getProgressible().progress(); + // Get the file-system for the task output directory + FileSystem fs = taskOutputPath.getFileSystem(context.getJobConf()); + // since task output path is created on demand, + // if it exists, task needs a commit + if (fs.exists(taskOutputPath)) { + return true; + } + } + } catch (IOException ioe) { + throw ioe; + } + return false; + } + + Path 
getTempTaskOutputPath(TaskAttemptContext taskContext) { + JobConf conf = taskContext.getJobConf(); + Path outputPath = FileOutputFormat.getOutputPath(conf); + if (outputPath != null) { + Path p = new Path(outputPath, + (FileOutputCommitter.TEMP_DIR_NAME + Path.SEPARATOR + + "_" + taskContext.getTaskAttemptID().toString())); + try { + FileSystem fs = p.getFileSystem(conf); + return p.makeQualified(fs); + } catch (IOException ie) { + LOG.warn(StringUtils .stringifyException(ie)); + return p; + } + } + return null; + } + + Path getWorkPath(TaskAttemptContext taskContext, Path basePath) + throws IOException { + // ${mapred.out.dir}/_temporary + Path jobTmpDir = new Path(basePath, FileOutputCommitter.TEMP_DIR_NAME); + FileSystem fs = jobTmpDir.getFileSystem(taskContext.getJobConf()); + if (!fs.exists(jobTmpDir)) { + throw new IOException("The temporary job-output directory " + + jobTmpDir.toString() + " doesn't exist!"); + } + // ${mapred.out.dir}/_temporary/_${taskid} + String taskid = taskContext.getTaskAttemptID().toString(); + Path taskTmpDir = new Path(jobTmpDir, "_" + taskid); + if (!fs.mkdirs(taskTmpDir)) { + throw new IOException("Mkdirs failed to create " + + taskTmpDir.toString()); + } + return taskTmpDir; + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/FileOutputFormat.java b/src/mapred/org/apache/hadoop/mapred/FileOutputFormat.java new file mode 100644 index 0000000..d1a963d --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/FileOutputFormat.java @@ -0,0 +1,293 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.text.NumberFormat; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.compress.CompressionCodec; +import org.apache.hadoop.util.Progressable; + +/** A base class for {@link OutputFormat}. */ +public abstract class FileOutputFormat implements OutputFormat { + + /** + * Set whether the output of the job is compressed. + * @param conf the {@link JobConf} to modify + * @param compress should the output of the job be compressed? + */ + public static void setCompressOutput(JobConf conf, boolean compress) { + conf.setBoolean("mapred.output.compress", compress); + } + + /** + * Is the job output compressed? + * @param conf the {@link JobConf} to look in + * @return true if the job output should be compressed, + * false otherwise + */ + public static boolean getCompressOutput(JobConf conf) { + return conf.getBoolean("mapred.output.compress", false); + } + + /** + * Set the {@link CompressionCodec} to be used to compress job outputs. 
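// A hypothetical job-setup sketch for the output-side knobs of this class; the
// output path and the choice of GzipCodec (org.apache.hadoop.io.compress) are
// illustrative assumptions, not part of the surrounding file.
JobConf job = new JobConf();
FileOutputFormat.setOutputPath(job, new Path("/data/out"));       // mapred.output.dir; must not exist yet
FileOutputFormat.setCompressOutput(job, true);                    // mapred.output.compress
FileOutputFormat.setOutputCompressorClass(job, GzipCodec.class);  // mapred.output.compression.codec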
+ * @param conf the {@link JobConf} to modify + * @param codecClass the {@link CompressionCodec} to be used to + * compress the job outputs + */ + public static void + setOutputCompressorClass(JobConf conf, + Class codecClass) { + setCompressOutput(conf, true); + conf.setClass("mapred.output.compression.codec", codecClass, + CompressionCodec.class); + } + + /** + * Get the {@link CompressionCodec} for compressing the job outputs. + * @param conf the {@link JobConf} to look in + * @param defaultValue the {@link CompressionCodec} to return if not set + * @return the {@link CompressionCodec} to be used to compress the + * job outputs + * @throws IllegalArgumentException if the class was specified, but not found + */ + public static Class + getOutputCompressorClass(JobConf conf, + Class defaultValue) { + Class codecClass = defaultValue; + + String name = conf.get("mapred.output.compression.codec"); + if (name != null) { + try { + codecClass = + conf.getClassByName(name).asSubclass(CompressionCodec.class); + } catch (ClassNotFoundException e) { + throw new IllegalArgumentException("Compression codec " + name + + " was not found.", e); + } + } + return codecClass; + } + + public abstract RecordWriter getRecordWriter(FileSystem ignored, + JobConf job, String name, + Progressable progress) + throws IOException; + + public void checkOutputSpecs(FileSystem ignored, JobConf job) + throws FileAlreadyExistsException, + InvalidJobConfException, IOException { + // Ensure that the output directory is set and not already there + Path outDir = getOutputPath(job); + if (outDir == null && job.getNumReduceTasks() != 0) { + throw new InvalidJobConfException("Output directory not set in JobConf."); + } + if (outDir != null) { + FileSystem fs = outDir.getFileSystem(job); + // normalize the output directory + outDir = fs.makeQualified(outDir); + setOutputPath(job, outDir); + // check its existence + if (fs.exists(outDir)) { + throw new FileAlreadyExistsException("Output directory " + outDir + + " already exists"); + } + } + } + + /** + * Set the {@link Path} of the output directory for the map-reduce job. + * + * @param conf The configuration of the job. + * @param outputDir the {@link Path} of the output directory for + * the map-reduce job. + */ + public static void setOutputPath(JobConf conf, Path outputDir) { + outputDir = new Path(conf.getWorkingDirectory(), outputDir); + conf.set("mapred.output.dir", outputDir.toString()); + } + + /** + * Set the {@link Path} of the task's temporary output directory + * for the map-reduce job. + * + *

Note: Task output path is set by the framework. + *

+ * @param conf The configuration of the job. + * @param outputDir the {@link Path} of the output directory + * for the map-reduce job. + */ + + static void setWorkOutputPath(JobConf conf, Path outputDir) { + outputDir = new Path(conf.getWorkingDirectory(), outputDir); + conf.set("mapred.work.output.dir", outputDir.toString()); + } + + /** + * Get the {@link Path} to the output directory for the map-reduce job. + * + * @return the {@link Path} to the output directory for the map-reduce job. + * @see FileOutputFormat#getWorkOutputPath(JobConf) + */ + public static Path getOutputPath(JobConf conf) { + String name = conf.get("mapred.output.dir"); + return name == null ? null: new Path(name); + } + + /** + * Get the {@link Path} to the task's temporary output directory + * for the map-reduce job + * + *

Tasks' Side-Effect Files

+ * + *

Note: The following is valid only if the {@link OutputCommitter} + * is {@link FileOutputCommitter}. If the OutputCommitter is not + * a FileOutputCommitter, the task's temporary output + * directory is the same as {@link #getOutputPath(JobConf)}, i.e. + * ${mapred.output.dir}

+ * + *

Some applications need to create/write-to side-files, which differ from + * the actual job-outputs. + * + *

In such cases there could be issues with 2 instances of the same TIP + * (running simultaneously e.g. speculative tasks) trying to open/write-to the + * same file (path) on HDFS. Hence the application-writer will have to pick + * unique names per task-attempt (e.g. using the attemptid, say + * attempt_200709221812_0001_m_000000_0), not just per TIP.

+ * + *

To get around this the Map-Reduce framework helps the application-writer + * out by maintaining a special + * ${mapred.output.dir}/_temporary/_${taskid} + * sub-directory for each task-attempt on HDFS where the output of the + * task-attempt goes. On successful completion of the task-attempt the files + * in the ${mapred.output.dir}/_temporary/_${taskid} (only) + * are promoted to ${mapred.output.dir}. Of course, the + * framework discards the sub-directory of unsuccessful task-attempts. This + * is completely transparent to the application.

+ * + *

The application-writer can take advantage of this by creating any + * side-files required in ${mapred.work.output.dir} during execution + * of the reduce-task, i.e. via {@link #getWorkOutputPath(JobConf)}, and the + * framework will move them out similarly - thus the application-writer doesn't have to pick + * unique paths per task-attempt.

+ * + *

Note: the value of ${mapred.work.output.dir} during + * execution of a particular task-attempt is actually + * ${mapred.output.dir}/_temporary/_${taskid}, and this value is + * set by the map-reduce framework. So, just create any side-files in the + * path returned by {@link #getWorkOutputPath(JobConf)} from a map/reduce + * task to take advantage of this feature.

+ * + *

The entire discussion holds true for maps of jobs with + * reducer=NONE (i.e. 0 reduces) since output of the map, in that case, + * goes directly to HDFS.
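// A sketch of the side-file pattern described above, as it might look inside a
// map or reduce task; the JobConf `conf` (captured in configure()) and the file
// name are illustrative, and FSDataOutputStream comes from org.apache.hadoop.fs.
Path sideFile = FileOutputFormat.getPathForCustomFile(conf, "debug-dump");
// e.g. ${mapred.work.output.dir}/debug-dump-r-00003 in the fourth reduce task
FSDataOutputStream out = sideFile.getFileSystem(conf).create(sideFile);
out.writeBytes("anything the task wants to record\n");
out.close();
// On successful commit the framework promotes the file into ${mapred.output.dir}
// next to the regular task output.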

+ * + * @return the {@link Path} to the task's temporary output directory + * for the map-reduce job. + */ + public static Path getWorkOutputPath(JobConf conf) { + String name = conf.get("mapred.work.output.dir"); + return name == null ? null: new Path(name); + } + + /** + * Helper function to create the task's temporary output directory and + * return the path to the task's output file. + * + * @param conf job-configuration + * @param name temporary task-output filename + * @return path to the task's temporary output file + * @throws IOException + */ + public static Path getTaskOutputPath(JobConf conf, String name) + throws IOException { + // ${mapred.out.dir} + Path outputPath = getOutputPath(conf); + if (outputPath == null) { + throw new IOException("Undefined job output-path"); + } + + OutputCommitter committer = conf.getOutputCommitter(); + Path workPath = outputPath; + TaskAttemptContext context = new TaskAttemptContext(conf, + TaskAttemptID.forName(conf.get("mapred.task.id"))); + if (committer instanceof FileOutputCommitter) { + workPath = ((FileOutputCommitter)committer).getWorkPath(context, + outputPath); + } + + // ${mapred.out.dir}/_temporary/_${taskid}/${name} + return new Path(workPath, name); + } + + /** + * Helper function to generate a name that is unique for the task. + * + *

The generated name can be used to create custom files from within the + * different tasks for the job; the names for different tasks will not collide + * with each other.

+ * + *

The given name is postfixed with the task type, 'm' for maps, 'r' for + * reduces, and the task partition number. For example, given a name 'test' + * running on the first map of the job, the generated name will be + * 'test-m-00000'.

+ * + * @param conf the configuration for the job. + * @param name the name to make unique. + * @return a unique name accross all tasks of the job. + */ + public static String getUniqueName(JobConf conf, String name) { + int partition = conf.getInt("mapred.task.partition", -1); + if (partition == -1) { + throw new IllegalArgumentException( + "This method can only be called from within a Job"); + } + + String taskType = (conf.getBoolean("mapred.task.is.map", true)) ? "m" : "r"; + + NumberFormat numberFormat = NumberFormat.getInstance(); + numberFormat.setMinimumIntegerDigits(5); + numberFormat.setGroupingUsed(false); + + return name + "-" + taskType + "-" + numberFormat.format(partition); + } + + /** + * Helper function to generate a {@link Path} for a file that is unique for + * the task within the job output directory. + * + *

The path can be used to create custom files from within the map and + * reduce tasks. The path name will be unique for each task. The path parent + * will be the job output directory.

+ * + *

This method uses the {@link #getUniqueName} method to make the file name + * unique for the task.

+ * + * @param conf the configuration for the job. + * @param name the name for the file. + * @return a unique path accross all tasks of the job. + */ + public static Path getPathForCustomFile(JobConf conf, String name) { + return new Path(getWorkOutputPath(conf), getUniqueName(conf, name)); + } +} + diff --git a/src/mapred/org/apache/hadoop/mapred/FileSplit.java b/src/mapred/org/apache/hadoop/mapred/FileSplit.java new file mode 100644 index 0000000..316f54d --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/FileSplit.java @@ -0,0 +1,112 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.io.DataInput; +import java.io.DataOutput; + +import org.apache.hadoop.io.UTF8; +import org.apache.hadoop.fs.Path; + +/** A section of an input file. Returned by {@link + * InputFormat#getSplits(JobConf, int)} and passed to + * {@link InputFormat#getRecordReader(InputSplit,JobConf,Reporter)}. + * @deprecated Use {@link org.apache.hadoop.mapreduce.lib.input.FileSplit} + * instead. + */ +@Deprecated +public class FileSplit extends org.apache.hadoop.mapreduce.InputSplit + implements InputSplit { + // Use String instead of Path to save memory + private String file; + private long start; + private long length; + private String[] hosts; + + FileSplit() {} + + /** Constructs a split. + * @deprecated + * @param file the file name + * @param start the position of the first byte in the file to process + * @param length the number of bytes in the file to process + */ + @Deprecated + public FileSplit(Path file, long start, long length, JobConf conf) { + this(file, start, length, (String[])null); + } + + /** Constructs a split with host information + * + * @param file the file name + * @param start the position of the first byte in the file to process + * @param length the number of bytes in the file to process + * @param hosts the list of hosts containing the block, possibly null + */ + public FileSplit(Path file, long start, long length, String[] hosts) { + this.file = file == null ? null : file.toString().intern(); + this.start = start; + this.length = length; + this.hosts = hosts; + if (this.hosts != null) { + for (int h = 0; h < this.hosts.length; h++) { + this.hosts[h] = this.hosts[h] == null ? null : this.hosts[h].intern(); + } + } + } + + /** The file containing this split's data. + * We store String instead of Path to save memory. + */ + public Path getPath() { return file == null ? null : new Path(file); } + + /** The position of the first byte in the file to process. */ + public long getStart() { return start; } + + /** The number of bytes in the file to process. 
*/ + public long getLength() { return length; } + + public String toString() { return file + ":" + start + "+" + length; } + + //////////////////////////////////////////// + // Writable methods + //////////////////////////////////////////// + + public void write(DataOutput out) throws IOException { + UTF8.writeString(out, file); + out.writeLong(start); + out.writeLong(length); + } + public void readFields(DataInput in) throws IOException { + file = UTF8.readString(in).intern(); + start = in.readLong(); + length = in.readLong(); + hosts = null; + } + + public String[] getLocations() throws IOException { + if (this.hosts == null) { + return new String[]{}; + } else { + return this.hosts; + } + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/HeartbeatResponse.java b/src/mapred/org/apache/hadoop/mapred/HeartbeatResponse.java new file mode 100644 index 0000000..a210b1f --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/HeartbeatResponse.java @@ -0,0 +1,136 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
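// Illustrative construction of a split (the path and host names are made up):
FileSplit split = new FileSplit(new Path("/data/in/part-00000"),
    0L, 64L * 1024 * 1024, new String[] { "host1.rackA", "host2.rackB" });
// getPath()/getStart()/getLength() recover the fields, and getLocations()
// exposes the hosts to the scheduler; note that write()/readFields() above do
// not serialize the hosts, so a deserialized FileSplit reports no locations.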
+ */ + +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.HashSet; +import java.util.Map; +import java.util.HashMap; +import java.util.Set; + +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableUtils; + +/** + * The response sent by the {@link JobTracker} to the hearbeat sent + * periodically by the {@link TaskTracker} + * + */ +class HeartbeatResponse implements Writable, Configurable { + Configuration conf = null; + short responseId; + int heartbeatInterval; + TaskTrackerAction[] actions; + Set recoveredJobs = new HashSet(); + + HeartbeatResponse() {} + + HeartbeatResponse(short responseId, TaskTrackerAction[] actions) { + this.responseId = responseId; + this.actions = actions; + this.heartbeatInterval = MRConstants.HEARTBEAT_INTERVAL_MIN; + } + + public void setResponseId(short responseId) { + this.responseId = responseId; + } + + public short getResponseId() { + return responseId; + } + + public void setRecoveredJobs(Set ids) { + recoveredJobs = ids; + } + + public Set getRecoveredJobs() { + return recoveredJobs; + } + + public void setActions(TaskTrackerAction[] actions) { + this.actions = actions; + } + + public TaskTrackerAction[] getActions() { + return actions; + } + + public void setConf(Configuration conf) { + this.conf = conf; + } + + public Configuration getConf() { + return conf; + } + + public void setHeartbeatInterval(int interval) { + this.heartbeatInterval = interval; + } + + public int getHeartbeatInterval() { + return heartbeatInterval; + } + + public void write(DataOutput out) throws IOException { + out.writeShort(responseId); + out.writeInt(heartbeatInterval); + if (actions == null) { + WritableUtils.writeVInt(out, 0); + } else { + WritableUtils.writeVInt(out, actions.length); + for (TaskTrackerAction action : actions) { + WritableUtils.writeEnum(out, action.getActionId()); + action.write(out); + } + } + // Write the job ids of the jobs that were recovered + out.writeInt(recoveredJobs.size()); + for (JobID id : recoveredJobs) { + id.write(out); + } + } + + public void readFields(DataInput in) throws IOException { + this.responseId = in.readShort(); + this.heartbeatInterval = in.readInt(); + int length = WritableUtils.readVInt(in); + if (length > 0) { + actions = new TaskTrackerAction[length]; + for (int i=0; i < length; ++i) { + TaskTrackerAction.ActionType actionType = + WritableUtils.readEnum(in, TaskTrackerAction.ActionType.class); + actions[i] = TaskTrackerAction.createAction(actionType); + actions[i].readFields(in); + } + } else { + actions = null; + } + // Read the job ids of the jobs that were recovered + int size = in.readInt(); + for (int i = 0; i < size; ++i) { + JobID id = new JobID(); + id.readFields(in); + recoveredJobs.add(id); + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/HistoryViewer.java b/src/mapred/org/apache/hadoop/mapred/HistoryViewer.java new file mode 100644 index 0000000..34a4d4f --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/HistoryViewer.java @@ -0,0 +1,617 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
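// For reference, the wire layout produced by HeartbeatResponse.write() above:
//   short  responseId
//   int    heartbeatInterval
//   vint   number of actions (0 when the array is null), then for each action
//          its ActionType enum followed by the action's own fields
//   int    number of recovered JobIDs, then each JobID
// readFields() mirrors this exactly, so the two methods must be kept in sync
// whenever a field is added.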
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.text.DecimalFormat; +import java.text.Format; +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.Arrays; +import java.util.Comparator; +import java.util.Date; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.mapred.Counters.Counter; +import org.apache.hadoop.mapred.Counters.Group; +import org.apache.hadoop.mapred.DefaultJobHistoryParser.*; +import org.apache.hadoop.mapred.JobHistory.*; +import org.apache.hadoop.util.StringUtils; + +/** + * This class is to view job history files. + */ +class HistoryViewer { + private static SimpleDateFormat dateFormat = new SimpleDateFormat( + "d-MMM-yyyy HH:mm:ss"); + private FileSystem fs; + private Configuration conf; + private Path historyLogDir; + private String jobLogFile; + private JobHistory.JobInfo job; + private String trackerHostName; + private String trackerStartTime; + private String jobId; + private boolean printAll; + + private PathFilter jobLogFileFilter = new PathFilter() { + public boolean accept(Path path) { + return !(path.getName().endsWith(".xml")); + } + }; + + public HistoryViewer(String outputDir, Configuration conf, boolean printAll) + throws IOException { + this.conf = conf; + this.printAll = printAll; + Path output = new Path(outputDir); + historyLogDir = new Path(output, "_logs/history"); + try { + fs = output.getFileSystem(this.conf); + if (!fs.exists(output)) { + throw new IOException("History directory " + historyLogDir.toString() + + "does not exist"); + } + Path[] jobFiles = FileUtil.stat2Paths(fs.listStatus(historyLogDir, + jobLogFileFilter)); + if (jobFiles.length == 0) { + throw new IOException("Not a valid history directory " + + historyLogDir.toString()); + } + jobLogFile = jobFiles[0].toString(); + String[] jobDetails = + JobInfo.decodeJobHistoryFileName(jobFiles[0].getName()).split("_"); + trackerHostName = jobDetails[0]; + trackerStartTime = jobDetails[1]; + jobId = jobDetails[2] + "_" + jobDetails[3] + "_" + jobDetails[4]; + job = new JobHistory.JobInfo(jobId); + DefaultJobHistoryParser.parseJobTasks(jobFiles[0].toString(), job, fs); + } catch(Exception e) { + throw new IOException("Not able to initialize History viewer", e); + } + } + + public void print() throws IOException{ + printJobDetails(); + printTaskSummary(); + printJobAnalysis(); + printTasks("SETUP", "FAILED"); + printTasks("SETUP", "KILLED"); + printTasks("MAP", "FAILED"); + printTasks("MAP", "KILLED"); + printTasks("REDUCE", "FAILED"); + printTasks("REDUCE", "KILLED"); + printTasks("CLEANUP", "FAILED"); + printTasks("CLEANUP", "KILLED"); + if (printAll) { + printTasks("SETUP", 
"SUCCESS"); + printTasks("MAP", "SUCCESS"); + printTasks("REDUCE", "SUCCESS"); + printTasks("CLEANUP", "SUCCESS"); + printAllTaskAttempts("SETUP"); + printAllTaskAttempts("MAP"); + printAllTaskAttempts("REDUCE"); + printAllTaskAttempts("CLEANUP"); + } + NodesFilter filter = new FailedOnNodesFilter(); + printFailedAttempts(filter); + filter = new KilledOnNodesFilter(); + printFailedAttempts(filter); + } + + private void printJobDetails() throws IOException { + StringBuffer jobDetails = new StringBuffer(); + jobDetails.append("\nHadoop job: " ).append(jobId); + jobDetails.append("\n====================================="); + jobDetails.append("\nJob tracker host name: ").append(trackerHostName); + jobDetails.append("\njob tracker start time: ").append( + new Date(Long.parseLong(trackerStartTime))); + jobDetails.append("\nUser: ").append(job.get(Keys.USER)); + jobDetails.append("\nJobName: ").append(job.get(Keys.JOBNAME)); + jobDetails.append("\nJobConf: ").append(job.get(Keys.JOBCONF)); + jobDetails.append("\nSubmitted At: ").append(StringUtils. + getFormattedTimeWithDiff(dateFormat, + job.getLong(Keys.SUBMIT_TIME), 0)); + jobDetails.append("\nLaunched At: ").append(StringUtils. + getFormattedTimeWithDiff(dateFormat, + job.getLong(Keys.LAUNCH_TIME), + job.getLong(Keys.SUBMIT_TIME))); + jobDetails.append("\nFinished At: ").append(StringUtils. + getFormattedTimeWithDiff(dateFormat, + job.getLong(Keys.FINISH_TIME), + job.getLong(Keys.LAUNCH_TIME))); + jobDetails.append("\nStatus: ").append(((job.get(Keys.JOB_STATUS) == "") ? + "Incomplete" :job.get(Keys.JOB_STATUS))); + try { + printCounters(jobDetails, job); + } catch (ParseException p) { + throw new IOException(p); + } + jobDetails.append("\n====================================="); + System.out.println(jobDetails.toString()); + } + + private void printCounters(StringBuffer buff, JobInfo job) + throws ParseException { + Counters mapCounters = + Counters.fromEscapedCompactString(job.get(Keys.MAP_COUNTERS)); + Counters reduceCounters = + Counters.fromEscapedCompactString(job.get(Keys.REDUCE_COUNTERS)); + Counters totalCounters = + Counters.fromEscapedCompactString(job.get(Keys.COUNTERS)); + + // Killed jobs might not have counters + if (totalCounters == null) { + return; + } + buff.append("\nCounters: \n\n"); + buff.append(String.format("|%1$-30s|%2$-30s|%3$-10s|%4$-10s|%5$-10s|", + "Group Name", + "Counter name", + "Map Value", + "Reduce Value", + "Total Value")); + buff.append("\n------------------------------------------"+ + "---------------------------------------------"); + for (String groupName : totalCounters.getGroupNames()) { + Group totalGroup = totalCounters.getGroup(groupName); + Group mapGroup = mapCounters.getGroup(groupName); + Group reduceGroup = reduceCounters.getGroup(groupName); + Format decimal = new DecimalFormat(); + Iterator ctrItr = totalGroup.iterator(); + while (ctrItr.hasNext()) { + Counter counter = ctrItr.next(); + String name = counter.getDisplayName(); + String mapValue = decimal.format(mapGroup.getCounter(name)); + String reduceValue = decimal.format(reduceGroup.getCounter(name)); + String totalValue = decimal.format(counter.getValue()); + buff.append( + String.format("\n|%1$-30s|%2$-30s|%3$-10s|%4$-10s|%5$-10s", + totalGroup.getDisplayName(), + counter.getDisplayName(), + mapValue, reduceValue, totalValue)); + } + } + } + + private void printTasks(String taskType, String taskStatus) { + Map tasks = job.getAllTasks(); + StringBuffer taskList = new StringBuffer(); + taskList.append("\n").append(taskStatus).append(" 
"); + taskList.append(taskType).append(" task list for ").append(jobId); + taskList.append("\nTaskId\t\tStartTime\tFinishTime\tError"); + if (Values.MAP.name().equals(taskType)) { + taskList.append("\tInputSplits"); + } + taskList.append("\n===================================================="); + System.out.println(taskList.toString()); + for (JobHistory.Task task : tasks.values()) { + if (taskType.equals(task.get(Keys.TASK_TYPE)) && + (taskStatus.equals(task.get(Keys.TASK_STATUS)) + || taskStatus.equals("all"))) { + taskList.setLength(0); + taskList.append(task.get(Keys.TASKID)); + taskList.append("\t").append(StringUtils.getFormattedTimeWithDiff( + dateFormat, task.getLong(Keys.START_TIME), 0)); + taskList.append("\t").append(StringUtils.getFormattedTimeWithDiff( + dateFormat, task.getLong(Keys.FINISH_TIME), + task.getLong(Keys.START_TIME))); + taskList.append("\t").append(task.get(Keys.ERROR)); + if (Values.MAP.name().equals(taskType)) { + taskList.append("\t").append(task.get(Keys.SPLITS)); + } + System.out.println(taskList.toString()); + } + } + } + + private void printAllTaskAttempts(String taskType) { + Map tasks = job.getAllTasks(); + StringBuffer taskList = new StringBuffer(); + taskList.append("\n").append(taskType); + taskList.append(" task list for ").append(jobId); + taskList.append("\nTaskId\t\tStartTime"); + if (Values.REDUCE.name().equals(taskType)) { + taskList.append("\tShuffleFinished\tSortFinished"); + } + taskList.append("\tFinishTime\tHostName\tError\tTaskLogs"); + taskList.append("\n===================================================="); + System.out.println(taskList.toString()); + for (JobHistory.Task task : tasks.values()) { + for (JobHistory.TaskAttempt attempt : task.getTaskAttempts().values()) { + if (taskType.equals(task.get(Keys.TASK_TYPE))){ + taskList.setLength(0); + taskList.append(attempt.get(Keys.TASK_ATTEMPT_ID)).append("\t"); + taskList.append(StringUtils.getFormattedTimeWithDiff(dateFormat, + attempt.getLong(Keys.START_TIME), 0)).append("\t"); + if (Values.REDUCE.name().equals(taskType)) { + ReduceAttempt reduceAttempt = (ReduceAttempt)attempt; + taskList.append(StringUtils.getFormattedTimeWithDiff(dateFormat, + reduceAttempt.getLong(Keys.SHUFFLE_FINISHED), + reduceAttempt.getLong(Keys.START_TIME))); + taskList.append("\t"); + taskList.append(StringUtils.getFormattedTimeWithDiff(dateFormat, + reduceAttempt.getLong(Keys.SORT_FINISHED), + reduceAttempt.getLong(Keys.SHUFFLE_FINISHED))); + } + taskList.append(StringUtils.getFormattedTimeWithDiff(dateFormat, + attempt.getLong(Keys.FINISH_TIME), + attempt.getLong(Keys.START_TIME))); + taskList.append("\t"); + taskList.append(attempt.get(Keys.HOSTNAME)).append("\t"); + taskList.append(attempt.get(Keys.ERROR)); + String taskLogsUrl = JobHistory.getTaskLogsUrl(attempt); + taskList.append(taskLogsUrl != null ? 
taskLogsUrl : "n/a"); + System.out.println(taskList.toString()); + } + } + } + } + + private void printTaskSummary() { + Map tasks = job.getAllTasks(); + int totalMaps = 0; + int totalReduces = 0; + int totalCleanups = 0; + int totalSetups = 0; + int numFailedMaps = 0; + int numKilledMaps = 0; + int numFailedReduces = 0; + int numKilledReduces = 0; + int numFinishedCleanups = 0; + int numFailedCleanups = 0; + int numKilledCleanups = 0; + int numFinishedSetups = 0; + int numFailedSetups = 0; + int numKilledSetups = 0; + long mapStarted = 0; + long mapFinished = 0; + long reduceStarted = 0; + long reduceFinished = 0; + long cleanupStarted = 0; + long cleanupFinished = 0; + long setupStarted = 0; + long setupFinished = 0; + + Map allHosts = new TreeMap(); + + for (JobHistory.Task task : tasks.values()) { + Map attempts = task.getTaskAttempts(); + allHosts.put(task.get(Keys.HOSTNAME), ""); + for (TaskAttempt attempt : attempts.values()) { + long startTime = attempt.getLong(Keys.START_TIME); + long finishTime = attempt.getLong(Keys.FINISH_TIME); + if (Values.MAP.name().equals(task.get(Keys.TASK_TYPE))) { + if (mapStarted==0 || mapStarted > startTime) { + mapStarted = startTime; + } + if (mapFinished < finishTime) { + mapFinished = finishTime; + } + totalMaps++; + if (Values.FAILED.name().equals(attempt.get(Keys.TASK_STATUS))) { + numFailedMaps++; + } else if (Values.KILLED.name().equals( + attempt.get(Keys.TASK_STATUS))) { + numKilledMaps++; + } + } else if (Values.REDUCE.name().equals(task.get(Keys.TASK_TYPE))) { + if (reduceStarted==0||reduceStarted > startTime) { + reduceStarted = startTime; + } + if (reduceFinished < finishTime) { + reduceFinished = finishTime; + } + totalReduces++; + if (Values.FAILED.name().equals(attempt.get(Keys.TASK_STATUS))) { + numFailedReduces++; + } else if (Values.KILLED.name().equals( + attempt.get(Keys.TASK_STATUS))) { + numKilledReduces++; + } + } else if (Values.CLEANUP.name().equals(task.get(Keys.TASK_TYPE))){ + if (cleanupStarted==0||cleanupStarted > startTime) { + cleanupStarted = startTime; + } + if (cleanupFinished < finishTime) { + cleanupFinished = finishTime; + } + totalCleanups++; + if (Values.SUCCESS.name().equals(attempt.get(Keys.TASK_STATUS))) { + numFinishedCleanups++; + } else if (Values.FAILED.name().equals( + attempt.get(Keys.TASK_STATUS))) { + numFailedCleanups++; + } else if (Values.KILLED.name().equals( + attempt.get(Keys.TASK_STATUS))) { + numKilledCleanups++; + } + } else if (Values.SETUP.name().equals(task.get(Keys.TASK_TYPE))){ + if (setupStarted==0||setupStarted > startTime) { + setupStarted = startTime; + } + if (setupFinished < finishTime) { + setupFinished = finishTime; + } + totalSetups++; + if (Values.SUCCESS.name().equals(attempt.get(Keys.TASK_STATUS))) { + numFinishedSetups++; + } else if (Values.FAILED.name().equals( + attempt.get(Keys.TASK_STATUS))) { + numFailedSetups++; + } else if (Values.KILLED.name().equals( + attempt.get(Keys.TASK_STATUS))) { + numKilledSetups++; + } + } + } + } + + StringBuffer taskSummary = new StringBuffer(); + taskSummary.append("\nTask Summary"); + taskSummary.append("\n============================"); + taskSummary.append("\nKind\tTotal\t"); + taskSummary.append("Successful\tFailed\tKilled\tStartTime\tFinishTime"); + taskSummary.append("\n"); + taskSummary.append("\nSetup\t").append(totalSetups); + taskSummary.append("\t").append(numFinishedSetups); + taskSummary.append("\t\t").append(numFailedSetups); + taskSummary.append("\t").append(numKilledSetups); + 
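For context on the StartTime and FinishTime columns being assembled here, a small illustrative sketch (not part of the patch; the timestamps and the date format pattern are made up) of what StringUtils.getFormattedTimeWithDiff produces:

    java.text.SimpleDateFormat fmt = new java.text.SimpleDateFormat("d-MMM-yyyy HH:mm:ss");
    long setupStart  = 1291221000000L;           // hypothetical epoch millis
    long setupFinish = setupStart + 95000L;      // 95 seconds later
    // Absolute time only, because the reference time passed is 0:
    System.out.println(StringUtils.getFormattedTimeWithDiff(fmt, setupStart, 0));
    // Absolute time plus the elapsed time relative to the start, e.g. "(1mins, 35sec)":
    System.out.println(StringUtils.getFormattedTimeWithDiff(fmt, setupFinish, setupStart));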
taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff( + dateFormat, setupStarted, 0)); + taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff( + dateFormat, setupFinished, setupStarted)); + taskSummary.append("\nMap\t").append(totalMaps); + taskSummary.append("\t").append(job.getInt(Keys.FINISHED_MAPS)); + taskSummary.append("\t\t").append(numFailedMaps); + taskSummary.append("\t").append(numKilledMaps); + taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff( + dateFormat, mapStarted, 0)); + taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff( + dateFormat, mapFinished, mapStarted)); + taskSummary.append("\nReduce\t").append(totalReduces); + taskSummary.append("\t").append(job.getInt(Keys.FINISHED_REDUCES)); + taskSummary.append("\t\t").append(numFailedReduces); + taskSummary.append("\t").append(numKilledReduces); + taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff( + dateFormat, reduceStarted, 0)); + taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff( + dateFormat, reduceFinished, reduceStarted)); + taskSummary.append("\nCleanup\t").append(totalCleanups); + taskSummary.append("\t").append(numFinishedCleanups); + taskSummary.append("\t\t").append(numFailedCleanups); + taskSummary.append("\t").append(numKilledCleanups); + taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff( + dateFormat, cleanupStarted, 0)); + taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff( + dateFormat, cleanupFinished, cleanupStarted)); + taskSummary.append("\n============================\n"); + System.out.println(taskSummary.toString()); + } + + private void printFailedAttempts(NodesFilter filter) throws IOException { + JobHistory.parseHistoryFromFS(jobLogFile, filter, fs); + Map> badNodes = filter.getValues(); + StringBuffer attempts = new StringBuffer(); + if (badNodes.size() > 0) { + attempts.append("\n").append(filter.getFailureType()); + attempts.append(" task attempts by nodes"); + attempts.append("\nHostname\tFailedTasks"); + attempts.append("\n==============================="); + System.out.println(attempts.toString()); + for (Map.Entry> entry : badNodes.entrySet()) { + String node = entry.getKey(); + Set failedTasks = entry.getValue(); + attempts.setLength(0); + attempts.append(node).append("\t"); + for (String t : failedTasks) { + attempts.append(t).append(", "); + } + System.out.println(attempts.toString()); + } + } + } + + private void printJobAnalysis() { + if (!Values.SUCCESS.name().equals(job.get(Keys.JOB_STATUS))) { + System.out.println("No Analysis available as job did not finish"); + return; + } + + Map tasks = job.getAllTasks(); + int finishedMaps = job.getInt(Keys.FINISHED_MAPS); + int finishedReduces = job.getInt(Keys.FINISHED_REDUCES); + JobHistory.Task [] mapTasks = new JobHistory.Task[finishedMaps]; + JobHistory.Task [] reduceTasks = new JobHistory.Task[finishedReduces]; + int mapIndex = 0 , reduceIndex=0; + long avgMapTime = 0; + long avgReduceTime = 0; + long avgShuffleTime = 0; + + for (JobHistory.Task task : tasks.values()) { + Map attempts = task.getTaskAttempts(); + for (JobHistory.TaskAttempt attempt : attempts.values()) { + if (attempt.get(Keys.TASK_STATUS).equals(Values.SUCCESS.name())) { + long avgFinishTime = (attempt.getLong(Keys.FINISH_TIME) - + attempt.getLong(Keys.START_TIME)); + if (Values.MAP.name().equals(task.get(Keys.TASK_TYPE))) { + mapTasks[mapIndex++] = attempt; + avgMapTime += avgFinishTime; + } else if 
(Values.REDUCE.name().equals(task.get(Keys.TASK_TYPE))) { + reduceTasks[reduceIndex++] = attempt; + avgShuffleTime += (attempt.getLong(Keys.SHUFFLE_FINISHED) - + attempt.getLong(Keys.START_TIME)); + avgReduceTime += (attempt.getLong(Keys.FINISH_TIME) - + attempt.getLong(Keys.SHUFFLE_FINISHED)); + } + break; + } + } + } + if (finishedMaps > 0) { + avgMapTime /= finishedMaps; + } + if (finishedReduces > 0) { + avgReduceTime /= finishedReduces; + avgShuffleTime /= finishedReduces; + } + System.out.println("\nAnalysis"); + System.out.println("========="); + printAnalysis(mapTasks, cMap, "map", avgMapTime, 10); + printLast(mapTasks, "map", cFinishMapRed); + + if (reduceTasks.length > 0) { + printAnalysis(reduceTasks, cShuffle, "shuffle", avgShuffleTime, 10); + printLast(reduceTasks, "shuffle", cFinishShuffle); + + printAnalysis(reduceTasks, cReduce, "reduce", avgReduceTime, 10); + printLast(reduceTasks, "reduce", cFinishMapRed); + } + System.out.println("========="); + } + + private void printLast(JobHistory.Task [] tasks, + String taskType, + Comparator cmp + ) { + Arrays.sort(tasks, cFinishMapRed); + JobHistory.Task last = tasks[0]; + StringBuffer lastBuf = new StringBuffer(); + lastBuf.append("The last ").append(taskType); + lastBuf.append(" task ").append(last.get(Keys.TASKID)); + Long finishTime; + if ("shuffle".equals(taskType)) { + finishTime = last.getLong(Keys.SHUFFLE_FINISHED); + } else { + finishTime = last.getLong(Keys.FINISH_TIME); + } + lastBuf.append(" finished at (relative to the Job launch time): "); + lastBuf.append(StringUtils.getFormattedTimeWithDiff(dateFormat, + finishTime, job.getLong(Keys.LAUNCH_TIME))); + System.out.println(lastBuf.toString()); + } + + private void printAnalysis(JobHistory.Task [] tasks, + Comparator cmp, + String taskType, + long avg, + int showTasks) { + Arrays.sort(tasks, cmp); + JobHistory.Task min = tasks[tasks.length-1]; + StringBuffer details = new StringBuffer(); + details.append("\nTime taken by best performing "); + details.append(taskType).append(" task "); + details.append(min.get(Keys.TASKID)).append(": "); + if ("map".equals(taskType)) { + details.append(StringUtils.formatTimeDiff( + min.getLong(Keys.FINISH_TIME), + min.getLong(Keys.START_TIME))); + } else if ("shuffle".equals(taskType)) { + details.append(StringUtils.formatTimeDiff( + min.getLong(Keys.SHUFFLE_FINISHED), + min.getLong(Keys.START_TIME))); + } else { + details.append(StringUtils.formatTimeDiff( + min.getLong(Keys.FINISH_TIME), + min.getLong(Keys.SHUFFLE_FINISHED))); + } + details.append("\nAverage time taken by "); + details.append(taskType).append(" tasks: "); + details.append(StringUtils.formatTimeDiff(avg, 0)); + details.append("\nWorse performing "); + details.append(taskType).append(" tasks: "); + details.append("\nTaskId\t\tTimetaken"); + System.out.println(details.toString()); + for (int i = 0; i < showTasks && i < tasks.length; i++) { + details.setLength(0); + details.append(tasks[i].get(Keys.TASKID)).append(" "); + if ("map".equals(taskType)) { + details.append(StringUtils.formatTimeDiff( + tasks[i].getLong(Keys.FINISH_TIME), + tasks[i].getLong(Keys.START_TIME))); + } else if ("shuffle".equals(taskType)) { + details.append(StringUtils.formatTimeDiff( + tasks[i].getLong(Keys.SHUFFLE_FINISHED), + tasks[i].getLong(Keys.START_TIME))); + } else { + details.append(StringUtils.formatTimeDiff( + tasks[i].getLong(Keys.FINISH_TIME), + tasks[i].getLong(Keys.SHUFFLE_FINISHED))); + } + System.out.println(details.toString()); + } + } + + private Comparator cMap = + new 
Comparator() { + public int compare(JobHistory.Task t1, JobHistory.Task t2) { + long l1 = t1.getLong(Keys.FINISH_TIME) - t1.getLong(Keys.START_TIME); + long l2 = t2.getLong(Keys.FINISH_TIME) - t2.getLong(Keys.START_TIME); + return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1)); + } + }; + + private Comparator cShuffle = + new Comparator() { + public int compare(JobHistory.Task t1, JobHistory.Task t2) { + long l1 = t1.getLong(Keys.SHUFFLE_FINISHED) - + t1.getLong(Keys.START_TIME); + long l2 = t2.getLong(Keys.SHUFFLE_FINISHED) - + t2.getLong(Keys.START_TIME); + return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1)); + } + }; + + private Comparator cFinishShuffle = + new Comparator() { + public int compare(JobHistory.Task t1, JobHistory.Task t2) { + long l1 = t1.getLong(Keys.SHUFFLE_FINISHED); + long l2 = t2.getLong(Keys.SHUFFLE_FINISHED); + return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1)); + } + }; + + private Comparator cFinishMapRed = + new Comparator() { + public int compare(JobHistory.Task t1, JobHistory.Task t2) { + long l1 = t1.getLong(Keys.FINISH_TIME); + long l2 = t2.getLong(Keys.FINISH_TIME); + return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1)); + } + }; + + private Comparator cReduce = + new Comparator() { + public int compare(JobHistory.Task t1, JobHistory.Task t2) { + long l1 = t1.getLong(Keys.FINISH_TIME) - + t1.getLong(Keys.SHUFFLE_FINISHED); + long l2 = t2.getLong(Keys.FINISH_TIME) - + t2.getLong(Keys.SHUFFLE_FINISHED); + return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1)); + } + }; +} diff --git a/src/mapred/org/apache/hadoop/mapred/ID.java b/src/mapred/org/apache/hadoop/mapred/ID.java new file mode 100644 index 0000000..b01f70b --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/ID.java @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +/** + * A general identifier, which internally stores the id + * as an integer. This is the super class of {@link JobID}, + * {@link TaskID} and {@link TaskAttemptID}. + * + * @see JobID + * @see TaskID + * @see TaskAttemptID + */ +@Deprecated +public abstract class ID extends org.apache.hadoop.mapreduce.ID { + + /** constructs an ID object from the given int */ + public ID(int id) { + super(id); + } + + protected ID() { + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/IFile.java b/src/mapred/org/apache/hadoop/mapred/IFile.java new file mode 100644 index 0000000..574f6be --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/IFile.java @@ -0,0 +1,567 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.EOFException; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.DataInputBuffer; +import org.apache.hadoop.io.DataOutputBuffer; +import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.io.compress.CodecPool; +import org.apache.hadoop.io.compress.CompressionCodec; +import org.apache.hadoop.io.compress.CompressionOutputStream; +import org.apache.hadoop.io.compress.Compressor; +import org.apache.hadoop.io.compress.Decompressor; +import org.apache.hadoop.io.serializer.SerializationFactory; +import org.apache.hadoop.io.serializer.Serializer; + +/** + * IFile is the simple format + * for the intermediate map-outputs in Map-Reduce. + * + * There is a Writer to write out map-outputs in this format and + * a Reader to read files of this format. + */ +class IFile { + + private static final int EOF_MARKER = -1; + + /** + * IFile.Writer to write out intermediate map-outputs. 
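As a rough usage sketch (not part of the patch, and assuming the calling code sits in org.apache.hadoop.mapred so this package-private class is visible; the path and the Text key/value choice are illustrative), a round trip through the Writer defined below and the Reader defined further down:

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    Path file = new Path("/tmp/example.ifile");              // hypothetical location

    // Write two records, with no compression codec and no spill counter.
    IFile.Writer<Text, Text> writer = new IFile.Writer<Text, Text>(
        conf, fs, file, Text.class, Text.class, null, null);
    writer.append(new Text("k1"), new Text("v1"));
    writer.append(new Text("k2"), new Text("v2"));
    writer.close();                                          // writes the EOF marker and checksum

    // Read them back; next() hands out the raw serialized bytes of each record.
    IFile.Reader<Text, Text> reader = new IFile.Reader<Text, Text>(
        conf, fs, file, null, null);
    DataInputBuffer keyBuf = new DataInputBuffer();
    DataInputBuffer valBuf = new DataInputBuffer();
    while (reader.next(keyBuf, valBuf)) {
      Text key = new Text();
      Text val = new Text();
      key.readFields(keyBuf);                                // Text deserializes itself
      val.readFields(valBuf);
      System.out.println(key + "\t" + val);
    }
    reader.close();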
+ */ + public static class Writer { + FSDataOutputStream out; + boolean ownOutputStream = false; + long start = 0; + FSDataOutputStream rawOut; + + CompressionOutputStream compressedOut; + Compressor compressor; + boolean compressOutput = false; + + long decompressedBytesWritten = 0; + long compressedBytesWritten = 0; + + // Count records written to disk + private long numRecordsWritten = 0; + private final Counters.Counter writtenRecordsCounter; + + IFileOutputStream checksumOut; + + Class keyClass; + Class valueClass; + Serializer keySerializer; + Serializer valueSerializer; + + DataOutputBuffer buffer = new DataOutputBuffer(); + + public Writer(Configuration conf, FileSystem fs, Path file, + Class keyClass, Class valueClass, + CompressionCodec codec, + Counters.Counter writesCounter) throws IOException { + this(conf, fs.create(file), keyClass, valueClass, codec, + writesCounter); + ownOutputStream = true; + } + + public Writer(Configuration conf, FSDataOutputStream out, + Class keyClass, Class valueClass, + CompressionCodec codec, Counters.Counter writesCounter) + throws IOException { + this.writtenRecordsCounter = writesCounter; + this.checksumOut = new IFileOutputStream(out); + this.rawOut = out; + this.start = this.rawOut.getPos(); + + if (codec != null) { + this.compressor = CodecPool.getCompressor(codec); + this.compressor.reset(); + this.compressedOut = codec.createOutputStream(checksumOut, compressor); + this.out = new FSDataOutputStream(this.compressedOut, null); + this.compressOutput = true; + } else { + this.out = new FSDataOutputStream(checksumOut,null); + } + + this.keyClass = keyClass; + this.valueClass = valueClass; + SerializationFactory serializationFactory = new SerializationFactory(conf); + this.keySerializer = serializationFactory.getSerializer(keyClass); + this.keySerializer.open(buffer); + this.valueSerializer = serializationFactory.getSerializer(valueClass); + this.valueSerializer.open(buffer); + } + + public void close() throws IOException { + + // Close the serializers + keySerializer.close(); + valueSerializer.close(); + + // Write EOF_MARKER for key/value length + WritableUtils.writeVInt(out, EOF_MARKER); + WritableUtils.writeVInt(out, EOF_MARKER); + decompressedBytesWritten += 2 * WritableUtils.getVIntSize(EOF_MARKER); + + //Flush the stream + out.flush(); + + if (compressOutput) { + // Flush + compressedOut.finish(); + compressedOut.resetState(); + } + + // Close the underlying stream iff we own it... 
+ if (ownOutputStream) { + out.close(); + } + else { + // Write the checksum + checksumOut.finish(); + } + + compressedBytesWritten = rawOut.getPos() - start; + + if (compressOutput) { + // Return back the compressor + CodecPool.returnCompressor(compressor); + compressor = null; + } + + out = null; + if(writtenRecordsCounter != null) { + writtenRecordsCounter.increment(numRecordsWritten); + } + } + + public void append(K key, V value) throws IOException { + if (key.getClass() != keyClass) + throw new IOException("wrong key class: "+ key.getClass() + +" is not "+ keyClass); + if (value.getClass() != valueClass) + throw new IOException("wrong value class: "+ value.getClass() + +" is not "+ valueClass); + + // Append the 'key' + keySerializer.serialize(key); + int keyLength = buffer.getLength(); + if (keyLength < 0) { + throw new IOException("Negative key-length not allowed: " + keyLength + + " for " + key); + } + + // Append the 'value' + valueSerializer.serialize(value); + int valueLength = buffer.getLength() - keyLength; + if (valueLength < 0) { + throw new IOException("Negative value-length not allowed: " + + valueLength + " for " + value); + } + + // Write the record out + WritableUtils.writeVInt(out, keyLength); // key length + WritableUtils.writeVInt(out, valueLength); // value length + out.write(buffer.getData(), 0, buffer.getLength()); // data + + // Reset + buffer.reset(); + + // Update bytes written + decompressedBytesWritten += keyLength + valueLength + + WritableUtils.getVIntSize(keyLength) + + WritableUtils.getVIntSize(valueLength); + ++numRecordsWritten; + } + + public void append(DataInputBuffer key, DataInputBuffer value) + throws IOException { + int keyLength = key.getLength() - key.getPosition(); + if (keyLength < 0) { + throw new IOException("Negative key-length not allowed: " + keyLength + + " for " + key); + } + + int valueLength = value.getLength() - value.getPosition(); + if (valueLength < 0) { + throw new IOException("Negative value-length not allowed: " + + valueLength + " for " + value); + } + + WritableUtils.writeVInt(out, keyLength); + WritableUtils.writeVInt(out, valueLength); + out.write(key.getData(), key.getPosition(), keyLength); + out.write(value.getData(), value.getPosition(), valueLength); + + // Update bytes written + decompressedBytesWritten += keyLength + valueLength + + WritableUtils.getVIntSize(keyLength) + + WritableUtils.getVIntSize(valueLength); + ++numRecordsWritten; + } + + public long getRawLength() { + return decompressedBytesWritten; + } + + public long getCompressedLength() { + return compressedBytesWritten; + } + } + + /** + * IFile.Reader to read intermediate map-outputs. + */ + public static class Reader { + private static final int DEFAULT_BUFFER_SIZE = 128*1024; + private static final int MAX_VINT_SIZE = 9; + + // Count records read from disk + private long numRecordsRead = 0; + private final Counters.Counter readRecordsCounter; + + final InputStream in; // Possibly decompressed stream that we read + Decompressor decompressor; + long bytesRead = 0; + final long fileLength; + boolean eof = false; + final IFileInputStream checksumIn; + + byte[] buffer = null; + int bufferSize = DEFAULT_BUFFER_SIZE; + DataInputBuffer dataIn = new DataInputBuffer(); + + int recNo = 1; + + /** + * Construct an IFile Reader. + * + * @param conf Configuration File + * @param fs FileSystem + * @param file Path of the file to be opened. This file should have + * checksum bytes for the data at the end of the file. 
+ * @param codec codec + * @param readsCounter Counter for records read from disk + * @throws IOException + */ + public Reader(Configuration conf, FileSystem fs, Path file, + CompressionCodec codec, + Counters.Counter readsCounter) throws IOException { + this(conf, fs.open(file), + fs.getFileStatus(file).getLen(), + codec, readsCounter); + } + + /** + * Construct an IFile Reader. + * + * @param conf Configuration File + * @param in The input stream + * @param length Length of the data in the stream, including the checksum + * bytes. + * @param codec codec + * @param readsCounter Counter for records read from disk + * @throws IOException + */ + public Reader(Configuration conf, FSDataInputStream in, long length, + CompressionCodec codec, + Counters.Counter readsCounter) throws IOException { + readRecordsCounter = readsCounter; + checksumIn = new IFileInputStream(in,length); + if (codec != null) { + decompressor = CodecPool.getDecompressor(codec); + this.in = codec.createInputStream(checksumIn, decompressor); + } else { + this.in = checksumIn; + } + this.fileLength = length; + + if (conf != null) { + bufferSize = conf.getInt("io.file.buffer.size", DEFAULT_BUFFER_SIZE); + } + } + + public long getLength() { + return fileLength - checksumIn.getSize(); + } + + public long getPosition() throws IOException { + return checksumIn.getPosition(); + } + + /** + * Read upto len bytes into buf starting at offset off. + * + * @param buf buffer + * @param off offset + * @param len length of buffer + * @return the no. of bytes read + * @throws IOException + */ + private int readData(byte[] buf, int off, int len) throws IOException { + int bytesRead = 0; + while (bytesRead < len) { + int n = in.read(buf, off+bytesRead, len-bytesRead); + if (n < 0) { + return bytesRead; + } + bytesRead += n; + } + return len; + } + + void readNextBlock(int minSize) throws IOException { + if (buffer == null) { + buffer = new byte[bufferSize]; + dataIn.reset(buffer, 0, 0); + } + buffer = + rejigData(buffer, + (bufferSize < minSize) ? 
new byte[minSize << 1] : buffer); + bufferSize = buffer.length; + } + + private byte[] rejigData(byte[] source, byte[] destination) + throws IOException{ + // Copy remaining data into the destination array + int bytesRemaining = dataIn.getLength()-dataIn.getPosition(); + if (bytesRemaining > 0) { + System.arraycopy(source, dataIn.getPosition(), + destination, 0, bytesRemaining); + } + + // Read as much data as will fit from the underlying stream + int n = readData(destination, bytesRemaining, + (destination.length - bytesRemaining)); + dataIn.reset(destination, 0, (bytesRemaining + n)); + + return destination; + } + + public boolean next(DataInputBuffer key, DataInputBuffer value) + throws IOException { + // Sanity check + if (eof) { + throw new EOFException("Completed reading " + bytesRead); + } + + // Check if we have enough data to read lengths + if ((dataIn.getLength() - dataIn.getPosition()) < 2*MAX_VINT_SIZE) { + readNextBlock(2*MAX_VINT_SIZE); + } + + // Read key and value lengths + int oldPos = dataIn.getPosition(); + int keyLength = WritableUtils.readVInt(dataIn); + int valueLength = WritableUtils.readVInt(dataIn); + int pos = dataIn.getPosition(); + bytesRead += pos - oldPos; + + // Check for EOF + if (keyLength == EOF_MARKER && valueLength == EOF_MARKER) { + eof = true; + return false; + } + + // Sanity check + if (keyLength < 0) { + throw new IOException("Rec# " + recNo + ": Negative key-length: " + + keyLength); + } + if (valueLength < 0) { + throw new IOException("Rec# " + recNo + ": Negative value-length: " + + valueLength); + } + + final int recordLength = keyLength + valueLength; + + // Check if we have the raw key/value in the buffer + if ((dataIn.getLength()-pos) < recordLength) { + readNextBlock(recordLength); + + // Sanity check + if ((dataIn.getLength() - dataIn.getPosition()) < recordLength) { + throw new EOFException("Rec# " + recNo + ": Could read the next " + + " record"); + } + } + + // Setup the key and value + pos = dataIn.getPosition(); + byte[] data = dataIn.getData(); + key.reset(data, pos, keyLength); + value.reset(data, (pos + keyLength), valueLength); + + // Position for the next record + long skipped = dataIn.skip(recordLength); + if (skipped != recordLength) { + throw new IOException("Rec# " + recNo + ": Failed to skip past record " + + "of length: " + recordLength); + } + + // Record the bytes read + bytesRead += recordLength; + + ++recNo; + ++numRecordsRead; + + return true; + } + + public void close() throws IOException { + // Return the decompressor + if (decompressor != null) { + decompressor.reset(); + CodecPool.returnDecompressor(decompressor); + decompressor = null; + } + + // Close the underlying stream + in.close(); + + // Release the buffer + dataIn = null; + buffer = null; + if(readRecordsCounter != null) { + readRecordsCounter.increment(numRecordsRead); + } + } + } + + /** + * IFile.InMemoryReader to read map-outputs present in-memory. 
+ */ + public static class InMemoryReader extends Reader { + RamManager ramManager; + TaskAttemptID taskAttemptId; + + public InMemoryReader(RamManager ramManager, TaskAttemptID taskAttemptId, + byte[] data, int start, int length) + throws IOException { + super(null, null, length - start, null, null); + this.ramManager = ramManager; + this.taskAttemptId = taskAttemptId; + + buffer = data; + bufferSize = (int)fileLength; + dataIn.reset(buffer, start, length); + } + + @Override + public long getPosition() throws IOException { + // InMemoryReader does not initialize streams like Reader, so in.getPos() + // would not work. Instead, return the number of uncompressed bytes read, + // which will be correct since in-memory data is not compressed. + return bytesRead; + } + + @Override + public long getLength() { + return fileLength; + } + + private void dumpOnError() { + File dumpFile = new File("../output/" + taskAttemptId + ".dump"); + System.err.println("Dumping corrupt map-output of " + taskAttemptId + + " to " + dumpFile.getAbsolutePath()); + try { + FileOutputStream fos = new FileOutputStream(dumpFile); + fos.write(buffer, 0, bufferSize); + fos.close(); + } catch (IOException ioe) { + System.err.println("Failed to dump map-output of " + taskAttemptId); + } + } + + public boolean next(DataInputBuffer key, DataInputBuffer value) + throws IOException { + try { + // Sanity check + if (eof) { + throw new EOFException("Completed reading " + bytesRead); + } + + // Read key and value lengths + int oldPos = dataIn.getPosition(); + int keyLength = WritableUtils.readVInt(dataIn); + int valueLength = WritableUtils.readVInt(dataIn); + int pos = dataIn.getPosition(); + bytesRead += pos - oldPos; + + // Check for EOF + if (keyLength == EOF_MARKER && valueLength == EOF_MARKER) { + eof = true; + return false; + } + + // Sanity check + if (keyLength < 0) { + throw new IOException("Rec# " + recNo + ": Negative key-length: " + + keyLength); + } + if (valueLength < 0) { + throw new IOException("Rec# " + recNo + ": Negative value-length: " + + valueLength); + } + + final int recordLength = keyLength + valueLength; + + // Setup the key and value + pos = dataIn.getPosition(); + byte[] data = dataIn.getData(); + key.reset(data, pos, keyLength); + value.reset(data, (pos + keyLength), valueLength); + + // Position for the next record + long skipped = dataIn.skip(recordLength); + if (skipped != recordLength) { + throw new IOException("Rec# " + recNo + ": Failed to skip past record of length: " + + recordLength); + } + + // Record the byte + bytesRead += recordLength; + + ++recNo; + + return true; + } catch (IOException ioe) { + dumpOnError(); + throw ioe; + } + } + + public void close() { + // Release + dataIn = null; + buffer = null; + + // Inform the RamManager + ramManager.unreserve(bufferSize); + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/IFileInputStream.java b/src/mapred/org/apache/hadoop/mapred/IFileInputStream.java new file mode 100644 index 0000000..115ea2b --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/IFileInputStream.java @@ -0,0 +1,186 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.EOFException; +import java.io.IOException; +import java.io.InputStream; + +import org.apache.hadoop.fs.ChecksumException; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.DataChecksum; +/** + * A checksum input stream, used for IFiles. + * Used to validate the checksum of files created by {@link IFileOutputStream}. + */ + +class IFileInputStream extends InputStream { + + private final InputStream in; //The input stream to be verified for checksum. + private final long length; //The total length of the input file + private final long dataLength; + private DataChecksum sum; + private long currentOffset = 0; + private final byte b[] = new byte[1]; + private byte csum[] = null; + private int checksumSize; + + /** + * Create a checksum input stream that reads + * @param in The input stream to be verified for checksum. + * @param len The length of the input stream including checksum bytes. + */ + public IFileInputStream(InputStream in, long len) { + this.in = in; + sum = DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, + Integer.MAX_VALUE); + checksumSize = sum.getChecksumSize(); + length = len; + dataLength = length - checksumSize; + } + + /** + * Close the input stream. Note that we need to read to the end of the + * stream to validate the checksum. + */ + @Override + public void close() throws IOException { + if (currentOffset < dataLength) { + byte[] t = new byte[Math.min((int) + (Integer.MAX_VALUE & (dataLength - currentOffset)), 32 * 1024)]; + while (currentOffset < dataLength) { + int n = read(t, 0, t.length); + if (0 == n) { + throw new EOFException("Could not validate checksum"); + } + } + } + in.close(); + } + + @Override + public long skip(long n) throws IOException { + throw new IOException("Skip not supported for IFileInputStream"); + } + + public long getPosition() { + return (currentOffset >= dataLength) ? dataLength : currentOffset; + } + + public long getSize() { + return checksumSize; + } + + /** + * Read bytes from the stream. + * At EOF, checksum is validated, but the checksum + * bytes are not passed back in the buffer. + */ + public int read(byte[] b, int off, int len) throws IOException { + + if (currentOffset >= dataLength) { + return -1; + } + + return doRead(b,off,len); + } + + /** + * Read bytes from the stream. + * At EOF, checksum is validated and sent back + * as the last four bytes of the buffer. The caller should handle + * these bytes appropriately + */ + public int readWithChecksum(byte[] b, int off, int len) throws IOException { + + if (currentOffset == length) { + return -1; + } + else if (currentOffset >= dataLength) { + // If the previous read drained off all the data, then just return + // the checksum now. 
Note that checksum validation would have + // happened in the earlier read + int lenToCopy = (int) (checksumSize - (currentOffset - dataLength)); + if (len < lenToCopy) { + lenToCopy = len; + } + System.arraycopy(csum, (int) (currentOffset - dataLength), b, off, + lenToCopy); + currentOffset += lenToCopy; + return lenToCopy; + } + + int bytesRead = doRead(b,off,len); + + if (currentOffset == dataLength) { + if (len >= bytesRead + checksumSize) { + System.arraycopy(csum, 0, b, off + bytesRead, checksumSize); + bytesRead += checksumSize; + currentOffset += checksumSize; + } + } + return bytesRead; + } + + private int doRead(byte[]b, int off, int len) throws IOException { + + // If we are trying to read past the end of data, just read + // the left over data + if (currentOffset + len > dataLength) { + len = (int) dataLength - (int)currentOffset; + } + + int bytesRead = in.read(b, off, len); + + if (bytesRead < 0) { + throw new ChecksumException("Checksum Error", 0); + } + + sum.update(b,off,bytesRead); + + currentOffset += bytesRead; + + if (currentOffset == dataLength) { + // The last four bytes are checksum. Strip them and verify + csum = new byte[checksumSize]; + IOUtils.readFully(in, csum, 0, checksumSize); + if (!sum.compare(csum, 0)) { + throw new ChecksumException("Checksum Error", 0); + } + } + return bytesRead; + } + + + @Override + public int read() throws IOException { + b[0] = 0; + int l = read(b,0,1); + if (l < 0) return l; + + // Upgrade the b[0] to an int so as not to misinterpret the + // first bit of the byte as a sign bit + int result = 0xFF & b[0]; + return result; + } + + public byte[] getChecksum() { + return csum; + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/IFileOutputStream.java b/src/mapred/org/apache/hadoop/mapred/IFileOutputStream.java new file mode 100644 index 0000000..84e022a --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/IFileOutputStream.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.io.OutputStream; +import java.io.FilterOutputStream; + +import org.apache.hadoop.util.DataChecksum; +/** + * A Checksum output stream. + * Checksum for the contents of the file is calculated and + * appended to the end of the file on close of the stream. + * Used for IFiles + */ +class IFileOutputStream extends FilterOutputStream { + /** + * The output stream to be checksummed. + */ + private final DataChecksum sum; + private byte[] barray; + private boolean closed = false; + private boolean finished = false; + + /** + * Create a checksum output stream that writes + * the bytes to the given stream. 
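A minimal sketch (not part of the patch; it again assumes code in org.apache.hadoop.mapred so the package-private streams are visible) of the checksum framing that IFileInputStream above and IFileOutputStream below provide together:

    byte[] payload = "intermediate bytes".getBytes();

    // Writing: IFileOutputStream appends a 4-byte CRC32 over everything written.
    java.io.ByteArrayOutputStream sink = new java.io.ByteArrayOutputStream();
    IFileOutputStream checkedOut = new IFileOutputStream(sink);
    checkedOut.write(payload, 0, payload.length);
    checkedOut.close();                                      // finish() emits the checksum

    // Reading: the length handed in must include the trailing checksum bytes.
    byte[] framed = sink.toByteArray();
    IFileInputStream checkedIn = new IFileInputStream(
        new java.io.ByteArrayInputStream(framed), framed.length);
    byte[] back = new byte[payload.length];
    checkedIn.read(back, 0, back.length);                    // ChecksumException on corruption
    checkedIn.close();                                       // validates any unread remainder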
+ * @param out + */ + public IFileOutputStream(OutputStream out) { + super(out); + sum = DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, + Integer.MAX_VALUE); + barray = new byte[sum.getChecksumSize()]; + } + + @Override + public void close() throws IOException { + if (closed) { + return; + } + closed = true; + finish(); + out.close(); + } + + /** + * Finishes writing data to the output stream, by writing + * the checksum bytes to the end. The underlying stream is not closed. + * @throws IOException + */ + public void finish() throws IOException { + if (finished) { + return; + } + finished = true; + sum.writeValue(barray, 0, false); + out.write (barray, 0, sum.getChecksumSize()); + out.flush(); + } + + /** + * Write bytes to the stream. + */ + @Override + public void write(byte[] b, int off, int len) throws IOException { + sum.update(b, off,len); + out.write(b,off,len); + } + + @Override + public void write(int b) throws IOException { + barray[0] = (byte) (b & 0xFF); + write(barray,0,1); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/IndexCache.java b/src/mapred/org/apache/hadoop/mapred/IndexCache.java new file mode 100644 index 0000000..1d3bfb0 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/IndexCache.java @@ -0,0 +1,166 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.Path; + +class IndexCache { + + private final JobConf conf; + private final int totalMemoryAllowed; + private AtomicInteger totalMemoryUsed = new AtomicInteger(); + private static final Log LOG = LogFactory.getLog(IndexCache.class); + + private final ConcurrentHashMap cache = + new ConcurrentHashMap(); + + private final LinkedBlockingQueue queue = + new LinkedBlockingQueue(); + + public IndexCache(JobConf conf) { + this.conf = conf; + totalMemoryAllowed = + conf.getInt("mapred.tasktracker.indexcache.mb", 10) * 1024 * 1024; + LOG.info("IndexCache created with max memory = " + totalMemoryAllowed); + } + + /** + * This method gets the index information for the given mapId and reduce. + * It reads the index file into cache if it is not already present. 
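An illustrative sketch (not part of the patch) of how a TaskTracker-side caller might size and query the cache; the attempt ID and index-file path are hypothetical, and IndexRecord/SpillRecord come from elsewhere in this patch:

    JobConf conf = new JobConf();
    conf.setInt("mapred.tasktracker.indexcache.mb", 20);     // cap cached index data at ~20 MB
    IndexCache cache = new IndexCache(conf);

    String mapId = "attempt_201012011611_0001_m_000000_0";   // hypothetical map attempt
    Path indexFile = new Path("/local/taskTracker/jobcache/file.out.index");

    // The first call reads the index file from disk; later calls for the same map hit the cache.
    IndexRecord record = cache.getIndexInformation(mapId, 0 /* reduce */, indexFile);

    // When the map output is discarded on this tracker, release its cache entry.
    cache.removeMap(mapId);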
+ * @param mapId + * @param reduce + * @param fileName The file to read the index information from if it is not + * already present in the cache + * @return The Index Information + * @throws IOException + */ + public IndexRecord getIndexInformation(String mapId, int reduce, + Path fileName) throws IOException { + + IndexInformation info = cache.get(mapId); + + if (info == null) { + info = readIndexFileToCache(fileName, mapId); + } else { + synchronized (info) { + while (null == info.mapSpillRecord) { + try { + info.wait(); + } catch (InterruptedException e) { + throw new IOException("Interrupted waiting for construction", e); + } + } + } + LOG.debug("IndexCache HIT: MapId " + mapId + " found"); + } + + if (info.mapSpillRecord.size() == 0 || + info.mapSpillRecord.size() < reduce) { + throw new IOException("Invalid request " + + " Map Id = " + mapId + " Reducer = " + reduce + + " Index Info Length = " + info.mapSpillRecord.size()); + } + return info.mapSpillRecord.getIndex(reduce); + } + + private IndexInformation readIndexFileToCache(Path indexFileName, + String mapId) throws IOException { + IndexInformation info; + IndexInformation newInd = new IndexInformation(); + if ((info = cache.putIfAbsent(mapId, newInd)) != null) { + synchronized (info) { + while (null == info.mapSpillRecord) { + try { + info.wait(); + } catch (InterruptedException e) { + throw new IOException("Interrupted waiting for construction", e); + } + } + } + LOG.debug("IndexCache HIT: MapId " + mapId + " found"); + return info; + } + LOG.debug("IndexCache MISS: MapId " + mapId + " not found") ; + SpillRecord tmp = null; + try { + tmp = new SpillRecord(indexFileName, conf); + } catch (Throwable e) { + tmp = new SpillRecord(0); + cache.remove(mapId); + throw new IOException("Error Reading IndexFile", e); + } finally { + synchronized (newInd) { + newInd.mapSpillRecord = tmp; + newInd.notifyAll(); + } + } + queue.add(mapId); + + if (totalMemoryUsed.addAndGet(newInd.getSize()) > totalMemoryAllowed) { + freeIndexInformation(); + } + return newInd; + } + + /** + * This method removes the map from the cache. It should be called when + * a map output on this tracker is discarded. + * @param mapId The taskID of this map. + */ + public void removeMap(String mapId) { + IndexInformation info = cache.remove(mapId); + if (info != null) { + totalMemoryUsed.addAndGet(-info.getSize()); + if (!queue.remove(mapId)) { + LOG.warn("Map ID" + mapId + " not found in queue!!"); + } + } else { + LOG.info("Map ID " + mapId + " not found in cache"); + } + } + + /** + * Bring memory usage below totalMemoryAllowed. + */ + private synchronized void freeIndexInformation() { + while (totalMemoryUsed.get() > totalMemoryAllowed) { + String s = queue.remove(); + IndexInformation info = cache.remove(s); + if (info != null) { + totalMemoryUsed.addAndGet(-info.getSize()); + } + } + } + + private static class IndexInformation { + SpillRecord mapSpillRecord; + + int getSize() { + return mapSpillRecord == null + ? 0 + : mapSpillRecord.size() * MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH; + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/InputFormat.java b/src/mapred/org/apache/hadoop/mapred/InputFormat.java new file mode 100644 index 0000000..cfcfe85 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/InputFormat.java @@ -0,0 +1,100 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; + +import org.apache.hadoop.fs.FileSystem; + +/** + * InputFormat describes the input-specification for a + * Map-Reduce job. + * + *

The Map-Reduce framework relies on the InputFormat of the + * job to: + *   1. Validate the input-specification of the job. + *   2. Split-up the input file(s) into logical {@link InputSplit}s, each of + *      which is then assigned to an individual {@link Mapper}. + *   3. Provide the {@link RecordReader} implementation to be used to glean + *      input records from the logical InputSplit for processing by + *      the {@link Mapper}. + * + *

The default behavior of file-based {@link InputFormat}s, typically + * sub-classes of {@link FileInputFormat}, is to split the + * input into logical {@link InputSplit}s based on the total size, in + * bytes, of the input files. However, the {@link FileSystem} blocksize of + * the input files is treated as an upper bound for input splits. A lower bound + * on the split size can be set via + * + * mapred.min.split.size.

+ * + *

Clearly, logical splits based on input-size is insufficient for many + * applications since record boundaries are to respected. In such cases, the + * application has to also implement a {@link RecordReader} on whom lies the + * responsibilty to respect record-boundaries and present a record-oriented + * view of the logical InputSplit to the individual task. + * + * @see InputSplit + * @see RecordReader + * @see JobClient + * @see FileInputFormat + * @deprecated Use {@link org.apache.hadoop.mapreduce.InputFormat} instead. + */ +@Deprecated +public interface InputFormat { + + /** + * Logically split the set of input files for the job. + * + *

Each {@link InputSplit} is then assigned to an individual {@link Mapper} + * for processing.

+ * + *

Note: The split is a logical split of the inputs and the + * input files are not physically split into chunks. For e.g. a split could + * be <input-file-path, start, offset> tuple. + * + * @param job job configuration. + * @param numSplits the desired number of splits, a hint. + * @return an array of {@link InputSplit}s for the job. + */ + InputSplit[] getSplits(JobConf job, int numSplits) throws IOException; + + /** + * Get the {@link RecordReader} for the given {@link InputSplit}. + * + *

It is the responsibility of the RecordReader to respect + * record boundaries while processing the logical split to present a + * record-oriented view to the individual task.
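To make the split and record-reader contract above concrete, a rough framework-style sketch (not part of the patch) of how an InputFormat is driven, using the stock TextInputFormat purely as an example and a made-up input directory:

    JobConf job = new JobConf();
    FileInputFormat.setInputPaths(job, new Path("/tmp/input"));   // hypothetical input dir

    InputFormat<LongWritable, Text> format = new TextInputFormat();
    ((TextInputFormat) format).configure(job);                    // lets it pick up codecs etc.

    InputSplit[] splits = format.getSplits(job, 4);               // 4 is only a hint
    for (InputSplit split : splits) {
      RecordReader<LongWritable, Text> reader =
          format.getRecordReader(split, job, Reporter.NULL);
      LongWritable key = reader.createKey();
      Text value = reader.createValue();
      while (reader.next(key, value)) {
        // one record per iteration: byte offset -> line of text
      }
      reader.close();
    }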

+ * + * @param split the {@link InputSplit} + * @param job the job that this split belongs to + * @return a {@link RecordReader} + */ + RecordReader getRecordReader(InputSplit split, + JobConf job, + Reporter reporter) throws IOException; +} + diff --git a/src/mapred/org/apache/hadoop/mapred/InputSplit.java b/src/mapred/org/apache/hadoop/mapred/InputSplit.java new file mode 100644 index 0000000..8e9086f --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/InputSplit.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; +import org.apache.hadoop.io.Writable; + +/** + * InputSplit represents the data to be processed by an + * individual {@link Mapper}. + * + *

Typically, it presents a byte-oriented view on the input and is the + * responsibility of {@link RecordReader} of the job to process this and present + * a record-oriented view. + * + * @see InputFormat + * @see RecordReader + * @deprecated Use {@link org.apache.hadoop.mapreduce.InputSplit} instead. + */ +@Deprecated +public interface InputSplit extends Writable { + + /** + * Get the total number of bytes in the data of the InputSplit. + * + * @return the number of bytes in the input split. + * @throws IOException + */ + long getLength() throws IOException; + + /** + * Get the list of hostnames where the input split is located. + * + * @return list of hostnames where data of the InputSplit is + * located as an array of Strings. + * @throws IOException + */ + String[] getLocations() throws IOException; +} diff --git a/src/mapred/org/apache/hadoop/mapred/InterTrackerProtocol.java b/src/mapred/org/apache/hadoop/mapred/InterTrackerProtocol.java new file mode 100644 index 0000000..f76bc34 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/InterTrackerProtocol.java @@ -0,0 +1,145 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; + +import org.apache.hadoop.ipc.VersionedProtocol; + +/** + * Protocol that a TaskTracker and the central JobTracker use to communicate. + * The JobTracker is the Server, which implements this protocol. + */ +interface InterTrackerProtocol extends VersionedProtocol { + /** + * version 3 introduced to replace + * emitHearbeat/pollForNewTask/pollForTaskWithClosedJob with + * {@link #heartbeat(TaskTrackerStatus, boolean, boolean, boolean, short)} + * version 4 changed TaskReport for HADOOP-549. + * version 5 introduced that removes locateMapOutputs and instead uses + * getTaskCompletionEvents to figure finished maps and fetch the outputs + * version 6 adds maxTasks to TaskTrackerStatus for HADOOP-1245 + * version 7 replaces maxTasks by maxMapTasks and maxReduceTasks in + * TaskTrackerStatus for HADOOP-1274 + * Version 8: HeartbeatResponse is added with the next heartbeat interval. + * version 9 changes the counter representation for HADOOP-2248 + * version 10 changes the TaskStatus representation for HADOOP-2208 + * version 11 changes string to JobID in getTaskCompletionEvents(). 
+ * version 12 changes the counters representation for HADOOP-1915 + * version 13 added call getBuildVersion() for HADOOP-236 + * Version 14: replaced getFilesystemName with getSystemDir for HADOOP-3135 + * Version 15: Changed format of Task and TaskStatus for HADOOP-153 + * Version 16: adds ResourceStatus to TaskTrackerStatus for HADOOP-3759 + * Version 17: Changed format of Task and TaskStatus for HADOOP-3150 + * Version 18: Changed status message due to changes in TaskStatus + * Version 19: Changed heartbeat to piggyback JobTracker restart information + so that the TaskTracker can synchronize itself. + * Version 20: Changed status message due to changes in TaskStatus + * (HADOOP-4232) + * Version 21: Changed information reported in TaskTrackerStatus' + * ResourceStatus and the corresponding accessor methods + * (HADOOP-4035) + * Version 22: Replaced parameter 'initialContact' with 'restarted' + * in heartbeat method (HADOOP-4305) + * Version 23: Added parameter 'initialContact' again in heartbeat method + * (HADOOP-4869) + * Version 24: Changed format of Task and TaskStatus for HADOOP-4759 + * Version 25: JobIDs are passed in response to JobTracker restart + * Version 26: Added numRequiredSlots to TaskStatus for MAPREDUCE-516 + * Version 27: Adding node health status to TaskStatus for MAPREDUCE-211 + * Version 28: Adding available memory and CPU usage information on TT + * to TaskTrackerStatus for MAPREDUCE-1218 + */ + public static final long versionID = 28L; + + public final static int TRACKERS_OK = 0; + public final static int UNKNOWN_TASKTRACKER = 1; + + /** + * Called regularly by the {@link TaskTracker} to update the status of its + * tasks within the job tracker. {@link JobTracker} responds with a + * {@link HeartbeatResponse} that directs the + * {@link TaskTracker} to undertake a series of 'actions' + * (see {@link org.apache.hadoop.mapred.TaskTrackerAction.ActionType}). + * + * {@link TaskTracker} must also indicate whether this is the first + * interaction (since state refresh) and acknowledge the last response + * it recieved from the {@link JobTracker} + * + * @param status the status update + * @param restarted true if the process has just started or + * restarted, false otherwise + * @param initialContact true if this is first interaction since + * 'refresh', false otherwise. + * @param acceptNewTasks true if the {@link TaskTracker} is + * ready to accept new tasks to run. + * @param responseId the last responseId successfully acted upon by the + * {@link TaskTracker}. + * @return a {@link org.apache.hadoop.mapred.HeartbeatResponse} with + * fresh instructions. + */ + HeartbeatResponse heartbeat(TaskTrackerStatus status, + boolean restarted, + boolean initialContact, + boolean acceptNewTasks, + short responseId) + throws IOException; + + /** + * The task tracker calls this once, to discern where it can find + * files referred to by the JobTracker + */ + public String getFilesystemName() throws IOException; + + /** + * Report a problem to the job tracker. + * @param taskTracker the name of the task tracker + * @param errorClass the kind of error (eg. the class that was thrown) + * @param errorMessage the human readable error message + * @throws IOException if there was a problem in communication or on the + * remote side + */ + public void reportTaskTrackerError(String taskTracker, + String errorClass, + String errorMessage) throws IOException; + /** + * Get task completion events for the jobid, starting from fromEventId. + * Returns empty aray if no events are available. 
+ * @param jobid job id + * @param fromEventId event id to start from. + * @param maxEvents the max number of events we want to look at + * @return array of task completion events. + * @throws IOException + */ + TaskCompletionEvent[] getTaskCompletionEvents(JobID jobid, int fromEventId + , int maxEvents) throws IOException; + + /** + * Grab the jobtracker system directory path where job-specific files are to be placed. + * + * @return the system directory where job-specific files are to be placed. + */ + public String getSystemDir(); + + + /** + * Returns the buildVersion of the JobTracker + */ + public String getBuildVersion() throws IOException; +} diff --git a/src/mapred/org/apache/hadoop/mapred/InvalidFileTypeException.java b/src/mapred/org/apache/hadoop/mapred/InvalidFileTypeException.java new file mode 100644 index 0000000..b2dca0c --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/InvalidFileTypeException.java @@ -0,0 +1,38 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; + +/** + * Used when file type differs from the desired file type. like + * getting a file when a directory is expected. Or a wrong file type. + */ +public class InvalidFileTypeException + extends IOException { + + public InvalidFileTypeException() { + super(); + } + + public InvalidFileTypeException(String msg) { + super(msg); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/InvalidInputException.java b/src/mapred/org/apache/hadoop/mapred/InvalidInputException.java new file mode 100644 index 0000000..f331197 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/InvalidInputException.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.hadoop.mapred;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Iterator;
+
+/**
+ * This class wraps a list of problems with the input, so that the user
+ * can get a list of problems together instead of finding and fixing them one
+ * by one.
+ */
+public class InvalidInputException extends IOException {
+ private List<IOException> problems;
+
+ /**
+ * Create the exception with the given list.
+ * @param probs the list of problems to report. This list is not copied.
+ */
+ public InvalidInputException(List<IOException> probs) {
+ problems = probs;
+ }
+
+ /**
+ * Get the complete list of the problems reported.
+ * @return the list of problems, which must not be modified
+ */
+ public List<IOException> getProblems() {
+ return problems;
+ }
+
+ /**
+ * Get a summary message of the problems found.
+ * @return the concatenated messages from all of the problems.
+ */
+ public String getMessage() {
+ StringBuffer result = new StringBuffer();
+ Iterator<IOException> itr = problems.iterator();
+ while(itr.hasNext()) {
+ result.append(itr.next().getMessage());
+ if (itr.hasNext()) {
+ result.append("\n");
+ }
+ }
+ return result.toString();
+ }
+}
diff --git a/src/mapred/org/apache/hadoop/mapred/InvalidJobConfException.java b/src/mapred/org/apache/hadoop/mapred/InvalidJobConfException.java
new file mode 100644
index 0000000..1f02369
--- /dev/null
+++ b/src/mapred/org/apache/hadoop/mapred/InvalidJobConfException.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import java.io.IOException;
+
+/**
+ * This exception is thrown when the jobconf misses some mandatory attributes
+ * or the value of some attributes is invalid.
+ */
+public class InvalidJobConfException
+ extends IOException {
+
+ public InvalidJobConfException() {
+ super();
+ }
+
+ public InvalidJobConfException(String msg) {
+ super(msg);
+ }
+
+}
diff --git a/src/mapred/org/apache/hadoop/mapred/IsolationRunner.java b/src/mapred/org/apache/hadoop/mapred/IsolationRunner.java
new file mode 100644
index 0000000..ecc73bb
--- /dev/null
+++ b/src/mapred/org/apache/hadoop/mapred/IsolationRunner.java
@@ -0,0 +1,221 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.DataInputStream; +import java.io.File; +import java.io.IOException; +import java.net.URL; +import java.net.URLClassLoader; +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocalDirAllocator; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.BytesWritable; +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.mapred.JvmTask; + +public class IsolationRunner { + private static final Log LOG = + LogFactory.getLog(IsolationRunner.class.getName()); + + private static class FakeUmbilical implements TaskUmbilicalProtocol { + + public long getProtocolVersion(String protocol, long clientVersion) { + return TaskUmbilicalProtocol.versionID; + } + + public void done(TaskAttemptID taskid) throws IOException { + LOG.info("Task " + taskid + " reporting done."); + } + + public void fsError(TaskAttemptID taskId, String message) throws IOException { + LOG.info("Task " + taskId + " reporting file system error: " + message); + } + + public void shuffleError(TaskAttemptID taskId, String message) throws IOException { + LOG.info("Task " + taskId + " reporting shuffle error: " + message); + } + + public void fatalError(TaskAttemptID taskId, String msg) throws IOException{ + LOG.info("Task " + taskId + " reporting fatal error: " + msg); + } + + public JvmTask getTask(JvmContext context) throws IOException { + return null; + } + + public boolean ping(TaskAttemptID taskid) throws IOException { + return true; + } + + public void commitPending(TaskAttemptID taskId, TaskStatus taskStatus) + throws IOException, InterruptedException { + statusUpdate(taskId, taskStatus); + } + + public boolean canCommit(TaskAttemptID taskid) throws IOException { + return true; + } + + public boolean statusUpdate(TaskAttemptID taskId, TaskStatus taskStatus) + throws IOException, InterruptedException { + StringBuffer buf = new StringBuffer("Task "); + buf.append(taskId); + buf.append(" making progress to "); + buf.append(taskStatus.getProgress()); + String state = taskStatus.getStateString(); + if (state != null) { + buf.append(" and state of "); + buf.append(state); + } + LOG.info(buf.toString()); + // ignore phase + // ignore counters + return true; + } + + public void reportDiagnosticInfo(TaskAttemptID taskid, String trace) throws IOException { + LOG.info("Task " + taskid + " has problem " + trace); + } + + public MapTaskCompletionEventsUpdate getMapCompletionEvents(JobID jobId, + int fromEventId, int maxLocs, TaskAttemptID id) throws IOException { + return new MapTaskCompletionEventsUpdate(TaskCompletionEvent.EMPTY_ARRAY, + false); + } + + public void reportNextRecordRange(TaskAttemptID taskid, + SortedRanges.Range range) throws IOException { + LOG.info("Task " + taskid + " reportedNextRecordRange " + range); + } + } + + private static ClassLoader 
makeClassLoader(JobConf conf,
+ File workDir) throws IOException {
+ List<URL> cp = new ArrayList<URL>();
+
+ String jar = conf.getJar();
+ if (jar != null) { // if jar exists, copy it into workDir
+ File[] libs = new File(workDir, "lib").listFiles();
+ if (libs != null) {
+ for (int i = 0; i < libs.length; i++) {
+ cp.add(new URL("file:" + libs[i].toString()));
+ }
+ }
+ cp.add(new URL("file:" + new File(workDir, "classes/").toString()));
+ cp.add(new URL("file:" + workDir.toString() + "/"));
+ }
+ return new URLClassLoader(cp.toArray(new URL[cp.size()]));
+ }
+
+ /**
+ * Create empty sequence files for any of the map outputs that we don't have.
+ * @param fs the filesystem to create the files in
+ * @param dir the directory name to create the files in
+ * @param conf the jobconf
+ * @throws IOException if something goes wrong writing
+ */
+ private static void fillInMissingMapOutputs(FileSystem fs,
+ TaskAttemptID taskId,
+ int numMaps,
+ JobConf conf) throws IOException {
+ Class<? extends WritableComparable> keyClass
+ = conf.getMapOutputKeyClass().asSubclass(WritableComparable.class);
+ Class<? extends Writable> valueClass
+ = conf.getMapOutputValueClass().asSubclass(Writable.class);
+ MapOutputFile namer = new MapOutputFile(taskId.getJobID());
+ namer.setConf(conf);
+ for(int i=0; i<numMaps; i++) {
+ Path f = namer.getInputFile(i, taskId);
+ if (!fs.exists(f)) {
+ LOG.info("Create missing input: " + f);
+ SequenceFile.Writer out =
+ SequenceFile.createWriter(fs, conf, f, keyClass, valueClass);
+ out.close();
+ }
+ }
+ }
+
+ /**
+ * Run a single task.
+ * @param args the first argument is the task directory
+ */
+ public static void main(String[] args)
+ throws ClassNotFoundException, IOException, InterruptedException {
+ if (args.length != 1) {
+ System.out.println("Usage: IsolationRunner <path>/job.xml");
+ System.exit(1);
+ }
+ File jobFilename = new File(args[0]);
+ if (!jobFilename.exists() || !jobFilename.isFile()) {
+ System.out.println(jobFilename + " is not a valid job file.");
+ System.exit(1);
+ }
+ JobConf conf = new JobConf(new Path(jobFilename.toString()));
+ TaskAttemptID taskId = TaskAttemptID.forName(conf.get("mapred.task.id"));
+ boolean isMap = conf.getBoolean("mapred.task.is.map", true);
+ int partition = conf.getInt("mapred.task.partition", 0);
+
+ // setup the local and user working directories
+ FileSystem local = FileSystem.getLocal(conf);
+ LocalDirAllocator lDirAlloc = new LocalDirAllocator("mapred.local.dir");
+ File workDirName = new File(lDirAlloc.getLocalPathToRead(
+ TaskTracker.getLocalTaskDir(
+ taskId.getJobID().toString(),
+ taskId.toString())
+ + Path.SEPARATOR + "work",
+ conf). toString());
+ local.setWorkingDirectory(new Path(workDirName.toString()));
+ FileSystem.get(conf).setWorkingDirectory(conf.getWorkingDirectory());
+
+ // set up a classloader with the right classpath
+ ClassLoader classLoader = makeClassLoader(conf, workDirName);
+ Thread.currentThread().setContextClassLoader(classLoader);
+ conf.setClassLoader(classLoader);
+
+ Task task;
+ if (isMap) {
+ Path localSplit = new Path(new Path(jobFilename.toString()).getParent(),
+ "split.dta");
+ DataInputStream splitFile = FileSystem.getLocal(conf).open(localSplit);
+ String splitClass = Text.readString(splitFile);
+ BytesWritable split = new BytesWritable();
+ split.readFields(splitFile);
+ splitFile.close();
+ task = new MapTask(jobFilename.toString(), taskId, partition,
+ splitClass, split, 1, conf.getUser());
+ } else {
+ int numMaps = conf.getNumMapTasks();
+ fillInMissingMapOutputs(local, taskId, numMaps, conf);
+ task = new ReduceTask(jobFilename.toString(), taskId, partition, numMaps,
+ 1, conf.getUser());
+ }
+ task.setConf(conf);
+ task.run(conf, new FakeUmbilical());
+ }
+
+}
diff --git a/src/mapred/org/apache/hadoop/mapred/JSPUtil.java b/src/mapred/org/apache/hadoop/mapred/JSPUtil.java
new file mode 100644
index 0000000..e1c3c44
--- /dev/null
+++ b/src/mapred/org/apache/hadoop/mapred/JSPUtil.java
@@ -0,0 +1,730 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.net.URLEncoder; +import java.util.Collection; +import java.util.Date; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.Map; + +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import javax.servlet.jsp.JspWriter; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.mapred.JobHistory.JobInfo; +import org.apache.hadoop.mapred.JobTracker.RetireJobInfo; +import org.apache.hadoop.mapreduce.TaskType; +import org.apache.hadoop.util.ServletUtil; +import org.apache.hadoop.util.StringUtils; + +class JSPUtil { + private static final String PRIVATE_ACTIONS_KEY = "webinterface.private.actions"; + + public static final Configuration conf = new Configuration(); + + //LRU based cache + private static final Map jobHistoryCache = + new LinkedHashMap(); + + private static final int CACHE_SIZE = + conf.getInt("mapred.job.tracker.jobhistory.lru.cache.size", 5); + + private static final Log LOG = LogFactory.getLog(JSPUtil.class); + /** + * Method used to process the request from the job page based on the + * request which it has received. For example like changing priority. + * + * @param request HTTP request Object. + * @param response HTTP response object. + * @param tracker {@link JobTracker} instance + * @throws IOException + */ + public static void processButtons(HttpServletRequest request, + HttpServletResponse response, JobTracker tracker) throws IOException { + + if (conf.getBoolean(PRIVATE_ACTIONS_KEY, false) + && request.getParameter("killJobs") != null) { + String[] jobs = request.getParameterValues("jobCheckBox"); + if (jobs != null) { + for (String job : jobs) { + tracker.killJob(JobID.forName(job)); + } + } + } + + if (conf.getBoolean(PRIVATE_ACTIONS_KEY, false) && + request.getParameter("changeJobPriority") != null) { + String[] jobs = request.getParameterValues("jobCheckBox"); + + if (jobs != null) { + JobPriority jobPri = JobPriority.valueOf(request + .getParameter("setJobPriority")); + + for (String job : jobs) { + tracker.setJobPriority(JobID.forName(job), jobPri); + } + } + } + } + + /** + * Method used to generate the Job table for Job pages. + * + * @param label display heading to be used in the job table. + * @param jobs vector of jobs to be displayed in table. + * @param refresh refresh interval to be used in jobdetails page. + * @param rowId beginning row id to be used in the table. 
+ * @return + * @throws IOException + */ + public static String generateJobTable(String label, Collection jobs + , int refresh, int rowId) throws IOException { + + boolean isModifiable = label.equals("Running") + && conf.getBoolean( + PRIVATE_ACTIONS_KEY, false); + StringBuffer sb = new StringBuffer(); + + sb.append("\n"); + + if (jobs.size() > 0) { + if (isModifiable) { + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + } else { + sb.append(""); + } + + int totalMaps = 0; + int comMaps = 0; + int totalReduces = 0; + int comReduces = 0; + for (Iterator it = jobs.iterator(); it.hasNext(); ) { + JobInProgress job = it.next(); + totalMaps += job.desiredMaps(); + totalReduces += job.desiredReduces(); + comMaps += job.finishedMaps(); + comReduces += job.finishedReduces(); + } + + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append("\n"); + for (Iterator it = jobs.iterator(); it.hasNext(); ++rowId) { + JobInProgress job = it.next(); + JobProfile profile = job.getProfile(); + JobStatus status = job.getStatus(); + JobID jobid = profile.getJobID(); + + int desiredMaps = job.desiredMaps(); + int desiredReduces = job.desiredReduces(); + int completedMaps = job.finishedMaps(); + int completedReduces = job.finishedReduces(); + String name = profile.getJobName(); + String abbreviatedName + = (name.length() > 76 ? name.substring(0,76) + "..." : name); + + String jobpri = job.getPriority().toString(); + String schedulingInfo = job.getStatus().getSchedulingInfo(); + + if (isModifiable) { + sb.append(""); + } else { + sb.append(""); + } + + sb.append("" + "" + "" + "" + "\n"); + } + if (isModifiable) { + sb.append("\n"); + } + } else { + sb.append("\n"); + } + sb.append("
"); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(" 
 
JobidPriority" + + "UserNameMap % CompleteMap Total " + totalMaps + "Maps Completed " + comMaps + "Reduce % CompleteReduce Total " + totalReduces + "Reduces Completed " + comReduces + "Job Scheduling Information
" + jobid + "" + jobpri + "" + profile.getUser() + "" + ("".equals(abbreviatedName) ? " " : abbreviatedName) + + "" + + StringUtils.formatPercent(status.mapProgress(), 2) + + ServletUtil.percentageGraph(status.mapProgress() * 100, 80) + + "" + desiredMaps + "" + completedMaps + + "" + + StringUtils.formatPercent(status.reduceProgress(), 2) + + ServletUtil.percentageGraph(status.reduceProgress() * 100, 80) + + "" + desiredReduces + " " + completedReduces + + "" + schedulingInfo + + "
none" + + "
\n"); + + return sb.toString(); + } + + /** + * Given jobId, resolve the link to jobdetailshistory.jsp + * @param tracker JobTracker + * @param jobId JobID + * @return the link to the page jobdetailshistory.jsp for the job + */ + public static String getJobDetailsHistoryLink(JobTracker tracker, + String jobId) { + RetireJobInfo info = tracker.retireJobs.get(JobID.forName(jobId)); + String historyFileUrl = getHistoryFileUrl(info); + String result = (historyFileUrl == null ? "" : + "jobdetailshistory.jsp?jobid=" + jobId + "&logFile=" + + historyFileUrl); + return result; + } + + /** + * Given jobId, taskid resolve the link to taskdetailshistory.jsp + * @param tracker JobTracker + * @param jobId JobID + * @param tid String + * @return the link to the page jobdetailshistory.jsp for the job + */ + public static String getTaskDetailsHistoryLink(JobTracker tracker, + String jobId, + String tid) { + RetireJobInfo info = tracker.retireJobs.get(JobID.forName(jobId)); + String historyFileUrl = getHistoryFileUrl(info); + String result = (historyFileUrl == null ? "" : + "taskdetailshistory.jsp?jobid=" + jobId + "&logFile=" + + historyFileUrl + "&taskid=" + tid); + return result; + } + + /** + * Obtain history file URL from RetireJobInfo + * @param info RetireJobInfo + * @return corresponding history file url, null if cannot creat one + */ + private static String getHistoryFileUrl(RetireJobInfo info) { + String historyFile = info.getHistoryFile(); + String historyFileUrl = null; + if (historyFile != null && !historyFile.equals("")) { + try { + historyFileUrl = URLEncoder.encode(info.getHistoryFile(), "UTF-8"); + } catch (UnsupportedEncodingException e) { + LOG.warn("Can't create history url ", e); + } + } + return historyFileUrl; + } + + @SuppressWarnings("unchecked") + public static String generateRetiredJobTable(JobTracker tracker, int rowId) + throws IOException { + + StringBuffer sb = new StringBuffer(); + sb.append("\n"); + + Iterator iterator = + tracker.retireJobs.getAll().descendingIterator(); + if (!iterator.hasNext()) { + sb.append("\n"); + } else { + sb.append(""); + + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append("\n"); + for (int i = 0; i < 100 && iterator.hasNext(); i++) { + RetireJobInfo info = iterator.next(); + String historyFileUrl = getHistoryFileUrl(info); + sb.append(""); + + String name = info.profile.getJobName(); + String abbreviatedName + = (name.length() > 76 ? name.substring(0,76) + "..." : name); + + sb.append( + "" + + + "" + + "" + + "" + + "" + + "" + + "" + + + "" + + + "" + + + "" + + + "\n"); + rowId++; + } + } + sb.append("
none" + + "
JobidPriorityUserNameStateStart TimeFinish TimeMap % CompleteReduce % CompleteJob Scheduling Information
" + + + (historyFileUrl == null ? "" : + "") + + + info.status.getJobId() + "" + + info.status.getJobPriority().toString() + "" + info.profile.getUser() + + "" + abbreviatedName + + "" + JobStatus.getJobRunState(info.status.getRunState()) + + "" + new Date(info.status.getStartTime()) + "" + new Date(info.finishTime) + "" + StringUtils.formatPercent(info.status.mapProgress(), 2) + + ServletUtil.percentageGraph(info.status.mapProgress() * 100, 80) + + "" + StringUtils.formatPercent(info.status.reduceProgress(), 2) + + ServletUtil.percentageGraph( + info.status.reduceProgress() * 100, 80) + + "" + info.status.getSchedulingInfo() + "
\n"); + return sb.toString(); + } + + static JobInfo getJobInfo(HttpServletRequest request, FileSystem fs) + throws IOException { + String jobid = request.getParameter("jobid"); + String logFile = request.getParameter("logFile"); + synchronized(jobHistoryCache) { + JobInfo jobInfo = jobHistoryCache.remove(jobid); + if (jobInfo == null) { + jobInfo = new JobHistory.JobInfo(jobid); + LOG.info("Loading Job History file "+jobid + ". Cache size is " + + jobHistoryCache.size()); + DefaultJobHistoryParser.parseJobTasks( logFile, jobInfo, fs) ; + } + jobHistoryCache.put(jobid, jobInfo); + if (jobHistoryCache.size() > CACHE_SIZE) { + Iterator> it = + jobHistoryCache.entrySet().iterator(); + String removeJobId = it.next().getKey(); + it.remove(); + LOG.info("Job History file removed form cache "+removeJobId); + } + return jobInfo; + } + } + + @SuppressWarnings("unchecked") + public static void generateRetiredJobXml(JspWriter out, JobTracker tracker, int rowId) + throws IOException { + + Iterator iterator = + tracker.retireJobs.getAll().descendingIterator(); + + for (int i = 0; i < 100 && iterator.hasNext(); i++) { + RetireJobInfo info = iterator.next(); + JobStatus status = info.status; + StringBuilder sb = new StringBuilder(); + sb.append(""); + sb.append("" + status.getJobId() + ""); + sb.append("jobdetailshistory.jsp?jobid=" + status.getJobId() + + "&logFile=" + + URLEncoder.encode(info.getHistoryFile().toString(), "UTF-8") + + ""); + sb.append("" + status.getJobPriority().toString() + + ""); + sb.append("" + info.profile.getUser() + ""); + sb.append("" + info.profile.getJobName() + ""); + sb.append("" + JobStatus.getJobRunState(status.getRunState()) + + ""); + sb.append("" + new Date(status.getStartTime()) + + ""); + sb.append("" + new Date(info.finishTime) + + ""); + sb.append("" + StringUtils.formatPercent( + status.mapProgress(), 2) + ""); + sb.append("" + StringUtils.formatPercent( + status.reduceProgress(), 2) + ""); + sb.append("" + status.getSchedulingInfo() + ""); + sb.append("\n"); + out.write(sb.toString()); + rowId++; + } + } + + /** + * Method used to generate the cluster resource utilization table + */ + public static String generateClusterResTable(JobTracker tracker) + throws IOException { + ResourceReporter reporter = tracker.getResourceReporter(); + if (reporter == null) { + return ""; + } + StringBuffer sb = new StringBuffer(); + sb.append("\n"); + sb.append("\n"); + sb.append("\n"); + sb.append("\n"); + sb.append("\n"); + sb.append("\n"); + sb.append("\n"); + sb.append("\n"); + sb.append("\n"); + sb.append("\n"); + sb.append("\n"); + sb.append(String.format( + "\n", + reporter.getClusterCpuTotalGHz(), + reporter.getClusterCpuUsageGHz(), + Math.min(reporter.getClusterCpuUsageGHz() / + reporter.getClusterCpuTotalGHz() * 100D, 100D))); + sb.append(String.format( + "\n", + reporter.getClusterMemTotalGB(), + reporter.getClusterMemUsageGB(), + reporter.getClusterMemUsageGB() / + reporter.getClusterMemTotalGB() * 100D, + reporter.getReportedTaskTrackers())); + sb.append("\n"); + sb.append("
CPUMEMReported
TotalUsed%TotalUsed%
%.1f GHz%.1f GHz%.1f%%%.1f GB%.1f GB%.1f%%%d
\n"); + return sb.toString(); + } + + /** + * Method used to generate the Job table for Job pages with resource + * utilization information obtain from {@link ResourceReporter}. + * + * @param label display heading to be used in the job table. + * @param jobs vector of jobs to be displayed in table. + * @param refresh refresh interval to be used in jobdetails page. + * @param rowId beginning row id to be used in the table. + * @return + * @throws IOException + */ + public static String generateJobTableWithResourceInfo(String label, + Collection jobs, int refresh, int rowId, + JobTracker tracker) throws IOException { + ResourceReporter reporter = tracker.getResourceReporter(); + + if (reporter == null) { + return generateJobTable(label, jobs, refresh, rowId); + } + boolean isModifiable = label.equals("Running") + && conf.getBoolean(PRIVATE_ACTIONS_KEY, false); + StringBuffer sb = new StringBuffer(); + + sb.append("\n"); + + if (jobs.size() > 0) { + if (isModifiable) { + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + } else { + sb.append(""); + } + + int totalMaps = 0; + int comMaps = 0; + int totalReduces = 0; + int comReduces = 0; + for (Iterator it = jobs.iterator(); it.hasNext(); ) { + JobInProgress job = it.next(); + totalMaps += job.desiredMaps(); + totalReduces += job.desiredReduces(); + comMaps += job.finishedMaps(); + comReduces += job.finishedReduces(); + } + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append("\n"); + for (Iterator it = jobs.iterator(); it.hasNext(); ++rowId) { + JobInProgress job = it.next(); + JobProfile profile = job.getProfile(); + JobStatus status = job.getStatus(); + JobID jobid = profile.getJobID(); + + int desiredMaps = job.desiredMaps(); + int desiredReduces = job.desiredReduces(); + int completedMaps = job.finishedMaps(); + int completedReduces = job.finishedReduces(); + String name = profile.getJobName(); + String jobpri = job.getPriority().toString(); + + if (isModifiable) { + sb.append(""); + } else { + sb.append(""); + } + String cpu = "-"; + String mem = "-"; + String memMax = "-"; + String cpuCost = "-"; + String memCost = "-"; + if (reporter.getJobCpuCumulatedUsageTime(jobid) != + ResourceReporter.UNAVAILABLE) { + cpu = String.format("%.2f%%", + reporter.getJobCpuPercentageOnCluster(jobid)); + if (reporter.getJobCpuPercentageOnCluster(jobid) > 50) { + cpu = "" + cpu + ""; + } + mem = String.format("%.2f%%", + reporter.getJobMemPercentageOnCluster(jobid)); + if (reporter.getJobMemPercentageOnCluster(jobid) > 50) { + mem = "" + mem + ""; + } + cpuCost = String.format("%.2f", + reporter.getJobCpuCumulatedUsageTime(jobid) / 1000D); + memCost = String.format("%.2f", + reporter.getJobMemCumulatedUsageTime(jobid) / 1000D); + memMax = String.format("%.2f%%", + reporter.getJobMemMaxPercentageOnBox(jobid)); + if (reporter.getJobMemMaxPercentageOnBox(jobid) > 50) { + memMax = "" + memMax + ""; + } + } + + sb.append("" + "" + "" + "" + + "" + + "" + + "" + + "\n"); + } + if (isModifiable) { + sb.append("\n"); + } + } else { + sb.append("\n"); + } + sb.append("
"); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(""); + sb.append(" 
 
JobidPriority" + + "UserNameMap % CompleteMap Total " + totalMaps + "Maps Completed " + comMaps + "Reduce % CompleteReduce Total " + totalReduces + "Reduces Completed " + comReduces + "CPU NowCPU Cumulated Cluster-secMEM NowMEM Cumulated Cluster-secMEM Max/Node
" + jobid + "" + jobpri + "" + profile.getUser() + "" + ("".equals(name) ? " " : name) + "" + + StringUtils.formatPercent(status.mapProgress(), 2) + + ServletUtil.percentageGraph(status.mapProgress() * 100, 80) + + "" + desiredMaps + "" + completedMaps + + "" + + StringUtils.formatPercent(status.reduceProgress(), 2) + + ServletUtil.percentageGraph(status.reduceProgress() * 100, 80) + + "" + desiredReduces + " " + completedReduces + + "" + cpu + "" + cpuCost + "" + mem + "" + memCost + "" + memMax + "
none" + + "
\n"); + + return sb.toString(); + } + + /** + * Method used to generate the txt based Job table for Job pages. + * + * @param jobs vector of jobs to be displayed in table. + * @param colSeparator the char used to separate columns + * @param rowSeparator the char used to separate records + * @return a String contains the table + * @throws IOException + */ + public static String generateTxtJobTable(Collection jobs, + JobTracker tracker) throws IOException { + char colSeparator = '\t'; + char rowSeparator = '\n'; + + StringBuffer sb = new StringBuffer(); + sb.append("01.JOBID" + colSeparator + + "02.START" + colSeparator + + "03.FINISH" + colSeparator + + "04.USER" + colSeparator + + "05.NAME" + colSeparator + + "06.BLACK_TT" + colSeparator + + "07.PRIORITY" + colSeparator + + "08.MAP_TOTAL" + colSeparator + + "09.MAP_COMPLETE" + colSeparator + + "10.MAP_RUN" + colSeparator + + "11.MAP_SPECU" + colSeparator + + "12.MAP_NONLOC" + colSeparator + + "13.MAP_KILLED" + colSeparator + + "14.MAP_FAILED" + colSeparator + + "15.RED_TOTAL" + colSeparator + + "16.RED_COMPLETE" + colSeparator + + "17.RED_RUN" + colSeparator + + "18.RED_SPECU" + colSeparator + + "19.RED_KILLED" + colSeparator + + "20.RED_FAILED" + colSeparator + + "21.%MEM" + colSeparator + + "22.%MEM_MAX" + colSeparator + + "23.%MEM_PEAK" + colSeparator + + "24.MEM_MS" + colSeparator + + "25.%CPU" + colSeparator + + "26.%CPU_MAX" + colSeparator + + "27.CPU_MS" + rowSeparator); + + if (jobs.size() > 0) { + for (Iterator it = jobs.iterator(); it.hasNext();) { + JobInProgress job = it.next(); + JobProfile profile = job.getProfile(); + String user = profile.getUser(); + String name = profile.getJobName(). + replace(' ', '_').replace('\t', '_').replace('\n', '_'); + int desiredMaps = job.desiredMaps(); + int desiredReduces = job.desiredReduces(); + int runningMaps = 0; + int failedMaps = 0; + int killedMaps = 0; + for (TaskInProgress tip: job.getTasks(TaskType.MAP)) { + if (tip.isRunning()) { + runningMaps += tip.getActiveTasks().size(); + tip.numKilledTasks(); + failedMaps += tip.numTaskFailures(); + killedMaps += tip.numKilledTasks(); + } + } + int runningReduces = 0; + int failedReduces = 0; + int killedReduces = 0; + for (TaskInProgress tip: job.getTasks(TaskType.REDUCE)) { + if (tip.isRunning()) { + runningReduces += tip.getActiveTasks().size(); + failedReduces += tip.numTaskFailures(); + killedReduces += tip.numKilledTasks(); + } + } + int completedMaps = job.finishedMaps(); + int completedReduces = job.finishedReduces(); + int nonLocalRunningMaps = job.getNonLocalRunningMaps().size(); + long submitTime = job.getStartTime(); + long finishTime = job.getFinishTime(); + String jobpri = job.getPriority().toString(); + JobID jobId = job.getJobID(); + double mem = 0, memMax = 0, memMaxPeak = 0, memCost = 0; + double cpu = 0, cpuMax = 0, cpuCost = 0; + ResourceReporter reporter = tracker.getResourceReporter(); + if (reporter != null) { + mem = reporter.getJobCpuPercentageOnCluster(jobId); + memMax = reporter.getJobMemMaxPercentageOnBox(jobId); + memMaxPeak = reporter.getJobMemMaxPercentageOnBoxAllTime(jobId); + memCost = reporter.getJobMemCumulatedUsageTime(jobId); + cpu = reporter.getJobCpuPercentageOnCluster(jobId); + cpuMax = reporter.getJobCpuMaxPercentageOnBox(jobId); + cpuCost = reporter.getJobCpuCumulatedUsageTime(jobId); + } + sb.append(jobId.toString() + colSeparator + + submitTime + colSeparator + + finishTime + colSeparator + + user + colSeparator + + name + colSeparator + + job.getNoOfBlackListedTrackers() + colSeparator + + jobpri 
+ colSeparator + + desiredMaps + colSeparator + + completedMaps + colSeparator + + runningMaps + colSeparator + + job.speculativeMapTasks + colSeparator + + nonLocalRunningMaps + colSeparator + + killedMaps + colSeparator + + failedMaps + colSeparator + + desiredReduces + colSeparator + + completedReduces + colSeparator + + runningReduces + colSeparator + + job.speculativeReduceTasks + colSeparator + + killedReduces + colSeparator + + failedReduces + colSeparator + + mem + colSeparator + + memMax + colSeparator + + memMaxPeak + colSeparator + + memCost + colSeparator + + cpu + colSeparator + + cpuMax + colSeparator + + cpuCost + rowSeparator); + } + } + return sb.toString(); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/JVMId.java b/src/mapred/org/apache/hadoop/mapred/JVMId.java new file mode 100644 index 0000000..ab76a61 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JVMId.java @@ -0,0 +1,148 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.text.NumberFormat; + +class JVMId extends ID { + boolean isMap; + JobID jobId; + private static final String JVM = "jvm"; + private static NumberFormat idFormat = NumberFormat.getInstance(); + static { + idFormat.setGroupingUsed(false); + idFormat.setMinimumIntegerDigits(6); + } + + public JVMId(JobID jobId, boolean isMap, int id) { + super(id); + this.isMap = isMap; + this.jobId = jobId; + } + + public JVMId (String jtIdentifier, int jobId, boolean isMap, int id) { + this(new JobID(jtIdentifier, jobId), isMap, id); + } + + public JVMId() { + jobId = new JobID(); + } + + public boolean isMapJVM() { + return isMap; + } + public JobID getJobId() { + return jobId; + } + public boolean equals(Object o) { + if(o == null) + return false; + if(o.getClass().equals(JVMId.class)) { + JVMId that = (JVMId)o; + return this.id==that.id + && this.isMap == that.isMap + && this.jobId.equals(that.jobId); + } + else return false; + } + + /**Compare TaskInProgressIds by first jobIds, then by tip numbers. Reduces are + * defined as greater then maps.*/ + @Override + public int compareTo(org.apache.hadoop.mapreduce.ID o) { + JVMId that = (JVMId)o; + int jobComp = this.jobId.compareTo(that.jobId); + if(jobComp == 0) { + if(this.isMap == that.isMap) { + return this.id - that.id; + } else { + return this.isMap ? -1 : 1; + } + } else { + return jobComp; + } + } + + @Override + public String toString() { + return appendTo(new StringBuilder(JVM)).toString(); + } + + /** + * Add the unique id to the given StringBuilder. + * @param builder the builder to append to + * @return the passed in builder. 
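+ * (For illustration only, with an assumed job id rather than one from a real
+ * cluster: for job_200912301234_0001, the map JVM with id 3 renders as
+ * jvm_200912301234_0001_m_000003, assuming the usual job id formatting.)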
+ */ + protected StringBuilder appendTo(StringBuilder builder) { + return jobId.appendTo(builder). + append(SEPARATOR). + append(isMap ? 'm' : 'r'). + append(SEPARATOR). + append(idFormat.format(id)); + } + + @Override + public int hashCode() { + return jobId.hashCode() * 11 + id; + } + + @Override + public void readFields(DataInput in) throws IOException { + super.readFields(in); + this.jobId.readFields(in); + this.isMap = in.readBoolean(); + } + + @Override + public void write(DataOutput out) throws IOException { + super.write(out); + jobId.write(out); + out.writeBoolean(isMap); + } + + /** Construct a JVMId object from given string + * @return constructed JVMId object or null if the given String is null + * @throws IllegalArgumentException if the given string is malformed + */ + public static JVMId forName(String str) + throws IllegalArgumentException { + if(str == null) + return null; + try { + String[] parts = str.split("_"); + if(parts.length == 5) { + if(parts[0].equals(JVM)) { + boolean isMap = false; + if(parts[3].equals("m")) isMap = true; + else if(parts[3].equals("r")) isMap = false; + else throw new Exception(); + return new JVMId(parts[1], Integer.parseInt(parts[2]), + isMap, Integer.parseInt(parts[4])); + } + } + }catch (Exception ex) {//fall below + } + throw new IllegalArgumentException("TaskId string : " + str + + " is not properly formed"); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/JobChangeEvent.java b/src/mapred/org/apache/hadoop/mapred/JobChangeEvent.java new file mode 100644 index 0000000..8c1290e --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JobChangeEvent.java @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +/** + * {@link JobChangeEvent} is used to capture state changes in a job. A job can + * change its state w.r.t priority, progress, run-state etc. + */ +abstract class JobChangeEvent { + private JobInProgress jip; + + JobChangeEvent(JobInProgress jip) { + this.jip = jip; + } + + /** + * Get the job object for which the change is reported + */ + JobInProgress getJobInProgress() { + return jip; + } +} \ No newline at end of file diff --git a/src/mapred/org/apache/hadoop/mapred/JobClient.java b/src/mapred/org/apache/hadoop/mapred/JobClient.java new file mode 100644 index 0000000..a118c2e --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JobClient.java @@ -0,0 +1,2374 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.DataInput; +import java.io.DataOutput; +import java.io.DataOutputStream; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.FileInputStream; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.net.URLConnection; +import java.net.UnknownHostException; +import java.util.Arrays; +import java.util.Set; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.Collection; +import java.util.Comparator; +import java.util.List; +import java.util.Random; +import java.util.Map; +import java.util.HashMap; + +import javax.security.auth.login.LoginException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.filecache.DistributedCache; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.io.BytesWritable; +import org.apache.hadoop.io.DataOutputBuffer; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.io.serializer.SerializationFactory; +import org.apache.hadoop.io.serializer.Serializer; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.mapred.Counters.Counter; +import org.apache.hadoop.mapred.Counters.Group; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.UnixUserGroupInformation; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.io.MD5Hash; + +/** + * JobClient is the primary interface for the user-job to interact + * with the {@link JobTracker}. + * + * JobClient provides facilities to submit jobs, track their + * progress, access component-tasks' reports/logs, get the Map-Reduce cluster + * status information etc. + * + *

+ * <p>The job submission process involves:
+ * <ol>
+ *   <li>
+ *   Checking the input and output specifications of the job.
+ *   </li>
+ *   <li>
+ *   Computing the {@link InputSplit}s for the job.
+ *   </li>
+ *   <li>
+ *   Setup the requisite accounting information for the {@link DistributedCache}
+ *   of the job, if necessary.
+ *   </li>
+ *   <li>
+ *   Copying the job's jar and configuration to the map-reduce system directory
+ *   on the distributed file-system.
+ *   </li>
+ *   <li>
+ *   Submitting the job to the JobTracker and optionally monitoring
+ *   its status.
+ *   </li>
+ * </ol></p>
+ *
+ * Normally the user creates the application, describes various facets of the
+ * job via {@link JobConf} and then uses the JobClient to submit
+ * the job and monitor its progress.
+ *
+ * <p>Here is an example on how to use JobClient:</p>
+ * <p><blockquote><pre>
+ *     // Create a new JobConf
+ *     JobConf job = new JobConf(new Configuration(), MyJob.class);
+ *     
+ *     // Specify various job-specific parameters     
+ *     job.setJobName("myjob");
+ *     
+ *     job.setInputPath(new Path("in"));
+ *     job.setOutputPath(new Path("out"));
+ *     
+ *     job.setMapperClass(MyJob.MyMapper.class);
+ *     job.setReducerClass(MyJob.MyReducer.class);
+ *
+ *     // Submit the job, then poll for progress until the job is complete
+ *     JobClient.runJob(job);
+ * </pre></blockquote></p>
+ *
+ * <h4 id="JobControl">Job Control</h4>
+ *
+ * <p>At times clients would chain map-reduce jobs to accomplish complex tasks
+ * which cannot be done via a single map-reduce job. This is fairly easy since
+ * the output of the job, typically, goes to distributed file-system and that
+ * can be used as the input for the next job.</p>
+ *
+ * <p>However, this also means that the onus on ensuring jobs are complete
+ * (success/failure) lies squarely on the clients. In such situations the
+ * various job-control options are:
+ * <ol>
+ *   <li>
+ *   {@link #runJob(JobConf)} : submits the job and returns only after
+ *   the job has completed.
+ *   </li>
+ *   <li>
+ *   {@link #submitJob(JobConf)} : only submits the job, then poll the
+ *   returned handle to the {@link RunningJob} to query status and make
+ *   scheduling decisions.
+ *   </li>
+ *   <li>
+ *   {@link JobConf#setJobEndNotificationURI(String)} : setup a notification
+ *   on job-completion, thus avoiding polling.
+ *   </li>
+ * </ol></p>
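+ *
+ * <p>A rough sketch of the second option (illustrative only; error handling
+ * is omitted and the five second poll interval is arbitrary):
+ * <blockquote><pre>
+ *     JobClient client = new JobClient(job);
+ *     RunningJob running = client.submitJob(job);
+ *     while (!running.isComplete()) {
+ *       Thread.sleep(5000);
+ *       System.out.println("map " + running.mapProgress() +
+ *                          " reduce " + running.reduceProgress());
+ *     }
+ * </pre></blockquote></p>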

+ * + * @see JobConf + * @see ClusterStatus + * @see Tool + * @see DistributedCache + */ +public class JobClient extends Configured implements MRConstants, Tool { + private static final Log LOG = LogFactory.getLog(JobClient.class); + public static enum TaskStatusFilter { NONE, KILLED, FAILED, SUCCEEDED, ALL } + private TaskStatusFilter taskOutputFilter = TaskStatusFilter.FAILED; + private static final long MAX_JOBPROFILE_AGE = 1000 * 2; + + private static Random r = new Random(); + + static{ + Configuration.addDefaultResource("mapred-default.xml"); + Configuration.addDefaultResource("mapred-site.xml"); + } + + /** + * A NetworkedJob is an implementation of RunningJob. It holds + * a JobProfile object to provide some info, and interacts with the + * remote service to provide certain functionality. + */ + class NetworkedJob implements RunningJob { + JobProfile profile; + JobStatus status; + long statustime; + + /** + * We store a JobProfile and a timestamp for when we last + * acquired the job profile. If the job is null, then we cannot + * perform any of the tasks. The job might be null if the JobTracker + * has completely forgotten about the job. (eg, 24 hours after the + * job completes.) + */ + public NetworkedJob(JobStatus job) throws IOException { + this.status = job; + this.profile = jobSubmitClient.getJobProfile(job.getJobID()); + this.statustime = System.currentTimeMillis(); + } + + /** + * Some methods rely on having a recent job profile object. Refresh + * it, if necessary + */ + synchronized void ensureFreshStatus() throws IOException { + if (System.currentTimeMillis() - statustime > MAX_JOBPROFILE_AGE) { + updateStatus(); + } + } + + /** Some methods need to update status immediately. So, refresh + * immediately + * @throws IOException + */ + synchronized void updateStatus() throws IOException { + this.status = jobSubmitClient.getJobStatus(profile.getJobID()); + this.statustime = System.currentTimeMillis(); + } + + /** + * An identifier for the job + */ + public JobID getID() { + return profile.getJobID(); + } + + /** @deprecated This method is deprecated and will be removed. Applications should + * rather use {@link #getID()}.*/ + @Deprecated + public String getJobID() { + return profile.getJobID().toString(); + } + + /** + * The user-specified job name + */ + public String getJobName() { + return profile.getJobName(); + } + + /** + * The name of the job file + */ + public String getJobFile() { + return profile.getJobFile(); + } + + /** + * A URL where the job's status can be seen + */ + public String getTrackingURL() { + return profile.getURL().toString(); + } + + /** + * A float between 0.0 and 1.0, indicating the % of map work + * completed. + */ + public float mapProgress() throws IOException { + ensureFreshStatus(); + return status.mapProgress(); + } + + /** + * A float between 0.0 and 1.0, indicating the % of reduce work + * completed. + */ + public float reduceProgress() throws IOException { + ensureFreshStatus(); + return status.reduceProgress(); + } + + /** + * A float between 0.0 and 1.0, indicating the % of cleanup work + * completed. + */ + public float cleanupProgress() throws IOException { + ensureFreshStatus(); + return status.cleanupProgress(); + } + + /** + * A float between 0.0 and 1.0, indicating the % of setup work + * completed. + */ + public float setupProgress() throws IOException { + ensureFreshStatus(); + return status.setupProgress(); + } + + /** + * Returns immediately whether the whole job is done yet or not. 
+ */ + public synchronized boolean isComplete() throws IOException { + updateStatus(); + return (status.getRunState() == JobStatus.SUCCEEDED || + status.getRunState() == JobStatus.FAILED || + status.getRunState() == JobStatus.KILLED); + } + + /** + * True iff job completed successfully. + */ + public synchronized boolean isSuccessful() throws IOException { + updateStatus(); + return status.getRunState() == JobStatus.SUCCEEDED; + } + + /** + * Blocks until the job is finished + */ + public void waitForCompletion() throws IOException { + while (!isComplete()) { + try { + Thread.sleep(5000); + } catch (InterruptedException ie) { + } + } + } + + /** + * Tells the service to get the state of the current job. + */ + public synchronized int getJobState() throws IOException { + updateStatus(); + return status.getRunState(); + } + + /** + * Tells the service to terminate the current job. + */ + public synchronized void killJob() throws IOException { + jobSubmitClient.killJob(getID()); + } + + + /** Set the priority of the job. + * @param priority new priority of the job. + */ + public synchronized void setJobPriority(String priority) + throws IOException { + jobSubmitClient.setJobPriority(getID(), priority); + } + + /** + * Kill indicated task attempt. + * @param taskId the id of the task to kill. + * @param shouldFail if true the task is failed and added to failed tasks list, otherwise + * it is just killed, w/o affecting job failure status. + */ + public synchronized void killTask(TaskAttemptID taskId, boolean shouldFail) throws IOException { + jobSubmitClient.killTask(taskId, shouldFail); + } + + /** @deprecated Applications should rather use {@link #killTask(TaskAttemptID, boolean)}*/ + @Deprecated + public synchronized void killTask(String taskId, boolean shouldFail) throws IOException { + killTask(TaskAttemptID.forName(taskId), shouldFail); + } + + /** + * Fetch task completion events from jobtracker for this job. + */ + public synchronized TaskCompletionEvent[] getTaskCompletionEvents( + int startFrom) throws IOException{ + return jobSubmitClient.getTaskCompletionEvents( + getID(), startFrom, 10); + } + + /** + * Dump stats to screen + */ + @Override + public String toString() { + try { + updateStatus(); + } catch (IOException e) { + } + return "Job: " + profile.getJobID() + "\n" + + "file: " + profile.getJobFile() + "\n" + + "tracking URL: " + profile.getURL() + "\n" + + "map() completion: " + status.mapProgress() + "\n" + + "reduce() completion: " + status.reduceProgress(); + } + + /** + * Returns the counters for this job + */ + public Counters getCounters() throws IOException { + return jobSubmitClient.getJobCounters(getID()); + } + + @Override + public String[] getTaskDiagnostics(TaskAttemptID id) throws IOException { + return jobSubmitClient.getTaskDiagnostics(id); + } + } + + private JobSubmissionProtocol jobSubmitClient; + private Path sysDir = null; + + private FileSystem fs = null; + + /** + * Create a job client. + */ + public JobClient() { + } + + /** + * Build a job client with the given {@link JobConf}, and connect to the + * default {@link JobTracker}. + * + * @param conf the job configuration. + * @throws IOException + */ + public JobClient(JobConf conf) throws IOException { + setConf(conf); + init(conf); + } + + /** + * Connect to the default {@link JobTracker}. + * @param conf the job configuration. 
+ * @throws IOException + */ + public void init(JobConf conf) throws IOException { + String tracker = conf.get("mapred.job.tracker", "local"); + if ("local".equals(tracker)) { + this.jobSubmitClient = new LocalJobRunner(conf); + } else { + this.jobSubmitClient = createRPCProxy(JobTracker.getAddress(conf), conf); + } + } + + private JobSubmissionProtocol createRPCProxy(InetSocketAddress addr, + Configuration conf) throws IOException { + return (JobSubmissionProtocol) RPC.getProxy(JobSubmissionProtocol.class, + JobSubmissionProtocol.versionID, addr, getUGI(conf), conf, + NetUtils.getSocketFactory(conf, JobSubmissionProtocol.class)); + } + + /** + * Build a job client, connect to the indicated job tracker. + * + * @param jobTrackAddr the job tracker to connect to. + * @param conf configuration. + */ + public JobClient(InetSocketAddress jobTrackAddr, + Configuration conf) throws IOException { + jobSubmitClient = createRPCProxy(jobTrackAddr, conf); + } + + /** + * Close the JobClient. + */ + public synchronized void close() throws IOException { + if (!(jobSubmitClient instanceof LocalJobRunner)) { + RPC.stopProxy(jobSubmitClient); + } + } + + /** + * Get a filesystem handle. We need this to prepare jobs + * for submission to the MapReduce system. + * + * @return the filesystem handle. + */ + public synchronized FileSystem getFs() throws IOException { + if (this.fs == null) { + Path sysDir = getSystemDir(); + this.fs = sysDir.getFileSystem(getConf()); + } + return fs; + } + + /* see if two file systems are the same or not + * + */ + private boolean compareFs(FileSystem srcFs, FileSystem destFs) { + URI srcUri = srcFs.getUri(); + URI dstUri = destFs.getUri(); + if (srcUri.getScheme() == null) { + return false; + } + if (!srcUri.getScheme().equals(dstUri.getScheme())) { + return false; + } + String srcHost = srcUri.getHost(); + String dstHost = dstUri.getHost(); + if ((srcHost != null) && (dstHost != null)) { + try { + srcHost = InetAddress.getByName(srcHost).getCanonicalHostName(); + dstHost = InetAddress.getByName(dstHost).getCanonicalHostName(); + } catch(UnknownHostException ue) { + return false; + } + if (!srcHost.equals(dstHost)) { + return false; + } + } + else if (srcHost == null && dstHost != null) { + return false; + } + else if (srcHost != null && dstHost == null) { + return false; + } + //check for ports + if (srcUri.getPort() != dstUri.getPort()) { + return false; + } + return true; + } + + private Path copyRemoteFiles(FileSystem jtFs, Path parentDir, + Path originalPath, JobConf job, + short replication) throws IOException { + + return copyRemoteFiles(jtFs, parentDir, originalPath, job, replication, + null); + } + + // copies a file to the jobtracker filesystem and returns the path where it + // was copied to + private Path copyRemoteFiles(FileSystem jtFs, Path parentDir, + Path originalPath, JobConf job, + short replication, String md5) + throws IOException { + //check if we do not need to copy the files + // is jt using the same file system. + // just checking for uri strings... doing no dns lookups + // to see if the filesystems are the same. This is not optimal. + // but avoids name resolution. 
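+ // For example (hypothetical URIs): if the input path and the jobtracker's
+ // filesystem are both hdfs://nn.example.com:8020, compareFs treats them as
+ // the same filesystem and no copy is made; a file:/// path or a path on a
+ // different namenode host/port is copied below.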
+ + FileSystem remoteFs = null; + remoteFs = originalPath.getFileSystem(job); + if (compareFs(remoteFs, jtFs)) { + return originalPath; + } + + // This function is overloaded to support cache sharing when enabled + if (md5 != null) { + // Check if file already exists in cache + Path basePath = parentDir; + Path realPath = new Path(basePath, md5 + "_" + originalPath.getName()); + Path qualifiedRealPath = realPath.makeQualified(jtFs); + if (filesInCache.contains(qualifiedRealPath)) { + // We "touch" the file to update its access time + // This is done only 10% of the time to reduce load on the namenode + if (r.nextLong() % 10 == 0) { + jtFs.setTimes(realPath, -1, System.currentTimeMillis()); + } + + return qualifiedRealPath; + } + + // This loop should not even loop most of the time + Path newPath; + do { + newPath = new Path(basePath, "tmp_" + originalPath.getName() + + r.nextLong()); + } while (jtFs.exists(newPath)); + + FileUtil.copy(remoteFs, originalPath, jtFs, newPath, false, job); + jtFs.setReplication(newPath, replication); + jtFs.setPermission(newPath, new FsPermission(JOB_DIR_PERMISSION)); + + LOG.info ("Uploading new shared jar: " + realPath.toString()); + + if (!jtFs.rename(newPath, realPath)) { + // if there are multiple clients racing to upload the new jar - only + // one of them will succeed. Check if we failed because the file already + // exists. if so, ignore and move on + if (!jtFs.exists(realPath)) + throw new IOException ("Unable to upload or find shared jar: " + realPath.toString()); + } + + // Update the list + filesInCache.add(qualifiedRealPath); + + return qualifiedRealPath; + } + + // this might have name collisions. copy will throw an exception + // parse the original path to create new path + + Path newPath = new Path(parentDir, originalPath.getName()); + FileUtil.copy(remoteFs, originalPath, jtFs, newPath, false, job); + jtFs.setReplication(newPath, replication); + return jtFs.makeQualified(newPath); + } + + private Set filesInCache = null; + private long filesInCacheTs = 0; + private final static long FCACHE_REFRESH_INTERVAL = 1000L * 60 * 60; + + private void populateFileListings(FileSystem fs, Path[] f) { + + long now = System.currentTimeMillis(); + if (filesInCache != null && + now - filesInCacheTs < FCACHE_REFRESH_INTERVAL) { + // the list of uploaded files has been refreshed recently. + return; + } + + filesInCache = new HashSet(); + + for (int i = 0; i < f.length; i++) + localizeFileListings(fs, f[i]); + + filesInCacheTs = now; + } + + private void localizeFileListings(FileSystem fs, Path f) { + FileStatus[] lstatus; + try { + lstatus = fs.listStatus(f); + + for (int i = 0; i < lstatus.length; i++) { + if (!lstatus[i].isDir()) { + filesInCache.add(lstatus[i].getPath()); + } + } + } catch (Exception e) { + // If something goes wrong, the worst that can happen is that files don't + // get cached. Noting fatal. 
+ } + } + + private static class FileInfo { + String md5; + long fileLength; + long timeStamp; + + public FileInfo(String md5, long fileLength, long timeStamp) { + this.md5 = md5; + this.fileLength = fileLength; + this.timeStamp = timeStamp; + } + } + + Map fileInfo; + + /** + * configure the jobconf of the user with the command line options of + * -libjars, -files, -archives + * @param conf + * @throws IOException + */ + private void configureCommandLineOptions(JobConf job, Path uploadFileDir, + boolean shared) + throws IOException { + + if (!(job.getBoolean("mapred.used.genericoptionsparser", false))) { + LOG.warn("Use GenericOptionsParser for parsing the arguments. " + + "Applications should implement Tool for the same."); + } + + // get all the command line arguments into the + // jobconf passed in by the user conf + String files = null; + String libjars = null; + String archives = null; + + files = job.get("tmpfiles"); + libjars = job.get("tmpjars"); + archives = job.get("tmparchives"); + + // get the tmpjars from jobconfig and override the static conf with this if it is set + String libjars2 = job.get("tmpjars"); + if((libjars2 != null) && (!libjars2.isEmpty())) { + libjars = libjars2; + } + + // get the tmpfiles from jobconfig and override the static conf with this if it is set + String files2 = job.get("tmpfiles"); + if((files2 != null) && (!files2.isEmpty())) { + files = files2; + } + + /* + * set this user's id in job configuration, so later job files can be + * accessed using this user's id + */ + UnixUserGroupInformation ugi = getUGI(job); + + // + // Figure out what fs the JobTracker is using. Copy the + // job to it, under a temporary name. This allows DFS to work, + // and under the local fs also provides UNIX-like object loading + // semantics. (that is, if the job file is deleted right after + // submission, we can still run the submission to completion) + // + + // Create a number of filenames in the JobTracker's fs namespace + FileSystem fs = getFs(); + LOG.debug("default FileSystem: " + fs.getUri()); + + uploadFileDir = fs.makeQualified(uploadFileDir); + uploadFileDir = new Path(uploadFileDir.toUri().getPath()); + FsPermission mapredSysPerms = new FsPermission(JOB_DIR_PERMISSION); + + if (!fs.exists(uploadFileDir)) { + FileSystem.mkdirs(fs, uploadFileDir, mapredSysPerms); + } + Path filesDir = new Path(uploadFileDir, "files"); + Path archivesDir = new Path(uploadFileDir, "archives"); + Path libjarsDir = new Path(uploadFileDir, "libjars"); + short replication = (short)job.getInt("mapred.submit.replication", 10); + + if (shared) { + populateFileListings(fs, + new Path[] { filesDir, archivesDir, libjarsDir}); + } + + fileInfo = new HashMap(); + + String originalJar = job.getJar(); + + if (originalJar != null) { + // use jar name if job is not named. 
+ if ("".equals(job.getJobName())) { + job.setJobName(new Path(originalJar).getName()); + } + + Path uploadJarPath; + + Path originalJarPath = new Path(originalJar); + originalJarPath = + originalJarPath.makeQualified(FileSystem.getLocal(job)); + + try { + // If sharing is turned on, we load the job jar into the distributed + // cache + + if (shared) { + if (!fs.exists(libjarsDir)) { + FileSystem.mkdirs(fs, libjarsDir, mapredSysPerms); + } + + MD5Hash md5hash = MD5Hash.digest(new + FileInputStream(originalJarPath.toUri().getPath())); + uploadJarPath = copyRemoteFiles(fs, libjarsDir, originalJarPath, job, + replication, md5hash.toString()); + URI pathURI = new URI(uploadJarPath.toUri().toString()); + + DistributedCache.addSharedArchiveToClassPath(uploadJarPath, job); + fileInfo.put(pathURI, new FileInfo(md5hash.toString(), + md5hash.getFileLength(), 0)); + } else { + // Otherwise we copy jar to JT's filesystem + uploadJarPath = new Path(uploadFileDir, "job.jar"); + fs.copyFromLocalFile(originalJarPath, uploadJarPath); + } + } catch (URISyntaxException ue) { + // should not throw an uri exception + throw new IOException("Failed to create uri for " + originalJar); + } + + job.setJar(uploadJarPath.toString()); + fs.setReplication(uploadJarPath, replication); + + try { + fs.setPermission(uploadJarPath, new FsPermission(JOB_FILE_PERMISSION)); + } catch (IOException ioe) { + LOG.warn("Unable to set job jar permission"); + } + } else { + LOG.warn("No job jar file set. User classes may not be found. "+ + "See JobConf(Class) or JobConf#setJar(String)."); + } + + // add all the command line files/ jars and archive + // first copy them to jobtrackers filesystem + + if (files != null) { + if (!fs.exists(filesDir)) { + FileSystem.mkdirs(fs, filesDir, mapredSysPerms); + } + + String[] fileArr = files.split(","); + for (String tmpFile: fileArr) { + Path tmp = new Path(tmpFile); + + Path newPath; + FileStatus fStatus = null; + MD5Hash md5hash = null; + try { + if (shared) { + md5hash + = MD5Hash.digest(new FileInputStream(tmp.toUri().getPath())); + newPath = copyRemoteFiles(fs, filesDir, tmp, job, replication, + md5hash.toString()); + + URI pathURI = new URI(newPath.toUri().toString() + "#" + + newPath.getName()); + + DistributedCache.addSharedCacheFile(pathURI, job); + fileInfo.put(pathURI, new FileInfo(md5hash.toString(), + md5hash.getFileLength(), + 0)); + + } else { + newPath = copyRemoteFiles(fs, filesDir, tmp, job, replication); + fStatus = DistributedCache.getFileStatus(job, newPath.toUri()); + + URI pathURI = new URI(newPath.toUri().toString() + "#" + + newPath.getName()); + + DistributedCache.addCacheFile(pathURI, job); + fileInfo.put(pathURI, new FileInfo(null, + fStatus.getLen(), + fStatus.getModificationTime())); + } + + } catch(URISyntaxException ue) { + //should not throw a uri exception + throw new IOException("Failed to create uri for " + tmpFile); + } + DistributedCache.createSymlink(job); + } + } + + if (libjars != null) { + if (!fs.exists(libjarsDir)) { + FileSystem.mkdirs(fs, libjarsDir, mapredSysPerms); + } + + String[] libjarsArr = libjars.split(","); + for (String tmpjars: libjarsArr) { + Path tmp = new Path(tmpjars); + + Path newPath; + if (shared) { + MD5Hash md5hash + = MD5Hash.digest(new FileInputStream(tmp.toUri().getPath())); + newPath = copyRemoteFiles(fs, libjarsDir, tmp, job, replication, + md5hash.toString()); + DistributedCache.addSharedArchiveToClassPath(newPath, job); + + fileInfo.put(newPath.makeQualified(newPath.getFileSystem(job)).toUri(), + new 
FileInfo(md5hash.toString(), + md5hash.getFileLength(), + 0)); + } else { + newPath = copyRemoteFiles(fs, libjarsDir, tmp, job, replication); + DistributedCache.addArchiveToClassPath(newPath, job); + + FileStatus fStatus = DistributedCache.getFileStatus(job, + newPath.toUri()); + fileInfo.put(newPath.makeQualified(newPath.getFileSystem(job)).toUri(), + new FileInfo(null, + fStatus.getLen(), + fStatus.getModificationTime())); + } + } + } + + if (archives != null) { + if (!fs.exists(archivesDir)) { + FileSystem.mkdirs(fs, archivesDir, mapredSysPerms); + } + + String[] archivesArr = archives.split(","); + for (String tmpArchives: archivesArr) { + Path tmp = new Path(tmpArchives); + + Path newPath; + MD5Hash md5hash = null; + FileStatus fStatus = null; + try { + if (shared) { + md5hash + = MD5Hash.digest(new FileInputStream(tmp.toUri().getPath())); + newPath = copyRemoteFiles(fs, archivesDir, tmp, job, replication, + md5hash.toString()); + URI pathURI = new URI(newPath.toUri().toString() + "#" + + newPath.getName()); + + DistributedCache.addSharedCacheArchive(pathURI, job); + fileInfo.put(pathURI, new FileInfo(md5hash.toString(), + md5hash.getFileLength(), + 0)); + + } else { + newPath = copyRemoteFiles(fs, archivesDir, tmp, job, replication); + fStatus = DistributedCache.getFileStatus(job, newPath.toUri()); + + URI pathURI = new URI(newPath.toUri().toString() + "#" + + newPath.getName()); + + DistributedCache.addCacheArchive(pathURI, job); + fileInfo.put(pathURI, new FileInfo(null, + fStatus.getLen(), + fStatus.getModificationTime())); + } + } catch(URISyntaxException ue) { + //should not throw an uri excpetion + throw new IOException("Failed to create uri for " + tmpArchives); + } + DistributedCache.createSymlink(job); + } + } + + // set the timestamps and md5 of the archives and files + URI[] tarchives = DistributedCache.getSharedCacheArchives(job); + if (tarchives != null) { + StringBuffer archiveLength = new StringBuffer(); + FileStatus fStatus; + FileInfo info; + long fileLength; + + for (int i = 0; i < tarchives.length; i++) { + if (i != 0) archiveLength.append(','); + + info = fileInfo.get(tarchives[i]); + if (info == null) { + fStatus = DistributedCache.getFileStatus(job, tarchives[i]); + fileLength = fStatus.getLen(); + } else { + fileLength = info.fileLength; + } + + archiveLength.append(fileLength); + } + + DistributedCache.setSharedArchiveLength(job, archiveLength.toString()); + } + + URI[] tfiles = DistributedCache.getSharedCacheFiles(job); + if (tfiles != null) { + StringBuffer fileLength = new StringBuffer(); + FileStatus fStatus; + FileInfo info; + long len; + + for (int i = 0; i < tfiles.length; i++) { + if (i != 0) fileLength.append(','); + + info = fileInfo.get(tfiles[i]); + if (info == null) { + fStatus = DistributedCache.getFileStatus(job, tfiles[i]); + len = fStatus.getLen(); + } else { + len = info.fileLength; + } + + fileLength.append(len); + } + + DistributedCache.setSharedFileLength(job, fileLength.toString()); + } + + tarchives = DistributedCache.getCacheArchives(job); + if (tarchives != null) { + StringBuffer archiveTimestamps = new StringBuffer(); + FileInfo info; + long timeStamp; + + for (int i = 0; i < tarchives.length; i++) { + if (i != 0) archiveTimestamps.append(','); + + info = fileInfo.get(tarchives[i]); + if (info == null) { + timeStamp = DistributedCache.getTimestamp(job, tarchives[i]); + } else { + timeStamp = info.timeStamp; + } + archiveTimestamps.append(timeStamp); + } + + DistributedCache.setArchiveTimestamps(job, archiveTimestamps.toString()); + } 
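+
+    // Record modification timestamps for the regular (non-shared) cache files
+    // as well, mirroring the archive handling above; entries already recorded
+    // in the fileInfo map are reused instead of querying the filesystem again.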
+ + tfiles = DistributedCache.getCacheFiles(job); + if (tfiles != null) { + StringBuffer fileTimestamps = new StringBuffer(); + FileInfo info; + long timeStamp; + + for (int i = 0; i < tfiles.length; i++) { + if (i != 0) fileTimestamps.append(','); + + info = fileInfo.get(tfiles[i]); + if (info == null) { + timeStamp = DistributedCache.getTimestamp(job, tfiles[i]); + } else { + timeStamp = info.timeStamp; + } + fileTimestamps.append(timeStamp); + } + + DistributedCache.setFileTimestamps(job, fileTimestamps.toString()); + } + + // Set the user's name, group and working directory + job.setUser(ugi.getUserName()); + if (ugi.getGroupNames() != null && ugi.getGroupNames().length > 0) { + job.set("group.name", ugi.getGroupNames()[0]); + } + if (job.getWorkingDirectory() == null) { + job.setWorkingDirectory(fs.getWorkingDirectory()); + } + } + + private UnixUserGroupInformation getUGI(Configuration job) throws IOException { + UnixUserGroupInformation ugi = null; + try { + ugi = UnixUserGroupInformation.login(job, true); + } catch (LoginException e) { + throw (IOException)(new IOException( + "Failed to get the current user's information.").initCause(e)); + } + return ugi; + } + + /** + * Submit a job to the MR system. + * + * This returns a handle to the {@link RunningJob} which can be used to track + * the running-job. + * + * @param jobFile the job configuration. + * @return a handle to the {@link RunningJob} which can be used to track the + * running-job. + * @throws FileNotFoundException + * @throws InvalidJobConfException + * @throws IOException + */ + public RunningJob submitJob(String jobFile) throws FileNotFoundException, + InvalidJobConfException, + IOException { + // Load in the submitted job details + JobConf job = new JobConf(jobFile); + return submitJob(job); + } + + // job files are world-wide readable and owner writable + final private static FsPermission JOB_FILE_PERMISSION = + FsPermission.createImmutable((short) 0644); // rw-r--r-- + + // job submission directory is world readable/writable/executable + final static FsPermission JOB_DIR_PERMISSION = + FsPermission.createImmutable((short) 0777); // rwx-rwx-rwx + + /** + * Submit a job to the MR system. + * This returns a handle to the {@link RunningJob} which can be used to track + * the running-job. + * + * @param job the job configuration. + * @return a handle to the {@link RunningJob} which can be used to track the + * running-job. + * @throws FileNotFoundException + * @throws IOException + */ + public RunningJob submitJob(JobConf job) throws FileNotFoundException, + IOException { + try { + return submitJobInternal(job); + } catch (InterruptedException ie) { + throw new IOException("interrupted", ie); + } catch (ClassNotFoundException cnfe) { + throw new IOException("class not found", cnfe); + } + } + + /** + * Internal method for submitting jobs to the system. 
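+ * <p>The job jar and any -files, -libjars and -archives entries are first
+ * staged on the JobTracker's filesystem (or in the shared cache directory when
+ * mapred.cache.shared.enabled is set), the input splits and job.xml are then
+ * written out, and finally the job is handed to the JobTracker.</p>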
+ * @param job the configuration to submit + * @return a proxy object for the running job + * @throws FileNotFoundException + * @throws ClassNotFoundException + * @throws InterruptedException + * @throws IOException + */ + public + RunningJob submitJobInternal(JobConf job + ) throws FileNotFoundException, + ClassNotFoundException, + InterruptedException, + IOException { + /* + * configure the command line options correctly on the submitting dfs + */ + boolean shared = job.getBoolean("mapred.cache.shared.enabled", false); + + JobID jobId = jobSubmitClient.getNewJobId(); + Path submitJobDir = new Path(getSystemDir(), jobId.toString()); + Path sharedFilesDir = + new Path(getSystemDir(), "CAR"); + Path submitSplitFile = new Path(submitJobDir, "job.split"); + + configureCommandLineOptions(job, + (shared) ? sharedFilesDir : submitJobDir, + shared); + + Path submitJobFile = new Path(submitJobDir, "job.xml"); + int reduces = job.getNumReduceTasks(); + JobContext context = new JobContext(job, jobId); + + // Check the output specification + if (reduces == 0 ? job.getUseNewMapper() : job.getUseNewReducer()) { + org.apache.hadoop.mapreduce.OutputFormat output = + ReflectionUtils.newInstance(context.getOutputFormatClass(), job); + output.checkOutputSpecs(context); + } else { + job.getOutputFormat().checkOutputSpecs(fs, job); + } + + // Create the splits for the job + LOG.debug("Creating splits at " + fs.makeQualified(submitSplitFile)); + int maps; + if (job.getUseNewMapper()) { + maps = writeNewSplits(context, submitSplitFile); + } else { + maps = writeOldSplits(job, submitSplitFile); + } + job.set("mapred.job.split.file", submitSplitFile.toString()); + job.setNumMapTasks(maps); + + // Write job file to JobTracker's fs + FSDataOutputStream out = + FileSystem.create(fs, submitJobFile, + new FsPermission(JOB_FILE_PERMISSION)); + + try { + job.writeXml(out); + } finally { + out.close(); + } + + // + // Now, actually submit the job (using the submit name) + // + JobStatus status = jobSubmitClient.submitJob(jobId); + if (status != null) { + return new NetworkedJob(status); + } else { + throw new IOException("Could not launch job"); + } + } + + private int writeOldSplits(JobConf job, + Path submitSplitFile) throws IOException { + InputSplit[] splits = + job.getInputFormat().getSplits(job, job.getNumMapTasks()); + + // if the number of tasks exceed a configured limit, then display an + // apppropriate error message to the user. This check is also done by + // the JobTracker and is the right place to enforce it. But the check is + // done here too so that we can display an appropriate error message + // to the user. Here we check only the number of mappers whereas the + // JobTrcker applies this limit against the sum of mappers and reducers. 
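+    // For example, with mapred.jobtracker.maxtasks.per.job set to 100000 (a
+    // hypothetical value), a job whose input produces more than 100000 splits
+    // is rejected here with an IOException instead of failing later at the
+    // JobTracker.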
+ int maxTasks = job.getInt("mapred.jobtracker.maxtasks.per.job", -1); + if (maxTasks!= -1 && splits.length > maxTasks) { + throw new IOException( + "The number of tasks for this job " + + splits.length + + " exceeds the configured limit " + maxTasks); + } + + // sort the splits into order based on size, so that the biggest + // go first + Arrays.sort(splits, new Comparator() { + public int compare(InputSplit a, InputSplit b) { + try { + long left = a.getLength(); + long right = b.getLength(); + if (left == right) { + return 0; + } else if (left < right) { + return 1; + } else { + return -1; + } + } catch (IOException ie) { + throw new RuntimeException("Problem getting input split size", + ie); + } + } + }); + DataOutputStream out = writeSplitsFileHeader(job, submitSplitFile, splits.length); + + try { + DataOutputBuffer buffer = new DataOutputBuffer(); + RawSplit rawSplit = new RawSplit(); + for(InputSplit split: splits) { + rawSplit.setClassName(split.getClass().getName()); + buffer.reset(); + split.write(buffer); + rawSplit.setDataLength(split.getLength()); + rawSplit.setBytes(buffer.getData(), 0, buffer.getLength()); + rawSplit.setLocations(split.getLocations()); + rawSplit.write(out); + } + } finally { + out.close(); + } + return splits.length; + } + + private static class NewSplitComparator + implements Comparator{ + + @Override + public int compare(org.apache.hadoop.mapreduce.InputSplit o1, + org.apache.hadoop.mapreduce.InputSplit o2) { + try { + long len1 = o1.getLength(); + long len2 = o2.getLength(); + if (len1 < len2) { + return 1; + } else if (len1 == len2) { + return 0; + } else { + return -1; + } + } catch (IOException ie) { + throw new RuntimeException("exception in compare", ie); + } catch (InterruptedException ie) { + throw new RuntimeException("exception in compare", ie); + } + } + } + + @SuppressWarnings("unchecked") + private + int writeNewSplits(JobContext job, Path submitSplitFile + ) throws IOException, InterruptedException, + ClassNotFoundException { + JobConf conf = job.getJobConf(); + org.apache.hadoop.mapreduce.InputFormat input = + ReflectionUtils.newInstance(job.getInputFormatClass(), job.getJobConf()); + + List splits = input.getSplits(job); + T[] array = (T[]) + splits.toArray(new org.apache.hadoop.mapreduce.InputSplit[splits.size()]); + + // sort the splits into order based on size, so that the biggest + // go first + Arrays.sort(array, new NewSplitComparator()); + DataOutputStream out = writeSplitsFileHeader(conf, submitSplitFile, + array.length); + try { + if (array.length != 0) { + DataOutputBuffer buffer = new DataOutputBuffer(); + RawSplit rawSplit = new RawSplit(); + SerializationFactory factory = new SerializationFactory(conf); + Serializer serializer = + factory.getSerializer((Class) array[0].getClass()); + serializer.open(buffer); + for(T split: array) { + rawSplit.setClassName(split.getClass().getName()); + buffer.reset(); + serializer.serialize(split); + rawSplit.setDataLength(split.getLength()); + rawSplit.setBytes(buffer.getData(), 0, buffer.getLength()); + rawSplit.setLocations(split.getLocations()); + rawSplit.write(out); + } + serializer.close(); + } + } finally { + out.close(); + } + return array.length; + } + + /** + * Checks if the job directory is clean and has all the required components + * for (re) starting the job + */ + public static boolean isJobDirValid(Path jobDirPath, FileSystem fs) + throws IOException { + FileStatus[] contents = fs.listStatus(jobDirPath); + int matchCount = 0; + if (contents != null && contents.length >=2) { + 
for (FileStatus status : contents) { + if ("job.xml".equals(status.getPath().getName())) { + ++matchCount; + } + if ("job.split".equals(status.getPath().getName())) { + ++matchCount; + } + } + if (matchCount == 2) { + return true; + } + } + return false; + } + + static class RawSplit implements Writable { + private String splitClass; + private BytesWritable bytes = new BytesWritable(); + private String[] locations; + long dataLength; + + public void setBytes(byte[] data, int offset, int length) { + bytes.set(data, offset, length); + } + + public void setClassName(String className) { + splitClass = className; + } + + public String getClassName() { + return splitClass; + } + + public BytesWritable getBytes() { + return bytes; + } + + public void clearBytes() { + bytes = null; + } + + public void setLocations(String[] locations) { + this.locations = locations; + } + + public String[] getLocations() { + return locations; + } + + public void readFields(DataInput in) throws IOException { + splitClass = Text.readString(in); + dataLength = in.readLong(); + bytes.readFields(in); + int len = WritableUtils.readVInt(in); + locations = new String[len]; + for(int i=0; i < len; ++i) { + locations[i] = Text.readString(in); + } + } + + public void write(DataOutput out) throws IOException { + Text.writeString(out, splitClass); + out.writeLong(dataLength); + bytes.write(out); + WritableUtils.writeVInt(out, locations.length); + for(int i = 0; i < locations.length; i++) { + Text.writeString(out, locations[i]); + } + } + + public long getDataLength() { + return dataLength; + } + public void setDataLength(long l) { + dataLength = l; + } + + } + + private static final int CURRENT_SPLIT_FILE_VERSION = 0; + private static final byte[] SPLIT_FILE_HEADER = "SPL".getBytes(); + + private DataOutputStream writeSplitsFileHeader(Configuration conf, + Path filename, + int length + ) throws IOException { + // write the splits to a file for the job tracker + FileSystem fs = filename.getFileSystem(conf); + FSDataOutputStream out = + FileSystem.create(fs, filename, new FsPermission(JOB_FILE_PERMISSION)); + out.write(SPLIT_FILE_HEADER); + WritableUtils.writeVInt(out, CURRENT_SPLIT_FILE_VERSION); + WritableUtils.writeVInt(out, length); + return out; + } + + /** Create the list of input splits and write them out in a file for + *the JobTracker. The format is: + * + * + * for each split: + * + * @param splits the input splits to write out + * @param out the stream to write to + */ + private void writeOldSplitsFile(InputSplit[] splits, + FSDataOutputStream out) throws IOException { + } + + /** + * Read a splits file into a list of raw splits + * @param in the stream to read from + * @return the complete list of splits + * @throws IOException + */ + static RawSplit[] readSplitFile(DataInput in) throws IOException { + byte[] header = new byte[SPLIT_FILE_HEADER.length]; + in.readFully(header); + if (!Arrays.equals(SPLIT_FILE_HEADER, header)) { + throw new IOException("Invalid header on split file"); + } + int vers = WritableUtils.readVInt(in); + if (vers != CURRENT_SPLIT_FILE_VERSION) { + throw new IOException("Unsupported split version " + vers); + } + int len = WritableUtils.readVInt(in); + RawSplit[] result = new RawSplit[len]; + for(int i=0; i < len; ++i) { + result[i] = new RawSplit(); + result[i].readFields(in); + } + return result; + } + + /** + * Get an {@link RunningJob} object to track an ongoing job. Returns + * null if the id does not correspond to any known job. + * + * @param jobid the jobid of the job. 
+ * @return the {@link RunningJob} handle to track the job, null if the + * jobid doesn't correspond to any known job. + * @throws IOException + */ + public RunningJob getJob(JobID jobid) throws IOException { + JobStatus status = jobSubmitClient.getJobStatus(jobid); + if (status != null) { + return new NetworkedJob(status); + } else { + return null; + } + } + + /**@deprecated Applications should rather use {@link #getJob(JobID)}. + */ + @Deprecated + public RunningJob getJob(String jobid) throws IOException { + return getJob(JobID.forName(jobid)); + } + + /** + * Get the information of the current state of the map tasks of a job. + * + * @param jobId the job to query. + * @return the list of all of the map tips. + * @throws IOException + */ + public TaskReport[] getMapTaskReports(JobID jobId) throws IOException { + return jobSubmitClient.getMapTaskReports(jobId); + } + + /**@deprecated Applications should rather use {@link #getMapTaskReports(JobID)}*/ + @Deprecated + public TaskReport[] getMapTaskReports(String jobId) throws IOException { + return getMapTaskReports(JobID.forName(jobId)); + } + + /** + * Get the information of the current state of the reduce tasks of a job. + * + * @param jobId the job to query. + * @return the list of all of the reduce tips. + * @throws IOException + */ + public TaskReport[] getReduceTaskReports(JobID jobId) throws IOException { + return jobSubmitClient.getReduceTaskReports(jobId); + } + + /** + * Get the information of the current state of the cleanup tasks of a job. + * + * @param jobId the job to query. + * @return the list of all of the cleanup tips. + * @throws IOException + */ + public TaskReport[] getCleanupTaskReports(JobID jobId) throws IOException { + return jobSubmitClient.getCleanupTaskReports(jobId); + } + + /** + * Get the information of the current state of the setup tasks of a job. + * + * @param jobId the job to query. + * @return the list of all of the setup tips. 
+ * @throws IOException + */ + public TaskReport[] getSetupTaskReports(JobID jobId) throws IOException { + return jobSubmitClient.getSetupTaskReports(jobId); + } + + /**@deprecated Applications should rather use {@link #getReduceTaskReports(JobID)}*/ + @Deprecated + public TaskReport[] getReduceTaskReports(String jobId) throws IOException { + return getReduceTaskReports(JobID.forName(jobId)); + } + + /** + * Display the information about a job's tasks, of a particular type and + * in a particular state + * + * @param jobId the ID of the job + * @param type the type of the task (map/reduce/setup/cleanup) + * @param state the state of the task + * (pending/running/completed/failed/killed) + */ + public void displayTasks(JobID jobId, String type, String state) + throws IOException { + TaskReport[] reports = new TaskReport[0]; + if (type.equals("map")) { + reports = getMapTaskReports(jobId); + } else if (type.equals("reduce")) { + reports = getReduceTaskReports(jobId); + } else if (type.equals("setup")) { + reports = getSetupTaskReports(jobId); + } else if (type.equals("cleanup")) { + reports = getCleanupTaskReports(jobId); + } + for (TaskReport report : reports) { + TIPStatus status = report.getCurrentStatus(); + if ((state.equals("pending") && status ==TIPStatus.PENDING) || + (state.equals("running") && status ==TIPStatus.RUNNING) || + (state.equals("completed") && status == TIPStatus.COMPLETE) || + (state.equals("failed") && status == TIPStatus.FAILED) || + (state.equals("killed") && status == TIPStatus.KILLED)) { + printTaskAttempts(report); + } + } + } + private void printTaskAttempts(TaskReport report) { + if (report.getCurrentStatus() == TIPStatus.COMPLETE) { + System.out.println(report.getSuccessfulTaskAttempt()); + } else if (report.getCurrentStatus() == TIPStatus.RUNNING) { + for (TaskAttemptID t : + report.getRunningTaskAttempts()) { + System.out.println(t); + } + } + } + /** + * Get status information about the Map-Reduce cluster. + * + * @return the status information about the Map-Reduce cluster as an object + * of {@link ClusterStatus}. + * @throws IOException + */ + public ClusterStatus getClusterStatus() throws IOException { + return getClusterStatus(false); + } + + /** + * Get status information about the Map-Reduce cluster. + * + * @param detailed if true then get a detailed status including the + * tracker names + * @return the status information about the Map-Reduce cluster as an object + * of {@link ClusterStatus}. + * @throws IOException + */ + public ClusterStatus getClusterStatus(boolean detailed) throws IOException { + return jobSubmitClient.getClusterStatus(detailed); + } + + + /** + * Get the jobs that are not completed and not failed. + * + * @return array of {@link JobStatus} for the running/to-be-run jobs. + * @throws IOException + */ + public JobStatus[] jobsToComplete() throws IOException { + return jobSubmitClient.jobsToComplete(); + } + + private static void downloadProfile(TaskCompletionEvent e + ) throws IOException { + URLConnection connection = + new URL(getTaskLogURL(e.getTaskAttemptId(), e.getTaskTrackerHttp()) + + "&filter=profile").openConnection(); + InputStream in = connection.getInputStream(); + OutputStream out = new FileOutputStream(e.getTaskAttemptId() + ".profile"); + IOUtils.copyBytes(in, out, 64 * 1024, true); + } + + /** + * Get the jobs that are submitted. + * + * @return array of {@link JobStatus} for the submitted jobs. 
+ * @throws IOException + */ + public JobStatus[] getAllJobs() throws IOException { + return jobSubmitClient.getAllJobs(); + } + + /** + * Utility that submits a job, then polls for progress until the job is + * complete. + * + * @param job the job configuration. + * @throws IOException if the job fails + */ + public static RunningJob runJob(JobConf job) throws IOException { + JobClient jc = new JobClient(job); + RunningJob rj = jc.submitJob(job); + try { + if (!jc.monitorAndPrintJob(job, rj)) { + throw new IOException("Job failed!"); + } + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + } + return rj; + } + + /** + * Monitor a job and print status in real-time as progress is made and tasks + * fail. + * @param conf the job's configuration + * @param job the job to track + * @return true if the job succeeded + * @throws IOException if communication to the JobTracker fails + */ + public boolean monitorAndPrintJob(JobConf conf, + RunningJob job + ) throws IOException, InterruptedException { + String lastReport = null; + TaskStatusFilter filter; + filter = getTaskOutputFilter(conf); + JobID jobId = job.getID(); + LOG.info("Running job: " + jobId); + int eventCounter = 0; + boolean profiling = conf.getProfileEnabled(); + Configuration.IntegerRanges mapRanges = conf.getProfileTaskRange(true); + Configuration.IntegerRanges reduceRanges = conf.getProfileTaskRange(false); + + while (!job.isComplete()) { + Thread.sleep(MAX_JOBPROFILE_AGE); + String report = + (" map " + StringUtils.formatPercent(job.mapProgress(), 0)+ + " reduce " + + StringUtils.formatPercent(job.reduceProgress(), 0)); + if (!report.equals(lastReport)) { + LOG.info(report); + lastReport = report; + } + + TaskCompletionEvent[] events = + job.getTaskCompletionEvents(eventCounter); + eventCounter += events.length; + for(TaskCompletionEvent event : events){ + TaskCompletionEvent.Status status = event.getTaskStatus(); + if (profiling && + (status == TaskCompletionEvent.Status.SUCCEEDED || + status == TaskCompletionEvent.Status.FAILED) && + (event.isMap ? mapRanges : reduceRanges). 
+ isIncluded(event.idWithinJob())) { + downloadProfile(event); + } + switch(filter){ + case NONE: + break; + case SUCCEEDED: + if (event.getTaskStatus() == + TaskCompletionEvent.Status.SUCCEEDED){ + LOG.info(event.toString()); + displayTaskLogs(event.getTaskAttemptId(), event.getTaskTrackerHttp()); + } + break; + case FAILED: + if (event.getTaskStatus() == + TaskCompletionEvent.Status.FAILED){ + LOG.info(event.toString()); + // Displaying the task diagnostic information + TaskAttemptID taskId = event.getTaskAttemptId(); + String[] taskDiagnostics = + jobSubmitClient.getTaskDiagnostics(taskId); + if (taskDiagnostics != null) { + for(String diagnostics : taskDiagnostics){ + System.err.println(diagnostics); + } + } + // Displaying the task logs + displayTaskLogs(event.getTaskAttemptId(), event.getTaskTrackerHttp()); + } + break; + case KILLED: + if (event.getTaskStatus() == TaskCompletionEvent.Status.KILLED){ + LOG.info(event.toString()); + } + break; + case ALL: + LOG.info(event.toString()); + displayTaskLogs(event.getTaskAttemptId(), event.getTaskTrackerHttp()); + break; + } + } + } + LOG.info("Job complete: " + jobId); + Counters counters = job.getCounters(); + if (counters != null) { + counters.log(LOG); + } + return job.isSuccessful(); + } + + static String getTaskLogURL(TaskAttemptID taskId, String baseUrl) { + return (baseUrl + "/tasklog?plaintext=true&taskid=" + taskId); + } + + private static void displayTaskLogs(TaskAttemptID taskId, String baseUrl) + throws IOException { + // The tasktracker for a 'failed/killed' job might not be around... + if (baseUrl != null) { + // Construct the url for the tasklogs + String taskLogUrl = getTaskLogURL(taskId, baseUrl); + + // Copy tasks's stdout of the JobClient + getTaskLogs(taskId, new URL(taskLogUrl+"&filter=stdout"), System.out); + + // Copy task's stderr to stderr of the JobClient + getTaskLogs(taskId, new URL(taskLogUrl+"&filter=stderr"), System.err); + } + } + + private static void getTaskLogs(TaskAttemptID taskId, URL taskLogUrl, + OutputStream out) { + try { + URLConnection connection = taskLogUrl.openConnection(); + BufferedReader input = + new BufferedReader(new InputStreamReader(connection.getInputStream())); + BufferedWriter output = + new BufferedWriter(new OutputStreamWriter(out)); + try { + String logData = null; + while ((logData = input.readLine()) != null) { + if (logData.length() > 0) { + output.write(taskId + ": " + logData + "\n"); + output.flush(); + } + } + } finally { + input.close(); + } + }catch(IOException ioe){ + LOG.warn("Error reading task output" + ioe.getMessage()); + } + } + + static Configuration getConfiguration(String jobTrackerSpec) + { + Configuration conf = new Configuration(); + if (jobTrackerSpec != null) { + if (jobTrackerSpec.indexOf(":") >= 0) { + conf.set("mapred.job.tracker", jobTrackerSpec); + } else { + String classpathFile = "hadoop-" + jobTrackerSpec + ".xml"; + URL validate = conf.getResource(classpathFile); + if (validate == null) { + throw new RuntimeException(classpathFile + " not found on CLASSPATH"); + } + conf.addResource(classpathFile); + } + } + return conf; + } + + /** + * Sets the output filter for tasks. only those tasks are printed whose + * output matches the filter. + * @param newValue task filter. + */ + @Deprecated + public void setTaskOutputFilter(TaskStatusFilter newValue){ + this.taskOutputFilter = newValue; + } + + /** + * Get the task output filter out of the JobConf. + * + * @param job the JobConf to examine. + * @return the filter level. 
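+ * <p>The filter is read from the <code>jobclient.output.filter</code>
+ * property and defaults to <code>FAILED</code>.</p>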
+ */ + public static TaskStatusFilter getTaskOutputFilter(JobConf job) { + return TaskStatusFilter.valueOf(job.get("jobclient.output.filter", + "FAILED")); + } + + /** + * Modify the JobConf to set the task output filter. + * + * @param job the JobConf to modify. + * @param newValue the value to set. + */ + public static void setTaskOutputFilter(JobConf job, + TaskStatusFilter newValue) { + job.set("jobclient.output.filter", newValue.toString()); + } + + /** + * Returns task output filter. + * @return task filter. + */ + @Deprecated + public TaskStatusFilter getTaskOutputFilter(){ + return this.taskOutputFilter; + } + + private String getJobPriorityNames() { + StringBuffer sb = new StringBuffer(); + for (JobPriority p : JobPriority.values()) { + sb.append(p.name()).append(" "); + } + return sb.substring(0, sb.length()-1); + } + + /** + * Display usage of the command-line tool and terminate execution + */ + private void displayUsage(String cmd) { + String prefix = "Usage: JobClient "; + String jobPriorityValues = getJobPriorityNames(); + String taskTypes = "map, reduce, setup, cleanup"; + String taskStates = "running, completed"; + if("-submit".equals(cmd)) { + System.err.println(prefix + "[" + cmd + " ]"); + } else if ("-status".equals(cmd) || "-kill".equals(cmd)) { + System.err.println(prefix + "[" + cmd + " ]"); + } else if ("-counter".equals(cmd)) { + System.err.println(prefix + "[" + cmd + " ]"); + } else if ("-events".equals(cmd)) { + System.err.println(prefix + "[" + cmd + " <#-of-events>]"); + } else if ("-history".equals(cmd)) { + System.err.println(prefix + "[" + cmd + " ]"); + } else if ("-list".equals(cmd)) { + System.err.println(prefix + "[" + cmd + " [all]]"); + } else if ("-kill-task".equals(cmd) || "-fail-task".equals(cmd)) { + System.err.println(prefix + "[" + cmd + " ]"); + } else if ("-set-priority".equals(cmd)) { + System.err.println(prefix + "[" + cmd + " ]. " + + "Valid values for priorities are: " + + jobPriorityValues); + } else if ("-list-active-trackers".equals(cmd)) { + System.err.println(prefix + "[" + cmd + "]"); + } else if ("-list-blacklisted-trackers".equals(cmd)) { + System.err.println(prefix + "[" + cmd + "]"); + } else if ("-list-trackers".equals(cmd)) { + System.err.println(prefix + "[" + cmd + "]"); + } else if ("-list-attempt-ids".equals(cmd)) { + System.err.println(prefix + "[" + cmd + + " ]. " + + "Valid values for are " + taskTypes + ". " + + "Valid values for are " + taskStates); + } else { + System.err.printf(prefix + " \n"); + System.err.printf("\t[-submit ]\n"); + System.err.printf("\t[-status ]\n"); + System.err.printf("\t[-counter ]\n"); + System.err.printf("\t[-kill ]\n"); + System.err.printf("\t[-set-priority ]. 
" + + "Valid values for priorities are: " + + jobPriorityValues + "\n"); + System.err.printf("\t[-events <#-of-events>]\n"); + System.err.printf("\t[-history ]\n"); + System.err.printf("\t[-list [all]]\n"); + System.err.printf("\t[-list-active-trackers]\n"); + System.err.printf("\t[-list-blacklisted-trackers]\n"); + System.err.printf("\t[-list-trackers]\n"); + System.err.println("\t[-list-attempt-ids " + + "]\n"); + System.err.printf("\t[-kill-task ]\n"); + System.err.printf("\t[-fail-task ]\n\n"); + ToolRunner.printGenericCommandUsage(System.out); + } + } + + public int run(String[] argv) throws Exception { + int exitCode = -1; + if (argv.length < 1) { + displayUsage(""); + return exitCode; + } + // process arguments + String cmd = argv[0]; + String submitJobFile = null; + String jobid = null; + String taskid = null; + String outputDir = null; + String counterGroupName = null; + String counterName = null; + String newPriority = null; + String taskType = null; + String taskState = null; + int fromEvent = 0; + int nEvents = 0; + boolean getStatus = false; + boolean getCounter = false; + boolean killJob = false; + boolean listEvents = false; + boolean viewHistory = false; + boolean viewAllHistory = false; + boolean listJobs = false; + boolean listAllJobs = false; + boolean listActiveTrackers = false; + boolean listBlacklistedTrackers = false; + boolean listTrackers = false; + boolean displayTasks = false; + boolean killTask = false; + boolean failTask = false; + boolean setJobPriority = false; + + if ("-submit".equals(cmd)) { + if (argv.length != 2) { + displayUsage(cmd); + return exitCode; + } + submitJobFile = argv[1]; + } else if ("-status".equals(cmd)) { + if (argv.length != 2) { + displayUsage(cmd); + return exitCode; + } + jobid = argv[1]; + getStatus = true; + } else if("-counter".equals(cmd)) { + if (argv.length != 4) { + displayUsage(cmd); + return exitCode; + } + getCounter = true; + jobid = argv[1]; + counterGroupName = argv[2]; + counterName = argv[3]; + } else if ("-kill".equals(cmd)) { + if (argv.length != 2) { + displayUsage(cmd); + return exitCode; + } + jobid = argv[1]; + killJob = true; + } else if ("-set-priority".equals(cmd)) { + if (argv.length != 3) { + displayUsage(cmd); + return exitCode; + } + jobid = argv[1]; + newPriority = argv[2]; + try { + JobPriority jp = JobPriority.valueOf(newPriority); + } catch (IllegalArgumentException iae) { + displayUsage(cmd); + return exitCode; + } + setJobPriority = true; + } else if ("-events".equals(cmd)) { + if (argv.length != 4) { + displayUsage(cmd); + return exitCode; + } + jobid = argv[1]; + fromEvent = Integer.parseInt(argv[2]); + nEvents = Integer.parseInt(argv[3]); + listEvents = true; + } else if ("-history".equals(cmd)) { + if (argv.length != 2 && !(argv.length == 3 && "all".equals(argv[1]))) { + displayUsage(cmd); + return exitCode; + } + viewHistory = true; + if (argv.length == 3 && "all".equals(argv[1])) { + viewAllHistory = true; + outputDir = argv[2]; + } else { + outputDir = argv[1]; + } + } else if ("-list".equals(cmd)) { + if (argv.length != 1 && !(argv.length == 2 && "all".equals(argv[1]))) { + displayUsage(cmd); + return exitCode; + } + if (argv.length == 2 && "all".equals(argv[1])) { + listAllJobs = true; + } else { + listJobs = true; + } + } else if("-kill-task".equals(cmd)) { + if(argv.length != 2) { + displayUsage(cmd); + return exitCode; + } + killTask = true; + taskid = argv[1]; + } else if("-fail-task".equals(cmd)) { + if(argv.length != 2) { + displayUsage(cmd); + return exitCode; + } + failTask = true; + 
taskid = argv[1]; + } else if ("-list-active-trackers".equals(cmd)) { + if (argv.length != 1) { + displayUsage(cmd); + return exitCode; + } + listActiveTrackers = true; + } else if ("-list-blacklisted-trackers".equals(cmd)) { + if (argv.length != 1) { + displayUsage(cmd); + return exitCode; + } + listBlacklistedTrackers = true; + } else if ("-list-trackers".equals(cmd)) { + if (argv.length != 1) { + displayUsage(cmd); + return exitCode; + } + listTrackers = true; + } else if ("-list-attempt-ids".equals(cmd)) { + if (argv.length != 4) { + displayUsage(cmd); + return exitCode; + } + jobid = argv[1]; + taskType = argv[2]; + taskState = argv[3]; + displayTasks = true; + } else { + displayUsage(cmd); + return exitCode; + } + + // initialize JobClient + JobConf conf = null; + if (submitJobFile != null) { + conf = new JobConf(submitJobFile); + } else { + conf = new JobConf(getConf()); + } + init(conf); + + // Submit the request + try { + if (submitJobFile != null) { + RunningJob job = submitJob(conf); + System.out.println("Created job " + job.getID()); + exitCode = 0; + } else if (getStatus) { + RunningJob job = getJob(JobID.forName(jobid)); + if (job == null) { + System.out.println("Could not find job " + jobid); + } else { + System.out.println(); + System.out.println(job); + Counters counters = job.getCounters(); + if (counters != null) { + System.out.println(counters); + } else { + System.out.println("Counters not available. Job is retired."); + } + exitCode = 0; + } + } else if (getCounter) { + RunningJob job = getJob(JobID.forName(jobid)); + if (job == null) { + System.out.println("Could not find job " + jobid); + } else { + Counters counters = job.getCounters(); + if (counters == null) { + System.out.println("Counters not available for retired job " + + jobid); + exitCode = -1; + } else { + Group group = counters.getGroup(counterGroupName); + Counter counter = group.getCounterForName(counterName); + System.out.println(counter.getCounter()); + exitCode = 0; + } + } + } else if (killJob) { + RunningJob job = getJob(JobID.forName(jobid)); + if (job == null) { + System.out.println("Could not find job " + jobid); + } else { + job.killJob(); + System.out.println("Killed job " + jobid); + exitCode = 0; + } + } else if (setJobPriority) { + RunningJob job = getJob(JobID.forName(jobid)); + if (job == null) { + System.out.println("Could not find job " + jobid); + } else { + job.setJobPriority(newPriority); + System.out.println("Changed job priority."); + exitCode = 0; + } + } else if (viewHistory) { + viewHistory(outputDir, viewAllHistory); + exitCode = 0; + } else if (listEvents) { + listEvents(JobID.forName(jobid), fromEvent, nEvents); + exitCode = 0; + } else if (listJobs) { + listJobs(); + exitCode = 0; + } else if (listAllJobs) { + listAllJobs(); + exitCode = 0; + } else if (listActiveTrackers) { + listActiveTrackers(); + exitCode = 0; + } else if (listBlacklistedTrackers) { + listBlacklistedTrackers(); + exitCode = 0; + } else if (listTrackers) { + listTrackers(); + exitCode = 0; + } else if (displayTasks) { + displayTasks(JobID.forName(jobid), taskType, taskState); + } else if(killTask) { + if(jobSubmitClient.killTask(TaskAttemptID.forName(taskid), false)) { + System.out.println("Killed task " + taskid); + exitCode = 0; + } else { + System.out.println("Could not kill task " + taskid); + exitCode = -1; + } + } else if(failTask) { + if(jobSubmitClient.killTask(TaskAttemptID.forName(taskid), true)) { + System.out.println("Killed task " + taskid + " by failing it"); + exitCode = 0; + } else { + 
System.out.println("Could not fail task " + taskid); + exitCode = -1; + } + } + } finally { + close(); + } + return exitCode; + } + + private void viewHistory(String outputDir, boolean all) + throws IOException { + HistoryViewer historyViewer = new HistoryViewer(outputDir, + getConf(), all); + historyViewer.print(); + } + + /** + * List the events for the given job + * @param jobId the job id for the job's events to list + * @throws IOException + */ + private void listEvents(JobID jobId, int fromEventId, int numEvents) + throws IOException { + TaskCompletionEvent[] events = + jobSubmitClient.getTaskCompletionEvents(jobId, fromEventId, numEvents); + System.out.println("Task completion events for " + jobId); + System.out.println("Number of events (from " + fromEventId + + ") are: " + events.length); + for(TaskCompletionEvent event: events) { + System.out.println(event.getTaskStatus() + " " + event.getTaskAttemptId() + " " + + getTaskLogURL(event.getTaskAttemptId(), + event.getTaskTrackerHttp())); + } + } + + /** + * Dump a list of currently running jobs + * @throws IOException + */ + private void listJobs() throws IOException { + JobStatus[] jobs = jobsToComplete(); + if (jobs == null) + jobs = new JobStatus[0]; + + System.out.printf("%d jobs currently running\n", jobs.length); + displayJobList(jobs); + } + + /** + * Dump a list of all jobs submitted. + * @throws IOException + */ + private void listAllJobs() throws IOException { + JobStatus[] jobs = getAllJobs(); + if (jobs == null) + jobs = new JobStatus[0]; + System.out.printf("%d jobs submitted\n", jobs.length); + System.out.printf("States are:\n\tRunning : 1\tSucceded : 2" + + "\tFailed : 3\tPrep : 4\n"); + displayJobList(jobs); + } + + /** + * Display the list of active trackers + */ + private void listActiveTrackers() throws IOException { + ClusterStatus c = jobSubmitClient.getClusterStatus(true); + Collection trackers = c.getActiveTrackerNames(); + for (String trackerName : trackers) { + System.out.println(trackerName); + } + } + + /** + * Display the list of blacklisted trackers + */ + private void listBlacklistedTrackers() throws IOException { + ClusterStatus c = jobSubmitClient.getClusterStatus(true); + Collection trackers = c.getBlacklistedTrackerNames(); + for (String trackerName : trackers) { + System.out.println(trackerName); + } + } + + /** + * Display the stats of the cluster with per tracker details + * @throws IOException + */ + private void listTrackers() throws IOException { + ClusterStatus fullStatus = jobSubmitClient.getClusterStatus(true); + Collection trackers = + fullStatus.getTaskTrackersDetails(); + Set activeTrackers = + new HashSet(fullStatus.getActiveTrackerNames()); + List mapsProgress = new ArrayList(); + List reducesProgress = new ArrayList(); + int finishedMapsFromRunningJobs = 0; + int finishedReducesFromRunningJobs = 0; + + System.out.println("Total Map Tasks in Running Jobs: " + + fullStatus.getTotalMapTasks()); + System.out.println("Total Reduce Tasks in Running Jobs: " + + fullStatus.getTotalReduceTasks()); + + for (TaskTrackerStatus tracker : trackers) { + System.out.println(tracker.getTrackerName()); + + + //List tasks = tracker.getTaskReports(); + Collection tasks = + fullStatus.getTaskTrackerTasksStatuses(tracker.getTrackerName()); + for (TaskStatus task : tasks) { + TaskStatus.State state = task.getRunState(); + if (task.getIsMap() && + (state == TaskStatus.State.RUNNING || + state == TaskStatus.State.UNASSIGNED)) { + mapsProgress.add(task.getProgress()); + } else if (!task.getIsMap() && + (state 
== TaskStatus.State.RUNNING || + state == TaskStatus.State.UNASSIGNED)) { + reducesProgress.add(task.getProgress()); + } else if (task.getIsMap() && state == TaskStatus.State.SUCCEEDED) { + finishedMapsFromRunningJobs++; + } else if (!task.getIsMap() && state == TaskStatus.State.SUCCEEDED) { + finishedReducesFromRunningJobs++; + } + } + + if (activeTrackers.contains(tracker.getTrackerName())) { + System.out.println("\tActive"); + } else { + System.out.println("\tBlacklisted"); + } + System.out.println("\tLast Seen: " + tracker.getLastSeen()); + + System.out.println("\tMap Tasks Running: " + tracker.countMapTasks() + + "/" + tracker.getMaxMapSlots()); + System.out.println("\tMap Tasks Progress: " + mapsProgress.toString()); + System.out.println("\tFinished Map Tasks From Running Jobs: " + + finishedMapsFromRunningJobs); + System.out.println("\tReduce Tasks Running: " + + tracker.countReduceTasks() + "/" + tracker.getMaxReduceSlots()); + System.out.println("\tReduce Tasks Progress: " + + reducesProgress.toString()); + + System.out.println("\tTask Tracker Failures: " + tracker.getFailures()); + mapsProgress.clear(); + reducesProgress.clear(); + } + } + + + void displayJobList(JobStatus[] jobs) { + System.out.printf("JobId\tState\tStartTime\tUserName\tPriority\tSchedulingInfo\n"); + for (JobStatus job : jobs) { + System.out.printf("%s\t%d\t%d\t%s\t%s\t%s\n", job.getJobID(), job.getRunState(), + job.getStartTime(), job.getUsername(), + job.getJobPriority().name(), job.getSchedulingInfo()); + } + } + + /** + * Get status information about the max available Maps in the cluster. + * + * @return the max available Maps in the cluster + * @throws IOException + */ + public int getDefaultMaps() throws IOException { + return getClusterStatus().getMaxMapTasks(); + } + + /** + * Get status information about the max available Reduces in the cluster. + * + * @return the max available Reduces in the cluster + * @throws IOException + */ + public int getDefaultReduces() throws IOException { + return getClusterStatus().getMaxReduceTasks(); + } + + /** + * Grab the jobtracker system directory path where job-specific files are to be placed. + * + * @return the system directory where job-specific files are to be placed. + */ + public Path getSystemDir() { + if (sysDir == null) { + sysDir = new Path(jobSubmitClient.getSystemDir()); + } + return sysDir; + } + + + /** + * Return an array of queue information objects about all the Job Queues + * configured. + * + * @return Array of JobQueueInfo objects + * @throws IOException + */ + public JobQueueInfo[] getQueues() throws IOException { + return jobSubmitClient.getQueues(); + } + + /** + * Gets all the jobs which were added to particular Job Queue + * + * @param queueName name of the Job Queue + * @return Array of jobs present in the job queue + * @throws IOException + */ + + public JobStatus[] getJobsFromQueue(String queueName) throws IOException { + return jobSubmitClient.getJobsFromQueue(queueName); + } + + /** + * Gets the queue information associated to a particular Job Queue + * + * @param queueName name of the job queue. + * @return Queue information associated to particular queue. + * @throws IOException + */ + public JobQueueInfo getQueueInfo(String queueName) throws IOException { + return jobSubmitClient.getQueueInfo(queueName); + } + + /** + * Gets the Queue ACLs for current user + * @return array of QueueAclsInfo object for current user. 
+ * @throws IOException + */ + public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException { + return jobSubmitClient.getQueueAclsForCurrentUser(); + } + + /** + */ + public static void main(String argv[]) throws Exception { + int res = ToolRunner.run(new JobClient(), argv); + System.exit(res); + } +} + diff --git a/src/mapred/org/apache/hadoop/mapred/JobConf.java b/src/mapred/org/apache/hadoop/mapred/JobConf.java new file mode 100644 index 0000000..bb69847 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JobConf.java @@ -0,0 +1,2009 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + + +import java.io.IOException; + +import java.net.URL; +import java.net.URLDecoder; +import java.util.Enumeration; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.filecache.DistributedCache; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.conf.Configuration; + +import org.apache.hadoop.io.*; +import org.apache.hadoop.io.compress.CompressionCodec; + +import org.apache.hadoop.mapred.lib.IdentityMapper; +import org.apache.hadoop.mapred.lib.IdentityReducer; +import org.apache.hadoop.mapred.lib.HashPartitioner; +import org.apache.hadoop.mapred.lib.KeyFieldBasedComparator; +import org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.Tool; + +/** + * A map/reduce job configuration. + * + *

+ * <p><code>JobConf</code> is the primary interface for a user to describe a
+ * map-reduce job to the Hadoop framework for execution. The framework tries to
+ * faithfully execute the job as described by <code>JobConf</code>, however:
+ * <ol>
+ *   <li>
+ *   Some configuration parameters might have been marked as
+ *   final by administrators and hence cannot be altered.
+ *   </li>
+ *   <li>
+ *   While some job parameters are straight-forward to set
+ *   (e.g. {@link #setNumReduceTasks(int)}), other parameters interact subtly
+ *   with the rest of the framework and/or the job configuration and are
+ *   relatively more complex for the user to control finely
+ *   (e.g. {@link #setNumMapTasks(int)}).
+ *   </li>
+ * </ol>
+ *
+ * <p><code>JobConf</code> typically specifies the {@link Mapper}, combiner
+ * (if any), {@link Partitioner}, {@link Reducer}, {@link InputFormat} and
+ * {@link OutputFormat} implementations to be used etc.
+ *
+ * <p>Optionally <code>JobConf</code> is used to specify other advanced facets
+ * of the job such as the comparators to be used, files to be put in
+ * the {@link DistributedCache}, whether or not intermediate and/or job outputs
+ * are to be compressed (and how), and debuggability via user-provided scripts
+ * ({@link #setMapDebugScript(String)}/{@link #setReduceDebugScript(String)})
+ * for doing post-processing on task logs, the task's stdout, stderr, syslog, etc.
+ *
+ * <p>Here is an example on how to configure a job via <code>JobConf</code>:
+ * <blockquote><pre>
+ *     // Create a new JobConf
+ *     JobConf job = new JobConf(new Configuration(), MyJob.class);
+ *
+ *     // Specify various job-specific parameters
+ *     job.setJobName("myjob");
+ *
+ *     FileInputFormat.setInputPaths(job, new Path("in"));
+ *     FileOutputFormat.setOutputPath(job, new Path("out"));
+ *
+ *     job.setMapperClass(MyJob.MyMapper.class);
+ *     job.setCombinerClass(MyJob.MyReducer.class);
+ *     job.setReducerClass(MyJob.MyReducer.class);
+ *
+ *     job.setInputFormat(SequenceFileInputFormat.class);
+ *     job.setOutputFormat(SequenceFileOutputFormat.class);
+ * </pre></blockquote>
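+ *
+ * <p>The configured job can then be submitted and monitored with
+ * {@link JobClient#runJob(JobConf)}, e.g.
+ * <code>RunningJob running = JobClient.runJob(job);</code>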

+ * + * @see JobClient + * @see ClusterStatus + * @see Tool + * @see DistributedCache + * @deprecated Use {@link Configuration} instead + */ +@Deprecated +public class JobConf extends Configuration { + + private static final Log LOG = LogFactory.getLog(JobConf.class); + + static{ + Configuration.addDefaultResource("mapred-default.xml"); + Configuration.addDefaultResource("mapred-site.xml"); + } + + /** + * @deprecated Use {@link #MAPRED_JOB_MAP_MEMORY_MB_PROPERTY} and + * {@link #MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY} + */ + @Deprecated + public static final String MAPRED_TASK_MAXVMEM_PROPERTY = + "mapred.task.maxvmem"; + + /** + * @deprecated + */ + @Deprecated + public static final String UPPER_LIMIT_ON_TASK_VMEM_PROPERTY = + "mapred.task.limit.maxvmem"; + + /** + * @deprecated + */ + @Deprecated + public static final String MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY = + "mapred.task.default.maxvmem"; + + /** + * @deprecated + */ + @Deprecated + public static final String MAPRED_TASK_MAXPMEM_PROPERTY = + "mapred.task.maxpmem"; + + /** + * A value which if set for memory related configuration options, + * indicates that the options are turned off. + */ + public static final long DISABLED_MEMORY_LIMIT = -1L; + + /** + * Name of the queue to which jobs will be submitted, if no queue + * name is mentioned. + */ + public static final String DEFAULT_QUEUE_NAME = "default"; + + static final String MAPRED_JOB_MAP_MEMORY_MB_PROPERTY = + "mapred.job.map.memory.mb"; + + static final String MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY = + "mapred.job.reduce.memory.mb"; + + /** + * Configuration key to set the java command line options for the child + * map and reduce tasks. + * + * Java opts for the task tracker child processes. + * The following symbol, if present, will be interpolated: @taskid@. + * It is replaced by current TaskID. Any other occurrences of '@' will go + * unchanged. + * For example, to enable verbose gc logging to a file named for the taskid in + * /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of: + * -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc + * + * The configuration variable {@link #MAPRED_TASK_ULIMIT} can be used to + * control the maximum virtual memory of the child processes. + * + * The configuration variable {@link #MAPRED_TASK_ENV} can be used to pass + * other environment variables to the child processes. + * + * @deprecated Use {@link #MAPRED_MAP_TASK_JAVA_OPTS} or + * {@link #MAPRED_REDUCE_TASK_JAVA_OPTS} + */ + @Deprecated + public static final String MAPRED_TASK_JAVA_OPTS = "mapred.child.java.opts"; + + /** + * Configuration key to set the java command line options for the map tasks. + * + * Java opts for the task tracker child map processes. + * The following symbol, if present, will be interpolated: @taskid@. + * It is replaced by current TaskID. Any other occurrences of '@' will go + * unchanged. + * For example, to enable verbose gc logging to a file named for the taskid in + * /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of: + * -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc + * + * The configuration variable {@link #MAPRED_MAP_TASK_ULIMIT} can be used to + * control the maximum virtual memory of the map processes. + * + * The configuration variable {@link #MAPRED_MAP_TASK_ENV} can be used to pass + * other environment variables to the map processes. + */ + public static final String MAPRED_MAP_TASK_JAVA_OPTS = + "mapred.map.child.java.opts"; + + /** + * Configuration key to set the java command line options for the reduce tasks. 
+ * + * Java opts for the task tracker child reduce processes. + * The following symbol, if present, will be interpolated: @taskid@. + * It is replaced by current TaskID. Any other occurrences of '@' will go + * unchanged. + * For example, to enable verbose gc logging to a file named for the taskid in + * /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of: + * -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc + * + * The configuration variable {@link #MAPRED_REDUCE_TASK_ULIMIT} can be used + * to control the maximum virtual memory of the reduce processes. + * + * The configuration variable {@link #MAPRED_REDUCE_TASK_ENV} can be used to + * pass process environment variables to the reduce processes. + */ + public static final String MAPRED_REDUCE_TASK_JAVA_OPTS = + "mapred.reduce.child.java.opts"; + + public static final String DEFAULT_MAPRED_TASK_JAVA_OPTS = "-Xmx200m"; + + /** + * Configuration key to set the maximum virutal memory available to the child + * map and reduce tasks (in kilo-bytes). + * + * Note: This must be greater than or equal to the -Xmx passed to the JavaVM + * via {@link #MAPRED_TASK_JAVA_OPTS}, else the VM might not start. + * + * @deprecated Use {@link #MAPRED_MAP_TASK_ULIMIT} or + * {@link #MAPRED_REDUCE_TASK_ULIMIT} + */ + @Deprecated + public static final String MAPRED_TASK_ULIMIT = "mapred.child.ulimit"; + + /** + * Configuration key to set the maximum virutal memory available to the + * map tasks (in kilo-bytes). + * + * Note: This must be greater than or equal to the -Xmx passed to the JavaVM + * via {@link #MAPRED_MAP_TASK_JAVA_OPTS}, else the VM might not start. + */ + public static final String MAPRED_MAP_TASK_ULIMIT = "mapred.map.child.ulimit"; + + /** + * Configuration key to set the maximum virutal memory available to the + * reduce tasks (in kilo-bytes). + * + * Note: This must be greater than or equal to the -Xmx passed to the JavaVM + * via {@link #MAPRED_REDUCE_TASK_JAVA_OPTS}, else the VM might not start. + */ + public static final String MAPRED_REDUCE_TASK_ULIMIT = + "mapred.reduce.child.ulimit"; + + /** + * Configuration key to set the environment of the child map/reduce tasks. + * + * The format of the value is k1=v1,k2=v2. Further it can + * reference existing environment variables via $key. + * + * Example: + *
+   * <ul>
+   *   <li> A=foo - This will set the env variable A to foo. </li>
+   *   <li> B=$X:c - This will set the env variable B to the tasktracker's
+   *        value of X, with ":c" appended. </li>
+   * </ul>
+   *
+   * @deprecated Use {@link #MAPRED_MAP_TASK_ENV} or
+   *             {@link #MAPRED_REDUCE_TASK_ENV}
+   */
+  @Deprecated
+  public static final String MAPRED_TASK_ENV = "mapred.child.env";
+
+  /**
+   * Configuration key to set the environment of the map tasks.
+   *
+   * The format of the value is k1=v1,k2=v2. Further, it can
+   * reference existing environment variables via $key.
+   *
+   * Example:
+   * <ul>
+   *   <li> A=foo - This will set the env variable A to foo. </li>
+   *   <li> B=$X:c - This will set the env variable B to the tasktracker's
+   *        value of X, with ":c" appended. </li>
+   * </ul>
+   */
+  public static final String MAPRED_MAP_TASK_ENV = "mapred.map.child.env";
+
+  /**
+   * Configuration key to set the environment of the reduce tasks.
+   *
+   * The format of the value is k1=v1,k2=v2. Further, it can
+   * reference existing environment variables via $key.
+   *
+   * Example:
+   * <ul>
+   *   <li> A=foo - This will set the env variable A to foo. </li>
+   *   <li> B=$X:c - This will set the env variable B to the tasktracker's
+   *        value of X, with ":c" appended. </li>
+   * </ul>
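The child JVM options and child environment keys above are typically set together on a job's JobConf. A minimal sketch, assuming only the public JobConf API declared in this file (the driver class and paths below are illustrative):

    import org.apache.hadoop.mapred.JobConf;

    public class ChildSettingsSketch {
      public static JobConf configure() {
        // Use a class from the job jar to locate the jar itself.
        JobConf conf = new JobConf(ChildSettingsSketch.class);
        // Per-map JVM options; @taskid@ is interpolated with the current TaskID.
        conf.set(JobConf.MAPRED_MAP_TASK_JAVA_OPTS,
                 "-Xmx512m -verbose:gc -Xloggc:/tmp/@taskid@.gc");
        // Per-map environment in k1=v1,k2=v2 form; $PATH inherits the
        // tasktracker's PATH value.
        conf.set(JobConf.MAPRED_MAP_TASK_ENV,
                 "LD_LIBRARY_PATH=/opt/native/lib,PATH=$PATH");
        return conf;
      }
    }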
+ */ + public static final String MAPRED_REDUCE_TASK_ENV = + "mapred.reduce.child.env"; + + /** + * Construct a map/reduce job configuration. + */ + public JobConf() { + checkAndWarnDeprecation(); + } + + /** + * Construct a map/reduce job configuration. + * + * @param exampleClass a class whose containing jar is used as the job's jar. + */ + public JobConf(Class exampleClass) { + setJarByClass(exampleClass); + checkAndWarnDeprecation(); + } + + /** + * Construct a map/reduce job configuration. + * + * @param conf a Configuration whose settings will be inherited. + */ + public JobConf(Configuration conf) { + super(conf); + checkAndWarnDeprecation(); + } + + + /** Construct a map/reduce job configuration. + * + * @param conf a Configuration whose settings will be inherited. + * @param exampleClass a class whose containing jar is used as the job's jar. + */ + public JobConf(Configuration conf, Class exampleClass) { + this(conf); + setJarByClass(exampleClass); + } + + + /** Construct a map/reduce configuration. + * + * @param config a Configuration-format XML job description file. + */ + public JobConf(String config) { + this(new Path(config)); + } + + /** Construct a map/reduce configuration. + * + * @param config a Configuration-format XML job description file. + */ + public JobConf(Path config) { + super(); + addResource(config); + checkAndWarnDeprecation(); + } + + /** A new map/reduce configuration where the behavior of reading from the + * default resources can be turned off. + *

+ * If the parameter {@code loadDefaults} is false, the new instance + * will not load resources from the default files. + * + * @param loadDefaults specifies whether to load from the default files + */ + public JobConf(boolean loadDefaults) { + super(loadDefaults); + checkAndWarnDeprecation(); + } + + /** + * Get the user jar for the map-reduce job. + * + * @return the user jar for the map-reduce job. + */ + public String getJar() { return get("mapred.jar"); } + + /** + * Set the user jar for the map-reduce job. + * + * @param jar the user jar for the map-reduce job. + */ + public void setJar(String jar) { set("mapred.jar", jar); } + + /** + * Set the job's jar file by finding an example class location. + * + * @param cls the example class. + */ + public void setJarByClass(Class cls) { + String jar = findContainingJar(cls); + if (jar != null) { + setJar(jar); + } + } + + public String[] getLocalDirs() throws IOException { + return getStrings("mapred.local.dir"); + } + + /** + * Use MRAsyncDiskService.moveAndDeleteAllVolumes instead. + * @see org.apache.hadoop.util.MRAsyncDiskService#cleanupAllVolumes() + */ + @Deprecated + public void deleteLocalFiles() throws IOException { + String[] localDirs = getLocalDirs(); + for (int i = 0; i < localDirs.length; i++) { + FileSystem.getLocal(this).delete(new Path(localDirs[i])); + } + } + + public void deleteLocalFiles(String subdir) throws IOException { + String[] localDirs = getLocalDirs(); + for (int i = 0; i < localDirs.length; i++) { + FileSystem.getLocal(this).delete(new Path(localDirs[i], subdir)); + } + } + + /** + * Constructs a local file name. Files are distributed among configured + * local directories. + */ + public Path getLocalPath(String pathString) throws IOException { + return getLocalPath("mapred.local.dir", pathString); + } + + /** + * Get the reported username for this job. + * + * @return the username + */ + public String getUser() { + return get("user.name"); + } + + /** + * Set the reported username for this job. + * + * @param user the username for this job. + */ + public void setUser(String user) { + set("user.name", user); + } + + + + /** + * Set whether the framework should keep the intermediate files for + * failed tasks. + * + * @param keep true if framework should keep the intermediate files + * for failed tasks, false otherwise. + * + */ + public void setKeepFailedTaskFiles(boolean keep) { + setBoolean("keep.failed.task.files", keep); + } + + /** + * Should the temporary files for failed tasks be kept? + * + * @return should the files be kept? + */ + public boolean getKeepFailedTaskFiles() { + return getBoolean("keep.failed.task.files", false); + } + + /** + * Set a regular expression for task names that should be kept. + * The regular expression ".*_m_000123_0" would keep the files + * for the first instance of map 123 that ran. + * + * @param pattern the java.util.regex.Pattern to match against the + * task names. + */ + public void setKeepTaskFilesPattern(String pattern) { + set("keep.task.files.pattern", pattern); + } + + /** + * Get the regular expression that is matched against the task names + * to see if we need to keep the files. + * + * @return the pattern as a string, if it was set, othewise null. + */ + public String getKeepTaskFilesPattern() { + return get("keep.task.files.pattern"); + } + + /** + * Set the current working directory for the default file system. + * + * @param dir the new current working directory. 
+ */ + public void setWorkingDirectory(Path dir) { + dir = new Path(getWorkingDirectory(), dir); + set("mapred.working.dir", dir.toString()); + } + + /** + * Get the current working directory for the default file system. + * + * @return the directory name. + */ + public Path getWorkingDirectory() { + String name = get("mapred.working.dir"); + if (name != null) { + return new Path(name); + } else { + try { + Path dir = FileSystem.get(this).getWorkingDirectory(); + set("mapred.working.dir", dir.toString()); + return dir; + } catch (IOException e) { + throw new RuntimeException(e); + } + } + } + + /** + * Sets the number of tasks that a spawned task JVM should run + * before it exits + * @param numTasks the number of tasks to execute; defaults to 1; + * -1 signifies no limit + */ + public void setNumTasksToExecutePerJvm(int numTasks) { + setInt("mapred.job.reuse.jvm.num.tasks", numTasks); + } + + /** + * Get the number of tasks that a spawned JVM should execute + */ + public int getNumTasksToExecutePerJvm() { + return getInt("mapred.job.reuse.jvm.num.tasks", 1); + } + + /** + * Get the {@link InputFormat} implementation for the map-reduce job, + * defaults to {@link TextInputFormat} if not specified explicity. + * + * @return the {@link InputFormat} implementation for the map-reduce job. + */ + public InputFormat getInputFormat() { + return ReflectionUtils.newInstance(getClass("mapred.input.format.class", + TextInputFormat.class, + InputFormat.class), + this); + } + + /** + * Set the {@link InputFormat} implementation for the map-reduce job. + * + * @param theClass the {@link InputFormat} implementation for the map-reduce + * job. + */ + public void setInputFormat(Class theClass) { + setClass("mapred.input.format.class", theClass, InputFormat.class); + } + + /** + * Get the {@link OutputFormat} implementation for the map-reduce job, + * defaults to {@link TextOutputFormat} if not specified explicity. + * + * @return the {@link OutputFormat} implementation for the map-reduce job. + */ + public OutputFormat getOutputFormat() { + return ReflectionUtils.newInstance(getClass("mapred.output.format.class", + TextOutputFormat.class, + OutputFormat.class), + this); + } + + /** + * Get the {@link OutputCommitter} implementation for the map-reduce job, + * defaults to {@link FileOutputCommitter} if not specified explicitly. + * + * @return the {@link OutputCommitter} implementation for the map-reduce job. + */ + public OutputCommitter getOutputCommitter() { + return (OutputCommitter)ReflectionUtils.newInstance( + getClass("mapred.output.committer.class", FileOutputCommitter.class, + OutputCommitter.class), this); + } + + /** + * Set the {@link OutputCommitter} implementation for the map-reduce job. + * + * @param theClass the {@link OutputCommitter} implementation for the map-reduce + * job. + */ + public void setOutputCommitter(Class theClass) { + setClass("mapred.output.committer.class", theClass, OutputCommitter.class); + } + + /** + * Set the {@link OutputFormat} implementation for the map-reduce job. + * + * @param theClass the {@link OutputFormat} implementation for the map-reduce + * job. + */ + public void setOutputFormat(Class theClass) { + setClass("mapred.output.format.class", theClass, OutputFormat.class); + } + + /** + * Should the map outputs be compressed before transfer? + * Uses the SequenceFile compression. + * + * @param compress should the map outputs be compressed? 
+ */ + public void setCompressMapOutput(boolean compress) { + setBoolean("mapred.compress.map.output", compress); + } + + /** + * Are the outputs of the maps be compressed? + * + * @return true if the outputs of the maps are to be compressed, + * false otherwise. + */ + public boolean getCompressMapOutput() { + return getBoolean("mapred.compress.map.output", false); + } + + /** + * Set the given class as the {@link CompressionCodec} for the map outputs. + * + * @param codecClass the {@link CompressionCodec} class that will compress + * the map outputs. + */ + public void + setMapOutputCompressorClass(Class codecClass) { + setCompressMapOutput(true); + setClass("mapred.map.output.compression.codec", codecClass, + CompressionCodec.class); + } + + /** + * Get the {@link CompressionCodec} for compressing the map outputs. + * + * @param defaultValue the {@link CompressionCodec} to return if not set + * @return the {@link CompressionCodec} class that should be used to compress the + * map outputs. + * @throws IllegalArgumentException if the class was specified, but not found + */ + public Class + getMapOutputCompressorClass(Class defaultValue) { + Class codecClass = defaultValue; + String name = get("mapred.map.output.compression.codec"); + if (name != null) { + try { + codecClass = getClassByName(name).asSubclass(CompressionCodec.class); + } catch (ClassNotFoundException e) { + throw new IllegalArgumentException("Compression codec " + name + + " was not found.", e); + } + } + return codecClass; + } + + /** + * Get the key class for the map output data. If it is not set, use the + * (final) output key class. This allows the map output key class to be + * different than the final output key class. + * + * @return the map output key class. + */ + public Class getMapOutputKeyClass() { + Class retv = getClass("mapred.mapoutput.key.class", null, Object.class); + if (retv == null) { + retv = getOutputKeyClass(); + } + return retv; + } + + /** + * Set the key class for the map output data. This allows the user to + * specify the map output key class to be different than the final output + * value class. + * + * @param theClass the map output key class. + */ + public void setMapOutputKeyClass(Class theClass) { + setClass("mapred.mapoutput.key.class", theClass, Object.class); + } + + /** + * Get the value class for the map output data. If it is not set, use the + * (final) output value class This allows the map output value class to be + * different than the final output value class. + * + * @return the map output value class. + */ + public Class getMapOutputValueClass() { + Class retv = getClass("mapred.mapoutput.value.class", null, + Object.class); + if (retv == null) { + retv = getOutputValueClass(); + } + return retv; + } + + /** + * Set the value class for the map output data. This allows the user to + * specify the map output value class to be different than the final output + * value class. + * + * @param theClass the map output value class. + */ + public void setMapOutputValueClass(Class theClass) { + setClass("mapred.mapoutput.value.class", theClass, Object.class); + } + + /** + * Get the key class for the job output data. + * + * @return the key class for the job output data. + */ + public Class getOutputKeyClass() { + return getClass("mapred.output.key.class", + LongWritable.class, Object.class); + } + + /** + * Set the key class for the job output data. + * + * @param theClass the key class for the job output data. 
+ */ + public void setOutputKeyClass(Class theClass) { + setClass("mapred.output.key.class", theClass, Object.class); + } + + /** + * Get the {@link RawComparator} comparator used to compare keys. + * + * @return the {@link RawComparator} comparator used to compare keys. + */ + public RawComparator getOutputKeyComparator() { + Class theClass = getClass("mapred.output.key.comparator.class", + null, RawComparator.class); + if (theClass != null) + return ReflectionUtils.newInstance(theClass, this); + return WritableComparator.get(getMapOutputKeyClass().asSubclass(WritableComparable.class)); + } + + /** + * Set the {@link RawComparator} comparator used to compare keys. + * + * @param theClass the {@link RawComparator} comparator used to + * compare keys. + * @see #setOutputValueGroupingComparator(Class) + */ + public void setOutputKeyComparatorClass(Class theClass) { + setClass("mapred.output.key.comparator.class", + theClass, RawComparator.class); + } + + /** + * Set the {@link KeyFieldBasedComparator} options used to compare keys. + * + * @param keySpec the key specification of the form -k pos1[,pos2], where, + * pos is of the form f[.c][opts], where f is the number + * of the key field to use, and c is the number of the first character from + * the beginning of the field. Fields and character posns are numbered + * starting with 1; a character position of zero in pos2 indicates the + * field's last character. If '.c' is omitted from pos1, it defaults to 1 + * (the beginning of the field); if omitted from pos2, it defaults to 0 + * (the end of the field). opts are ordering options. The supported options + * are: + * -n, (Sort numerically) + * -r, (Reverse the result of comparison) + */ + public void setKeyFieldComparatorOptions(String keySpec) { + setOutputKeyComparatorClass(KeyFieldBasedComparator.class); + set("mapred.text.key.comparator.options", keySpec); + } + + /** + * Get the {@link KeyFieldBasedComparator} options + */ + public String getKeyFieldComparatorOption() { + return get("mapred.text.key.comparator.options"); + } + + /** + * Set the {@link KeyFieldBasedPartitioner} options used for + * {@link Partitioner} + * + * @param keySpec the key specification of the form -k pos1[,pos2], where, + * pos is of the form f[.c][opts], where f is the number + * of the key field to use, and c is the number of the first character from + * the beginning of the field. Fields and character posns are numbered + * starting with 1; a character position of zero in pos2 indicates the + * field's last character. If '.c' is omitted from pos1, it defaults to 1 + * (the beginning of the field); if omitted from pos2, it defaults to 0 + * (the end of the field). + */ + public void setKeyFieldPartitionerOptions(String keySpec) { + setPartitionerClass(KeyFieldBasedPartitioner.class); + set("mapred.text.key.partitioner.options", keySpec); + } + + /** + * Get the {@link KeyFieldBasedPartitioner} options + */ + public String getKeyFieldPartitionerOption() { + return get("mapred.text.key.partitioner.options"); + } + + /** + * Get the user defined {@link WritableComparable} comparator for + * grouping keys of inputs to the reduce. + * + * @return comparator set by the user for grouping values. + * @see #setOutputValueGroupingComparator(Class) for details. 
+ */ + public RawComparator getOutputValueGroupingComparator() { + Class theClass = getClass("mapred.output.value.groupfn.class", null, + RawComparator.class); + if (theClass == null) { + return getOutputKeyComparator(); + } + + return ReflectionUtils.newInstance(theClass, this); + } + + /** + * Set the user defined {@link RawComparator} comparator for + * grouping keys in the input to the reduce. + * + *

+   * <p>This comparator should be provided if the equivalence rules for keys
+   * for sorting the intermediates are different from those for grouping keys
+   * before each call to
+   * {@link Reducer#reduce(Object, java.util.Iterator, OutputCollector, Reporter)}.</p>
+   *
+   * <p>For key-value pairs (K1,V1) and (K2,V2), the values (V1, V2) are passed
+   * in a single call to the reduce function if K1 and K2 compare as equal.</p>
+   *
+   * <p>Since {@link #setOutputKeyComparatorClass(Class)} can be used to control
+   * how keys are sorted, this can be used in conjunction to simulate
+   * secondary sort on values.</p>
+   *
+   * <p>Note: This is not a guarantee of the reduce sort being stable in any
+   * sense. (In any case, with the order of available map-outputs to the
+   * reduce being non-deterministic, it wouldn't make that much sense.)</p>
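A hedged sketch of the secondary-sort idea just described, assuming tab-separated Text keys so the stock key-field helpers can be used; the grouping comparator named in the comment is a hypothetical user-supplied RawComparator:

    import org.apache.hadoop.mapred.JobConf;

    public class SecondarySortSketch {
      public static void configure(JobConf conf) {
        // Sort intermediate keys on field 1, then field 2 numerically, reversed.
        conf.setKeyFieldComparatorOptions("-k1,1 -k2,2nr");
        // Partition on field 1 only, so records sharing it reach the same reduce.
        conf.setKeyFieldPartitionerOptions("-k1,1");
        // To group reduce() calls by field 1 alone, a custom RawComparator
        // would be registered here, e.g. (hypothetical class):
        // conf.setOutputValueGroupingComparator(FirstFieldGroupingComparator.class);
      }
    }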

+ * + * @param theClass the comparator class to be used for grouping keys. + * It should implement RawComparator. + * @see #setOutputKeyComparatorClass(Class) + */ + public void setOutputValueGroupingComparator( + Class theClass) { + setClass("mapred.output.value.groupfn.class", + theClass, RawComparator.class); + } + + /** + * Should the framework use the new context-object code for running + * the mapper? + * @return true, if the new api should be used + */ + public boolean getUseNewMapper() { + return getBoolean("mapred.mapper.new-api", false); + } + /** + * Set whether the framework should use the new api for the mapper. + * This is the default for jobs submitted with the new Job api. + * @param flag true, if the new api should be used + */ + public void setUseNewMapper(boolean flag) { + setBoolean("mapred.mapper.new-api", flag); + } + + /** + * Should the framework use the new context-object code for running + * the reducer? + * @return true, if the new api should be used + */ + public boolean getUseNewReducer() { + return getBoolean("mapred.reducer.new-api", false); + } + /** + * Set whether the framework should use the new api for the reducer. + * This is the default for jobs submitted with the new Job api. + * @param flag true, if the new api should be used + */ + public void setUseNewReducer(boolean flag) { + setBoolean("mapred.reducer.new-api", flag); + } + + /** + * Get the value class for job outputs. + * + * @return the value class for job outputs. + */ + public Class getOutputValueClass() { + return getClass("mapred.output.value.class", Text.class, Object.class); + } + + /** + * Set the value class for job outputs. + * + * @param theClass the value class for job outputs. + */ + public void setOutputValueClass(Class theClass) { + setClass("mapred.output.value.class", theClass, Object.class); + } + + /** + * Get the {@link Mapper} class for the job. + * + * @return the {@link Mapper} class for the job. + */ + public Class getMapperClass() { + return getClass("mapred.mapper.class", IdentityMapper.class, Mapper.class); + } + + /** + * Set the {@link Mapper} class for the job. + * + * @param theClass the {@link Mapper} class for the job. + */ + public void setMapperClass(Class theClass) { + setClass("mapred.mapper.class", theClass, Mapper.class); + } + + /** + * Get the {@link MapRunnable} class for the job. + * + * @return the {@link MapRunnable} class for the job. + */ + public Class getMapRunnerClass() { + return getClass("mapred.map.runner.class", + MapRunner.class, MapRunnable.class); + } + + /** + * Expert: Set the {@link MapRunnable} class for the job. + * + * Typically used to exert greater control on {@link Mapper}s. + * + * @param theClass the {@link MapRunnable} class for the job. + */ + public void setMapRunnerClass(Class theClass) { + setClass("mapred.map.runner.class", theClass, MapRunnable.class); + } + + /** + * Get the {@link Partitioner} used to partition {@link Mapper}-outputs + * to be sent to the {@link Reducer}s. + * + * @return the {@link Partitioner} used to partition map-outputs. + */ + public Class getPartitionerClass() { + return getClass("mapred.partitioner.class", + HashPartitioner.class, Partitioner.class); + } + + /** + * Set the {@link Partitioner} class used to partition + * {@link Mapper}-outputs to be sent to the {@link Reducer}s. + * + * @param theClass the {@link Partitioner} used to partition map-outputs. 
+ */ + public void setPartitionerClass(Class theClass) { + setClass("mapred.partitioner.class", theClass, Partitioner.class); + } + + /** + * Get the {@link Reducer} class for the job. + * + * @return the {@link Reducer} class for the job. + */ + public Class getReducerClass() { + return getClass("mapred.reducer.class", + IdentityReducer.class, Reducer.class); + } + + /** + * Set the {@link Reducer} class for the job. + * + * @param theClass the {@link Reducer} class for the job. + */ + public void setReducerClass(Class theClass) { + setClass("mapred.reducer.class", theClass, Reducer.class); + } + + /** + * Get the user-defined combiner class used to combine map-outputs + * before being sent to the reducers. Typically the combiner is same as the + * the {@link Reducer} for the job i.e. {@link #getReducerClass()}. + * + * @return the user-defined combiner class used to combine map-outputs. + */ + public Class getCombinerClass() { + return getClass("mapred.combiner.class", null, Reducer.class); + } + + /** + * Set the user-defined combiner class used to combine map-outputs + * before being sent to the reducers. + * + *

+   * <p>The combiner is an application-specified aggregation operation, which
+   * can help cut down the amount of data transferred between the
+   * {@link Mapper} and the {@link Reducer}, leading to better performance.</p>
+   *
+   * <p>The framework may invoke the combiner 0, 1, or multiple times, in both
+   * the mapper and reducer tasks. In general, the combiner is called as the
+   * sort/merge result is written to disk. The combiner must:</p>
+   * <ul>
+   *   <li> be side-effect free </li>
+   *   <li> have the same input and output key types and the same input and
+   *        output value types </li>
+   * </ul>
+   *
+   * <p>Typically the combiner is the same as the Reducer for the
+   * job, i.e. {@link #setReducerClass(Class)}.</p>
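A sum-style reduce satisfies the constraints listed above, so the same class can serve as both combiner and reducer. A rough sketch using the stock LongSumReducer from the old mapred API, assuming Text keys and LongWritable counts:

    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.lib.LongSumReducer;

    public class CombinerSketch {
      public static void configure(JobConf conf) {
        conf.setMapOutputKeyClass(Text.class);
        conf.setMapOutputValueClass(LongWritable.class);
        // Summation is side-effect free and keeps the same key/value types,
        // so the reducer doubles as the combiner.
        conf.setCombinerClass(LongSumReducer.class);
        conf.setReducerClass(LongSumReducer.class);
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(LongWritable.class);
      }
    }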

+ * + * @param theClass the user-defined combiner class used to combine + * map-outputs. + */ + public void setCombinerClass(Class theClass) { + setClass("mapred.combiner.class", theClass, Reducer.class); + } + + /** + * Should speculative execution be used for this job? + * Defaults to true. + * + * @return true if speculative execution be used for this job, + * false otherwise. + */ + public boolean getSpeculativeExecution() { + return (getMapSpeculativeExecution() || getReduceSpeculativeExecution()); + } + + /** + * Turn speculative execution on or off for this job. + * + * @param speculativeExecution true if speculative execution + * should be turned on, else false. + */ + public void setSpeculativeExecution(boolean speculativeExecution) { + setMapSpeculativeExecution(speculativeExecution); + setReduceSpeculativeExecution(speculativeExecution); + } + + /** + * Should speculative execution be used for this job for map tasks? + * Defaults to true. + * + * @return true if speculative execution be + * used for this job for map tasks, + * false otherwise. + */ + public boolean getMapSpeculativeExecution() { + return getBoolean("mapred.map.tasks.speculative.execution", true); + } + + /** + * Turn speculative execution on or off for this job for map tasks. + * + * @param speculativeExecution true if speculative execution + * should be turned on for map tasks, + * else false. + */ + public void setMapSpeculativeExecution(boolean speculativeExecution) { + setBoolean("mapred.map.tasks.speculative.execution", speculativeExecution); + } + + /** + * Should speculative execution be used for this job for reduce tasks? + * Defaults to true. + * + * @return true if speculative execution be used + * for reduce tasks for this job, + * false otherwise. + */ + public boolean getReduceSpeculativeExecution() { + return getBoolean("mapred.reduce.tasks.speculative.execution", true); + } + + /** + * Turn speculative execution on or off for this job for reduce tasks. + * + * @param speculativeExecution true if speculative execution + * should be turned on for reduce tasks, + * else false. + */ + public void setReduceSpeculativeExecution(boolean speculativeExecution) { + setBoolean("mapred.reduce.tasks.speculative.execution", + speculativeExecution); + } + + /** + * Get progress gap to invoke speculative execution for maps. + * + * @param mapSpeculativeGap get value for speculative gap. + */ + public float getMapSpeculativeGap() { + return getFloat("mapred.speculative.map.gap", 0.2f); + } + + /** + * Set progress gap to invoke speculative execution for maps. + * + * @param mapSpeculativeGap New value for speculative gap. + */ + public void setMapSpeculativeGap(float mapSpeculativeGap) { + set("mapred.speculative.map.gap", "" + mapSpeculativeGap); + } + + /** + * Get time to wait before invoking speculative execution for maps. + */ + public long getMapSpeculativeLag() { + return getLong("mapred.speculative.map.lag", 60 * 1000); + } + + /** + * Set time to wait before invoking speculative execution for maps. + * + * @param mapSpeculativeLag New value for speculative lag. + */ + public void setMapSpeculativeLag(long mapSpeculativeLag) { + set("mapred.speculative.map.lag", "" + mapSpeculativeLag); + } + + + /** + * Get progress gap to invoke speculative execution for reduces. + */ + public float getReduceSpeculativeGap() { + return getFloat("mapred.speculative.reduce.gap", 0.2f); + } + + /** + * Set progress gap to invoke speculative execution for reduces. 
+ * + * @param reduceSpeculativeGap New value for speculative gap. + */ + public void setReduceSpeculativeGap(float reduceSpeculativeGap) { + set("mapred.speculative.reduce.gap", "" + reduceSpeculativeGap); + } + + /** + * Get time to wait before invoking speculative execution for reduces. + */ + public long getReduceSpeculativeLag() { + return getLong("mapred.speculative.reduce.lag", 60 * 1000); + } + + /** + * Set time to wait before invoking speculative execution for reduces. + * + * @param reduceSpeculativeLag New value for speculative lag. + */ + public void setReduceSpeculativeLag(long reduceSpeculativeLag) { + set("mapred.speculative.reduce.lag", "" + reduceSpeculativeLag); + } + + /** + * Set minimum projected task duration in seconds + * before invoking speculative execution on mappers + * + * @param mapSpeculativeDuration New value for speculative duration + */ + public void setMapSpeculativeDuration(long mapSpeculativeDuration) { + set("mapred.speculative.map.duration", "" + mapSpeculativeDuration); + } + + /** + * Set minimum projected task duration in seconds + * before invoking speculative execution on reducers + * + * @param reduceSpeculativeDuration New value for speculative duration + */ + public void setReduceSpeculativeDuration(long reduceSpeculativeDuration) { + set("mapred.speculative.reduce.duration", "" + reduceSpeculativeDuration); + } + + + /** + * Get minimum projected task duration in seconds + * before invoking speculative execution on mappers + * + * Disabled by default + */ + public long getMapSpeculativeDuration() { + return getLong("mapred.speculative.map.duration", 0L); + } + + /** + * Get minimum projected task duration in seconds + * before invoking speculative execution on reducers + * + * Disabled by default + */ + public long getReduceSpeculativeDuration() { + return getLong("mapred.speculative.reduce.duration", 0L); + } + + /** + * Get configured the number of reduce tasks for this job. + * Defaults to 1. + * + * @return the number of reduce tasks for this job. + */ + public int getNumMapTasks() { return getInt("mapred.map.tasks", 1); } + + /** + * Set the number of map tasks for this job. + * + *

+   * <p>Note: This is only a hint to the framework. The actual
+   * number of spawned map tasks depends on the number of {@link InputSplit}s
+   * generated by the job's {@link InputFormat#getSplits(JobConf, int)}.
+   * A custom {@link InputFormat} is typically used to accurately control
+   * the number of map tasks for the job.</p>
+   *
+   * <p><b>How many maps?</b></p>
+   *
+   * <p>The number of maps is usually driven by the total size of the inputs,
+   * i.e. the total number of blocks of the input files.</p>
+   *
+   * <p>The right level of parallelism for maps seems to be around 10-100 maps
+   * per node, although it has been set up to 300 or so for very cpu-light map
+   * tasks. Task setup takes a while, so it is best if the maps take at least
+   * a minute to execute.</p>
+   *
+   * <p>The default behavior of file-based {@link InputFormat}s is to split the
+   * input into logical {@link InputSplit}s based on the total size, in
+   * bytes, of the input files. However, the {@link FileSystem} blocksize of
+   * the input files is treated as an upper bound for input splits. A lower
+   * bound on the split size can be set via mapred.min.split.size.</p>
+   *
+   * <p>Thus, if you expect 10TB of input data and have a blocksize of 128MB,
+   * you'll end up with 82,000 maps, unless {@link #setNumMapTasks(int)} is
+   * used to set it even higher.</p>
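Working that figure through: 10 TB is roughly 10 * 1024 * 1024 = 10,485,760 MB, and 10,485,760 / 128 is about 81,920, hence the ~82,000 splits and maps. A small sketch of raising the hint (the target number is illustrative):

    import org.apache.hadoop.mapred.JobConf;

    public class MapCountSketch {
      public static void configure(JobConf conf) {
        // With file-based InputFormats this is only a hint: it can push the
        // split count above the ~82,000 block-derived figure, but not below it.
        conf.setNumMapTasks(100000);
      }
    }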

+ * + * @param n the number of map tasks for this job. + * @see InputFormat#getSplits(JobConf, int) + * @see FileInputFormat + * @see FileSystem#getDefaultBlockSize() + * @see FileStatus#getBlockSize() + */ + public void setNumMapTasks(int n) { setInt("mapred.map.tasks", n); } + + /** + * Get configured the number of reduce tasks for this job. Defaults to + * 1. + * + * @return the number of reduce tasks for this job. + */ + public int getNumReduceTasks() { return getInt("mapred.reduce.tasks", 1); } + + /** + * Set the requisite number of reduce tasks for this job. + * + *

+   * <p><b>How many reduces?</b></p>
+   *
+   * <p>The right number of reduces seems to be 0.95 or
+   * 1.75 multiplied by (no. of nodes *
+   * mapred.tasktracker.reduce.tasks.maximum).</p>
+   *
+   * <p>With 0.95 all of the reduces can launch immediately and
+   * start transferring map outputs as the maps finish. With 1.75
+   * the faster nodes will finish their first round of reduces and launch a
+   * second wave of reduces, doing a much better job of load balancing.</p>
+   *
+   * <p>Increasing the number of reduces increases the framework overhead, but
+   * improves load balancing and lowers the cost of failures.</p>
+   *
+   * <p>The scaling factors above are slightly less than whole numbers to
+   * reserve a few reduce slots in the framework for speculative tasks,
+   * failures, etc.</p>
+   *
+   * <p><b>Reducer NONE</b></p>
+   *
+   * <p>It is legal to set the number of reduce-tasks to zero.</p>
+   *
+   * <p>In this case the outputs of the map-tasks go directly to the
+   * distributed file-system, to the path set by
+   * {@link FileOutputFormat#setOutputPath(JobConf, Path)}. Also, the
+   * framework doesn't sort the map-outputs before writing them out to HDFS.</p>
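As a worked example of those factors, a hypothetical 100-node cluster with mapred.tasktracker.reduce.tasks.maximum set to 2 gives 0.95 * 100 * 2 = 190 reduces for a single wave, or 1.75 * 100 * 2 = 350 for a second, load-balancing wave. A small sketch:

    import org.apache.hadoop.mapred.JobConf;

    public class ReduceCountSketch {
      // nodes and reduceSlotsPerNode would come from knowledge of the cluster
      // (e.g. via ClusterStatus); 0.95 targets a single wave of reduces.
      public static void configure(JobConf conf, int nodes, int reduceSlotsPerNode) {
        int reduces = (int) (0.95 * nodes * reduceSlotsPerNode);
        conf.setNumReduceTasks(reduces);
      }
    }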

+ * + * @param n the number of reduce tasks for this job. + */ + public void setNumReduceTasks(int n) { setInt("mapred.reduce.tasks", n); } + + + /** + * Specify whether job-setup and job-cleanup is needed for the job + * + * @param needed If true, job-setup and job-cleanup will be + * considered from {@link OutputCommitter} + * else ignored. + */ + public void setJobSetupCleanupNeeded(boolean needed) { + setBoolean("mapred.committer.job.setup.cleanup.needed", needed); + } + + + + /** + * Get whether job-setup and job-cleanup is needed for the job + * + * @return boolean + */ + public boolean getJobSetupCleanupNeeded() { + return getBoolean("mapred.committer.job.setup.cleanup.needed", true); + } + + /** + * Specify whether task-cleanup is needed for the job + * + * @param needed If true, task-cleanup will be considered + * from {@link OutputCommitter} else ignored. + */ + public void setTaskCleanupNeeded(boolean needed) { + setBoolean("mapred.committer.task.cleanup.needed", needed); + } + + /** + * Get whether task-cleanup is needed for the job + * The purpose of the task-cleanup task is to perform OutputCommitter.abort(). + * If there is no need to run this method, we can disable task-cleanup to + * improve latency. + * + * @return boolean + */ + public boolean getTaskCleanupNeeded() { + return getBoolean("mapred.committer.task.cleanup.needed", true); + } + + /** + * Get the configured number of maximum attempts that will be made to run a + * map task, as specified by the mapred.map.max.attempts + * property. If this property is not already set, the default is 4 attempts. + * + * @return the max number of attempts per map task. + */ + public int getMaxMapAttempts() { + return getInt("mapred.map.max.attempts", 4); + } + + /** + * Expert: Set the number of maximum attempts that will be made to run a + * map task. + * + * @param n the number of attempts per map task. + */ + public void setMaxMapAttempts(int n) { + setInt("mapred.map.max.attempts", n); + } + + /** + * Get the configured number of maximum attempts that will be made to run a + * reduce task, as specified by the mapred.reduce.max.attempts + * property. If this property is not already set, the default is 4 attempts. + * + * @return the max number of attempts per reduce task. + */ + public int getMaxReduceAttempts() { + return getInt("mapred.reduce.max.attempts", 4); + } + /** + * Expert: Set the number of maximum attempts that will be made to run a + * reduce task. + * + * @param n the number of attempts per reduce task. + */ + public void setMaxReduceAttempts(int n) { + setInt("mapred.reduce.max.attempts", n); + } + + /** + * Get the user-specified job name. This is only used to identify the + * job to the user. + * + * @return the job's name, defaulting to "". + */ + public String getJobName() { + return get("mapred.job.name", ""); + } + + /** + * Set the user-specified job name. + * + * @param name the job's new name. + */ + public void setJobName(String name) { + set("mapred.job.name", name); + } + + /** + * Get the user-specified session identifier. The default is the empty string. + * + * The session identifier is used to tag metric data that is reported to some + * performance metrics system via the org.apache.hadoop.metrics API. The + * session identifier is intended, in particular, for use by Hadoop-On-Demand + * (HOD) which allocates a virtual Hadoop cluster dynamically and transiently. + * HOD will set the session identifier by modifying the mapred-site.xml file + * before starting the cluster. 
+ * + * When not running under HOD, this identifer is expected to remain set to + * the empty string. + * + * @return the session identifier, defaulting to "". + */ + public String getSessionId() { + return get("session.id", ""); + } + + /** + * Set the user-specified session identifier. + * + * @param sessionId the new session id. + */ + public void setSessionId(String sessionId) { + set("session.id", sessionId); + } + + /** + * Set the maximum no. of failures of a given job per tasktracker. + * If the no. of task failures exceeds noFailures, the + * tasktracker is blacklisted for this job. + * + * @param noFailures maximum no. of failures of a given job per tasktracker. + */ + public void setMaxTaskFailuresPerTracker(int noFailures) { + setInt("mapred.max.tracker.failures", noFailures); + } + + /** + * Expert: Get the maximum no. of failures of a given job per tasktracker. + * If the no. of task failures exceeds this, the tasktracker is + * blacklisted for this job. + * + * @return the maximum no. of failures of a given job per tasktracker. + */ + public int getMaxTaskFailuresPerTracker() { + return getInt("mapred.max.tracker.failures", 4); + } + + /** + * Get the maximum percentage of map tasks that can fail without + * the job being aborted. + * + * Each map task is executed a minimum of {@link #getMaxMapAttempts()} + * attempts before being declared as failed. + * + * Defaults to zero, i.e. any failed map-task results in + * the job being declared as {@link JobStatus#FAILED}. + * + * @return the maximum percentage of map tasks that can fail without + * the job being aborted. + */ + public int getMaxMapTaskFailuresPercent() { + return getInt("mapred.max.map.failures.percent", 0); + } + + /** + * Expert: Set the maximum percentage of map tasks that can fail without the + * job being aborted. + * + * Each map task is executed a minimum of {@link #getMaxMapAttempts} attempts + * before being declared as failed. + * + * @param percent the maximum percentage of map tasks that can fail without + * the job being aborted. + */ + public void setMaxMapTaskFailuresPercent(int percent) { + setInt("mapred.max.map.failures.percent", percent); + } + + /** + * Get the maximum percentage of reduce tasks that can fail without + * the job being aborted. + * + * Each reduce task is executed a minimum of {@link #getMaxReduceAttempts()} + * attempts before being declared as failed. + * + * Defaults to zero, i.e. any failed reduce-task results + * in the job being declared as {@link JobStatus#FAILED}. + * + * @return the maximum percentage of reduce tasks that can fail without + * the job being aborted. + */ + public int getMaxReduceTaskFailuresPercent() { + return getInt("mapred.max.reduce.failures.percent", 0); + } + + /** + * Set the maximum percentage of reduce tasks that can fail without the job + * being aborted. + * + * Each reduce task is executed a minimum of {@link #getMaxReduceAttempts()} + * attempts before being declared as failed. + * + * @param percent the maximum percentage of reduce tasks that can fail without + * the job being aborted. + */ + public void setMaxReduceTaskFailuresPercent(int percent) { + setInt("mapred.max.reduce.failures.percent", percent); + } + + /** + * Set {@link JobPriority} for this job. + * + * @param prio the {@link JobPriority} for this job. + */ + public void setJobPriority(JobPriority prio) { + set("mapred.job.priority", prio.toString()); + } + + /** + * Get the {@link JobPriority} for this job. + * + * @return the {@link JobPriority} for this job. 
+ */ + public JobPriority getJobPriority() { + String prio = get("mapred.job.priority"); + if(prio == null) { + return JobPriority.NORMAL; + } + + return JobPriority.valueOf(prio); + } + + /** + * Get whether the task profiling is enabled. + * @return true if some tasks will be profiled + */ + public boolean getProfileEnabled() { + return getBoolean("mapred.task.profile", false); + } + + /** + * Set whether the system should collect profiler information for some of + * the tasks in this job? The information is stored in the user log + * directory. + * @param newValue true means it should be gathered + */ + public void setProfileEnabled(boolean newValue) { + setBoolean("mapred.task.profile", newValue); + } + + /** + * Get the profiler configuration arguments. + * + * The default value for this property is + * "-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s" + * + * @return the parameters to pass to the task child to configure profiling + */ + public String getProfileParams() { + return get("mapred.task.profile.params", + "-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y," + + "verbose=n,file=%s"); + } + + /** + * Set the profiler configuration arguments. If the string contains a '%s' it + * will be replaced with the name of the profiling output file when the task + * runs. + * + * This value is passed to the task child JVM on the command line. + * + * @param value the configuration string + */ + public void setProfileParams(String value) { + set("mapred.task.profile.params", value); + } + + /** + * Get the range of maps or reduces to profile. + * @param isMap is the task a map? + * @return the task ranges + */ + public IntegerRanges getProfileTaskRange(boolean isMap) { + return getRange((isMap ? "mapred.task.profile.maps" : + "mapred.task.profile.reduces"), "0-2"); + } + + /** + * Set the ranges of maps or reduces to profile. setProfileEnabled(true) + * must also be called. + * @param newValue a set of integer ranges of the map ids + */ + public void setProfileTaskRange(boolean isMap, String newValue) { + // parse the value to make sure it is legal + new Configuration.IntegerRanges(newValue); + set((isMap ? "mapred.task.profile.maps" : "mapred.task.profile.reduces"), + newValue); + } + + /** + * Set the debug script to run when the map tasks fail. + * + *

+   * <p>The debug script can aid debugging of failed map tasks. The script
+   * is given the task's stdout, stderr, syslog, and jobconf files as
+   * arguments.</p>
+   *
+   * <p>The debug command, run on the node where the map failed, is:</p>
+   * <blockquote><pre>
+   * $script $stdout $stderr $syslog $jobconf
+   * </pre></blockquote>
+   *
+   * <p>The script file is distributed through {@link DistributedCache}
+   * APIs. The script needs to be symlinked.</p>
+   *
+   * <p>Here is an example of how to submit a script:</p>
+   * <blockquote><pre>
+   * job.setMapDebugScript("./myscript");
+   * DistributedCache.createSymlink(job);
+   * DistributedCache.addCacheFile("/debug/scripts/myscript#myscript");
+   * </pre></blockquote>

+ * + * @param mDbgScript the script name + */ + public void setMapDebugScript(String mDbgScript) { + set("mapred.map.task.debug.script", mDbgScript); + } + + /** + * Get the map task's debug script. + * + * @return the debug Script for the mapred job for failed map tasks. + * @see #setMapDebugScript(String) + */ + public String getMapDebugScript() { + return get("mapred.map.task.debug.script"); + } + + /** + * Set the debug script to run when the reduce tasks fail. + * + *

+   * <p>The debug script can aid debugging of failed reduce tasks. The script
+   * is given the task's stdout, stderr, syslog, and jobconf files as
+   * arguments.</p>
+   *
+   * <p>The debug command, run on the node where the reduce failed, is:</p>
+   * <blockquote><pre>
+   * $script $stdout $stderr $syslog $jobconf
+   * </pre></blockquote>
+   *
+   * <p>The script file is distributed through {@link DistributedCache}
+   * APIs. The script file needs to be symlinked.</p>
+   *
+   * <p>Here is an example of how to submit a script:</p>
+   * <blockquote><pre>
+   * job.setReduceDebugScript("./myscript");
+   * DistributedCache.createSymlink(job);
+   * DistributedCache.addCacheFile("/debug/scripts/myscript#myscript");
+   * </pre></blockquote>

+ * + * @param rDbgScript the script name + */ + public void setReduceDebugScript(String rDbgScript) { + set("mapred.reduce.task.debug.script", rDbgScript); + } + + /** + * Get the reduce task's debug Script + * + * @return the debug script for the mapred job for failed reduce tasks. + * @see #setReduceDebugScript(String) + */ + public String getReduceDebugScript() { + return get("mapred.reduce.task.debug.script"); + } + + /** + * Get the uri to be invoked in-order to send a notification after the job + * has completed (success/failure). + * + * @return the job end notification uri, null if it hasn't + * been set. + * @see #setJobEndNotificationURI(String) + */ + public String getJobEndNotificationURI() { + return get("job.end.notification.url"); + } + + /** + * Set the uri to be invoked in-order to send a notification after the job + * has completed (success/failure). + * + *

+   * <p>The uri can contain 2 special parameters: $jobId and
+   * $jobStatus. Those, if present, are replaced by the job's
+   * identifier and completion-status respectively.</p>
+   *
+   * <p>This is typically used by application-writers to implement chaining of
+   * Map-Reduce jobs in an asynchronous manner.</p>
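A sketch of wiring up such a notification; the URL is hypothetical, and the retry keys are the ones read by JobEndNotifier later in this patch:

    import org.apache.hadoop.mapred.JobConf;

    public class NotificationSketch {
      public static void configure(JobConf conf) {
        // $jobId and $jobStatus are substituted by the framework at job end.
        conf.setJobEndNotificationURI(
            "http://workflow.example.com/notify?id=$jobId&status=$jobStatus");
        // Retry knobs consumed by JobEndNotifier (values are illustrative).
        conf.setInt("job.end.retry.attempts", 3);
        conf.setInt("job.end.retry.interval", 30000);
      }
    }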

+ * + * @param uri the job end notification uri + * @see JobStatus + * @see Job Completion and Chaining + */ + public void setJobEndNotificationURI(String uri) { + set("job.end.notification.url", uri); + } + + /** + * Get job-specific shared directory for use as scratch space + * + *

+   * <p>When a job starts, a shared directory is created at
+   * ${mapred.local.dir}/taskTracker/jobcache/$jobid/work/.
+   * This directory is exposed to the users through the
+   * job.local.dir configuration property, so tasks can use it as
+   * scratch space and share files among themselves.</p>

+ * This value is available as System property also. + * + * @return The localized job specific shared directory + */ + public String getJobLocalDir() { + return get("job.local.dir"); + } + + /** + * Get memory required to run a map task of the job, in MB. + * + * If a value is specified in the configuration, it is returned. + * Else, it returns {@link #DISABLED_MEMORY_LIMIT}. + *

+ * For backward compatibility, if the job configuration sets the + * key {@link #MAPRED_TASK_MAXVMEM_PROPERTY} to a value different + * from {@link #DISABLED_MEMORY_LIMIT}, that value will be used + * after converting it from bytes to MB. + * @return memory required to run a map task of the job, in MB, + * or {@link #DISABLED_MEMORY_LIMIT} if unset. + */ + public long getMemoryForMapTask() { + long value = getDeprecatedMemoryValue(); + if (value == DISABLED_MEMORY_LIMIT) { + value = normalizeMemoryConfigValue( + getLong(JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY, + DISABLED_MEMORY_LIMIT)); + } + return value; + } + + public void setMemoryForMapTask(long mem) { + setLong(JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY, mem); + } + + /** + * Get memory required to run a reduce task of the job, in MB. + * + * If a value is specified in the configuration, it is returned. + * Else, it returns {@link #DISABLED_MEMORY_LIMIT}. + *
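To make the MB-versus-bytes distinction concrete, a hedged sketch of the two styles (values are illustrative): the per-task keys take megabytes, while the deprecated single limit took bytes and covered both task types.

    import org.apache.hadoop.mapred.JobConf;

    public class MemoryLimitSketch {
      public static void configure(JobConf conf) {
        // New-style limits in MB (mapred.job.map.memory.mb / mapred.job.reduce.memory.mb).
        conf.setMemoryForMapTask(1024L);
        conf.setMemoryForReduceTask(2048L);
        // Deprecated equivalent in bytes, applied to both map and reduce tasks:
        // conf.setMaxVirtualMemoryForTask(2L * 1024 * 1024 * 1024);
      }
    }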

+ * For backward compatibility, if the job configuration sets the + * key {@link #MAPRED_TASK_MAXVMEM_PROPERTY} to a value different + * from {@link #DISABLED_MEMORY_LIMIT}, that value will be used + * after converting it from bytes to MB. + * @return memory required to run a reduce task of the job, in MB, + * or {@link #DISABLED_MEMORY_LIMIT} if unset. + */ + public long getMemoryForReduceTask() { + long value = getDeprecatedMemoryValue(); + if (value == DISABLED_MEMORY_LIMIT) { + value = normalizeMemoryConfigValue( + getLong(JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY, + DISABLED_MEMORY_LIMIT)); + } + return value; + } + + // Return the value set to the key MAPRED_TASK_MAXVMEM_PROPERTY, + // converted into MBs. + // Returns DISABLED_MEMORY_LIMIT if unset, or set to a negative + // value. + private long getDeprecatedMemoryValue() { + long oldValue = getLong(MAPRED_TASK_MAXVMEM_PROPERTY, + DISABLED_MEMORY_LIMIT); + oldValue = normalizeMemoryConfigValue(oldValue); + if (oldValue != DISABLED_MEMORY_LIMIT) { + oldValue /= (1024*1024); + } + return oldValue; + } + + public void setMemoryForReduceTask(long mem) { + setLong(JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY, mem); + } + + /** + * Return the name of the queue to which this job is submitted. + * Defaults to 'default'. + * + * @return name of the queue + */ + public String getQueueName() { + return get("mapred.job.queue.name", DEFAULT_QUEUE_NAME); + } + + /** + * Set the name of the queue to which this job should be submitted. + * + * @param queueName Name of the queue + */ + public void setQueueName(String queueName) { + set("mapred.job.queue.name", queueName); + } + + /** + * Normalize the negative values in configuration + * + * @param val + * @return normalized value + */ + public static long normalizeMemoryConfigValue(long val) { + if (val < 0) { + val = DISABLED_MEMORY_LIMIT; + } + return val; + } + + /** + * Compute the number of slots required to run a single map task-attempt + * of this job. + * @param slotSizePerMap cluster-wide value of the amount of memory required + * to run a map-task + * @return the number of slots required to run a single map task-attempt + * 1 if memory parameters are disabled. + */ + int computeNumSlotsPerMap(long slotSizePerMap) { + if ((slotSizePerMap==DISABLED_MEMORY_LIMIT) || + (getMemoryForMapTask()==DISABLED_MEMORY_LIMIT)) { + return 1; + } + return (int)(Math.ceil((float)getMemoryForMapTask() / (float)slotSizePerMap)); + } + + /** + * Compute the number of slots required to run a single reduce task-attempt + * of this job. + * @param slotSizePerReduce cluster-wide value of the amount of memory + * required to run a reduce-task + * @return the number of slots required to run a single reduce task-attempt + * 1 if memory parameters are disabled. + */ + int computeNumSlotsPerReduce(long slotSizePerReduce) { + if ((slotSizePerReduce==DISABLED_MEMORY_LIMIT) || + (getMemoryForReduceTask()==DISABLED_MEMORY_LIMIT)) { + return 1; + } + return + (int)(Math.ceil((float)getMemoryForReduceTask() / (float)slotSizePerReduce)); + } + + /** + * Find a jar that contains a class of the same name, if any. + * It will return a jar file, even if that is not the first thing + * on the class path that has a class with the same name. + * + * @param my_class the class to find. + * @return a jar file that contains the class, or null. 
+ * @throws IOException + */ + private static String findContainingJar(Class my_class) { + ClassLoader loader = my_class.getClassLoader(); + String class_file = my_class.getName().replaceAll("\\.", "/") + ".class"; + try { + for(Enumeration itr = loader.getResources(class_file); + itr.hasMoreElements();) { + URL url = (URL) itr.nextElement(); + if ("jar".equals(url.getProtocol())) { + String toReturn = url.getPath(); + if (toReturn.startsWith("file:")) { + toReturn = toReturn.substring("file:".length()); + } + toReturn = URLDecoder.decode(toReturn, "UTF-8"); + return toReturn.replaceAll("!.*$", ""); + } + } + } catch (IOException e) { + throw new RuntimeException(e); + } + return null; + } + + + /** + * Get the memory required to run a task of this job, in bytes. See + * {@link #MAPRED_TASK_MAXVMEM_PROPERTY} + *

+ * This method is deprecated. Now, different memory limits can be + * set for map and reduce tasks of a job, in MB. + *

+ * For backward compatibility, if the job configuration sets the + * key {@link #MAPRED_TASK_MAXVMEM_PROPERTY} to a value different + * from {@link #DISABLED_MEMORY_LIMIT}, that value is returned. + * Otherwise, this method will return the larger of the values returned by + * {@link #getMemoryForMapTask()} and {@link #getMemoryForReduceTask()} + * after converting them into bytes. + * + * @return Memory required to run a task of this job, in bytes, + * or {@link #DISABLED_MEMORY_LIMIT}, if unset. + * @see #setMaxVirtualMemoryForTask(long) + * @deprecated Use {@link #getMemoryForMapTask()} and + * {@link #getMemoryForReduceTask()} + */ + @Deprecated + public long getMaxVirtualMemoryForTask() { + LOG.warn( + "getMaxVirtualMemoryForTask() is deprecated. " + + "Instead use getMemoryForMapTask() and getMemoryForReduceTask()"); + + long value = getLong(MAPRED_TASK_MAXVMEM_PROPERTY, DISABLED_MEMORY_LIMIT); + value = normalizeMemoryConfigValue(value); + if (value == DISABLED_MEMORY_LIMIT) { + value = Math.max(getMemoryForMapTask(), getMemoryForReduceTask()); + value = normalizeMemoryConfigValue(value); + if (value != DISABLED_MEMORY_LIMIT) { + value *= 1024*1024; + } + } + return value; + } + + /** + * Set the maximum amount of memory any task of this job can use. See + * {@link #MAPRED_TASK_MAXVMEM_PROPERTY} + *

+ * mapred.task.maxvmem is split into + * mapred.job.map.memory.mb + * and mapred.job.map.memory.mb,mapred + * each of the new key are set + * as mapred.task.maxvmem / 1024 + * as new values are in MB + * + * @param vmem Maximum amount of virtual memory in bytes any task of this job + * can use. + * @see #getMaxVirtualMemoryForTask() + * @deprecated + * Use {@link #setMemoryForMapTask(long mem)} and + * Use {@link #setMemoryForReduceTask(long mem)} + */ + @Deprecated + public void setMaxVirtualMemoryForTask(long vmem) { + LOG.warn("setMaxVirtualMemoryForTask() is deprecated."+ + "Instead use setMemoryForMapTask() and setMemoryForReduceTask()"); + if(vmem != DISABLED_MEMORY_LIMIT && vmem < 0) { + setMemoryForMapTask(DISABLED_MEMORY_LIMIT); + setMemoryForReduceTask(DISABLED_MEMORY_LIMIT); + } + + if(get(JobConf.MAPRED_TASK_MAXVMEM_PROPERTY) == null) { + setMemoryForMapTask(vmem / (1024 * 1024)); //Changing bytes to mb + setMemoryForReduceTask(vmem / (1024 * 1024));//Changing bytes to mb + }else{ + this.setLong(JobConf.MAPRED_TASK_MAXVMEM_PROPERTY,vmem); + } + } + + /** + * @deprecated this variable is deprecated and nolonger in use. + */ + @Deprecated + public long getMaxPhysicalMemoryForTask() { + LOG.warn("The API getMaxPhysicalMemoryForTask() is deprecated." + + " Refer to the APIs getMemoryForMapTask() and" + + " getMemoryForReduceTask() for details."); + return -1; + } + + /* + * @deprecated this + */ + @Deprecated + public void setMaxPhysicalMemoryForTask(long mem) { + LOG.warn("The API setMaxPhysicalMemoryForTask() is deprecated." + + " The value set is ignored. Refer to " + + " setMemoryForMapTask() and setMemoryForReduceTask() for details."); + } + + static String deprecatedString(String key) { + return "The variable " + key + " is no longer used."; + } + + private void checkAndWarnDeprecation() { + if(get(JobConf.MAPRED_TASK_MAXVMEM_PROPERTY) != null) { + LOG.warn(JobConf.deprecatedString(JobConf.MAPRED_TASK_MAXVMEM_PROPERTY) + + " Instead use " + JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY + + " and " + JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY); + } + } + + +} + diff --git a/src/mapred/org/apache/hadoop/mapred/JobConfigurable.java b/src/mapred/org/apache/hadoop/mapred/JobConfigurable.java new file mode 100644 index 0000000..e6c2304 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JobConfigurable.java @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +/** That what may be configured. */ +@Deprecated +public interface JobConfigurable { + /** Initializes a new instance from a {@link JobConf}. 
+ * + * @param job the configuration + */ + void configure(JobConf job); +} diff --git a/src/mapred/org/apache/hadoop/mapred/JobContext.java b/src/mapred/org/apache/hadoop/mapred/JobContext.java new file mode 100644 index 0000000..987b4c0 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JobContext.java @@ -0,0 +1,58 @@ +/* Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import org.apache.hadoop.util.Progressable; + +/** + * @deprecated Use {@link org.apache.hadoop.mapreduce.JobContext} instead. + */ +@Deprecated +public class JobContext extends org.apache.hadoop.mapreduce.JobContext { + private JobConf job; + private Progressable progress; + + JobContext(JobConf conf, org.apache.hadoop.mapreduce.JobID jobId, + Progressable progress) { + super(conf, jobId); + this.job = conf; + this.progress = progress; + } + + JobContext(JobConf conf, org.apache.hadoop.mapreduce.JobID jobId) { + this(conf, jobId, Reporter.NULL); + } + + /** + * Get the job Configuration + * + * @return JobConf + */ + public JobConf getJobConf() { + return job; + } + + /** + * Get the progress mechanism for reporting progress. + * + * @return progress mechanism + */ + public Progressable getProgressible() { + return progress; + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/JobEndNotifier.java b/src/mapred/org/apache/hadoop/mapred/JobEndNotifier.java new file mode 100644 index 0000000..fc5a6e3 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JobEndNotifier.java @@ -0,0 +1,242 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
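For context, a small hypothetical implementation of the JobConfigurable interface above; the old-API Mapper and Reducer interfaces extend it, so configure(JobConf) is the usual hook for per-job setup. The class name and the example.greeting property are invented for the sketch.

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobConfigurable;

public class GreetingComponent implements JobConfigurable {
  private String greeting;

  // Called once with the job's configuration before the component is used.
  public void configure(JobConf job) {
    greeting = job.get("example.greeting", "hello");
  }

  public String greet(String name) {
    return greeting + ", " + name;
  }
}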
+ */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.DelayQueue; +import java.util.concurrent.Delayed; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.httpclient.HttpClient; +import org.apache.commons.httpclient.HttpMethod; +import org.apache.commons.httpclient.URI; +import org.apache.commons.httpclient.methods.GetMethod; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +public class JobEndNotifier { + private static final Log LOG = + LogFactory.getLog(JobEndNotifier.class.getName()); + + private static Thread thread; + private static volatile boolean running; + private static BlockingQueue queue = + new DelayQueue(); + + public static void startNotifier() { + running = true; + thread = new Thread( + new Runnable() { + public void run() { + try { + while (running) { + sendNotification(queue.take()); + } + } + catch (InterruptedException irex) { + if (running) { + LOG.error("Thread has ended unexpectedly", irex); + } + } + } + + private void sendNotification(JobEndStatusInfo notification) { + try { + int code = httpNotification(notification.getUri()); + if (code != 200) { + throw new IOException("Invalid response status code: " + code); + } + } + catch (IOException ioex) { + LOG.error("Notification failure [" + notification + "]", ioex); + if (notification.configureForRetry()) { + try { + queue.put(notification); + } + catch (InterruptedException iex) { + LOG.error("Notification queuing error [" + notification + "]", + iex); + } + } + } + catch (Exception ex) { + LOG.error("Notification failure [" + notification + "]", ex); + } + } + + } + + ); + thread.start(); + } + + public static void stopNotifier() { + running = false; + thread.interrupt(); + } + + private static JobEndStatusInfo createNotification(JobConf conf, + JobStatus status) { + JobEndStatusInfo notification = null; + String uri = conf.getJobEndNotificationURI(); + if (uri != null) { + // +1 to make logic for first notification identical to a retry + int retryAttempts = conf.getInt("job.end.retry.attempts", 0) + 1; + long retryInterval = conf.getInt("job.end.retry.interval", 30000); + if (uri.contains("$jobId")) { + uri = uri.replace("$jobId", status.getJobID().toString()); + } + if (uri.contains("$jobStatus")) { + String statusStr = + (status.getRunState() == JobStatus.SUCCEEDED) ? "SUCCEEDED" : + (status.getRunState() == JobStatus.FAILED) ? 
"FAILED" : "KILLED"; + uri = uri.replace("$jobStatus", statusStr); + } + notification = new JobEndStatusInfo(uri, retryAttempts, retryInterval); + } + return notification; + } + + public static void registerNotification(JobConf jobConf, JobStatus status) { + JobEndStatusInfo notification = createNotification(jobConf, status); + if (notification != null) { + try { + queue.put(notification); + } + catch (InterruptedException iex) { + LOG.error("Notification queuing failure [" + notification + "]", iex); + } + } + } + + private static int httpNotification(String uri) throws IOException { + URI url = new URI(uri, false); + HttpClient m_client = new HttpClient(); + HttpMethod method = new GetMethod(url.getEscapedURI()); + method.setRequestHeader("Accept", "*/*"); + return m_client.executeMethod(method); + } + + // for use by the LocalJobRunner, without using a thread&queue, + // simple synchronous way + public static void localRunnerNotification(JobConf conf, JobStatus status) { + JobEndStatusInfo notification = createNotification(conf, status); + if (notification != null) { + while (notification.configureForRetry()) { + try { + int code = httpNotification(notification.getUri()); + if (code != 200) { + throw new IOException("Invalid response status code: " + code); + } + else { + break; + } + } + catch (IOException ioex) { + LOG.error("Notification error [" + notification.getUri() + "]", ioex); + } + catch (Exception ex) { + LOG.error("Notification error [" + notification.getUri() + "]", ex); + } + try { + synchronized (Thread.currentThread()) { + Thread.currentThread().sleep(notification.getRetryInterval()); + } + } + catch (InterruptedException iex) { + LOG.error("Notification retry error [" + notification + "]", iex); + } + } + } + } + + private static class JobEndStatusInfo implements Delayed { + private String uri; + private int retryAttempts; + private long retryInterval; + private long delayTime; + + JobEndStatusInfo(String uri, int retryAttempts, long retryInterval) { + this.uri = uri; + this.retryAttempts = retryAttempts; + this.retryInterval = retryInterval; + this.delayTime = System.currentTimeMillis(); + } + + public String getUri() { + return uri; + } + + public int getRetryAttempts() { + return retryAttempts; + } + + public long getRetryInterval() { + return retryInterval; + } + + public long getDelayTime() { + return delayTime; + } + + public boolean configureForRetry() { + boolean retry = false; + if (getRetryAttempts() > 0) { + retry = true; + delayTime = System.currentTimeMillis() + retryInterval; + } + retryAttempts--; + return retry; + } + + public long getDelay(TimeUnit unit) { + long n = this.delayTime - System.currentTimeMillis(); + return unit.convert(n, TimeUnit.MILLISECONDS); + } + + public int compareTo(Delayed d) { + return (int)(delayTime - ((JobEndStatusInfo)d).delayTime); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof JobEndStatusInfo)) { + return false; + } + if (delayTime == ((JobEndStatusInfo)o).delayTime) { + return true; + } + return false; + } + + @Override + public int hashCode() { + return 37 * 17 + (int) (delayTime^(delayTime>>>32)); + } + + @Override + public String toString() { + return "URL: " + uri + " remaining retries: " + retryAttempts + + " interval: " + retryInterval; + } + + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/JobHistory.java b/src/mapred/org/apache/hadoop/mapred/JobHistory.java new file mode 100644 index 0000000..946b09c --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JobHistory.java @@ 
-0,0 +1,2187 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileFilter;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.PrintWriter;
+import java.io.UnsupportedEncodingException;
+import java.net.URLDecoder;
+import java.net.URLEncoder;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.Map.Entry;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.StringUtils;
+
+/**
+ * Provides methods for writing to and reading from job history.
+ * Job history works in append mode; JobHistory and its inner classes provide
+ * methods to log job events.
+ *
+ * Job history is split into multiple files. Each file is plain text in which
+ * every line has the format [type (key=value)*], where type identifies the
+ * kind of record and maps to one of the inner classes of this class.
+ *
+ * Job history is maintained in a master index which contains start/stop times
+ * of all jobs with a few other job-level properties. Apart from this, each
+ * job's history is maintained in a separate history file. Job history file
+ * names follow the format jobtrackerId_jobid.
+ *
+ * For parsing the job history, a listener-based interface is supported: each
+ * line is parsed and passed to the listener. The listener can build an object
+ * model of the history, or look for specific events and discard the rest.
+ *
+ * CHANGE LOG :
+ * Version 0 : The history has the following format :
+ *             TAG KEY1="VALUE1" KEY2="VALUE2" and so on.
+ *             TAG can be Job, Task, MapAttempt or ReduceAttempt.
+ *             Note that a '"' is the line delimiter.
+ * Version 1 : Changes the line delimiter to '.'
+ *             Values are now escaped for unambiguous parsing.
+ *             Added the Meta tag to store version info.
+ */ +public class JobHistory { + + static final long VERSION = 1L; + public static final Log LOG = LogFactory.getLog(JobHistory.class); + private static final String DELIMITER = " "; + static final char LINE_DELIMITER_CHAR = '.'; + static final char[] charsToEscape = new char[] {'"', '=', + LINE_DELIMITER_CHAR}; + static final String DIGITS = "[0-9]+"; + + static final String KEY = "(\\w+)"; + // value is any character other than quote, but escaped quotes can be there + static final String VALUE = "[^\"\\\\]*+(?:\\\\.[^\"\\\\]*+)*+"; + + static final Pattern pattern = Pattern.compile(KEY + "=" + "\"" + VALUE + "\""); + + public static final int JOB_NAME_TRIM_LENGTH = 50; + private static String JOBTRACKER_UNIQUE_STRING = null; + private static String LOG_DIR = null; + private static boolean disableHistory = true; + private static final String SECONDARY_FILE_SUFFIX = ".recover"; + private static long jobHistoryBlockSize = 0; + private static String jobtrackerHostname; + private static JobHistoryFilesManager fileManager = null; + final static FsPermission HISTORY_DIR_PERMISSION = + FsPermission.createImmutable((short) 0755); // rwxr-x--- + final static FsPermission HISTORY_FILE_PERMISSION = + FsPermission.createImmutable((short) 0744); // rwxr----- + private static FileSystem LOGDIR_FS; // log dir filesystem + private static FileSystem DONEDIR_FS; // Done dir filesystem + private static JobConf jtConf; + private static Path DONE = null; // folder for completed jobs + /** + * A filter for conf files + */ + private static final PathFilter CONF_FILTER = new PathFilter() { + public boolean accept(Path path) { + return path.getName().endsWith("_conf.xml"); + } + }; + + private static class LogTask implements Runnable { + String data; + PrintWriter out; + + LogTask(PrintWriter out, String data) { + this.out = out; + this.data = data; + } + + @Override + public void run() { + out.println(data); + } + } + + private static class CloseWriters implements Runnable { + + List writer; + + CloseWriters(List writer) { + this.writer = writer; + } + + @Override + public void run() { + for (PrintWriter out : writer) { + out.close(); + } + synchronized (writer) { + writer.clear(); + writer.notify(); + } + } + } + + + private static Map jobHistoryFileMap = + Collections.synchronizedMap( + new LinkedHashMap()); + + private static class MovedFileInfo { + private final String historyFile; + private final long timestamp; + public MovedFileInfo(String historyFile, long timestamp) { + this.historyFile = historyFile; + this.timestamp = timestamp; + } + } + + /** + * Given the job id, return the history file path from the cache + */ + public static String getHistoryFilePath(JobID jobId) { + MovedFileInfo info = jobHistoryFileMap.get(jobId); + if (info == null) { + return null; + } + return info.historyFile; + } + + /** + * A class that manages all the files related to a job. For now + * - writers : list of open files + * - job history filename + * - job conf filename + */ + private static class JobHistoryFilesManager { + // a private (virtual) folder for all the files related to a running job + private static class FilesHolder { + ArrayList writers = new ArrayList(); + Path historyFilename; // path of job history file + Path confFilename; // path of job's conf + } + + private ThreadPoolExecutor ioExecutor = null; + private ThreadPoolExecutor executor = null; + private final Configuration conf; + private final JobTracker jobTracker; + private int maxThreads; + + // cache from job-key to files associated with it. 
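To make the record format concrete, here is a standalone sketch (plain JDK, not part of the patch) that builds a version-1 style line and pulls out its KEY="VALUE" pairs with a regex of the same shape as the pattern constant above. The record values are made up.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class HistoryLineFormatDemo {
  public static void main(String[] args) {
    // A made-up version-1 record: TYPE KEY="VALUE" ... terminated by '.'.
    String line = "Job JOBID=\"job_201012011611_0001\" JOBNAME=\"word count\" "
        + "USER=\"alice\" JOB_STATUS=\"SUCCESS\" .";
    // Values may contain backslash-escaped '"', '=' or '.' characters.
    Pattern kv = Pattern.compile("(\\w+)=\"([^\"\\\\]*(?:\\\\.[^\"\\\\]*)*)\"");
    String recordType = line.substring(0, line.indexOf(' '));
    System.out.println("record type: " + recordType);
    Matcher m = kv.matcher(line);
    while (m.find()) {
      System.out.println("  " + m.group(1) + " -> " + m.group(2));
    }
  }
}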
+ private Map fileCache = + new ConcurrentHashMap(); + + JobHistoryFilesManager(Configuration conf, JobTracker jobTracker) + throws IOException { + this.conf = conf; + this.jobTracker = jobTracker; + this.maxThreads = conf.getInt("mapred.jobtracker.historythreads.maximum", 3); + } + + void startIOExecutor() { + ioExecutor = new ThreadPoolExecutor(1, 1, 1, TimeUnit.HOURS, + new LinkedBlockingQueue()); + } + + void start() { + executor = new ThreadPoolExecutor(1, maxThreads, 1, + TimeUnit.HOURS, new LinkedBlockingQueue()); + } + + private FilesHolder getFileHolder(JobID id) { + FilesHolder holder = fileCache.get(id); + if (holder == null) { + holder = new FilesHolder(); + fileCache.put(id, holder); + } + return holder; + } + + void addWriteTask(LogTask task) { + ioExecutor.execute(task); + } + + void addCloseTask(CloseWriters close) { + ioExecutor.execute(close); + } + + void addWriter(JobID id, PrintWriter writer) { + FilesHolder holder = getFileHolder(id); + holder.writers.add(writer); + } + + void setHistoryFile(JobID id, Path file) { + FilesHolder holder = getFileHolder(id); + holder.historyFilename = file; + } + + void setConfFile(JobID id, Path file) { + FilesHolder holder = getFileHolder(id); + holder.confFilename = file; + } + + ArrayList getWriters(JobID id) { + FilesHolder holder = fileCache.get(id); + return holder == null ? null : holder.writers; + } + + Path getHistoryFile(JobID id) { + FilesHolder holder = fileCache.get(id); + return holder == null ? null : holder.historyFilename; + } + + Path getConfFileWriters(JobID id) { + FilesHolder holder = fileCache.get(id); + return holder == null ? null : holder.confFilename; + } + + void purgeJob(JobID id) { + fileCache.remove(id); + } + + void moveToDone(final JobID id) { + if (disableHistory) { + return; + } + final List paths = new ArrayList(); + final Path historyFile = fileManager.getHistoryFile(id); + if (historyFile == null) { + LOG.info("No file for job-history with " + id + " found in cache!"); + } else { + paths.add(historyFile); + } + + final Path confPath = fileManager.getConfFileWriters(id); + if (confPath == null) { + LOG.info("No file for jobconf with " + id + " found in cache!"); + } else { + paths.add(confPath); + } + + executor.execute(new Runnable() { + + public void run() { + //move the files to DONE folder + try { + List writers = fileManager.getWriters(id); + synchronized (writers) { + while (writers.size() > 0) { + writers.wait(); + } + } + + for (Path path : paths) { + //check if path exists, in case of retries it may not exist + if (LOGDIR_FS.exists(path)) { + LOG.info("Moving " + path.toString() + " to " + + DONE.toString()); + DONEDIR_FS.moveFromLocalFile(path, DONE); + DONEDIR_FS.setPermission(new Path(DONE, path.getName()), + new FsPermission(HISTORY_FILE_PERMISSION)); + } + } + } catch (Throwable e) { + LOG.error("Unable to move history file to DONE folder.", e); + } + String historyFileDonePath = null; + if (historyFile != null) { + historyFileDonePath = new Path(DONE, + historyFile.getName()).toString(); + } + + jobHistoryFileMap.put(id, new MovedFileInfo(historyFileDonePath, + System.currentTimeMillis())); + jobTracker.historyFileCopied(id, historyFileDonePath); + + //purge the job from the cache + fileManager.purgeJob(id); + } + + }); + } + } + /** + * Record types are identifiers for each line of log in history files. + * A record type appears as the first token in a single line of log. 
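The files manager above funnels every write through a one-thread executor (LogTask plus ioExecutor), so history logging never blocks the caller and lines queued by different threads cannot interleave within a writer. A generic sketch of that design choice, using plain java.util.concurrent rather than the manager class itself; the record strings are invented.

import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class SerializedHistoryWriterDemo {
  public static void main(String[] args) throws InterruptedException {
    StringWriter sink = new StringWriter();        // stand-in for a history file
    final PrintWriter out = new PrintWriter(sink);
    ExecutorService ioExecutor = Executors.newSingleThreadExecutor();
    for (int i = 0; i < 3; i++) {
      final String record = "Task TASKID=\"task_" + i + "\" .";
      ioExecutor.execute(new Runnable() {          // same idea as LogTask
        public void run() {
          out.println(record);
        }
      });
    }
    ioExecutor.shutdown();
    ioExecutor.awaitTermination(10, TimeUnit.SECONDS);
    out.flush();
    System.out.print(sink);
  }
}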
+ */ + public static enum RecordTypes { + Jobtracker, Job, Task, MapAttempt, ReduceAttempt, Meta + } + + /** + * Job history files contain key="value" pairs, where keys belong to this enum. + * It acts as a global namespace for all keys. + */ + public static enum Keys { + JOBTRACKERID, + START_TIME, FINISH_TIME, JOBID, JOBNAME, USER, JOBCONF, SUBMIT_TIME, + LAUNCH_TIME, TOTAL_MAPS, TOTAL_REDUCES, FAILED_MAPS, FAILED_REDUCES, + FINISHED_MAPS, FINISHED_REDUCES, JOB_STATUS, TASKID, HOSTNAME, TASK_TYPE, + ERROR, TASK_ATTEMPT_ID, TASK_STATUS, COPY_PHASE, SORT_PHASE, REDUCE_PHASE, + SHUFFLE_FINISHED, SORT_FINISHED, COUNTERS, SPLITS, JOB_PRIORITY, HTTP_PORT, + TRACKER_NAME, STATE_STRING, VERSION, MAP_COUNTERS, REDUCE_COUNTERS + } + + /** + * This enum contains some of the values commonly used by history log events. + * since values in history can only be strings - Values.name() is used in + * most places in history file. + */ + public static enum Values { + SUCCESS, FAILED, KILLED, MAP, REDUCE, CLEANUP, RUNNING, PREP, SETUP + } + + /** + * Initialize JobHistory files. + * @param conf Jobconf of the job tracker. + * @param hostname jobtracker's hostname + * @param jobTrackerStartTime jobtracker's start time + * @return true if intialized properly + * false otherwise + */ + public static boolean init(JobTracker jobTracker, JobConf conf, + String hostname, long jobTrackerStartTime){ + try { + LOG_DIR = conf.get("hadoop.job.history.location" , + "file:///" + new File( + System.getProperty("hadoop.log.dir")).getAbsolutePath() + + File.separator + "history"); + JOBTRACKER_UNIQUE_STRING = hostname + "_" + + String.valueOf(jobTrackerStartTime) + "_"; + jobtrackerHostname = hostname; + Path logDir = new Path(LOG_DIR); + LOGDIR_FS = logDir.getFileSystem(conf); + if (!LOGDIR_FS.exists(logDir)){ + if (!LOGDIR_FS.mkdirs(logDir, new FsPermission(HISTORY_DIR_PERMISSION))) { + throw new IOException("Mkdirs failed to create " + logDir.toString()); + } + } + conf.set("hadoop.job.history.location", LOG_DIR); + disableHistory = false; + // set the job history block size (default is 3MB) + jobHistoryBlockSize = + conf.getLong("mapred.jobtracker.job.history.block.size", + 3 * 1024 * 1024); + jtConf = conf; + + // initialize the file manager + fileManager = new JobHistoryFilesManager(conf, jobTracker); + } catch(IOException e) { + LOG.error("Failed to initialize JobHistory log file", e); + disableHistory = true; + } + fileManager.startIOExecutor(); + return !(disableHistory); + } + + static boolean initDone(JobConf conf, FileSystem fs){ + try { + //if completed job history location is set, use that + String doneLocation = conf. + get("mapred.job.tracker.history.completed.location"); + if (doneLocation != null) { + DONE = fs.makeQualified(new Path(doneLocation)); + DONEDIR_FS = fs; + } else { + DONE = new Path(LOG_DIR, "done"); + DONEDIR_FS = LOGDIR_FS; + } + + //If not already present create the done folder with appropriate + //permission + if (!DONEDIR_FS.exists(DONE)) { + LOG.info("Creating DONE folder at "+ DONE); + if (! DONEDIR_FS.mkdirs(DONE, + new FsPermission(HISTORY_DIR_PERMISSION))) { + throw new IOException("Mkdirs failed to create " + DONE.toString()); + } + } + + fileManager.start(); + } catch(IOException e) { + LOG.error("Failed to initialize JobHistory log file", e); + disableHistory = true; + } + return !(disableHistory); + } + + + /** + * Manages job-history's meta information such as version etc. 
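For reference, these are the cluster-side keys read by init() and initDone() above; a minimal sketch with the defaults spelled out. The namenode host and directory paths are placeholders.

import org.apache.hadoop.mapred.JobConf;

public class HistoryLocationConfigSketch {
  public static void main(String[] args) {
    JobConf conf = new JobConf();
    // Where running jobs' history files are written
    // (default: ${hadoop.log.dir}/history on the local file system).
    conf.set("hadoop.job.history.location",
             "hdfs://namenode:9000/var/log/hadoop/history");
    // Where completed jobs' files are moved; defaults to <history location>/done.
    conf.set("mapred.job.tracker.history.completed.location",
             "hdfs://namenode:9000/var/log/hadoop/history/done");
    // Block size for history files (default 3 MB) and mover thread pool size (default 3).
    conf.setLong("mapred.jobtracker.job.history.block.size", 3 * 1024 * 1024);
    conf.setInt("mapred.jobtracker.historythreads.maximum", 3);
  }
}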
+ * Helps in logging version information to the job-history and recover + * version information from the history. + */ + static class MetaInfoManager implements Listener { + private long version = 0L; + private KeyValuePair pairs = new KeyValuePair(); + + // Extract the version of the history that was used to write the history + public MetaInfoManager(String line) throws IOException { + if (null != line) { + // Parse the line + parseLine(line, this, false); + } + } + + // Get the line delimiter + char getLineDelim() { + if (version == 0) { + return '"'; + } else { + return LINE_DELIMITER_CHAR; + } + } + + // Checks if the values are escaped or not + boolean isValueEscaped() { + // Note that the values are not escaped in version 0 + return version != 0; + } + + public void handle(RecordTypes recType, Map values) + throws IOException { + // Check if the record is of type META + if (RecordTypes.Meta == recType) { + pairs.handle(values); + version = pairs.getLong(Keys.VERSION); // defaults to 0 + } + } + + /** + * Logs history meta-info to the history file. This needs to be called once + * per history file. + * @param jobId job id, assigned by jobtracker. + */ + static void logMetaInfo(ArrayList writers){ + if (!disableHistory){ + if (null != writers){ + JobHistory.log(writers, RecordTypes.Meta, + new Keys[] {Keys.VERSION}, + new String[] {String.valueOf(VERSION)}); + } + } + } + } + + /** Escapes the string especially for {@link JobHistory} + */ + static String escapeString(String data) { + return StringUtils.escapeString(data, StringUtils.ESCAPE_CHAR, + charsToEscape); + } + + /** + * Parses history file and invokes Listener.handle() for + * each line of history. It can be used for looking through history + * files for specific items without having to keep whole history in memory. + * @param path path to history file + * @param l Listener for history events + * @param fs FileSystem where history file is present + * @throws IOException + */ + public static void parseHistoryFromFS(String path, Listener l, FileSystem fs) + throws IOException{ + FSDataInputStream in = fs.open(new Path(path)); + BufferedReader reader = new BufferedReader(new InputStreamReader (in)); + try { + String line = null; + StringBuffer buf = new StringBuffer(); + + // Read the meta-info line. Note that this might a jobinfo line for files + // written with older format + line = reader.readLine(); + + // Check if the file is empty + if (line == null) { + return; + } + + // Get the information required for further processing + MetaInfoManager mgr = new MetaInfoManager(line); + boolean isEscaped = mgr.isValueEscaped(); + String lineDelim = String.valueOf(mgr.getLineDelim()); + String escapedLineDelim = + StringUtils.escapeString(lineDelim, StringUtils.ESCAPE_CHAR, + mgr.getLineDelim()); + + do { + buf.append(line); + if (!line.trim().endsWith(lineDelim) + || line.trim().endsWith(escapedLineDelim)) { + buf.append("\n"); + continue; + } + parseLine(buf.toString(), l, isEscaped); + buf = new StringBuffer(); + } while ((line = reader.readLine())!= null); + } finally { + try { reader.close(); } catch (IOException ex) {} + } + } + + /** + * Parse a single line of history. 
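A sketch of the listener-based reading path described above (illustrative, not from the patch): it assumes the JobHistory.Listener interface referenced by parseHistoryFromFS(), with the generic handle(RecordTypes, Map<Keys, String>) signature implied by MetaInfoManager, and takes the history file path as a command-line argument.

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.JobHistory;
import org.apache.hadoop.mapred.JobHistory.Keys;
import org.apache.hadoop.mapred.JobHistory.RecordTypes;

public class HistoryScanSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // args[0]: path of one job's history file on this file system.
    JobHistory.parseHistoryFromFS(args[0], new JobHistory.Listener() {
      public void handle(RecordTypes recType, Map<Keys, String> values)
          throws IOException {
        // Print every Job record that carries a status.
        if (recType == RecordTypes.Job && values.containsKey(Keys.JOB_STATUS)) {
          System.out.println(values.get(Keys.JOBID) + " -> "
              + values.get(Keys.JOB_STATUS));
        }
      }
    }, fs);
  }
}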
+ * @param line the history line to parse
+ * @param l listener to hand the parsed key-value pairs to
+ * @throws IOException
+ */
+  private static void parseLine(String line, Listener l, boolean isEscaped)
+  throws IOException {
+    // extract the record type
+    int idx = line.indexOf(' ');
+    String recType = line.substring(0, idx);
+    String data = line.substring(idx + 1, line.length());
+
+    Matcher matcher = pattern.matcher(data);
+    Map<Keys, String> parseBuffer = new HashMap<Keys, String>();
+
+    while (matcher.find()) {
+      String tuple = matcher.group(0);
+      String[] parts = StringUtils.split(tuple, StringUtils.ESCAPE_CHAR, '=');
+      String value = parts[1].substring(1, parts[1].length() - 1);
+      if (isEscaped) {
+        value = StringUtils.unEscapeString(value, StringUtils.ESCAPE_CHAR,
+                                           charsToEscape);
+      }
+      parseBuffer.put(Keys.valueOf(parts[0]), value);
+    }
+
+    l.handle(RecordTypes.valueOf(recType), parseBuffer);
+
+    parseBuffer.clear();
+  }
+
+  /**
+   * Log a raw record type with keys and values. This method is generally not
+   * used directly.
+   * @param recordType type of log event
+   * @param key key
+   * @param value value
+   */
+  static void log(PrintWriter out, RecordTypes recordType, Keys key,
+                  String value) {
+    value = escapeString(value);
+    out.println(recordType.name() + DELIMITER + key + "=\"" + value + "\""
+                + DELIMITER + LINE_DELIMITER_CHAR);
+  }
+
+  /**
+   * Log a record with a number of keys and values. The keys and values arrays
+   * must have the same length.
+   * @param recordType type of log event
+   * @param keys the keys to log
+   * @param values the corresponding values, one per key
+   */
+  static void log(ArrayList<PrintWriter> writers, RecordTypes recordType,
+                  Keys[] keys, String[] values) {
+    StringBuffer buf = new StringBuffer(recordType.name());
+    buf.append(DELIMITER);
+    for (int i = 0; i < keys.length; i++) {
+      buf.append(keys[i]);
+      buf.append("=\"");
+      values[i] = escapeString(values[i]);
+      buf.append(values[i]);
+      buf.append("\"");
+      buf.append(DELIMITER);
+    }
+    buf.append(LINE_DELIMITER_CHAR);
+
+    for (PrintWriter out : writers) {
+      LogTask task = new LogTask(out, buf.toString());
+      fileManager.addWriteTask(task);
+    }
+  }
+
+  /**
+   * Returns the history-disabled status. By default history is enabled, so
+   * this method returns false.
+   * @return true if history logging is disabled, false otherwise.
+   */
+  public static boolean isDisableHistory() {
+    return disableHistory;
+  }
+
+  /**
+   * Enable/disable history logging. The default value is false, so history
+   * is enabled by default.
+   * @param disableHistory true if history should be disabled, false otherwise.
+   */
+  public static void setDisableHistory(boolean disableHistory) {
+    JobHistory.disableHistory = disableHistory;
+  }
+
+  /**
+   * Get the history location.
+   */
+  static Path getJobHistoryLocation() {
+    return new Path(LOG_DIR);
+  }
+
+  /**
+   * Get the history location for completed jobs.
+   */
+  static Path getCompletedJobHistoryLocation() {
+    return DONE;
+  }
+
+  /**
+   * Base class containing utilities to manage typed key-value pairs keyed by
+   * the {@link Keys} enum.
+   */
+  static class KeyValuePair {
+    private Map<Keys, String> values = new HashMap<Keys, String>();
+
+    /**
+     * Get the 'String' value for a given key. Most places use Strings as
+     * values, so the default 'get' method returns 'String'. This method never
+     * returns null, to ease use in GUIs: if no value is found it returns the
+     * empty string "".
+     * @param k key to look up
+     * @return the stored value, or "" if none is found
+     */
+    public String get(Keys k) {
+      String s = values.get(k);
+      return s == null ? "" : s;
+    }
+    /**
+     * Convert a value from the history to an int and return it.
+     * Returns 0 if no value is found.
+ * @param k key + */ + public int getInt(Keys k){ + String s = values.get(k); + if (null != s){ + return Integer.parseInt(s); + } + return 0; + } + /** + * Convert value from history to int and return. + * if no value is found it returns 0. + * @param k + */ + public long getLong(Keys k){ + String s = values.get(k); + if (null != s){ + return Long.parseLong(s); + } + return 0; + } + /** + * Set value for the key. + * @param k + * @param s + */ + public void set(Keys k, String s){ + values.put(k, s); + } + /** + * Adds all values in the Map argument to its own values. + * @param m + */ + public void set(Map m){ + values.putAll(m); + } + /** + * Reads values back from the history, input is same Map as passed to Listener by parseHistory(). + * @param values + */ + public synchronized void handle(Map values){ + set(values); + } + /** + * Returns Map containing all key-values. + */ + public Map getValues(){ + return values; + } + } + + /** + * Helper class for logging or reading back events related to job start, finish or failure. + */ + public static class JobInfo extends KeyValuePair{ + + private Map allTasks = new TreeMap(); + + /** Create new JobInfo */ + public JobInfo(String jobId){ + set(Keys.JOBID, jobId); + } + + /** + * Returns all map and reduce tasks . + */ + public Map getAllTasks() { return allTasks; } + + /** + * Get the path of the locally stored job file + * @param jobId id of the job + * @return the path of the job file on the local file system + */ + public static String getLocalJobFilePath(JobID jobId){ + return System.getProperty("hadoop.log.dir") + File.separator + + jobId + "_conf.xml"; + } + + /** + * Helper function to encode the URL of the path of the job-history + * log file. + * + * @param logFile path of the job-history file + * @return URL encoded path + * @throws IOException + */ + public static String encodeJobHistoryFilePath(String logFile) + throws IOException { + Path rawPath = new Path(logFile); + String encodedFileName = null; + try { + encodedFileName = URLEncoder.encode(rawPath.getName(), "UTF-8"); + } catch (UnsupportedEncodingException uee) { + IOException ioe = new IOException(); + ioe.initCause(uee); + ioe.setStackTrace(uee.getStackTrace()); + throw ioe; + } + + Path encodedPath = new Path(rawPath.getParent(), encodedFileName); + return encodedPath.toString(); + } + + /** + * Helper function to encode the URL of the filename of the job-history + * log file. + * + * @param logFileName file name of the job-history file + * @return URL encoded filename + * @throws IOException + */ + public static String encodeJobHistoryFileName(String logFileName) + throws IOException { + String encodedFileName = null; + try { + encodedFileName = URLEncoder.encode(logFileName, "UTF-8"); + } catch (UnsupportedEncodingException uee) { + IOException ioe = new IOException(); + ioe.initCause(uee); + ioe.setStackTrace(uee.getStackTrace()); + throw ioe; + } + return encodedFileName; + } + + /** + * Helper function to decode the URL of the filename of the job-history + * log file. 
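The encode/decode helpers above are a thin wrapper around URLEncoder and URLDecoder with UTF-8; a minimal round-trip sketch follows, with a made-up file name in the jobtrackerHostname_startTime_jobId_user_jobName convention used elsewhere in this class.

import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.net.URLEncoder;

public class HistoryFileNameEncodingDemo {
  public static void main(String[] args) throws UnsupportedEncodingException {
    String raw = "jt-host_1291245000000_job_201012011611_0001_alice_word count";
    String encoded = URLEncoder.encode(raw, "UTF-8");  // space becomes '+', etc.
    String decoded = URLDecoder.decode(encoded, "UTF-8");
    System.out.println(encoded);
    System.out.println(raw.equals(decoded));           // true
  }
}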
+ * + * @param logFileName file name of the job-history file + * @return URL decoded filename + * @throws IOException + */ + public static String decodeJobHistoryFileName(String logFileName) + throws IOException { + String decodedFileName = null; + try { + decodedFileName = URLDecoder.decode(logFileName, "UTF-8"); + } catch (UnsupportedEncodingException uee) { + IOException ioe = new IOException(); + ioe.initCause(uee); + ioe.setStackTrace(uee.getStackTrace()); + throw ioe; + } + return decodedFileName; + } + + /** + * Get the job name from the job conf + */ + static String getJobName(JobConf jobConf) { + String jobName = jobConf.getJobName(); + if (jobName == null || jobName.length() == 0) { + jobName = "NA"; + } + return jobName; + } + + /** + * Get the user name from the job conf + */ + public static String getUserName(JobConf jobConf) { + String user = jobConf.getUser(); + if (user == null || user.length() == 0) { + user = "NA"; + } + return user; + } + + /** + * Get the job history file path given the history filename + */ + public static Path getJobHistoryLogLocation(String logFileName) + { + return LOG_DIR == null ? null : new Path(LOG_DIR, logFileName); + } + + /** + * Get the user job history file path + */ + public static Path getJobHistoryLogLocationForUser(String logFileName, + JobConf jobConf) { + // find user log directory + Path userLogFile = null; + Path outputPath = FileOutputFormat.getOutputPath(jobConf); + String userLogDir = jobConf.get("hadoop.job.history.user.location", + outputPath == null + ? null + : outputPath.toString()); + if ("none".equals(userLogDir)) { + userLogDir = null; + } + if (userLogDir != null) { + userLogDir = userLogDir + Path.SEPARATOR + "_logs" + Path.SEPARATOR + + "history"; + userLogFile = new Path(userLogDir, logFileName); + } + return userLogFile; + } + + /** + * Generates the job history filename for a new job + */ + private static String getNewJobHistoryFileName(JobConf jobConf, JobID id) { + return JOBTRACKER_UNIQUE_STRING + + id.toString() + "_" + getUserName(jobConf) + "_" + + trimJobName(getJobName(jobConf)); + } + + /** + * Trims the job-name if required + */ + private static String trimJobName(String jobName) { + if (jobName.length() > JOB_NAME_TRIM_LENGTH) { + jobName = jobName.substring(0, JOB_NAME_TRIM_LENGTH); + } + return jobName; + } + + private static String escapeRegexChars( String string ) { + return "\\Q"+string.replaceAll("\\\\E", "\\\\E\\\\\\\\E\\\\Q")+"\\E"; + } + + /** + * Recover the job history filename from the history folder. + * Uses the following pattern + * $jt-hostname_[0-9]*_$job-id_$user-$job-name* + * @param jobConf the job conf + * @param id job id + */ + public static synchronized String getJobHistoryFileName(JobConf jobConf, + JobID id) + throws IOException { + return getJobHistoryFileName(jobConf, id, new Path(LOG_DIR), LOGDIR_FS); + } + + static synchronized String getDoneJobHistoryFileName(JobConf jobConf, + JobID id) throws IOException { + if (DONE == null) { + return null; + } + return getJobHistoryFileName(jobConf, id, DONE, DONEDIR_FS); + } + + /** + * @param dir The directory where to search. 
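For completeness, a small sketch of the job-side knob read by getJobHistoryLogLocationForUser() above: hadoop.job.history.user.location defaults to the job's output directory (the copy lands under <dir>/_logs/history), and the literal value "none" switches the user-side copy off. The HDFS path is a placeholder.

import org.apache.hadoop.mapred.JobConf;

public class UserHistoryLocationSketch {
  public static void main(String[] args) {
    JobConf conf = new JobConf();
    // Keep a per-job copy of the history under this directory's _logs/history.
    conf.set("hadoop.job.history.user.location",
             "hdfs://namenode:9000/user/alice/history");
    // Or disable the user-side copy entirely:
    //   conf.set("hadoop.job.history.user.location", "none");
  }
}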
+ */ + private static synchronized String getJobHistoryFileName(JobConf jobConf, + JobID id, Path dir, FileSystem fs) + throws IOException { + String user = getUserName(jobConf); + String jobName = trimJobName(getJobName(jobConf)); + + if (LOG_DIR == null) { + return null; + } + + // Make the pattern matching the job's history file + final Pattern historyFilePattern = + Pattern.compile(jobtrackerHostname + "_" + DIGITS + "_" + + id.toString() + "_" + user + "_" + + escapeRegexChars(jobName) + "+"); + // a path filter that matches 4 parts of the filenames namely + // - jt-hostname + // - job-id + // - username + // - jobname + PathFilter filter = new PathFilter() { + public boolean accept(Path path) { + String fileName = path.getName(); + try { + fileName = decodeJobHistoryFileName(fileName); + } catch (IOException ioe) { + LOG.info("Error while decoding history file " + fileName + "." + + " Ignoring file.", ioe); + return false; + } + return historyFilePattern.matcher(fileName).find(); + } + }; + + FileStatus[] statuses = fs.listStatus(dir, filter); + String filename = null; + if (statuses.length == 0) { + LOG.info("Nothing to recover for job " + id); + } else { + // return filename considering that fact the name can be a + // secondary filename like filename.recover + filename = getPrimaryFilename(statuses[0].getPath().getName(), jobName); + LOG.info("Recovered job history filename for job " + id + " is " + + filename); + } + return filename; + } + + // removes all extra extensions from a filename and returns the core/primary + // filename + private static String getPrimaryFilename(String filename, String jobName) + throws IOException{ + filename = decodeJobHistoryFileName(filename); + // Remove the '.recover' suffix if it exists + if (filename.endsWith(jobName + SECONDARY_FILE_SUFFIX)) { + int newLength = filename.length() - SECONDARY_FILE_SUFFIX.length(); + filename = filename.substring(0, newLength); + } + return encodeJobHistoryFileName(filename); + } + + /** Since there was a restart, there should be a master file and + * a recovery file. Once the recovery is complete, the master should be + * deleted as an indication that the recovery file should be treated as the + * master upon completion or next restart. + * @param fileName the history filename that needs checkpointing + * @param conf Job conf + * @throws IOException + */ + static synchronized void checkpointRecovery(String fileName, JobConf conf) + throws IOException { + Path logPath = JobHistory.JobInfo.getJobHistoryLogLocation(fileName); + if (logPath != null) { + LOG.info("Deleting job history file " + logPath.getName()); + LOGDIR_FS.delete(logPath, false); + } + // do the same for the user file too + logPath = JobHistory.JobInfo.getJobHistoryLogLocationForUser(fileName, + conf); + if (logPath != null) { + FileSystem fs = logPath.getFileSystem(conf); + fs.delete(logPath, false); + } + } + + static String getSecondaryJobHistoryFile(String filename) + throws IOException { + return encodeJobHistoryFileName( + decodeJobHistoryFileName(filename) + SECONDARY_FILE_SUFFIX); + } + + /** Selects one of the two files generated as a part of recovery. + * The thumb rule is that always select the oldest file. + * This call makes sure that only one file is left in the end. 
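To make the file-name matching above concrete, a standalone sketch (plain JDK, values invented) that builds the same kind of jt-hostname_DIGITS_jobid_user_jobname pattern and tests a decoded history file name against it.

import java.util.regex.Pattern;

public class HistoryFileNamePatternDemo {
  public static void main(String[] args) {
    String jobtrackerHostname = "jt-host";
    String jobId = "job_201012011611_0001";
    String user = "alice";
    String jobName = "wordcount";   // already trimmed; quoted like escapeRegexChars()
    Pattern historyFilePattern = Pattern.compile(
        jobtrackerHostname + "_" + "[0-9]+" + "_" + jobId + "_" + user + "_"
            + Pattern.quote(jobName) + "+");
    String candidate = "jt-host_1291245000000_job_201012011611_0001_alice_wordcount";
    System.out.println(historyFilePattern.matcher(candidate).find());  // true
  }
}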
+ * @param conf job conf + * @param logFilePath Path of the log file + * @throws IOException + */ + public synchronized static Path recoverJobHistoryFile(JobConf conf, + Path logFilePath) + throws IOException { + Path ret; + String logFileName = logFilePath.getName(); + String tmpFilename = getSecondaryJobHistoryFile(logFileName); + Path logDir = logFilePath.getParent(); + Path tmpFilePath = new Path(logDir, tmpFilename); + if (LOGDIR_FS.exists(logFilePath)) { + LOG.info(logFileName + " exists!"); + if (LOGDIR_FS.exists(tmpFilePath)) { + LOG.info("Deleting " + tmpFilename + + " and using " + logFileName + " for recovery."); + LOGDIR_FS.delete(tmpFilePath, false); + } + ret = tmpFilePath; + } else { + LOG.info(logFileName + " doesnt exist! Using " + + tmpFilename + " for recovery."); + if (LOGDIR_FS.exists(tmpFilePath)) { + LOG.info("Renaming " + tmpFilename + " to " + logFileName); + LOGDIR_FS.rename(tmpFilePath, logFilePath); + ret = tmpFilePath; + } else { + ret = logFilePath; + } + } + + // do the same for the user files too + logFilePath = getJobHistoryLogLocationForUser(logFileName, conf); + if (logFilePath != null) { + FileSystem fs = logFilePath.getFileSystem(conf); + logDir = logFilePath.getParent(); + tmpFilePath = new Path(logDir, tmpFilename); + if (fs.exists(logFilePath)) { + LOG.info(logFileName + " exists!"); + if (fs.exists(tmpFilePath)) { + LOG.info("Deleting " + tmpFilename + " and making " + logFileName + + " as the master history file for user."); + fs.delete(tmpFilePath, false); + } + } else { + LOG.info(logFileName + " doesnt exist! Using " + + tmpFilename + " as the master history file for user."); + if (fs.exists(tmpFilePath)) { + LOG.info("Renaming " + tmpFilename + " to " + logFileName + + " in user directory"); + fs.rename(tmpFilePath, logFilePath); + } + } + } + + return ret; + } + + /** Finalize the recovery and make one file in the end. + * This invloves renaming the recover file to the master file. + * Note that this api should be invoked only if recovery is involved. + * @param id Job id + * @param conf the job conf + * @throws IOException + */ + static synchronized void finalizeRecovery(JobID id, JobConf conf) + throws IOException { + Path tmpLogPath = fileManager.getHistoryFile(id); + if (tmpLogPath == null) { + LOG.debug("No file for job with " + id + " found in cache!"); + return; + } + String tmpLogFileName = tmpLogPath.getName(); + + // get the primary filename from the cached filename + String masterLogFileName = + getPrimaryFilename(tmpLogFileName, getJobName(conf)); + Path masterLogPath = new Path(tmpLogPath.getParent(), masterLogFileName); + + // rename the tmp file to the master file. Note that this should be + // done only when the file is closed and handles are released. + LOG.info("Renaming " + tmpLogFileName + " to " + masterLogFileName); + LOGDIR_FS.rename(tmpLogPath, masterLogPath); + // update the cache + fileManager.setHistoryFile(id, masterLogPath); + + // do the same for the user file too + masterLogPath = + JobHistory.JobInfo.getJobHistoryLogLocationForUser(masterLogFileName, + conf); + tmpLogPath = + JobHistory.JobInfo.getJobHistoryLogLocationForUser(tmpLogFileName, + conf); + if (masterLogPath != null) { + FileSystem fs = masterLogPath.getFileSystem(conf); + if (fs.exists(tmpLogPath)) { + LOG.info("Renaming " + tmpLogFileName + " to " + masterLogFileName + + " in user directory"); + fs.rename(tmpLogPath, masterLogPath); + } + } + } + + /** + * Deletes job data from the local disk. 
+ * For now just deletes the localized copy of job conf + */ + static void cleanupJob(JobID id) { + String localJobFilePath = JobInfo.getLocalJobFilePath(id); + File f = new File (localJobFilePath); + LOG.info("Deleting localized job conf at " + f); + if (!f.delete()) { + LOG.debug("Failed to delete file " + f); + } + } + + /** + * Delete job conf from the history folder. + */ + static void deleteConfFiles() throws IOException { + LOG.info("Cleaning up config files from the job history folder"); + FileSystem fs = new Path(LOG_DIR).getFileSystem(jtConf); + FileStatus[] status = fs.listStatus(new Path(LOG_DIR), CONF_FILTER); + for (FileStatus s : status) { + LOG.info("Deleting conf file " + s.getPath()); + fs.delete(s.getPath(), false); + } + } + + /** + * Move the completed job into the completed folder. + * This assumes that the jobhistory file is closed and all operations on the + * jobhistory file is complete. + * This *should* be the last call to jobhistory for a given job. + */ + static void markCompleted(JobID id) throws IOException { + fileManager.moveToDone(id); + } + + /** + * Log job submitted event to history. Creates a new file in history + * for the job. if history file creation fails, it disables history + * for all other events. + * @param jobId job id assigned by job tracker. + * @param jobConf job conf of the job + * @param jobConfPath path to job conf xml file in HDFS. + * @param submitTime time when job tracker received the job + * @throws IOException + * @deprecated Use + * {@link #logSubmitted(JobID, JobConf, String, long, boolean)} instead. + */ + public static void logSubmitted(JobID jobId, JobConf jobConf, + String jobConfPath, long submitTime) + throws IOException { + logSubmitted(jobId, jobConf, jobConfPath, submitTime, true); + } + + public static void logSubmitted(JobID jobId, JobConf jobConf, + String jobConfPath, long submitTime, + boolean restarted) + throws IOException { + FileSystem fs = null; + String userLogDir = null; + String jobUniqueString = JOBTRACKER_UNIQUE_STRING + jobId; + + if (!disableHistory){ + // Get the username and job name to be used in the actual log filename; + // sanity check them too + String jobName = getJobName(jobConf); + + String user = getUserName(jobConf); + + // get the history filename + String logFileName = null; + if (restarted) { + logFileName = getJobHistoryFileName(jobConf, jobId); + if (logFileName == null) { + logFileName = + encodeJobHistoryFileName(getNewJobHistoryFileName(jobConf, jobId)); + } else { + String parts[] = logFileName.split("_"); + //TODO this is a hack :( + // jobtracker-hostname_jobtracker-identifier_ + String jtUniqueString = parts[0] + "_" + parts[1] + "_"; + jobUniqueString = jtUniqueString + jobId.toString(); + } + } else { + logFileName = + encodeJobHistoryFileName(getNewJobHistoryFileName(jobConf, jobId)); + } + + // setup the history log file for this job + Path logFile = getJobHistoryLogLocation(logFileName); + + // find user log directory + Path userLogFile = + getJobHistoryLogLocationForUser(logFileName, jobConf); + + try{ + FSDataOutputStream out = null; + PrintWriter writer = null; + + if (LOG_DIR != null) { + // create output stream for logging in hadoop.job.history.location + if (restarted) { + logFile = recoverJobHistoryFile(jobConf, logFile); + logFileName = logFile.getName(); + } + + int defaultBufferSize = + LOGDIR_FS.getConf().getInt("io.file.buffer.size", 4096); + out = LOGDIR_FS.create(logFile, + new FsPermission(HISTORY_FILE_PERMISSION), + true, + defaultBufferSize, + 
LOGDIR_FS.getDefaultReplication(), + jobHistoryBlockSize, null); + writer = new PrintWriter(out); + fileManager.addWriter(jobId, writer); + + // cache it ... + fileManager.setHistoryFile(jobId, logFile); + } + if (userLogFile != null) { + // Get the actual filename as recoverJobHistoryFile() might return + // a different filename + userLogDir = userLogFile.getParent().toString(); + userLogFile = new Path(userLogDir, logFileName); + + // create output stream for logging + // in hadoop.job.history.user.location + fs = userLogFile.getFileSystem(jobConf); + + out = fs.create(userLogFile, true, 4096); + writer = new PrintWriter(out); + fileManager.addWriter(jobId, writer); + } + + ArrayList writers = fileManager.getWriters(jobId); + // Log the history meta info + JobHistory.MetaInfoManager.logMetaInfo(writers); + + //add to writer as well + JobHistory.log(writers, RecordTypes.Job, + new Keys[]{Keys.JOBID, Keys.JOBNAME, Keys.USER, Keys.SUBMIT_TIME, Keys.JOBCONF }, + new String[]{jobId.toString(), jobName, user, + String.valueOf(submitTime) , jobConfPath} + ); + + }catch(IOException e){ + LOG.error("Failed creating job history log file, disabling history", e); + disableHistory = true; + } + } + // Always store job conf on local file system + String localJobFilePath = JobInfo.getLocalJobFilePath(jobId); + File localJobFile = new File(localJobFilePath); + FileOutputStream jobOut = null; + try { + jobOut = new FileOutputStream(localJobFile); + jobConf.writeXml(jobOut); + if (LOG.isDebugEnabled()) { + LOG.debug("Job conf for " + jobId + " stored at " + + localJobFile.getAbsolutePath()); + } + } catch (IOException ioe) { + LOG.error("Failed to store job conf on the local filesystem ", ioe); + } finally { + if (jobOut != null) { + try { + jobOut.close(); + } catch (IOException ie) { + LOG.info("Failed to close the job configuration file " + + StringUtils.stringifyException(ie)); + } + } + } + + /* Storing the job conf on the log dir */ + Path jobFilePath = null; + if (LOG_DIR != null) { + jobFilePath = new Path(LOG_DIR + File.separator + + jobUniqueString + "_conf.xml"); + fileManager.setConfFile(jobId, jobFilePath); + } + Path userJobFilePath = null; + if (userLogDir != null) { + userJobFilePath = new Path(userLogDir + File.separator + + jobUniqueString + "_conf.xml"); + } + FSDataOutputStream jobFileOut = null; + try { + if (LOG_DIR != null) { + int defaultBufferSize = + LOGDIR_FS.getConf().getInt("io.file.buffer.size", 4096); + if (!LOGDIR_FS.exists(jobFilePath)) { + jobFileOut = LOGDIR_FS.create(jobFilePath, + new FsPermission(HISTORY_FILE_PERMISSION), + true, + defaultBufferSize, + LOGDIR_FS.getDefaultReplication(), + LOGDIR_FS.getDefaultBlockSize(), null); + jobConf.writeXml(jobFileOut); + jobFileOut.close(); + } + } + if (userLogDir != null) { + fs = new Path(userLogDir).getFileSystem(jobConf); + jobFileOut = fs.create(userJobFilePath); + jobConf.writeXml(jobFileOut); + } + if (LOG.isDebugEnabled()) { + LOG.debug("Job conf for " + jobId + " stored at " + + jobFilePath + "and" + userJobFilePath ); + } + } catch (IOException ioe) { + LOG.error("Failed to store job conf in the log dir", ioe); + } finally { + if (jobFileOut != null) { + try { + jobFileOut.close(); + } catch (IOException ie) { + LOG.info("Failed to close the job configuration file " + + StringUtils.stringifyException(ie)); + } + } + } + } + /** + * Logs launch time of job. + * + * @param jobId job id, assigned by jobtracker. + * @param startTime start time of job. + * @param totalMaps total maps assigned by jobtracker. 
+ * @param totalReduces total reduces. + */ + public static void logInited(JobID jobId, long startTime, + int totalMaps, int totalReduces) { + if (!disableHistory){ + ArrayList writer = fileManager.getWriters(jobId); + + if (null != writer){ + JobHistory.log(writer, RecordTypes.Job, + new Keys[] {Keys.JOBID, Keys.LAUNCH_TIME, Keys.TOTAL_MAPS, + Keys.TOTAL_REDUCES, Keys.JOB_STATUS}, + new String[] {jobId.toString(), String.valueOf(startTime), + String.valueOf(totalMaps), + String.valueOf(totalReduces), + Values.PREP.name()}); + } + } + } + + /** + * Logs the job as RUNNING. + * + * @param jobId job id, assigned by jobtracker. + * @param startTime start time of job. + * @param totalMaps total maps assigned by jobtracker. + * @param totalReduces total reduces. + * @deprecated Use {@link #logInited(JobID, long, int, int)} and + * {@link #logStarted(JobID)} + */ + @Deprecated + public static void logStarted(JobID jobId, long startTime, + int totalMaps, int totalReduces) { + logStarted(jobId); + } + + /** + * Logs job as running + * @param jobId job id, assigned by jobtracker. + */ + public static void logStarted(JobID jobId){ + if (!disableHistory){ + ArrayList writer = fileManager.getWriters(jobId); + + if (null != writer){ + JobHistory.log(writer, RecordTypes.Job, + new Keys[] {Keys.JOBID, Keys.JOB_STATUS}, + new String[] {jobId.toString(), + Values.RUNNING.name()}); + } + } + } + + /** + * Log job finished. closes the job file in history. + * @param jobId job id, assigned by jobtracker. + * @param finishTime finish time of job in ms. + * @param finishedMaps no of maps successfully finished. + * @param finishedReduces no of reduces finished sucessfully. + * @param failedMaps no of failed map tasks. + * @param failedReduces no of failed reduce tasks. + * @param counters the counters from the job + */ + public static void logFinished(JobID jobId, long finishTime, + int finishedMaps, int finishedReduces, + int failedMaps, int failedReduces, + Counters mapCounters, + Counters reduceCounters, + Counters counters){ + if (!disableHistory){ + // close job file for this job + ArrayList writer = fileManager.getWriters(jobId); + + if (null != writer){ + JobHistory.log(writer, RecordTypes.Job, + new Keys[] {Keys.JOBID, Keys.FINISH_TIME, + Keys.JOB_STATUS, Keys.FINISHED_MAPS, + Keys.FINISHED_REDUCES, + Keys.FAILED_MAPS, Keys.FAILED_REDUCES, + Keys.MAP_COUNTERS, Keys.REDUCE_COUNTERS, + Keys.COUNTERS}, + new String[] {jobId.toString(), Long.toString(finishTime), + Values.SUCCESS.name(), + String.valueOf(finishedMaps), + String.valueOf(finishedReduces), + String.valueOf(failedMaps), + String.valueOf(failedReduces), + mapCounters.makeEscapedCompactString(), + reduceCounters.makeEscapedCompactString(), + counters.makeEscapedCompactString()}); + + CloseWriters close = new CloseWriters(writer); + fileManager.addCloseTask(close); + } + Thread historyCleaner = new Thread(new HistoryCleaner()); + historyCleaner.start(); + } + } + /** + * Logs job failed event. Closes the job history log file. + * @param jobid job id + * @param timestamp time when job failure was detected in ms. + * @param finishedMaps no finished map tasks. + * @param finishedReduces no of finished reduce tasks. 
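The COUNTERS, MAP_COUNTERS and REDUCE_COUNTERS fields written by logFinished() above carry the single escaped token produced by Counters.makeEscapedCompactString(); a small sketch of what that token looks like (the counter group and names are arbitrary examples, not part of the patch).

import org.apache.hadoop.mapred.Counters;

public class CounterSerializationDemo {
  public static void main(String[] args) {
    Counters counters = new Counters();
    counters.incrCounter("Job Counters", "Launched map tasks", 4);
    counters.incrCounter("Map-Reduce Framework", "Map input records", 1000);
    // One escaped, compact token, suitable for embedding in a history line.
    System.out.println(counters.makeEscapedCompactString());
  }
}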
+ */ + public static void logFailed(JobID jobid, long timestamp, int finishedMaps, + int finishedReduces, Counters counters){ + if (!disableHistory){ + ArrayList writer = fileManager.getWriters(jobid); + + if (null != writer){ + JobHistory.log(writer, RecordTypes.Job, + new Keys[] {Keys.JOBID, Keys.FINISH_TIME, + Keys.JOB_STATUS, Keys.FINISHED_MAPS, + Keys.FINISHED_REDUCES, Keys.COUNTERS}, + new String[] {jobid.toString(), + String.valueOf(timestamp), + Values.FAILED.name(), + String.valueOf(finishedMaps), + String.valueOf(finishedReduces), + counters.makeEscapedCompactString()}); + CloseWriters close = new CloseWriters(writer); + fileManager.addCloseTask(close); + } + } + } + /** + * Logs job killed event. Closes the job history log file. + * + * @param jobid + * job id + * @param timestamp + * time when job killed was issued in ms. + * @param finishedMaps + * no finished map tasks. + * @param finishedReduces + * no of finished reduce tasks. + */ + public static void logKilled(JobID jobid, long timestamp, int finishedMaps, + int finishedReduces, Counters counters) { + if (!disableHistory) { + ArrayList writer = fileManager.getWriters(jobid); + + if (null != writer) { + JobHistory.log(writer, RecordTypes.Job, + new Keys[] {Keys.JOBID, + Keys.FINISH_TIME, Keys.JOB_STATUS, Keys.FINISHED_MAPS, + Keys.FINISHED_REDUCES, Keys.COUNTERS }, + new String[] {jobid.toString(), + String.valueOf(timestamp), Values.KILLED.name(), + String.valueOf(finishedMaps), + String.valueOf(finishedReduces), + counters.makeEscapedCompactString()}); + CloseWriters close = new CloseWriters(writer); + fileManager.addCloseTask(close); + } + } + } + /** + * Log job's priority. + * @param jobid job id + * @param priority Jobs priority + */ + public static void logJobPriority(JobID jobid, JobPriority priority){ + if (!disableHistory){ + ArrayList writer = fileManager.getWriters(jobid); + + if (null != writer){ + JobHistory.log(writer, RecordTypes.Job, + new Keys[] {Keys.JOBID, Keys.JOB_PRIORITY}, + new String[] {jobid.toString(), priority.toString()}); + } + } + } + /** + * Log job's submit-time/launch-time + * @param jobid job id + * @param submitTime job's submit time + * @param launchTime job's launch time + * @param restartCount number of times the job got restarted + * @deprecated Use {@link #logJobInfo(JobID, long, long)} instead. + */ + public static void logJobInfo(JobID jobid, long submitTime, long launchTime, + int restartCount){ + logJobInfo(jobid, submitTime, launchTime); + } + + public static void logJobInfo(JobID jobid, long submitTime, long launchTime) + { + if (!disableHistory){ + ArrayList writer = fileManager.getWriters(jobid); + + if (null != writer){ + JobHistory.log(writer, RecordTypes.Job, + new Keys[] {Keys.JOBID, Keys.SUBMIT_TIME, + Keys.LAUNCH_TIME}, + new String[] {jobid.toString(), + String.valueOf(submitTime), + String.valueOf(launchTime)}); + } + } + } + } + + /** + * Helper class for logging or reading back events related to Task's start, finish or failure. + * All events logged by this class are logged in a separate file per job in + * job tracker history. These events map to TIPs in jobtracker. + */ + public static class Task extends KeyValuePair{ + private Map taskAttempts = new TreeMap(); + + /** + * Log start time of task (TIP). + * @param taskId task id + * @param taskType MAP or REDUCE + * @param startTime startTime of tip. 
+ */ + public static void logStarted(TaskID taskId, String taskType, + long startTime, String splitLocations) { + if (!disableHistory){ + JobID id = taskId.getJobID(); + ArrayList writer = fileManager.getWriters(id); + + if (null != writer){ + JobHistory.log(writer, RecordTypes.Task, + new Keys[]{Keys.TASKID, Keys.TASK_TYPE , + Keys.START_TIME, Keys.SPLITS}, + new String[]{taskId.toString(), taskType, + String.valueOf(startTime), + splitLocations}); + } + } + } + /** + * Log finish time of task. + * @param taskId task id + * @param taskType MAP or REDUCE + * @param finishTime finish timeof task in ms + */ + public static void logFinished(TaskID taskId, String taskType, + long finishTime, Counters counters){ + if (!disableHistory){ + JobID id = taskId.getJobID(); + ArrayList writer = fileManager.getWriters(id); + + if (null != writer){ + JobHistory.log(writer, RecordTypes.Task, + new Keys[]{Keys.TASKID, Keys.TASK_TYPE, + Keys.TASK_STATUS, Keys.FINISH_TIME, + Keys.COUNTERS}, + new String[]{ taskId.toString(), taskType, Values.SUCCESS.name(), + String.valueOf(finishTime), + counters.makeEscapedCompactString()}); + } + } + } + + /** + * Update the finish time of task. + * @param taskId task id + * @param finishTime finish time of task in ms + */ + public static void logUpdates(TaskID taskId, long finishTime){ + if (!disableHistory){ + JobID id = taskId.getJobID(); + ArrayList writer = fileManager.getWriters(id); + + if (null != writer){ + JobHistory.log(writer, RecordTypes.Task, + new Keys[]{Keys.TASKID, Keys.FINISH_TIME}, + new String[]{ taskId.toString(), + String.valueOf(finishTime)}); + } + } + } + + /** + * Log job failed event. + * @param taskId task id + * @param taskType MAP or REDUCE. + * @param time timestamp when job failed detected. + * @param error error message for failure. + */ + public static void logFailed(TaskID taskId, String taskType, long time, String error){ + logFailed(taskId, taskType, time, error, null); + } + + /** + * @param failedDueToAttempt The attempt that caused the failure, if any + */ + public static void logFailed(TaskID taskId, String taskType, long time, + String error, + TaskAttemptID failedDueToAttempt){ + if (!disableHistory){ + JobID id = taskId.getJobID(); + ArrayList writer = fileManager.getWriters(id); + + if (null != writer){ + String failedAttempt = failedDueToAttempt == null + ? "" + : failedDueToAttempt.toString(); + JobHistory.log(writer, RecordTypes.Task, + new Keys[]{Keys.TASKID, Keys.TASK_TYPE, + Keys.TASK_STATUS, Keys.FINISH_TIME, + Keys.ERROR, Keys.TASK_ATTEMPT_ID}, + new String[]{ taskId.toString(), taskType, + Values.FAILED.name(), + String.valueOf(time) , error, + failedAttempt}); + } + } + } + /** + * Returns all task attempts for this task. + */ + public Map getTaskAttempts(){ + return this.taskAttempts; + } + } + + /** + * Base class for Map and Reduce TaskAttempts. + */ + public static class TaskAttempt extends Task{} + + /** + * Helper class for logging or reading back events related to start, finish or failure of + * a Map Attempt on a node. + */ + public static class MapAttempt extends TaskAttempt{ + /** + * Log start time of this map task attempt. + * @param taskAttemptId task attempt id + * @param startTime start time of task attempt as reported by task tracker. + * @param hostName host name of the task attempt. 
+ * @deprecated Use + * {@link #logStarted(TaskAttemptID, long, String, int, String)} + */ + @Deprecated + public static void logStarted(TaskAttemptID taskAttemptId, long startTime, String hostName){ + logStarted(taskAttemptId, startTime, hostName, -1, Values.MAP.name()); + } + + /** + * Log start time of this map task attempt. + * + * @param taskAttemptId task attempt id + * @param startTime start time of task attempt as reported by task tracker. + * @param trackerName name of the tracker executing the task attempt. + * @param httpPort http port of the task tracker executing the task attempt + * @param taskType Whether the attempt is cleanup or setup or map + */ + public static void logStarted(TaskAttemptID taskAttemptId, long startTime, + String trackerName, int httpPort, + String taskType) { + if (!disableHistory){ + JobID id = taskAttemptId.getJobID(); + ArrayList writer = fileManager.getWriters(id); + + if (null != writer){ + JobHistory.log(writer, RecordTypes.MapAttempt, + new Keys[]{ Keys.TASK_TYPE, Keys.TASKID, + Keys.TASK_ATTEMPT_ID, Keys.START_TIME, + Keys.TRACKER_NAME, Keys.HTTP_PORT}, + new String[]{taskType, + taskAttemptId.getTaskID().toString(), + taskAttemptId.toString(), + String.valueOf(startTime), trackerName, + httpPort == -1 ? "" : + String.valueOf(httpPort)}); + } + } + } + + /** + * Log finish time of map task attempt. + * @param taskAttemptId task attempt id + * @param finishTime finish time + * @param hostName host name + * @deprecated Use + * {@link #logFinished(TaskAttemptID, long, String, String, String, Counters)} + */ + @Deprecated + public static void logFinished(TaskAttemptID taskAttemptId, long finishTime, + String hostName){ + logFinished(taskAttemptId, finishTime, hostName, Values.MAP.name(), "", + new Counters()); + } + + /** + * Log finish time of map task attempt. + * + * @param taskAttemptId task attempt id + * @param finishTime finish time + * @param hostName host name + * @param taskType Whether the attempt is cleanup or setup or map + * @param stateString state string of the task attempt + * @param counter counters of the task attempt + */ + public static void logFinished(TaskAttemptID taskAttemptId, + long finishTime, + String hostName, + String taskType, + String stateString, + Counters counter) { + if (!disableHistory){ + JobID id = taskAttemptId.getJobID(); + ArrayList writer = fileManager.getWriters(id); + + if (null != writer){ + JobHistory.log(writer, RecordTypes.MapAttempt, + new Keys[]{ Keys.TASK_TYPE, Keys.TASKID, + Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, + Keys.FINISH_TIME, Keys.HOSTNAME, + Keys.STATE_STRING, Keys.COUNTERS}, + new String[]{taskType, + taskAttemptId.getTaskID().toString(), + taskAttemptId.toString(), + Values.SUCCESS.name(), + String.valueOf(finishTime), hostName, + stateString, + counter.makeEscapedCompactString()}); + } + } + } + + /** + * Log task attempt failed event. + * @param taskAttemptId task attempt id + * @param timestamp timestamp + * @param hostName hostname of this task attempt. + * @param error error message if any for this task attempt. + * @deprecated Use + * {@link #logFailed(TaskAttemptID, long, String, String, String)} + */ + @Deprecated + public static void logFailed(TaskAttemptID taskAttemptId, + long timestamp, String hostName, + String error) { + logFailed(taskAttemptId, timestamp, hostName, error, Values.MAP.name()); + } + + /** + * Log task attempt failed event. + * + * @param taskAttemptId task attempt id + * @param timestamp timestamp + * @param hostName hostname of this task attempt. 
+ * @param error error message if any for this task attempt. + * @param taskType Whether the attempt is cleanup or setup or map + */ + public static void logFailed(TaskAttemptID taskAttemptId, + long timestamp, String hostName, + String error, String taskType) { + if (!disableHistory){ + JobID id = taskAttemptId.getJobID(); + ArrayList writer = fileManager.getWriters(id); + + if (null != writer){ + JobHistory.log(writer, RecordTypes.MapAttempt, + new Keys[]{Keys.TASK_TYPE, Keys.TASKID, + Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, + Keys.FINISH_TIME, Keys.HOSTNAME, Keys.ERROR}, + new String[]{ taskType, + taskAttemptId.getTaskID().toString(), + taskAttemptId.toString(), + Values.FAILED.name(), + String.valueOf(timestamp), + hostName, error}); + } + } + } + + /** + * Log task attempt killed event. + * @param taskAttemptId task attempt id + * @param timestamp timestamp + * @param hostName hostname of this task attempt. + * @param error error message if any for this task attempt. + * @deprecated Use + * {@link #logKilled(TaskAttemptID, long, String, String, String)} + */ + @Deprecated + public static void logKilled(TaskAttemptID taskAttemptId, + long timestamp, String hostName, String error){ + logKilled(taskAttemptId, timestamp, hostName, error, Values.MAP.name()); + } + + /** + * Log task attempt killed event. + * + * @param taskAttemptId task attempt id + * @param timestamp timestamp + * @param hostName hostname of this task attempt. + * @param error error message if any for this task attempt. + * @param taskType Whether the attempt is cleanup or setup or map + */ + public static void logKilled(TaskAttemptID taskAttemptId, + long timestamp, String hostName, + String error, String taskType) { + if (!disableHistory){ + JobID id = taskAttemptId.getJobID(); + ArrayList writer = fileManager.getWriters(id); + + if (null != writer){ + JobHistory.log(writer, RecordTypes.MapAttempt, + new Keys[]{Keys.TASK_TYPE, Keys.TASKID, + Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, + Keys.FINISH_TIME, Keys.HOSTNAME, + Keys.ERROR}, + new String[]{ taskType, + taskAttemptId.getTaskID().toString(), + taskAttemptId.toString(), + Values.KILLED.name(), + String.valueOf(timestamp), + hostName, error}); + } + } + } + } + /** + * Helper class for logging or reading back events related to start, finish or failure of + * a Map Attempt on a node. + */ + public static class ReduceAttempt extends TaskAttempt{ + /** + * Log start time of Reduce task attempt. + * @param taskAttemptId task attempt id + * @param startTime start time + * @param hostName host name + * @deprecated Use + * {@link #logStarted(TaskAttemptID, long, String, int, String)} + */ + @Deprecated + public static void logStarted(TaskAttemptID taskAttemptId, + long startTime, String hostName){ + logStarted(taskAttemptId, startTime, hostName, -1, Values.REDUCE.name()); + } + + /** + * Log start time of Reduce task attempt. 
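+     * <p>
+     * Sketch of a call as the jobtracker might issue it; the attempt id,
+     * tracker name and http port below are illustrative values only:
+     * <pre>
+     * TaskAttemptID attempt =
+     *     TaskAttemptID.forName("attempt_200707121733_0003_r_000000_0");
+     * JobHistory.ReduceAttempt.logStarted(attempt, System.currentTimeMillis(),
+     *     "tracker_host1:localhost/127.0.0.1:34291", 50060,
+     *     JobHistory.Values.REDUCE.name());
+     * </pre>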
+ * + * @param taskAttemptId task attempt id + * @param startTime start time + * @param trackerName tracker name + * @param httpPort the http port of the tracker executing the task attempt + * @param taskType Whether the attempt is cleanup or setup or reduce + */ + public static void logStarted(TaskAttemptID taskAttemptId, + long startTime, String trackerName, + int httpPort, + String taskType) { + if (!disableHistory){ + JobID id = taskAttemptId.getJobID(); + ArrayList writer = fileManager.getWriters(id); + + if (null != writer){ + JobHistory.log(writer, RecordTypes.ReduceAttempt, + new Keys[]{ Keys.TASK_TYPE, Keys.TASKID, + Keys.TASK_ATTEMPT_ID, Keys.START_TIME, + Keys.TRACKER_NAME, Keys.HTTP_PORT}, + new String[]{taskType, + taskAttemptId.getTaskID().toString(), + taskAttemptId.toString(), + String.valueOf(startTime), trackerName, + httpPort == -1 ? "" : + String.valueOf(httpPort)}); + } + } + } + + /** + * Log finished event of this task. + * @param taskAttemptId task attempt id + * @param shuffleFinished shuffle finish time + * @param sortFinished sort finish time + * @param finishTime finish time of task + * @param hostName host name where task attempt executed + * @deprecated Use + * {@link #logFinished(TaskAttemptID, long, long, long, String, String, String, Counters)} + */ + @Deprecated + public static void logFinished(TaskAttemptID taskAttemptId, long shuffleFinished, + long sortFinished, long finishTime, + String hostName){ + logFinished(taskAttemptId, shuffleFinished, sortFinished, + finishTime, hostName, Values.REDUCE.name(), + "", new Counters()); + } + + /** + * Log finished event of this task. + * + * @param taskAttemptId task attempt id + * @param shuffleFinished shuffle finish time + * @param sortFinished sort finish time + * @param finishTime finish time of task + * @param hostName host name where task attempt executed + * @param taskType Whether the attempt is cleanup or setup or reduce + * @param stateString the state string of the attempt + * @param counter counters of the attempt + */ + public static void logFinished(TaskAttemptID taskAttemptId, + long shuffleFinished, + long sortFinished, long finishTime, + String hostName, String taskType, + String stateString, Counters counter) { + if (!disableHistory){ + JobID id = taskAttemptId.getJobID(); + ArrayList writer = fileManager.getWriters(id); + + if (null != writer){ + JobHistory.log(writer, RecordTypes.ReduceAttempt, + new Keys[]{ Keys.TASK_TYPE, Keys.TASKID, + Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, + Keys.SHUFFLE_FINISHED, Keys.SORT_FINISHED, + Keys.FINISH_TIME, Keys.HOSTNAME, + Keys.STATE_STRING, Keys.COUNTERS}, + new String[]{taskType, + taskAttemptId.getTaskID().toString(), + taskAttemptId.toString(), + Values.SUCCESS.name(), + String.valueOf(shuffleFinished), + String.valueOf(sortFinished), + String.valueOf(finishTime), hostName, + stateString, + counter.makeEscapedCompactString()}); + } + } + } + + /** + * Log failed reduce task attempt. + * @param taskAttemptId task attempt id + * @param timestamp time stamp when task failed + * @param hostName host name of the task attempt. + * @param error error message of the task. + * @deprecated Use + * {@link #logFailed(TaskAttemptID, long, String, String, String)} + */ + @Deprecated + public static void logFailed(TaskAttemptID taskAttemptId, long timestamp, + String hostName, String error){ + logFailed(taskAttemptId, timestamp, hostName, error, Values.REDUCE.name()); + } + + /** + * Log failed reduce task attempt. 
+ * + * @param taskAttemptId task attempt id + * @param timestamp time stamp when task failed + * @param hostName host name of the task attempt. + * @param error error message of the task. + * @param taskType Whether the attempt is cleanup or setup or reduce + */ + public static void logFailed(TaskAttemptID taskAttemptId, long timestamp, + String hostName, String error, + String taskType) { + if (!disableHistory){ + JobID id = taskAttemptId.getJobID(); + ArrayList writer = fileManager.getWriters(id); + + if (null != writer){ + JobHistory.log(writer, RecordTypes.ReduceAttempt, + new Keys[]{ Keys.TASK_TYPE, Keys.TASKID, + Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, + Keys.FINISH_TIME, Keys.HOSTNAME, + Keys.ERROR }, + new String[]{ taskType, + taskAttemptId.getTaskID().toString(), + taskAttemptId.toString(), + Values.FAILED.name(), + String.valueOf(timestamp), hostName, error }); + } + } + } + + /** + * Log killed reduce task attempt. + * @param taskAttemptId task attempt id + * @param timestamp time stamp when task failed + * @param hostName host name of the task attempt. + * @param error error message of the task. + * @deprecated Use + * {@link #logKilled(TaskAttemptID, long, String, String, String)} + */ + @Deprecated + public static void logKilled(TaskAttemptID taskAttemptId, long timestamp, + String hostName, String error) { + logKilled(taskAttemptId, timestamp, hostName, error, Values.REDUCE.name()); + } + + /** + * Log killed reduce task attempt. + * + * @param taskAttemptId task attempt id + * @param timestamp time stamp when task failed + * @param hostName host name of the task attempt. + * @param error error message of the task. + * @param taskType Whether the attempt is cleanup or setup or reduce + */ + public static void logKilled(TaskAttemptID taskAttemptId, long timestamp, + String hostName, String error, + String taskType) { + if (!disableHistory){ + JobID id = taskAttemptId.getJobID(); + ArrayList writer = fileManager.getWriters(id); + + if (null != writer){ + JobHistory.log(writer, RecordTypes.ReduceAttempt, + new Keys[]{ Keys.TASK_TYPE, Keys.TASKID, + Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, + Keys.FINISH_TIME, Keys.HOSTNAME, + Keys.ERROR }, + new String[]{ taskType, + taskAttemptId.getTaskID().toString(), + taskAttemptId.toString(), + Values.KILLED.name(), + String.valueOf(timestamp), + hostName, error }); + } + } + } + } + + /** + * Callback interface for reading back log events from JobHistory. This interface + * should be implemented and passed to JobHistory.parseHistory() + * + */ + public static interface Listener{ + /** + * Callback method for history parser. + * @param recType type of record, which is the first entry in the line. + * @param values a map of key-value pairs as thry appear in history. + * @throws IOException + */ + public void handle(RecordTypes recType, Map values) throws IOException; + } + + /** + * Delete history files older than one month. Update master index and remove all + * jobs older than one month. Also if a job tracker has no jobs in last one month + * remove reference to the job tracker. + * + */ + public static class HistoryCleaner implements Runnable{ + static final long ONE_DAY_IN_MS = 24 * 60 * 60 * 1000L; + static final long THIRTY_DAYS_IN_MS = 30 * ONE_DAY_IN_MS; + private long now; + private static boolean isRunning = false; + private static long lastRan = 0; + + /** + * Cleans up history data. 
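+     * <p>
+     * The cleaner is an ordinary {@link Runnable}; one way to drive it
+     * (scheduling details are up to the caller) is simply:
+     * <pre>
+     * Thread historyCleaner =
+     *     new Thread(new JobHistory.HistoryCleaner(), "historyCleaner");
+     * historyCleaner.start();
+     * </pre>
+     * Repeated runs within the same day return immediately, so invoking it
+     * from a periodic trigger is safe.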
+ */ + public void run(){ + if (isRunning){ + return; + } + now = System.currentTimeMillis(); + // clean history only once a day at max + if (lastRan != 0 && (now - lastRan) < ONE_DAY_IN_MS) { + return; + } + lastRan = now; + isRunning = true; + try { + FileStatus[] historyFiles = DONEDIR_FS.listStatus(DONE); + // delete if older than 30 days + if (historyFiles != null) { + for (FileStatus f : historyFiles) { + if (now - f.getModificationTime() > THIRTY_DAYS_IN_MS) { + DONEDIR_FS.delete(f.getPath(), true); + LOG.info("Deleting old history file : " + f.getPath()); + } + } + } + + //walking over the map to purge entries from jobHistoryFileMap + synchronized (jobHistoryFileMap) { + Iterator> it = + jobHistoryFileMap.entrySet().iterator(); + while (it.hasNext()) { + MovedFileInfo info = it.next().getValue(); + if (now - info.timestamp > THIRTY_DAYS_IN_MS) { + it.remove(); + } else { + //since entries are in sorted timestamp order, no more entries + //are required to be checked + break; + } + } + } + } catch (IOException ie) { + LOG.info("Error cleaning up history directory" + + StringUtils.stringifyException(ie)); + } + isRunning = false; + } + + static long getLastRan() { + return lastRan; + } + } + + /** + * Return the TaskLogsUrl of a particular TaskAttempt + * + * @param attempt + * @return the taskLogsUrl. null if http-port or tracker-name or + * task-attempt-id are unavailable. + */ + public static String getTaskLogsUrl(JobHistory.TaskAttempt attempt) { + if (attempt.get(Keys.HTTP_PORT).equals("") + || attempt.get(Keys.TRACKER_NAME).equals("") + || attempt.get(Keys.TASK_ATTEMPT_ID).equals("")) { + return null; + } + + String taskTrackerName = + JobInProgress.convertTrackerNameToHostName( + attempt.get(Keys.TRACKER_NAME)); + return TaskLogServlet.getTaskLogUrl(taskTrackerName, attempt + .get(Keys.HTTP_PORT), attempt.get(Keys.TASK_ATTEMPT_ID)); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/JobID.java b/src/mapred/org/apache/hadoop/mapred/JobID.java new file mode 100644 index 0000000..e4ea6e1 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JobID.java @@ -0,0 +1,117 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.IOException; + +/** + * JobID represents the immutable and unique identifier for + * the job. JobID consists of two parts. First part + * represents the jobtracker identifier, so that jobID to jobtracker map + * is defined. For cluster setup this string is the jobtracker + * start time, for local setting, it is "local". + * Second part of the JobID is the job number.
+ * An example JobID is : 
+ * <code>job_200707121733_0003</code> , which represents the third job 
+ * running at the jobtracker started at <code>200707121733</code>. 
+ * <p>
+ * Applications should never construct or parse JobID strings, but rather 
+ * use appropriate constructors or {@link #forName(String)} method. 
+ * 
+ * @see TaskID
+ * @see TaskAttemptID
+ */
+@Deprecated
+public class JobID extends org.apache.hadoop.mapreduce.JobID {
+  /**
+   * Constructs a JobID object 
+   * @param jtIdentifier jobTracker identifier 
+   * @param id job number 
+   */
+  public JobID(String jtIdentifier, int id) {
+    super(jtIdentifier, id);
+  }
+
+  public JobID() { }
+
+  /**
+   * Downgrade a new JobID to an old one
+   * @param old a new or old JobID
+   * @return either old or a new JobID built to match old
+   */
+  public static JobID downgrade(org.apache.hadoop.mapreduce.JobID old) {
+    if (old instanceof JobID) {
+      return (JobID) old;
+    } else {
+      return new JobID(old.getJtIdentifier(), old.getId());
+    }
+  }
+
+  @Deprecated
+  public static JobID read(DataInput in) throws IOException {
+    JobID jobId = new JobID();
+    jobId.readFields(in);
+    return jobId;
+  }
+
+  /** Construct a JobID object from given string 
+   * @return constructed JobID object or null if the given String is null
+   * @throws IllegalArgumentException if the given string is malformed
+   */
+  public static JobID forName(String str) throws IllegalArgumentException {
+    return (JobID) org.apache.hadoop.mapreduce.JobID.forName(str);
+  }
+
+  /**
+   * Returns a regex pattern which matches job IDs. Arguments can 
+   * be given null, in which case that part of the regex will be generic. 
+   * For example to obtain a regex matching any job 
+   * run on the jobtracker started at 200707121733, we would use :
+   * <pre> 
+   * JobID.getJobIDsPattern("200707121733", null);
+   * </pre>
+   * which will return :
+   * <pre> "job_200707121733_[0-9]*" </pre> 
+ * @param jtIdentifier jobTracker identifier, or null + * @param jobId job number, or null + * @return a regex pattern matching JobIDs + */ + @Deprecated + public static String getJobIDsPattern(String jtIdentifier, Integer jobId) { + StringBuilder builder = new StringBuilder(JOB).append(SEPARATOR); + builder.append(getJobIDsPatternWOPrefix(jtIdentifier, jobId)); + return builder.toString(); + } + + @Deprecated + static StringBuilder getJobIDsPatternWOPrefix(String jtIdentifier, + Integer jobId) { + StringBuilder builder = new StringBuilder(); + if (jtIdentifier != null) { + builder.append(jtIdentifier); + } else { + builder.append("[^").append(SEPARATOR).append("]*"); + } + builder.append(SEPARATOR) + .append(jobId != null ? idFormat.format(jobId) : "[0-9]*"); + return builder; + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/JobInProgress.java b/src/mapred/org/apache/hadoop/mapred/JobInProgress.java new file mode 100644 index 0000000..8b656df --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JobInProgress.java @@ -0,0 +1,3734 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.DataInputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.IdentityHashMap; +import java.util.Iterator; +import java.util.LinkedHashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.Vector; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.mapred.JobHistory.Values; +import org.apache.hadoop.mapred.CleanupQueue.PathDeletionContext; +import org.apache.hadoop.metrics.MetricsContext; +import org.apache.hadoop.metrics.MetricsRecord; +import org.apache.hadoop.metrics.MetricsUtil; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.net.NetworkTopology; +import org.apache.hadoop.net.Node; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.mapreduce.TaskType; +import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; + +/************************************************************* + * JobInProgress maintains all the info for keeping + * a Job on the straight and narrow. It keeps its JobProfile + * and its latest JobStatus, plus a set of tables for + * doing bookkeeping of its Tasks. 
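+ * <p>
+ * A rough sketch of how the surrounding code drives an instance; the
+ * variables below (jip, trackerStatus, tip, reportedTaskStatus, etc.) are
+ * assumed to exist on the JobTracker/scheduler side:
+ * <pre>
+ *   jip.initTasks();                                  // build map/reduce TIPs
+ *   Task t = jip.obtainNewMapTask(trackerStatus, clusterSize, numUniqueHosts);
+ *   jip.updateTaskStatus(tip, reportedTaskStatus);    // on each heartbeat
+ * </pre>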
+ * *********************************************************** + */ +public class JobInProgress { + /** + * Used when the a kill is issued to a job which is initializing. + */ + static class KillInterruptedException extends InterruptedException { + private static final long serialVersionUID = 1L; + public KillInterruptedException(String msg) { + super(msg); + } + } + + static final Log LOG = LogFactory.getLog(JobInProgress.class); + + JobProfile profile; + JobStatus status; + Path jobFile = null; + Path localJobFile = null; + + TaskInProgress maps[] = new TaskInProgress[0]; + TaskInProgress reduces[] = new TaskInProgress[0]; + TaskInProgress cleanup[] = new TaskInProgress[0]; + TaskInProgress setup[] = new TaskInProgress[0]; + int numMapTasks = 0; + int numReduceTasks = 0; + long memoryPerMap; + long memoryPerReduce; + volatile int numSlotsPerMap = 1; + volatile int numSlotsPerReduce = 1; + int maxTaskFailuresPerTracker; + + // Counters to track currently running/finished/failed Map/Reduce task-attempts + int runningMapTasks = 0; + int runningReduceTasks = 0; + int finishedMapTasks = 0; + int finishedReduceTasks = 0; + int failedMapTasks = 0; + int failedReduceTasks = 0; + + static final float DEFAULT_COMPLETED_MAPS_PERCENT_FOR_REDUCE_SLOWSTART = 0.05f; + int completedMapsForReduceSlowstart = 0; + int rushReduceReduces = 5; + int rushReduceMaps = 5; + + // runningMapTasks include speculative tasks, so we need to capture + // speculative tasks separately + int speculativeMapTasks = 0; + int speculativeReduceTasks = 0; + boolean garbageCollected = false; + private static AtomicInteger totalSpeculativeMapTasks = new AtomicInteger(0); + private static AtomicInteger totalSpeculativeReduceTasks = + new AtomicInteger(0); + + int mapFailuresPercent = 0; + int reduceFailuresPercent = 0; + int failedMapTIPs = 0; + int failedReduceTIPs = 0; + private volatile boolean launchedCleanup = false; + private volatile boolean launchedSetup = false; + private volatile boolean jobKilled = false; + private volatile boolean jobFailed = false; + boolean jobSetupCleanupNeeded; + boolean taskCleanupNeeded; + + JobPriority priority = JobPriority.NORMAL; + JobTracker jobtracker; + + // NetworkTopology Node to the set of TIPs + Map> nonRunningMapCache; + + // Map of NetworkTopology Node to set of running TIPs + Map> runningMapCache; + + // A list of non-local non-running maps + List nonLocalMaps; + + // A set of non-local running maps + Set nonLocalRunningMaps; + + // A list of non-running reduce TIPs + List nonRunningReduces; + + // A set of running reduce TIPs + Set runningReduces; + + // A list of cleanup tasks for the map task attempts, to be launched + List mapCleanupTasks = new LinkedList(); + + // A list of cleanup tasks for the reduce task attempts, to be launched + List reduceCleanupTasks = new LinkedList(); + + int maxLevel; + + /** + * A special value indicating that + * {@link #findNewMapTask(TaskTrackerStatus, int, int, int)} should + * schedule any available map tasks for this job, including speculative tasks. + */ + int anyCacheLevel; + + /** + * A special value indicating that + * {@link #findNewMapTask(TaskTrackerStatus, int, int, int)} should + * schedule any only off-switch and speculative map tasks for this job. + */ + private static final int NON_LOCAL_CACHE_LEVEL = -1; + + private int taskCompletionEventTracker = 0; + List taskCompletionEvents; + + // The maximum percentage of trackers in cluster added to the 'blacklist'. 
+ private static final double CLUSTER_BLACKLIST_PERCENT = 0.25; + + // The maximum percentage of fetch failures allowed for a map + private static final double MAX_ALLOWED_FETCH_FAILURES_PERCENT = 0.5; + + // No. of tasktrackers in the cluster + private volatile int clusterSize = 0; + + // The no. of tasktrackers where >= conf.getMaxTaskFailuresPerTracker() + // tasks have failed + private volatile int flakyTaskTrackers = 0; + // Map of trackerHostName -> no. of task failures + private Map trackerToFailuresMap = + new TreeMap(); + + //Confine estimation algorithms to an "oracle" class that JIP queries. + ResourceEstimator resourceEstimator; + + long startTime; + long launchTime; + long finishTime; + + // Indicates how many times the job got restarted + int restartCount; + + JobConf conf; + AtomicBoolean tasksInited = new AtomicBoolean(false); + private JobInitKillStatus jobInitKillStatus = new JobInitKillStatus(); + + LocalFileSystem localFs; + JobID jobId; + volatile private boolean hasSpeculativeMaps; + volatile private boolean hasSpeculativeReduces; + long inputLength = 0; + private String user; + private String historyFile = ""; + private boolean historyFileCopied; + + // Per-job counters + public static enum Counter { + NUM_FAILED_MAPS, + NUM_FAILED_REDUCES, + TOTAL_LAUNCHED_MAPS, + TOTAL_LAUNCHED_REDUCES, + OTHER_LOCAL_MAPS, + DATA_LOCAL_MAPS, + RACK_LOCAL_MAPS, + SLOTS_MILLIS_MAPS, + SLOTS_MILLIS_REDUCES, + SLOTS_MILLIS_REDUCES_COPY, + SLOTS_MILLIS_REDUCES_SORT, + SLOTS_MILLIS_REDUCES_REDUCE, + FALLOW_SLOTS_MILLIS_MAPS, + FALLOW_SLOTS_MILLIS_REDUCES, + LOCAL_MAP_INPUT_BYTES, + RACK_MAP_INPUT_BYTES + } + Counters jobCounters = new Counters(); + + MetricsRecord jobMetrics; + + // Maximum no. of fetch-failure notifications after which + // the map task is killed + private static final int MAX_FETCH_FAILURES_NOTIFICATIONS = 3; + + // Map of mapTaskId -> no. of fetch failures + private Map mapTaskIdToFetchFailuresMap = + new TreeMap(); + + private Object schedulingInfo; + + // Don't lower speculativeCap below one TT's worth (for small clusters) + private static final int MIN_SPEC_CAP = 10; + private static final float MIN_SLOTS_CAP = 0.01f; + private static final float TOTAL_SPECULATIVECAP = 0.1f; + public static final String SPECULATIVE_SLOWTASK_THRESHOLD = + "mapreduce.job.speculative.slowtaskthreshold"; + public static final String RUSH_REDUCER_MAP_THRESHOLD = + "mapred.job.rushreduce.map.threshold"; + public static final String RUSH_REDUCER_REDUCE_THRESHOLD = + "mapred.job.rushreduce.reduce.threshold"; + public static final String SPECULATIVECAP = + "mapreduce.job.speculative.speculativecap"; + public static final String SPECULATIVE_SLOWNODE_THRESHOLD = + "mapreduce.job.speculative.slownodethreshold"; + public static final String SPECULATIVE_REFRESH_TIMEOUT = + "mapreduce.job.speculative.refresh.timeout"; + + //thresholds for speculative execution + float slowTaskThreshold; + float speculativeCap; + float slowNodeThreshold; + + //Statistics are maintained for a couple of things + //mapTaskStats is used for maintaining statistics about + //the completion time of map tasks on the trackers. On a per + //tracker basis, the mean time for task completion is maintained + private DataStatistics mapTaskStats = new DataStatistics(); + //reduceTaskStats is used for maintaining statistics about + //the completion time of reduce tasks on the trackers. 
On a per + //tracker basis, the mean time for task completion is maintained + private DataStatistics reduceTaskStats = new DataStatistics(); + //trackerMapStats used to maintain a mapping from the tracker to the + //the statistics about completion time of map tasks + private Map trackerMapStats = + new HashMap(); + //trackerReduceStats used to maintain a mapping from the tracker to the + //the statistics about completion time of reduce tasks + private Map trackerReduceStats = + new HashMap(); + //runningMapStats used to maintain the RUNNING map tasks' statistics + private DataStatistics runningMapTaskStats = new DataStatistics(); + //runningReduceStats used to maintain the RUNNING reduce tasks' statistics + private DataStatistics runningReduceTaskStats = new DataStatistics(); + + private static class FallowSlotInfo { + long timestamp; + int numSlots; + + public FallowSlotInfo(long timestamp, int numSlots) { + this.timestamp = timestamp; + this.numSlots = numSlots; + } + + public long getTimestamp() { + return timestamp; + } + + public void setTimestamp(long timestamp) { + this.timestamp = timestamp; + } + + public int getNumSlots() { + return numSlots; + } + + public void setNumSlots(int numSlots) { + this.numSlots = numSlots; + } + } + + private Map trackersReservedForMaps = + new HashMap(); + private Map trackersReservedForReduces = + new HashMap(); + + private long lastSpeculativeMapRefresh, lastSpeculativeReduceRefresh; + private long speculativeRefreshTimeout; + private List candidateSpeculativeMaps, candidateSpeculativeReduces; + + /** + * Create an almost empty JobInProgress, which can be used only for tests + */ + protected JobInProgress(JobID jobid, JobConf conf, JobTracker tracker) { + this.conf = conf; + this.jobId = jobid; + this.numMapTasks = conf.getNumMapTasks(); + this.numReduceTasks = conf.getNumReduceTasks(); + this.maxLevel = NetworkTopology.DEFAULT_HOST_LEVEL; + this.anyCacheLevel = this.maxLevel+1; + this.jobtracker = tracker; + this.restartCount = 0; + this.status = new JobStatus(jobid, 0.0f, 0.0f, JobStatus.PREP); + this.profile = new JobProfile(conf.getUser(), jobid, "", "", + conf.getJobName(), conf.getQueueName()); + this.memoryPerMap = conf.getMemoryForMapTask(); + this.memoryPerReduce = conf.getMemoryForReduceTask(); + this.maxTaskFailuresPerTracker = conf.getMaxTaskFailuresPerTracker(); + this.nonLocalMaps = new LinkedList(); + this.nonLocalRunningMaps = new LinkedHashSet(); + this.runningMapCache = new IdentityHashMap>(); + this.nonRunningReduces = new LinkedList(); + this.runningReduces = new LinkedHashSet(); + this.resourceEstimator = new ResourceEstimator(this); + + this.nonLocalMaps = new LinkedList(); + this.nonLocalRunningMaps = new LinkedHashSet(); + this.runningMapCache = new IdentityHashMap>(); + this.nonRunningReduces = new LinkedList(); + this.runningReduces = new LinkedHashSet(); + jobSetupCleanupNeeded = true; + this.taskCompletionEvents = new ArrayList + (numMapTasks + numReduceTasks + 10); + + this.slowTaskThreshold = Math.max(0.0f, + conf.getFloat(JobInProgress.SPECULATIVE_SLOWTASK_THRESHOLD,1.0f)); + this.speculativeCap = conf.getFloat( + JobInProgress.SPECULATIVECAP,0.1f); + this.slowNodeThreshold = conf.getFloat( + JobInProgress.SPECULATIVE_SLOWNODE_THRESHOLD,1.0f); + this.speculativeRefreshTimeout = conf.getLong( + JobInProgress.SPECULATIVE_REFRESH_TIMEOUT, 5000L); + hasSpeculativeMaps = conf.getMapSpeculativeExecution(); + hasSpeculativeReduces = conf.getReduceSpeculativeExecution(); + LOG.info(jobId + ": hasSpeculativeMaps = " + 
hasSpeculativeMaps + + ", hasSpeculativeReduces = " + hasSpeculativeReduces); + } + + /** + * Create a JobInProgress with the given job file, plus a handle + * to the tracker. + */ + public JobInProgress(JobID jobid, JobTracker jobtracker, + JobConf default_conf) throws IOException { + this(jobid, jobtracker, default_conf, 0); + } + + public JobInProgress(JobID jobid, JobTracker jobtracker, + JobConf default_conf, int rCount) throws IOException { + this(jobid, jobtracker, default_conf, null, rCount); + } + + JobInProgress(JobID jobid, JobTracker jobtracker, + JobConf default_conf, String user, int rCount) + throws IOException { + this.restartCount = rCount; + this.jobId = jobid; + String url = "http://" + jobtracker.getJobTrackerMachine() + ":" + + jobtracker.getInfoPort() + "/jobdetails.jsp?jobid=" + jobid; + this.jobtracker = jobtracker; + this.status = new JobStatus(jobid, 0.0f, 0.0f, JobStatus.PREP); + this.jobtracker.getInstrumentation().addPrepJob(conf, jobid); + this.startTime = JobTracker.getClock().getTime(); + status.setStartTime(startTime); + this.localFs = FileSystem.getLocal(default_conf); + + JobConf default_job_conf = new JobConf(default_conf); + this.localJobFile = default_job_conf.getLocalPath(JobTracker.SUBDIR + +"/"+jobid + ".xml"); + + if (user == null) { + this.user = conf.getUser(); + } else { + this.user = user; + } + LOG.info("User : " + this.user); + + Path jobDir = jobtracker.getSystemDirectoryForJob(jobId); + FileSystem fs = jobDir.getFileSystem(default_conf); + jobFile = new Path(jobDir, "job.xml"); + + if (!localFs.exists(localJobFile)) { + fs.copyToLocalFile(jobFile, localJobFile); + } + + conf = new JobConf(localJobFile); + this.priority = conf.getJobPriority(); + this.status.setJobPriority(this.priority); + this.profile = new JobProfile(user, jobid, + jobFile.toString(), url, conf.getJobName(), + conf.getQueueName()); + + this.numMapTasks = conf.getNumMapTasks(); + this.numReduceTasks = conf.getNumReduceTasks(); + this.memoryPerMap = conf.getMemoryForMapTask(); + this.memoryPerReduce = conf.getMemoryForReduceTask(); + this.taskCompletionEvents = new ArrayList + (numMapTasks + numReduceTasks + 10); + this.jobSetupCleanupNeeded = conf.getJobSetupCleanupNeeded(); + this.taskCleanupNeeded = conf.getTaskCleanupNeeded(); + LOG.info("Setup and cleanup tasks: jobSetupCleanupNeeded = " + + jobSetupCleanupNeeded + ", taskCleanupNeeded = " + taskCleanupNeeded); + + this.mapFailuresPercent = conf.getMaxMapTaskFailuresPercent(); + this.reduceFailuresPercent = conf.getMaxReduceTaskFailuresPercent(); + this.maxTaskFailuresPerTracker = conf.getMaxTaskFailuresPerTracker(); + + MetricsContext metricsContext = MetricsUtil.getContext("mapred"); + this.jobMetrics = MetricsUtil.createRecord(metricsContext, "job"); + this.jobMetrics.setTag("user", conf.getUser()); + this.jobMetrics.setTag("sessionId", conf.getSessionId()); + this.jobMetrics.setTag("jobName", conf.getJobName()); + this.jobMetrics.setTag("jobId", jobid.toString()); + hasSpeculativeMaps = conf.getMapSpeculativeExecution(); + hasSpeculativeReduces = conf.getReduceSpeculativeExecution(); + this.maxLevel = jobtracker.getNumTaskCacheLevels(); + this.anyCacheLevel = this.maxLevel+1; + this.nonLocalMaps = new LinkedList(); + this.nonLocalRunningMaps = new LinkedHashSet(); + this.runningMapCache = new IdentityHashMap>(); + this.nonRunningReduces = new LinkedList(); + this.runningReduces = new LinkedHashSet(); + this.resourceEstimator = new ResourceEstimator(this); + this.slowTaskThreshold = Math.max(0.0f, + 
conf.getFloat(SPECULATIVE_SLOWTASK_THRESHOLD,1.0f)); + this.speculativeCap = conf.getFloat(SPECULATIVECAP,0.1f); + this.slowNodeThreshold = conf.getFloat(SPECULATIVE_SLOWNODE_THRESHOLD,1.0f); + } + + public static void copyJobFileLocally(Path jobDir, JobID jobid, + JobConf default_conf) throws IOException { + + FileSystem fs = jobDir.getFileSystem(default_conf); + JobConf default_job_conf = new JobConf(default_conf); + Path localJobFile = default_job_conf.getLocalPath(JobTracker.SUBDIR + "/" + + jobid + ".xml"); + Path jobFile = new Path(jobDir, "job.xml"); + fs.copyToLocalFile(jobFile, localJobFile); + } + + /** + * Called periodically by JobTrackerMetrics to update the metrics for + * this job. + */ + public void updateMetrics() { + Counters counters = getCounters(); + for (Counters.Group group : counters) { + jobMetrics.setTag("group", group.getDisplayName()); + for (Counters.Counter counter : group) { + jobMetrics.setTag("counter", counter.getDisplayName()); + jobMetrics.setMetric("value", (float) counter.getCounter()); + jobMetrics.update(); + } + } + } + + /** + * Called when the job is complete + */ + public void cleanUpMetrics() { + // Deletes all metric data for this job (in internal table in metrics package). + // This frees up RAM and possibly saves network bandwidth, since otherwise + // the metrics package implementation might continue to send these job metrics + // after the job has finished. + jobMetrics.removeTag("group"); + jobMetrics.removeTag("counter"); + jobMetrics.remove(); + } + + private void printCache (Map> cache) { + LOG.info("The taskcache info:"); + for (Map.Entry> n : cache.entrySet()) { + List tips = n.getValue(); + LOG.info("Cached TIPs on node: " + n.getKey()); + for (TaskInProgress tip : tips) { + LOG.info("tip : " + tip.getTIPId()); + } + } + } + + Map> createCache(JobClient.RawSplit[] splits, + int maxLevel) { + Map> cache = + new IdentityHashMap>(maxLevel); + + for (int i = 0; i < splits.length; i++) { + String[] splitLocations = splits[i].getLocations(); + if (splitLocations.length == 0) { + nonLocalMaps.add(maps[i]); + continue; + } + + for(String host: splitLocations) { + Node node = jobtracker.resolveAndAddToTopology(host); + LOG.debug("tip:" + maps[i].getTIPId() + " has split on node:" + node); + for (int j = 0; j < maxLevel; j++) { + List hostMaps = cache.get(node); + if (hostMaps == null) { + hostMaps = new ArrayList(); + cache.put(node, hostMaps); + hostMaps.add(maps[i]); + } + //check whether the hostMaps already contains an entry for a TIP + //This will be true for nodes that are racks and multiple nodes in + //the rack contain the input for a tip. Note that if it already + //exists in the hostMaps, it must be the last element there since + //we process one TIP at a time sequentially in the split-size order + if (hostMaps.get(hostMaps.size() - 1) != maps[i]) { + hostMaps.add(maps[i]); + } + node = node.getParent(); + } + } + } + return cache; + } + + /** + * Check if the job has been initialized. + * @return true if the job has been initialized, + * false otherwise + */ + public boolean inited() { + return tasksInited.get(); + } + + boolean hasRestarted() { + return restartCount > 0; + } + + /** + * Get the number of slots required to run a single map task-attempt. + * @return the number of slots required to run a single map task-attempt + */ + int getNumSlotsPerMap() { + return numSlotsPerMap; + } + + /** + * Set the number of slots required to run a single map task-attempt. + * This is typically set by schedulers which support high-ram jobs. 
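+   * <p>
+   * For instance, a scheduler could derive the value from the job's map
+   * memory requirement (<code>slotSizeMB</code> is an illustrative name for
+   * the per-slot memory the scheduler is configured with):
+   * <pre>
+   * int slots = (int) Math.ceil(
+   *     (double) job.getMemoryForMapTask() / slotSizeMB);
+   * job.setNumSlotsPerMap(slots);
+   * </pre>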
+ * @param slots the number of slots required to run a single map task-attempt + */ + void setNumSlotsPerMap(int numSlotsPerMap) { + this.numSlotsPerMap = numSlotsPerMap; + } + + /** + * Get the number of slots required to run a single reduce task-attempt. + * @return the number of slots required to run a single reduce task-attempt + */ + int getNumSlotsPerReduce() { + return numSlotsPerReduce; + } + + /** + * Set the number of slots required to run a single reduce task-attempt. + * This is typically set by schedulers which support high-ram jobs. + * @param slots the number of slots required to run a single reduce + * task-attempt + */ + void setNumSlotsPerReduce(int numSlotsPerReduce) { + this.numSlotsPerReduce = numSlotsPerReduce; + } + + /** + * Construct the splits, etc. This is invoked from an async + * thread so that split-computation doesn't block anyone. + */ + public synchronized void initTasks() + throws IOException, KillInterruptedException { + if (tasksInited.get() || isComplete()) { + return; + } + synchronized(jobInitKillStatus){ + if(jobInitKillStatus.killed || jobInitKillStatus.initStarted) { + return; + } + jobInitKillStatus.initStarted = true; + } + + LOG.info("Initializing " + jobId); + + // log job info + JobHistory.JobInfo.logSubmitted(getJobID(), conf, jobFile.toString(), + this.startTime, hasRestarted()); + // log the job priority + setPriority(this.priority); + + // + // read input splits and create a map per a split + // + String jobFile = profile.getJobFile(); + + Path sysDir = new Path(this.jobtracker.getSystemDir()); + FileSystem fs = sysDir.getFileSystem(conf); + DataInputStream splitFile = + fs.open(new Path(conf.get("mapred.job.split.file"))); + JobClient.RawSplit[] splits; + try { + splits = JobClient.readSplitFile(splitFile); + } finally { + splitFile.close(); + } + numMapTasks = splits.length; + + + // if the number of splits is larger than a configured value + // then fail the job. + int maxTasks = jobtracker.getMaxTasksPerJob(); + if (maxTasks > 0 && numMapTasks + numReduceTasks > maxTasks) { + throw new IOException( + "The number of tasks for this job " + + (numMapTasks + numReduceTasks) + + " exceeds the configured limit " + maxTasks); + } + jobtracker.getInstrumentation().addWaitingMaps(getJobID(), numMapTasks); + jobtracker.getInstrumentation().addWaitingReduces(getJobID(), numReduceTasks); + + maps = new TaskInProgress[numMapTasks]; + for(int i=0; i < numMapTasks; ++i) { + inputLength += splits[i].getDataLength(); + maps[i] = new TaskInProgress(jobId, jobFile, + splits[i], + jobtracker, conf, this, i, numSlotsPerMap); + } + LOG.info("Input size for job " + jobId + " = " + inputLength + + ". 
Number of splits = " + splits.length); + if (numMapTasks > 0) { + nonRunningMapCache = createCache(splits, maxLevel); + } + + // set the launch time + this.launchTime = JobTracker.getClock().getTime(); + jobtracker.getInstrumentation().addLaunchedJobs( + this.launchTime - this.startTime); + + // + // Create reduce tasks + // + this.reduces = new TaskInProgress[numReduceTasks]; + for (int i = 0; i < numReduceTasks; i++) { + reduces[i] = new TaskInProgress(jobId, jobFile, + numMapTasks, i, + jobtracker, conf, this, numSlotsPerReduce); + nonRunningReduces.add(reduces[i]); + } + + // Calculate the minimum number of maps to be complete before + // we should start scheduling reduces + completedMapsForReduceSlowstart = + (int)Math.ceil( + (conf.getFloat("mapred.reduce.slowstart.completed.maps", + DEFAULT_COMPLETED_MAPS_PERCENT_FOR_REDUCE_SLOWSTART) * + numMapTasks)); + // The thresholds of total maps and reduces for scheduling reducers + // immediately. + rushReduceMaps = + conf.getInt(RUSH_REDUCER_MAP_THRESHOLD, rushReduceMaps); + rushReduceReduces = + conf.getInt(RUSH_REDUCER_REDUCE_THRESHOLD, rushReduceReduces); + + initSetupCleanupTasks(jobFile); + + synchronized(jobInitKillStatus){ + jobInitKillStatus.initDone = true; + if(jobInitKillStatus.killed) { + throw new KillInterruptedException("Job " + jobId + " killed in init"); + } + } + + tasksInited.set(true); + JobHistory.JobInfo.logInited(profile.getJobID(), this.launchTime, + numMapTasks, numReduceTasks); + + // Log the number of map and reduce tasks + LOG.info("Job " + jobId + " initialized successfully with " + numMapTasks + + " map tasks and " + numReduceTasks + " reduce tasks."); + } + + // Returns true if the job is empty (0 maps, 0 reduces and no setup-cleanup) + // else return false. + synchronized boolean isJobEmpty() { + return maps.length == 0 && reduces.length == 0 && !jobSetupCleanupNeeded; + } + + // Should be called once the init is done. This will complete the job + // because the job is empty (0 maps, 0 reduces and no setup-cleanup). + synchronized void completeEmptyJob() { + jobComplete(); + } + + synchronized void completeSetup() { + setupComplete(); + } + + private void initSetupCleanupTasks(String jobFile) { + if (!jobSetupCleanupNeeded) { + LOG.info("Setup/Cleanup not needed for job" + jobId); + // nothing to initialize + return; + } + + // create cleanup two cleanup tips, one map and one reduce. + cleanup = new TaskInProgress[2]; + + // cleanup map tip. This map doesn't use any splits. Just assign an empty + // split. + JobClient.RawSplit emptySplit = new JobClient.RawSplit(); + cleanup[0] = new TaskInProgress(jobId, jobFile, emptySplit, + jobtracker, conf, this, numMapTasks, 1); + cleanup[0].setJobCleanupTask(); + + // cleanup reduce tip. + cleanup[1] = new TaskInProgress(jobId, jobFile, numMapTasks, + numReduceTasks, jobtracker, conf, this, 1); + cleanup[1].setJobCleanupTask(); + + // create two setup tips, one map and one reduce. + setup = new TaskInProgress[2]; + + // setup map tip. This map doesn't use any split. Just assign an empty + // split. + setup[0] = new TaskInProgress(jobId, jobFile, emptySplit, + jobtracker, conf, this, numMapTasks + 1, 1); + setup[0].setJobSetupTask(); + + // setup reduce tip. 
+ setup[1] = new TaskInProgress(jobId, jobFile, numMapTasks, + numReduceTasks + 1, jobtracker, conf, this, 1); + setup[1].setJobSetupTask(); + } + + synchronized boolean isSetupCleanupRequired() { + return jobSetupCleanupNeeded; + } + + void setupComplete() { + status.setSetupProgress(1.0f); + if (this.status.getRunState() == JobStatus.PREP) { + changeStateTo(JobStatus.RUNNING); + JobHistory.JobInfo.logStarted(profile.getJobID()); + } + } + + ///////////////////////////////////////////////////// + // Accessors for the JobInProgress + ///////////////////////////////////////////////////// + public String getConf(String key) { + return this.conf.get(key); + } + + public JobProfile getProfile() { + return profile; + } + public JobStatus getStatus() { + return status; + } + public synchronized long getLaunchTime() { + return launchTime; + } + public long getStartTime() { + return startTime; + } + public long getFinishTime() { + return finishTime; + } + public int desiredMaps() { + return numMapTasks; + } + boolean getMapSpeculativeExecution() { + return hasSpeculativeMaps; + } + boolean getReduceSpeculativeExecution() { + return hasSpeculativeReduces; + } + long getMemoryForMapTask() { + return memoryPerMap; + } + + long getMemoryForReduceTask() { + return memoryPerReduce; + } + public synchronized int finishedMaps() { + return finishedMapTasks; + } + public int desiredReduces() { + return numReduceTasks; + } + public synchronized int runningMaps() { + return runningMapTasks; + } + public synchronized int runningReduces() { + return runningReduceTasks; + } + public synchronized int finishedReduces() { + return finishedReduceTasks; + } + public synchronized int pendingMaps() { + return numMapTasks - runningMapTasks - failedMapTIPs - + finishedMapTasks + speculativeMapTasks; + } + public synchronized int pendingReduces() { + return numReduceTasks - runningReduceTasks - failedReduceTIPs - + finishedReduceTasks + speculativeReduceTasks; + } + public int getNumSlotsPerTask(TaskType taskType) { + if (taskType == TaskType.MAP) { + return numSlotsPerMap; + } else if (taskType == TaskType.REDUCE) { + return numSlotsPerReduce; + } else { + return 1; + } + } + public JobPriority getPriority() { + return this.priority; + } + public void setPriority(JobPriority priority) { + if(priority == null) { + priority = JobPriority.NORMAL; + } + + synchronized (this) { + this.priority = priority; + status.setJobPriority(priority); + } + // log and change to the job's priority + JobHistory.JobInfo.logJobPriority(jobId, priority); + } + + // Update the job start/launch time (upon restart) and log to history + synchronized void updateJobInfo(long startTime, long launchTime) { + // log and change to the job's start/launch time + this.startTime = startTime; + this.launchTime = launchTime; + JobHistory.JobInfo.logJobInfo(jobId, startTime, launchTime); + } + + /** + * Get the number of times the job has restarted + */ + int getNumRestarts() { + return restartCount; + } + + long getInputLength() { + return inputLength; + } + + boolean isCleanupLaunched() { + return launchedCleanup; + } + + boolean isSetupLaunched() { + return launchedSetup; + } + + /** + * Get all the tasks of the desired type in this job. + * @param type {@link TaskType} of the tasks required + * @return An array of {@link TaskInProgress} matching the given type. + * Returns an empty array if no tasks are found for the given type. 
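+   * <p>
+   * For example (a sketch; <code>job</code> stands for an initialized
+   * JobInProgress):
+   * <pre>
+   * for (TaskInProgress tip : job.getTasks(TaskType.MAP)) {
+   *   LOG.info(tip.getTIPId() + " complete: " + tip.isComplete());
+   * }
+   * </pre>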
+ */ + TaskInProgress[] getTasks(TaskType type) { + TaskInProgress[] tasks = null; + switch (type) { + case MAP: + { + tasks = maps; + } + break; + case REDUCE: + { + tasks = reduces; + } + break; + case JOB_SETUP: + { + tasks = setup; + } + break; + case JOB_CLEANUP: + { + tasks = cleanup; + } + break; + default: + { + tasks = new TaskInProgress[0]; + } + break; + } + + return tasks; + } + + /** + * Return the nonLocalRunningMaps + * @return + */ + Set getNonLocalRunningMaps() + { + return nonLocalRunningMaps; + } + + /** + * Return the runningMapCache + * @return + */ + Map> getRunningMapCache() + { + return runningMapCache; + } + + /** + * Return runningReduces + * @return + */ + Set getRunningReduces() + { + return runningReduces; + } + + /** + * Get the job configuration + * @return the job's configuration + */ + JobConf getJobConf() { + return conf; + } + + /** + * Get the job user/owner + * @return the job's user/owner + */ + String getUser() { + return user; + } + + /** + * Return a vector of completed TaskInProgress objects + */ + public synchronized Vector reportTasksInProgress(boolean shouldBeMap, + boolean shouldBeComplete) { + + Vector results = new Vector(); + TaskInProgress tips[] = null; + if (shouldBeMap) { + tips = maps; + } else { + tips = reduces; + } + for (int i = 0; i < tips.length; i++) { + if (tips[i].isComplete() == shouldBeComplete) { + results.add(tips[i]); + } + } + return results; + } + + /** + * Return a vector of cleanup TaskInProgress objects + */ + public synchronized Vector reportCleanupTIPs( + boolean shouldBeComplete) { + + Vector results = new Vector(); + for (int i = 0; i < cleanup.length; i++) { + if (cleanup[i].isComplete() == shouldBeComplete) { + results.add(cleanup[i]); + } + } + return results; + } + + /** + * Return a vector of setup TaskInProgress objects + */ + public synchronized Vector reportSetupTIPs( + boolean shouldBeComplete) { + + Vector results = new Vector(); + for (int i = 0; i < setup.length; i++) { + if (setup[i].isComplete() == shouldBeComplete) { + results.add(setup[i]); + } + } + return results; + } + + //////////////////////////////////////////////////// + // Status update methods + //////////////////////////////////////////////////// + + /** + * Assuming {@link JobTracker} is locked on entry. + */ + public synchronized void updateTaskStatus(TaskInProgress tip, + TaskStatus status) { + + double oldProgress = tip.getProgress(); // save old progress + boolean wasRunning = tip.isRunning(); + boolean wasComplete = tip.isComplete(); + boolean wasPending = tip.isOnlyCommitPending(); + TaskAttemptID taskid = status.getTaskID(); + boolean wasAttemptRunning = tip.isAttemptRunning(taskid); + + // If the TIP is already completed and the task reports as SUCCEEDED then + // mark the task as KILLED. + // In case of task with no promotion the task tracker will mark the task + // as SUCCEEDED. + // User has requested to kill the task, but TT reported SUCCEEDED, + // mark the task KILLED. + if ((wasComplete || tip.wasKilled(taskid)) && + (status.getRunState() == TaskStatus.State.SUCCEEDED)) { + status.setRunState(TaskStatus.State.KILLED); + } + + // When a task has just reported its state as FAILED_UNCLEAN/KILLED_UNCLEAN, + // if the job is complete or cleanup task is switched off, + // make the task's state FAILED/KILLED without launching cleanup attempt. 
+ // Note that if task is already a cleanup attempt, + // we don't change the state to make sure the task gets a killTaskAction + if ((this.isComplete() || jobFailed || jobKilled || !taskCleanupNeeded) && + !tip.isCleanupAttempt(taskid)) { + if (status.getRunState() == TaskStatus.State.FAILED_UNCLEAN) { + status.setRunState(TaskStatus.State.FAILED); + } else if (status.getRunState() == TaskStatus.State.KILLED_UNCLEAN) { + status.setRunState(TaskStatus.State.KILLED); + } + } + + boolean change = tip.updateStatus(status); + if (change) { + TaskStatus.State state = status.getRunState(); + // get the TaskTrackerStatus where the task ran + TaskTracker taskTracker = + this.jobtracker.getTaskTracker(tip.machineWhereTaskRan(taskid)); + TaskTrackerStatus ttStatus = + (taskTracker == null) ? null : taskTracker.getStatus(); + String httpTaskLogLocation = null; + + if (null != ttStatus){ + String host; + if (NetUtils.getStaticResolution(ttStatus.getHost()) != null) { + host = NetUtils.getStaticResolution(ttStatus.getHost()); + } else { + host = ttStatus.getHost(); + } + httpTaskLogLocation = "http://" + host + ":" + ttStatus.getHttpPort(); + //+ "/tasklog?plaintext=true&taskid=" + status.getTaskID(); + } + + TaskCompletionEvent taskEvent = null; + if (state == TaskStatus.State.SUCCEEDED) { + taskEvent = new TaskCompletionEvent( + taskCompletionEventTracker, + taskid, + tip.idWithinJob(), + status.getIsMap() && + !tip.isJobCleanupTask() && + !tip.isJobSetupTask(), + TaskCompletionEvent.Status.SUCCEEDED, + httpTaskLogLocation + ); + taskEvent.setTaskRunTime((int)(status.getFinishTime() + - status.getStartTime())); + tip.setSuccessEventNumber(taskCompletionEventTracker); + } else if (state == TaskStatus.State.COMMIT_PENDING) { + // If it is the first attempt reporting COMMIT_PENDING + // ask the task to commit. + if (!wasComplete && !wasPending) { + tip.doCommit(taskid); + } + return; + } else if (state == TaskStatus.State.FAILED_UNCLEAN || + state == TaskStatus.State.KILLED_UNCLEAN) { + tip.incompleteSubTask(taskid, this.status); + // add this task, to be rescheduled as cleanup attempt + if (tip.isMapTask()) { + mapCleanupTasks.add(taskid); + } else { + reduceCleanupTasks.add(taskid); + } + // Remove the task entry from jobtracker + jobtracker.removeTaskEntry(taskid); + } + //For a failed task update the JT datastructures. + else if (state == TaskStatus.State.FAILED || + state == TaskStatus.State.KILLED) { + // Get the event number for the (possibly) previously successful + // task. If there exists one, then set that status to OBSOLETE + int eventNumber; + if ((eventNumber = tip.getSuccessEventNumber()) != -1) { + TaskCompletionEvent t = + this.taskCompletionEvents.get(eventNumber); + if (t.getTaskAttemptId().equals(taskid)) + t.setTaskStatus(TaskCompletionEvent.Status.OBSOLETE); + } + + // Tell the job to fail the relevant task + failedTask(tip, taskid, status, taskTracker, + wasRunning, wasComplete, wasAttemptRunning); + + // Did the task failure lead to tip failure? + TaskCompletionEvent.Status taskCompletionStatus = + (state == TaskStatus.State.FAILED ) ? + TaskCompletionEvent.Status.FAILED : + TaskCompletionEvent.Status.KILLED; + if (tip.isFailed()) { + taskCompletionStatus = TaskCompletionEvent.Status.TIPFAILED; + } + taskEvent = new TaskCompletionEvent(taskCompletionEventTracker, + taskid, + tip.idWithinJob(), + status.getIsMap() && + !tip.isJobCleanupTask() && + !tip.isJobSetupTask(), + taskCompletionStatus, + httpTaskLogLocation + ); + } + + // Add the 'complete' task i.e. 
successful/failed + // It _is_ safe to add the TaskCompletionEvent.Status.SUCCEEDED + // *before* calling TIP.completedTask since: + // a. One and only one task of a TIP is declared as a SUCCESS, the + // other (speculative tasks) are marked KILLED by the TaskCommitThread + // b. TIP.completedTask *does not* throw _any_ exception at all. + if (taskEvent != null) { + this.taskCompletionEvents.add(taskEvent); + taskCompletionEventTracker++; + JobTrackerStatistics.TaskTrackerStat ttStat = jobtracker. + getStatistics().getTaskTrackerStat(tip.machineWhereTaskRan(taskid)); + if(ttStat != null) { // ttStat can be null in case of lost tracker + ttStat.incrTotalTasks(); + } + if (state == TaskStatus.State.SUCCEEDED) { + completedTask(tip, status); + if(ttStat != null) { + ttStat.incrSucceededTasks(); + } + } + } + } + + // + // Update JobInProgress status + // + if(LOG.isDebugEnabled()) { + LOG.debug("Taking progress for " + tip.getTIPId() + " from " + + oldProgress + " to " + tip.getProgress()); + } + + if (!tip.isJobCleanupTask() && !tip.isJobSetupTask()) { + double progressDelta = tip.getProgress() - oldProgress; + if (tip.isMapTask()) { + this.status.setMapProgress((float) (this.status.mapProgress() + + progressDelta / maps.length)); + } else { + this.status.setReduceProgress((float) (this.status.reduceProgress() + + (progressDelta / reduces.length))); + } + } + } + + String getHistoryFile() { + return historyFile; + } + + synchronized void setHistoryFile(String file) { + this.historyFile = file; + } + + boolean isHistoryFileCopied() { + return historyFileCopied; + } + + synchronized void setHistoryFileCopied() { + this.historyFileCopied = true; + } + + /** + * Returns the job-level counters. + * + * @return the job-level counters. + */ + public synchronized Counters getJobCounters() { + return jobCounters; + } + + /** + * Returns map phase counters by summing over all map tasks in progress. + */ + public synchronized Counters getMapCounters() { + return incrementTaskCounters(new Counters(), maps); + } + + /** + * Returns map phase counters by summing over all map tasks in progress. + */ + public synchronized Counters getReduceCounters() { + return incrementTaskCounters(new Counters(), reduces); + } + + /** + * Returns the total job counters, by adding together the job, + * the map and the reduce counters. + */ + public Counters getCounters() { + Counters result = new Counters(); + synchronized (this) { + result.incrAllCounters(getJobCounters()); + } + + incrementTaskCounters(result, maps); + return incrementTaskCounters(result, reduces); + } + + /** + * Increments the counters with the counters from each task. 
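+   * <p>
+   * This is the same folding that {@link #getCounters()} performs, e.g.:
+   * <pre>
+   * Counters total = new Counters();
+   * incrementTaskCounters(total, maps);
+   * incrementTaskCounters(total, reduces);
+   * </pre>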
+ * @param counters the counters to increment + * @param tips the tasks to add in to counters + * @return counters the same object passed in as counters + */ + private Counters incrementTaskCounters(Counters counters, + TaskInProgress[] tips) { + for (TaskInProgress tip : tips) { + counters.incrAllCounters(tip.getCounters()); + } + return counters; + } + + ///////////////////////////////////////////////////// + // Create/manage tasks + ///////////////////////////////////////////////////// + /** + * Return a MapTask, if appropriate, to run on the given tasktracker + */ + public synchronized Task obtainNewMapTask(TaskTrackerStatus tts, + int clusterSize, + int numUniqueHosts + ) throws IOException { + return obtainNewMapTask(tts, clusterSize, numUniqueHosts, anyCacheLevel); + } + /** + * Return a MapTask, if appropriate, to run on the given tasktracker + */ + public synchronized Task obtainNewMapTask(TaskTrackerStatus tts, + int clusterSize, + int numUniqueHosts, + int maxCacheLevel + ) throws IOException { + if (status.getRunState() != JobStatus.RUNNING) { + LOG.info("Cannot create task split for " + profile.getJobID()); + return null; + } + + int target = findNewMapTask(tts, clusterSize, numUniqueHosts, + maxCacheLevel); + if (target == -1) { + return null; + } + + Task result = maps[target].getTaskToRun(tts.getTrackerName()); + if (result != null) { + addRunningTaskToTIP(maps[target], result.getTaskID(), tts, true); + } + + return result; + } + + /* + * Return task cleanup attempt if any, to run on a given tracker + */ + public Task obtainTaskCleanupTask(TaskTrackerStatus tts, + boolean isMapSlot) + throws IOException { + if (!tasksInited.get()) { + return null; + } + + if (this.status.getRunState() != JobStatus.RUNNING || + jobFailed || jobKilled) { + return null; + } + + if (isMapSlot) { + if (mapCleanupTasks.isEmpty()) + return null; + } else { + if (reduceCleanupTasks.isEmpty()) + return null; + } + + synchronized (this) { + if (this.status.getRunState() != JobStatus.RUNNING || + jobFailed || jobKilled) { + return null; + } + String taskTracker = tts.getTrackerName(); + if (!shouldRunOnTaskTracker(taskTracker)) { + return null; + } + TaskAttemptID taskid = null; + TaskInProgress tip = null; + if (isMapSlot) { + if (!mapCleanupTasks.isEmpty()) { + taskid = mapCleanupTasks.remove(0); + tip = maps[taskid.getTaskID().getId()]; + } + } else { + if (!reduceCleanupTasks.isEmpty()) { + taskid = reduceCleanupTasks.remove(0); + tip = reduces[taskid.getTaskID().getId()]; + } + } + if (tip != null) { + return tip.addRunningTask(taskid, taskTracker, true); + } + return null; + } + } + + public synchronized Task obtainNewLocalMapTask(TaskTrackerStatus tts, + int clusterSize, + int numUniqueHosts) + throws IOException { + if (!tasksInited.get()) { + LOG.info("Cannot create task split for " + profile.getJobID()); + return null; + } + + int target = findNewMapTask(tts, clusterSize, numUniqueHosts, maxLevel); + if (target == -1) { + return null; + } + + Task result = maps[target].getTaskToRun(tts.getTrackerName()); + if (result != null) { + addRunningTaskToTIP(maps[target], result.getTaskID(), tts, true); + } + + return result; + } + + public synchronized Task obtainNewNonLocalMapTask(TaskTrackerStatus tts, + int clusterSize, + int numUniqueHosts) + throws IOException { + if (!tasksInited.get()) { + LOG.info("Cannot create task split for " + profile.getJobID()); + return null; + } + + int target = findNewMapTask(tts, clusterSize, numUniqueHosts, + NON_LOCAL_CACHE_LEVEL); + if (target == -1) { + return 
null; + } + + Task result = maps[target].getTaskToRun(tts.getTrackerName()); + if (result != null) { + addRunningTaskToTIP(maps[target], result.getTaskID(), tts, true); + } + + return result; + } + + /** + * Return a CleanupTask, if appropriate, to run on the given tasktracker + * + */ + public Task obtainJobCleanupTask(TaskTrackerStatus tts, + int clusterSize, + int numUniqueHosts, + boolean isMapSlot + ) throws IOException { + if(!tasksInited.get() || !jobSetupCleanupNeeded) { + return null; + } + + synchronized(this) { + if (!canLaunchJobCleanupTask()) { + return null; + } + + String taskTracker = tts.getTrackerName(); + // Update the last-known clusterSize + this.clusterSize = clusterSize; + if (!shouldRunOnTaskTracker(taskTracker)) { + return null; + } + + List cleanupTaskList = new ArrayList(); + if (isMapSlot) { + cleanupTaskList.add(cleanup[0]); + } else { + cleanupTaskList.add(cleanup[1]); + } + TaskInProgress tip = findTaskFromList(cleanupTaskList, + tts, numUniqueHosts, false); + if (tip == null) { + return null; + } + + // Now launch the cleanupTask + Task result = tip.getTaskToRun(tts.getTrackerName()); + + if (result != null) { + addRunningTaskToTIP(tip, result.getTaskID(), tts, true); + if (jobFailed) { + result.setJobCleanupTaskState + (org.apache.hadoop.mapreduce.JobStatus.State.FAILED); + } else if (jobKilled) { + result.setJobCleanupTaskState + (org.apache.hadoop.mapreduce.JobStatus.State.KILLED); + } else { + result.setJobCleanupTaskState + (org.apache.hadoop.mapreduce.JobStatus.State.SUCCEEDED); + } + } + return result; + } + + } + + /** + * Check whether cleanup task can be launched for the job. + * + * Cleanup task can be launched if it is not already launched + * or job is Killed + * or all maps and reduces are complete + * @return true/false + */ + private synchronized boolean canLaunchJobCleanupTask() { + // check if the job is running + if (status.getRunState() != JobStatus.RUNNING && + status.getRunState() != JobStatus.PREP) { + return false; + } + // check if cleanup task has been launched already or if setup isn't + // launched already. The later check is useful when number of maps is + // zero. + if (launchedCleanup || !isSetupFinished()) { + return false; + } + // check if job has failed or killed + if (jobKilled || jobFailed) { + return true; + } + // Check if all maps and reducers have finished. 
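+      // A TIP counts toward completion whether it succeeded or failed permanently,
+      // so compare (finished + failed) TIPs against the totals for both phases.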
+ boolean launchCleanupTask = + ((finishedMapTasks + failedMapTIPs) == (numMapTasks)); + if (launchCleanupTask) { + launchCleanupTask = + ((finishedReduceTasks + failedReduceTIPs) == numReduceTasks); + } + return launchCleanupTask; + } + + /** + * Return a SetupTask, if appropriate, to run on the given tasktracker + * + */ + public Task obtainJobSetupTask(TaskTrackerStatus tts, + int clusterSize, + int numUniqueHosts, + boolean isMapSlot + ) throws IOException { + if(!tasksInited.get() || !jobSetupCleanupNeeded) { + return null; + } + + synchronized(this) { + if (!canLaunchSetupTask()) { + return null; + } + String taskTracker = tts.getTrackerName(); + // Update the last-known clusterSize + this.clusterSize = clusterSize; + if (!shouldRunOnTaskTracker(taskTracker)) { + return null; + } + + List setupTaskList = new ArrayList(); + if (isMapSlot) { + setupTaskList.add(setup[0]); + } else { + setupTaskList.add(setup[1]); + } + TaskInProgress tip = findTaskFromList(setupTaskList, + tts, numUniqueHosts, false); + if (tip == null) { + return null; + } + + // Now launch the setupTask + Task result = tip.getTaskToRun(tts.getTrackerName()); + if (result != null) { + addRunningTaskToTIP(tip, result.getTaskID(), tts, true); + } + return result; + } + } + + /** + * Can we start schedule reducers? + * @return true/false + */ + public synchronized boolean scheduleReduces() { + // Start scheduling reducers if we have enough maps finished or + // if the job has very few mappers or reducers. + return numMapTasks <= rushReduceMaps || + numReduceTasks <= rushReduceReduces || + finishedMapTasks >= completedMapsForReduceSlowstart; + } + + /** + * Check whether setup task can be launched for the job. + * + * Setup task can be launched after the tasks are inited + * and Job is in PREP state + * and if it is not already launched + * or job is not Killed/Failed + * @return true/false + */ + private synchronized boolean canLaunchSetupTask() { + return (tasksInited.get() && status.getRunState() == JobStatus.PREP && + !launchedSetup && !jobKilled && !jobFailed); + } + + + /** + * Return a ReduceTask, if appropriate, to run on the given tasktracker. + * We don't have cache-sensitivity for reduce tasks, as they + * work on temporary MapRed files. + */ + public synchronized Task obtainNewReduceTask(TaskTrackerStatus tts, + int clusterSize, + int numUniqueHosts + ) throws IOException { + if (status.getRunState() != JobStatus.RUNNING) { + LOG.info("Cannot create task split for " + profile.getJobID()); + return null; + } + + // Ensure we have sufficient map outputs ready to shuffle before + // scheduling reduces + if (!scheduleReduces()) { + return null; + } + + int target = findNewReduceTask(tts, clusterSize, numUniqueHosts); + if (target == -1) { + return null; + } + + Task result = reduces[target].getTaskToRun(tts.getTrackerName()); + if (result != null) { + addRunningTaskToTIP(reduces[target], result.getTaskID(), tts, true); + } + + return result; + } + + // returns the (cache)level at which the nodes matches + private int getMatchingLevelForNodes(Node n1, Node n2) { + int count = 0; + do { + if (n1.equals(n2)) { + return count; + } + ++count; + n1 = n1.getParent(); + n2 = n2.getParent(); + } while (n1 != null && n2 != null); + return this.maxLevel; + } + + /** + * Populate the data structures as a task is scheduled. + * + * Assuming {@link JobTracker} is locked on entry. 
+ * + * @param tip The tip for which the task is added + * @param id The attempt-id for the task + * @param tts task-tracker status + * @param isScheduled Whether this task is scheduled from the JT or has + * joined back upon restart + */ + synchronized void addRunningTaskToTIP(TaskInProgress tip, TaskAttemptID id, + TaskTrackerStatus tts, + boolean isScheduled) { + // Make an entry in the tip if the attempt is not scheduled i.e externally + // added + if (!isScheduled) { + tip.addRunningTask(id, tts.getTrackerName()); + } + final JobTrackerInstrumentation metrics = jobtracker.getInstrumentation(); + + // keeping the earlier ordering intact + String name; + String splits = ""; + Enum counter = null; + if (tip.isJobSetupTask()) { + launchedSetup = true; + name = Values.SETUP.name(); + } else if (tip.isJobCleanupTask()) { + launchedCleanup = true; + name = Values.CLEANUP.name(); + } else if (tip.isMapTask()) { + ++runningMapTasks; + name = Values.MAP.name(); + counter = Counter.TOTAL_LAUNCHED_MAPS; + splits = tip.getSplitNodes(); + if (tip.getActiveTasks().size() > 1) { + speculativeMapTasks++; + if (!garbageCollected) { + totalSpeculativeMapTasks.incrementAndGet(); + } + metrics.speculateMap(id); + } + metrics.launchMap(id); + } else { + ++runningReduceTasks; + name = Values.REDUCE.name(); + counter = Counter.TOTAL_LAUNCHED_REDUCES; + if (tip.getActiveTasks().size() > 1) { + speculativeReduceTasks++; + if (!garbageCollected) { + totalSpeculativeReduceTasks.incrementAndGet(); + } + metrics.speculateReduce(id); + } + metrics.launchReduce(id); + } + // Note that the logs are for the scheduled tasks only. Tasks that join on + // restart has already their logs in place. + if (tip.isFirstAttempt(id)) { + JobHistory.Task.logStarted(tip.getTIPId(), name, + tip.getExecStartTime(), splits); + } + if (!tip.isJobSetupTask() && !tip.isJobCleanupTask()) { + jobCounters.incrCounter(counter, 1); + } + + //TODO The only problem with these counters would be on restart. + // The jobtracker updates the counter only when the task that is scheduled + // if from a non-running tip and is local (data, rack ...). But upon restart + // as the reports come from the task tracker, there is no good way to infer + // when exactly to increment the locality counters. The only solution is to + // increment the counters for all the tasks irrespective of + // - whether the tip is running or not + // - whether its a speculative task or not + // + // So to simplify, increment the data locality counter whenever there is + // data locality. 
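+    // Locality is the smallest topology distance between the tracker's node and
+    // any split location: 0 => data-local, 1 => rack-local, any other value
+    // below maxLevel => other-local.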
+ if (tip.isMapTask() && !tip.isJobSetupTask() && !tip.isJobCleanupTask()) { + // increment the data locality counter for maps + Node tracker = jobtracker.getNode(tts.getHost()); + int level = this.maxLevel; + // find the right level across split locations + for (String local : maps[tip.getIdWithinJob()].getSplitLocations()) { + Node datanode = jobtracker.getNode(local); + int newLevel = this.maxLevel; + if (tracker != null && datanode != null) { + newLevel = getMatchingLevelForNodes(tracker, datanode); + } + if (newLevel < level) { + level = newLevel; + // an optimization + if (level == 0) { + break; + } + } + } + switch (level) { + case 0 : + LOG.info("Choosing data-local task " + tip.getTIPId()); + jobCounters.incrCounter(Counter.DATA_LOCAL_MAPS, 1); + metrics.launchDataLocalMap(id); + break; + case 1: + LOG.info("Choosing rack-local task " + tip.getTIPId()); + jobCounters.incrCounter(Counter.RACK_LOCAL_MAPS, 1); + metrics.launchRackLocalMap(id); + break; + default : + // check if there is any locality + if (level != this.maxLevel) { + LOG.info("Choosing cached task at level " + level + tip.getTIPId()); + jobCounters.incrCounter(Counter.OTHER_LOCAL_MAPS, 1); + } + break; + } + } + } + + static String convertTrackerNameToHostName(String trackerName) { + // Ugly! + // Convert the trackerName to it's host name + int indexOfColon = trackerName.indexOf(":"); + String trackerHostName = (indexOfColon == -1) ? + trackerName : + trackerName.substring(0, indexOfColon); + return trackerHostName.substring("tracker_".length()); + } + + /** + * Note that a task has failed on a given tracker and add the tracker + * to the blacklist iff too many trackers in the cluster i.e. + * (clusterSize * CLUSTER_BLACKLIST_PERCENT) haven't turned 'flaky' already. + * + * @param taskTracker task-tracker on which a task failed + */ + synchronized void addTrackerTaskFailure(String trackerName, + TaskTracker taskTracker) { + if (flakyTaskTrackers < (clusterSize * CLUSTER_BLACKLIST_PERCENT)) { + String trackerHostName = convertTrackerNameToHostName(trackerName); + + Integer trackerFailures = trackerToFailuresMap.get(trackerHostName); + if (trackerFailures == null) { + trackerFailures = 0; + } + trackerToFailuresMap.put(trackerHostName, ++trackerFailures); + + // Check if this tasktracker has turned 'flaky' + if (trackerFailures.intValue() == maxTaskFailuresPerTracker) { + ++flakyTaskTrackers; + + // Cancel reservations if appropriate + if (taskTracker != null) { + if (trackersReservedForMaps.containsKey(taskTracker)) { + taskTracker.unreserveSlots(TaskType.MAP, this); + } + if (trackersReservedForReduces.containsKey(taskTracker)) { + taskTracker.unreserveSlots(TaskType.REDUCE, this); + } + } + LOG.info("TaskTracker at '" + trackerHostName + "' turned 'flaky'"); + } + } + } + + public synchronized void reserveTaskTracker(TaskTracker taskTracker, + TaskType type, int numSlots) { + Map map = + (type == TaskType.MAP) ? trackersReservedForMaps : trackersReservedForReduces; + + long now = JobTracker.getClock().getTime(); + + FallowSlotInfo info = map.get(taskTracker); + int reservedSlots = 0; + if (info == null) { + info = new FallowSlotInfo(now, numSlots); + reservedSlots = numSlots; + } else { + // Increment metering info if the reservation is changing + if (info.getNumSlots() != numSlots) { + Enum counter = + (type == TaskType.MAP) ? 
+ Counter.FALLOW_SLOTS_MILLIS_MAPS : + Counter.FALLOW_SLOTS_MILLIS_REDUCES; + long fallowSlotMillis = (now - info.getTimestamp()) * info.getNumSlots(); + jobCounters.incrCounter(counter, fallowSlotMillis); + + // Update + reservedSlots = numSlots - info.getNumSlots(); + info.setTimestamp(now); + info.setNumSlots(numSlots); + } + } + map.put(taskTracker, info); + if (type == TaskType.MAP) { + jobtracker.getInstrumentation().addReservedMapSlots(reservedSlots); + } + else { + jobtracker.getInstrumentation().addReservedReduceSlots(reservedSlots); + } + jobtracker.incrementReservations(type, reservedSlots); + } + + public synchronized void unreserveTaskTracker(TaskTracker taskTracker, + TaskType type) { + Map map = + (type == TaskType.MAP) ? trackersReservedForMaps : + trackersReservedForReduces; + + FallowSlotInfo info = map.get(taskTracker); + if (info == null) { + LOG.warn("Cannot find information about fallow slots for " + + taskTracker.getTrackerName()); + return; + } + + long now = JobTracker.getClock().getTime(); + + Enum counter = + (type == TaskType.MAP) ? + Counter.FALLOW_SLOTS_MILLIS_MAPS : + Counter.FALLOW_SLOTS_MILLIS_REDUCES; + long fallowSlotMillis = (now - info.getTimestamp()) * info.getNumSlots(); + jobCounters.incrCounter(counter, fallowSlotMillis); + + map.remove(taskTracker); + if (type == TaskType.MAP) { + jobtracker.getInstrumentation().decReservedMapSlots(info.getNumSlots()); + } + else { + jobtracker.getInstrumentation().decReservedReduceSlots( + info.getNumSlots()); + } + jobtracker.decrementReservations(type, info.getNumSlots()); + } + + public int getNumReservedTaskTrackersForMaps() { + return trackersReservedForMaps.size(); + } + + public int getNumReservedTaskTrackersForReduces() { + return trackersReservedForReduces.size(); + } + + private int getTrackerTaskFailures(String trackerName) { + String trackerHostName = convertTrackerNameToHostName(trackerName); + Integer failedTasks = trackerToFailuresMap.get(trackerHostName); + return (failedTasks != null) ? failedTasks.intValue() : 0; + } + + /** + * Get the black listed trackers for the job + * + * @return List of blacklisted tracker names + */ + List getBlackListedTrackers() { + List blackListedTrackers = new ArrayList(); + for (Map.Entry e : trackerToFailuresMap.entrySet()) { + if (e.getValue().intValue() >= maxTaskFailuresPerTracker) { + blackListedTrackers.add(e.getKey()); + } + } + return blackListedTrackers; + } + + /** + * Get the no. of 'flaky' tasktrackers for a given job. + * + * @return the no. of 'flaky' tasktrackers for a given job. + */ + int getNoOfBlackListedTrackers() { + return flakyTaskTrackers; + } + + /** + * Get the information on tasktrackers and no. of errors which occurred + * on them for a given job. + * + * @return the map of tasktrackers and no. of errors which occurred + * on them for a given job. + */ + synchronized Map getTaskTrackerErrors() { + // Clone the 'trackerToFailuresMap' and return the copy + Map trackerErrors = + new TreeMap(trackerToFailuresMap); + return trackerErrors; + } + + /** + * Remove a map TIP from the lists for running maps. + * Called when a map fails/completes (note if a map is killed, + * it won't be present in the list since it was completed earlier) + * @param tip the tip that needs to be retired + */ + private synchronized void retireMap(TaskInProgress tip) { + if (runningMapCache == null) { + LOG.warn("Running cache for maps missing!! 
" + + "Job details are missing."); + return; + } + + String[] splitLocations = tip.getSplitLocations(); + + // Remove the TIP from the list for running non-local maps + if (splitLocations.length == 0) { + nonLocalRunningMaps.remove(tip); + return; + } + + // Remove from the running map caches + for(String host: splitLocations) { + Node node = jobtracker.getNode(host); + + for (int j = 0; j < maxLevel; ++j) { + Set hostMaps = runningMapCache.get(node); + if (hostMaps != null) { + hostMaps.remove(tip); + if (hostMaps.size() == 0) { + runningMapCache.remove(node); + } + } + node = node.getParent(); + } + } + } + + /** + * Remove a reduce TIP from the list for running-reduces + * Called when a reduce fails/completes + * @param tip the tip that needs to be retired + */ + private synchronized void retireReduce(TaskInProgress tip) { + if (runningReduces == null) { + LOG.warn("Running list for reducers missing!! " + + "Job details are missing."); + return; + } + runningReduces.remove(tip); + } + + /** + * Adds a map tip to the list of running maps. + * @param tip the tip that needs to be scheduled as running + */ + protected synchronized void scheduleMap(TaskInProgress tip) { + runningMapTaskStats.add(0.0f); + if (runningMapCache == null) { + LOG.warn("Running cache for maps is missing!! " + + "Job details are missing."); + return; + } + String[] splitLocations = tip.getSplitLocations(); + + // Add the TIP to the list of non-local running TIPs + if (splitLocations.length == 0) { + nonLocalRunningMaps.add(tip); + return; + } + + for(String host: splitLocations) { + Node node = jobtracker.getNode(host); + + for (int j = 0; j < maxLevel; ++j) { + Set hostMaps = runningMapCache.get(node); + if (hostMaps == null) { + // create a cache if needed + hostMaps = new LinkedHashSet(); + runningMapCache.put(node, hostMaps); + } + hostMaps.add(tip); + node = node.getParent(); + } + } + } + + /** + * Adds a reduce tip to the list of running reduces + * @param tip the tip that needs to be scheduled as running + */ + protected synchronized void scheduleReduce(TaskInProgress tip) { + runningReduceTaskStats.add(0.0f); + if (runningReduces == null) { + LOG.warn("Running cache for reducers missing!! " + + "Job details are missing."); + return; + } + runningReduces.add(tip); + } + + /** + * Adds the failed TIP in the front of the list for non-running maps + * @param tip the tip that needs to be failed + */ + private synchronized void failMap(TaskInProgress tip) { + if (nonRunningMapCache == null) { + LOG.warn("Non-running cache for maps missing!! " + + "Job details are missing."); + return; + } + + // 1. Its added everywhere since other nodes (having this split local) + // might have removed this tip from their local cache + // 2. 
Give high priority to failed tip - fail early + + String[] splitLocations = tip.getSplitLocations(); + + // Add the TIP in the front of the list for non-local non-running maps + if (splitLocations.length == 0) { + nonLocalMaps.add(0, tip); + return; + } + + for(String host: splitLocations) { + Node node = jobtracker.getNode(host); + + for (int j = 0; j < maxLevel; ++j) { + List hostMaps = nonRunningMapCache.get(node); + if (hostMaps == null) { + hostMaps = new LinkedList(); + nonRunningMapCache.put(node, hostMaps); + } + hostMaps.add(0, tip); + node = node.getParent(); + } + } + } + + /** + * Adds a failed TIP in the front of the list for non-running reduces + * @param tip the tip that needs to be failed + */ + private synchronized void failReduce(TaskInProgress tip) { + if (nonRunningReduces == null) { + LOG.warn("Failed cache for reducers missing!! " + + "Job details are missing."); + return; + } + nonRunningReduces.add(0, tip); + } + + /** + * Find a non-running task in the passed list of TIPs + * @param tips a collection of TIPs + * @param ttStatus the status of tracker that has requested a task to run + * @param numUniqueHosts number of unique hosts that run trask trackers + * @param removeFailedTip whether to remove the failed tips + */ + private synchronized TaskInProgress findTaskFromList( + Collection tips, TaskTrackerStatus ttStatus, + int numUniqueHosts, + boolean removeFailedTip) { + Iterator iter = tips.iterator(); + while (iter.hasNext()) { + TaskInProgress tip = iter.next(); + + // Select a tip if + // 1. runnable : still needs to be run and is not completed + // 2. ~running : no other node is running it + // 3. earlier attempt failed : has not failed on this host + // and has failed on all the other hosts + // A TIP is removed from the list if + // (1) this tip is scheduled + // (2) if the passed list is a level 0 (host) cache + // (3) when the TIP is non-schedulable (running, killed, complete) + if (tip.isRunnable() && !tip.isRunning()) { + // check if the tip has failed on this host + if (!tip.hasFailedOnMachine(ttStatus.getHost()) || + tip.getNumberOfFailedMachines() >= numUniqueHosts) { + // check if the tip has failed on all the nodes + iter.remove(); + return tip; + } else if (removeFailedTip) { + // the case where we want to remove a failed tip from the host cache + // point#3 in the TIP removal logic above + iter.remove(); + } + } else { + // see point#3 in the comment above for TIP removal logic + iter.remove(); + } + } + return null; + } + + public boolean hasSpeculativeMaps() { + return hasSpeculativeMaps; + } + + public boolean hasSpeculativeReduces() { + return hasSpeculativeReduces; + } + + + /** + * Given a candidate set of tasks, find and order the ones that + * can be speculated and return the same. 
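+   * Candidates are the TIPs for which canBeSpeculated(now) holds, ordered by
+   * EstimatedTimeLeftComparator.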
+ */ + protected synchronized List findSpeculativeTaskCandidates + (Collection list) { + ArrayList candidates = new ArrayList(); + + long now = JobTracker.getClock().getTime(); + Iterator iter = list.iterator(); + while (iter.hasNext()) { + TaskInProgress tip = iter.next(); + if (tip.canBeSpeculated(now)) { + candidates.add(tip); + } + } + if (candidates.size() > 0 ) { + Comparator LateComparator = + new EstimatedTimeLeftComparator(now); + + Collections.sort(candidates, LateComparator); + } + return candidates; + } + + protected synchronized TaskInProgress findSpeculativeTask( + List candidates, String taskTrackerName, + String taskTrackerHost, TaskType taskType) { + if (candidates.isEmpty()) { + return null; + } + + if (isSlowTracker(taskTrackerName) || atSpeculativeCap(taskType)) { + return null; + } + + long now = JobTracker.getClock().getTime(); + Iterator iter = candidates.iterator(); + while (iter.hasNext()) { + TaskInProgress tip = iter.next(); + if (tip.hasRunOnMachine(taskTrackerHost, taskTrackerName)) + continue; + + // either we are going to speculate this task or it's not speculatable + iter.remove(); + + if (!tip.canBeSpeculated(now)) { + // if it can't be speculated, then: + // A. it has completed/failed etc. - in which case makes sense to never + // speculate again + // B. it's relative progress does not allow speculation. in this case + // it's fair to treat it as if it was never eligible for speculation + // to begin with. + continue; + } + + LOG.info("Chose task " + tip.getTIPId() + " to speculate." + + " Statistics: Task's : " + + tip.getCurrentProgressRate(JobTracker.getClock().getTime()) + + " Job's : " + (tip.isMapTask() ? + runningMapTaskStats : runningReduceTaskStats)); + + + return tip; + + } + + return null; + } + + /** + * Find new map task + * @param tts The task tracker that is asking for a task + * @param clusterSize The number of task trackers in the cluster + * @param numUniqueHosts The number of hosts that run task trackers + * @param avgProgress The average progress of this kind of task in this job + * @param maxCacheLevel The maximum topology level until which to schedule + * maps. + * A value of {@link #anyCacheLevel} implies any + * available task (node-local, rack-local, off-switch and + * speculative tasks). + * A value of {@link #NON_LOCAL_CACHE_LEVEL} implies only + * off-switch/speculative tasks should be scheduled. + * @return the index in tasks of the selected task (or -1 for no task) + */ + private synchronized int findNewMapTask(final TaskTrackerStatus tts, + final int clusterSize, + final int numUniqueHosts, + final int maxCacheLevel) { + if (numMapTasks == 0) { + if(LOG.isDebugEnabled()) { + LOG.debug("No maps to schedule for " + profile.getJobID()); + } + return -1; + } + + String taskTracker = tts.getTrackerName(); + TaskInProgress tip = null; + + // + // Update the last-known clusterSize + // + this.clusterSize = clusterSize; + + if (!shouldRunOnTaskTracker(taskTracker)) { + return -1; + } + + // Check to ensure this TaskTracker has enough resources to + // run tasks from this job + long outSize = resourceEstimator.getEstimatedMapOutputSize(); + long availSpace = tts.getResourceStatus().getAvailableSpace(); + final long SAVETY_BUFFER = + conf.getLong("mapred.map.reserved.disk.mb", 300) * 1024 * 1024; + if (availSpace < outSize + SAVETY_BUFFER) { + LOG.warn("No room for map task. 
Node " + tts.getHost() + + " has " + availSpace + + " bytes free; The safty buffer is " + SAVETY_BUFFER + + " bytes; but we expect map to take " + outSize); + + return -1; //see if a different TIP might work better. + } + + + // For scheduling a map task, we have two caches and a list (optional) + // I) one for non-running task + // II) one for running task (this is for handling speculation) + // III) a list of TIPs that have empty locations (e.g., dummy splits), + // the list is empty if all TIPs have associated locations + + // First a look up is done on the non-running cache and on a miss, a look + // up is done on the running cache. The order for lookup within the cache: + // 1. from local node to root [bottom up] + // 2. breadth wise for all the parent nodes at max level + + // We fall to linear scan of the list (III above) if we have misses in the + // above caches + + Node node = jobtracker.getNode(tts.getHost()); + + // + // I) Non-running TIP : + // + + // 1. check from local node to the root [bottom up cache lookup] + // i.e if the cache is available and the host has been resolved + // (node!=null) + if (node != null) { + Node key = node; + int level = 0; + // maxCacheLevel might be greater than this.maxLevel if findNewMapTask is + // called to schedule any task (local, rack-local, off-switch or speculative) + // tasks or it might be NON_LOCAL_CACHE_LEVEL (i.e. -1) if findNewMapTask is + // (i.e. -1) if findNewMapTask is to only schedule off-switch/speculative + // tasks + int maxLevelToSchedule = Math.min(maxCacheLevel, maxLevel); + for (level = 0;level < maxLevelToSchedule; ++level) { + List cacheForLevel = nonRunningMapCache.get(key); + if (cacheForLevel != null) { + tip = findTaskFromList(cacheForLevel, tts, + numUniqueHosts,level == 0); + if (tip != null) { + // Add to running cache + scheduleMap(tip); + + // remove the cache if its empty + if (cacheForLevel.size() == 0) { + nonRunningMapCache.remove(key); + } + + return tip.getIdWithinJob(); + } + } + key = key.getParent(); + } + + // Check if we need to only schedule a local task (node-local/rack-local) + if (level == maxCacheLevel) { + return -1; + } + } + + //2. Search breadth-wise across parents at max level for non-running + // TIP if + // - cache exists and there is a cache miss + // - node information for the tracker is missing (tracker's topology + // info not obtained yet) + + // collection of node at max level in the cache structure + Collection nodesAtMaxLevel = jobtracker.getNodesAtMaxLevel(); + + // get the node parent at max level + Node nodeParentAtMaxLevel = + (node == null) ? null : JobTracker.getParentNode(node, maxLevel - 1); + + for (Node parent : nodesAtMaxLevel) { + + // skip the parent that has already been scanned + if (parent == nodeParentAtMaxLevel) { + continue; + } + + List cache = nonRunningMapCache.get(parent); + if (cache != null) { + tip = findTaskFromList(cache, tts, numUniqueHosts, false); + if (tip != null) { + // Add to the running cache + scheduleMap(tip); + + // remove the cache if empty + if (cache.size() == 0) { + nonRunningMapCache.remove(parent); + } + LOG.info("Choosing a non-local task " + tip.getTIPId()); + return tip.getIdWithinJob(); + } + } + } + + // 3. 
Search non-local tips for a new task + tip = findTaskFromList(nonLocalMaps, tts, numUniqueHosts, false); + if (tip != null) { + // Add to the running list + scheduleMap(tip); + + LOG.info("Choosing a non-local task " + tip.getTIPId()); + return tip.getIdWithinJob(); + } + + // + // II) Running TIP : + // + + if (hasSpeculativeMaps) { + tip = getSpeculativeMap(tts.getTrackerName(), tts.getHost()); + if (tip != null) { + LOG.info("Choosing a non-local task " + tip.getTIPId() + + " for speculation"); + return tip.getIdWithinJob(); + } + } + + return -1; + } + + private synchronized TaskInProgress getSpeculativeMap(String taskTrackerName, + String taskTrackerHost) { + + long now = JobTracker.getClock().getTime(); + if ((now - lastSpeculativeMapRefresh) > speculativeRefreshTimeout) { + //////// Populate allTips with all TaskInProgress + Set allTips = new HashSet(); + + // collection of node at max level in the cache structure + Collection nodesAtMaxLevel = jobtracker.getNodesAtMaxLevel(); + // Add all tasks from max-level nodes breadth-wise + for (Node parent : nodesAtMaxLevel) { + Set cache = runningMapCache.get(parent); + if (cache != null) { + allTips.addAll(cache); + } + } + // Add all non-local TIPs + allTips.addAll(nonLocalRunningMaps); + candidateSpeculativeMaps = findSpeculativeTaskCandidates(allTips); + lastSpeculativeMapRefresh = now; + } + + ///////// Select a TIP to run on + TaskInProgress tip = findSpeculativeTask(candidateSpeculativeMaps, taskTrackerName, + taskTrackerHost, TaskType.MAP); + + if (tip != null) { + LOG.info("Choosing map task " + tip.getTIPId() + + " for speculative execution"); + } else { + if (LOG.isDebugEnabled()) { + LOG.debug("No speculative map task found for tracker " + taskTrackerName); + } + } + + return tip; + } + + /** + * Find new reduce task + * @param tts The task tracker that is asking for a task + * @param clusterSize The number of task trackers in the cluster + * @param numUniqueHosts The number of hosts that run task trackers + * @param avgProgress The average progress of this kind of task in this job + * @return the index in tasks of the selected task (or -1 for no task) + */ + private synchronized int findNewReduceTask(TaskTrackerStatus tts, + int clusterSize, + int numUniqueHosts) { + if (numReduceTasks == 0) { + if(LOG.isDebugEnabled()) { + LOG.debug("No reduces to schedule for " + profile.getJobID()); + } + return -1; + } + + String taskTracker = tts.getTrackerName(); + TaskInProgress tip = null; + + // Update the last-known clusterSize + this.clusterSize = clusterSize; + + if (!shouldRunOnTaskTracker(taskTracker)) { + return -1; + } + + long outSize = resourceEstimator.getEstimatedReduceInputSize(); + long availSpace = tts.getResourceStatus().getAvailableSpace(); + final long SAVETY_BUFFER = + conf.getLong("mapred.reduce.reserved.disk.mb", 300) * 1024 * 1024; + if (availSpace < outSize + SAVETY_BUFFER) { + LOG.warn("No room for reduce task. Node " + taskTracker + + " has " + availSpace + + " bytes free; The safty buffer is " + SAVETY_BUFFER + + " bytes; but we expect map to take " + outSize); + + return -1; //see if a different TIP might work better. + } + + // 1. check for a never-executed reduce tip + // reducers don't have a cache and so pass -1 to explicitly call that out + tip = findTaskFromList(nonRunningReduces, tts, numUniqueHosts, false); + if (tip != null) { + scheduleReduce(tip); + return tip.getIdWithinJob(); + } + + // 2. 
check for a reduce tip to be speculated + if (hasSpeculativeReduces) { + tip = getSpeculativeReduce(tts.getTrackerName(), tts.getHost()); + if (tip != null) { + scheduleReduce(tip); + return tip.getIdWithinJob(); + } + } + + return -1; + } + + private synchronized TaskInProgress getSpeculativeReduce( + String taskTrackerName, String taskTrackerHost) { + + long now = JobTracker.getClock().getTime(); + if ((now - lastSpeculativeReduceRefresh) > speculativeRefreshTimeout) { + candidateSpeculativeReduces = findSpeculativeTaskCandidates(runningReduces); + lastSpeculativeReduceRefresh = now; + } + + TaskInProgress tip = findSpeculativeTask( + candidateSpeculativeReduces, taskTrackerName, taskTrackerHost, TaskType.REDUCE); + + if (tip != null) { + LOG.info("Choosing reduce task " + tip.getTIPId() + + " for speculative execution"); + } else { + if (LOG.isDebugEnabled()) { + LOG.debug("No speculative reduce task found for tracker " + taskTrackerHost); + } + } + return tip; + } + + private boolean shouldRunOnTaskTracker(String taskTracker) { + // + // Check if too many tasks of this job have failed on this + // tasktracker prior to assigning it a new one. + // + int taskTrackerFailedTasks = getTrackerTaskFailures(taskTracker); + if ((flakyTaskTrackers < (clusterSize * CLUSTER_BLACKLIST_PERCENT)) && + taskTrackerFailedTasks >= maxTaskFailuresPerTracker) { + if (LOG.isDebugEnabled()) { + String flakyTracker = convertTrackerNameToHostName(taskTracker); + LOG.debug("Ignoring the black-listed tasktracker: '" + flakyTracker + + "' for assigning a new task"); + } + return false; + } + return true; + } + + + /** + * Metering: Occupied Slots * (Finish - Start) + * @param tip {@link TaskInProgress} to be metered which just completed, + * cannot be null + * @param status {@link TaskStatus} of the completed task, cannot be + * null + */ + private void meterTaskAttempt(TaskInProgress tip, TaskStatus status) { + Counter slotCounter = + (tip.isMapTask()) ? Counter.SLOTS_MILLIS_MAPS : + Counter.SLOTS_MILLIS_REDUCES; + jobCounters.incrCounter(slotCounter, + tip.getNumSlotsRequired() * + (status.getFinishTime() - status.getStartTime())); + if (!tip.isMapTask()) { + jobCounters.incrCounter(Counter.SLOTS_MILLIS_REDUCES_COPY, + tip.getNumSlotsRequired() * + (status.getShuffleFinishTime() - status.getStartTime())); + jobCounters.incrCounter(Counter.SLOTS_MILLIS_REDUCES_SORT, + tip.getNumSlotsRequired() * + (status.getSortFinishTime() - status.getShuffleFinishTime())); + jobCounters.incrCounter(Counter.SLOTS_MILLIS_REDUCES_REDUCE, + tip.getNumSlotsRequired() * + (status.getFinishTime() - status.getSortFinishTime())); + } + } + + /** + * A taskid assigned to this JobInProgress has reported in successfully. + */ + public synchronized boolean completedTask(TaskInProgress tip, + TaskStatus status) + { + TaskAttemptID taskid = status.getTaskID(); + int oldNumAttempts = tip.getActiveTasks().size(); + final JobTrackerInstrumentation metrics = jobtracker.getInstrumentation(); + + // Metering + meterTaskAttempt(tip, status); + + // Sanity check: is the TIP already complete? + // It _is_ safe to not decrement running{Map|Reduce}Tasks and + // finished{Map|Reduce}Tasks variables here because one and only + // one task-attempt of a TIP gets to completedTask. This is because + // the TaskCommitThread in the JobTracker marks other, completed, + // speculative tasks as _complete_. 
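+    // If another attempt already completed this TIP, the late arrival is recorded
+    // as KILLED and none of the running/finished counters are touched.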
+ if (tip.isComplete()) { + // Mark this task as KILLED + tip.alreadyCompletedTask(taskid); + + // Let the JobTracker cleanup this taskid if the job isn't running + if (this.status.getRunState() != JobStatus.RUNNING) { + jobtracker.markCompletedTaskAttempt(status.getTaskTracker(), taskid); + } + return false; + } + + + LOG.info("Task '" + taskid + "' has completed " + tip.getTIPId() + + " successfully."); + + // Mark the TIP as complete + tip.completed(taskid); + resourceEstimator.updateWithCompletedTask(status, tip); + + // Update jobhistory + TaskTrackerStatus ttStatus = + this.jobtracker.getTaskTrackerStatus(status.getTaskTracker()); + String trackerHostname = jobtracker.getNode(ttStatus.getHost()).toString(); + String taskType = getTaskType(tip); + if (status.getIsMap()){ + JobHistory.MapAttempt.logStarted(status.getTaskID(), status.getStartTime(), + status.getTaskTracker(), + ttStatus.getHttpPort(), + taskType); + JobHistory.MapAttempt.logFinished(status.getTaskID(), status.getFinishTime(), + trackerHostname, taskType, + status.getStateString(), + status.getCounters()); + }else{ + JobHistory.ReduceAttempt.logStarted( status.getTaskID(), status.getStartTime(), + status.getTaskTracker(), + ttStatus.getHttpPort(), + taskType); + JobHistory.ReduceAttempt.logFinished(status.getTaskID(), status.getShuffleFinishTime(), + status.getSortFinishTime(), status.getFinishTime(), + trackerHostname, + taskType, + status.getStateString(), + status.getCounters()); + } + JobHistory.Task.logFinished(tip.getTIPId(), + taskType, + tip.getExecFinishTime(), + status.getCounters()); + + int newNumAttempts = tip.getActiveTasks().size(); + if (tip.isJobSetupTask()) { + // setup task has finished. kill the extra setup tip + killSetupTip(!tip.isMapTask()); + setupComplete(); + } else if (tip.isJobCleanupTask()) { + // cleanup task has finished. Kill the extra cleanup tip + if (tip.isMapTask()) { + // kill the reduce tip + cleanup[1].kill(); + } else { + cleanup[0].kill(); + } + + // + // The Job is done + // if the job is failed, then mark the job failed. + if (jobFailed) { + terminateJob(JobStatus.FAILED); + } + // if the job is killed, then mark the job killed. 
+ if (jobKilled) { + terminateJob(JobStatus.KILLED); + } + else { + jobComplete(); + } + // The job has been killed/failed/successful + // JobTracker should cleanup this task + jobtracker.markCompletedTaskAttempt(status.getTaskTracker(), taskid); + } else if (tip.isMapTask()) { + runningMapTasks -= 1; + // check if this was a sepculative task + if (oldNumAttempts > 1) { + speculativeMapTasks -= (oldNumAttempts - newNumAttempts); + if (!garbageCollected) { + totalSpeculativeMapTasks.addAndGet(newNumAttempts - oldNumAttempts); + } + } + if (tip.isSpeculativeAttempt(taskid)) { + metrics.speculativeSucceededMap(taskid); + } + int level = getLocalityLevel(tip, ttStatus); + long inputBytes = tip.getCounters() + .getGroup("org.apache.hadoop.mapred.Task$Counter") + .getCounter("Map input bytes"); + switch (level) { + case 0: jobCounters.incrCounter(Counter.LOCAL_MAP_INPUT_BYTES, + inputBytes); + metrics.addLocalMapInputBytes(inputBytes); + break; + case 1: jobCounters.incrCounter(Counter.RACK_MAP_INPUT_BYTES, + inputBytes); + metrics.addRackMapInputBytes(inputBytes); + break; + default:metrics.addMapInputBytes(inputBytes); + break; + } + finishedMapTasks += 1; + metrics.completeMap(taskid); + if (!garbageCollected) { + if (!tip.isJobSetupTask() && hasSpeculativeMaps) { + updateTaskTrackerStats(tip,ttStatus,trackerMapStats,mapTaskStats); + } + } + // remove the completed map from the resp running caches + retireMap(tip); + if ((finishedMapTasks + failedMapTIPs) == (numMapTasks)) { + this.status.setMapProgress(1.0f); + } + } else { + runningReduceTasks -= 1; + if (oldNumAttempts > 1) { + speculativeReduceTasks -= (oldNumAttempts - newNumAttempts); + if (!garbageCollected) { + totalSpeculativeReduceTasks.addAndGet(newNumAttempts - oldNumAttempts); + } + } + if (tip.isSpeculativeAttempt(taskid)) { + metrics.speculativeSucceededReduce(taskid); + } + finishedReduceTasks += 1; + metrics.completeReduce(taskid); + if (!garbageCollected) { + if (!tip.isJobSetupTask() && hasSpeculativeReduces) { + updateTaskTrackerStats(tip,ttStatus,trackerReduceStats,reduceTaskStats); + } + } + // remove the completed reduces from the running reducers set + retireReduce(tip); + if ((finishedReduceTasks + failedReduceTIPs) == (numReduceTasks)) { + this.status.setReduceProgress(1.0f); + } + } + + // is job complete? 
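+    // With no setup/cleanup tasks configured, the job completes as soon as the
+    // cleanup-launch condition (job killed/failed, or all TIPs accounted for) holds.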
+ if (!jobSetupCleanupNeeded && canLaunchJobCleanupTask()) { + jobComplete(); + } + + return true; + } + + /** + * Job state change must happen thru this call + */ + private void changeStateTo(int newState) { + int oldState = this.status.getRunState(); + if (oldState == newState) { + return; //old and new states are same + } + this.status.setRunState(newState); + + //update the metrics + if (oldState == JobStatus.PREP) { + this.jobtracker.getInstrumentation().decPrepJob(conf, jobId); + } else if (oldState == JobStatus.RUNNING) { + this.jobtracker.getInstrumentation().decRunningJob(conf, jobId); + } + + if (newState == JobStatus.PREP) { + this.jobtracker.getInstrumentation().addPrepJob(conf, jobId); + } else if (newState == JobStatus.RUNNING) { + this.jobtracker.getInstrumentation().addRunningJob(conf, jobId); + } + + } + + private void incHmonCounters(Counters counters) { + // Get hmon information and put them in counters + long cpuTime, memTime, memPeak, cpuGCycles; + ResourceReporter reporter = jobtracker.getResourceReporter(); + if (reporter != null) { + JobID jobid = status.getJobID(); + cpuTime = (long)reporter.getJobCpuCumulatedUsageTime(jobid); + memTime = (long)reporter.getJobMemCumulatedUsageTime(jobid); + memPeak = (long)reporter.getJobMemMaxPercentageOnBoxAllTime(jobid); + cpuGCycles = (long)reporter.getJobCpuCumulatedGigaCycles(jobid); + counters.incrCounter("hmon", "cpuTime", cpuTime); + counters.incrCounter("hmon", "cpuGCycles", cpuGCycles); + counters.incrCounter("hmon", "memTime", memTime); + counters.incrCounter("hmon", "memPeak", memPeak); + } + } + + private void setExtendedMetricsCounters(Counters counters) { + counters.incrCounter("extMet", "submit_time", + getLaunchTime() - getStartTime()); + for (int i = 0; i < setup.length; i++) { + if (setup[i].isComplete()) { + counters.incrCounter("extMet", "setup_time", + setup[i].getExecFinishTime() - setup[i].getStartTime()); + break; + } + } + for (int i = cleanup.length - 1; i >= 0; i--) { + if (cleanup[i].isComplete()) { + counters.incrCounter("extMet", "cleanup_time", + cleanup[i].getExecFinishTime() - cleanup[i].getStartTime()); + break; + } + } + long totalMapWaitTime = 0; + long maxMapWaitTime = 0; + long totalMaps = 0; + for (int i = 0; i < maps.length; i++) { + if (maps[i].isComplete()) { + long waitTime = maps[i].getExecStartTime() - getLaunchTime(); + if (waitTime > maxMapWaitTime) + maxMapWaitTime = waitTime; + totalMapWaitTime += waitTime; + ++totalMaps; + } + } + counters.incrCounter("extMet", "avg_map_wait_time", + totalMaps > 0 ? (totalMapWaitTime / totalMaps) : 0); + counters.incrCounter("extMet", "max_map_wait_time", + maxMapWaitTime); + } + + /** + * The job is done since all it's component tasks are either + * successful or have failed. + */ + private void jobComplete() { + final JobTrackerInstrumentation metrics = jobtracker.getInstrumentation(); + // + // All tasks are complete, then the job is done! 
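+    // Only a job still in RUNNING or PREP transitions to SUCCEEDED here; progress
+    // is forced to 1.0 and the finish time recorded before the summary, counters
+    // and job-history are written out.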
+ // + if (this.status.getRunState() == JobStatus.RUNNING || + this.status.getRunState() == JobStatus.PREP) { + changeStateTo(JobStatus.SUCCEEDED); + this.status.setCleanupProgress(1.0f); + if (maps.length == 0) { + this.status.setMapProgress(1.0f); + } + if (reduces.length == 0) { + this.status.setReduceProgress(1.0f); + } + this.finishTime = JobTracker.getClock().getTime(); + LOG.info("Job " + this.status.getJobID() + + " has completed successfully."); + + // Log the job summary (this should be done prior to logging to + // job-history to ensure job-counters are in-sync + JobSummary.logJobSummary(this, jobtracker.getClusterStatus(false)); + + Counters counters = getCounters(); + incHmonCounters(counters); + setExtendedMetricsCounters(counters); + // Log job-history + JobHistory.JobInfo.logFinished(this.status.getJobID(), finishTime, + this.finishedMapTasks, + this.finishedReduceTasks, failedMapTasks, + failedReduceTasks, getMapCounters(), + getReduceCounters(), counters); + // Note that finalize will close the job history handles which garbage collect + // might try to finalize + garbageCollect(); + + metrics.completeJob(this.conf, this.status.getJobID()); + } + } + + private synchronized void terminateJob(int jobTerminationState) { + if ((status.getRunState() == JobStatus.RUNNING) || + (status.getRunState() == JobStatus.PREP)) { + this.finishTime = JobTracker.getClock().getTime(); + this.status.setMapProgress(1.0f); + this.status.setReduceProgress(1.0f); + this.status.setCleanupProgress(1.0f); + + Counters counters = getCounters(); + incHmonCounters(counters); + setExtendedMetricsCounters(counters); + if (jobTerminationState == JobStatus.FAILED) { + changeStateTo(JobStatus.FAILED); + + // Log the job summary + JobSummary.logJobSummary(this, jobtracker.getClusterStatus(false)); + + // Log to job-history + JobHistory.JobInfo.logFailed(this.status.getJobID(), finishTime, + this.finishedMapTasks, + this.finishedReduceTasks, counters); + } else { + changeStateTo(JobStatus.KILLED); + + // Log the job summary + JobSummary.logJobSummary(this, jobtracker.getClusterStatus(false)); + + // Log to job-history + JobHistory.JobInfo.logKilled(this.status.getJobID(), finishTime, + this.finishedMapTasks, + this.finishedReduceTasks, counters); + } + garbageCollect(); + + jobtracker.getInstrumentation().terminateJob( + this.conf, this.status.getJobID()); + if (jobTerminationState == JobStatus.FAILED) { + jobtracker.getInstrumentation().failedJob( + this.conf, this.status.getJobID()); + } else { + jobtracker.getInstrumentation().killedJob( + this.conf, this.status.getJobID()); + } + } + } + + /** + * Terminate the job and all its component tasks. + * Calling this will lead to marking the job as failed/killed. Cleanup + * tip will be launched. If the job has not inited, it will directly call + * terminateJob as there is no need to launch cleanup tip. + * This method is reentrant. + * @param jobTerminationState job termination state + */ + private synchronized void terminate(int jobTerminationState) { + if(!tasksInited.get()) { + //init could not be done, we just terminate directly. 
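+      // No TIPs have been created yet, so there is nothing to kill and no cleanup
+      // task to launch.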
+ terminateJob(jobTerminationState); + return; + } + + if ((status.getRunState() == JobStatus.RUNNING) || + (status.getRunState() == JobStatus.PREP)) { + LOG.info("Killing job '" + this.status.getJobID() + "'"); + if (jobTerminationState == JobStatus.FAILED) { + if(jobFailed) {//reentrant + return; + } + jobFailed = true; + } else if (jobTerminationState == JobStatus.KILLED) { + if(jobKilled) {//reentrant + return; + } + jobKilled = true; + } + // clear all unclean tasks + clearUncleanTasks(); + // + // kill all TIPs. + // + for (int i = 0; i < setup.length; i++) { + setup[i].kill(); + } + for (int i = 0; i < maps.length; i++) { + maps[i].kill(); + } + for (int i = 0; i < reduces.length; i++) { + reduces[i].kill(); + } + + if (!jobSetupCleanupNeeded) { + terminateJob(jobTerminationState); + } + } + } + + private void cancelReservedSlots() { + // Make a copy of the set of TaskTrackers to prevent a + // ConcurrentModificationException ... + Set tm = + new HashSet(trackersReservedForMaps.keySet()); + for (TaskTracker tt : tm) { + tt.unreserveSlots(TaskType.MAP, this); + } + + Set tr = + new HashSet(trackersReservedForReduces.keySet()); + for (TaskTracker tt : tr) { + tt.unreserveSlots(TaskType.REDUCE, this); + } + } + private void clearUncleanTasks() { + TaskAttemptID taskid = null; + TaskInProgress tip = null; + while (!mapCleanupTasks.isEmpty()) { + taskid = mapCleanupTasks.remove(0); + tip = maps[taskid.getTaskID().getId()]; + updateTaskStatus(tip, tip.getTaskStatus(taskid)); + } + while (!reduceCleanupTasks.isEmpty()) { + taskid = reduceCleanupTasks.remove(0); + tip = reduces[taskid.getTaskID().getId()]; + updateTaskStatus(tip, tip.getTaskStatus(taskid)); + } + } + + /** + * Kill the job and all its component tasks. This method should be called from + * jobtracker and should return fast as it locks the jobtracker. + */ + public void kill() { + boolean killNow = false; + synchronized(jobInitKillStatus) { + jobInitKillStatus.killed = true; + //if not in middle of init, terminate it now + if(!jobInitKillStatus.initStarted || jobInitKillStatus.initDone) { + //avoiding nested locking by setting flag + killNow = true; + } + } + if(killNow) { + terminate(JobStatus.KILLED); + } + } + + /** + * Fails the job and all its component tasks. This should be called only from + * {@link JobInProgress} or {@link JobTracker}. Look at + * {@link JobTracker#failJob(JobInProgress)} for more details. + */ + synchronized void fail() { + terminate(JobStatus.FAILED); + } + + /** + * A task assigned to this JobInProgress has reported in as failed. + * Most of the time, we'll just reschedule execution. However, after + * many repeated failures we may instead decide to allow the entire + * job to fail or succeed if the user doesn't care about a few tasks failing. + * + * Even if a task has reported as completed in the past, it might later + * be reported as failed. That's because the TaskTracker that hosts a map + * task might die before the entire job can complete. If that happens, + * we need to schedule reexecution so that downstream reduce tasks can + * obtain the map task's output. 
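+   * Failed setup/cleanup attempts also pass through here; they clear the
+   * launchedSetup/launchedCleanup flags so that another attempt can be scheduled.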
+ */ + private void failedTask(TaskInProgress tip, TaskAttemptID taskid, + TaskStatus status, + TaskTracker taskTracker, boolean wasRunning, + boolean wasComplete, boolean wasAttemptRunning) { + final JobTrackerInstrumentation metrics = jobtracker.getInstrumentation(); + // check if the TIP is already failed + boolean wasFailed = tip.isFailed(); + + // Mark the taskid as FAILED or KILLED + tip.incompleteSubTask(taskid, this.status); + + boolean isRunning = tip.isRunning(); + boolean isComplete = tip.isComplete(); + + if (wasAttemptRunning) { + // We are decrementing counters without looking for isRunning , + // because we increment the counters when we obtain + // new map task attempt or reduce task attempt.We do not really check + // for tip being running. + // Whenever we obtain new task attempt following counters are incremented. + // ++runningMapTasks; + //......... + // metrics.launchMap(id); + // hence we are decrementing the same set. + if (!tip.isJobCleanupTask() && !tip.isJobSetupTask()) { + if (tip.isMapTask()) { + runningMapTasks -= 1; + metrics.failedMap(taskid); + } else { + runningReduceTasks -= 1; + metrics.failedReduce(taskid); + } + } + + // Metering + meterTaskAttempt(tip, status); + } + + //update running count on task failure. + if (wasRunning && !isRunning) { + if (tip.isJobCleanupTask()) { + launchedCleanup = false; + } else if (tip.isJobSetupTask()) { + launchedSetup = false; + } else if (tip.isMapTask()) { + // remove from the running queue and put it in the non-running cache + // if the tip is not complete i.e if the tip still needs to be run + if (!isComplete) { + retireMap(tip); + failMap(tip); + } + } else { + // remove from the running queue and put in the failed queue if the tip + // is not complete + if (!isComplete) { + retireReduce(tip); + failReduce(tip); + } + } + } + + // The case when the map was complete but the task tracker went down. + // However, we don't need to do any metering here... + if (wasComplete && !isComplete) { + if (tip.isMapTask()) { + // Put the task back in the cache. This will help locality for cases + // where we have a different TaskTracker from the same rack/switch + // asking for a task. + // We bother about only those TIPs that were successful + // earlier (wasComplete and !isComplete) + // (since they might have been removed from the cache of other + // racks/switches, if the input split blocks were present there too) + failMap(tip); + finishedMapTasks -= 1; + } + } + + // update job history + // get taskStatus from tip + TaskStatus taskStatus = tip.getTaskStatus(taskid); + String taskTrackerName = taskStatus.getTaskTracker(); + String taskTrackerHostName = convertTrackerNameToHostName(taskTrackerName); + int taskTrackerPort = -1; + TaskTrackerStatus taskTrackerStatus = + (taskTracker == null) ? null : taskTracker.getStatus(); + if (taskTrackerStatus != null) { + taskTrackerPort = taskTrackerStatus.getHttpPort(); + } + long startTime = taskStatus.getStartTime(); + long finishTime = taskStatus.getFinishTime(); + List taskDiagnosticInfo = tip.getDiagnosticInfo(taskid); + String diagInfo = taskDiagnosticInfo == null ? 
"" : + StringUtils.arrayToString(taskDiagnosticInfo.toArray(new String[0])); + String taskType = getTaskType(tip); + if (taskStatus.getIsMap()) { + JobHistory.MapAttempt.logStarted(taskid, startTime, + taskTrackerName, taskTrackerPort, taskType); + if (taskStatus.getRunState() == TaskStatus.State.FAILED) { + JobHistory.MapAttempt.logFailed(taskid, finishTime, + taskTrackerHostName, diagInfo, taskType); + } else { + JobHistory.MapAttempt.logKilled(taskid, finishTime, + taskTrackerHostName, diagInfo, taskType); + } + } else { + JobHistory.ReduceAttempt.logStarted(taskid, startTime, + taskTrackerName, taskTrackerPort, taskType); + if (taskStatus.getRunState() == TaskStatus.State.FAILED) { + JobHistory.ReduceAttempt.logFailed(taskid, finishTime, + taskTrackerHostName, diagInfo, taskType); + } else { + JobHistory.ReduceAttempt.logKilled(taskid, finishTime, + taskTrackerHostName, diagInfo, taskType); + } + } + + // After this, try to assign tasks with the one after this, so that + // the failed task goes to the end of the list. + if (!tip.isJobCleanupTask() && !tip.isJobSetupTask()) { + if (tip.isMapTask()) { + failedMapTasks++; + } else { + failedReduceTasks++; + } + } + + // + // Note down that a task has failed on this tasktracker + // + if (status.getRunState() == TaskStatus.State.FAILED) { + addTrackerTaskFailure(taskTrackerName, taskTracker); + } + + // + // Let the JobTracker know that this task has failed + // + jobtracker.markCompletedTaskAttempt(status.getTaskTracker(), taskid); + + // + // Check if we need to kill the job because of too many failures or + // if the job is complete since all component tasks have completed + + // We do it once per TIP and that too for the task that fails the TIP + if (!wasFailed && tip.isFailed()) { + // + // Allow upto 'mapFailuresPercent' of map tasks to fail or + // 'reduceFailuresPercent' of reduce tasks to fail + // + boolean killJob = tip.isJobCleanupTask() || tip.isJobSetupTask() ? true : + tip.isMapTask() ? + ((++failedMapTIPs*100) > (mapFailuresPercent*numMapTasks)) : + ((++failedReduceTIPs*100) > (reduceFailuresPercent*numReduceTasks)); + + if (killJob) { + LOG.info("Aborting job " + profile.getJobID()); + JobHistory.Task.logFailed(tip.getTIPId(), + taskType, + finishTime, + diagInfo); + if (tip.isJobCleanupTask()) { + // kill the other tip + if (tip.isMapTask()) { + cleanup[1].kill(); + } else { + cleanup[0].kill(); + } + terminateJob(JobStatus.FAILED); + } else { + if (tip.isJobSetupTask()) { + // kill the other tip + killSetupTip(!tip.isMapTask()); + } + fail(); + } + } + + // + // Update the counters + // + if (!tip.isJobCleanupTask() && !tip.isJobSetupTask()) { + if (tip.isMapTask()) { + jobCounters.incrCounter(Counter.NUM_FAILED_MAPS, 1); + } else { + jobCounters.incrCounter(Counter.NUM_FAILED_REDUCES, 1); + } + } + } + } + + void killSetupTip(boolean isMap) { + if (isMap) { + setup[0].kill(); + } else { + setup[1].kill(); + } + } + + boolean isSetupFinished() { + // if there is no setup to be launched, consider setup is finished. + if ((tasksInited.get() && setup.length == 0) || + setup[0].isComplete() || setup[0].isFailed() || setup[1].isComplete() + || setup[1].isFailed()) { + return true; + } + return false; + } + + /** + * Fail a task with a given reason, but without a status object. + * + * Assuming {@link JobTracker} is locked on entry. 
+ * + * @param tip The task's tip + * @param taskid The task id + * @param reason The reason that the task failed + * @param trackerName The task tracker the task failed on + */ + public void failedTask(TaskInProgress tip, TaskAttemptID taskid, String reason, + TaskStatus.Phase phase, TaskStatus.State state, + String trackerName) { + TaskStatus status = TaskStatus.createTaskStatus(tip.isMapTask(), + taskid, + 0.0f, + tip.isMapTask() ? + numSlotsPerMap : + numSlotsPerReduce, + state, + reason, + reason, + trackerName, phase, + new Counters()); + // update the actual start-time of the attempt + TaskStatus oldStatus = tip.getTaskStatus(taskid); + long startTime = oldStatus == null + ? JobTracker.getClock().getTime() + : oldStatus.getStartTime(); + status.setStartTime(startTime); + status.setFinishTime(JobTracker.getClock().getTime()); + boolean wasComplete = tip.isComplete(); + updateTaskStatus(tip, status); + boolean isComplete = tip.isComplete(); + if (wasComplete && !isComplete) { // mark a successful tip as failed + String taskType = getTaskType(tip); + JobHistory.Task.logFailed(tip.getTIPId(), taskType, + tip.getExecFinishTime(), reason, taskid); + } + } + + + /** + * The job is dead. We're now GC'ing it, getting rid of the job + * from all tables. Be sure to remove all of this job's tasks + * from the various tables. + */ + synchronized void garbageCollect() { + // Cancel task tracker reservation + cancelReservedSlots(); + + // Remove the remaining speculative tasks counts + totalSpeculativeReduceTasks.addAndGet(-speculativeReduceTasks); + totalSpeculativeMapTasks.addAndGet(-speculativeMapTasks); + garbageCollected = true; + + // Let the JobTracker know that a job is complete + jobtracker.getInstrumentation().decWaitingMaps(getJobID(), pendingMaps()); + jobtracker.getInstrumentation().decWaitingReduces(getJobID(), pendingReduces()); + jobtracker.storeCompletedJob(this); + jobtracker.finalizeJob(this); + + try { + // Definitely remove the local-disk copy of the job file + if (localJobFile != null) { + localFs.delete(localJobFile, true); + localJobFile = null; + } + + // clean up splits + for (int i = 0; i < maps.length; i++) { + maps[i].clearSplit(); + } + + // JobClient always creates a new directory with job files + // so we remove that directory to cleanup + // Delete temp dfs dirs created if any, like in case of + // speculative exn of reduces. + Path tempDir = jobtracker.getSystemDirectoryForJob(getJobID()); + new CleanupQueue().addToQueue(new PathDeletionContext( + FileSystem.get(conf), tempDir.toUri().getPath())); + } catch (IOException e) { + LOG.warn("Error cleaning up "+profile.getJobID()+": "+e); + } + + cleanUpMetrics(); + // free up the memory used by the data structures + this.nonRunningMapCache = null; + this.runningMapCache = null; + this.nonRunningReduces = null; + this.runningReduces = null; + this.trackerMapStats = null; + this.trackerReduceStats = null; + } + + /** + * Return the TaskInProgress that matches the tipid. 
+ */ + public synchronized TaskInProgress getTaskInProgress(TaskID tipid) { + if (tipid.isMap()) { + if (cleanup.length > 0 && tipid.equals(cleanup[0].getTIPId())) { // cleanup map tip + return cleanup[0]; + } + if (setup.length > 0 && tipid.equals(setup[0].getTIPId())) { //setup map tip + return setup[0]; + } + for (int i = 0; i < maps.length; i++) { + if (tipid.equals(maps[i].getTIPId())){ + return maps[i]; + } + } + } else { + if (cleanup.length > 0 && tipid.equals(cleanup[1].getTIPId())) { // cleanup reduce tip + return cleanup[1]; + } + if (setup.length > 0 && tipid.equals(setup[1].getTIPId())) { //setup reduce tip + return setup[1]; + } + for (int i = 0; i < reduces.length; i++) { + if (tipid.equals(reduces[i].getTIPId())){ + return reduces[i]; + } + } + } + return null; + } + + /** + * Find the details of someplace where a map has finished + * @param mapId the id of the map + * @return the task status of the completed task + */ + public synchronized TaskStatus findFinishedMap(int mapId) { + TaskInProgress tip = maps[mapId]; + if (tip.isComplete()) { + TaskStatus[] statuses = tip.getTaskStatuses(); + for(int i=0; i < statuses.length; i++) { + if (statuses[i].getRunState() == TaskStatus.State.SUCCEEDED) { + return statuses[i]; + } + } + } + return null; + } + + synchronized int getNumTaskCompletionEvents() { + return taskCompletionEvents.size(); + } + + synchronized public TaskCompletionEvent[] getTaskCompletionEvents( + int fromEventId, int maxEvents) { + TaskCompletionEvent[] events = TaskCompletionEvent.EMPTY_ARRAY; + if (taskCompletionEvents.size() > fromEventId) { + int actualMax = Math.min(maxEvents, + (taskCompletionEvents.size() - fromEventId)); + events = taskCompletionEvents.subList(fromEventId, actualMax + fromEventId).toArray(events); + } + return events; + } + + synchronized public int getTaskCompletionEventsSize() { + return taskCompletionEvents.size(); + } + + synchronized void fetchFailureNotification(TaskInProgress tip, + TaskAttemptID mapTaskId, + String trackerName) { + Integer fetchFailures = mapTaskIdToFetchFailuresMap.get(mapTaskId); + fetchFailures = (fetchFailures == null) ? 1 : (fetchFailures+1); + mapTaskIdToFetchFailuresMap.put(mapTaskId, fetchFailures); + LOG.info("Failed fetch notification #" + fetchFailures + " for task " + + mapTaskId); + + float failureRate = (float)fetchFailures / runningReduceTasks; + // declare faulty if fetch-failures >= max-allowed-failures + boolean isMapFaulty = (failureRate >= MAX_ALLOWED_FETCH_FAILURES_PERCENT) + ? true + : false; + if (fetchFailures >= MAX_FETCH_FAILURES_NOTIFICATIONS + && isMapFaulty) { + LOG.info("Too many fetch-failures for output of task: " + mapTaskId + + " ... killing it"); + + failedTask(tip, mapTaskId, "Too many fetch-failures", + (tip.isMapTask() ? TaskStatus.Phase.MAP : + TaskStatus.Phase.REDUCE), + TaskStatus.State.FAILED, trackerName); + + mapTaskIdToFetchFailuresMap.remove(mapTaskId); + } + } + + /** + * @return The JobID of this JobInProgress. + */ + public JobID getJobID() { + return jobId; + } + + public synchronized Object getSchedulingInfo() { + return this.schedulingInfo; + } + + public synchronized void setSchedulingInfo(Object schedulingInfo) { + this.schedulingInfo = schedulingInfo; + this.status.setSchedulingInfo(schedulingInfo.toString()); + } + + /** + * To keep track of kill and initTasks status of this job. initTasks() take + * a lock on JobInProgress object. kill should avoid waiting on + * JobInProgress lock since it may take a while to do initTasks(). 
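// A worked example for the fetch-failure check in fetchFailureNotification()
// above, assuming for illustration that MAX_FETCH_FAILURES_NOTIFICATIONS is 3
// and MAX_ALLOWED_FETCH_FAILURES_PERCENT is 0.5 (both constants are defined
// elsewhere in this class and may differ):
//   with 4 running reduces, the second notification gives
//     failureRate = 2 / 4 = 0.5, so the map output already looks faulty,
//   but only the third notification (3 >= 3) actually fails the map attempt
//   with "Too many fetch-failures", allowing the map to be re-executed.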
+ */ + private static class JobInitKillStatus { + //flag to be set if kill is called + boolean killed; + + boolean initStarted; + boolean initDone; + } + + boolean isComplete() { + return status.isJobComplete(); + } + + /** + * Get the task type for logging it to {@link JobHistory}. + */ + private String getTaskType(TaskInProgress tip) { + if (tip.isJobCleanupTask()) { + return Values.CLEANUP.name(); + } else if (tip.isJobSetupTask()) { + return Values.SETUP.name(); + } else if (tip.isMapTask()) { + return Values.MAP.name(); + } else { + return Values.REDUCE.name(); + } + } + + /** + * Get the level of locality that a given task would have if launched on + * a particular TaskTracker. Returns 0 if the task has data on that machine, + * 1 if it has data on the same rack, etc (depending on number of levels in + * the network hierarchy). + */ + int getLocalityLevel(TaskInProgress tip, TaskTrackerStatus tts) { + Node tracker = jobtracker.getNode(tts.getHost()); + int level = this.maxLevel; + // find the right level across split locations + for (String local : maps[tip.getIdWithinJob()].getSplitLocations()) { + Node datanode = jobtracker.getNode(local); + int newLevel = this.maxLevel; + if (tracker != null && datanode != null) { + newLevel = getMatchingLevelForNodes(tracker, datanode); + } + if (newLevel < level) { + level = newLevel; + // an optimization + if (level == 0) { + break; + } + } + } + return level; + } + + /** + * Test method to set the cluster sizes + */ + void setClusterSize(int clusterSize) { + this.clusterSize = clusterSize; + } + + static class JobSummary { + static final Log LOG = LogFactory.getLog(JobSummary.class); + + // Escape sequences + static final char EQUALS = '='; + static final char[] charsToEscape = + {StringUtils.COMMA, EQUALS, StringUtils.ESCAPE_CHAR}; + + /** + * Log a summary of the job's runtime. + * + * @param job {@link JobInProgress} whose summary is to be logged, cannot + * be null. 
+ * @param cluster {@link ClusterStatus} of the cluster on which the job was + * run, cannot be null + */ + public static void logJobSummary(JobInProgress job, ClusterStatus cluster) { + JobStatus status = job.getStatus(); + JobProfile profile = job.getProfile(); + String user = StringUtils.escapeString(profile.getUser(), + StringUtils.ESCAPE_CHAR, + charsToEscape); + String queue = StringUtils.escapeString(profile.getQueueName(), + StringUtils.ESCAPE_CHAR, + charsToEscape); + Counters jobCounters = job.getJobCounters(); + long mapSlotSeconds = + (jobCounters.getCounter(Counter.SLOTS_MILLIS_MAPS) + + jobCounters.getCounter(Counter.FALLOW_SLOTS_MILLIS_MAPS)) / 1000; + long reduceSlotSeconds = + (jobCounters.getCounter(Counter.SLOTS_MILLIS_REDUCES) + + jobCounters.getCounter(Counter.FALLOW_SLOTS_MILLIS_REDUCES)) / 1000; + + LOG.info("jobId=" + job.getJobID() + StringUtils.COMMA + + "submitTime" + EQUALS + job.getStartTime() + StringUtils.COMMA + + "launchTime" + EQUALS + job.getLaunchTime() + StringUtils.COMMA + + "finishTime" + EQUALS + job.getFinishTime() + StringUtils.COMMA + + "numMaps" + EQUALS + job.getTasks(TaskType.MAP).length + + StringUtils.COMMA + + "numSlotsPerMap" + EQUALS + job.getNumSlotsPerMap() + + StringUtils.COMMA + + "numReduces" + EQUALS + job.getTasks(TaskType.REDUCE).length + + StringUtils.COMMA + + "numSlotsPerReduce" + EQUALS + job.getNumSlotsPerReduce() + + StringUtils.COMMA + + "user" + EQUALS + user + StringUtils.COMMA + + "queue" + EQUALS + queue + StringUtils.COMMA + + "status" + EQUALS + + JobStatus.getJobRunState(status.getRunState()) + + StringUtils.COMMA + + "mapSlotSeconds" + EQUALS + mapSlotSeconds + StringUtils.COMMA + + "reduceSlotsSeconds" + EQUALS + reduceSlotSeconds + + StringUtils.COMMA + + "clusterMapCapacity" + EQUALS + cluster.getMaxMapTasks() + + StringUtils.COMMA + + "clusterReduceCapacity" + EQUALS + cluster.getMaxReduceTasks() + ); + } + } + + /** + * Check to see if the maximum number of speculative tasks are + * already being executed currently. + * @param tasks the set of tasks to test + * @param type the type of task (MAP/REDUCE) that we are considering + * @return has the cap been reached? + */ + private boolean atSpeculativeCap(TaskType type) { + float numTasks = (type == TaskType.MAP) ? + (float)(runningMapTasks - speculativeMapTasks) : + (float)(runningReduceTasks - speculativeReduceTasks); + + if (numTasks == 0){ + return true; // avoid divide by zero + } + int speculativeTaskCount = type == TaskType.MAP ? speculativeMapTasks + : speculativeReduceTasks; + int totalSpeculativeTaskCount = type == TaskType.MAP ? + totalSpeculativeMapTasks.get() : totalSpeculativeReduceTasks.get(); + //return true if totalSpecTask < max(10, 0.01 * total-slots, + // 0.1 * total-running-tasks) + + if (speculativeTaskCount < MIN_SPEC_CAP) { + return false; // at least one slow tracker's worth of slots(default=10) + } + ClusterStatus c = jobtracker.getClusterStatus(false); + int numSlots = (type == TaskType.MAP ? 
                     c.getMaxMapTasks() : c.getMaxReduceTasks());
+    if ((float)speculativeTaskCount < numSlots * MIN_SLOTS_CAP) {
+      return false;
+    }
+    // Check if the total CAP has been reached
+    if (totalSpeculativeTaskCount >= numSlots * TOTAL_SPECULATIVECAP) {
+      return true;
+    }
+    boolean atCap = (((float)(speculativeTaskCount)/numTasks) >= speculativeCap);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("SpeculativeCap is "+speculativeCap+", specTasks/numTasks is " +
+          ((float)(speculativeTaskCount)/numTasks)+
+          ", so atSpecCap() is returning "+atCap);
+    }
+    return atCap;
+  }
+
+  /**
+   * A class for comparing the estimated time to completion of two tasks.
+   */
+  private static class EstimatedTimeLeftComparator
+      implements Comparator<TaskInProgress> {
+    private long time;
+    public EstimatedTimeLeftComparator(long now) {
+      this.time = now;
+    }
+    /**
+     * Estimated time to completion is measured as:
+     *   % of task left to complete (1 - progress) / progress rate of the task.
+     *
+     * This assumes that tasks are linear in their progress, which is
+     * often wrong, especially since progress for reducers is currently
+     * calculated by evenly weighting their three stages (shuffle, sort,
+     * reduce), which rarely account for 1/3 each. This should be fixed in
+     * the future by calculating progressRate more intelligently or splitting
+     * these multi-phase tasks into individual tasks.
+     *
+     * The ordering this comparator defines is: task1 < task2 if task1 is
+     * estimated to finish farther in the future => compare(t1,t2) returns -1
+     */
+    public int compare(TaskInProgress tip1, TaskInProgress tip2) {
+      // We have to use Math.max in the denominator to avoid a divide-by-zero
+      // error, because prog and progRate can both be zero (if one is zero,
+      // the other one will be zero too).
+      // We use the inverse of time_remaining = [(1 - prog) / progRate]
+      // so that (1 - prog) is in the denominator: tasks can have arbitrarily
+      // low progRates in practice (e.g. a task that is half done after 1000
+      // seconds will have a progRate of 0.0000005), so we would rather apply
+      // Math.max to (1 - prog) by putting it in the denominator, which makes
+      // tasks with prog = 1 look 99.99% done instead of 100%, which is okay.
+      double t1 = tip1.getCurrentProgressRate(time) / Math.max(0.0001,
+          1.0 - tip1.getProgress());
+      double t2 = tip2.getCurrentProgressRate(time) / Math.max(0.0001,
+          1.0 - tip2.getProgress());
+      if (t1 < t2) return -1;
+      else if (t2 < t1) return 1;
+      else return 0;
+    }
+  }
+  /**
+   * Compares the average progress rate of the tasks that have finished on
+   * this taskTracker to the average of all successful tasks so far, to see
+   * if this TT is too slow for speculation.
+   * slowNodeThreshold is the allowed number of standard deviations.
+   * @param taskTracker the name of the TaskTracker we are checking
+   * @return whether this TaskTracker is slow
+   */
+  protected boolean isSlowTracker(String taskTracker) {
+    if (trackerMapStats.get(taskTracker) != null &&
+        trackerMapStats.get(taskTracker).mean() -
+        mapTaskStats.mean() > mapTaskStats.std()*slowNodeThreshold) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Tracker " + taskTracker +
+            " declared slow. trackerMapStats.get(taskTracker).mean() :" + trackerMapStats.get(taskTracker).mean() +
+            " mapTaskStats :" + mapTaskStats);
+      }
+      return true;
+    }
+    if (trackerReduceStats.get(taskTracker) != null &&
+        trackerReduceStats.get(taskTracker).mean() -
+        reduceTaskStats.mean() > reduceTaskStats.std()*slowNodeThreshold) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Tracker " + taskTracker +
+            " declared slow. 
trackerReduceStats.get(taskTracker).mean() :" + trackerReduceStats.get(taskTracker).mean() + + " reduceTaskStats :" + reduceTaskStats); + } + return true; + } + return false; + } + static class DataStatistics{ + private int count = 0; + private double sum = 0; + private double sumSquares = 0; + + public DataStatistics() { + } + + public DataStatistics(double initNum) { + this.count = 1; + this.sum = initNum; + this.sumSquares = initNum * initNum; + } + + synchronized public void add(double newNum) { + this.count++; + this.sum += newNum; + this.sumSquares += newNum * newNum; + } + + synchronized public void updateStatistics(double old, double update) { + sub(old); + add(update); + } + + private void sub(double oldNum) { + this.count--; + this.sum = Math.max(this.sum -= oldNum, 0.0d); + this.sumSquares = Math.max(this.sumSquares -= oldNum * oldNum, 0.0d); + } + + public double mean() { + return sum/count; + } + + public double var() { + // E(X^2) - E(X)^2 + return Math.max((sumSquares/count) - mean() * mean(), 0.0d); + } + + public double std() { + return Math.sqrt(this.var()); + } + + public String toString() { + return "DataStatistics: count is " + count + ", sum is " + sum + + ", sumSquares is " + sumSquares + " mean is " + mean() + " std() is " + std(); + } + } + + private void updateTaskTrackerStats(TaskInProgress tip, TaskTrackerStatus ttStatus, + Map trackerStats, DataStatistics overallStats) { + float tipDuration = tip.getExecFinishTime() - + tip.getDispatchTime(tip.getSuccessfulTaskid()); + DataStatistics ttStats = + trackerStats.get(ttStatus.getTrackerName()); + double oldMean = 0.0d; + //We maintain the mean of TaskTrackers' means. That way, we get a single + //data-point for every tracker (used in the evaluation in isSlowTracker) + if (ttStats != null) { + oldMean = ttStats.mean(); + ttStats.add(tipDuration); + overallStats.updateStatistics(oldMean, ttStats.mean()); + } else { + trackerStats.put(ttStatus.getTrackerName(), + (ttStats = new DataStatistics(tipDuration))); + overallStats.add(tipDuration); + } + if (LOG.isDebugEnabled()) { + LOG.debug("Added mean of " +ttStats.mean() + " to trackerStats of type "+ + (tip.isMapTask() ? "Map" : "Reduce") + + " on "+ttStatus.getTrackerName()+". DataStatistics is now: " + + trackerStats.get(ttStatus.getTrackerName())); + } + } + + public DataStatistics getRunningTaskStatistics(boolean isMap) { + if (isMap) { + return runningMapTaskStats; + } else { + return runningReduceTaskStats; + } + } + + public float getSlowTaskThreshold() { + return slowTaskThreshold; + } + + public static int getTotalSpeculativeMapTasks() { + return totalSpeculativeMapTasks.get(); + } + + public static int getTotalSpeculativeReduceTasks() { + return totalSpeculativeReduceTasks.get(); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/JobInProgressListener.java b/src/mapred/org/apache/hadoop/mapred/JobInProgressListener.java new file mode 100644 index 0000000..499ca62 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JobInProgressListener.java @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.IOException; + +/** + * A listener for changes in a {@link JobInProgress job}'s lifecycle in the + * {@link JobTracker}. + */ +abstract class JobInProgressListener { + + /** + * Invoked when a new job has been added to the {@link JobTracker}. + * @param job The added job. + * @throws IOException + */ + public abstract void jobAdded(JobInProgress job) throws IOException; + + /** + * Invoked when a job has been removed from the {@link JobTracker}. + * @param job The removed job. + */ + public abstract void jobRemoved(JobInProgress job); + + /** + * Invoked when a job has been updated in the {@link JobTracker}. + * This change in the job is tracker using {@link JobChangeEvent}. + * @param event the event that tracks the change + */ + public abstract void jobUpdated(JobChangeEvent event); +} diff --git a/src/mapred/org/apache/hadoop/mapred/JobInProgress_Counter.properties b/src/mapred/org/apache/hadoop/mapred/JobInProgress_Counter.properties new file mode 100644 index 0000000..aa96eb4 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JobInProgress_Counter.properties @@ -0,0 +1,14 @@ +# ResourceBundle properties file for job-level counters + +CounterGroupName= Job Counters + +NUM_FAILED_MAPS.name= Failed map tasks +NUM_FAILED_REDUCES.name= Failed reduce tasks +TOTAL_LAUNCHED_MAPS.name= Launched map tasks +TOTAL_LAUNCHED_REDUCES.name= Launched reduce tasks +OTHER_LOCAL_MAPS.name= Other local map tasks +DATA_LOCAL_MAPS.name= Data-local map tasks +RACK_LOCAL_MAPS.name= Rack-local map tasks +FALLOW_SLOTS_MILLIS_MAPS.name= Total time spent by all maps waiting after reserving slots (ms) +FALLOW_SLOTS_MILLIS_REDUCES.name= Total time spent by all reduces waiting after reserving slots (ms) + diff --git a/src/mapred/org/apache/hadoop/mapred/JobPriority.java b/src/mapred/org/apache/hadoop/mapred/JobPriority.java new file mode 100644 index 0000000..3bcfff0 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JobPriority.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +/** + * Used to describe the priority of the running job. 
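// The JobInProgressListener contract above can be exercised with a minimal
// subclass; the class below is purely illustrative (the real consumers are
// the scheduler listeners later in this patch, e.g.
// JobQueueJobInProgressListener):
//
//   class LoggingJobListener extends JobInProgressListener {
//     private static final Log LOG = LogFactory.getLog(LoggingJobListener.class);
//     public void jobAdded(JobInProgress job) throws IOException {
//       LOG.info("job added: " + job.getJobID());
//     }
//     public void jobRemoved(JobInProgress job) {
//       LOG.info("job removed: " + job.getJobID());
//     }
//     public void jobUpdated(JobChangeEvent event) {
//       LOG.info("job updated: " + event.getJobInProgress().getJobID());
//     }
//   }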
+ * + */ +public enum JobPriority { + + VERY_HIGH, + HIGH, + NORMAL, + LOW, + VERY_LOW; + +} diff --git a/src/mapred/org/apache/hadoop/mapred/JobProfile.java b/src/mapred/org/apache/hadoop/mapred/JobProfile.java new file mode 100644 index 0000000..9ad8a48 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JobProfile.java @@ -0,0 +1,183 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.net.URL; + +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableFactories; +import org.apache.hadoop.io.WritableFactory; + +/************************************************** + * A JobProfile is a MapReduce primitive. Tracks a job, + * whether living or dead. + * + **************************************************/ +public class JobProfile implements Writable { + + static { // register a ctor + WritableFactories.setFactory + (JobProfile.class, + new WritableFactory() { + public Writable newInstance() { return new JobProfile(); } + }); + } + + String user; + final JobID jobid; + String jobFile; + String url; + String name; + String queueName; + + /** + * Construct an empty {@link JobProfile}. + */ + public JobProfile() { + jobid = new JobID(); + } + + /** + * Construct a {@link JobProfile} the userid, jobid, + * job config-file, job-details url and job name. + * + * @param user userid of the person who submitted the job. + * @param jobid id of the job. + * @param jobFile job configuration file. + * @param url link to the web-ui for details of the job. + * @param name user-specified job name. + */ + public JobProfile(String user, org.apache.hadoop.mapreduce.JobID jobid, + String jobFile, String url, + String name) { + this(user, jobid, jobFile, url, name, JobConf.DEFAULT_QUEUE_NAME); + } + + /** + * Construct a {@link JobProfile} the userid, jobid, + * job config-file, job-details url and job name. + * + * @param user userid of the person who submitted the job. + * @param jobid id of the job. + * @param jobFile job configuration file. + * @param url link to the web-ui for details of the job. + * @param name user-specified job name. 
+ * @param queueName name of the queue to which the job is submitted + */ + public JobProfile(String user, org.apache.hadoop.mapreduce.JobID jobid, + String jobFile, String url, + String name, String queueName) { + this.user = user; + this.jobid = JobID.downgrade(jobid); + this.jobFile = jobFile; + this.url = url; + this.name = name; + this.queueName = queueName; + } + + /** + * @deprecated use JobProfile(String, JobID, String, String, String) instead + */ + @Deprecated + public JobProfile(String user, String jobid, String jobFile, String url, + String name) { + this(user, JobID.forName(jobid), jobFile, url, name); + } + + /** + * Get the user id. + */ + public String getUser() { + return user; + } + + /** + * Get the job id. + */ + public JobID getJobID() { + return jobid; + } + + /** + * @deprecated use getJobID() instead + */ + @Deprecated + public String getJobId() { + return jobid.toString(); + } + + /** + * Get the configuration file for the job. + */ + public String getJobFile() { + return jobFile; + } + + /** + * Get the link to the web-ui for details of the job. + */ + public URL getURL() { + try { + return new URL(url); + } catch (IOException ie) { + return null; + } + } + + /** + * Get the user-specified job name. + */ + public String getJobName() { + return name; + } + + /** + * Get the name of the queue to which the job is submitted. + * @return name of the queue. + */ + public String getQueueName() { + return queueName; + } + + /////////////////////////////////////// + // Writable + /////////////////////////////////////// + public void write(DataOutput out) throws IOException { + jobid.write(out); + Text.writeString(out, jobFile); + Text.writeString(out, url); + Text.writeString(out, user); + Text.writeString(out, name); + Text.writeString(out, queueName); + } + + public void readFields(DataInput in) throws IOException { + jobid.readFields(in); + this.jobFile = Text.readString(in); + this.url = Text.readString(in); + this.user = Text.readString(in); + this.name = Text.readString(in); + this.queueName = Text.readString(in); + } +} + + diff --git a/src/mapred/org/apache/hadoop/mapred/JobQueueClient.java b/src/mapred/org/apache/hadoop/mapred/JobQueueClient.java new file mode 100644 index 0000000..898aa7b --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JobQueueClient.java @@ -0,0 +1,194 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.mapred; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; +/** + * JobQueueClient is interface provided to the user in order + * to get JobQueue related information from the {@link JobTracker} + * + * It provides the facility to list the JobQueues present and ability to + * view the list of jobs within a specific JobQueue + * +**/ + +class JobQueueClient extends Configured implements Tool { + + JobClient jc; + + public JobQueueClient() { + } + + public JobQueueClient(JobConf conf) throws IOException { + setConf(conf); + } + + private void init(JobConf conf) throws IOException { + setConf(conf); + jc = new JobClient(conf); + } + + @Override + public int run(String[] argv) throws Exception { + int exitcode = -1; + + if(argv.length < 1){ + displayUsage(""); + return exitcode; + } + String cmd = argv[0]; + boolean displayQueueList = false; + boolean displayQueueInfoWithJobs = false; + boolean displayQueueInfoWithoutJobs = false; + boolean displayQueueAclsInfoForCurrentUser = false; + + if("-list".equals(cmd)){ + displayQueueList = true; + }else if("-showacls".equals(cmd)) { + displayQueueAclsInfoForCurrentUser = true; + }else if("-info".equals(cmd)){ + if(argv.length == 2 && !(argv[1].equals("-showJobs"))) { + displayQueueInfoWithoutJobs = true; + } else if(argv.length == 3){ + if(argv[2].equals("-showJobs")){ + displayQueueInfoWithJobs = true; + }else { + displayUsage(cmd); + return exitcode; + } + }else { + displayUsage(cmd); + return exitcode; + } + } else { + displayUsage(cmd); + return exitcode; + } + JobConf conf = new JobConf(getConf()); + init(conf); + if (displayQueueList) { + displayQueueList(); + exitcode = 0; + } else if (displayQueueInfoWithoutJobs){ + displayQueueInfo(argv[1],false); + exitcode = 0; + } else if (displayQueueInfoWithJobs) { + displayQueueInfo(argv[1],true); + exitcode = 0; + }else if (displayQueueAclsInfoForCurrentUser) { + this.displayQueueAclsInfoForCurrentUser(); + exitcode = 0; + } + + return exitcode; + } + + /** + * Method used to display information pertaining to a Single JobQueue + * registered with the {@link QueueManager}. 
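// Typical invocations of this client, assuming the usual bin/hadoop "queue"
// sub-command wiring (shown only to illustrate the argument parsing above):
//   hadoop queue -list
//   hadoop queue -info <queue-name>
//   hadoop queue -info <queue-name> -showJobs
//   hadoop queue -showacls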
Display of the Jobs is + * determine by the boolean + * + * @throws IOException + */ + + private void displayQueueInfo(String queue, boolean showJobs) throws IOException { + JobQueueInfo schedInfo = jc.getQueueInfo(queue); + if (schedInfo == null) { + System.out.printf("Queue Name : %s has no scheduling information \n", queue); + } else { + System.out.printf("Queue Name : %s \n", schedInfo.getQueueName()); + System.out.printf("Scheduling Info : %s \n",schedInfo.getSchedulingInfo()); + } + if (showJobs) { + System.out.printf("Job List\n"); + JobStatus[] jobs = jc.getJobsFromQueue(queue); + if (jobs == null) + jobs = new JobStatus[0]; + jc.displayJobList(jobs); + } + } + + /** + * Method used to display the list of the JobQueues registered + * with the {@link QueueManager} + * + * @throws IOException + */ + private void displayQueueList() throws IOException { + JobQueueInfo[] queues = jc.getQueues(); + for (JobQueueInfo queue : queues) { + String schedInfo = queue.getSchedulingInfo(); + if(schedInfo.trim().equals("")){ + schedInfo = "N/A"; + } + System.out.printf("Queue Name : %s \n", queue.getQueueName()); + System.out.printf("Scheduling Info : %s \n",queue.getSchedulingInfo()); + } + } + + private void displayQueueAclsInfoForCurrentUser() throws IOException { + QueueAclsInfo[] queueAclsInfoList = jc.getQueueAclsForCurrentUser(); + UserGroupInformation ugi = UserGroupInformation.readFrom(getConf()); + if (queueAclsInfoList.length > 0) { + System.out.println("Queue acls for user : " + + ugi.getUserName()); + System.out.println("\nQueue Operations"); + System.out.println("====================="); + for (QueueAclsInfo queueInfo : queueAclsInfoList) { + System.out.print(queueInfo.getQueueName() + " "); + String[] ops = queueInfo.getOperations(); + int max = ops.length - 1; + for (int j = 0; j < ops.length; j++) { + System.out.print(ops[j].replaceFirst("acl-", "")); + if (j < max) { + System.out.print(","); + } + } + System.out.println(); + } + } else { + System.out.println("User " + + ugi.getUserName() + + " does not have access to any queue. \n"); + } + } + + private void displayUsage(String cmd) { + String prefix = "Usage: JobQueueClient "; + if ("-queueinfo".equals(cmd)){ + System.err.println(prefix + "[" + cmd + " [-showJobs]]"); + }else { + System.err.printf(prefix + " \n"); + System.err.printf("\t[-list]\n"); + System.err.printf("\t[-info [-showJobs]]\n"); + System.err.printf("\t[-showacls] \n\n"); + ToolRunner.printGenericCommandUsage(System.out); + } + } + + public static void main(String[] argv) throws Exception { + int res = ToolRunner.run(new JobQueueClient(), argv); + System.exit(res); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/JobQueueInfo.java b/src/mapred/org/apache/hadoop/mapred/JobQueueInfo.java new file mode 100644 index 0000000..bac4178 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JobQueueInfo.java @@ -0,0 +1,118 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; + +/** + * Class that contains the information regarding the Job Queues which are + * maintained by the Hadoop Map/Reduce framework. + * + */ + +public class JobQueueInfo implements Writable { + + private String queueName = ""; + //The scheduling Information object is read back as String. + //Once the scheduling information is set there is no way to recover it. + private String schedulingInfo; + + + /** + * Default constructor for Job Queue Info. + * + */ + public JobQueueInfo() { + + } + /** + * Construct a new JobQueueInfo object using the queue name and the + * scheduling information passed. + * + * @param queueName Name of the job queue + * @param schedulingInfo Scheduling Information associated with the job + * queue + */ + public JobQueueInfo(String queueName, String schedulingInfo) { + this.queueName = queueName; + this.schedulingInfo = schedulingInfo; + } + + + /** + * Set the queue name of the JobQueueInfo + * + * @param queueName Name of the job queue. + */ + public void setQueueName(String queueName) { + this.queueName = queueName; + } + + /** + * Get the queue name from JobQueueInfo + * + * @return queue name + */ + public String getQueueName() { + return queueName; + } + + /** + * Set the scheduling information associated to particular job queue + * + * @param schedulingInfo + */ + public void setSchedulingInfo(String schedulingInfo) { + this.schedulingInfo = schedulingInfo; + } + + /** + * Gets the scheduling information associated to particular job queue. + * If nothing is set would return "N/A" + * + * @return Scheduling information associated to particular Job Queue + */ + public String getSchedulingInfo() { + if(schedulingInfo != null) { + return schedulingInfo; + }else { + return "N/A"; + } + } + + @Override + public void readFields(DataInput in) throws IOException { + queueName = Text.readString(in); + schedulingInfo = Text.readString(in); + } + + @Override + public void write(DataOutput out) throws IOException { + Text.writeString(out, queueName); + if(schedulingInfo!= null) { + Text.writeString(out, schedulingInfo); + }else { + Text.writeString(out, "N/A"); + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/JobQueueJobInProgressListener.java b/src/mapred/org/apache/hadoop/mapred/JobQueueJobInProgressListener.java new file mode 100644 index 0000000..621c50e --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JobQueueJobInProgressListener.java @@ -0,0 +1,145 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.Map; +import java.util.TreeMap; + +import org.apache.hadoop.mapred.JobStatusChangeEvent.EventType; + +/** + * A {@link JobInProgressListener} that maintains the jobs being managed in + * a queue. By default the queue is FIFO, but it is possible to use custom + * queue ordering by using the + * {@link #JobQueueJobInProgressListener(Collection)} constructor. + */ +class JobQueueJobInProgressListener extends JobInProgressListener { + + /** A class that groups all the information from a {@link JobInProgress} that + * is necessary for scheduling a job. + */ + static class JobSchedulingInfo { + private JobPriority priority; + private long startTime; + private JobID id; + + public JobSchedulingInfo(JobInProgress jip) { + this(jip.getStatus()); + } + + public JobSchedulingInfo(JobStatus status) { + priority = status.getJobPriority(); + startTime = status.getStartTime(); + id = status.getJobID(); + } + + JobPriority getPriority() {return priority;} + long getStartTime() {return startTime;} + JobID getJobID() {return id;} + } + + static final Comparator FIFO_JOB_QUEUE_COMPARATOR + = new Comparator() { + public int compare(JobSchedulingInfo o1, JobSchedulingInfo o2) { + int res = o1.getPriority().compareTo(o2.getPriority()); + if (res == 0) { + if (o1.getStartTime() < o2.getStartTime()) { + res = -1; + } else { + res = (o1.getStartTime() == o2.getStartTime() ? 0 : 1); + } + } + if (res == 0) { + res = o1.getJobID().compareTo(o2.getJobID()); + } + return res; + } + }; + + private Map jobQueue; + + public JobQueueJobInProgressListener() { + this(new TreeMap(FIFO_JOB_QUEUE_COMPARATOR)); + } + + /** + * For clients that want to provide their own job priorities. + * @param jobQueue A collection whose iterator returns jobs in priority order. + */ + protected JobQueueJobInProgressListener(Map jobQueue) { + this.jobQueue = Collections.synchronizedMap(jobQueue); + } + + /** + * Returns a synchronized view of the job queue. 
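// The FIFO_JOB_QUEUE_COMPARATOR above orders jobs by priority first, then by
// start time, then by JobID as a tie-breaker. With illustrative values:
//   job_a (HIGH,   startTime = 200)
//   job_b (NORMAL, startTime = 100)
//   job_c (NORMAL, startTime = 100)
// the queue iterates job_a first (higher priority wins despite the later
// start time), then job_b and job_c in JobID order.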
+ */ + public Collection getJobQueue() { + return jobQueue.values(); + } + + @Override + public void jobAdded(JobInProgress job) { + jobQueue.put(new JobSchedulingInfo(job.getStatus()), job); + } + + // Job will be removed once the job completes + @Override + public void jobRemoved(JobInProgress job) {} + + private void jobCompleted(JobSchedulingInfo oldInfo) { + jobQueue.remove(oldInfo); + } + + @Override + public synchronized void jobUpdated(JobChangeEvent event) { + JobInProgress job = event.getJobInProgress(); + if (event instanceof JobStatusChangeEvent) { + // Check if the ordering of the job has changed + // For now priority and start-time can change the job ordering + JobStatusChangeEvent statusEvent = (JobStatusChangeEvent)event; + JobSchedulingInfo oldInfo = + new JobSchedulingInfo(statusEvent.getOldStatus()); + if (statusEvent.getEventType() == EventType.PRIORITY_CHANGED + || statusEvent.getEventType() == EventType.START_TIME_CHANGED) { + // Make a priority change + reorderJobs(job, oldInfo); + } else if (statusEvent.getEventType() == EventType.RUN_STATE_CHANGED) { + // Check if the job is complete + int runState = statusEvent.getNewStatus().getRunState(); + if (runState == JobStatus.SUCCEEDED + || runState == JobStatus.FAILED + || runState == JobStatus.KILLED) { + jobCompleted(oldInfo); + } + } + } + } + + private void reorderJobs(JobInProgress job, JobSchedulingInfo oldInfo) { + synchronized (jobQueue) { + jobQueue.remove(oldInfo); + jobQueue.put(new JobSchedulingInfo(job), job); + } + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/JobQueueTaskScheduler.java b/src/mapred/org/apache/hadoop/mapred/JobQueueTaskScheduler.java new file mode 100644 index 0000000..dbcd641 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JobQueueTaskScheduler.java @@ -0,0 +1,311 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; + +/** + * A {@link TaskScheduler} that keeps jobs in a queue in priority order (FIFO + * by default). 
+ */ +class JobQueueTaskScheduler extends TaskScheduler { + + private static final int MIN_CLUSTER_SIZE_FOR_PADDING = 3; + public static final Log LOG = LogFactory.getLog(JobQueueTaskScheduler.class); + + protected JobQueueJobInProgressListener jobQueueJobInProgressListener; + protected EagerTaskInitializationListener eagerTaskInitializationListener; + private float padFraction; + + public JobQueueTaskScheduler() { + this.jobQueueJobInProgressListener = new JobQueueJobInProgressListener(); + } + + @Override + public synchronized void start() throws IOException { + super.start(); + taskTrackerManager.addJobInProgressListener(jobQueueJobInProgressListener); + eagerTaskInitializationListener.setTaskTrackerManager(taskTrackerManager); + eagerTaskInitializationListener.start(); + taskTrackerManager.addJobInProgressListener( + eagerTaskInitializationListener); + } + + @Override + public synchronized void terminate() throws IOException { + if (jobQueueJobInProgressListener != null) { + taskTrackerManager.removeJobInProgressListener( + jobQueueJobInProgressListener); + } + if (eagerTaskInitializationListener != null) { + taskTrackerManager.removeJobInProgressListener( + eagerTaskInitializationListener); + eagerTaskInitializationListener.terminate(); + } + super.terminate(); + } + + @Override + public synchronized void setConf(Configuration conf) { + super.setConf(conf); + padFraction = conf.getFloat("mapred.jobtracker.taskalloc.capacitypad", + 0.01f); + this.eagerTaskInitializationListener = + new EagerTaskInitializationListener(conf); + } + + @Override + public synchronized List assignTasks(TaskTracker taskTracker) + throws IOException { + TaskTrackerStatus taskTrackerStatus = taskTracker.getStatus(); + ClusterStatus clusterStatus = taskTrackerManager.getClusterStatus(); + final int numTaskTrackers = clusterStatus.getTaskTrackers(); + final int clusterMapCapacity = clusterStatus.getMaxMapTasks(); + final int clusterReduceCapacity = clusterStatus.getMaxReduceTasks(); + + Collection jobQueue = + jobQueueJobInProgressListener.getJobQueue(); + + // + // Get map + reduce counts for the current tracker. + // + final int trackerMapCapacity = taskTrackerStatus.getMaxMapSlots(); + final int trackerReduceCapacity = taskTrackerStatus.getMaxReduceSlots(); + final int trackerRunningMaps = taskTrackerStatus.countMapTasks(); + final int trackerRunningReduces = taskTrackerStatus.countReduceTasks(); + + // Assigned tasks + List assignedTasks = new ArrayList(); + + // + // Compute (running + pending) map and reduce task numbers across pool + // + int remainingReduceLoad = 0; + int remainingMapLoad = 0; + synchronized (jobQueue) { + for (JobInProgress job : jobQueue) { + if (job.getStatus().getRunState() == JobStatus.RUNNING) { + remainingMapLoad += (job.desiredMaps() - job.finishedMaps()); + if (job.scheduleReduces()) { + remainingReduceLoad += + (job.desiredReduces() - job.finishedReduces()); + } + } + } + } + + // Compute the 'load factor' for maps and reduces + double mapLoadFactor = 0.0; + if (clusterMapCapacity > 0) { + mapLoadFactor = (double)remainingMapLoad / clusterMapCapacity; + } + double reduceLoadFactor = 0.0; + if (clusterReduceCapacity > 0) { + reduceLoadFactor = (double)remainingReduceLoad / clusterReduceCapacity; + } + + // + // In the below steps, we allocate first map tasks (if appropriate), + // and then reduce tasks if appropriate. We go through all jobs + // in order of job arrival; jobs only get serviced if their + // predecessors are serviced, too. 
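// A numeric illustration of the load-factor computation above (numbers made
// up): with remainingMapLoad = 60 and clusterMapCapacity = 100 slots,
// mapLoadFactor = 0.6. A tracker with trackerMapCapacity = 4 is then capped
// below at trackerCurrentMapCapacity = min(ceil(0.6 * 4), 4) = 3, so if it
// already runs trackerRunningMaps = 1 map, it is offered at most 2 new map
// tasks in this heartbeat.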
+ // + + // + // We assign tasks to the current taskTracker if the given machine + // has a workload that's less than the maximum load of that kind of + // task. + // However, if the cluster is close to getting loaded i.e. we don't + // have enough _padding_ for speculative executions etc., we only + // schedule the "highest priority" task i.e. the task from the job + // with the highest priority. + // + + final int trackerCurrentMapCapacity = + Math.min((int)Math.ceil(mapLoadFactor * trackerMapCapacity), + trackerMapCapacity); + int availableMapSlots = trackerCurrentMapCapacity - trackerRunningMaps; + boolean exceededMapPadding = false; + if (availableMapSlots > 0) { + exceededMapPadding = + exceededPadding(true, clusterStatus, trackerMapCapacity); + } + + int numLocalMaps = 0; + int numNonLocalMaps = 0; + scheduleMaps: + for (int i=0; i < availableMapSlots; ++i) { + synchronized (jobQueue) { + for (JobInProgress job : jobQueue) { + if (job.getStatus().getRunState() != JobStatus.RUNNING) { + continue; + } + + Task t = null; + + // Try to schedule a node-local or rack-local Map task + t = + job.obtainNewLocalMapTask(taskTrackerStatus, numTaskTrackers, + taskTrackerManager.getNumberOfUniqueHosts()); + if (t != null) { + assignedTasks.add(t); + ++numLocalMaps; + + // Don't assign map tasks to the hilt! + // Leave some free slots in the cluster for future task-failures, + // speculative tasks etc. beyond the highest priority job + if (exceededMapPadding) { + break scheduleMaps; + } + + // Try all jobs again for the next Map task + break; + } + + // Try to schedule a node-local or rack-local Map task + t = + job.obtainNewNonLocalMapTask(taskTrackerStatus, numTaskTrackers, + taskTrackerManager.getNumberOfUniqueHosts()); + + if (t != null) { + assignedTasks.add(t); + ++numNonLocalMaps; + + // We assign at most 1 off-switch or speculative task + // This is to prevent TaskTrackers from stealing local-tasks + // from other TaskTrackers. + break scheduleMaps; + } + } + } + } + int assignedMaps = assignedTasks.size(); + + // + // Same thing, but for reduce tasks + // However we _never_ assign more than 1 reduce task per heartbeat + // + final int trackerCurrentReduceCapacity = + Math.min((int)Math.ceil(reduceLoadFactor * trackerReduceCapacity), + trackerReduceCapacity); + final int availableReduceSlots = + Math.min((trackerCurrentReduceCapacity - trackerRunningReduces), 1); + boolean exceededReducePadding = false; + if (availableReduceSlots > 0) { + exceededReducePadding = exceededPadding(false, clusterStatus, + trackerReduceCapacity); + synchronized (jobQueue) { + for (JobInProgress job : jobQueue) { + if (job.getStatus().getRunState() != JobStatus.RUNNING || + job.numReduceTasks == 0) { + continue; + } + + Task t = + job.obtainNewReduceTask(taskTrackerStatus, numTaskTrackers, + taskTrackerManager.getNumberOfUniqueHosts() + ); + if (t != null) { + assignedTasks.add(t); + break; + } + + // Don't assign reduce tasks to the hilt! + // Leave some free slots in the cluster for future task-failures, + // speculative tasks etc. 
beyond the highest priority job + if (exceededReducePadding) { + break; + } + } + } + } + + if (LOG.isDebugEnabled()) { + LOG.debug("Task assignments for " + taskTrackerStatus.getTrackerName() + " --> " + + "[" + mapLoadFactor + ", " + trackerMapCapacity + ", " + + trackerCurrentMapCapacity + ", " + trackerRunningMaps + "] -> [" + + (trackerCurrentMapCapacity - trackerRunningMaps) + ", " + + assignedMaps + " (" + numLocalMaps + ", " + numNonLocalMaps + + ")] [" + reduceLoadFactor + ", " + trackerReduceCapacity + ", " + + trackerCurrentReduceCapacity + "," + trackerRunningReduces + + "] -> [" + (trackerCurrentReduceCapacity - trackerRunningReduces) + + ", " + (assignedTasks.size()-assignedMaps) + "]"); + } + + return assignedTasks; + } + + private boolean exceededPadding(boolean isMapTask, + ClusterStatus clusterStatus, + int maxTaskTrackerSlots) { + int numTaskTrackers = clusterStatus.getTaskTrackers(); + int totalTasks = + (isMapTask) ? clusterStatus.getMapTasks() : + clusterStatus.getReduceTasks(); + int totalTaskCapacity = + isMapTask ? clusterStatus.getMaxMapTasks() : + clusterStatus.getMaxReduceTasks(); + + Collection jobQueue = + jobQueueJobInProgressListener.getJobQueue(); + + boolean exceededPadding = false; + synchronized (jobQueue) { + int totalNeededTasks = 0; + for (JobInProgress job : jobQueue) { + if (job.getStatus().getRunState() != JobStatus.RUNNING || + job.numReduceTasks == 0) { + continue; + } + + // + // Beyond the highest-priority task, reserve a little + // room for failures and speculative executions; don't + // schedule tasks to the hilt. + // + totalNeededTasks += + isMapTask ? job.desiredMaps() : job.desiredReduces(); + int padding = 0; + if (numTaskTrackers > MIN_CLUSTER_SIZE_FOR_PADDING) { + padding = + Math.min(maxTaskTrackerSlots, + (int) (totalNeededTasks * padFraction)); + } + if (totalTasks + padding >= totalTaskCapacity) { + exceededPadding = true; + break; + } + } + } + + return exceededPadding; + } + + @Override + public synchronized Collection getJobs(String queueName) { + return jobQueueJobInProgressListener.getJobQueue(); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/JobStatus.java b/src/mapred/org/apache/hadoop/mapred/JobStatus.java new file mode 100644 index 0000000..2081b04 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JobStatus.java @@ -0,0 +1,358 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableFactories; +import org.apache.hadoop.io.WritableFactory; +import org.apache.hadoop.io.WritableUtils; + +/************************************************** + * Describes the current status of a job. This is + * not intended to be a comprehensive piece of data. + * For that, look at JobProfile. + **************************************************/ +public class JobStatus implements Writable, Cloneable { + + static { // register a ctor + WritableFactories.setFactory + (JobStatus.class, + new WritableFactory() { + public Writable newInstance() { return new JobStatus(); } + }); + } + + public static final int RUNNING = 1; + public static final int SUCCEEDED = 2; + public static final int FAILED = 3; + public static final int PREP = 4; + public static final int KILLED = 5; + + private static final String UNKNOWN = "UNKNOWN"; + private static final String[] runStates = + {UNKNOWN, "RUNNING", "SUCCEEDED", "FAILED", "PREP", "KILLED"}; + + /** + * Helper method to get human-readable state of the job. + * @param state job state + * @return human-readable state of the job + */ + public static String getJobRunState(int state) { + if (state < 1 || state >= runStates.length) { + return UNKNOWN; + } + return runStates[state]; + } + + private JobID jobid; + private float mapProgress; + private float reduceProgress; + private float cleanupProgress; + private float setupProgress; + private int runState; + private long startTime; + private String user; + private JobPriority priority; + private String schedulingInfo="NA"; + + /** + */ + public JobStatus() { + } + + /** + * Create a job status object for a given jobid. + * @param jobid The jobid of the job + * @param mapProgress The progress made on the maps + * @param reduceProgress The progress made on the reduces + * @param cleanupProgress The progress made on cleanup + * @param runState The current state of the job + */ + public JobStatus(JobID jobid, float mapProgress, float reduceProgress, + float cleanupProgress, int runState) { + this(jobid, mapProgress, reduceProgress, cleanupProgress, runState, + JobPriority.NORMAL); + } + + /** + * Create a job status object for a given jobid. + * @param jobid The jobid of the job + * @param mapProgress The progress made on the maps + * @param reduceProgress The progress made on the reduces + * @param runState The current state of the job + */ + public JobStatus(JobID jobid, float mapProgress, float reduceProgress, + int runState) { + this(jobid, mapProgress, reduceProgress, 0.0f, runState); + } + + /** + * Create a job status object for a given jobid. + * @param jobid The jobid of the job + * @param mapProgress The progress made on the maps + * @param reduceProgress The progress made on the reduces + * @param runState The current state of the job + * @param jp Priority of the job. + */ + public JobStatus(JobID jobid, float mapProgress, float reduceProgress, + float cleanupProgress, int runState, JobPriority jp) { + this(jobid, 0.0f, mapProgress, reduceProgress, + cleanupProgress, runState, jp); + } + + /** + * Create a job status object for a given jobid. 
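// For reference, getJobRunState() above maps the numeric run states to their
// names and falls back to UNKNOWN for anything out of range:
//   getJobRunState(JobStatus.RUNNING) -> "RUNNING"
//   getJobRunState(JobStatus.KILLED)  -> "KILLED"
//   getJobRunState(0) or getJobRunState(6) -> "UNKNOWN"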
+ * @param jobid The jobid of the job + * @param setupProgress The progress made on the setup + * @param mapProgress The progress made on the maps + * @param reduceProgress The progress made on the reduces + * @param cleanupProgress The progress made on the cleanup + * @param runState The current state of the job + * @param jp Priority of the job. + */ + public JobStatus(JobID jobid, float setupProgress, float mapProgress, + float reduceProgress, float cleanupProgress, + int runState, JobPriority jp) { + this.jobid = jobid; + this.setupProgress = setupProgress; + this.mapProgress = mapProgress; + this.reduceProgress = reduceProgress; + this.cleanupProgress = cleanupProgress; + this.runState = runState; + this.user = "nobody"; + if (jp == null) { + throw new IllegalArgumentException("Job Priority cannot be null."); + } + priority = jp; + } + + /** + * Create a job status object for a given jobid. + * @param jobid The jobid of the job + * @param setupProgress The progress made on the setup + * @param mapProgress The progress made on the maps + * @param reduceProgress The progress made on the reduces + * @param cleanupProgress The progress made on the cleanup + * @param runState The current state of the job + * @param jp Priority of the job. + * @param user Userid of the person who submitted the job. + */ + public JobStatus(JobID jobid, float setupProgress, float mapProgress, + float reduceProgress, float cleanupProgress, + int runState, JobPriority jp, String user) { + this.jobid = jobid; + this.setupProgress = setupProgress; + this.mapProgress = mapProgress; + this.reduceProgress = reduceProgress; + this.cleanupProgress = cleanupProgress; + this.runState = runState; + this.user = user; + if (jp == null) { + throw new IllegalArgumentException("Job Priority cannot be null."); + } + priority = jp; + } + + /** + * @deprecated use getJobID instead + */ + @Deprecated + public String getJobId() { return jobid.toString(); } + + /** + * @return The jobid of the Job + */ + public JobID getJobID() { return jobid; } + + /** + * @return Percentage of progress in maps + */ + public synchronized float mapProgress() { return mapProgress; } + + /** + * Sets the map progress of this job + * @param p The value of map progress to set to + */ + synchronized void setMapProgress(float p) { + this.mapProgress = (float) Math.min(1.0, Math.max(0.0, p)); + } + + /** + * @return Percentage of progress in cleanup + */ + public synchronized float cleanupProgress() { return cleanupProgress; } + + /** + * Sets the cleanup progress of this job + * @param p The value of cleanup progress to set to + */ + synchronized void setCleanupProgress(float p) { + this.cleanupProgress = (float) Math.min(1.0, Math.max(0.0, p)); + } + + /** + * @return Percentage of progress in setup + */ + public synchronized float setupProgress() { return setupProgress; } + + /** + * Sets the setup progress of this job + * @param p The value of setup progress to set to + */ + synchronized void setSetupProgress(float p) { + this.setupProgress = (float) Math.min(1.0, Math.max(0.0, p)); + } + + /** + * @return Percentage of progress in reduce + */ + public synchronized float reduceProgress() { return reduceProgress; } + + /** + * Sets the reduce progress of this Job + * @param p The value of reduce progress to set to + */ + synchronized void setReduceProgress(float p) { + this.reduceProgress = (float) Math.min(1.0, Math.max(0.0, p)); + } + + /** + * @return running state of the job + */ + public synchronized int getRunState() { return runState; } + + /** 
+ * Change the current run state of the job. + */ + public synchronized void setRunState(int state) { + this.runState = state; + } + + /** + * Set the start time of the job + * @param startTime The startTime of the job + */ + synchronized void setStartTime(long startTime) { this.startTime = startTime;} + + /** + * @return start time of the job + */ + synchronized public long getStartTime() { return startTime;} + + @Override + public Object clone() { + try { + return super.clone(); + } catch (CloneNotSupportedException cnse) { + // Shouldn't happen since we do implement Clonable + throw new InternalError(cnse.toString()); + } + } + + /** + * @param user The username of the job + */ + synchronized void setUsername(String userName) { this.user = userName;} + + /** + * @return the username of the job + */ + public synchronized String getUsername() { return this.user;} + + /** + * Gets the Scheduling information associated to a particular Job. + * @return the scheduling information of the job + */ + public synchronized String getSchedulingInfo() { + return schedulingInfo; + } + + /** + * Used to set the scheduling information associated to a particular Job. + * + * @param schedulingInfo Scheduling information of the job + */ + public synchronized void setSchedulingInfo(String schedulingInfo) { + this.schedulingInfo = schedulingInfo; + } + + /** + * Return the priority of the job + * @return job priority + */ + public synchronized JobPriority getJobPriority() { return priority; } + + /** + * Set the priority of the job, defaulting to NORMAL. + * @param jp new job priority + */ + public synchronized void setJobPriority(JobPriority jp) { + if (jp == null) { + throw new IllegalArgumentException("Job priority cannot be null."); + } + priority = jp; + } + + /** + * Returns true if the status is for a completed job. + */ + public synchronized boolean isJobComplete() { + return (runState == JobStatus.SUCCEEDED || runState == JobStatus.FAILED + || runState == JobStatus.KILLED); + } + + /////////////////////////////////////// + // Writable + /////////////////////////////////////// + public synchronized void write(DataOutput out) throws IOException { + jobid.write(out); + out.writeFloat(setupProgress); + out.writeFloat(mapProgress); + out.writeFloat(reduceProgress); + out.writeFloat(cleanupProgress); + out.writeInt(runState); + out.writeLong(startTime); + Text.writeString(out, user); + WritableUtils.writeEnum(out, priority); + Text.writeString(out, schedulingInfo); + } + + public synchronized void readFields(DataInput in) throws IOException { + this.jobid = JobID.read(in); + this.setupProgress = in.readFloat(); + this.mapProgress = in.readFloat(); + this.reduceProgress = in.readFloat(); + this.cleanupProgress = in.readFloat(); + this.runState = in.readInt(); + this.startTime = in.readLong(); + this.user = Text.readString(in); + this.priority = WritableUtils.readEnum(in, JobPriority.class); + this.schedulingInfo = Text.readString(in); + } + + // A utility to convert new job runstates to the old ones. 
+ static int getOldNewJobRunState( + org.apache.hadoop.mapreduce.JobStatus.State state) { + return state.getValue(); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/JobStatusChangeEvent.java b/src/mapred/org/apache/hadoop/mapred/JobStatusChangeEvent.java new file mode 100644 index 0000000..b3817f9 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JobStatusChangeEvent.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +/** + * {@link JobStatusChangeEvent} tracks the change in job's status. Job's + * status can change w.r.t + * - run state i.e PREP, RUNNING, FAILED, KILLED, SUCCEEDED + * - start time + * - priority + * Note that job times can change as the job can get restarted. + */ +class JobStatusChangeEvent extends JobChangeEvent { + // Events in job status that can lead to a job-status change + static enum EventType {RUN_STATE_CHANGED, START_TIME_CHANGED, PRIORITY_CHANGED} + + private JobStatus oldStatus; + private JobStatus newStatus; + private EventType eventType; + + JobStatusChangeEvent(JobInProgress jip, EventType eventType, + JobStatus oldStatus, JobStatus newStatus) { + super(jip); + this.oldStatus = oldStatus; + this.newStatus = newStatus; + this.eventType = eventType; + } + + /** + * Create a {@link JobStatusChangeEvent} indicating the state has changed. + * Note that here we assume that the state change doesnt care about the old + * state. + */ + JobStatusChangeEvent(JobInProgress jip, EventType eventType, JobStatus status) + { + this(jip, eventType, status, status); + } + + /** + * Returns a event-type that caused the state change + */ + EventType getEventType() { + return eventType; + } + + /** + * Get the old job status + */ + JobStatus getOldStatus() { + return oldStatus; + } + + /** + * Get the new job status as a result of the events + */ + JobStatus getNewStatus() { + return newStatus; + } +} \ No newline at end of file diff --git a/src/mapred/org/apache/hadoop/mapred/JobSubmissionProtocol.java b/src/mapred/org/apache/hadoop/mapred/JobSubmissionProtocol.java new file mode 100644 index 0000000..822723f --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JobSubmissionProtocol.java @@ -0,0 +1,226 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
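// Illustrative sketch (not part of the patch): round-tripping a JobStatus
// through the Writable write()/readFields() pair shown above. The job id and
// progress values are made up, and the no-argument JobStatus constructor that
// Writable deserialization relies on is assumed to be available.
import java.io.IOException;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.JobPriority;
import org.apache.hadoop.mapred.JobStatus;

public class JobStatusRoundTrip {
  public static void main(String[] args) throws IOException {
    JobID id = new JobID("201012011611", 42);
    // setup/map/reduce/cleanup progress, run state and priority, as in the
    // constructors above (the setters later clamp progress to [0, 1]).
    JobStatus original = new JobStatus(id, 1.0f, 0.5f, 0.0f, 0.0f,
                                       JobStatus.RUNNING, JobPriority.NORMAL);

    // Serialize via write(DataOutput) ...
    DataOutputBuffer out = new DataOutputBuffer();
    original.write(out);

    // ... and read it back via readFields(DataInput).
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    JobStatus copy = new JobStatus();
    copy.readFields(in);

    System.out.println(copy.getJobID() + " map=" + copy.mapProgress()
        + " priority=" + copy.getJobPriority());
  }
}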
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; + +import org.apache.hadoop.ipc.VersionedProtocol; + +/** + * Protocol that a JobClient and the central JobTracker use to communicate. The + * JobClient can use these methods to submit a Job for execution, and learn about + * the current system status. + */ +interface JobSubmissionProtocol extends VersionedProtocol { + /* + *Changing the versionID to 2L since the getTaskCompletionEvents method has + *changed. + *Changed to 4 since killTask(String,boolean) is added + *Version 4: added jobtracker state to ClusterStatus + *Version 5: max_tasks in ClusterStatus is replaced by + * max_map_tasks and max_reduce_tasks for HADOOP-1274 + * Version 6: change the counters representation for HADOOP-2248 + * Version 7: added getAllJobs for HADOOP-2487 + * Version 8: change {job|task}id's to use corresponding objects rather that strings. + * Version 9: change the counter representation for HADOOP-1915 + * Version 10: added getSystemDir for HADOOP-3135 + * Version 11: changed JobProfile to include the queue name for HADOOP-3698 + * Version 12: Added getCleanupTaskReports and + * cleanupProgress to JobStatus as part of HADOOP-3150 + * Version 13: Added getJobQueueInfos and getJobQueueInfo(queue name) + * and getAllJobs(queue) as a part of HADOOP-3930 + * Version 14: Added setPriority for HADOOP-4124 + * Version 15: Added KILLED status to JobStatus as part of HADOOP-3924 + * Version 16: Added getSetupTaskReports and + * setupProgress to JobStatus as part of HADOOP-4261 + * Version 17: getClusterStatus returns the amount of memory used by + * the server. HADOOP-4435 + * Version 18: Added blacklisted trackers to the ClusterStatus + * for HADOOP-4305 + * Version 19: Modified TaskReport to have TIP status and modified the + * method getClusterStatus() to take a boolean argument + * for HADOOP-4807 + * Version 20: Modified ClusterStatus to have the tasktracker expiry + * interval for HADOOP-4939 + * Version 21: Added method getQueueAclsForCurrentUser to get queue acls info + * for a user' + * Version 22: MAPRED-892 extending ClusterStatus to have information about + * individual task trackers. + */ + public static final long versionID = 22L; + + /** + * Allocate a name for the job. + * @return a unique job name for submitting jobs. + * @throws IOException + */ + public JobID getNewJobId() throws IOException; + + /** + * Submit a Job for execution. Returns the latest profile for + * that job. + * The job files should be submitted in system-dir/jobName. 
+ */ + public JobStatus submitJob(JobID jobName) throws IOException; + + /** + * Get the current status of the cluster + * @param detailed if true then report tracker names as well + * @return summary of the state of the cluster + */ + public ClusterStatus getClusterStatus(boolean detailed) throws IOException; + + + /** + * Kill the indicated job + */ + public void killJob(JobID jobid) throws IOException; + + /** + * Set the priority of the specified job + * @param jobid ID of the job + * @param priority Priority to be set for the job + */ + public void setJobPriority(JobID jobid, String priority) + throws IOException; + /** + * Kill indicated task attempt. + * @param taskId the id of the task to kill. + * @param shouldFail if true the task is failed and added to failed tasks list, otherwise + * it is just killed, w/o affecting job failure status. + */ + public boolean killTask(TaskAttemptID taskId, boolean shouldFail) throws IOException; + + /** + * Grab a handle to a job that is already known to the JobTracker. + * @return Profile of the job, or null if not found. + */ + public JobProfile getJobProfile(JobID jobid) throws IOException; + + /** + * Grab a handle to a job that is already known to the JobTracker. + * @return Status of the job, or null if not found. + */ + public JobStatus getJobStatus(JobID jobid) throws IOException; + + /** + * Grab the current job counters + */ + public Counters getJobCounters(JobID jobid) throws IOException; + + /** + * Grab a bunch of info on the map tasks that make up the job + */ + public TaskReport[] getMapTaskReports(JobID jobid) throws IOException; + + /** + * Grab a bunch of info on the reduce tasks that make up the job + */ + public TaskReport[] getReduceTaskReports(JobID jobid) throws IOException; + + /** + * Grab a bunch of info on the cleanup tasks that make up the job + */ + public TaskReport[] getCleanupTaskReports(JobID jobid) throws IOException; + + /** + * Grab a bunch of info on the setup tasks that make up the job + */ + public TaskReport[] getSetupTaskReports(JobID jobid) throws IOException; + + /** + * A MapReduce system always operates on a single filesystem. This + * function returns the fs name. ('local' if the localfs; 'addr:port' + * if dfs). The client can then copy files into the right locations + * prior to submitting the job. + */ + public String getFilesystemName() throws IOException; + + /** + * Get the jobs that are not completed and not failed + * @return array of JobStatus for the running/to-be-run + * jobs. + */ + public JobStatus[] jobsToComplete() throws IOException; + + /** + * Get all the jobs submitted. + * @return array of JobStatus for the submitted jobs + */ + public JobStatus[] getAllJobs() throws IOException; + + /** + * Get task completion events for the jobid, starting from fromEventId. + * Returns empty aray if no events are available. + * @param jobid job id + * @param fromEventId event id to start from. + * @param maxEvents the max number of events we want to look at + * @return array of task completion events. + * @throws IOException + */ + public TaskCompletionEvent[] getTaskCompletionEvents(JobID jobid + , int fromEventId, int maxEvents) throws IOException; + + /** + * Get the diagnostics for a given task in a given job + * @param taskId the id of the task + * @return an array of the diagnostic messages + */ + public String[] getTaskDiagnostics(TaskAttemptID taskId) + throws IOException; + + /** + * Grab the jobtracker system directory path where job-specific files are to be placed. 
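// Illustrative sketch (not part of the patch): getTaskCompletionEvents() above
// is a paged call, so a caller keeps advancing fromEventId by the number of
// events returned until an empty array comes back. The interface is
// package-private, so a helper like this would have to live in
// org.apache.hadoop.mapred; the class and method names are made up.
package org.apache.hadoop.mapred;

import java.io.IOException;

class CompletionEventPager {
  /** Print every task completion event of a job, page by page. */
  static void dumpCompletionEvents(JobSubmissionProtocol jobTracker, JobID jobId)
      throws IOException {
    int fromEventId = 0;
    final int maxEvents = 100;                      // page size per RPC
    TaskCompletionEvent[] events;
    do {
      events = jobTracker.getTaskCompletionEvents(jobId, fromEventId, maxEvents);
      for (TaskCompletionEvent event : events) {
        System.out.println(event);
      }
      fromEventId += events.length;                 // advance the cursor
    } while (events.length > 0);
  }
}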
+ * + * @return the system directory where job-specific files are to be placed. + */ + public String getSystemDir(); + + /** + * Gets set of Job Queues associated with the Job Tracker + * + * @return Array of the Job Queue Information Object + * @throws IOException + */ + public JobQueueInfo[] getQueues() throws IOException; + + /** + * Gets scheduling information associated with the particular Job queue + * + * @param queue Queue Name + * @return Scheduling Information of the Queue + * @throws IOException + */ + public JobQueueInfo getQueueInfo(String queue) throws IOException; + + /** + * Gets all the jobs submitted to the particular Queue + * @param queue Queue name + * @return array of JobStatus for the submitted jobs + * @throws IOException + */ + public JobStatus[] getJobsFromQueue(String queue) throws IOException; + + /** + * Gets the Queue ACLs for current user + * @return array of QueueAclsInfo object for current user. + * @throws IOException + */ + public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException; +} diff --git a/src/mapred/org/apache/hadoop/mapred/JobTracker.java b/src/mapred/org/apache/hadoop/mapred/JobTracker.java new file mode 100644 index 0000000..d882062 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JobTracker.java @@ -0,0 +1,4939 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
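// Illustrative sketch (not part of the patch): a caller reaches the
// JobSubmissionProtocol above as an RPC proxy and then uses its read-only
// calls. Since the interface is package-private it is normally driven through
// JobClient; this standalone sketch therefore sits in org.apache.hadoop.mapred
// and assumes mapred.job.tracker holds a host:port pair (the default below is
// made up).
package org.apache.hadoop.mapred;

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;

class ClusterStatusDump {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    InetSocketAddress addr =
        NetUtils.createSocketAddr(conf.get("mapred.job.tracker", "localhost:9001"));

    JobSubmissionProtocol jobTracker =
        (JobSubmissionProtocol) RPC.getProxy(JobSubmissionProtocol.class,
            JobSubmissionProtocol.versionID, addr, conf);
    try {
      ClusterStatus status = jobTracker.getClusterStatus(false);
      System.out.println("trackers: " + status.getTaskTrackers()
          + ", map slots: " + status.getMaxMapTasks()
          + ", reduce slots: " + status.getMaxReduceTasks());
      System.out.println("system dir: " + jobTracker.getSystemDir());
      for (JobStatus job : jobTracker.jobsToComplete()) {
        System.out.println("running/pending: " + job.getJobID());
      }
    } finally {
      RPC.stopProxy(jobTracker);
    }
  }
}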
+ */ +package org.apache.hadoop.mapred; + + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.PrintWriter; +import java.io.InputStreamReader; +import java.io.OutputStreamWriter; +import java.io.Writer; +import java.net.BindException; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; +import java.util.Vector; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.Executors; + +import javax.security.auth.login.LoginException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocalDirAllocator; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.http.HttpServer; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.ipc.RPC.VersionMismatch; +import org.apache.hadoop.mapred.JobHistory.Keys; +import org.apache.hadoop.mapred.JobHistory.Listener; +import org.apache.hadoop.mapred.JobHistory.Values; +import org.apache.hadoop.mapred.JobInProgress.KillInterruptedException; +import org.apache.hadoop.mapred.JobStatusChangeEvent.EventType; +import org.apache.hadoop.mapred.CleanupQueue.PathDeletionContext; +import org.apache.hadoop.mapred.TaskTrackerStatus.TaskTrackerHealthStatus; +import org.apache.hadoop.net.DNSToSwitchMapping; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.net.NetworkTopology; +import org.apache.hadoop.net.Node; +import org.apache.hadoop.net.NodeBase; +import org.apache.hadoop.net.ScriptBasedMapping; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.PermissionChecker; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UnixUserGroupInformation; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.security.authorize.ConfiguredPolicy; +import org.apache.hadoop.security.authorize.PolicyProvider; +import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol; +import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; +import org.apache.hadoop.util.HostsFileReader; +import org.apache.hadoop.util.MRAsyncDiskService; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.VersionInfo; + +import org.apache.hadoop.mapreduce.ClusterMetrics; +import 
org.apache.hadoop.mapreduce.TaskType; +import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; + +/******************************************************* + * JobTracker is the central location for submitting and + * tracking MR jobs in a network environment. + * + *******************************************************/ +public class JobTracker implements MRConstants, InterTrackerProtocol, + JobSubmissionProtocol, TaskTrackerManager, + RefreshAuthorizationPolicyProtocol, AdminOperationsProtocol { + + static{ + Configuration.addDefaultResource("mapred-default.xml"); + Configuration.addDefaultResource("mapred-site.xml"); + } + + static long TASKTRACKER_EXPIRY_INTERVAL = 10 * 60 * 1000; + static long RETIRE_JOB_INTERVAL; + static long RETIRE_JOB_CHECK_INTERVAL; + + + // The interval after which one fault of a tracker will be discarded, + // if there are no faults during this. + private static long UPDATE_FAULTY_TRACKER_INTERVAL = 24 * 60 * 60 * 1000; + // The maximum percentage of trackers in cluster added + // to the 'blacklist' across all the jobs. + private static double MAX_BLACKLIST_PERCENT = 0.50; + // A tracker is blacklisted across jobs only if number of + // blacklists are X% above the average number of blacklists. + // X is the blacklist threshold here. + private double AVERAGE_BLACKLIST_THRESHOLD = 0.50; + // The maximum number of blacklists for a tracker after which the + // tracker could be blacklisted across all jobs + private int MAX_BLACKLISTS_PER_TRACKER = 4; + + // Approximate number of heartbeats that could arrive JobTracker + // in a second + static final String JT_HEARTBEATS_IN_SECOND = "mapred.heartbeats.in.second"; + private int NUM_HEARTBEATS_IN_SECOND; + private final int DEFAULT_NUM_HEARTBEATS_IN_SECOND = 100; + private final int MIN_NUM_HEARTBEATS_IN_SECOND = 1; + + // Constants for cache expiry + private long CLEAR_CACHE_INTERVAL = 0; // milliseconds + private long EXPIRE_CACHE_THRESHOLD = 0; // milliseconds + + // Scaling factor for heartbeats, used for testing only + static final String JT_HEARTBEATS_SCALING_FACTOR = + "mapreduce.jobtracker.heartbeats.scaling.factor"; + private float HEARTBEATS_SCALING_FACTOR; + private final float MIN_HEARTBEATS_SCALING_FACTOR = 0.01f; + private final float DEFAULT_HEARTBEATS_SCALING_FACTOR = 1.0f; + + public static enum State { INITIALIZING, RUNNING } + State state = State.INITIALIZING; + private static final int FS_ACCESS_RETRY_PERIOD = 10000; + + private DNSToSwitchMapping dnsToSwitchMapping; + NetworkTopology clusterMap = new NetworkTopology(); + private int numTaskCacheLevels; // the max level to which we cache tasks + + private static Clock clock = null; + + static final Clock DEFAULT_CLOCK = new Clock(); + /** + * {@link #nodesAtMaxLevel} is using the keySet from {@link ConcurrentHashMap} + * so that it can be safely written to and iterated on via 2 separate threads. + * Note: It can only be iterated from a single thread which is feasible since + * the only iteration is done in {@link JobInProgress} under the + * {@link JobTracker} lock. 
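// Illustrative sketch (not part of the patch): the nodesAtMaxLevel field
// described in the javadoc above is built with the Collections.newSetFromMap
// idiom, which turns a ConcurrentHashMap's keySet into a thread-safe Set.
// String elements are used here so the snippet stands alone; the JobTracker
// stores Node objects.
import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class ConcurrentSetSketch {
  public static void main(String[] args) {
    // Additions are thread-safe, and the view can be iterated by one thread
    // while other threads keep inserting.
    Set<String> nodesAtMaxLevel =
        Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());

    nodesAtMaxLevel.add("/rack1/node1");
    nodesAtMaxLevel.add("/rack2/node7");

    for (String node : nodesAtMaxLevel) {
      System.out.println(node);
    }
  }
}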
+ */ + private Set nodesAtMaxLevel = + Collections.newSetFromMap(new ConcurrentHashMap()); + final TaskScheduler taskScheduler; + private final ResourceReporter resourceReporter; + private final List jobInProgressListeners = + new CopyOnWriteArrayList(); + + private static final LocalDirAllocator lDirAlloc = + new LocalDirAllocator("mapred.local.dir"); + // system directories are world-wide readable and owner readable + final static FsPermission SYSTEM_DIR_PERMISSION = + FsPermission.createImmutable((short) 0733); // rwx-wx-wx + + // system files should have 700 permission + final static FsPermission SYSTEM_FILE_PERMISSION = + FsPermission.createImmutable((short) 0700); // rwx------ + + private MRAsyncDiskService asyncDiskService; + + // set before starting JobTracker shutdown + private volatile boolean shutdown = false; + + /** + * A client tried to submit a job before the Job Tracker was ready. + */ + public static class IllegalStateException extends IOException { + public IllegalStateException(String msg) { + super(msg); + } + } + + /** + * The maximum no. of 'completed' (successful/failed/killed) + * jobs kept in memory per-user. + */ + final int MAX_COMPLETE_USER_JOBS_IN_MEMORY; + + /** + * The minimum time (in ms) that a job's information has to remain + * in the JobTracker's memory before it is retired. + */ + static int MIN_TIME_BEFORE_RETIRE = 0; + + /** + * If this is set, then the next iteration of RetireJob thread will + * retire most completed jobs + */ + static volatile boolean RETIRE_COMPLETED_JOBS = false; + + + private final AtomicInteger nextJobId = new AtomicInteger(1); + + public static final Log LOG = LogFactory.getLog(JobTracker.class); + + /** + * Returns JobTracker's clock. Note that the correct clock implementation will + * be obtained only when the JobTracker is initialized. If the JobTracker is + * not initialized then the default clock i.e {@link Clock} is returned. + */ + static Clock getClock() { + return clock == null ? DEFAULT_CLOCK : clock; + } + + /** + * Start the JobTracker with given configuration. + * + * The conf will be modified to reflect the actual ports on which + * the JobTracker is up and running if the user passes the port as + * zero. + * + * @param conf configuration for the JobTracker. 
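// Illustrative sketch (not part of the patch): the SYSTEM_DIR_PERMISSION and
// SYSTEM_FILE_PERMISSION constants above are octal literals; printing them as
// FsPermission values shows the symbolic form the inline comments refer to.
import org.apache.hadoop.fs.permission.FsPermission;

public class PermissionSketch {
  public static void main(String[] args) {
    FsPermission systemDir = FsPermission.createImmutable((short) 0733);
    FsPermission systemFile = FsPermission.createImmutable((short) 0700);
    System.out.println("system dir : " + systemDir);   // rwx-wx-wx
    System.out.println("system file: " + systemFile);  // rwx------
  }
}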
+ * @throws IOException + */ + public static JobTracker startTracker(JobConf conf + ) throws IOException, + InterruptedException { + return startTracker(conf, generateNewIdentifier()); + } + + public static JobTracker startTracker(JobConf conf, String identifier) + throws IOException, InterruptedException { + JobTracker result = null; + while (true) { + try { + result = new JobTracker(conf, identifier); + result.taskScheduler.setTaskTrackerManager(result); + break; + } catch (VersionMismatch e) { + throw e; + } catch (BindException e) { + throw e; + } catch (UnknownHostException e) { + throw e; + } catch (AccessControlException ace) { + // in case of jobtracker not having right access + // bail out + throw ace; + } catch (IOException e) { + LOG.warn("Error starting tracker: " + + StringUtils.stringifyException(e)); + } + Thread.sleep(1000); + } + if (result != null) { + JobEndNotifier.startNotifier(); + } + return result; + } + + public void stopTracker() throws IOException { + JobEndNotifier.stopNotifier(); + close(); + } + + public long getProtocolVersion(String protocol, + long clientVersion) throws IOException { + if (protocol.equals(InterTrackerProtocol.class.getName())) { + return InterTrackerProtocol.versionID; + } else if (protocol.equals(JobSubmissionProtocol.class.getName())){ + return JobSubmissionProtocol.versionID; + } else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName())){ + return RefreshAuthorizationPolicyProtocol.versionID; + } else if (protocol.equals(AdminOperationsProtocol.class.getName())){ + return AdminOperationsProtocol.versionID; + } else { + throw new IOException("Unknown protocol to job tracker: " + protocol); + } + } + + /** + * A thread to timeout tasks that have been assigned to task trackers, + * but that haven't reported back yet. + * Note that I included a stop() method, even though there is no place + * where JobTrackers are cleaned up. + */ + private class ExpireLaunchingTasks implements Runnable { + /** + * This is a map of the tasks that have been assigned to task trackers, + * but that have not yet been seen in a status report. + * map: task-id -> time-assigned + */ + private Map launchingTasks = + new LinkedHashMap(); + + public void run() { + while (!shutdown) { + try { + // Every 3 minutes check for any tasks that are overdue + Thread.sleep(TASKTRACKER_EXPIRY_INTERVAL/3); + long now = getClock().getTime(); + LOG.debug("Starting launching task sweep"); + synchronized (JobTracker.this) { + synchronized (launchingTasks) { + Iterator> itr = + launchingTasks.entrySet().iterator(); + while (itr.hasNext()) { + Map.Entry pair = itr.next(); + TaskAttemptID taskId = pair.getKey(); + long age = now - (pair.getValue()).longValue(); + LOG.info(taskId + " is " + age + " ms debug."); + if (age > TASKTRACKER_EXPIRY_INTERVAL) { + LOG.info("Launching task " + taskId + " timed out."); + TaskInProgress tip = null; + tip = taskidToTIPMap.get(taskId); + if (tip != null) { + JobInProgress job = tip.getJob(); + String trackerName = getAssignedTracker(taskId); + TaskTrackerStatus trackerStatus = + getTaskTrackerStatus(trackerName); + + // This might happen when the tasktracker has already + // expired and this thread tries to call failedtask + // again. expire tasktracker should have called failed + // task! + if (trackerStatus != null) + job.failedTask(tip, taskId, "Error launching task", + tip.isMapTask()? 
TaskStatus.Phase.MAP: + TaskStatus.Phase.STARTING, + TaskStatus.State.FAILED, + trackerName); + } + itr.remove(); + } else { + // the tasks are sorted by start time, so once we find + // one that we want to keep, we are done for this cycle. + break; + } + } + } + } + } catch (InterruptedException ie) { + // ignore. if shutting down, while cond. will catch it + } catch (Exception e) { + LOG.error("Expire Launching Task Thread got exception: " + + StringUtils.stringifyException(e)); + } + } + } + + public void addNewTask(TaskAttemptID taskName) { + synchronized (launchingTasks) { + launchingTasks.put(taskName, + getClock().getTime()); + } + } + + public void removeTask(TaskAttemptID taskName) { + synchronized (launchingTasks) { + launchingTasks.remove(taskName); + } + } + } + + /////////////////////////////////////////////////////// + // Used to expire TaskTrackers that have gone down + /////////////////////////////////////////////////////// + class ExpireTrackers implements Runnable { + + public ExpireTrackers() { + } + /** + * The run method lives for the life of the JobTracker, and removes TaskTrackers + * that have not checked in for some time. + */ + public void run() { + while (!shutdown) { + try { + // + // Thread runs periodically to check whether trackers should be expired. + // The sleep interval must be no more than half the maximum expiry time + // for a task tracker. + // + Thread.sleep(TASKTRACKER_EXPIRY_INTERVAL / 3); + + // + // Loop through all expired items in the queue + // + // Need to lock the JobTracker here since we are + // manipulating it's data-structures via + // ExpireTrackers.run -> JobTracker.lostTaskTracker -> + // JobInProgress.failedTask -> JobTracker.markCompleteTaskAttempt + // Also need to lock JobTracker before locking 'taskTracker' & + // 'trackerExpiryQueue' to prevent deadlock: + // @see {@link JobTracker.processHeartbeat(TaskTrackerStatus, boolean)} + synchronized (JobTracker.this) { + synchronized (taskTrackers) { + synchronized (trackerExpiryQueue) { + long now = getClock().getTime(); + TaskTrackerStatus leastRecent = null; + while ((trackerExpiryQueue.size() > 0) && + (leastRecent = trackerExpiryQueue.first()) != null && + ((now - leastRecent.getLastSeen()) > TASKTRACKER_EXPIRY_INTERVAL)) { + + + // Remove profile from head of queue + trackerExpiryQueue.remove(leastRecent); + String trackerName = leastRecent.getTrackerName(); + + // Figure out if last-seen time should be updated, or if tracker is dead + TaskTracker current = getTaskTracker(trackerName); + TaskTrackerStatus newProfile = + (current == null ) ? null : current.getStatus(); + // Items might leave the taskTracker set through other means; the + // status stored in 'taskTrackers' might be null, which means the + // tracker has already been destroyed. + if (newProfile != null) { + if ((now - newProfile.getLastSeen()) > TASKTRACKER_EXPIRY_INTERVAL) { + removeTracker(current); + // remove the mapping from the hosts list + String hostname = newProfile.getHost(); + hostnameToTaskTracker.get(hostname).remove(trackerName); + } else { + // Update time by inserting latest profile + trackerExpiryQueue.add(newProfile); + } + } + } + } + } + } + } catch (InterruptedException iex) { + // ignore. if shutting down, while cond. 
will catch it + } catch (Exception t) { + LOG.error("Tracker Expiry Thread got exception: " + + StringUtils.stringifyException(t)); + } + } + } + + } + + synchronized void historyFileCopied(JobID jobid, String historyFile) { + JobInProgress job = getJob(jobid); + if (job != null) { //found in main cache + job.setHistoryFileCopied(); + if (historyFile != null) { + job.setHistoryFile(historyFile); + } + return; + } + RetireJobInfo jobInfo = retireJobs.get(jobid); + if (jobInfo != null) { //found in retired cache + if (historyFile != null) { + jobInfo.setHistoryFile(historyFile); + } + } + } + + /** + * An Attempt and it's corresponding TaskInProgress + * There is a unique TIP per Attempt. Hence the attempt + * can be used as the unique key to identify this tuple + * (in a Collection for example) + */ + public static final class TaskAttemptIDWithTip + implements Comparable { + public final TaskAttemptID attemptId; + public final TaskInProgress tip; + + public TaskAttemptIDWithTip(TaskAttemptID attemptId, TaskInProgress tip) { + this.attemptId = attemptId; + this.tip = tip; + } + + public boolean equals(Object o) { + TaskAttemptIDWithTip that = (TaskAttemptIDWithTip)o; + return this.attemptId.equals(that.attemptId); + } + + public int hashCode() { + return attemptId.hashCode(); + } + + public int compareTo(TaskAttemptIDWithTip that) { + return this.attemptId.compareTo(that.attemptId); + } + } + + static class RetireJobInfo { + final JobStatus status; + final JobProfile profile; + final long finishTime; + private String historyFile; + RetireJobInfo(JobStatus status, JobProfile profile, long finishTime, + String historyFile) { + this.status = status; + this.profile = profile; + this.finishTime = finishTime; + this.historyFile = historyFile; + } + void setHistoryFile(String file) { + this.historyFile = file; + } + String getHistoryFile() { + return historyFile; + } + } + /////////////////////////////////////////////////////// + // Used to remove old finished Jobs that have been around for too long + /////////////////////////////////////////////////////// + class RetireJobs implements Runnable { + private final Map jobIDStatusMap = + new HashMap(); + private final LinkedList jobRetireInfoQ = + new LinkedList(); + + public RetireJobs() { + } + + synchronized void addToCache(JobInProgress job) { + RetireJobInfo info = new RetireJobInfo(job.getStatus(), + job.getProfile(), job.getFinishTime(), job.getHistoryFile()); + jobRetireInfoQ.add(info); + jobIDStatusMap.put(info.status.getJobID(), info); + if (jobRetireInfoQ.size() > retiredJobsCacheSize) { + RetireJobInfo removed = jobRetireInfoQ.remove(); + jobIDStatusMap.remove(removed.status.getJobID()); + LOG.info("Retired job removed from cache " + removed.status.getJobID()); + } + } + + synchronized RetireJobInfo get(JobID jobId) { + return jobIDStatusMap.get(jobId); + } + + @SuppressWarnings("unchecked") + synchronized LinkedList getAll() { + return (LinkedList) jobRetireInfoQ.clone(); + } + + synchronized LinkedList getAllJobStatus() { + LinkedList list = new LinkedList(); + for (RetireJobInfo info : jobRetireInfoQ) { + list.add(info.status); + } + return list; + } + + private boolean minConditionToRetire(JobInProgress job, long now) { + return job.getStatus().getRunState() != JobStatus.RUNNING && + job.getStatus().getRunState() != JobStatus.PREP && + (job.getFinishTime() + MIN_TIME_BEFORE_RETIRE < now) && + (job.isHistoryFileCopied() || JobHistory.isDisableHistory()); + } + /** + * The run method lives for the life of the JobTracker, + * and removes 
Jobs that are not still running, but which + * finished a long time ago. + */ + public void run() { + while (!shutdown) { + try { + Thread.sleep(RETIRE_JOB_CHECK_INTERVAL); + List retiredJobs = new ArrayList(); + long now = getClock().getTime(); + long retireBefore = now - RETIRE_JOB_INTERVAL; + + synchronized (jobs) { + for(JobInProgress job: jobs.values()) { + if (minConditionToRetire(job, now) && + (RETIRE_COMPLETED_JOBS || (job.getFinishTime() < retireBefore))) { + retiredJobs.add(job); + } + } + RETIRE_COMPLETED_JOBS = false; // all completed jobs are now almost retired. + } + synchronized (userToJobsMap) { + Iterator>> + userToJobsMapIt = userToJobsMap.entrySet().iterator(); + while (userToJobsMapIt.hasNext()) { + Map.Entry> entry = + userToJobsMapIt.next(); + ArrayList userJobs = entry.getValue(); + Iterator it = userJobs.iterator(); + while (it.hasNext() && + userJobs.size() > MAX_COMPLETE_USER_JOBS_IN_MEMORY) { + JobInProgress jobUser = it.next(); + if (retiredJobs.contains(jobUser)) { + LOG.info("Removing from userToJobsMap: " + + jobUser.getJobID()); + it.remove(); + } else if (minConditionToRetire(jobUser, now)) { + LOG.info("User limit exceeded. Marking job: " + + jobUser.getJobID() + " for retire."); + retiredJobs.add(jobUser); + it.remove(); + } + } + if (userJobs.isEmpty()) { + userToJobsMapIt.remove(); + } + } + } + if (!retiredJobs.isEmpty()) { + List toBeDeleted = new ArrayList(); + synchronized (JobTracker.this) { + synchronized (jobs) { + synchronized (taskScheduler) { + for (JobInProgress job: retiredJobs) { + removeJobTasks(job); + jobs.remove(job.getProfile().getJobID()); + for (JobInProgressListener l : jobInProgressListeners) { + l.jobRemoved(job); + } + String jobUser = job.getProfile().getUser(); + LOG.info("Retired job with id: '" + + job.getProfile().getJobID() + "' of user '" + + jobUser + "'"); + toBeDeleted.add(job.getProfile().getJobID()); + addToCache(job); + } + } + } + } + for (JobID id : toBeDeleted) { + // clean up job files from the local disk + JobHistory.JobInfo.cleanupJob(id); + } + } + } catch (InterruptedException t) { + // ignore. if shutting down, while cond. 
will catch it + } catch (Throwable t) { + LOG.error("Error in retiring job:\n" + + StringUtils.stringifyException(t)); + } + } + } + } + + ///////////////////////////////////////////////////////////////////// + // Used to expire files in cache that hasn't been accessed for a while + ///////////////////////////////////////////////////////////////////// + + // This class is called every CLEAR_CACHE_INTERVAL seconds + private class ExpireUnusedFilesInCache implements Runnable { + JobTracker jt = null; + + final Path sharedPath; + + final Path[] cachePath; + + public ExpireUnusedFilesInCache(JobTracker jt, Path sharedPath) { + this.jt = jt; + this.sharedPath = sharedPath; + + this.cachePath = new Path[3]; + this.cachePath[0] = new Path(sharedPath, "files"); + this.cachePath[1] = new Path(sharedPath, "archives"); + this.cachePath[2] = new Path(sharedPath, "libjars"); + } + + public void run() { + long currentTime = getClock().getTime(); + + for (int i = 0; i < cachePath.length; i++) { + try { + if (!fs.exists(cachePath[i])) continue; + + FileStatus[] fStatus = fs.listStatus(cachePath[i]); + + for (int j = 0; j < fStatus.length; j++) { + if (!fStatus[j].isDir()) { + long atime = fStatus[j].getAccessTime(); + + if (currentTime - atime > EXPIRE_CACHE_THRESHOLD) { + fs.delete(fStatus[j].getPath(), false); + } + } + } + } catch (IOException ioe) { + LOG.error("IOException when clearing cache"); + } + } + } + } + + enum ReasonForBlackListing { + EXCEEDING_FAILURES, + NODE_UNHEALTHY + } + + // The FaultInfo which indicates the number of faults of a tracker + // and when the last fault occurred + // and whether the tracker is blacklisted across all jobs or not + private static class FaultInfo { + static final String FAULT_FORMAT_STRING = "%d failures on the tracker"; + int numFaults = 0; + long lastUpdated; + boolean blacklisted; + + private boolean isHealthy; + private HashMaprfbMap; + + FaultInfo() { + numFaults = 0; + lastUpdated = getClock().getTime(); + blacklisted = false; + rfbMap = new HashMap(); + } + + void setFaultCount(int num) { + numFaults = num; + } + + void setLastUpdated(long timeStamp) { + lastUpdated = timeStamp; + } + + int getFaultCount() { + return numFaults; + } + + long getLastUpdated() { + return lastUpdated; + } + + boolean isBlacklisted() { + return blacklisted; + } + + void setBlacklist(ReasonForBlackListing rfb, + String trackerFaultReport) { + blacklisted = true; + this.rfbMap.put(rfb, trackerFaultReport); + } + + public void setHealthy(boolean isHealthy) { + this.isHealthy = isHealthy; + } + + public boolean isHealthy() { + return isHealthy; + } + + public String getTrackerFaultReport() { + StringBuffer sb = new StringBuffer(); + for(String reasons : rfbMap.values()) { + sb.append(reasons); + sb.append("\n"); + } + return sb.toString(); + } + + Set getReasonforblacklisting() { + return this.rfbMap.keySet(); + } + + public void unBlacklist() { + this.blacklisted = false; + this.rfbMap.clear(); + } + + public boolean removeBlackListedReason(ReasonForBlackListing rfb) { + String str = rfbMap.remove(rfb); + return str!=null; + } + + public void addBlackListedReason(ReasonForBlackListing rfb, String reason) { + this.rfbMap.put(rfb, reason); + } + + } + + private class FaultyTrackersInfo { + // A map from hostName to its faults + private Map potentiallyFaultyTrackers = + new HashMap(); + // This count gives the number of blacklisted trackers in the cluster + // at any time. 
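// Illustrative sketch (not part of the patch): ExpireLaunchingTasks,
// ExpireTrackers and ExpireUnusedFilesInCache above all follow the same
// pattern -- remember when an entry was added (or last seen) and periodically
// sweep out anything older than a timeout. With an insertion-ordered map the
// sweep can stop at the first entry that is still fresh. Plain String ids are
// used so the snippet stands alone.
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;

public class ExpirySweepSketch {
  private static final long EXPIRY_INTERVAL = 10 * 60 * 1000L;   // 10 minutes

  // task-id -> time the task was handed out, in insertion order
  private final Map<String, Long> launchingTasks = new LinkedHashMap<String, Long>();

  synchronized void addNewTask(String taskId) {
    launchingTasks.put(taskId, System.currentTimeMillis());
  }

  /** One sweep: drop every entry older than EXPIRY_INTERVAL. */
  synchronized void sweep() {
    long now = System.currentTimeMillis();
    Iterator<Map.Entry<String, Long>> it = launchingTasks.entrySet().iterator();
    while (it.hasNext()) {
      Map.Entry<String, Long> entry = it.next();
      if (now - entry.getValue() > EXPIRY_INTERVAL) {
        System.out.println("expiring " + entry.getKey());
        it.remove();
      } else {
        break;   // later entries were added later, so they are still fresh
      }
    }
  }
}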
This is maintained to avoid iteration over + // the potentiallyFaultyTrackers to get blacklisted trackers. And also + // this count doesn't include blacklisted trackers which are lost, + // although the fault info is maintained for lost trackers. + private volatile int numBlacklistedTrackers = 0; + + /** + * Increments faults(blacklist by job) for the tracker by one. + * + * Adds the tracker to the potentially faulty list. + * Assumes JobTracker is locked on the entry. + * + * @param hostName + */ + void incrementFaults(String hostName) { + synchronized (potentiallyFaultyTrackers) { + FaultInfo fi = getFaultInfo(hostName, true); + int numFaults = fi.getFaultCount(); + ++numFaults; + fi.setFaultCount(numFaults); + fi.setLastUpdated(getClock().getTime()); + if (exceedsFaults(fi)) { + LOG.info("Adding " + hostName + " to the blacklist" + + " across all jobs"); + String reason = String.format(FaultInfo.FAULT_FORMAT_STRING, + numFaults); + blackListTracker(hostName, reason, + ReasonForBlackListing.EXCEEDING_FAILURES); + } + } + } + + private void incrBlackListedTrackers(int count) { + numBlacklistedTrackers += count; + getInstrumentation().addBlackListedTrackers(count); + } + + private void decrBlackListedTrackers(int count) { + numBlacklistedTrackers -= count; + getInstrumentation().decBlackListedTrackers(count); + } + + private void blackListTracker(String hostName, String reason, ReasonForBlackListing rfb) { + FaultInfo fi = getFaultInfo(hostName, true); + boolean blackListed = fi.isBlacklisted(); + if(blackListed) { + if (LOG.isDebugEnabled()) { + LOG.debug("Adding blacklisted reason for tracker : " + hostName + + " Reason for blacklisting is : " + rfb); + } + if (!fi.getReasonforblacklisting().contains(rfb)) { + LOG.info("Adding blacklisted reason for tracker : " + hostName + + " Reason for blacklisting is : " + rfb); + } + fi.addBlackListedReason(rfb, reason); + } else { + LOG.info("Blacklisting tracker : " + hostName + + " Reason for blacklisting is : " + rfb); + Set trackers = + hostnameToTaskTracker.get(hostName); + synchronized (trackers) { + for (TaskTracker tracker : trackers) { + tracker.cancelAllReservations(); + } + } + removeHostCapacity(hostName); + fi.setBlacklist(rfb, reason); + } + } + + private boolean canUnBlackListTracker(String hostName, + ReasonForBlackListing rfb) { + FaultInfo fi = getFaultInfo(hostName, false); + if(fi == null) { + return false; + } + + Set rfbSet = fi.getReasonforblacklisting(); + return fi.isBlacklisted() && rfbSet.contains(rfb); + } + + private void unBlackListTracker(String hostName, + ReasonForBlackListing rfb) { + // check if you can black list the tracker then call this methods + FaultInfo fi = getFaultInfo(hostName, false); + if(fi.removeBlackListedReason(rfb)) { + if(fi.getReasonforblacklisting().isEmpty()) { + addHostCapacity(hostName); + LOG.info("Unblacklisting tracker : " + hostName); + fi.unBlacklist(); + //We have unBlackListed tracker, so tracker should + //definitely be healthy. Check fault count if fault count + //is zero don't keep it memory. 
+ if(fi.numFaults == 0) { + potentiallyFaultyTrackers.remove(hostName); + } + } + } + } + + // Assumes JobTracker is locked on the entry + private FaultInfo getFaultInfo(String hostName, + boolean createIfNeccessary) { + FaultInfo fi = null; + synchronized (potentiallyFaultyTrackers) { + fi = potentiallyFaultyTrackers.get(hostName); + if (fi == null && createIfNeccessary) { + fi = new FaultInfo(); + potentiallyFaultyTrackers.put(hostName, fi); + } + } + return fi; + } + + /** + * Blacklists the tracker across all jobs if + *
+ * <ol>
+ * <li>#faults are more than
+ *     MAX_BLACKLISTS_PER_TRACKER (configurable) blacklists</li>
+ * <li>#faults is 50% (configurable) above the average #faults</li>
+ * <li>50% the cluster is not blacklisted yet</li>
+ * </ol>
+ */ + private boolean exceedsFaults(FaultInfo fi) { + int faultCount = fi.getFaultCount(); + if (faultCount >= MAX_BLACKLISTS_PER_TRACKER) { + // calculate avgBlackLists + long clusterSize = getClusterStatus().getTaskTrackers(); + long sum = 0; + for (FaultInfo f : potentiallyFaultyTrackers.values()) { + sum += f.getFaultCount(); + } + double avg = (double) sum / clusterSize; + + long totalCluster = clusterSize + numBlacklistedTrackers; + if ((faultCount - avg) > (AVERAGE_BLACKLIST_THRESHOLD * avg) && + numBlacklistedTrackers < (totalCluster * MAX_BLACKLIST_PERCENT)) { + return true; + } + } + return false; + } + + /** + * Removes the tracker from blacklist and + * from potentially faulty list, when it is restarted. + * + * Assumes JobTracker is locked on the entry. + * + * @param hostName + */ + void markTrackerHealthy(String hostName) { + synchronized (potentiallyFaultyTrackers) { + FaultInfo fi = potentiallyFaultyTrackers.remove(hostName); + if (fi != null && fi.isBlacklisted()) { + LOG.info("Removing " + hostName + " from blacklist"); + addHostCapacity(hostName); + } + } + } + + /** + * Check whether tasks can be assigned to the tracker. + * + * One fault of the tracker is discarded if there + * are no faults during one day. So, the tracker will get a + * chance again to run tasks of a job. + * Assumes JobTracker is locked on the entry. + * + * @param hostName The tracker name + * @param now The current time + * + * @return true if the tracker is blacklisted + * false otherwise + */ + boolean shouldAssignTasksToTracker(String hostName, long now) { + synchronized (potentiallyFaultyTrackers) { + FaultInfo fi = potentiallyFaultyTrackers.get(hostName); + if (fi != null && + (now - fi.getLastUpdated()) > UPDATE_FAULTY_TRACKER_INTERVAL) { + int numFaults = fi.getFaultCount() - 1; + fi.setFaultCount(numFaults); + fi.setLastUpdated(now); + if (canUnBlackListTracker(hostName, + ReasonForBlackListing.EXCEEDING_FAILURES)) { + unBlackListTracker(hostName, + ReasonForBlackListing.EXCEEDING_FAILURES); + } + } + return (fi != null && fi.isBlacklisted()); + } + } + + private void removeHostCapacity(String hostName) { + synchronized (taskTrackers) { + // remove the capacity of trackers on this host + int numTrackersOnHost = 0; + for (TaskTrackerStatus status : getStatusesOnHost(hostName)) { + int mapSlots = status.getMaxMapSlots(); + totalMapTaskCapacity -= mapSlots; + int reduceSlots = status.getMaxReduceSlots(); + totalReduceTaskCapacity -= reduceSlots; + ++numTrackersOnHost; + getInstrumentation().addBlackListedMapSlots( + mapSlots); + getInstrumentation().addBlackListedReduceSlots( + reduceSlots); + } + uniqueHostsMap.remove(hostName); + incrBlackListedTrackers(numTrackersOnHost); + } + } + + // This is called on tracker's restart or after a day of blacklist. + private void addHostCapacity(String hostName) { + synchronized (taskTrackers) { + int numTrackersOnHost = 0; + // add the capacity of trackers on the host + for (TaskTrackerStatus status : getStatusesOnHost(hostName)) { + int mapSlots = status.getMaxMapSlots(); + totalMapTaskCapacity += mapSlots; + int reduceSlots = status.getMaxReduceSlots(); + totalReduceTaskCapacity += reduceSlots; + numTrackersOnHost++; + getInstrumentation().decBlackListedMapSlots(mapSlots); + getInstrumentation().decBlackListedReduceSlots(reduceSlots); + } + uniqueHostsMap.put(hostName, + numTrackersOnHost); + decrBlackListedTrackers(numTrackersOnHost); + } + } + + /** + * Whether a host is blacklisted across all the jobs. 
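// Illustrative sketch (not part of the patch): the decision made by
// exceedsFaults() above, restated with made-up numbers. A tracker is
// blacklisted across jobs only when it has at least MAX_BLACKLISTS_PER_TRACKER
// faults, its fault count is more than AVERAGE_BLACKLIST_THRESHOLD (50%) above
// the cluster average, and blacklisting it keeps the blacklisted share below
// MAX_BLACKLIST_PERCENT of the cluster.
public class BlacklistPolicySketch {
  static final int MAX_BLACKLISTS_PER_TRACKER = 4;
  static final double AVERAGE_BLACKLIST_THRESHOLD = 0.50;
  static final double MAX_BLACKLIST_PERCENT = 0.50;

  static boolean exceedsFaults(int faultCount, long totalFaults,
                               long clusterSize, int numBlacklisted) {
    if (faultCount < MAX_BLACKLISTS_PER_TRACKER) {
      return false;
    }
    double avg = (double) totalFaults / clusterSize;
    long totalCluster = clusterSize + numBlacklisted;
    return (faultCount - avg) > (AVERAGE_BLACKLIST_THRESHOLD * avg)
        && numBlacklisted < (totalCluster * MAX_BLACKLIST_PERCENT);
  }

  public static void main(String[] args) {
    // 10 live trackers, 20 faults in total (average 2), nothing blacklisted yet:
    // 6 faults trips all three conditions (6 >= 4, 6 - 2 > 0.5 * 2, 0 < 5),
    // while 3 faults does not even reach the per-tracker minimum.
    System.out.println(exceedsFaults(6, 20, 10, 0));   // true
    System.out.println(exceedsFaults(3, 20, 10, 0));   // false
  }
}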
+ * + * Assumes JobTracker is locked on the entry. + * @param hostName + * @return + */ + boolean isBlacklisted(String hostName) { + synchronized (potentiallyFaultyTrackers) { + FaultInfo fi = null; + if ((fi = potentiallyFaultyTrackers.get(hostName)) != null) { + return fi.isBlacklisted(); + } + } + return false; + } + + // Assumes JobTracker is locked on the entry. + int getFaultCount(String hostName) { + synchronized (potentiallyFaultyTrackers) { + FaultInfo fi = null; + if ((fi = potentiallyFaultyTrackers.get(hostName)) != null) { + return fi.getFaultCount(); + } + } + return 0; + } + + // Assumes JobTracker is locked on the entry. + Set getReasonForBlackListing(String hostName) { + synchronized (potentiallyFaultyTrackers) { + FaultInfo fi = null; + if ((fi = potentiallyFaultyTrackers.get(hostName)) != null) { + return fi.getReasonforblacklisting(); + } + } + return null; + } + + + // Assumes JobTracker is locked on the entry. + void setNodeHealthStatus(String hostName, boolean isHealthy, String reason) { + FaultInfo fi = null; + // If tracker is not healthy, create a fault info object + // blacklist it. + if (!isHealthy) { + fi = getFaultInfo(hostName, true); + fi.setHealthy(isHealthy); + synchronized (potentiallyFaultyTrackers) { + blackListTracker(hostName, reason, + ReasonForBlackListing.NODE_UNHEALTHY); + } + } else { + fi = getFaultInfo(hostName, false); + if (fi == null) { + return; + } else { + if (canUnBlackListTracker(hostName, + ReasonForBlackListing.NODE_UNHEALTHY)) { + unBlackListTracker(hostName, ReasonForBlackListing.NODE_UNHEALTHY); + } + } + } + } + } + + /** + * Get all task tracker statuses on given host + * + * Assumes JobTracker is locked on the entry + * @param hostName + * @return {@link java.util.List} of {@link TaskTrackerStatus} + */ + private List getStatusesOnHost(String hostName) { + List statuses = new ArrayList(); + synchronized (taskTrackers) { + for (TaskTracker tt : taskTrackers.values()) { + TaskTrackerStatus status = tt.getStatus(); + if (hostName.equals(status.getHost())) { + statuses.add(status); + } + } + } + return statuses; + } + + /////////////////////////////////////////////////////// + // Used to recover the jobs upon restart + /////////////////////////////////////////////////////// + class RecoveryManager { + Set jobsToRecover; // set of jobs to be recovered + + private int totalEventsRecovered = 0; + private int restartCount = 0; + private boolean shouldRecover = false; + + Set recoveredTrackers = + Collections.synchronizedSet(new HashSet()); + + /** A custom listener that replays the events in the order in which the + * events (task attempts) occurred. + */ + class JobRecoveryListener implements Listener { + // The owner job + private JobInProgress jip; + + private JobHistory.JobInfo job; // current job's info object + + // Maintain the count of the (attempt) events recovered + private int numEventsRecovered = 0; + + // Maintains open transactions + private Map hangingAttempts = + new HashMap(); + + // Whether there are any updates for this job + private boolean hasUpdates = false; + + public JobRecoveryListener(JobInProgress jip) { + this.jip = jip; + this.job = new JobHistory.JobInfo(jip.getJobID().toString()); + } + + /** + * Process a task. Note that a task might commit a previously pending + * transaction. 
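// Illustrative sketch (not part of the patch): JobRecoveryListener above is a
// JobHistory.Listener that replays logged task-attempt events in order. A
// minimal listener of the same shape just counts record types; driving it with
// JobHistory.parseHistoryFromFS over a history file path (args[0] below) is an
// assumption made for illustration.
import java.io.IOException;
import java.util.EnumMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.JobHistory;

public class HistoryRecordCounter implements JobHistory.Listener {
  private final Map<JobHistory.RecordTypes, Integer> counts =
      new EnumMap<JobHistory.RecordTypes, Integer>(JobHistory.RecordTypes.class);

  public void handle(JobHistory.RecordTypes recType,
                     Map<JobHistory.Keys, String> values) throws IOException {
    Integer seen = counts.get(recType);
    counts.put(recType, seen == null ? 1 : seen + 1);
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    HistoryRecordCounter counter = new HistoryRecordCounter();
    JobHistory.parseHistoryFromFS(args[0], counter, fs);
    System.out.println(counter.counts);
  }
}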
+ */ + private void processTask(String taskId, JobHistory.Task task) { + // Any TASK info commits the previous transaction + boolean hasHanging = hangingAttempts.remove(taskId) != null; + if (hasHanging) { + numEventsRecovered += 2; + } + + TaskID id = TaskID.forName(taskId); + TaskInProgress tip = getTip(id); + + updateTip(tip, task); + } + + /** + * Adds a task-attempt in the listener + */ + private void processTaskAttempt(String taskAttemptId, + JobHistory.TaskAttempt attempt) { + TaskAttemptID id = TaskAttemptID.forName(taskAttemptId); + + // Check if the transaction for this attempt can be committed + String taskStatus = attempt.get(Keys.TASK_STATUS); + TaskAttemptID taskID = TaskAttemptID.forName(taskAttemptId); + JobInProgress jip = getJob(taskID.getJobID()); + JobStatus prevStatus = (JobStatus)jip.getStatus().clone(); + + if (taskStatus.length() > 0) { + // This means this is an update event + if (taskStatus.equals(Values.SUCCESS.name())) { + // Mark this attempt as hanging + hangingAttempts.put(id.getTaskID().toString(), taskAttemptId); + addSuccessfulAttempt(jip, id, attempt); + } else { + addUnsuccessfulAttempt(jip, id, attempt); + numEventsRecovered += 2; + } + } else { + createTaskAttempt(jip, id, attempt); + } + + JobStatus newStatus = (JobStatus)jip.getStatus().clone(); + if (prevStatus.getRunState() != newStatus.getRunState()) { + if(LOG.isDebugEnabled()) + LOG.debug("Status changed hence informing prevStatus" + prevStatus + " currentStatus "+ newStatus); + JobStatusChangeEvent event = + new JobStatusChangeEvent(jip, EventType.RUN_STATE_CHANGED, + prevStatus, newStatus); + updateJobInProgressListeners(event); + } + } + + public void handle(JobHistory.RecordTypes recType, Map values) throws IOException { + if (recType == JobHistory.RecordTypes.Job) { + // Update the meta-level job information + job.handle(values); + + // Forcefully init the job as we have some updates for it + checkAndInit(); + } else if (recType.equals(JobHistory.RecordTypes.Task)) { + String taskId = values.get(Keys.TASKID); + + // Create a task + JobHistory.Task task = new JobHistory.Task(); + task.handle(values); + + // Ignore if its a cleanup task + if (isCleanup(task)) { + return; + } + + // Process the task i.e update the tip state + processTask(taskId, task); + } else if (recType.equals(JobHistory.RecordTypes.MapAttempt)) { + String attemptId = values.get(Keys.TASK_ATTEMPT_ID); + + // Create a task attempt + JobHistory.MapAttempt attempt = new JobHistory.MapAttempt(); + attempt.handle(values); + + // Ignore if its a cleanup task + if (isCleanup(attempt)) { + return; + } + + // Process the attempt i.e update the attempt state via job + processTaskAttempt(attemptId, attempt); + } else if (recType.equals(JobHistory.RecordTypes.ReduceAttempt)) { + String attemptId = values.get(Keys.TASK_ATTEMPT_ID); + + // Create a task attempt + JobHistory.ReduceAttempt attempt = new JobHistory.ReduceAttempt(); + attempt.handle(values); + + // Ignore if its a cleanup task + if (isCleanup(attempt)) { + return; + } + + // Process the attempt i.e update the job state via job + processTaskAttempt(attemptId, attempt); + } + } + + // Check if the task is of type CLEANUP + private boolean isCleanup(JobHistory.Task task) { + String taskType = task.get(Keys.TASK_TYPE); + return Values.CLEANUP.name().equals(taskType); + } + + // Init the job if its ready for init. 
Also make sure that the scheduler + // is updated + private void checkAndInit() throws IOException { + String jobStatus = this.job.get(Keys.JOB_STATUS); + if (Values.PREP.name().equals(jobStatus)) { + hasUpdates = true; + LOG.info("Calling init from RM for job " + jip.getJobID().toString()); + try { + initJob(jip); + } catch (Throwable t) { + LOG.error(jip.getJobID() + ": Job initialization failed : \n" + + StringUtils.stringifyException(t)); + failJob(jip); + throw new IOException(t); + } + } + } + + void close() { + if (hasUpdates) { + // Apply the final (job-level) updates + JobStatusChangeEvent event = updateJob(jip, job); + + synchronized (JobTracker.this) { + // Update the job listeners + updateJobInProgressListeners(event); + } + } + } + + public int getNumEventsRecovered() { + return numEventsRecovered; + } + + } + + public RecoveryManager() { + jobsToRecover = new TreeSet(); + } + + public boolean contains(JobID id) { + return jobsToRecover.contains(id); + } + + void addJobForRecovery(JobID id) { + jobsToRecover.add(id); + } + + public boolean shouldRecover() { + return shouldRecover; + } + + public boolean shouldSchedule() { + return recoveredTrackers.isEmpty(); + } + + private void markTracker(String trackerName) { + recoveredTrackers.add(trackerName); + } + + void unMarkTracker(String trackerName) { + recoveredTrackers.remove(trackerName); + } + + Set getJobsToRecover() { + return jobsToRecover; + } + + /** Check if the given string represents a job-id or not + */ + private boolean isJobNameValid(String str) { + if(str == null) { + return false; + } + String[] parts = str.split("_"); + if(parts.length == 3) { + if(parts[0].equals("job")) { + // other 2 parts should be parseable + return JobTracker.validateIdentifier(parts[1]) + && JobTracker.validateJobNumber(parts[2]); + } + } + return false; + } + + // checks if the job dir has the required files + public void checkAndAddJob(FileStatus status) throws IOException { + String fileName = status.getPath().getName(); + if (isJobNameValid(fileName)) { + if (JobClient.isJobDirValid(status.getPath(), fs)) { + recoveryManager.addJobForRecovery(JobID.forName(fileName)); + shouldRecover = true; // enable actual recovery if num-files > 1 + } else { + LOG.info("Found an incomplete job directory " + fileName + "." 
+ + " Deleting it!!"); + fs.delete(status.getPath(), true); + } + } + } + + private JobStatusChangeEvent updateJob(JobInProgress jip, + JobHistory.JobInfo job) { + // Change the job priority + String jobpriority = job.get(Keys.JOB_PRIORITY); + JobPriority priority = JobPriority.valueOf(jobpriority); + // It's important to update this via the jobtracker's api as it will + // take care of updating the event listeners too + setJobPriority(jip.getJobID(), priority); + + // Save the previous job status + JobStatus oldStatus = (JobStatus)jip.getStatus().clone(); + + // Set the start/launch time only if there are recovered tasks + // Increment the job's restart count + jip.updateJobInfo(job.getLong(JobHistory.Keys.SUBMIT_TIME), + job.getLong(JobHistory.Keys.LAUNCH_TIME)); + + // Save the new job status + JobStatus newStatus = (JobStatus)jip.getStatus().clone(); + + return new JobStatusChangeEvent(jip, EventType.START_TIME_CHANGED, oldStatus, + newStatus); + } + + private void updateTip(TaskInProgress tip, JobHistory.Task task) { + long startTime = task.getLong(Keys.START_TIME); + if (startTime != 0) { + tip.setExecStartTime(startTime); + } + + long finishTime = task.getLong(Keys.FINISH_TIME); + // For failed tasks finish-time will be missing + if (finishTime != 0) { + tip.setExecFinishTime(finishTime); + } + + String cause = task.get(Keys.TASK_ATTEMPT_ID); + if (cause.length() > 0) { + // This means that the this is a FAILED events + TaskAttemptID id = TaskAttemptID.forName(cause); + TaskStatus status = tip.getTaskStatus(id); + synchronized (JobTracker.this) { + // This will add the tip failed event in the new log + tip.getJob().failedTask(tip, id, status.getDiagnosticInfo(), + status.getPhase(), status.getRunState(), + status.getTaskTracker()); + } + } + } + + private void createTaskAttempt(JobInProgress job, + TaskAttemptID attemptId, + JobHistory.TaskAttempt attempt) { + TaskID id = attemptId.getTaskID(); + String type = attempt.get(Keys.TASK_TYPE); + TaskInProgress tip = job.getTaskInProgress(id); + + // I. Get the required info + TaskStatus taskStatus = null; + String trackerName = attempt.get(Keys.TRACKER_NAME); + String trackerHostName = + JobInProgress.convertTrackerNameToHostName(trackerName); + // recover the port information. + int port = 0; // default to 0 + String hport = attempt.get(Keys.HTTP_PORT); + if (hport != null && hport.length() > 0) { + port = attempt.getInt(Keys.HTTP_PORT); + } + + long attemptStartTime = attempt.getLong(Keys.START_TIME); + + // II. Create the (appropriate) task status + if (type.equals(Values.MAP.name())) { + taskStatus = + new MapTaskStatus(attemptId, 0.0f, job.getNumSlotsPerTask(TaskType.MAP), + TaskStatus.State.RUNNING, "", "", trackerName, + TaskStatus.Phase.MAP, new Counters()); + } else { + taskStatus = + new ReduceTaskStatus(attemptId, 0.0f, job.getNumSlotsPerTask(TaskType.REDUCE), + TaskStatus.State.RUNNING, "", "", trackerName, + TaskStatus.Phase.REDUCE, new Counters()); + } + + // Set the start time + taskStatus.setStartTime(attemptStartTime); + + List ttStatusList = new ArrayList(); + ttStatusList.add(taskStatus); + + // III. Create the dummy tasktracker status + TaskTrackerStatus ttStatus = + new TaskTrackerStatus(trackerName, trackerHostName, port, ttStatusList, + 0 , 0, 0); + ttStatus.setLastSeen(getClock().getTime()); + + synchronized (JobTracker.this) { + synchronized (taskTrackers) { + synchronized (trackerExpiryQueue) { + // IV. 
Register a new tracker + TaskTracker taskTracker = getTaskTracker(trackerName); + boolean isTrackerRegistered = (taskTracker != null); + if (!isTrackerRegistered) { + markTracker(trackerName); // add the tracker to recovery-manager + taskTracker = new TaskTracker(trackerName); + taskTracker.setStatus(ttStatus); + addNewTracker(taskTracker); + } + + // V. Update the tracker status + // This will update the meta info of the jobtracker and also add the + // tracker status if missing i.e register it + updateTaskTrackerStatus(trackerName, ttStatus); + } + } + // Register the attempt with job and tip, under JobTracker lock. + // Since, as of today they are atomic through heartbeat. + // VI. Register the attempt + // a) In the job + job.addRunningTaskToTIP(tip, attemptId, ttStatus, false); + + createTaskEntry(attemptId, trackerName, tip); + + // b) In the tip + tip.updateStatus(taskStatus); + } + + // VII. Make an entry in the launched tasks + expireLaunchingTasks.addNewTask(attemptId); + } + + private void addSuccessfulAttempt(JobInProgress job, + TaskAttemptID attemptId, + JobHistory.TaskAttempt attempt) { + // I. Get the required info + TaskID taskId = attemptId.getTaskID(); + String type = attempt.get(Keys.TASK_TYPE); + + TaskInProgress tip = job.getTaskInProgress(taskId); + long attemptFinishTime = attempt.getLong(Keys.FINISH_TIME); + + // Get the task status and the tracker name and make a copy of it + TaskStatus taskStatus = (TaskStatus)tip.getTaskStatus(attemptId).clone(); + taskStatus.setFinishTime(attemptFinishTime); + + String stateString = attempt.get(Keys.STATE_STRING); + + // Update the basic values + taskStatus.setStateString(stateString); + taskStatus.setProgress(1.0f); + taskStatus.setRunState(TaskStatus.State.SUCCEEDED); + + // Set the shuffle/sort finished times + if (type.equals(Values.REDUCE.name())) { + long shuffleTime = + Long.parseLong(attempt.get(Keys.SHUFFLE_FINISHED)); + long sortTime = + Long.parseLong(attempt.get(Keys.SORT_FINISHED)); + taskStatus.setShuffleFinishTime(shuffleTime); + taskStatus.setSortFinishTime(sortTime); + } + + // Add the counters + String counterString = attempt.get(Keys.COUNTERS); + Counters counter = null; + //TODO Check if an exception should be thrown + try { + counter = Counters.fromEscapedCompactString(counterString); + } catch (ParseException pe) { + counter = new Counters(); // Set it to empty counter + } + taskStatus.setCounters(counter); + + synchronized (JobTracker.this) { + // II. Replay the status + job.updateTaskStatus(tip, taskStatus); + } + + // III. Prevent the task from expiry + expireLaunchingTasks.removeTask(attemptId); + } + + private void addUnsuccessfulAttempt(JobInProgress job, + TaskAttemptID attemptId, + JobHistory.TaskAttempt attempt) { + // I. 
Get the required info + TaskID taskId = attemptId.getTaskID(); + TaskInProgress tip = job.getTaskInProgress(taskId); + long attemptFinishTime = attempt.getLong(Keys.FINISH_TIME); + + TaskStatus taskStatus = (TaskStatus)tip.getTaskStatus(attemptId).clone(); + taskStatus.setFinishTime(attemptFinishTime); + + // Reset the progress + taskStatus.setProgress(0.0f); + + String stateString = attempt.get(Keys.STATE_STRING); + taskStatus.setStateString(stateString); + + boolean hasFailed = + attempt.get(Keys.TASK_STATUS).equals(Values.FAILED.name()); + // Set the state failed/killed + if (hasFailed) { + taskStatus.setRunState(TaskStatus.State.FAILED); + } else { + taskStatus.setRunState(TaskStatus.State.KILLED); + } + + // Get/Set the error msg + String diagInfo = attempt.get(Keys.ERROR); + taskStatus.setDiagnosticInfo(diagInfo); // diag info + + synchronized (JobTracker.this) { + // II. Update the task status + job.updateTaskStatus(tip, taskStatus); + } + + // III. Prevent the task from expiry + expireLaunchingTasks.removeTask(attemptId); + } + + Path getRestartCountFile() { + return new Path(getSystemDir(), "jobtracker.info"); + } + + Path getTempRestartCountFile() { + return new Path(getSystemDir(), "jobtracker.info.recover"); + } + + /** + * Initialize the recovery process. It simply creates a jobtracker.info file + * in the jobtracker's system directory and writes its restart count in it. + * For the first start, the jobtracker writes '0' in it. Upon subsequent + * restarts the jobtracker replaces the count with its current count which + * is (old count + 1). The whole purpose of this api is to obtain restart + * counts across restarts to avoid attempt-id clashes. + * + * Note that in between if the jobtracker.info files goes missing then the + * jobtracker will disable recovery and continue. + * + */ + void updateRestartCount() throws IOException { + Path restartFile = getRestartCountFile(); + Path tmpRestartFile = getTempRestartCountFile(); + FileSystem fs = restartFile.getFileSystem(conf); + FsPermission filePerm = new FsPermission(SYSTEM_FILE_PERMISSION); + + // read the count from the jobtracker info file + if (fs.exists(restartFile)) { + fs.delete(tmpRestartFile, false); // delete the tmp file + } else if (fs.exists(tmpRestartFile)) { + // if .rec exists then delete the main file and rename the .rec to main + fs.rename(tmpRestartFile, restartFile); // rename .rec to main file + } else { + // For the very first time the jobtracker will create a jobtracker.info + // file. If the jobtracker has restarted then disable recovery as files' + // needed for recovery are missing. + + // disable recovery if this is a restart + shouldRecover = false; + + // write the jobtracker.info file + try { + FSDataOutputStream out = FileSystem.create(fs, restartFile, + filePerm); + out.writeInt(0); + out.close(); + } catch (IOException ioe) { + LOG.warn("Writing to file " + restartFile + " failed!"); + LOG.warn("FileSystem is not ready yet!"); + fs.delete(restartFile, false); + throw ioe; + } + return; + } + + FSDataInputStream in = fs.open(restartFile); + try { + // read the old count + restartCount = in.readInt(); + ++restartCount; // increment the restart count + } catch (IOException ioe) { + LOG.warn("System directory is garbled. Failed to read file " + + restartFile); + LOG.warn("Jobtracker recovery is not possible with garbled" + + " system directory! Please delete the system directory and" + + " restart the jobtracker. 
Note that deleting the system" + + " directory will result in loss of all the running jobs."); + throw new RuntimeException(ioe); + } finally { + if (in != null) { + in.close(); + } + } + + // Write back the new restart count and rename the old info file + //TODO This is similar to jobhistory recovery, maybe this common code + // can be factored out. + + // write to the tmp file + FSDataOutputStream out = FileSystem.create(fs, tmpRestartFile, filePerm); + out.writeInt(restartCount); + out.close(); + + // delete the main file + fs.delete(restartFile, false); + + // rename the .rec to main file + fs.rename(tmpRestartFile, restartFile); + } + + public void recover() { + if (!shouldRecover()) { + // clean up jobs structure + jobsToRecover.clear(); + return; + } + + LOG.info("Restart count of the jobtracker : " + restartCount); + + // I. Init the jobs and cache the recovered job history filenames + Map jobHistoryFilenameMap = new HashMap(); + Iterator idIter = jobsToRecover.iterator(); + JobInProgress job = null; + File jobIdFile = null; + + // 0. Cleanup + try { + JobHistory.JobInfo.deleteConfFiles(); + } catch (IOException ioe) { + LOG.info("Error in cleaning up job history folder", ioe); + } + + while (idIter.hasNext()) { + JobID id = idIter.next(); + LOG.info("Trying to recover details of job " + id); + try { + // 1. Recover job owner and create JIP + jobIdFile = + new File(lDirAlloc.getLocalPathToRead(SUBDIR + "/" + id, conf).toString()); + + String user = null; + if (jobIdFile != null && jobIdFile.exists()) { + LOG.info("File " + jobIdFile + " exists for job " + id); + FileInputStream in = new FileInputStream(jobIdFile); + BufferedReader reader = null; + try { + reader = new BufferedReader(new InputStreamReader(in)); + user = reader.readLine(); + LOG.info("Recovered user " + user + " for job " + id); + } finally { + if (reader != null) { + reader.close(); + } + in.close(); + } + } + if (user == null) { + throw new RuntimeException("Incomplete job " + id); + } + + // Create the job + job = new JobInProgress(id, JobTracker.this, conf, user, + restartCount); + + // 2. Check if the user has appropriate access + // Get the user group info for the job's owner + UserGroupInformation ugi = + UserGroupInformation.readFrom(job.getJobConf()); + LOG.info("Submitting job " + id + " on behalf of user " + + ugi.getUserName() + " in groups : " + + StringUtils.arrayToString(ugi.getGroupNames())); + + // check the access + try { + checkAccess(job, QueueManager.QueueOperation.SUBMIT_JOB, ugi); + } catch (Throwable t) { + LOG.warn("Access denied for user " + ugi.getUserName() + + " in groups : [" + + StringUtils.arrayToString(ugi.getGroupNames()) + "]"); + throw t; + } + + // 3. Get the log file and the file path + String logFileName = + JobHistory.JobInfo.getJobHistoryFileName(job.getJobConf(), id); + if (logFileName != null) { + Path jobHistoryFilePath = + JobHistory.JobInfo.getJobHistoryLogLocation(logFileName); + + // 4. Recover the history file. This involved + // - deleting file.recover if file exists + // - renaming file.recover to file if file doesnt exist + // This makes sure that the (master) file exists + JobHistory.JobInfo.recoverJobHistoryFile(job.getJobConf(), + jobHistoryFilePath); + + // 5. Cache the history file name as it costs one dfs access + jobHistoryFilenameMap.put(job.getJobID(), jobHistoryFilePath); + } else { + LOG.info("No history file found for job " + id); + idIter.remove(); // remove from recovery list + } + + // 6. 
Sumbit the job to the jobtracker + addJob(id, job); + } catch (Throwable t) { + LOG.warn("Failed to recover job " + id + " Ignoring the job.", t); + idIter.remove(); + if (jobIdFile != null) { + jobIdFile.delete(); + jobIdFile = null; + } + if (job != null) { + job.fail(); + job = null; + } + continue; + } + } + + long recoveryStartTime = getClock().getTime(); + + // II. Recover each job + idIter = jobsToRecover.iterator(); + while (idIter.hasNext()) { + JobID id = idIter.next(); + JobInProgress pJob = getJob(id); + + // 1. Get the required info + // Get the recovered history file + Path jobHistoryFilePath = jobHistoryFilenameMap.get(pJob.getJobID()); + String logFileName = jobHistoryFilePath.getName(); + + FileSystem fs; + try { + fs = jobHistoryFilePath.getFileSystem(conf); + } catch (IOException ioe) { + LOG.warn("Failed to get the filesystem for job " + id + ". Ignoring.", + ioe); + continue; + } + + // 2. Parse the history file + // Note that this also involves job update + JobRecoveryListener listener = new JobRecoveryListener(pJob); + try { + JobHistory.parseHistoryFromFS(jobHistoryFilePath.toString(), + listener, fs); + } catch (Throwable t) { + LOG.info("Error reading history file of job " + pJob.getJobID() + + ". Ignoring the error and continuing.", t); + } + + // 3. Close the listener + listener.close(); + + // 4. Update the recovery metric + totalEventsRecovered += listener.getNumEventsRecovered(); + + // 5. Cleanup history + // Delete the master log file as an indication that the new file + // should be used in future + try { + synchronized (pJob) { + JobHistory.JobInfo.checkpointRecovery(logFileName, + pJob.getJobConf()); + } + } catch (Throwable t) { + LOG.warn("Failed to delete log file (" + logFileName + ") for job " + + id + ". Continuing.", t); + } + + if (pJob.isComplete()) { + idIter.remove(); // no need to keep this job info as its successful + } + } + + recoveryDuration = getClock().getTime() - recoveryStartTime; + hasRecovered = true; + + // III. Finalize the recovery + synchronized (trackerExpiryQueue) { + // Make sure that the tracker statuses in the expiry-tracker queue + // are updated + long now = getClock().getTime(); + int size = trackerExpiryQueue.size(); + for (int i = 0; i < size ; ++i) { + // Get the first tasktracker + TaskTrackerStatus taskTracker = trackerExpiryQueue.first(); + + // Remove it + trackerExpiryQueue.remove(taskTracker); + + // Set the new time + taskTracker.setLastSeen(now); + + // Add back to get the sorted list + trackerExpiryQueue.add(taskTracker); + } + } + + LOG.info("Restoration complete"); + } + + int totalEventsRecovered() { + return totalEventsRecovered; + } + } + + private final JobTrackerInstrumentation myInstrumentation; + + ///////////////////////////////////////////////////////////////// + // The real JobTracker + //////////////////////////////////////////////////////////////// + int port; + String localMachine; + private String trackerIdentifier; + long startTime; + int totalSubmissions = 0; + private int totalMapTaskCapacity; + private int totalReduceTaskCapacity; + private HostsFileReader hostsReader; + + // JobTracker recovery variables + private volatile boolean hasRestarted = false; + private volatile boolean hasRecovered = false; + private volatile long recoveryDuration; + + // + // Properties to maintain while running Jobs and Tasks: + // + // 1. Each Task is always contained in a single Job. A Job succeeds when all its + // Tasks are complete. + // + // 2. Every running or successful Task is assigned to a Tracker. 
Idle Tasks are not.
+   //
+   // 3. When a Tracker fails, all of its assigned Tasks are marked as failures.
+   //
+   // 4. A Task might need to be reexecuted if it (or the machine it's hosted on) fails
+   //    before the Job is 100% complete.  Sometimes an upstream Task can fail without
+   //    reexecution if all downstream Tasks that require its output have already obtained
+   //    the necessary files.
+   //
+
+   // All the known jobs.  (jobid->JobInProgress)
+   Map<JobID, JobInProgress> jobs =
+     Collections.synchronizedMap(new TreeMap<JobID, JobInProgress>());
+
+
+   // (user -> list of JobInProgress)
+   TreeMap<String, ArrayList<JobInProgress>> userToJobsMap =
+     new TreeMap<String, ArrayList<JobInProgress>>();
+
+   // (trackerID --> list of jobs to cleanup)
+   Map<String, Set<JobID>> trackerToJobsToCleanup =
+     new HashMap<String, Set<JobID>>();
+
+   // (trackerID --> list of tasks to cleanup)
+   Map<String, Set<TaskAttemptID>> trackerToTasksToCleanup =
+     new HashMap<String, Set<TaskAttemptID>>();
+
+   // All the known TaskInProgress items, mapped to by taskids (taskid->TIP)
+   Map<TaskAttemptID, TaskInProgress> taskidToTIPMap =
+     new TreeMap<TaskAttemptID, TaskInProgress>();
+   // This is used to keep track of all trackers running on one host. While
+   // decommissioning the host, all the trackers on the host will be lost.
+   Map<String, Set<TaskTracker>> hostnameToTaskTracker =
+     Collections.synchronizedMap(new TreeMap<String, Set<TaskTracker>>());
+
+
+   // (taskid --> trackerID)
+   TreeMap<TaskAttemptID, String> taskidToTrackerMap = new TreeMap<TaskAttemptID, String>();
+
+   // (trackerID->TreeSet of taskids running at that tracker)
+   HashMap<String, Set<TaskAttemptIDWithTip>> trackerToTaskMap =
+     new HashMap<String, Set<TaskAttemptIDWithTip>>();
+
+   // (trackerID -> TreeSet of completed taskids running at that tracker)
+   TreeMap<String, Set<TaskAttemptID>> trackerToMarkedTasksMap =
+     new TreeMap<String, Set<TaskAttemptID>>();
+
+   // (trackerID --> last sent HeartBeatResponse)
+   Map<String, HeartbeatResponse> trackerToHeartbeatResponseMap =
+     new TreeMap<String, HeartbeatResponse>();
+
+   // (hostname --> Node (NetworkTopology))
+   Map<String, Node> hostnameToNodeMap =
+     Collections.synchronizedMap(new TreeMap<String, Node>());
+
+   // job-id->username during staging
+   Map<JobID, String> jobToUserMap =
+     Collections.synchronizedMap(new TreeMap<JobID, String>());
+
+   // Number of resolved entries
+   int numResolved;
+
+   private FaultyTrackersInfo faultyTrackers = new FaultyTrackersInfo();
+
+   private JobTrackerStatistics statistics =
+     new JobTrackerStatistics();
+   //
+   // Watch and expire TaskTracker objects using these structures.
+   // We can map from Name->TaskTrackerStatus, or we can expire by time.
+   //
+   int totalMaps = 0;
+   int totalReduces = 0;
+   private int occupiedMapSlots = 0;
+   private int occupiedReduceSlots = 0;
+   private int reservedMapSlots = 0;
+   private int reservedReduceSlots = 0;
+   private HashMap<String, TaskTracker> taskTrackers =
+     new HashMap<String, TaskTracker>();
+   Map<String, Integer> uniqueHostsMap = new ConcurrentHashMap<String, Integer>();
+   ExpireTrackers expireTrackers = new ExpireTrackers();
+   Thread expireTrackersThread = null;
+   RetireJobs retireJobs = new RetireJobs();
+   Thread retireJobsThread = null;
+   final int retiredJobsCacheSize;
+   ExpireLaunchingTasks expireLaunchingTasks = new ExpireLaunchingTasks();
+   Thread expireLaunchingTaskThread = new Thread(expireLaunchingTasks,
+                                                 "expireLaunchingTasks");
+
+   CompletedJobStatusStore completedJobStatusStore = null;
+   Thread completedJobsStoreThread = null;
+   RecoveryManager recoveryManager;
+
+   /**
+    * It might seem like a bug to maintain a TreeSet of tasktracker objects,
+    * which can be updated at any time.  But that's not what happens!  We
+    * only update status objects in the taskTrackers table.  Status objects
+    * are never updated once they enter the expiry queue.  Instead, we wait
+    * for them to expire and remove them from the expiry queue.  If a status
+    * object has been updated in the taskTracker table, the latest status is
+    * reinserted.  Otherwise, we assume the tracker has expired.
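// Illustration of the invariant described above, in standalone form: a TreeSet
// orders its elements only at insertion time, so an entry whose sort key
// ("lastSeen") has changed must be removed and re-added, and the comparator
// breaks ties on the name so two trackers seen at the same instant are not
// collapsed into one. The Tracker type below is a stand-in, not the real
// TaskTrackerStatus.
import java.util.Comparator;
import java.util.TreeSet;

final class ExpiryQueueSketch {
  static final class Tracker {
    final String name;
    long lastSeen;
    Tracker(String name, long lastSeen) { this.name = name; this.lastSeen = lastSeen; }
  }

  // Oldest tracker first; tie-break on name so compare() == 0 only for the same tracker.
  private final TreeSet<Tracker> queue = new TreeSet<Tracker>(new Comparator<Tracker>() {
    public int compare(Tracker a, Tracker b) {
      if (a.lastSeen != b.lastSeen) {
        return a.lastSeen < b.lastSeen ? -1 : 1;
      }
      return a.name.compareTo(b.name);
    }
  });

  void add(Tracker t) { queue.add(t); }

  // The set is never re-sorted in place: refresh = remove, mutate, re-insert.
  void refresh(Tracker t, long now) {
    queue.remove(t);
    t.lastSeen = now;
    queue.add(t);
  }

  // The least recently seen tracker (the next expiry candidate) is always first.
  Tracker oldest() { return queue.isEmpty() ? null : queue.first(); }
}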
+ */ + TreeSet trackerExpiryQueue = + new TreeSet( + new Comparator() { + public int compare(TaskTrackerStatus p1, TaskTrackerStatus p2) { + if (p1.getLastSeen() < p2.getLastSeen()) { + return -1; + } else if (p1.getLastSeen() > p2.getLastSeen()) { + return 1; + } else { + return (p1.getTrackerName().compareTo(p2.getTrackerName())); + } + } + } + ); + + // Used to provide an HTML view on Job, Task, and TaskTracker structures + final HttpServer infoServer; + int infoPort; + + Server interTrackerServer; + + // Some jobs are stored in a local system directory. We can delete + // the files when we're done with the job. + static final String SUBDIR = "jobTracker"; + FileSystem fs = null; + Path systemDir = null; + JobConf conf; + private final UserGroupInformation mrOwner; + private final String supergroup; + + long limitMaxMemForMapTasks; + long limitMaxMemForReduceTasks; + long memSizeForMapSlotOnJT; + long memSizeForReduceSlotOnJT; + + private QueueManager queueManager; + + /** + * Start the JobTracker process, listen on the indicated port + */ + JobTracker(JobConf conf) throws IOException, InterruptedException { + this(conf, generateNewIdentifier()); + } + + /** + * Start the JobTracker process, listen on the indicated port + */ + JobTracker(JobConf conf, Clock clock) + throws IOException, InterruptedException { + this(conf, generateNewIdentifier()); + JobTracker.clock = clock; + } + + JobTracker(JobConf conf, String identifier) + throws IOException, InterruptedException { + // find the owner of the process + if (conf.getBoolean("hadoop.disable.shell",false)){ + conf.setStrings(UnixUserGroupInformation.UGI_PROPERTY_NAME, new String[]{"hadoop", "hadoop"}); + } + + try { + mrOwner = UnixUserGroupInformation.login(conf); + } catch (LoginException e) { + throw new IOException(StringUtils.stringifyException(e)); + } + + supergroup = conf.get("mapred.permissions.supergroup", "supergroup"); + LOG.info("Starting jobtracker with owner as " + mrOwner.getUserName() + + " and supergroup as " + supergroup); + + // + // Grab some static constants + // + TASKTRACKER_EXPIRY_INTERVAL = + conf.getLong("mapred.tasktracker.expiry.interval", 10 * 60 * 1000); + RETIRE_JOB_INTERVAL = conf.getLong("mapred.jobtracker.retirejob.interval", 24 * 60 * 60 * 1000); + RETIRE_JOB_CHECK_INTERVAL = conf.getLong("mapred.jobtracker.retirejob.check", 60 * 1000); + retiredJobsCacheSize = + conf.getInt("mapred.job.tracker.retiredjobs.cache.size", 1000); + MAX_COMPLETE_USER_JOBS_IN_MEMORY = conf.getInt("mapred.jobtracker.completeuserjobs.maximum", 100); + MIN_TIME_BEFORE_RETIRE = conf.getInt("mapred.jobtracker.mintime.before.retirejob", 0); + + MAX_BLACKLISTS_PER_TRACKER = + conf.getInt("mapred.max.tracker.blacklists", 4); + + NUM_HEARTBEATS_IN_SECOND = + conf.getInt(JT_HEARTBEATS_IN_SECOND, DEFAULT_NUM_HEARTBEATS_IN_SECOND); + if (NUM_HEARTBEATS_IN_SECOND < MIN_NUM_HEARTBEATS_IN_SECOND) { + NUM_HEARTBEATS_IN_SECOND = DEFAULT_NUM_HEARTBEATS_IN_SECOND; + } + + HEARTBEATS_SCALING_FACTOR = + conf.getFloat(JT_HEARTBEATS_SCALING_FACTOR, + DEFAULT_HEARTBEATS_SCALING_FACTOR); + if (HEARTBEATS_SCALING_FACTOR < MIN_HEARTBEATS_SCALING_FACTOR) { + HEARTBEATS_SCALING_FACTOR = DEFAULT_HEARTBEATS_SCALING_FACTOR; + } + + //This configuration is there solely for tuning purposes and + //once this feature has been tested in real clusters and an appropriate + //value for the threshold has been found, this config might be taken out. 
+ AVERAGE_BLACKLIST_THRESHOLD = + conf.getFloat("mapred.cluster.average.blacklist.threshold", 0.5f); + + // This is a directory of temporary submission files. We delete it + // on startup, and can delete any files that we're done with + this.conf = conf; + JobConf jobConf = new JobConf(conf); + + initializeTaskMemoryRelatedConfig(); + + // Read the hosts/exclude files to restrict access to the jobtracker. + this.hostsReader = new HostsFileReader(conf.get("mapred.hosts", ""), + conf.get("mapred.hosts.exclude", "")); + + Configuration queuesConf = new Configuration(this.conf); + queueManager = new QueueManager(queuesConf); + + // Create the scheduler + Class schedulerClass + = conf.getClass("mapred.jobtracker.taskScheduler", + JobQueueTaskScheduler.class, TaskScheduler.class); + taskScheduler = (TaskScheduler) ReflectionUtils.newInstance(schedulerClass, conf); + + // Create the resourceReporter + Class reporterClass + = conf.getClass("mapred.jobtracker.resourceReporter", null, + ResourceReporter.class); + if (reporterClass != null) { + resourceReporter = + (ResourceReporter) ReflectionUtils.newInstance(reporterClass, conf); + LOG.info("Resource reporter: " + reporterClass.getClass() + + " is created"); + } else { + resourceReporter = null; + LOG.warn("Resource reporter is not configured. It will be disabled."); + } + + // Set ports, start RPC servers, setup security policy etc. + InetSocketAddress addr = getAddress(conf); + this.localMachine = addr.getHostName(); + this.port = addr.getPort(); + + // Set service-level authorization security policy + if (conf.getBoolean( + ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) { + PolicyProvider policyProvider = + (PolicyProvider)(ReflectionUtils.newInstance( + conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG, + MapReducePolicyProvider.class, PolicyProvider.class), + conf)); + SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider)); + } + + int handlerCount = conf.getInt("mapred.job.tracker.handler.count", 10); + this.interTrackerServer = RPC.getServer(this, addr.getHostName(), addr.getPort(), handlerCount, false, conf); + if (LOG.isDebugEnabled()) { + Properties p = System.getProperties(); + for (Iterator it = p.keySet().iterator(); it.hasNext();) { + String key = (String) it.next(); + String val = p.getProperty(key); + LOG.debug("Property '" + key + "' is " + val); + } + } + + String infoAddr = + NetUtils.getServerAddress(conf, "mapred.job.tracker.info.bindAddress", + "mapred.job.tracker.info.port", + "mapred.job.tracker.http.address"); + InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr); + String infoBindAddress = infoSocAddr.getHostName(); + int tmpInfoPort = infoSocAddr.getPort(); + this.startTime = getClock().getTime(); + infoServer = new HttpServer("job", infoBindAddress, tmpInfoPort, + tmpInfoPort == 0, conf); + infoServer.setAttribute("job.tracker", this); + // initialize history parameters. 
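// The instrumentation wiring a few lines below loads an implementation class
// named in the configuration, invokes a known constructor reflectively, and
// falls back to a default implementation if anything at all goes wrong. A
// minimal sketch of that pattern under assumed names (Metrics, DefaultMetrics
// and the single-String constructor are illustrative, not Hadoop types):
import java.lang.reflect.Constructor;

final class ReflectiveFactorySketch {
  interface Metrics { void event(String name); }

  static final class DefaultMetrics implements Metrics {
    public void event(String name) { /* no-op fallback */ }
  }

  static Metrics create(String className, String ctorArg) {
    try {
      Class<? extends Metrics> cls =
          Class.forName(className).asSubclass(Metrics.class);
      Constructor<? extends Metrics> ctor = cls.getConstructor(String.class);
      return ctor.newInstance(ctorArg);
    } catch (Exception e) {
      // Reflection can fail in many ways (missing class, wrong constructor,
      // access problems); treat them all alike and fall back to the default.
      return new DefaultMetrics();
    }
  }
}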
+ boolean historyInitialized = JobHistory.init(this, conf, this.localMachine, + this.startTime); + + infoServer.addServlet("reducegraph", "/taskgraph", TaskGraphServlet.class); + infoServer.start(); + + this.trackerIdentifier = identifier; + + // Initialize instrumentation + JobTrackerInstrumentation tmp; + Class metricsInst = + getInstrumentationClass(jobConf); + try { + java.lang.reflect.Constructor c = + metricsInst.getConstructor(new Class[] {JobTracker.class, JobConf.class} ); + tmp = c.newInstance(this, jobConf); + } catch(Exception e) { + //Reflection can throw lots of exceptions -- handle them all by + //falling back on the default. + LOG.error("failed to initialize job tracker metrics", e); + tmp = new JobTrackerMetricsInst(this, jobConf); + } + myInstrumentation = tmp; + + int excludedAtStart = hostsReader.getExcludedHosts().size(); + myInstrumentation.setDecommissionedTrackers(excludedAtStart); + + // The rpc/web-server ports can be ephemeral ports... + // ... ensure we have the correct info + this.port = interTrackerServer.getListenerAddress().getPort(); + this.conf.set("mapred.job.tracker", (this.localMachine + ":" + this.port)); + LOG.info("JobTracker up at: " + this.port); + this.infoPort = this.infoServer.getPort(); + this.conf.set("mapred.job.tracker.http.address", + infoBindAddress + ":" + this.infoPort); + LOG.info("JobTracker webserver: " + this.infoServer.getPort()); + + // start the recovery manager + recoveryManager = new RecoveryManager(); + + // start async disk service for asynchronous deletion service + asyncDiskService = new MRAsyncDiskService(FileSystem.getLocal(jobConf), + jobConf.getLocalDirs(), conf); + + while (!Thread.currentThread().isInterrupted()) { + try { + // if we haven't contacted the namenode go ahead and do it + if (fs == null) { + fs = FileSystem.get(conf); + } + // clean up the system dir, which will only work if hdfs is out of + // safe mode + if(systemDir == null) { + systemDir = new Path(getSystemDir()); + } + // Make sure that the backup data is preserved + FileStatus[] systemDirData = fs.listStatus(this.systemDir); + // Check if the history is enabled .. as we cant have persistence with + // history disabled + if (conf.getBoolean("mapred.jobtracker.restart.recover", false) + && !JobHistory.isDisableHistory() + && systemDirData != null) { + for (FileStatus status : systemDirData) { + try { + recoveryManager.checkAndAddJob(status); + } catch (Throwable t) { + LOG.warn("Failed to add the job " + status.getPath().getName(), + t); + } + } + + // Check if there are jobs to be recovered + hasRestarted = recoveryManager.shouldRecover(); + if (hasRestarted) { + break; // if there is something to recover else clean the sys dir + } + } + LOG.info("Cleaning up the system directory"); + fs.delete(systemDir, true); + if (FileSystem.mkdirs(fs, systemDir, + new FsPermission(SYSTEM_DIR_PERMISSION))) { + break; + } + LOG.error("Mkdirs failed to create " + systemDir); + } catch (AccessControlException ace) { + LOG.warn("Failed to operate on mapred.system.dir (" + systemDir + + ") because of permissions."); + LOG.warn("Manually delete the mapred.system.dir (" + systemDir + + ") and then start the JobTracker."); + LOG.warn("Bailing out ... "); + throw ace; + } catch (IOException ie) { + LOG.info("problem cleaning system directory: " + systemDir, ie); + } + Thread.sleep(FS_ACCESS_RETRY_PERIOD); + } + + if (Thread.currentThread().isInterrupted()) { + throw new InterruptedException(); + } + + // Same with 'localDir' except it's always on the local disk. 
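// The startup loop above keeps retrying until the filesystem is usable (for
// example while HDFS is still in safe mode), sleeping between attempts and
// stopping if the thread is interrupted. The same shape in isolation; the
// Probe interface and the retry period are stand-ins for the real checks:
final class StartupRetrySketch {
  interface Probe {
    boolean ready() throws Exception;   // e.g. "could I list/clean the system dir?"
  }

  // Retry until the probe succeeds, waiting retryPeriodMs between attempts,
  // and surface interruption to the caller rather than swallowing it.
  static void waitUntilReady(Probe probe, long retryPeriodMs)
      throws InterruptedException {
    while (!Thread.currentThread().isInterrupted()) {
      try {
        if (probe.ready()) {
          return;
        }
      } catch (Exception e) {
        // transient failure: fall through and retry after the sleep
      }
      Thread.sleep(retryPeriodMs);
    }
    throw new InterruptedException();
  }
}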
+ if (!hasRestarted) { + asyncDiskService.moveAndDeleteFromEachVolume(SUBDIR); + } + + // Initialize history DONE folder + if (historyInitialized) { + JobHistory.initDone(conf, fs); + String historyLogDir = + JobHistory.getCompletedJobHistoryLocation().toString(); + infoServer.setAttribute("historyLogDir", historyLogDir); + FileSystem historyFS = new Path(historyLogDir).getFileSystem(conf); + infoServer.setAttribute("fileSys", historyFS); + } + + this.dnsToSwitchMapping = ReflectionUtils.newInstance( + conf.getClass("topology.node.switch.mapping.impl", ScriptBasedMapping.class, + DNSToSwitchMapping.class), conf); + this.numTaskCacheLevels = conf.getInt("mapred.task.cache.levels", + NetworkTopology.DEFAULT_HOST_LEVEL); + + //initializes the job status store + completedJobStatusStore = new CompletedJobStatusStore(conf); + + + // Initialize the shared cache expiry thread + // this thread needs to be started unconditionally because some clients may use + // shared caching and some may not. we cannot control it with the cache sharing + // option used by the client: + // - the client may default to cache sharing = false + // - the jobconf serialized by the JT may set cache sharing = true (because client + // does not supply a value and server applies it's own value first) + // - tasks are not correctly localized as a result + + // How long between each cache check (default is one day) + CLEAR_CACHE_INTERVAL = conf.getLong("mapred.cache.shared.check_interval", + 24 * 60 * 60 * 1000); + + // How long must a file be untouched to be purged from the cache (default + // is one day) + EXPIRE_CACHE_THRESHOLD = + conf.getLong("mapred.cache.shared.expire_threshold", + 24 * 60 * 60 * 1000); + + ExpireUnusedFilesInCache eufic = new ExpireUnusedFilesInCache(this, + new Path(getSystemDir(), "CAR")); + Executors.newScheduledThreadPool(1) + .scheduleAtFixedRate(eufic, + CLEAR_CACHE_INTERVAL, + CLEAR_CACHE_INTERVAL, + TimeUnit.MILLISECONDS); + } + + private static SimpleDateFormat getDateFormat() { + return new SimpleDateFormat("yyyyMMddHHmm"); + } + + private static String generateNewIdentifier() { + return getDateFormat().format(new Date()); + } + + static boolean validateIdentifier(String id) { + try { + // the jobtracker id should be 'date' parseable + getDateFormat().parse(id); + return true; + } catch (ParseException pe) {} + return false; + } + + static boolean validateJobNumber(String id) { + try { + // the job number should be integer parseable + Integer.parseInt(id); + return true; + } catch (IllegalArgumentException pe) {} + return false; + } + + /** + * Whether the JT has restarted + */ + public boolean hasRestarted() { + return hasRestarted; + } + + /** + * Whether the JT has recovered upon restart + */ + public boolean hasRecovered() { + return hasRecovered; + } + + /** + * How long the jobtracker took to recover from restart. + */ + public long getRecoveryDuration() { + return hasRestarted() + ? 
recoveryDuration + : 0; + } + + public static Class getInstrumentationClass(Configuration conf) { + return conf.getClass("mapred.jobtracker.instrumentation", + JobTrackerMetricsInst.class, JobTrackerInstrumentation.class); + } + + public static void setInstrumentationClass(Configuration conf, Class t) { + conf.setClass("mapred.jobtracker.instrumentation", + t, JobTrackerInstrumentation.class); + } + + JobTrackerInstrumentation getInstrumentation() { + return myInstrumentation; + } + + public static InetSocketAddress getAddress(Configuration conf) { + String jobTrackerStr = + conf.get("mapred.job.tracker", "localhost:8012"); + return NetUtils.createSocketAddr(jobTrackerStr); + } + + /** + * Run forever + */ + public void offerService() throws InterruptedException, IOException { + // Prepare for recovery. This is done irrespective of the status of restart + // flag. + while (true) { + try { + recoveryManager.updateRestartCount(); + break; + } catch (IOException ioe) { + LOG.warn("Failed to initialize recovery manager. ", ioe); + // wait for some time + Thread.sleep(FS_ACCESS_RETRY_PERIOD); + LOG.warn("Retrying..."); + } + } + + taskScheduler.start(); + + // Start the recovery after starting the scheduler + try { + recoveryManager.recover(); + } catch (Throwable t) { + LOG.warn("Recovery manager crashed! Ignoring.", t); + } + // refresh the node list as the recovery manager might have added + // disallowed trackers + refreshHosts(); + + this.expireTrackersThread = new Thread(this.expireTrackers, + "expireTrackers"); + this.expireTrackersThread.setDaemon(true); + this.expireTrackersThread.start(); + this.retireJobsThread = new Thread(this.retireJobs, "retireJobs"); + this.retireJobsThread.setDaemon(true); + this.retireJobsThread.start(); + expireLaunchingTaskThread.setDaemon(true); + expireLaunchingTaskThread.start(); + + if (completedJobStatusStore.isActive()) { + completedJobsStoreThread = new Thread(completedJobStatusStore, + "completedjobsStore-housekeeper"); + completedJobsStoreThread.start(); + } + + // start the inter-tracker server once the jt is ready + this.interTrackerServer.start(); + + synchronized (this) { + state = State.RUNNING; + } + LOG.info("Starting RUNNING"); + + this.interTrackerServer.join(); + LOG.info("Stopped interTrackerServer"); + } + + void close() throws IOException { + if (this.infoServer != null) { + LOG.info("Stopping infoServer"); + try { + this.infoServer.stop(); + } catch (Exception ex) { + LOG.warn("Exception shutting down JobTracker", ex); + } + } + if (this.interTrackerServer != null) { + LOG.info("Stopping interTrackerServer"); + this.interTrackerServer.stop(); + } + + shutdown = true; + + if (this.expireTrackersThread != null && this.expireTrackersThread.isAlive()) { + LOG.info("Stopping expireTrackers"); + this.expireTrackersThread.interrupt(); + try { + this.expireTrackersThread.join(); + } catch (InterruptedException ex) { + ex.printStackTrace(); + } + } + if (this.retireJobsThread != null && this.retireJobsThread.isAlive()) { + LOG.info("Stopping retirer"); + this.retireJobsThread.interrupt(); + try { + this.retireJobsThread.join(); + } catch (InterruptedException ex) { + ex.printStackTrace(); + } + } + if (taskScheduler != null) { + taskScheduler.terminate(); + } + if (this.expireLaunchingTaskThread != null && this.expireLaunchingTaskThread.isAlive()) { + LOG.info("Stopping expireLaunchingTasks"); + this.expireLaunchingTaskThread.interrupt(); + try { + this.expireLaunchingTaskThread.join(); + } catch (InterruptedException ex) { + 
ex.printStackTrace(); + } + } + if (this.completedJobsStoreThread != null && + this.completedJobsStoreThread.isAlive()) { + LOG.info("Stopping completedJobsStore thread"); + this.completedJobsStoreThread.interrupt(); + try { + this.completedJobsStoreThread.join(); + } catch (InterruptedException ex) { + ex.printStackTrace(); + } + } + LOG.info("stopped all jobtracker services"); + return; + } + + /////////////////////////////////////////////////////// + // Maintain lookup tables; called by JobInProgress + // and TaskInProgress + /////////////////////////////////////////////////////// + void createTaskEntry(TaskAttemptID taskid, String taskTracker, TaskInProgress tip) { + LOG.info("Adding task (" + tip.getAttemptType(taskid) + ") " + + "'" + taskid + "' to tip " + + tip.getTIPId() + ", for tracker '" + taskTracker + "'"); + + // taskid --> tracker + taskidToTrackerMap.put(taskid, taskTracker); + + // tracker --> taskid + Set taskset = trackerToTaskMap.get(taskTracker); + if (taskset == null) { + taskset = new HashSet(); + trackerToTaskMap.put(taskTracker, taskset); + } + taskset.add(new TaskAttemptIDWithTip(taskid, tip)); + + // taskid --> TIP + taskidToTIPMap.put(taskid, tip); + + } + + void removeTaskEntry(TaskAttemptID taskid) { + // taskid --> tracker + String tracker = taskidToTrackerMap.remove(taskid); + + // tracker --> taskid + if (tracker != null) { + Set taskset = trackerToTaskMap.get(tracker); + if (taskset != null) { + taskset.remove(new TaskAttemptIDWithTip(taskid, null)); + } + } + + // taskid --> TIP + if (taskidToTIPMap.remove(taskid) != null) { + LOG.info("Removing task '" + taskid + "'"); + } + } + + /** + * Mark a 'task' for removal later. + * This function assumes that the JobTracker is locked on entry. + * + * @param taskTracker the tasktracker at which the 'task' was running + * @param taskid completed (success/failure/killed) task + */ + void markCompletedTaskAttempt(String taskTracker, TaskAttemptID taskid) { + // tracker --> taskid + Set taskset = trackerToMarkedTasksMap.get(taskTracker); + if (taskset == null) { + taskset = new TreeSet(); + trackerToMarkedTasksMap.put(taskTracker, taskset); + } + taskset.add(taskid); + + LOG.debug("Marked '" + taskid + "' from '" + taskTracker + "'"); + } + + /** + * Mark all 'non-running' jobs of the job for pruning. + * This function assumes that the JobTracker is locked on entry. 
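// createTaskEntry/removeTaskEntry above keep a forward map (task -> tracker)
// and a reverse map (tracker -> tasks) consistent with each other. The same
// bookkeeping reduced to plain Strings for illustration:
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

final class TaskIndexSketch {
  private final Map<String, String> taskToTracker = new HashMap<String, String>();
  private final Map<String, Set<String>> trackerToTasks = new HashMap<String, Set<String>>();

  void add(String task, String tracker) {
    taskToTracker.put(task, tracker);
    Set<String> tasks = trackerToTasks.get(tracker);
    if (tasks == null) {
      tasks = new HashSet<String>();
      trackerToTasks.put(tracker, tasks);
    }
    tasks.add(task);
  }

  void remove(String task) {
    // Remove from the forward map first, then use the old value to fix the reverse map.
    String tracker = taskToTracker.remove(task);
    if (tracker != null) {
      Set<String> tasks = trackerToTasks.get(tracker);
      if (tasks != null) {
        tasks.remove(task);
        if (tasks.isEmpty()) {
          trackerToTasks.remove(tracker);
        }
      }
    }
  }
}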
+ * + * @param job the completed job + */ + void markCompletedJob(JobInProgress job) { + for (TaskInProgress tip : job.getTasks(TaskType.JOB_SETUP)) { + for (TaskStatus taskStatus : tip.getTaskStatuses()) { + if (taskStatus.getRunState() != TaskStatus.State.RUNNING && + taskStatus.getRunState() != TaskStatus.State.COMMIT_PENDING && + taskStatus.getRunState() != TaskStatus.State.UNASSIGNED) { + markCompletedTaskAttempt(taskStatus.getTaskTracker(), + taskStatus.getTaskID()); + } + } + } + for (TaskInProgress tip : job.getTasks(TaskType.MAP)) { + for (TaskStatus taskStatus : tip.getTaskStatuses()) { + if (taskStatus.getRunState() != TaskStatus.State.RUNNING && + taskStatus.getRunState() != TaskStatus.State.COMMIT_PENDING && + taskStatus.getRunState() != TaskStatus.State.FAILED_UNCLEAN && + taskStatus.getRunState() != TaskStatus.State.KILLED_UNCLEAN && + taskStatus.getRunState() != TaskStatus.State.UNASSIGNED) { + markCompletedTaskAttempt(taskStatus.getTaskTracker(), + taskStatus.getTaskID()); + } + } + } + for (TaskInProgress tip : job.getTasks(TaskType.REDUCE)) { + for (TaskStatus taskStatus : tip.getTaskStatuses()) { + if (taskStatus.getRunState() != TaskStatus.State.RUNNING && + taskStatus.getRunState() != TaskStatus.State.COMMIT_PENDING && + taskStatus.getRunState() != TaskStatus.State.FAILED_UNCLEAN && + taskStatus.getRunState() != TaskStatus.State.KILLED_UNCLEAN && + taskStatus.getRunState() != TaskStatus.State.UNASSIGNED) { + markCompletedTaskAttempt(taskStatus.getTaskTracker(), + taskStatus.getTaskID()); + } + } + } + } + + /** + * Remove all 'marked' tasks running on a given {@link TaskTracker} + * from the {@link JobTracker}'s data-structures. + * This function assumes that the JobTracker is locked on entry. + * + * @param taskTracker tasktracker whose 'non-running' tasks are to be purged + */ + void removeMarkedTasks(String taskTracker) { + // Purge all the 'marked' tasks which were running at taskTracker + Set markedTaskSet = + trackerToMarkedTasksMap.get(taskTracker); + if (markedTaskSet != null) { + for (TaskAttemptID taskid : markedTaskSet) { + removeTaskEntry(taskid); + if (LOG.isDebugEnabled()) { + LOG.debug("Removed marked completed task '" + taskid + "' from '" + + taskTracker + "'"); + } + } + // Clear + trackerToMarkedTasksMap.remove(taskTracker); + } + } + + /** + * Call {@link #removeTaskEntry(String)} for each of the + * job's tasks. + * When the JobTracker is retiring the long-completed + * job, either because it has outlived {@link #RETIRE_JOB_INTERVAL} + * or the limit of {@link #MAX_COMPLETE_USER_JOBS_IN_MEMORY} jobs + * has been reached, we can afford to nuke all it's tasks; a little + * unsafe, but practically feasible. + * + * @param job the job about to be 'retired' + */ + synchronized void removeJobTasks(JobInProgress job) { + // iterate over all the task types + for (TaskType type : TaskType.values()) { + // iterate over all the tips of the type under consideration + for (TaskInProgress tip : job.getTasks(type)) { + // iterate over all the task-ids in the tip under consideration + for (TaskAttemptID id : tip.getAllTaskAttemptIDs()) { + // remove the task-id entry from the jobtracker + removeTaskEntry(id); + } + } + } + } + + /** + * Safe clean-up all data structures at the end of the + * job (success/failure/killed). + * Here we also ensure that for a given user we maintain + * information for only MAX_COMPLETE_USER_JOBS_IN_MEMORY jobs + * on the JobTracker. + * + * @param job completed job. 
+ */ + synchronized void finalizeJob(JobInProgress job) { + // Mark the 'non-running' tasks for pruning + markCompletedJob(job); + + JobEndNotifier.registerNotification(job.getJobConf(), job.getStatus()); + + // start the merge of log files + JobID id = job.getStatus().getJobID(); + if (job.hasRestarted()) { + try { + JobHistory.JobInfo.finalizeRecovery(id, job.getJobConf()); + } catch (IOException ioe) { + LOG.info("Failed to finalize the log file recovery for job " + id, ioe); + } + } + + // mark the job as completed + try { + JobHistory.JobInfo.markCompleted(id); + } catch (IOException ioe) { + LOG.info("Failed to mark job " + id + " as completed!", ioe); + } + + final JobTrackerInstrumentation metrics = getInstrumentation(); + metrics.finalizeJob(conf, id); + + long now = getClock().getTime(); + + // mark the job for cleanup at all the trackers + addJobForCleanup(id); + + try { + File userFileForJob = + new File(lDirAlloc.getLocalPathToRead(SUBDIR + "/" + id, + conf).toString()); + if (userFileForJob != null) { + userFileForJob.delete(); + } + } catch (IOException ioe) { + LOG.info("Failed to delete job id mapping for job " + id, ioe); + } + + // add the blacklisted trackers to potentially faulty list + if (job.getStatus().getRunState() == JobStatus.SUCCEEDED) { + if (job.getNoOfBlackListedTrackers() > 0) { + for (String hostName : job.getBlackListedTrackers()) { + faultyTrackers.incrementFaults(hostName); + } + } + } + + String jobUser = job.getProfile().getUser(); + //add to the user to jobs mapping + synchronized (userToJobsMap) { + ArrayList userJobs = userToJobsMap.get(jobUser); + if (userJobs == null) { + userJobs = new ArrayList(); + userToJobsMap.put(jobUser, userJobs); + } + userJobs.add(job); + } + } + + /////////////////////////////////////////////////////// + // Accessors for objects that want info on jobs, tasks, + // trackers, etc. + /////////////////////////////////////////////////////// + public int getTotalSubmissions() { + return totalSubmissions; + } + public String getJobTrackerMachine() { + return localMachine; + } + + /** + * Get the unique identifier (ie. timestamp) of this job tracker start. + * @return a string with a unique identifier + */ + public String getTrackerIdentifier() { + return trackerIdentifier; + } + + public int getTrackerPort() { + return port; + } + public int getInfoPort() { + return infoPort; + } + public long getStartTime() { + return startTime; + } + public Vector runningJobs() { + Vector v = new Vector(); + for (Iterator it = jobs.values().iterator(); it.hasNext();) { + JobInProgress jip = (JobInProgress) it.next(); + JobStatus status = jip.getStatus(); + if (status.getRunState() == JobStatus.RUNNING) { + v.add(jip); + } + } + return v; + } + /** + * Version that is called from a timer thread, and therefore needs to be + * careful to synchronize. 
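// The method that follows wraps its iteration in synchronized (jobs). That is
// needed because Collections.synchronizedMap only makes individual calls
// atomic; any traversal must hold the map's own monitor for its whole
// duration. A minimal illustration of the same rule:
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

final class SynchronizedMapIterationSketch {
  private final Map<String, Integer> jobs =
      Collections.synchronizedMap(new TreeMap<String, Integer>());

  void put(String id, int state) { jobs.put(id, state); }   // single calls are thread-safe

  // Iteration is not: take the map's monitor and copy out a snapshot.
  List<String> idsSnapshot() {
    List<String> snapshot = new ArrayList<String>();
    synchronized (jobs) {
      for (String id : jobs.keySet()) {
        snapshot.add(id);
      }
    }
    return snapshot;
  }
}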
+ */ + public synchronized List getRunningJobs() { + synchronized (jobs) { + return runningJobs(); + } + } + public Vector failedJobs() { + Vector v = new Vector(); + for (Iterator it = jobs.values().iterator(); it.hasNext();) { + JobInProgress jip = (JobInProgress) it.next(); + JobStatus status = jip.getStatus(); + if ((status.getRunState() == JobStatus.FAILED) + || (status.getRunState() == JobStatus.KILLED)) { + v.add(jip); + } + } + return v; + } + + public synchronized List getFailedJobs() { + synchronized (jobs) { + return failedJobs(); + } + } + + public Vector completedJobs() { + Vector v = new Vector(); + for (Iterator it = jobs.values().iterator(); it.hasNext();) { + JobInProgress jip = (JobInProgress) it.next(); + JobStatus status = jip.getStatus(); + if (status.getRunState() == JobStatus.SUCCEEDED) { + v.add(jip); + } + } + return v; + } + + public synchronized List getCompletedJobs() { + synchronized (jobs) { + return completedJobs(); + } + } + + /** + * Get all the task trackers in the cluster + * + * @return {@link Collection} of {@link TaskTrackerStatus} + */ + // lock to taskTrackers should hold JT lock first. + public synchronized Collection taskTrackers() { + Collection ttStatuses; + synchronized (taskTrackers) { + ttStatuses = + new ArrayList(taskTrackers.values().size()); + for (TaskTracker tt : taskTrackers.values()) { + ttStatuses.add(tt.getStatus()); + } + } + return ttStatuses; + } + + /** + * Get the active task tracker statuses in the cluster + * + * @return {@link Collection} of active {@link TaskTrackerStatus} + */ + // This method is synchronized to make sure that the locking order + // "taskTrackers lock followed by faultyTrackers.potentiallyFaultyTrackers + // lock" is under JobTracker lock to avoid deadlocks. + synchronized public Collection activeTaskTrackers() { + Collection activeTrackers = + new ArrayList(); + synchronized (taskTrackers) { + for ( TaskTracker tt : taskTrackers.values()) { + TaskTrackerStatus status = tt.getStatus(); + if (!faultyTrackers.isBlacklisted(status.getHost())) { + activeTrackers.add(status); + } + } + } + return activeTrackers; + } + + /** + * Get the active and blacklisted task tracker names in the cluster. The first + * element in the returned list contains the list of active tracker names. + * The second element in the returned list contains the list of blacklisted + * tracker names. + */ + // This method is synchronized to make sure that the locking order + // "taskTrackers lock followed by faultyTrackers.potentiallyFaultyTrackers + // lock" is under JobTracker lock to avoid deadlocks. + synchronized public List> taskTrackerNames() { + List activeTrackers = + new ArrayList(); + List blacklistedTrackers = + new ArrayList(); + synchronized (taskTrackers) { + for (TaskTracker tt : taskTrackers.values()) { + TaskTrackerStatus status = tt.getStatus(); + if (!faultyTrackers.isBlacklisted(status.getHost())) { + activeTrackers.add(status.getTrackerName()); + } else { + blacklistedTrackers.add(status.getTrackerName()); + } + } + } + List> result = new ArrayList>(2); + result.add(activeTrackers); + result.add(blacklistedTrackers); + return result; + } + + /** + * Get the blacklisted task tracker statuses in the cluster + * + * @return {@link Collection} of blacklisted {@link TaskTrackerStatus} + */ + // This method is synchronized to make sure that the locking order + // "taskTrackers lock followed by faultyTrackers.potentiallyFaultyTrackers + // lock" is under JobTracker lock to avoid deadlocks. 
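// The locking comments on these accessors all describe one global order: the
// JobTracker lock is taken first, then taskTrackers, then the faulty-tracker
// structures. Keeping every code path on that single order is what rules out
// deadlock; the idea shown with two generic locks (names are illustrative):
final class LockOrderingSketch {
  private final Object outerLock = new Object(); // always acquired first
  private final Object innerLock = new Object(); // only acquired while holding outerLock

  private int outerState;
  private int innerState;

  // Both locks, always in the same order: no two threads can each hold one
  // lock while waiting for the other.
  void updateBoth() {
    synchronized (outerLock) {
      outerState++;
      synchronized (innerLock) {
        innerState++;
      }
    }
  }

  // Even a reader that only needs the inner state respects the same order.
  int readInner() {
    synchronized (outerLock) {
      synchronized (innerLock) {
        return innerState;
      }
    }
  }
}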
+ synchronized public Collection blacklistedTaskTrackers() { + Collection blacklistedTrackers = + new ArrayList(); + synchronized (taskTrackers) { + for (TaskTracker tt : taskTrackers.values()) { + TaskTrackerStatus status = tt.getStatus(); + if (faultyTrackers.isBlacklisted(status.getHost())) { + blacklistedTrackers.add(status); + } + } + } + return blacklistedTrackers; + } + + synchronized int getFaultCount(String hostName) { + return faultyTrackers.getFaultCount(hostName); + } + + /** + * Get the number of blacklisted trackers across all the jobs + * + * @return + */ + int getBlacklistedTrackerCount() { + return faultyTrackers.numBlacklistedTrackers; + } + + /** + * Whether the tracker is blacklisted or not + * + * @param trackerID + * + * @return true if blacklisted, false otherwise + */ + synchronized public boolean isBlacklisted(String trackerID) { + TaskTrackerStatus status = getTaskTrackerStatus(trackerID); + if (status != null) { + return faultyTrackers.isBlacklisted(status.getHost()); + } + return false; + } + + // lock to taskTrackers should hold JT lock first. + synchronized public TaskTrackerStatus getTaskTrackerStatus(String trackerID) { + TaskTracker taskTracker; + synchronized (taskTrackers) { + taskTracker = taskTrackers.get(trackerID); + } + return (taskTracker == null) ? null : taskTracker.getStatus(); + } + + // lock to taskTrackers should hold JT lock first. + synchronized public TaskTracker getTaskTracker(String trackerID) { + synchronized (taskTrackers) { + return taskTrackers.get(trackerID); + } + } + + JobTrackerStatistics getStatistics() { + return statistics; + } + /** + * Adds a new node to the jobtracker. It involves adding it to the expiry + * thread and adding it for resolution + * + * Assumes JobTracker, taskTrackers and trackerExpiryQueue is locked on entry + * + * @param status Task Tracker's status + */ + void addNewTracker(TaskTracker taskTracker) { + TaskTrackerStatus status = taskTracker.getStatus(); + trackerExpiryQueue.add(status); + + // Register the tracker if its not registered + String hostname = status.getHost(); + if (getNode(status.getTrackerName()) == null) { + // Making the network location resolution inline .. + resolveAndAddToTopology(hostname); + } + + // add it to the set of tracker per host + Set trackers = hostnameToTaskTracker.get(hostname); + if (trackers == null) { + trackers = Collections.synchronizedSet(new HashSet()); + hostnameToTaskTracker.put(hostname, trackers); + } + statistics.taskTrackerAdded(status.getTrackerName()); + getInstrumentation().addTrackers(1); + LOG.info("Adding tracker " + status.getTrackerName() + " to host " + + hostname); + trackers.add(taskTracker); + } + + public Node resolveAndAddToTopology(String name) { + List tmpList = new ArrayList(1); + tmpList.add(name); + List rNameList = dnsToSwitchMapping.resolve(tmpList); + String rName = rNameList.get(0); + String networkLoc = NodeBase.normalize(rName); + return addHostToNodeMapping(name, networkLoc); + } + + private Node addHostToNodeMapping(String host, String networkLoc) { + Node node = null; + synchronized (nodesAtMaxLevel) { + if ((node = clusterMap.getNode(networkLoc+"/"+host)) == null) { + node = new NodeBase(host, networkLoc); + clusterMap.add(node); + if (node.getLevel() < getNumTaskCacheLevels()) { + LOG.fatal("Got a host whose level is: " + node.getLevel() + "." 
+ + " Should get at least a level of value: " + + getNumTaskCacheLevels()); + try { + stopTracker(); + } catch (IOException ie) { + LOG.warn("Exception encountered during shutdown: " + + StringUtils.stringifyException(ie)); + System.exit(-1); + } + } + hostnameToNodeMap.put(host, node); + // Make an entry for the node at the max level in the cache + nodesAtMaxLevel.add(getParentNode(node, getNumTaskCacheLevels() - 1)); + } + } + return node; + } + + /** + * Returns a collection of nodes at the max level + */ + public Collection getNodesAtMaxLevel() { + return nodesAtMaxLevel; + } + + public static Node getParentNode(Node node, int level) { + for (int i = 0; i < level; ++i) { + node = node.getParent(); + } + return node; + } + + /** + * Return the Node in the network topology that corresponds to the hostname + */ + public Node getNode(String name) { + return hostnameToNodeMap.get(name); + } + public int getNumTaskCacheLevels() { + return numTaskCacheLevels; + } + public int getNumResolvedTaskTrackers() { + return numResolved; + } + + public int getNumberOfUniqueHosts() { + return uniqueHostsMap.size(); + } + + public void addJobInProgressListener(JobInProgressListener listener) { + jobInProgressListeners.add(listener); + } + + public void removeJobInProgressListener(JobInProgressListener listener) { + jobInProgressListeners.remove(listener); + } + + // Update the listeners about the job + // Assuming JobTracker is locked on entry. + void updateJobInProgressListeners(JobChangeEvent event) { + for (JobInProgressListener listener : jobInProgressListeners) { + listener.jobUpdated(event); + } + } + + /** + * Return the {@link QueueManager} associated with the JobTracker. + */ + public QueueManager getQueueManager() { + return queueManager; + } + + //////////////////////////////////////////////////// + // InterTrackerProtocol + //////////////////////////////////////////////////// + + public String getBuildVersion() throws IOException{ + return VersionInfo.getBuildVersion(); + } + + /** + * The periodic heartbeat mechanism between the {@link TaskTracker} and + * the {@link JobTracker}. + * + * The {@link JobTracker} processes the status information sent by the + * {@link TaskTracker} and responds with instructions to start/stop + * tasks or jobs, and also 'reset' instructions during contingencies. + */ + public HeartbeatResponse heartbeat(TaskTrackerStatus status, + boolean restarted, + boolean initialContact, + boolean acceptNewTasks, + short responseId) + throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("Got heartbeat from: " + status.getTrackerName() + + " (restarted: " + restarted + + " initialContact: " + initialContact + + " acceptNewTasks: " + acceptNewTasks + ")" + + " with responseId: " + responseId); + } + + short newResponseId; + boolean shouldSchedule, addRestartInfo = false; + TaskTrackerStatus taskTrackerStatus; + String trackerName; + + synchronized (this) { + + // Make sure heartbeat is from a tasktracker allowed by the jobtracker. 
+ if (!acceptTaskTracker(status)) { + throw new DisallowedTaskTrackerException(status); + } + + // First check if the last heartbeat response got through + trackerName = status.getTrackerName(); + long now = getClock().getTime(); + boolean isBlacklisted = false; + if (restarted) { + faultyTrackers.markTrackerHealthy(status.getHost()); + } else { + isBlacklisted = + faultyTrackers.shouldAssignTasksToTracker(status.getHost(), now); + } + + HeartbeatResponse prevHeartbeatResponse = + trackerToHeartbeatResponseMap.get(trackerName); + + if (initialContact != true) { + // If this isn't the 'initial contact' from the tasktracker, + // there is something seriously wrong if the JobTracker has + // no record of the 'previous heartbeat'; if so, ask the + // tasktracker to re-initialize itself. + if (prevHeartbeatResponse == null) { + // This is the first heartbeat from the old tracker to the newly + // started JobTracker + if (hasRestarted()) { + addRestartInfo = true; + // inform the recovery manager about this tracker joining back + recoveryManager.unMarkTracker(trackerName); + } else { + // Jobtracker might have restarted but no recovery is needed + // otherwise this code should not be reached + LOG.warn("Serious problem, cannot find record of 'previous' " + + "heartbeat for '" + trackerName + + "'; reinitializing the tasktracker"); + return new HeartbeatResponse(responseId, + new TaskTrackerAction[] {new ReinitTrackerAction()}); + } + + } else { + + // It is completely safe to not process a 'duplicate' heartbeat from a + // {@link TaskTracker} since it resends the heartbeat when rpcs are + // lost see {@link TaskTracker.transmitHeartbeat()}; + // acknowledge it by re-sending the previous response to let the + // {@link TaskTracker} go forward. + if (prevHeartbeatResponse.getResponseId() != responseId) { + LOG.info("Ignoring 'duplicate' heartbeat from '" + + trackerName + "'; resending the previous 'lost' response"); + return prevHeartbeatResponse; + } + } + } + + // Process this heartbeat + newResponseId = (short)(responseId + 1); + status.setLastSeen(now); + if (!processHeartbeat(status, initialContact)) { + if (prevHeartbeatResponse != null) { + trackerToHeartbeatResponseMap.remove(trackerName); + } + return new HeartbeatResponse(newResponseId, + new TaskTrackerAction[] {new ReinitTrackerAction()}); + } + + + shouldSchedule = recoveryManager.shouldSchedule() && + acceptNewTasks && + !faultyTrackers.isBlacklisted(status.getHost()); + + taskTrackerStatus = + shouldSchedule ? 
getTaskTrackerStatus(trackerName) : null; + + } // synchronized JobTracker + + // Initialize the response to be sent for the heartbeat + HeartbeatResponse response = new HeartbeatResponse(newResponseId, null); + List actions = new ArrayList(); + List tasks = null; + + // Check for setup/cleanup tasks to be executed on the tasktracker + if (shouldSchedule) { + if (taskTrackerStatus == null) { + LOG.warn("Unknown task tracker polling; ignoring: " + trackerName); + } else { + tasks = getSetupAndCleanupTasks(taskTrackerStatus); + } + } + + synchronized (this) { + + // Check for map/reduce tasks to be executed on the tasktracker + if ((taskTrackerStatus != null) && (tasks == null)) { + tasks = taskScheduler.assignTasks(taskTrackers.get(trackerName)); + } + + if (tasks != null) { + for (Task task : tasks) { + + TaskAttemptID taskid = task.getTaskID(); + JobInProgress job = getJob(taskid.getJobID()); + + if (job != null) { + createTaskEntry (taskid, taskTrackerStatus.getTrackerName(), + job.getTaskInProgress(taskid.getTaskID())); + } else { + // because we do not hold the jobtracker lock throughout this + // routine - there is a small chance that the job for the task + // we are trying to schedule no longer exists. ignore such tasks + LOG.warn("Unable to find job corresponding to task: " + taskid.toString()); + } + + expireLaunchingTasks.addNewTask(task.getTaskID()); + LOG.debug(trackerName + " -> LaunchTask: " + task.getTaskID()); + actions.add(new LaunchTaskAction(task)); + } + } + + // Check for tasks to be killed + List killTasksList = getTasksToKill(trackerName); + if (killTasksList != null) { + actions.addAll(killTasksList); + } + + // Check for jobs to be killed/cleanedup + List killJobsList = getJobsForCleanup(trackerName); + if (killJobsList != null) { + actions.addAll(killJobsList); + } + + // Check for tasks whose outputs can be saved + List commitTasksList = getTasksToSave(status); + if (commitTasksList != null) { + actions.addAll(commitTasksList); + } + + // calculate next heartbeat interval and put in heartbeat response + int nextInterval = getNextHeartbeatInterval(); + response.setHeartbeatInterval(nextInterval); + response.setActions( + actions.toArray(new TaskTrackerAction[actions.size()])); + + // check if the restart info is req + if (addRestartInfo) { + response.setRecoveredJobs(recoveryManager.getJobsToRecover()); + } + + // Update the trackerToHeartbeatResponseMap + trackerToHeartbeatResponseMap.put(trackerName, response); + + // Done processing the hearbeat, now remove 'marked' tasks + removeMarkedTasks(trackerName); + + return response; + + } // synchronized JobTracker + } + + /** + * Calculates next heartbeat interval using cluster size. + * Heartbeat interval is incremented by 1 second for every 100 nodes by default. + * @return next heartbeat interval. + */ + public int getNextHeartbeatInterval() { + // get the no of task trackers + int clusterSize = getClusterStatus().getTaskTrackers(); + int heartbeatInterval = Math.max( + (int)(1000 * HEARTBEATS_SCALING_FACTOR * + Math.ceil((double)clusterSize / + NUM_HEARTBEATS_IN_SECOND)), + HEARTBEAT_INTERVAL_MIN) ; + return heartbeatInterval; + } + + /** + * Return if the specified tasktracker is in the hosts list, + * if one was configured. If none was configured, then this + * returns true. 
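// getNextHeartbeatInterval above stretches the heartbeat period as the cluster
// grows: roughly one extra second per NUM_HEARTBEATS_IN_SECOND trackers, scaled
// by the configured factor and never below the floor. A worked example of the
// same formula; the scaling factor, rate and floor are passed in explicitly
// here because the real default constants are defined elsewhere in this file:
final class HeartbeatIntervalSketch {
  static int nextIntervalMs(int clusterSize, float scalingFactor,
                            int heartbeatsPerSecond, int minIntervalMs) {
    int scaled = (int) (1000 * scalingFactor *
        Math.ceil((double) clusterSize / heartbeatsPerSecond));
    return Math.max(scaled, minIntervalMs);
  }

  public static void main(String[] args) {
    // 3000 trackers, factor 1.0, 100 heartbeats/s, 3 s floor -> 30000 ms.
    System.out.println(nextIntervalMs(3000, 1.0f, 100, 3000));
    // A small cluster stays at the floor: 50 trackers -> 3000 ms.
    System.out.println(nextIntervalMs(50, 1.0f, 100, 3000));
  }
}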
+ */ + private boolean inHostsList(TaskTrackerStatus status) { + Set hostsList = hostsReader.getHosts(); + return (hostsList.isEmpty() || hostsList.contains(status.getHost())); + } + + /** + * Return if the specified tasktracker is in the exclude list. + */ + private boolean inExcludedHostsList(TaskTrackerStatus status) { + Set excludeList = hostsReader.getExcludedHosts(); + return excludeList.contains(status.getHost()); + } + + /** + * Returns true if the tasktracker is in the hosts list and + * not in the exclude list. + */ + private boolean acceptTaskTracker(TaskTrackerStatus status) { + return (inHostsList(status) && !inExcludedHostsList(status)); + } + + /** + * Update the last recorded status for the given task tracker. + * It assumes that the taskTrackers are locked on entry. + * @param trackerName The name of the tracker + * @param status The new status for the task tracker + * @return Was an old status found? + */ + boolean updateTaskTrackerStatus(String trackerName, + TaskTrackerStatus status) { + TaskTracker tt = getTaskTracker(trackerName); + TaskTrackerStatus oldStatus = (tt == null) ? null : tt.getStatus(); + if (oldStatus != null) { + totalMaps -= oldStatus.countMapTasks(); + totalReduces -= oldStatus.countReduceTasks(); + occupiedMapSlots -= oldStatus.countOccupiedMapSlots(); + occupiedReduceSlots -= oldStatus.countOccupiedReduceSlots(); + getInstrumentation().decRunningMaps(oldStatus.countMapTasks()); + getInstrumentation().decRunningReduces(oldStatus.countReduceTasks()); + getInstrumentation().decOccupiedMapSlots(oldStatus.countOccupiedMapSlots()); + getInstrumentation().decOccupiedReduceSlots(oldStatus.countOccupiedReduceSlots()); + if (!faultyTrackers.isBlacklisted(oldStatus.getHost())) { + int mapSlots = oldStatus.getMaxMapSlots(); + totalMapTaskCapacity -= mapSlots; + int reduceSlots = oldStatus.getMaxReduceSlots(); + totalReduceTaskCapacity -= reduceSlots; + } + if (status == null) { + taskTrackers.remove(trackerName); + Integer numTaskTrackersInHost = + uniqueHostsMap.get(oldStatus.getHost()); + if (numTaskTrackersInHost != null) { + numTaskTrackersInHost --; + if (numTaskTrackersInHost > 0) { + uniqueHostsMap.put(oldStatus.getHost(), numTaskTrackersInHost); + } + else { + uniqueHostsMap.remove(oldStatus.getHost()); + } + } + } + } + if (status != null) { + totalMaps += status.countMapTasks(); + totalReduces += status.countReduceTasks(); + occupiedMapSlots += status.countOccupiedMapSlots(); + occupiedReduceSlots += status.countOccupiedReduceSlots(); + getInstrumentation().addRunningMaps(status.countMapTasks()); + getInstrumentation().addRunningReduces(status.countReduceTasks()); + getInstrumentation().addOccupiedMapSlots(status.countOccupiedMapSlots()); + getInstrumentation().addOccupiedReduceSlots(status.countOccupiedReduceSlots()); + if (!faultyTrackers.isBlacklisted(status.getHost())) { + int mapSlots = status.getMaxMapSlots(); + totalMapTaskCapacity += mapSlots; + int reduceSlots = status.getMaxReduceSlots(); + totalReduceTaskCapacity += reduceSlots; + } + boolean alreadyPresent = false; + TaskTracker taskTracker = taskTrackers.get(trackerName); + if (taskTracker != null) { + alreadyPresent = true; + } else { + taskTracker = new TaskTracker(trackerName); + } + + taskTracker.setStatus(status); + taskTrackers.put(trackerName, taskTracker); + + if (LOG.isDebugEnabled()) { + int runningMaps = 0, runningReduces = 0; + int commitPendingMaps = 0, commitPendingReduces = 0; + int unassignedMaps = 0, unassignedReduces = 0; + int miscMaps = 0, miscReduces = 0; + List 
taskReports = status.getTaskReports(); + for (Iterator it = taskReports.iterator(); it.hasNext();) { + TaskStatus ts = (TaskStatus) it.next(); + boolean isMap = ts.getIsMap(); + TaskStatus.State state = ts.getRunState(); + if (state == TaskStatus.State.RUNNING) { + if (isMap) { ++runningMaps; } + else { ++runningReduces; } + } else if (state == TaskStatus.State.UNASSIGNED) { + if (isMap) { ++unassignedMaps; } + else { ++unassignedReduces; } + } else if (state == TaskStatus.State.COMMIT_PENDING) { + if (isMap) { ++commitPendingMaps; } + else { ++commitPendingReduces; } + } else { + if (isMap) { ++miscMaps; } + else { ++miscReduces; } + } + } + LOG.debug(trackerName + ": Status -" + + " running(m) = " + runningMaps + + " unassigned(m) = " + unassignedMaps + + " commit_pending(m) = " + commitPendingMaps + + " misc(m) = " + miscMaps + + " running(r) = " + runningReduces + + " unassigned(r) = " + unassignedReduces + + " commit_pending(r) = " + commitPendingReduces + + " misc(r) = " + miscReduces); + } + + if (!alreadyPresent) { + Integer numTaskTrackersInHost = + uniqueHostsMap.get(status.getHost()); + if (numTaskTrackersInHost == null) { + numTaskTrackersInHost = 0; + } + numTaskTrackersInHost ++; + uniqueHostsMap.put(status.getHost(), numTaskTrackersInHost); + } + } + getInstrumentation().setMapSlots(totalMapTaskCapacity); + getInstrumentation().setReduceSlots(totalReduceTaskCapacity); + return oldStatus != null; + } + + // Increment the number of reserved slots in the cluster. + // This method assumes the caller has JobTracker lock. + void incrementReservations(TaskType type, int reservedSlots) { + if (type.equals(TaskType.MAP)) { + reservedMapSlots += reservedSlots; + } else if (type.equals(TaskType.REDUCE)) { + reservedReduceSlots += reservedSlots; + } + } + + // Decrement the number of reserved slots in the cluster. + // This method assumes the caller has JobTracker lock. + void decrementReservations(TaskType type, int reservedSlots) { + if (type.equals(TaskType.MAP)) { + reservedMapSlots -= reservedSlots; + } else if (type.equals(TaskType.REDUCE)) { + reservedReduceSlots -= reservedSlots; + } + } + + private void updateNodeHealthStatus(TaskTrackerStatus trackerStatus) { + TaskTrackerHealthStatus status = trackerStatus.getHealthStatus(); + synchronized (faultyTrackers) { + faultyTrackers.setNodeHealthStatus(trackerStatus.getHost(), + status.isNodeHealthy(), status.getHealthReport()); + } + } + + /** + * Process incoming heartbeat messages from the task trackers. 
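+ * <p>The new status is applied while holding the taskTrackers and
+ * trackerExpiryQueue locks. A non-initial heartbeat from a tracker with no
+ * recorded status returns false, which causes the caller to ask the
+ * tracker to reinitialize; otherwise task and node-health statuses are
+ * updated afterwards.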
+ */ + synchronized boolean processHeartbeat( + TaskTrackerStatus trackerStatus, + boolean initialContact) { + String trackerName = trackerStatus.getTrackerName(); + + synchronized (taskTrackers) { + synchronized (trackerExpiryQueue) { + boolean seenBefore = updateTaskTrackerStatus(trackerName, + trackerStatus); + TaskTracker taskTracker = getTaskTracker(trackerName); + if (initialContact) { + // If it's first contact, then clear out + // any state hanging around + if (seenBefore) { + lostTaskTracker(taskTracker); + } + } else { + // If not first contact, there should be some record of the tracker + if (!seenBefore) { + LOG.warn("Status from unknown Tracker : " + trackerName); + updateTaskTrackerStatus(trackerName, null); + return false; + } + } + + if (initialContact) { + // if this is lost tracker that came back now, and if it blacklisted + // increment the count of blacklisted trackers in the cluster + if (isBlacklisted(trackerName)) { + faultyTrackers.incrBlackListedTrackers(1); + } + addNewTracker(taskTracker); + } + } + } + + updateTaskStatuses(trackerStatus); + updateNodeHealthStatus(trackerStatus); + + return true; + } + + /** + * A tracker wants to know if any of its Tasks have been + * closed (because the job completed, whether successfully or not) + */ + synchronized List getTasksToKill(String taskTracker) { + + Set taskset = trackerToTaskMap.get(taskTracker); + List killList = new ArrayList(); + if (taskset != null) { + for (TaskAttemptIDWithTip onetask : taskset) { + TaskAttemptID killTaskId = onetask.attemptId; + TaskInProgress tip = onetask.tip; + + if (tip == null) { + continue; + } + if (tip.shouldClose(killTaskId)) { + // + // This is how the JobTracker ends a task at the TaskTracker. + // It may be successfully completed, or may be killed in + // mid-execution. + // + if (!tip.getJob().isComplete()) { + killList.add(new KillTaskAction(killTaskId)); + LOG.debug(taskTracker + " -> KillTaskAction: " + killTaskId); + } + } + } + } + + // add the stray attempts for uninited jobs + synchronized (trackerToTasksToCleanup) { + Set set = trackerToTasksToCleanup.remove(taskTracker); + if (set != null) { + for (TaskAttemptID id : set) { + killList.add(new KillTaskAction(id)); + } + } + } + return killList; + } + + /** + * Add a job to cleanup for the tracker. + */ + private void addJobForCleanup(JobID id) { + for (String taskTracker : taskTrackers.keySet()) { + LOG.debug("Marking job " + id + " for cleanup by tracker " + taskTracker); + synchronized (trackerToJobsToCleanup) { + Set jobsToKill = trackerToJobsToCleanup.get(taskTracker); + if (jobsToKill == null) { + jobsToKill = new HashSet(); + trackerToJobsToCleanup.put(taskTracker, jobsToKill); + } + jobsToKill.add(id); + } + } + } + + /** + * A tracker wants to know if any job needs cleanup because the job completed. 
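+ * <p>Job ids queued for this tracker in trackerToJobsToCleanup are drained
+ * and returned as KillJobAction instances, or null when none are pending.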
+ */ + private List getJobsForCleanup(String taskTracker) { + Set jobs = null; + synchronized (trackerToJobsToCleanup) { + jobs = trackerToJobsToCleanup.remove(taskTracker); + } + if (jobs != null) { + // prepare the actions list + List killList = new ArrayList(); + for (JobID killJobId : jobs) { + killList.add(new KillJobAction(killJobId)); + LOG.debug(taskTracker + " -> KillJobAction: " + killJobId); + } + + return killList; + } + return null; + } + + /** + * A tracker wants to know if any of its Tasks can be committed + */ + synchronized List getTasksToSave( + TaskTrackerStatus tts) { + List taskStatuses = tts.getTaskReports(); + if (taskStatuses != null) { + List saveList = new ArrayList(); + for (TaskStatus taskStatus : taskStatuses) { + if (taskStatus.getRunState() == TaskStatus.State.COMMIT_PENDING) { + TaskAttemptID taskId = taskStatus.getTaskID(); + TaskInProgress tip = taskidToTIPMap.get(taskId); + if (tip == null) { + continue; + } + if (tip.shouldCommit(taskId)) { + saveList.add(new CommitTaskAction(taskId)); + LOG.debug(tts.getTrackerName() + + " -> CommitTaskAction: " + taskId); + } + } + } + return saveList; + } + return null; + } + + // returns cleanup tasks first, then setup tasks. + List getSetupAndCleanupTasks( + TaskTrackerStatus taskTracker) throws IOException { + int maxMapTasks = taskTracker.getMaxMapSlots(); + int maxReduceTasks = taskTracker.getMaxReduceSlots(); + int numMaps = taskTracker.countOccupiedMapSlots(); + int numReduces = taskTracker.countOccupiedReduceSlots(); + int numTaskTrackers = getClusterStatus().getTaskTrackers(); + int numUniqueHosts = getNumberOfUniqueHosts(); + + List cachedJobs = new ArrayList (); + + // get a snapshot of all the jobs in the system + synchronized (jobs) { + cachedJobs.addAll(jobs.values()); + } + + Task t = null; + + if (numMaps < maxMapTasks) { + for (JobInProgress job: cachedJobs) { + t = job.obtainJobCleanupTask(taskTracker, numTaskTrackers, + numUniqueHosts, true); + if (t != null) { + return Collections.singletonList(t); + } + } + for (JobInProgress job: cachedJobs) { + t = job.obtainTaskCleanupTask(taskTracker, true); + if (t != null) { + return Collections.singletonList(t); + } + } + for (JobInProgress job: cachedJobs) { + t = job.obtainJobSetupTask(taskTracker, numTaskTrackers, + numUniqueHosts, true); + if (t != null) { + return Collections.singletonList(t); + } + } + } + if (numReduces < maxReduceTasks) { + for (JobInProgress job: cachedJobs) { + t = job.obtainJobCleanupTask(taskTracker, numTaskTrackers, + numUniqueHosts, false); + if (t != null) { + return Collections.singletonList(t); + } + } + for (JobInProgress job: cachedJobs) { + t = job.obtainTaskCleanupTask(taskTracker, false); + if (t != null) { + return Collections.singletonList(t); + } + } + for (JobInProgress job: cachedJobs) { + t = job.obtainJobSetupTask(taskTracker, numTaskTrackers, + numUniqueHosts, false); + if (t != null) { + return Collections.singletonList(t); + } + } + } + + return null; + } + + /** + * Grab the local fs name + */ + public synchronized String getFilesystemName() throws IOException { + if (fs == null) { + throw new IllegalStateException("FileSystem object not available yet"); + } + return fs.getUri().toString(); + } + + + public void reportTaskTrackerError(String taskTracker, + String errorClass, + String errorMessage) throws IOException { + LOG.warn("Report from " + taskTracker + ": " + errorMessage); + } + + /** + * Remove the job_ from jobids to get the unique string. 
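+ * For example, a jobid such as "job_201012011611_0001" yields
+ * "201012011611_0001"; only the leading "job_" prefix is stripped.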
+ */ + static String getJobUniqueString(String jobid) { + return jobid.substring(4); + } + + //////////////////////////////////////////////////// + // JobSubmissionProtocol + //////////////////////////////////////////////////// + + /** + * Allocates a new JobId string. + */ + public JobID getNewJobId() throws IOException { + JobID id = new JobID(getTrackerIdentifier(), nextJobId.getAndIncrement()); + + // get the user group info + UserGroupInformation ugi = UserGroupInformation.getCurrentUGI(); + + // mark the user for this id + jobToUserMap.put(id, ugi.getUserName()); + + LOG.info("Job id " + id + " assigned to user " + ugi.getUserName()); + + return id; + } + + private File persistUserName(JobID jobId, UserGroupInformation ugi) + throws IOException { + // persist + File userFileForJob = new File( + lDirAlloc.getLocalPathForWrite( + SUBDIR + "/" + jobId, conf).toString()); + if (userFileForJob == null) { + LOG.info("Failed to create job-id file for job " + + jobId + " at " + userFileForJob); + } else { + FileOutputStream fout = new FileOutputStream(userFileForJob); + BufferedWriter writer = null; + try { + writer = new BufferedWriter(new OutputStreamWriter(fout)); + writer.write(ugi.getUserName() + "\n"); + } finally { + if (writer != null) { + writer.close(); + } + fout.close(); + } + LOG.info("Job " + jobId + + " user info persisted to file : " + userFileForJob); + } + return userFileForJob; + } + + private final Object submitLock = new Object(); + /** + * JobTracker.submitJob() kicks off a new job. + * + * Create a 'JobInProgress' object, which contains both JobProfile + * and JobStatus. Those two sub-objects are sometimes shipped outside + * of the JobTracker. But JobInProgress adds info that's useful for + * the JobTracker alone. + */ + public JobStatus submitJob(JobID jobId) throws IOException { + UserGroupInformation ugi; + Path jobDir; + synchronized (submitLock) { + synchronized (this) { + + if (jobs.containsKey(jobId)) { + //job already running, don't start twice + return jobs.get(jobId).getStatus(); + } + + // check if the owner is uploding the splits or not + // get the user group info + ugi = UserGroupInformation.getCurrentUGI(); + + // check if the user invoking this api is the owner of this job + if (!jobToUserMap.get(jobId).equals(ugi.getUserName())) { + throw new IOException("User " + ugi.getUserName() + + " is not the owner of the job " + jobId); + } + + jobDir = this.getSystemDirectoryForJob(jobId); + jobToUserMap.remove(jobId); + } + + File userFileForJob = persistUserName(jobId, ugi); + JobInProgress.copyJobFileLocally(jobDir, jobId, this.conf); + + synchronized (this) { + JobInProgress job = null; + try { + job = new JobInProgress(jobId, this, this.conf, ugi.getUserName(), 0); + } catch (Exception e) { + if (userFileForJob != null) { + userFileForJob.delete(); + } + throw new IOException(e); + } + + String queue = job.getProfile().getQueueName(); + if (!(queueManager.getQueues().contains(queue))) { + new CleanupQueue().addToQueue(new PathDeletionContext( + FileSystem.get(conf), + getSystemDirectoryForJob(jobId).toUri().getPath())); + job.fail(); + if (userFileForJob != null) { + userFileForJob.delete(); + } + throw new IOException("Queue \"" + queue + "\" does not exist"); + } + + // check for access + try { + checkAccess(job, QueueManager.QueueOperation.SUBMIT_JOB); + } catch (IOException ioe) { + LOG.warn("Access denied for user " + job.getJobConf().getUser() + + ". 
Ignoring job " + jobId, ioe); + job.fail(); + if (userFileForJob != null) { + userFileForJob.delete(); + } + new CleanupQueue().addToQueue(new PathDeletionContext( + FileSystem.get(conf), + getSystemDirectoryForJob(jobId).toUri().getPath())); + throw ioe; + } + + // Check the job if it cannot run in the cluster because of invalid memory + // requirements. + try { + checkMemoryRequirements(job); + } catch (IOException ioe) { + new CleanupQueue().addToQueue(new PathDeletionContext( + FileSystem.get(conf), + getSystemDirectoryForJob(jobId).toUri().getPath())); + throw ioe; + } + + return addJob(jobId, job); + } + } + } + + /** + * Adds a job to the jobtracker. Make sure that the checks are inplace before + * adding a job. This is the core job submission logic + * @param jobId The id for the job submitted which needs to be added + */ + protected synchronized JobStatus addJob(JobID jobId, JobInProgress job) { + totalSubmissions++; + + synchronized (jobs) { + synchronized (taskScheduler) { + jobs.put(job.getProfile().getJobID(), job); + for (JobInProgressListener listener : jobInProgressListeners) { + try { + listener.jobAdded(job); + } catch (IOException ioe) { + LOG.warn("Failed to add and so skipping the job : " + + job.getJobID() + ". Exception : " + ioe); + } + } + } + } + myInstrumentation.submitJob(job.getJobConf(), jobId); + LOG.info("Job " + jobId + " added successfully for user '" + + job.getJobConf().getUser() + "' to queue '" + + job.getJobConf().getQueueName() + "'"); + return job.getStatus(); + } + + // Check whether the specified operation can be performed + // related to the job. + private void checkAccess(JobInProgress job, + QueueManager.QueueOperation oper) + throws IOException { + // get the user group info + UserGroupInformation ugi = UserGroupInformation.getCurrentUGI(); + checkAccess(job, oper, ugi); + } + + // use the passed ugi for checking the access + private void checkAccess(JobInProgress job, QueueManager.QueueOperation oper, + UserGroupInformation ugi) throws IOException { + // get the queue + String queue = job.getProfile().getQueueName(); + if (!queueManager.hasAccess(queue, job, oper, ugi)) { + throw new AccessControlException("User " + + ugi.getUserName() + + " cannot perform " + + "operation " + oper + " on queue " + queue + + ".\n Please run \"hadoop queue -showacls\" " + + "command to find the queues you have access" + + " to ."); + } + } + + /**@deprecated use {@link #getClusterStatus(boolean)}*/ + @Deprecated + public synchronized ClusterStatus getClusterStatus() { + return getClusterStatus(false); + } + + public synchronized ClusterStatus getClusterStatus(boolean detailed) { + synchronized (taskTrackers) { + if (detailed) { + List> trackerNames = taskTrackerNames(); + return new ClusterStatus(trackerNames.get(0), + trackerNames.get(1), + taskTrackers(), + getRunningJobs(), + TASKTRACKER_EXPIRY_INTERVAL, + totalMaps, + totalReduces, + totalMapTaskCapacity, + totalReduceTaskCapacity, + state, getExcludedNodes().size() + ); + } else { + return new ClusterStatus(taskTrackers.size() - + getBlacklistedTrackerCount(), + getBlacklistedTrackerCount(), + TASKTRACKER_EXPIRY_INTERVAL, + totalMaps, + totalReduces, + totalMapTaskCapacity, + totalReduceTaskCapacity, + state, getExcludedNodes().size()); + } + } + } + + public synchronized ClusterMetrics getClusterMetrics() { + return new ClusterMetrics(totalMaps, + totalReduces, occupiedMapSlots, occupiedReduceSlots, + reservedMapSlots, reservedReduceSlots, + totalMapTaskCapacity, totalReduceTaskCapacity, + 
totalSubmissions, + taskTrackers.size() - getBlacklistedTrackerCount(), + getBlacklistedTrackerCount(), getExcludedNodes().size()) ; + } + + public synchronized void killJob(JobID jobid) throws IOException { + if (null == jobid) { + LOG.info("Null jobid object sent to JobTracker.killJob()"); + return; + } + + JobInProgress job = jobs.get(jobid); + + if (null == job) { + LOG.info("killJob(): JobId " + jobid.toString() + " is not a valid job"); + return; + } + + checkAccess(job, QueueManager.QueueOperation.ADMINISTER_JOBS); + killJob(job); + } + + private synchronized void killJob(JobInProgress job) { + LOG.info("Killing job " + job.getJobID()); + JobStatus prevStatus = (JobStatus)job.getStatus().clone(); + job.kill(); + + // Inform the listeners if the job is killed + // Note : + // If the job is killed in the PREP state then the listeners will be + // invoked + // If the job is killed in the RUNNING state then cleanup tasks will be + // launched and the updateTaskStatuses() will take care of it + JobStatus newStatus = (JobStatus)job.getStatus().clone(); + if (prevStatus.getRunState() != newStatus.getRunState() + && newStatus.getRunState() == JobStatus.KILLED) { + JobStatusChangeEvent event = + new JobStatusChangeEvent(job, EventType.RUN_STATE_CHANGED, prevStatus, + newStatus); + updateJobInProgressListeners(event); + } + } + + public void initJob(JobInProgress job) { + if (null == job) { + LOG.info("Init on null job is not valid"); + return; + } + + try { + JobStatus prevStatus = (JobStatus)job.getStatus().clone(); + LOG.info("Initializing " + job.getJobID()); + job.initTasks(); + + // Here the job *should* be in the PREP state. + // From here there are 3 ways : + // - job requires setup : the job remains in PREP state and + // setup is launched to move the job in RUNNING state + // - job is complete (no setup required and no tasks) : complete + // the job and move it to SUCCEEDED + // - job has tasks but doesnt require setup : make the job RUNNING. + if (job.isJobEmpty()) { // is the job empty? + completeEmptyJob(job); // complete it + } else if (!job.isSetupCleanupRequired()) { // setup/cleanup not required + job.completeSetup(); // complete setup and make job running + } + + // Inform the listeners if the job state has changed + // Note : that the job will be in PREP state. + JobStatus newStatus = (JobStatus)job.getStatus().clone(); + if (prevStatus.getRunState() != newStatus.getRunState()) { + JobStatusChangeEvent event = + new JobStatusChangeEvent(job, EventType.RUN_STATE_CHANGED, prevStatus, + newStatus); + synchronized (JobTracker.this) { + updateJobInProgressListeners(event); + } + } + } catch (KillInterruptedException kie) { + // If job was killed during initialization, job state will be KILLED + LOG.error(job.getJobID() + ": Job initialization interrupted:\n" + + StringUtils.stringifyException(kie)); + killJob(job); + } catch (Throwable t) { + // If the job initialization is failed, job state will be FAILED + LOG.error(job.getJobID() + ": Job initialization failed:\n" + + StringUtils.stringifyException(t)); + failJob(job); + } + } + + private synchronized void completeEmptyJob(JobInProgress job) { + job.completeEmptyJob(); + } + + /** + * Fail a job and inform the listeners. Other components in the framework + * should use this to fail a job. 
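+ * <p>Listeners are notified only when the call actually changes the job's
+ * run state.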
+ */ + public synchronized void failJob(JobInProgress job) { + if (null == job) { + LOG.info("Fail on null job is not valid"); + return; + } + + JobStatus prevStatus = (JobStatus)job.getStatus().clone(); + LOG.info("Failing job " + job.getJobID()); + job.fail(); + + // Inform the listeners if the job state has changed + JobStatus newStatus = (JobStatus)job.getStatus().clone(); + if (prevStatus.getRunState() != newStatus.getRunState()) { + JobStatusChangeEvent event = + new JobStatusChangeEvent(job, EventType.RUN_STATE_CHANGED, prevStatus, + newStatus); + updateJobInProgressListeners(event); + } + } + + /** + * Set the priority of a job + * @param jobid id of the job + * @param priority new priority of the job + */ + public synchronized void setJobPriority(JobID jobid, + String priority) + throws IOException { + JobInProgress job = jobs.get(jobid); + if (null == job) { + LOG.info("setJobPriority(): JobId " + jobid.toString() + + " is not a valid job"); + return; + } + checkAccess(job, QueueManager.QueueOperation.ADMINISTER_JOBS); + JobPriority newPriority = JobPriority.valueOf(priority); + setJobPriority(jobid, newPriority); + } + + void storeCompletedJob(JobInProgress job) { + //persists the job info in DFS + completedJobStatusStore.store(job); + } + + /** + * Check if the job has been initialized. + * + * @param job {@link JobInProgress} to be checked + * @return true if the job has been initialized, + * false otherwise + */ + private boolean isJobInited(JobInProgress job) { + return job.inited(); + } + + public JobProfile getJobProfile(JobID jobid) { + synchronized (this) { + JobInProgress job = jobs.get(jobid); + if (job != null) { + // Safe to call JobInProgress.getProfile while holding the lock + // on the JobTracker since it isn't a synchronized method + return job.getProfile(); + } else { + RetireJobInfo info = retireJobs.get(jobid); + if (info != null) { + return info.profile; + } + } + } + return completedJobStatusStore.readJobProfile(jobid); + } + public JobStatus getJobStatus(JobID jobid) { + if (null == jobid) { + LOG.warn("JobTracker.getJobStatus() cannot get status for null jobid"); + return null; + } + synchronized (this) { + JobInProgress job = jobs.get(jobid); + if (job != null) { + // Safe to call JobInProgress.getStatus while holding the lock + // on the JobTracker since it isn't a synchronized method + return job.getStatus(); + } else { + + RetireJobInfo info = retireJobs.get(jobid); + if (info != null) { + return info.status; + } + } + } + return completedJobStatusStore.readJobStatus(jobid); + } + private static final Counters EMPTY_COUNTERS + = new Counters(); + + public Counters getJobCounters(JobID jobid) { + JobInProgress job; + synchronized (this) { + job = jobs.get(jobid); + } + if (job != null) { + if (!isJobInited(job)) { + return EMPTY_COUNTERS; + } + return job.getCounters(); + } + return completedJobStatusStore.readCounters(jobid); + } + private static final TaskReport[] EMPTY_TASK_REPORTS = new TaskReport[0]; + public synchronized TaskReport[] getMapTaskReports(JobID jobid) { + JobInProgress job = jobs.get(jobid); + if (job == null || !isJobInited(job)) { + return EMPTY_TASK_REPORTS; + } else { + Vector reports = new Vector(); + Vector completeMapTasks = + job.reportTasksInProgress(true, true); + for (Iterator it = completeMapTasks.iterator(); it.hasNext();) { + TaskInProgress tip = (TaskInProgress) it.next(); + reports.add(tip.generateSingleReport()); + } + Vector incompleteMapTasks = + job.reportTasksInProgress(true, false); + for (Iterator it = 
incompleteMapTasks.iterator(); it.hasNext();) { + TaskInProgress tip = (TaskInProgress) it.next(); + reports.add(tip.generateSingleReport()); + } + return reports.toArray(new TaskReport[reports.size()]); + } + } + + public synchronized TaskReport[] getReduceTaskReports(JobID jobid) { + JobInProgress job = jobs.get(jobid); + if (job == null || !isJobInited(job)) { + return EMPTY_TASK_REPORTS; + } else { + Vector reports = new Vector(); + Vector completeReduceTasks = job.reportTasksInProgress(false, true); + for (Iterator it = completeReduceTasks.iterator(); it.hasNext();) { + TaskInProgress tip = (TaskInProgress) it.next(); + reports.add(tip.generateSingleReport()); + } + Vector incompleteReduceTasks = job.reportTasksInProgress(false, false); + for (Iterator it = incompleteReduceTasks.iterator(); it.hasNext();) { + TaskInProgress tip = (TaskInProgress) it.next(); + reports.add(tip.generateSingleReport()); + } + return reports.toArray(new TaskReport[reports.size()]); + } + } + + public synchronized TaskReport[] getCleanupTaskReports(JobID jobid) { + JobInProgress job = jobs.get(jobid); + if (job == null || !isJobInited(job)) { + return EMPTY_TASK_REPORTS; + } else { + Vector reports = new Vector(); + Vector completeTasks = job.reportCleanupTIPs(true); + for (Iterator it = completeTasks.iterator(); + it.hasNext();) { + TaskInProgress tip = (TaskInProgress) it.next(); + reports.add(tip.generateSingleReport()); + } + Vector incompleteTasks = job.reportCleanupTIPs(false); + for (Iterator it = incompleteTasks.iterator(); + it.hasNext();) { + TaskInProgress tip = (TaskInProgress) it.next(); + reports.add(tip.generateSingleReport()); + } + return reports.toArray(new TaskReport[reports.size()]); + } + + } + + public synchronized TaskReport[] getSetupTaskReports(JobID jobid) { + JobInProgress job = jobs.get(jobid); + if (job == null || !isJobInited(job)) { + return EMPTY_TASK_REPORTS; + } else { + Vector reports = new Vector(); + Vector completeTasks = job.reportSetupTIPs(true); + for (Iterator it = completeTasks.iterator(); + it.hasNext();) { + TaskInProgress tip = (TaskInProgress) it.next(); + reports.add(tip.generateSingleReport()); + } + Vector incompleteTasks = job.reportSetupTIPs(false); + for (Iterator it = incompleteTasks.iterator(); + it.hasNext();) { + TaskInProgress tip = (TaskInProgress) it.next(); + reports.add(tip.generateSingleReport()); + } + return reports.toArray(new TaskReport[reports.size()]); + } + } + static final String MAPRED_CLUSTER_MAP_MEMORY_MB_PROPERTY = + "mapred.cluster.map.memory.mb"; + static final String MAPRED_CLUSTER_REDUCE_MEMORY_MB_PROPERTY = + "mapred.cluster.reduce.memory.mb"; + + static final String MAPRED_CLUSTER_MAX_MAP_MEMORY_MB_PROPERTY = + "mapred.cluster.max.map.memory.mb"; + static final String MAPRED_CLUSTER_MAX_REDUCE_MEMORY_MB_PROPERTY = + "mapred.cluster.max.reduce.memory.mb"; + + /* + * Returns a list of TaskCompletionEvent for the given job, + * starting from fromEventId. + * @see org.apache.hadoop.mapred.JobSubmissionProtocol#getTaskCompletionEvents(java.lang.String, int, int) + */ + public TaskCompletionEvent[] getTaskCompletionEvents( + JobID jobid, int fromEventId, int maxEvents) throws IOException{ + + JobInProgress job = this.jobs.get(jobid); + if (null != job) { + return job.inited() ? 
job.getTaskCompletionEvents(fromEventId, maxEvents) + : TaskCompletionEvent.EMPTY_ARRAY; + } + + return completedJobStatusStore.readJobTaskCompletionEvents(jobid, fromEventId, maxEvents); + } + + private static final String[] EMPTY_TASK_DIAGNOSTICS = new String[0]; + /** + * Get the diagnostics for a given task + * @param taskId the id of the task + * @return an array of the diagnostic messages + */ + public synchronized String[] getTaskDiagnostics(TaskAttemptID taskId) + throws IOException { + List taskDiagnosticInfo = null; + JobID jobId = taskId.getJobID(); + TaskID tipId = taskId.getTaskID(); + JobInProgress job = jobs.get(jobId); + if (job != null && isJobInited(job)) { + TaskInProgress tip = job.getTaskInProgress(tipId); + if (tip != null) { + taskDiagnosticInfo = tip.getDiagnosticInfo(taskId); + } + + } + return ((taskDiagnosticInfo == null) ? EMPTY_TASK_DIAGNOSTICS : + taskDiagnosticInfo.toArray(new String[taskDiagnosticInfo.size()])); + } + + /** Get all the TaskStatuses from the tipid. */ + TaskStatus[] getTaskStatuses(TaskID tipid) { + TaskInProgress tip = getTip(tipid); + return (tip == null ? new TaskStatus[0] + : tip.getTaskStatuses()); + } + + /** Returns the TaskStatus for a particular taskid. */ + TaskStatus getTaskStatus(TaskAttemptID taskid) { + TaskInProgress tip = getTip(taskid.getTaskID()); + return (tip == null ? null + : tip.getTaskStatus(taskid)); + } + + /** + * Returns the counters for the specified task in progress. + */ + Counters getTipCounters(TaskID tipid) { + TaskInProgress tip = getTip(tipid); + return (tip == null ? null : tip.getCounters()); + } + + /** + * Returns the configured task scheduler for this job tracker. + * @return the configured task scheduler + */ + TaskScheduler getTaskScheduler() { + return taskScheduler; + } + + /** + * Returns specified TaskInProgress, or null. + */ + public TaskInProgress getTip(TaskID tipid) { + JobInProgress job = jobs.get(tipid.getJobID()); + return (job == null ? null : job.getTaskInProgress(tipid)); + } + + /** Mark a Task to be killed */ + public synchronized boolean killTask(TaskAttemptID taskid, boolean shouldFail) throws IOException{ + TaskInProgress tip = taskidToTIPMap.get(taskid); + if(tip != null) { + checkAccess(tip.getJob(), QueueManager.QueueOperation.ADMINISTER_JOBS); + return tip.killTask(taskid, shouldFail); + } + else { + LOG.info("Kill task attempt failed since task " + taskid + " was not found"); + return false; + } + } + + /** + * Get tracker name for a given task id. 
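+ * Returns null when no tracker is currently recorded for the attempt.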
+ * @param taskId the name of the task + * @return The name of the task tracker + */ + public synchronized String getAssignedTracker(TaskAttemptID taskId) { + return taskidToTrackerMap.get(taskId); + } + + public JobStatus[] jobsToComplete() { + return getJobStatus(jobs.values(), true); + } + + public JobStatus[] getAllJobs() { + List list = new ArrayList(); + list.addAll(Arrays.asList(getJobStatus(jobs.values(),false))); + list.addAll(retireJobs.getAllJobStatus()); + return list.toArray(new JobStatus[list.size()]); + } + + /** + * @see org.apache.hadoop.mapred.JobSubmissionProtocol#getSystemDir() + */ + public String getSystemDir() { + Path sysDir = new Path(conf.get("mapred.system.dir", "/tmp/hadoop/mapred/system")); + return fs.makeQualified(sysDir).toString(); + } + + /////////////////////////////////////////////////////////////// + // JobTracker methods + /////////////////////////////////////////////////////////////// + public JobInProgress getJob(JobID jobid) { + return jobs.get(jobid); + } + + // Get the job directory in system directory + Path getSystemDirectoryForJob(JobID id) { + return new Path(getSystemDir(), id.toString()); + } + + /** + * Change the run-time priority of the given job. + * @param jobId job id + * @param priority new {@link JobPriority} for the job + */ + synchronized void setJobPriority(JobID jobId, JobPriority priority) { + JobInProgress job = jobs.get(jobId); + if (job != null) { + synchronized (taskScheduler) { + JobStatus oldStatus = (JobStatus)job.getStatus().clone(); + job.setPriority(priority); + JobStatus newStatus = (JobStatus)job.getStatus().clone(); + JobStatusChangeEvent event = + new JobStatusChangeEvent(job, EventType.PRIORITY_CHANGED, oldStatus, + newStatus); + updateJobInProgressListeners(event); + } + } else { + LOG.warn("Trying to change the priority of an unknown job: " + jobId); + } + } + + //////////////////////////////////////////////////// + // Methods to track all the TaskTrackers + //////////////////////////////////////////////////// + /** + * Accept and process a new TaskTracker profile. We might + * have known about the TaskTracker previously, or it might + * be brand-new. All task-tracker structures have already + * been updated. Just process the contained tasks and any + * jobs that might be affected. + */ + void updateTaskStatuses(TaskTrackerStatus status) { + String trackerName = status.getTrackerName(); + for (TaskStatus report : status.getTaskReports()) { + report.setTaskTracker(trackerName); + TaskAttemptID taskId = report.getTaskID(); + + // expire it + expireLaunchingTasks.removeTask(taskId); + + JobInProgress job = getJob(taskId.getJobID()); + if (job == null) { + // if job is not there in the cleanup list ... add it + synchronized (trackerToJobsToCleanup) { + Set jobs = trackerToJobsToCleanup.get(trackerName); + if (jobs == null) { + jobs = new HashSet(); + trackerToJobsToCleanup.put(trackerName, jobs); + } + jobs.add(taskId.getJobID()); + } + continue; + } + + if (!job.inited()) { + // if job is not yet initialized ... kill the attempt + synchronized (trackerToTasksToCleanup) { + Set tasks = trackerToTasksToCleanup.get(trackerName); + if (tasks == null) { + tasks = new HashSet(); + trackerToTasksToCleanup.put(trackerName, tasks); + } + tasks.add(taskId); + } + continue; + } + + TaskInProgress tip = taskidToTIPMap.get(taskId); + // Check if the tip is known to the jobtracker. 
In case of a restarted + // jt, some tasks might join in later + if (tip != null || hasRestarted()) { + if (tip == null) { + tip = job.getTaskInProgress(taskId.getTaskID()); + job.addRunningTaskToTIP(tip, taskId, status, false); + createTaskEntry(taskId, trackerName, tip); + } + + // Update the job and inform the listeners if necessary + JobStatus prevStatus = (JobStatus)job.getStatus().clone(); + // Clone TaskStatus object here, because JobInProgress + // or TaskInProgress can modify this object and + // the changes should not get reflected in TaskTrackerStatus. + // An old TaskTrackerStatus is used later in countMapTasks, etc. + job.updateTaskStatus(tip, (TaskStatus)report.clone()); + JobStatus newStatus = (JobStatus)job.getStatus().clone(); + + // Update the listeners if an incomplete job completes + if (prevStatus.getRunState() != newStatus.getRunState()) { + JobStatusChangeEvent event = + new JobStatusChangeEvent(job, EventType.RUN_STATE_CHANGED, + prevStatus, newStatus); + updateJobInProgressListeners(event); + } + } else { + LOG.info("Serious problem. While updating status, cannot find taskid " + + report.getTaskID()); + } + + // Process 'failed fetch' notifications + List failedFetchMaps = report.getFetchFailedMaps(); + if (failedFetchMaps != null) { + for (TaskAttemptID mapTaskId : failedFetchMaps) { + TaskInProgress failedFetchMap = taskidToTIPMap.get(mapTaskId); + + if (failedFetchMap != null) { + // Gather information about the map which has to be failed, if need be + String failedFetchTrackerName = getAssignedTracker(mapTaskId); + if (failedFetchTrackerName == null) { + failedFetchTrackerName = "Lost task tracker"; + } + failedFetchMap.getJob().fetchFailureNotification(failedFetchMap, + mapTaskId, + failedFetchTrackerName); + } + } + } + } + } + + /** + * We lost the task tracker! All task-tracker structures have + * already been updated. Just process the contained tasks and any + * jobs that might be affected. + */ + void lostTaskTracker(TaskTracker taskTracker) { + String trackerName = taskTracker.getTrackerName(); + LOG.info("Lost tracker '" + trackerName + "'"); + + // remove the tracker from the local structures + synchronized (trackerToJobsToCleanup) { + trackerToJobsToCleanup.remove(trackerName); + } + + synchronized (trackerToTasksToCleanup) { + trackerToTasksToCleanup.remove(trackerName); + } + + // Inform the recovery manager + recoveryManager.unMarkTracker(trackerName); + + Set lostTasks = trackerToTaskMap.get(trackerName); + trackerToTaskMap.remove(trackerName); + + if (lostTasks != null) { + // List of jobs which had any of their tasks fail on this tracker + Set jobsWithFailures = new HashSet(); + for (TaskAttemptIDWithTip oneTask : lostTasks) { + TaskAttemptID taskId = oneTask.attemptId; + TaskInProgress tip = oneTask.tip; + JobInProgress job = tip.getJob(); + + // Completed reduce tasks never need to be failed, because + // their outputs go to dfs + // And completed maps with zero reducers of the job + // never need to be failed. + if (!tip.isComplete() || + (tip.isMapTask() && !tip.isJobSetupTask() && + job.desiredReduces() != 0)) { + // if the job is done, we don't want to change anything + if (job.getStatus().getRunState() == JobStatus.RUNNING || + job.getStatus().getRunState() == JobStatus.PREP) { + // the state will be KILLED_UNCLEAN, if the task(map or reduce) + // was RUNNING on the tracker + TaskStatus.State killState = (tip.isRunningTask(taskId) && + !tip.isJobSetupTask() && !tip.isJobCleanupTask()) ? 
+ TaskStatus.State.KILLED_UNCLEAN : TaskStatus.State.KILLED; + job.failedTask(tip, taskId, ("Lost task tracker: " + trackerName), + (tip.isMapTask() ? + TaskStatus.Phase.MAP : + TaskStatus.Phase.REDUCE), + killState, + trackerName); + jobsWithFailures.add(job); + } + } else { + // Completed 'reduce' task and completed 'maps' with zero + // reducers of the job, not failed; + // only removed from data-structures. + markCompletedTaskAttempt(trackerName, taskId); + } + } + + // Penalize this tracker for each of the jobs which + // had any tasks running on it when it was 'lost' + // Also, remove any reserved slots on this tasktracker + for (JobInProgress job : jobsWithFailures) { + job.addTrackerTaskFailure(trackerName, taskTracker); + } + + // Cleanup + taskTracker.cancelAllReservations(); + + // Purge 'marked' tasks, needs to be done + // here to prevent hanging references! + removeMarkedTasks(trackerName); + } + } + + /** + * Rereads the config to get hosts and exclude list file names. + * Rereads the files to update the hosts and exclude lists. + */ + public synchronized void refreshNodes() throws IOException { + // check access + PermissionChecker.checkSuperuserPrivilege(mrOwner, supergroup); + + // call the actual api + refreshHosts(); + } + + private synchronized void refreshHosts() throws IOException { + // Reread the config to get mapred.hosts and mapred.hosts.exclude filenames. + // Update the file names and refresh internal includes and excludes list + LOG.info("Refreshing hosts information"); + Configuration conf = new Configuration(); + + hostsReader.updateFileNames(conf.get("mapred.hosts",""), + conf.get("mapred.hosts.exclude", "")); + hostsReader.refresh(); + + Set excludeSet = new HashSet(); + for(Map.Entry eSet : taskTrackers.entrySet()) { + String trackerName = eSet.getKey(); + TaskTrackerStatus status = eSet.getValue().getStatus(); + // Check if not include i.e not in host list or in hosts list but excluded + if (!inHostsList(status) || inExcludedHostsList(status)) { + excludeSet.add(status.getHost()); // add to rejected trackers + } + } + decommissionNodes(excludeSet); + int totalExcluded = hostsReader.getExcludedHosts().size(); + getInstrumentation().setDecommissionedTrackers(totalExcluded); + } + + // Remove a tracker from the system + private void removeTracker(TaskTracker tracker) { + String trackerName = tracker.getTrackerName(); + // Remove completely after marking the tasks as 'KILLED' + lostTaskTracker(tracker); + // tracker is lost, and if it is blacklisted, remove + // it from the count of blacklisted trackers in the cluster + if (isBlacklisted(trackerName)) { + faultyTrackers.decrBlackListedTrackers(1); + } + updateTaskTrackerStatus(trackerName, null); + statistics.taskTrackerRemoved(trackerName); + getInstrumentation().decTrackers(1); + } + + // main decommission + synchronized void decommissionNodes(Set hosts) + throws IOException { + LOG.info("Decommissioning " + hosts.size() + " nodes"); + // create a list of tracker hostnames + synchronized (taskTrackers) { + synchronized (trackerExpiryQueue) { + int trackersDecommissioned = 0; + for (String host : hosts) { + LOG.info("Decommissioning host " + host); + Set trackers = hostnameToTaskTracker.remove(host); + if (trackers != null) { + for (TaskTracker tracker : trackers) { + LOG.info("Decommission: Losing tracker " + tracker.getTrackerName() + + " on host " + host); + removeTracker(tracker); + } + trackersDecommissioned += trackers.size(); + } + LOG.info("Host " + host + " is ready for decommissioning"); + } + } + 
} + } + + /** + * Returns a set of excluded nodes. + */ + Collection getExcludedNodes() { + return hostsReader.getExcludedHosts(); + } + + /** + * Get the localized job file path on the job trackers local file system + * @param jobId id of the job + * @return the path of the job conf file on the local file system + */ + public static String getLocalJobFilePath(JobID jobId){ + return JobHistory.JobInfo.getLocalJobFilePath(jobId); + } + //////////////////////////////////////////////////////////// + // main() + //////////////////////////////////////////////////////////// + + /** + * Start the JobTracker process. This is used only for debugging. As a rule, + * JobTracker should be run as part of the DFS Namenode process. + */ + public static void main(String argv[] + ) throws IOException, InterruptedException { + StringUtils.startupShutdownMessage(JobTracker.class, argv, LOG); + + try { + if(argv.length == 0) { + JobTracker tracker = startTracker(new JobConf()); + tracker.offerService(); + } + else { + if ("-dumpConfiguration".equals(argv[0]) && argv.length == 1) { + dumpConfiguration(new PrintWriter(System.out)); + } + else { + System.out.println("usage: JobTracker [-dumpConfiguration]"); + System.exit(-1); + } + } + } catch (Throwable e) { + LOG.fatal(StringUtils.stringifyException(e)); + System.exit(-1); + } + } + /** + * Dumps the configuration properties in Json format + * @param writer {@link}Writer object to which the output is written + * @throws IOException + */ + private static void dumpConfiguration(Writer writer) throws IOException { + Configuration.dumpConfiguration(new JobConf(), writer); + writer.write("\n"); + // get the QueueManager configuration properties + QueueManager.dumpConfiguration(writer); + writer.write("\n"); + } + + @Override + public JobQueueInfo[] getQueues() throws IOException { + return queueManager.getJobQueueInfos(); + } + + + @Override + public JobQueueInfo getQueueInfo(String queue) throws IOException { + return queueManager.getJobQueueInfo(queue); + } + + @Override + public JobStatus[] getJobsFromQueue(String queue) throws IOException { + Collection jips = taskScheduler.getJobs(queue); + return getJobStatus(jips,false); + } + + @Override + public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException{ + return queueManager.getQueueAcls( + UserGroupInformation.getCurrentUGI()); + } + private synchronized JobStatus[] getJobStatus(Collection jips, + boolean toComplete) { + if(jips == null || jips.isEmpty()) { + return new JobStatus[]{}; + } + ArrayList jobStatusList = new ArrayList(); + for(JobInProgress jip : jips) { + JobStatus status = jip.getStatus(); + status.setStartTime(jip.getStartTime()); + status.setUsername(jip.getProfile().getUser()); + if(toComplete) { + if(status.getRunState() == JobStatus.RUNNING || + status.getRunState() == JobStatus.PREP) { + jobStatusList.add(status); + } + }else { + jobStatusList.add(status); + } + } + return (JobStatus[]) jobStatusList.toArray( + new JobStatus[jobStatusList.size()]); + } + + /** + * Returns the confgiured maximum number of tasks for a single job + */ + int getMaxTasksPerJob() { + return conf.getInt("mapred.jobtracker.maxtasks.per.job", -1); + } + + @Override + public void refreshServiceAcl() throws IOException { + if (!conf.getBoolean( + ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) { + throw new AuthorizationException("Service Level Authorization not enabled!"); + } + SecurityUtil.getPolicy().refresh(); + } + + private void initializeTaskMemoryRelatedConfig() { + 
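+ // Precedence applied below: per-slot sizes come from
+ // mapred.cluster.map.memory.mb and mapred.cluster.reduce.memory.mb. For
+ // the per-task upper limits, the deprecated byte-valued
+ // JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY takes precedence when set (it
+ // is converted to MB and applied to both map and reduce tasks); otherwise
+ // mapred.cluster.max.map.memory.mb and mapred.cluster.max.reduce.memory.mb
+ // are read directly.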
memSizeForMapSlotOnJT = + JobConf.normalizeMemoryConfigValue(conf.getLong( + JobTracker.MAPRED_CLUSTER_MAP_MEMORY_MB_PROPERTY, + JobConf.DISABLED_MEMORY_LIMIT)); + memSizeForReduceSlotOnJT = + JobConf.normalizeMemoryConfigValue(conf.getLong( + JobTracker.MAPRED_CLUSTER_REDUCE_MEMORY_MB_PROPERTY, + JobConf.DISABLED_MEMORY_LIMIT)); + + if (conf.get(JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY) != null) { + LOG.warn( + JobConf.deprecatedString( + JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY)+ + " instead use "+JobTracker.MAPRED_CLUSTER_MAX_MAP_MEMORY_MB_PROPERTY+ + " and " + JobTracker.MAPRED_CLUSTER_MAX_REDUCE_MEMORY_MB_PROPERTY + ); + + limitMaxMemForMapTasks = limitMaxMemForReduceTasks = + JobConf.normalizeMemoryConfigValue( + conf.getLong( + JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY, + JobConf.DISABLED_MEMORY_LIMIT)); + if (limitMaxMemForMapTasks != JobConf.DISABLED_MEMORY_LIMIT && + limitMaxMemForMapTasks >= 0) { + limitMaxMemForMapTasks = limitMaxMemForReduceTasks = + limitMaxMemForMapTasks / + (1024 * 1024); //Converting old values in bytes to MB + } + } else { + limitMaxMemForMapTasks = + JobConf.normalizeMemoryConfigValue( + conf.getLong( + JobTracker.MAPRED_CLUSTER_MAX_MAP_MEMORY_MB_PROPERTY, + JobConf.DISABLED_MEMORY_LIMIT)); + limitMaxMemForReduceTasks = + JobConf.normalizeMemoryConfigValue( + conf.getLong( + JobTracker.MAPRED_CLUSTER_MAX_REDUCE_MEMORY_MB_PROPERTY, + JobConf.DISABLED_MEMORY_LIMIT)); + } + + LOG.info(new StringBuilder().append("Scheduler configured with ").append( + "(memSizeForMapSlotOnJT, memSizeForReduceSlotOnJT,").append( + " limitMaxMemForMapTasks, limitMaxMemForReduceTasks) (").append( + memSizeForMapSlotOnJT).append(", ").append(memSizeForReduceSlotOnJT) + .append(", ").append(limitMaxMemForMapTasks).append(", ").append( + limitMaxMemForReduceTasks).append(")")); + } + + private boolean perTaskMemoryConfigurationSetOnJT() { + if (limitMaxMemForMapTasks == JobConf.DISABLED_MEMORY_LIMIT + || limitMaxMemForReduceTasks == JobConf.DISABLED_MEMORY_LIMIT + || memSizeForMapSlotOnJT == JobConf.DISABLED_MEMORY_LIMIT + || memSizeForReduceSlotOnJT == JobConf.DISABLED_MEMORY_LIMIT) { + return false; + } + return true; + } + + /** + * Check the job if it has invalid requirements and throw and IOException if does have. + * + * @param job + * @throws IOException + */ + private void checkMemoryRequirements(JobInProgress job) + throws IOException { + if (!perTaskMemoryConfigurationSetOnJT()) { + LOG.debug("Per-Task memory configuration is not set on JT. " + + "Not checking the job for invalid memory requirements."); + return; + } + + boolean invalidJob = false; + String msg = ""; + long maxMemForMapTask = job.getMemoryForMapTask(); + long maxMemForReduceTask = job.getMemoryForReduceTask(); + + if (maxMemForMapTask == JobConf.DISABLED_MEMORY_LIMIT + || maxMemForReduceTask == JobConf.DISABLED_MEMORY_LIMIT) { + invalidJob = true; + msg = "Invalid job requirements."; + } + + if (maxMemForMapTask > limitMaxMemForMapTasks + || maxMemForReduceTask > limitMaxMemForReduceTasks) { + invalidJob = true; + msg = "Exceeds the cluster's max-memory-limit."; + } + + if (invalidJob) { + StringBuilder jobStr = + new StringBuilder().append(job.getJobID().toString()).append("(") + .append(maxMemForMapTask).append(" memForMapTasks ").append( + maxMemForReduceTask).append(" memForReduceTasks): "); + LOG.warn(jobStr.toString() + msg); + + throw new IOException(jobStr.toString() + msg); + } + } + + @Override + public void refreshQueueAcls() throws IOException{ + LOG.info("Refreshing queue acls. 
requested by : " + + UserGroupInformation.getCurrentUGI().getUserName()); + this.queueManager.refreshAcls(new Configuration(this.conf)); + } + + synchronized String getReasonsForBlacklisting(String host) { + FaultInfo fi = faultyTrackers.getFaultInfo(host, false); + if (fi == null) { + return ""; + } + return fi.getTrackerFaultReport(); + } + + /** Test Methods */ + synchronized Set getReasonForBlackList(String host) { + FaultInfo fi = faultyTrackers.getFaultInfo(host, false); + if (fi == null) { + return new HashSet(); + } + return fi.getReasonforblacklisting(); + } + + /* + * This method is synchronized to make sure that the locking order + * "faultyTrackers.potentiallyFaultyTrackers lock followed by taskTrackers + * lock" is under JobTracker lock to avoid deadlocks. + */ + synchronized void incrementFaults(String hostName) { + faultyTrackers.incrementFaults(hostName); + } + + public synchronized void retireCompletedJobs() { + synchronized (jobs) { + RETIRE_COMPLETED_JOBS = true; + } + } + + /** + * @return the pluggable object for obtaining job resource information + */ + public ResourceReporter getResourceReporter() { + return resourceReporter; + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/JobTrackerInstrumentation.java b/src/mapred/org/apache/hadoop/mapred/JobTrackerInstrumentation.java new file mode 100644 index 0000000..0da9ee2 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JobTrackerInstrumentation.java @@ -0,0 +1,193 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.mapred; + +class JobTrackerInstrumentation { + + protected final JobTracker tracker; + + public JobTrackerInstrumentation(JobTracker jt, JobConf conf) { + tracker = jt; + } + + public void launchMap(TaskAttemptID taskAttemptID) + { } + + public void completeMap(TaskAttemptID taskAttemptID) + { } + + public void failedMap(TaskAttemptID taskAttemptID) + { } + + public void launchReduce(TaskAttemptID taskAttemptID) + { } + + public void completeReduce(TaskAttemptID taskAttemptID) + { } + + public void failedReduce(TaskAttemptID taskAttemptID) + { } + + public void submitJob(JobConf conf, JobID id) + { } + + public void completeJob(JobConf conf, JobID id) + { } + + public void terminateJob(JobConf conf, JobID id) + { } + + public void finalizeJob(JobConf conf, JobID id) + { } + + public void addWaitingMaps(JobID id, int task) + { } + + public void decWaitingMaps(JobID id, int task) + { } + + public void addWaitingReduces(JobID id, int task) + { } + + public void decWaitingReduces(JobID id, int task) + { } + + public void setMapSlots(int slots) + { } + + public void setReduceSlots(int slots) + { } + + public void addBlackListedMapSlots(int slots) + { } + + public void decBlackListedMapSlots(int slots) + { } + + public void addBlackListedReduceSlots(int slots) + { } + + public void decBlackListedReduceSlots(int slots) + { } + + public void addReservedMapSlots(int slots) + { } + + public void decReservedMapSlots(int slots) + { } + + public void addReservedReduceSlots(int slots) + { } + + public void decReservedReduceSlots(int slots) + { } + + public void addOccupiedMapSlots(int slots) + { } + + public void decOccupiedMapSlots(int slots) + { } + + public void addOccupiedReduceSlots(int slots) + { } + + public void decOccupiedReduceSlots(int slots) + { } + + public void failedJob(JobConf conf, JobID id) + { } + + public void killedJob(JobConf conf, JobID id) + { } + + public void addPrepJob(JobConf conf, JobID id) + { } + + public void decPrepJob(JobConf conf, JobID id) + { } + + public void addRunningJob(JobConf conf, JobID id) + { } + + public void decRunningJob(JobConf conf, JobID id) + { } + + public void addRunningMaps(int tasks) + { } + + public void decRunningMaps(int tasks) + { } + + public void addRunningReduces(int tasks) + { } + + public void decRunningReduces(int tasks) + { } + + public void killedMap(TaskAttemptID taskAttemptID) + { } + + public void killedReduce(TaskAttemptID taskAttemptID) + { } + + public void addTrackers(int trackers) + { } + + public void decTrackers(int trackers) + { } + + public void addBlackListedTrackers(int trackers) + { } + + public void decBlackListedTrackers(int trackers) + { } + + public void setDecommissionedTrackers(int trackers) + { } + + public synchronized void addLaunchedJobs(long submitTime) + { } + + public synchronized void speculateMap(TaskAttemptID taskAttemptID) + { } + + public synchronized void speculateReduce(TaskAttemptID taskAttemptID) + { } + + public synchronized void speculativeSucceededMap(TaskAttemptID taskAttemptID) + { } + + public synchronized void speculativeSucceededReduce(TaskAttemptID + taskAttemptID) + { } + + public synchronized void launchDataLocalMap(TaskAttemptID taskAttemptID) + { } + + public synchronized void launchRackLocalMap(TaskAttemptID taskAttemptID) + { } + + public synchronized void addMapInputBytes(long size) + { } + + public synchronized void addLocalMapInputBytes(long size) + { } + + public synchronized void addRackMapInputBytes(long size) + { } +} diff --git 
a/src/mapred/org/apache/hadoop/mapred/JobTrackerMetricsInst.java b/src/mapred/org/apache/hadoop/mapred/JobTrackerMetricsInst.java new file mode 100644 index 0000000..5f4c9ae --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JobTrackerMetricsInst.java @@ -0,0 +1,818 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.math.*; + +import org.apache.hadoop.mapred.Counters.Group; +import org.apache.hadoop.mapreduce.Counter; +import org.apache.hadoop.metrics.MetricsContext; +import org.apache.hadoop.metrics.MetricsRecord; +import org.apache.hadoop.metrics.MetricsUtil; +import org.apache.hadoop.metrics.Updater; +import org.apache.hadoop.metrics.jvm.JvmMetrics; + +class JobTrackerMetricsInst extends JobTrackerInstrumentation implements Updater { + private final MetricsRecord metricsRecord; + + private int numMapTasksLaunched = 0; + private int numMapTasksCompleted = 0; + private int numMapTasksFailed = 0; + private int numReduceTasksLaunched = 0; + private int numReduceTasksCompleted = 0; + private int numReduceTasksFailed = 0; + private int numJobsSubmitted = 0; + private int numJobsCompleted = 0; + private int numWaitingMaps = 0; + private int numWaitingReduces = 0; + + private int numSpeculativeMaps = 0; + private int numSpeculativeReduces = 0; + private int numSpeculativeSucceededMaps = 0; + private int numSpeculativeSucceededReduces = 0; + private int numDataLocalMaps = 0; + private int numRackLocalMaps = 0; + + private final Counters countersToMetrics = new Counters(); + + //Cluster status fields. 
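+ // These counters are published to the "jobtracker" metrics record by
+ // doUpdates() below.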
+ private volatile int numMapSlots = 0; + private volatile int numReduceSlots = 0; + private int numBlackListedMapSlots = 0; + private int numBlackListedReduceSlots = 0; + + private int numReservedMapSlots = 0; + private int numReservedReduceSlots = 0; + private int numOccupiedMapSlots = 0; + private int numOccupiedReduceSlots = 0; + + private int numJobsFailed = 0; + private int numJobsKilled = 0; + + private int numJobsPreparing = 0; + private int numJobsRunning = 0; + + private int numRunningMaps = 0; + private int numRunningReduces = 0; + + private int numMapTasksKilled = 0; + private int numReduceTasksKilled = 0; + + private int numTrackers = 0; + private int numTrackersBlackListed = 0; + private int numTrackersDecommissioned = 0; + + //Extended JobTracker Metrics + private long extMetUpdatePeriod = 0; + private long extMetLastUpdateTime = 0; + private double totalCpu = 0; + private double totalMemory = 0; + private double avgSetupTime = 0; + private long totalSubmitTime = 0; + private long numJobsLaunched = 0; + private double avgCpuUtil = 0; + private double avgJobShare = 0; + private double devJobShare = 0; + private double covJobShare = 0; + private long totalMapInputBytes = 0; + private long localMapInputBytes = 0; + private long rackMapInputBytes = 0; + + + public JobTrackerMetricsInst(JobTracker tracker, JobConf conf) { + super(tracker, conf); + String sessionId = conf.getSessionId(); + // Initiate JVM Metrics + JvmMetrics.init("JobTracker", sessionId); + // Create a record for map-reduce metrics + MetricsContext context = MetricsUtil.getContext("mapred"); + metricsRecord = MetricsUtil.createRecord(context, "jobtracker"); + metricsRecord.setTag("sessionId", sessionId); + context.registerUpdater(this); + + extMetUpdatePeriod = conf.getLong( + "mapred.jobtrakcer.extended.metric.update.period", + 5 * 60 * 1000); + } + + /** + * Since this object is a registered updater, this method will be called + * periodically, e.g. every 5 seconds. 
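   * The actual interval is whatever period is configured for the "mapred"
   * metrics context (typically set in hadoop-metrics.properties).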
+ */ + public void doUpdates(MetricsContext unused) { + // In case of running in LocalMode tracker == null + if (tracker != null) { + synchronized (tracker) { + synchronized (this) { + numRunningMaps = 0; + numRunningReduces = 0; + + numWaitingMaps = 0; + numWaitingReduces = 0; + + List jobs = tracker.getRunningJobs(); + for (JobInProgress jip : jobs) { + for (TaskInProgress tip : jip.maps) { + if (tip.isRunning()) { + numRunningMaps++; + } else if (tip.isRunnable()) { + numWaitingMaps++; + } + } + for (TaskInProgress tip : jip.reduces) { + if (tip.isRunning()) { + numRunningReduces++; + } else if (tip.isRunnable()) { + numWaitingReduces++; + } + + } + } + } + } + } + synchronized (this) { + metricsRecord.setMetric("map_slots", numMapSlots); + metricsRecord.setMetric("reduce_slots", numReduceSlots); + metricsRecord.incrMetric("blacklisted_maps", numBlackListedMapSlots); + metricsRecord.incrMetric("blacklisted_reduces", + numBlackListedReduceSlots); + metricsRecord.incrMetric("maps_launched", numMapTasksLaunched); + metricsRecord.incrMetric("maps_completed", numMapTasksCompleted); + metricsRecord.incrMetric("maps_failed", numMapTasksFailed); + metricsRecord.incrMetric("reduces_launched", numReduceTasksLaunched); + metricsRecord.incrMetric("reduces_completed", numReduceTasksCompleted); + metricsRecord.incrMetric("reduces_failed", numReduceTasksFailed); + metricsRecord.incrMetric("jobs_submitted", numJobsSubmitted); + metricsRecord.incrMetric("jobs_completed", numJobsCompleted); + metricsRecord.setMetric("waiting_maps", numWaitingMaps); + metricsRecord.setMetric("waiting_reduces", numWaitingReduces); + metricsRecord.incrMetric("num_speculative_maps", numSpeculativeMaps); + metricsRecord.incrMetric("num_speculative_reduces", numSpeculativeReduces); + metricsRecord.incrMetric("num_speculative_succeeded_maps", + numSpeculativeSucceededMaps); + metricsRecord.incrMetric("num_speculative_succeeded_reduces", + numSpeculativeSucceededReduces); + metricsRecord.incrMetric("num_dataLocal_maps", numDataLocalMaps); + metricsRecord.incrMetric("num_rackLocal_maps", numRackLocalMaps); + + metricsRecord.incrMetric("reserved_map_slots", numReservedMapSlots); + metricsRecord.incrMetric("reserved_reduce_slots", numReservedReduceSlots); + metricsRecord.incrMetric("occupied_map_slots", numOccupiedMapSlots); + metricsRecord.incrMetric("occupied_reduce_slots", numOccupiedReduceSlots); + + metricsRecord.incrMetric("jobs_failed", numJobsFailed); + metricsRecord.incrMetric("jobs_killed", numJobsKilled); + + metricsRecord.incrMetric("jobs_preparing", numJobsPreparing); + metricsRecord.incrMetric("jobs_running", numJobsRunning); + + metricsRecord.incrMetric("running_maps", numRunningMaps); + metricsRecord.incrMetric("running_reduces", numRunningReduces); + + metricsRecord.incrMetric("maps_killed", numMapTasksKilled); + metricsRecord.incrMetric("reduces_killed", numReduceTasksKilled); + + metricsRecord.incrMetric("trackers", numTrackers); + metricsRecord.incrMetric("trackers_blacklisted", numTrackersBlackListed); + metricsRecord.setMetric("trackers_decommissioned", + numTrackersDecommissioned); + + metricsRecord.incrMetric("num_launched_jobs", numJobsLaunched); + metricsRecord.incrMetric("total_submit_time", totalSubmitTime); + + metricsRecord.incrMetric("total_map_input_bytes", totalMapInputBytes); + metricsRecord.incrMetric("local_map_input_bytes", localMapInputBytes); + metricsRecord.incrMetric("rack_map_input_bytes", rackMapInputBytes); + + for (Group group: countersToMetrics) { + String groupName = group.getName(); + 
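        // Flatten each accumulated job counter into a metric named
        // "<group>_<counter>", lower-cased with every character outside
        // [a-zA-Z_] replaced by '_'.  For example, the group
        // "FileSystemCounters" with counter "HDFS_BYTES_READ" is published
        // as "filesystemcounters_hdfs_bytes_read".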
for (Counter counter : group) { + String name = groupName + "_" + counter.getName(); + name = name.replaceAll("[^a-zA-Z_]", "_").toLowerCase(); + metricsRecord.incrMetric(name, counter.getValue()); + } + } + clearCounters(); + + numMapTasksLaunched = 0; + numMapTasksCompleted = 0; + numMapTasksFailed = 0; + numReduceTasksLaunched = 0; + numReduceTasksCompleted = 0; + numReduceTasksFailed = 0; + numJobsSubmitted = 0; + numJobsCompleted = 0; + numWaitingMaps = 0; + numWaitingReduces = 0; + numBlackListedMapSlots = 0; + numBlackListedReduceSlots = 0; + numSpeculativeMaps = 0; + numSpeculativeReduces = 0; + numSpeculativeSucceededMaps = 0; + numSpeculativeSucceededReduces = 0; + numDataLocalMaps = 0; + numRackLocalMaps = 0; + + numReservedMapSlots = 0; + numReservedReduceSlots = 0; + numOccupiedMapSlots = 0; + numOccupiedReduceSlots = 0; + + numJobsFailed = 0; + numJobsKilled = 0; + + numJobsPreparing = 0; + numJobsRunning = 0; + + numRunningMaps = 0; + numRunningReduces = 0; + + numMapTasksKilled = 0; + numReduceTasksKilled = 0; + + numTrackers = 0; + numTrackersBlackListed = 0; + + totalSubmitTime = 0; + numJobsLaunched = 0; + + totalMapInputBytes = 0; + localMapInputBytes = 0; + rackMapInputBytes = 0; + } + + long now = JobTracker.getClock().getTime(); + if ((now - extMetLastUpdateTime >= extMetUpdatePeriod) && (tracker != null)) { + synchronized (tracker) { + // Not syncrhonized on JobTrackerMetricsInst for a reason + updateExtendedMetrics(); + } + } + + metricsRecord.update(); + } + + public void updateExtendedMetrics() { + // When getting into this method we cannot have a JobTrackerMetricsInst lock + // because we are locking JobInProgress in this method and there is + // another code path that locks JobInProgress and then locks the Metrics + long now = JobTracker.getClock().getTime(); + + double newTotalCpu = 0; + double newTotalMemory = 0; + double newAvgSetupTime = 0; + double newAvgCpuUtil = 0; + double newAvgJobShare = 0; + long totalRunningJobs = 0; + long[] totalUsedTimeOfTasks = new long[1]; + long[] partUsedTimeOfTasks = new long[1]; + + long[] totalWaitTimeMapsLaunched = new long[1]; + long[] totalWaitTimeMapsPending = new long[1]; + long[] numMapsLaunched = new long[1]; + long[] numMapsPending = new long[1]; + totalWaitTimeMapsLaunched[0] = 0; + totalWaitTimeMapsPending[0] = 0; + numMapsLaunched[0] = 0; + numMapsPending[0] = 0; + + List runningJobList = tracker.getRunningJobs(); + for (Iterator it = runningJobList.iterator(); + it.hasNext();) { + JobInProgress job = it.next(); + if (!job.inited()) { + it.remove(); + } + } + double [] runningJobShare = new double[runningJobList.size()]; + + ResourceReporter reporter = tracker.getResourceReporter(); + + int ijob = 0; + double totalSlotsTime = (numMapSlots + numReduceSlots) + * (now - extMetLastUpdateTime); + for (JobInProgress job: runningJobList) { + // calculate the time of setup tasks + newAvgSetupTime += getSetupTimeOfTasks( + tracker.getSetupTaskReports(job.getJobID()), now); + + // calculate the wait time of map tasks. 
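      // Here "wait time" is measured from the job's launch time: for maps
      // that have started it is (task start - job launch); for maps still
      // pending it is (now - job launch).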
+ calculateWaitTimeOfTasks(job, + tracker.getMapTaskReports(job.getJobID()), + totalWaitTimeMapsLaunched, + totalWaitTimeMapsPending, + numMapsLaunched, numMapsPending, + now, extMetLastUpdateTime); + + // calculate the sum of wall-time of all tasks + totalUsedTimeOfTasks[0] = 0; + partUsedTimeOfTasks[0] = 0; + calculateUsedTimeOfTasks(job, + tracker.getSetupTaskReports(job.getJobID()), + totalUsedTimeOfTasks, partUsedTimeOfTasks, + now, extMetLastUpdateTime); + calculateUsedTimeOfTasks(job, + tracker.getMapTaskReports(job.getJobID()), + totalUsedTimeOfTasks, partUsedTimeOfTasks, + now, extMetLastUpdateTime); + calculateUsedTimeOfTasks(job, + tracker.getReduceTaskReports(job.getJobID()), + totalUsedTimeOfTasks, partUsedTimeOfTasks, + now, extMetLastUpdateTime); + calculateUsedTimeOfTasks(job, + tracker.getCleanupTaskReports(job.getJobID()), + totalUsedTimeOfTasks, partUsedTimeOfTasks, + now, extMetLastUpdateTime); + + if (totalSlotsTime != 0) { + runningJobShare[ijob] = (double) partUsedTimeOfTasks[0] / + totalSlotsTime; + } else { + runningJobShare[ijob] = 0; + } + newAvgJobShare += runningJobShare[ijob]; + ++ijob; + + // Compute the CPU and memory usage + if (reporter != null) { + double cpuUse = reporter.getJobCpuCumulatedUsageTime(job.getJobID()); + if (cpuUse != ResourceReporter.UNAVAILABLE && + totalUsedTimeOfTasks[0] != 0) { + newAvgCpuUtil += cpuUse / totalUsedTimeOfTasks[0]; + } + + double cpu = reporter.getJobCpuPercentageOnCluster(job.getJobID()); + double memory = reporter.getJobMemPercentageOnCluster(job.getJobID()); + newTotalCpu += cpu != ResourceReporter.UNAVAILABLE ? cpu : 0; + newTotalMemory += memory != ResourceReporter.UNAVAILABLE ? memory : 0; + } + } + totalRunningJobs = runningJobList.size(); + + double newDevJobShare = devJobShare; + double newCovJobShare = covJobShare; + if (totalRunningJobs != 0) { + newAvgCpuUtil /= totalRunningJobs; + newAvgSetupTime /= totalRunningJobs; + newAvgJobShare /= totalRunningJobs; + newDevJobShare = 0; + if (runningJobShare != null) { + for (int i = 0; i < runningJobShare.length; ++i) + newDevJobShare += (runningJobShare[i] - newAvgJobShare) * + (runningJobShare[i] - newAvgJobShare); + } + newDevJobShare = Math.sqrt(newDevJobShare / totalRunningJobs); + if (newAvgJobShare != 0) { + newCovJobShare = newDevJobShare / newAvgJobShare; + } else { + newCovJobShare = 0; + } + } + synchronized (this) { + totalCpu = newTotalCpu; + totalMemory = newTotalMemory; + avgCpuUtil = newAvgCpuUtil; + avgJobShare = newAvgJobShare; + devJobShare = newDevJobShare; + covJobShare = newCovJobShare; + + metricsRecord.setMetric("total_cpu", (float) totalCpu); + metricsRecord.setMetric("total_memory", (float) totalMemory); + metricsRecord.setMetric("avg_cpu_utilization", (float) avgCpuUtil); + metricsRecord.setMetric("avg_waittime_maps_launched", + (float) totalWaitTimeMapsLaunched[0] / numMapsLaunched[0] / 1000); + metricsRecord.setMetric("avg_waittime_maps_pending", + (float) totalWaitTimeMapsPending[0] / numMapsPending[0] / 1000); + metricsRecord.setMetric("avg_waittime_maps", + (float) (totalWaitTimeMapsLaunched[0] + totalWaitTimeMapsPending[0]) + / (numMapsLaunched[0] + numMapsPending[0]) / 1000); + metricsRecord.setMetric("avg_setup_time", (float) avgSetupTime / 1000); + metricsRecord.setMetric("num_running_jobs_share", (float) totalRunningJobs); + metricsRecord.setMetric("avg_job_share", (float) avgJobShare); + metricsRecord.setMetric("dev_job_share", (float) devJobShare); + metricsRecord.setMetric("cov_job_share", (float) covJobShare); + + 
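      // Remember when this extended-metrics pass ran; doUpdates() compares
      // against this timestamp to decide whether extMetUpdatePeriod has
      // elapsed before running the (comparatively expensive) pass again.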
extMetLastUpdateTime = now; + + } + } + + private long getSetupTimeOfTasks(TaskReport[] reports, long now) { + if (reports.length == 0) + return 0; + int nSetupTasks = 0; + long duration = 0; + for (int i = 0; i < reports.length; i++) { + TaskReport report = reports[i]; + if (report.getCurrentStatus() == TIPStatus.COMPLETE) { + duration = report.getFinishTime() - report.getStartTime(); + ++nSetupTasks; + } else + if (report.getCurrentStatus() == TIPStatus.RUNNING) { + duration = now - report.getStartTime(); + ++nSetupTasks; + } + } + if (nSetupTasks > 1) { + return 0; + } else { + return duration; + } + } + + private void calculateWaitTimeOfTasks(JobInProgress job, + TaskReport[] reports, + long totalWaitTimeOfLaunchedTasks[], + long totalWaitTimeOfPendingTasks[], + long numLaunchedTasks[], + long numPendingTasks[], + long now, long last) { + long aggrWaitLaunched = 0; + long aggrWaitPending = 0; + long numPending = 0; + long numLaunched = 0; + for (int i = 0; i < reports.length; i++) { + TaskReport report = reports[i]; + if (report.getCurrentStatus() == TIPStatus.PENDING) { + aggrWaitPending += (now - job.getLaunchTime()); + numPending++; + } else if ((report.getCurrentStatus() == TIPStatus.COMPLETE || + report.getCurrentStatus() == TIPStatus.RUNNING) && + report.getFinishTime() >= last) { + aggrWaitLaunched += (report.getStartTime() - job.getLaunchTime()); + numLaunched++; + } + } + totalWaitTimeOfLaunchedTasks[0] += aggrWaitLaunched; + totalWaitTimeOfPendingTasks[0] += aggrWaitPending; + numLaunchedTasks[0] += numLaunched; + numPendingTasks[0] += numPending; + } + + private void calculateUsedTimeOfTasks(JobInProgress job, + TaskReport[] reports, + long totalUsedTimeOfTasks[], + long partUsedTimeOfTasks[], + long now, long last) { + long aggrUsed = 0; + long aggrPartUsed = 0; + for (int i = 0; i < reports.length; i++) { + TaskReport report = reports[i]; + if (report.getCurrentStatus() == TIPStatus.COMPLETE || + report.getCurrentStatus() == TIPStatus.KILLED) { + aggrUsed += report.getFinishTime() - report.getStartTime(); + if (report.getFinishTime() > last) { + aggrPartUsed += report.getFinishTime() - last; + } + } else if (report.getCurrentStatus() == TIPStatus.RUNNING) { + aggrUsed += now - report.getStartTime(); + if (report.getStartTime() > last) + aggrPartUsed += now - report.getStartTime(); + else + aggrPartUsed += now - last; + } + } + totalUsedTimeOfTasks[0] += aggrUsed; + partUsedTimeOfTasks[0] += aggrPartUsed; + } + + @Override + public synchronized void launchMap(TaskAttemptID taskAttemptID) { + ++numMapTasksLaunched; + decWaitingMaps(taskAttemptID.getJobID(), 1); + } + @Override + public synchronized void launchDataLocalMap(TaskAttemptID taskAttemptID) { + ++numDataLocalMaps; + } + @Override + public synchronized void launchRackLocalMap(TaskAttemptID taskAttemptID) { + ++numRackLocalMaps; + } + + @Override + public synchronized void completeMap(TaskAttemptID taskAttemptID) { + ++numMapTasksCompleted; + } + + @Override + public synchronized void speculateMap(TaskAttemptID taskAttemptID) { + ++numSpeculativeMaps; + } + + public synchronized void speculativeSucceededMap( + TaskAttemptID taskAttemptID) { + ++numSpeculativeSucceededMaps; + } + + public synchronized void speculativeSucceededReduce( + TaskAttemptID taskAttemptID) { + ++numSpeculativeSucceededReduces; + } + + @Override + public synchronized void failedMap(TaskAttemptID taskAttemptID) { + ++numMapTasksFailed; + addWaitingMaps(taskAttemptID.getJobID(), 1); + } + + @Override + public synchronized void 
launchReduce(TaskAttemptID taskAttemptID) { + ++numReduceTasksLaunched; + decWaitingReduces(taskAttemptID.getJobID(), 1); + } + + @Override + public synchronized void completeReduce(TaskAttemptID taskAttemptID) { + ++numReduceTasksCompleted; + } + + @Override + public synchronized void speculateReduce(TaskAttemptID taskAttemptID) { + ++numSpeculativeReduces; + } + + @Override + public synchronized void failedReduce(TaskAttemptID taskAttemptID) { + ++numReduceTasksFailed; + addWaitingReduces(taskAttemptID.getJobID(), 1); + } + + @Override + public synchronized void submitJob(JobConf conf, JobID id) { + ++numJobsSubmitted; + } + + @Override + public synchronized void completeJob(JobConf conf, JobID id) { + collectJobCounters(id); + ++numJobsCompleted; + } + + @Override + public synchronized void addWaitingMaps(JobID id, int task) { + } + + @Override + public synchronized void decWaitingMaps(JobID id, int task) { + } + + @Override + public synchronized void addWaitingReduces(JobID id, int task) { + } + + @Override + public synchronized void decWaitingReduces(JobID id, int task){ + } + + @Override + public synchronized void setMapSlots(int slots) { + numMapSlots = slots; + } + + @Override + public synchronized void setReduceSlots(int slots) { + numReduceSlots = slots; + } + + @Override + public synchronized void addBlackListedMapSlots(int slots){ + numBlackListedMapSlots += slots; + } + + @Override + public synchronized void decBlackListedMapSlots(int slots){ + numBlackListedMapSlots -= slots; + } + + @Override + public synchronized void addBlackListedReduceSlots(int slots){ + numBlackListedReduceSlots += slots; + } + + @Override + public synchronized void decBlackListedReduceSlots(int slots){ + numBlackListedReduceSlots -= slots; + } + + @Override + public synchronized void addReservedMapSlots(int slots) + { + numReservedMapSlots += slots; + } + + @Override + public synchronized void decReservedMapSlots(int slots) + { + numReservedMapSlots -= slots; + } + + @Override + public synchronized void addReservedReduceSlots(int slots) + { + numReservedReduceSlots += slots; + } + + @Override + public synchronized void decReservedReduceSlots(int slots) + { + numReservedReduceSlots -= slots; + } + + @Override + public synchronized void addOccupiedMapSlots(int slots) + { + numOccupiedMapSlots += slots; + } + + @Override + public synchronized void decOccupiedMapSlots(int slots) + { + numOccupiedMapSlots -= slots; + } + + @Override + public synchronized void addOccupiedReduceSlots(int slots) + { + numOccupiedReduceSlots += slots; + } + + @Override + public synchronized void decOccupiedReduceSlots(int slots) + { + numOccupiedReduceSlots -= slots; + } + + @Override + public synchronized void failedJob(JobConf conf, JobID id) + { + numJobsFailed++; + } + + @Override + public synchronized void killedJob(JobConf conf, JobID id) + { + numJobsKilled++; + } + + @Override + public synchronized void addPrepJob(JobConf conf, JobID id) + { + numJobsPreparing++; + } + + @Override + public synchronized void decPrepJob(JobConf conf, JobID id) + { + numJobsPreparing--; + } + + @Override + public synchronized void addRunningJob(JobConf conf, JobID id) + { + numJobsRunning++; + } + + @Override + public synchronized void decRunningJob(JobConf conf, JobID id) + { + numJobsRunning--; + } + + @Override + public synchronized void addRunningMaps(int task) + { + } + + @Override + public synchronized void decRunningMaps(int task) + { + } + + @Override + public synchronized void addRunningReduces(int task) + { + } + + @Override + 
public synchronized void decRunningReduces(int task) + { + } + + @Override + public synchronized void killedMap(TaskAttemptID taskAttemptID) + { + numMapTasksKilled++; + } + + @Override + public synchronized void killedReduce(TaskAttemptID taskAttemptID) + { + numReduceTasksKilled++; + } + + @Override + public synchronized void addTrackers(int trackers) + { + numTrackers += trackers; + } + + @Override + public synchronized void decTrackers(int trackers) + { + numTrackers -= trackers; + } + + @Override + public synchronized void addBlackListedTrackers(int trackers) + { + numTrackersBlackListed += trackers; + } + + @Override + public synchronized void decBlackListedTrackers(int trackers) + { + numTrackersBlackListed -= trackers; + } + + @Override + public synchronized void setDecommissionedTrackers(int trackers) + { + numTrackersDecommissioned = trackers; + } + + @Override + public synchronized void addLaunchedJobs(long submitTime) + { + ++numJobsLaunched; + totalSubmitTime += submitTime; + } + + @Override + public synchronized void addMapInputBytes(long size) { + totalMapInputBytes += size; + } + + @Override + public synchronized void addLocalMapInputBytes(long size) { + localMapInputBytes += size; + addMapInputBytes(size); + } + + @Override + public synchronized void addRackMapInputBytes(long size) { + rackMapInputBytes += size; + addMapInputBytes(size); + } + + @Override + public void terminateJob(JobConf conf, JobID id) { + collectJobCounters(id); + } + + private synchronized void collectJobCounters(JobID id) { + JobInProgress job = tracker.jobs.get(id); + Counters jobCounter = job.getCounters(); + for (JobInProgress.Counter key : JobInProgress.Counter.values()) { + countersToMetrics.findCounter(key). + increment(jobCounter.findCounter(key).getValue()); + } + for (Task.Counter key : Task.Counter.values()) { + countersToMetrics.findCounter(key). + increment(jobCounter.findCounter(key).getValue()); + } + for (Counter counter : jobCounter.getGroup(Task.FILESYSTEM_COUNTER_GROUP)) { + countersToMetrics.incrCounter( + Task.FILESYSTEM_COUNTER_GROUP, counter.getName(), counter.getValue()); + } + } + /* + * Set everything in the counters to zero + */ + private void clearCounters() { + for (Group g : countersToMetrics) { + for (Counter c : g) { + c.setValue(0); + } + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/JobTrackerStatistics.java b/src/mapred/org/apache/hadoop/mapred/JobTrackerStatistics.java new file mode 100644 index 0000000..58e6bfe --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JobTrackerStatistics.java @@ -0,0 +1,87 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.mapred; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.hadoop.mapred.StatisticsCollector.Stat; + +/** + * Collects the job tracker statistics. + * + */ +class JobTrackerStatistics { + + final StatisticsCollector collector; + final Map ttStats = + new HashMap(); + + JobTrackerStatistics() { + collector = new StatisticsCollector(); + collector.start(); + } + + synchronized void taskTrackerAdded(String name) { + TaskTrackerStat stat = ttStats.get(name); + if(stat == null) { + stat = new TaskTrackerStat(name); + ttStats.put(name, stat); + } + } + + synchronized void taskTrackerRemoved(String name) { + TaskTrackerStat stat = ttStats.remove(name); + if(stat != null) { + stat.remove(); + } + } + + synchronized TaskTrackerStat getTaskTrackerStat(String name) { + return ttStats.get(name); + } + + class TaskTrackerStat { + final String totalTasksKey; + final Stat totalTasksStat; + + final String succeededTasksKey; + final Stat succeededTasksStat; + + TaskTrackerStat(String trackerName) { + totalTasksKey = trackerName+"-"+"totalTasks"; + totalTasksStat = collector.createStat(totalTasksKey); + succeededTasksKey = trackerName+"-"+"succeededTasks"; + succeededTasksStat = collector.createStat(succeededTasksKey); + } + + synchronized void incrTotalTasks() { + totalTasksStat.inc(); + } + + synchronized void incrSucceededTasks() { + succeededTasksStat.inc(); + } + + synchronized void remove() { + collector.removeStat(totalTasksKey); + collector.removeStat(succeededTasksKey); + } + + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/JvmContext.java b/src/mapred/org/apache/hadoop/mapred/JvmContext.java new file mode 100644 index 0000000..1c2d936 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JvmContext.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +class JvmContext implements Writable { + + public static final Log LOG = + LogFactory.getLog(JvmContext.class); + + JVMId jvmId; + String pid; + + JvmContext() { + jvmId = new JVMId(); + pid = ""; + } + + JvmContext(JVMId id, String pid) { + jvmId = id; + this.pid = pid; + } + + public void readFields(DataInput in) throws IOException { + jvmId.readFields(in); + this.pid = Text.readString(in); + } + + public void write(DataOutput out) throws IOException { + jvmId.write(out); + Text.writeString(out, pid); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/JvmManager.java b/src/mapred/org/apache/hadoop/mapred/JvmManager.java new file mode 100644 index 0000000..6d420b8 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JvmManager.java @@ -0,0 +1,507 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Vector; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.mapred.TaskController.TaskControllerContext; +import org.apache.hadoop.mapred.TaskTracker.TaskInProgress; +import org.apache.hadoop.util.ProcessTree; +import org.apache.hadoop.util.Shell.ShellCommandExecutor; + +class JvmManager { + + public static final Log LOG = + LogFactory.getLog("org.apache.hadoop.mapred.JvmManager"); + + JvmManagerForType mapJvmManager; + + JvmManagerForType reduceJvmManager; + + public JvmEnv constructJvmEnv(List setup, Vectorvargs, + File stdout,File stderr,long logSize, File workDir, + Map env, JobConf conf) { + return new JvmEnv(setup,vargs,stdout,stderr,logSize,workDir,env,conf); + } + + public JvmManager(TaskTracker tracker) { + mapJvmManager = new JvmManagerForType(tracker.getMaxCurrentMapTasks(), + true, tracker); + reduceJvmManager = new JvmManagerForType(tracker.getMaxCurrentReduceTasks(), + false, tracker); + } + + /* + * Saves pid of the given taskJvm + */ + void setPidToJvm(JVMId jvmId, String pid) { + if (jvmId.isMapJVM()) { + mapJvmManager.jvmIdToPid.put(jvmId, pid); + } + else { + reduceJvmManager.jvmIdToPid.put(jvmId, pid); + } + } + + /* + * Returns the pid of the task + */ + String getPid(TaskRunner t) { + if (t != null && t.getTask() != null) { + if (t.getTask().isMapTask()) { + JVMId id = mapJvmManager.runningTaskToJvm.get(t); + if (id != null) { + return mapJvmManager.jvmIdToPid.get(id); + } + } else { + JVMId id = reduceJvmManager.runningTaskToJvm.get(t); + if (id != null) { + return reduceJvmManager.jvmIdToPid.get(id); + } + } + } + return null; + } + + + public void stop() { + mapJvmManager.stop(); + reduceJvmManager.stop(); + } + + public boolean isJvmKnown(JVMId jvmId) { + if (jvmId.isMapJVM()) { + return mapJvmManager.isJvmknown(jvmId); + } else { + return reduceJvmManager.isJvmknown(jvmId); + } + } + + public void launchJvm(TaskRunner t, JvmEnv env) { + if (t.getTask().isMapTask()) { + mapJvmManager.reapJvm(t, env); + } else { + reduceJvmManager.reapJvm(t, env); + } + } + + public TaskInProgress getTaskForJvm(JVMId jvmId) { + if (jvmId.isMapJVM()) { + return mapJvmManager.getTaskForJvm(jvmId); + } else { + return reduceJvmManager.getTaskForJvm(jvmId); + } + } + public void taskFinished(TaskRunner tr) { + if (tr.getTask().isMapTask()) { + mapJvmManager.taskFinished(tr); + } else { + reduceJvmManager.taskFinished(tr); + } + } + + public void taskKilled(TaskRunner tr) { + if (tr.getTask().isMapTask()) { + mapJvmManager.taskKilled(tr); + } else { + reduceJvmManager.taskKilled(tr); + } + } + + public void killJvm(JVMId jvmId) { + if (jvmId.isMap) { + mapJvmManager.killJvm(jvmId); + } else { + reduceJvmManager.killJvm(jvmId); + } + } + + private static class JvmManagerForType { + //Mapping from the JVM IDs to running Tasks + Map jvmToRunningTask = + new HashMap(); + //Mapping from the tasks to JVM IDs + Map runningTaskToJvm = + new HashMap(); + //Mapping from the JVM IDs to Reduce JVM processes + Map jvmIdToRunner = + new HashMap(); + //Mapping from the JVM IDs to process IDs + Map jvmIdToPid = + new HashMap(); + + int maxJvms; + boolean isMap; + + Random rand = new Random(System.currentTimeMillis()); + private 
TaskTracker tracker; + + public JvmManagerForType(int maxJvms, boolean isMap, + TaskTracker tracker) { + this.maxJvms = maxJvms; + this.isMap = isMap; + this.tracker = tracker; + } + + synchronized public void setRunningTaskForJvm(JVMId jvmId, + TaskRunner t) { + jvmToRunningTask.put(jvmId, t); + runningTaskToJvm.put(t,jvmId); + jvmIdToRunner.get(jvmId).setBusy(true); + } + + synchronized public TaskInProgress getTaskForJvm(JVMId jvmId) { + if (jvmToRunningTask.containsKey(jvmId)) { + //Incase of JVM reuse, tasks are returned to previously launched + //JVM via this method. However when a new task is launched + //the task being returned has to be initialized. + TaskRunner taskRunner = jvmToRunningTask.get(jvmId); + JvmRunner jvmRunner = jvmIdToRunner.get(jvmId); + Task task = taskRunner.getTaskInProgress().getTask(); + TaskControllerContext context = + new TaskController.TaskControllerContext(); + context.env = jvmRunner.env; + context.task = task; + //If we are returning the same task as which the JVM was launched + //we don't initialize task once again. + if(!jvmRunner.env.conf.get("mapred.task.id"). + equals(task.getTaskID().toString())) { + tracker.getTaskController().initializeTask(context); + } + + jvmRunner.taskGiven(task); + return taskRunner.getTaskInProgress(); + + } + return null; + } + + synchronized public boolean isJvmknown(JVMId jvmId) { + return jvmIdToRunner.containsKey(jvmId); + } + + synchronized public void taskFinished(TaskRunner tr) { + JVMId jvmId = runningTaskToJvm.remove(tr); + if (jvmId != null) { + jvmToRunningTask.remove(jvmId); + JvmRunner jvmRunner; + if ((jvmRunner = jvmIdToRunner.get(jvmId)) != null) { + jvmRunner.taskRan(); + } + } + } + + synchronized public void taskKilled(TaskRunner tr) { + JVMId jvmId = runningTaskToJvm.remove(tr); + if (jvmId != null) { + jvmToRunningTask.remove(jvmId); + killJvm(jvmId); + } + } + + synchronized public void killJvm(JVMId jvmId) { + JvmRunner jvmRunner; + if ((jvmRunner = jvmIdToRunner.get(jvmId)) != null) { + jvmRunner.kill(); + } + } + + synchronized public void stop() { + //since the kill() method invoked later on would remove + //an entry from the jvmIdToRunner map, we create a + //copy of the values and iterate over it (if we don't + //make a copy, we will encounter concurrentModification + //exception + List list = new ArrayList(); + list.addAll(jvmIdToRunner.values()); + for (JvmRunner jvm : list) { + jvm.kill(); + } + } + + synchronized private void removeJvm(JVMId jvmId) { + jvmIdToRunner.remove(jvmId); + jvmIdToPid.remove(jvmId); + } + private synchronized void reapJvm( + TaskRunner t, JvmEnv env) { + if (t.getTaskInProgress().wasKilled()) { + //the task was killed in-flight + //no need to do the rest of the operations + return; + } + boolean spawnNewJvm = false; + JobID jobId = t.getTask().getJobID(); + //Check whether there is a free slot to start a new JVM. + //,or, Kill a (idle) JVM and launch a new one + //When this method is called, we *must* + // (1) spawn a new JVM (if we are below the max) + // (2) find an idle JVM (that belongs to the same job), or, + // (3) kill an idle JVM (from a different job) + // (the order of return is in the order above) + int numJvmsSpawned = jvmIdToRunner.size(); + JvmRunner runnerToKill = null; + if (numJvmsSpawned >= maxJvms) { + //go through the list of JVMs for all jobs. 
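        // Single pass over the live JVMs: reuse an idle JVM belonging to this
        // job if one exists, otherwise remember an exhausted or idle foreign
        // JVM as a kill candidate so a fresh JVM can be spawned below.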
+ Iterator> jvmIter = + jvmIdToRunner.entrySet().iterator(); + + while (jvmIter.hasNext()) { + JvmRunner jvmRunner = jvmIter.next().getValue(); + JobID jId = jvmRunner.jvmId.getJobId(); + //look for a free JVM for this job; if one exists then just break + if (jId.equals(jobId) && !jvmRunner.isBusy() && !jvmRunner.ranAll()){ + setRunningTaskForJvm(jvmRunner.jvmId, t); //reserve the JVM + LOG.info("No new JVM spawned for jobId/taskid: " + + jobId+"/"+t.getTask().getTaskID() + + ". Attempting to reuse: " + jvmRunner.jvmId); + return; + } + //Cases when a JVM is killed: + // (1) the JVM under consideration belongs to the same job + // (passed in the argument). In this case, kill only when + // the JVM ran all the tasks it was scheduled to run (in terms + // of count). + // (2) the JVM under consideration belongs to a different job and is + // currently not busy + //But in both the above cases, we see if we can assign the current + //task to an idle JVM (hence we continue the loop even on a match) + if ((jId.equals(jobId) && jvmRunner.ranAll()) || + (!jId.equals(jobId) && !jvmRunner.isBusy())) { + runnerToKill = jvmRunner; + spawnNewJvm = true; + } + } + } else { + spawnNewJvm = true; + } + + if (spawnNewJvm) { + if (runnerToKill != null) { + LOG.info("Killing JVM: " + runnerToKill.jvmId); + runnerToKill.kill(); + } + spawnNewJvm(jobId, env, t); + return; + } + //*MUST* never reach this + throw new RuntimeException("Inconsistent state!!! " + + "JVM Manager reached an unstable state " + + "while reaping a JVM for task: " + t.getTask().getTaskID()+ + " " + getDetails()); + } + + private String getDetails() { + StringBuffer details = new StringBuffer(); + details.append("Number of active JVMs:"). + append(jvmIdToRunner.size()); + Iterator jvmIter = + jvmIdToRunner.keySet().iterator(); + while (jvmIter.hasNext()) { + JVMId jvmId = jvmIter.next(); + details.append("\n JVMId "). + append(jvmId.toString()). + append(" #Tasks ran: "). + append(jvmIdToRunner.get(jvmId).numTasksRan). + append(" Currently busy? "). + append(jvmIdToRunner.get(jvmId).busy). + append(" Currently running: "). + append(jvmToRunningTask.get(jvmId).getTask().getTaskID().toString()); + } + return details.toString(); + } + + private void spawnNewJvm(JobID jobId, JvmEnv env, + TaskRunner t) { + JvmRunner jvmRunner = new JvmRunner(env,jobId); + jvmIdToRunner.put(jvmRunner.jvmId, jvmRunner); + //spawn the JVM in a new thread. Note that there will be very little + //extra overhead of launching the new thread for a new JVM since + //most of the cost is involved in launching the process. Moreover, + //since we are going to be using the JVM for running many tasks, + //the thread launch cost becomes trivial when amortized over all + //tasks. Doing it this way also keeps code simple. 
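      // Run the JVM runner as a daemon thread so that a straggling runner
      // cannot keep the TaskTracker process alive during shutdown.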
+ jvmRunner.setDaemon(true); + jvmRunner.setName("JVM Runner " + jvmRunner.jvmId + " spawned."); + setRunningTaskForJvm(jvmRunner.jvmId, t); + LOG.info(jvmRunner.getName()); + jvmRunner.start(); + } + synchronized private void updateOnJvmExit(JVMId jvmId, + int exitCode) { + removeJvm(jvmId); + TaskRunner t = jvmToRunningTask.remove(jvmId); + + if (t != null) { + runningTaskToJvm.remove(t); + if (exitCode != 0) { + t.setExitCode(exitCode); + } + t.signalDone(); + } + } + + private class JvmRunner extends Thread { + JvmEnv env; + volatile boolean killed = false; + volatile int numTasksRan; + final int numTasksToRun; + JVMId jvmId; + volatile boolean busy = true; + private ShellCommandExecutor shexec; // shell terminal for running the task + //context used for starting JVM + private TaskControllerContext initalContext; + + private List tasksGiven = new ArrayList(); + + void taskGiven(Task task) { + tasksGiven.add(task); + } + + public JvmRunner(JvmEnv env, JobID jobId) { + this.env = env; + this.jvmId = new JVMId(jobId, isMap, rand.nextInt()); + this.numTasksToRun = env.conf.getNumTasksToExecutePerJvm(); + LOG.info("In JvmRunner constructed JVM ID: " + jvmId); + } + public void run() { + runChild(env); + + // Post-JVM-exit logs processing. Truncate the logs. + truncateJVMLogs(); + } + + public void runChild(JvmEnv env) { + initalContext = new TaskControllerContext(); + try { + env.vargs.add(Integer.toString(jvmId.getId())); + //Launch the task controller to run task JVM + initalContext.task = jvmToRunningTask.get(jvmId).getTask(); + initalContext.env = env; + tracker.getTaskController().initializeTask(initalContext); + tracker.getTaskController().launchTaskJVM(initalContext); + } catch (IOException ioe) { + // do nothing + // error and output are appropriately redirected + } finally { // handle the exit code + shexec = initalContext.shExec; + if (shexec == null) { + return; + } + + kill(); + + int exitCode = shexec.getExitCode(); + updateOnJvmExit(jvmId, exitCode); + LOG.info("JVM : " + jvmId +" exited. Number of tasks it ran: " + + numTasksRan); + try { + // In case of jvm-reuse, + //the task jvm cleans up the common workdir for every + //task at the beginning of each task in the task JVM. + //For the last task, we do it here. + if (env.conf.getNumTasksToExecutePerJvm() != 1) { + tracker.directoryCleanupThread.addToQueue( + TaskTracker.buildTaskControllerPathDeletionContexts( + tracker.getLocalFileSystem(), tracker.getLocalDirs(), + initalContext.task, + true /* workDir */, + tracker.getTaskController())); + } + } catch (IOException ie){} + } + } + + public void kill() { + if (!killed) { + killed = true; + TaskController controller = tracker.getTaskController(); + // Check inital context before issuing a kill to prevent situations + // where kill is issued before task is launched. + if (initalContext != null && initalContext.env != null) { + initalContext.pid = jvmIdToPid.get(jvmId); + initalContext.sleeptimeBeforeSigkill = tracker.getJobConf() + .getLong("mapred.tasktracker.tasks.sleeptime-before-sigkill", + ProcessTree.DEFAULT_SLEEPTIME_BEFORE_SIGKILL); + controller.destroyTaskJVM(initalContext); + } else { + LOG.info(String.format("JVM Not killed %s but just removed", jvmId + .toString())); + } + removeJvm(jvmId); + } + } + + // Post-JVM-exit logs processing. Truncate the logs. 
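    // Hands every task that ran in this JVM to the TaskTracker's log monitor,
    // which is expected to trim their userlogs down to the configured retain
    // size now that the JVM has exited.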
+ private void truncateJVMLogs() { + Task firstTask = initalContext.task; + tracker.getTaskLogsMonitor().addProcessForLogTruncation( + firstTask.getTaskID(), tasksGiven); + } + + public void taskRan() { + busy = false; + numTasksRan++; + } + + public boolean ranAll() { + return(numTasksRan == numTasksToRun); + } + public void setBusy(boolean busy) { + this.busy = busy; + } + public boolean isBusy() { + return busy; + } + } + } + static class JvmEnv { //Helper class + List vargs; + List setup; + File stdout; + File stderr; + File workDir; + long logSize; + JobConf conf; + Map env; + + public JvmEnv(List setup, Vector vargs, File stdout, + File stderr, long logSize, File workDir, Map env, + JobConf conf) { + this.setup = setup; + this.vargs = vargs; + this.stdout = stdout; + this.stderr = stderr; + this.workDir = workDir; + this.env = env; + this.conf = conf; + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/JvmTask.java b/src/mapred/org/apache/hadoop/mapred/JvmTask.java new file mode 100644 index 0000000..9541245 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/JvmTask.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import org.apache.hadoop.io.Writable; + +class JvmTask implements Writable { + Task t; + boolean shouldDie; + public JvmTask(Task t, boolean shouldDie) { + this.t = t; + this.shouldDie = shouldDie; + } + public JvmTask() {} + public Task getTask() { + return t; + } + public boolean shouldDie() { + return shouldDie; + } + public void write(DataOutput out) throws IOException { + out.writeBoolean(shouldDie); + if (t != null) { + out.writeBoolean(true); + out.writeBoolean(t.isMapTask()); + t.write(out); + } else { + out.writeBoolean(false); + } + } + public void readFields(DataInput in) throws IOException { + shouldDie = in.readBoolean(); + boolean taskComing = in.readBoolean(); + if (taskComing) { + boolean isMap = in.readBoolean(); + if (isMap) { + t = new MapTask(); + } else { + t = new ReduceTask(); + } + t.readFields(in); + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/KeyValueLineRecordReader.java b/src/mapred/org/apache/hadoop/mapred/KeyValueLineRecordReader.java new file mode 100644 index 0000000..f49413a --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/KeyValueLineRecordReader.java @@ -0,0 +1,115 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.Text; + +/** + * This class treats a line in the input as a key/value pair separated by a + * separator character. The separator can be specified in config file + * under the attribute name key.value.separator.in.input.line. The default + * separator is the tab character ('\t'). + */ +public class KeyValueLineRecordReader implements RecordReader { + + private final LineRecordReader lineRecordReader; + + private byte separator = (byte) '\t'; + + private LongWritable dummyKey; + + private Text innerValue; + + public Class getKeyClass() { return Text.class; } + + public Text createKey() { + return new Text(); + } + + public Text createValue() { + return new Text(); + } + + public KeyValueLineRecordReader(Configuration job, FileSplit split) + throws IOException { + + lineRecordReader = new LineRecordReader(job, split); + dummyKey = lineRecordReader.createKey(); + innerValue = lineRecordReader.createValue(); + String sepStr = job.get("key.value.separator.in.input.line", "\t"); + this.separator = (byte) sepStr.charAt(0); + } + + public static int findSeparator(byte[] utf, int start, int length, byte sep) { + for (int i = start; i < (start + length); i++) { + if (utf[i] == sep) { + return i; + } + } + return -1; + } + + /** Read key/value pair in a line. */ + public synchronized boolean next(Text key, Text value) + throws IOException { + Text tKey = key; + Text tValue = value; + byte[] line = null; + int lineLen = -1; + if (lineRecordReader.next(dummyKey, innerValue)) { + line = innerValue.getBytes(); + lineLen = innerValue.getLength(); + } else { + return false; + } + if (line == null) + return false; + int pos = findSeparator(line, 0, lineLen, this.separator); + if (pos == -1) { + tKey.set(line, 0, lineLen); + tValue.set(""); + } else { + int keyLen = pos; + byte[] keyBytes = new byte[keyLen]; + System.arraycopy(line, 0, keyBytes, 0, keyLen); + int valLen = lineLen - keyLen - 1; + byte[] valBytes = new byte[valLen]; + System.arraycopy(line, pos + 1, valBytes, 0, valLen); + tKey.set(keyBytes); + tValue.set(valBytes); + } + return true; + } + + public float getProgress() { + return lineRecordReader.getProgress(); + } + + public synchronized long getPos() throws IOException { + return lineRecordReader.getPos(); + } + + public synchronized void close() throws IOException { + lineRecordReader.close(); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/KeyValueTextInputFormat.java b/src/mapred/org/apache/hadoop/mapred/KeyValueTextInputFormat.java new file mode 100644 index 0000000..d2d3a76 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/KeyValueTextInputFormat.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.compress.CompressionCodecFactory; + +/** + * An {@link InputFormat} for plain text files. Files are broken into lines. + * Either linefeed or carriage-return are used to signal end of line. Each line + * is divided into key and value parts by a separator byte. If no such a byte + * exists, the key will be the entire line and value will be empty. + */ +public class KeyValueTextInputFormat extends FileInputFormat + implements JobConfigurable { + + private CompressionCodecFactory compressionCodecs = null; + + public void configure(JobConf conf) { + compressionCodecs = new CompressionCodecFactory(conf); + } + + protected boolean isSplitable(FileSystem fs, Path file) { + return compressionCodecs.getCodec(file) == null; + } + + public RecordReader getRecordReader(InputSplit genericSplit, + JobConf job, + Reporter reporter) + throws IOException { + + reporter.setStatus(genericSplit.toString()); + return new KeyValueLineRecordReader(job, (FileSplit) genericSplit); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/KillJobAction.java b/src/mapred/org/apache/hadoop/mapred/KillJobAction.java new file mode 100644 index 0000000..1aa019f --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/KillJobAction.java @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + + +/** + * Represents a directive from the {@link org.apache.hadoop.mapred.JobTracker} + * to the {@link org.apache.hadoop.mapred.TaskTracker} to kill the task of + * a job and cleanup resources. 
+ * + */ +class KillJobAction extends TaskTrackerAction { + final JobID jobId; + + public KillJobAction() { + super(ActionType.KILL_JOB); + jobId = new JobID(); + } + + public KillJobAction(JobID jobId) { + super(ActionType.KILL_JOB); + this.jobId = jobId; + } + + public JobID getJobID() { + return jobId; + } + + @Override + public void write(DataOutput out) throws IOException { + jobId.write(out); + } + + @Override + public void readFields(DataInput in) throws IOException { + jobId.readFields(in); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/KillTaskAction.java b/src/mapred/org/apache/hadoop/mapred/KillTaskAction.java new file mode 100644 index 0000000..176bac8 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/KillTaskAction.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + + +/** + * Represents a directive from the {@link org.apache.hadoop.mapred.JobTracker} + * to the {@link org.apache.hadoop.mapred.TaskTracker} to kill a task. + * + */ +class KillTaskAction extends TaskTrackerAction { + final TaskAttemptID taskId; + + public KillTaskAction() { + super(ActionType.KILL_TASK); + taskId = new TaskAttemptID(); + } + + public KillTaskAction(TaskAttemptID taskId) { + super(ActionType.KILL_TASK); + this.taskId = taskId; + } + + public TaskAttemptID getTaskID() { + return taskId; + } + + @Override + public void write(DataOutput out) throws IOException { + taskId.write(out); + } + + @Override + public void readFields(DataInput in) throws IOException { + taskId.readFields(in); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/LaunchTaskAction.java b/src/mapred/org/apache/hadoop/mapred/LaunchTaskAction.java new file mode 100644 index 0000000..c3b626e --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/LaunchTaskAction.java @@ -0,0 +1,61 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * Represents a directive from the {@link org.apache.hadoop.mapred.JobTracker} + * to the {@link org.apache.hadoop.mapred.TaskTracker} to launch a new task. + * + */ +class LaunchTaskAction extends TaskTrackerAction { + private Task task; + + public LaunchTaskAction() { + super(ActionType.LAUNCH_TASK); + } + + public LaunchTaskAction(Task task) { + super(ActionType.LAUNCH_TASK); + this.task = task; + } + + public Task getTask() { + return task; + } + + public void write(DataOutput out) throws IOException { + out.writeBoolean(task.isMapTask()); + task.write(out); + } + + public void readFields(DataInput in) throws IOException { + boolean isMapTask = in.readBoolean(); + if (isMapTask) { + task = new MapTask(); + } else { + task = new ReduceTask(); + } + task.readFields(in); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/LimitTasksPerJobTaskScheduler.java b/src/mapred/org/apache/hadoop/mapred/LimitTasksPerJobTaskScheduler.java new file mode 100644 index 0000000..4313b4e --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/LimitTasksPerJobTaskScheduler.java @@ -0,0 +1,200 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; + +/** + * A {@link TaskScheduler} that limits the maximum number of tasks + * running for a job. The limit is set by means of the + * mapred.jobtracker.scheduler.maxRunningTasksPerJob + * property. 
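 * (The key actually read by {@link #setConf} is
 * mapred.jobtracker.taskScheduler.maxRunningTasksPerJob; its default of
 * Long.MAX_VALUE leaves the per-job limit disabled.)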
+ */ +class LimitTasksPerJobTaskScheduler extends JobQueueTaskScheduler { + + private static final Log LOG = LogFactory.getLog( + "org.apache.hadoop.mapred.TaskLimitedJobQueueTaskScheduler"); + + public static final String MAX_TASKS_PER_JOB_PROPERTY = + "mapred.jobtracker.taskScheduler.maxRunningTasksPerJob"; + + private long maxTasksPerJob; + + public LimitTasksPerJobTaskScheduler() { + super(); + } + + @Override + public synchronized void start() throws IOException { + super.start(); + QueueManager queueManager = taskTrackerManager.getQueueManager(); + String queueName = queueManager.getJobQueueInfos()[0].getQueueName(); + queueManager.setSchedulerInfo(queueName + ,"Maximum Tasks Per Job :: " + String.valueOf(maxTasksPerJob)); + } + + @Override + public synchronized void setConf(Configuration conf) { + super.setConf(conf); + maxTasksPerJob = conf.getLong(MAX_TASKS_PER_JOB_PROPERTY ,Long.MAX_VALUE); + if (maxTasksPerJob <= 0) { + String msg = MAX_TASKS_PER_JOB_PROPERTY + + " is set to zero or a negative value. Aborting."; + LOG.fatal(msg); + throw new RuntimeException (msg); + } + } + + @Override + public synchronized List assignTasks(TaskTracker taskTracker) + throws IOException { + TaskTrackerStatus taskTrackerStatus = taskTracker.getStatus(); + final int numTaskTrackers = + taskTrackerManager.getClusterStatus().getTaskTrackers(); + Collection jobQueue = + jobQueueJobInProgressListener.getJobQueue(); + Task task; + + /* Stats about the current taskTracker */ + final int mapTasksNumber = taskTrackerStatus.countMapTasks(); + final int reduceTasksNumber = taskTrackerStatus.countReduceTasks(); + final int maximumMapTasksNumber = taskTrackerStatus.getMaxMapSlots(); + final int maximumReduceTasksNumber = taskTrackerStatus.getMaxReduceSlots(); + + /* + * Statistics about the whole cluster. Most are approximate because of + * concurrency + */ + final int[] maxMapAndReduceLoad = getMaxMapAndReduceLoad( + maximumMapTasksNumber, maximumReduceTasksNumber); + final int maximumMapLoad = maxMapAndReduceLoad[0]; + final int maximumReduceLoad = maxMapAndReduceLoad[1]; + + + final int beginAtStep; + /* + * When step == 0, this loop starts as many map tasks it can wrt + * maxTasksPerJob + * When step == 1, this loop starts as many reduce tasks it can wrt + * maxTasksPerJob + * When step == 2, this loop starts as many map tasks it can + * When step == 3, this loop starts as many reduce tasks it can + * + * It may seem that we would improve this loop by queuing jobs we cannot + * start in steps 0 and 1 because of maxTasksPerJob, and using that queue + * in step 2 and 3. + * A first thing to notice is that the time with the current algorithm is + * logarithmic, because it is the sum of (p^k) for k from 1 to N, were + * N is the number of jobs and p is the probability for a job to not exceed + * limits The probability for the cache to be useful would be similar to + * p^N, that is 1/(e^N), whereas its size and the time spent to manage it + * would be in ln(N). + * So it is not a good idea. 
+ */ + if (maxTasksPerJob != Long.MAX_VALUE) { + beginAtStep = 0; + } + else { + beginAtStep = 2; + } + List assignedTasks = new ArrayList(); + scheduleTasks: + for (int step = beginAtStep; step <= 3; ++step) { + /* If we reached the maximum load for this step, go to the next */ + if ((step == 0 || step == 2) && mapTasksNumber >= maximumMapLoad || + (step == 1 || step == 3) && reduceTasksNumber >= maximumReduceLoad) { + continue; + } + /* For each job, start its tasks */ + synchronized (jobQueue) { + for (JobInProgress job : jobQueue) { + /* Ignore non running jobs */ + if (job.getStatus().getRunState() != JobStatus.RUNNING) { + continue; + } + /* Check that we're not exceeding the global limits */ + if ((step == 0 || step == 1) + && (job.runningMaps() + job.runningReduces() >= maxTasksPerJob)) { + continue; + } + if (step == 0 || step == 2) { + task = job.obtainNewMapTask(taskTrackerStatus, numTaskTrackers, + taskTrackerManager.getNumberOfUniqueHosts()); + } + else { + task = job.obtainNewReduceTask(taskTrackerStatus, numTaskTrackers, + taskTrackerManager.getNumberOfUniqueHosts()); + } + if (task != null) { + assignedTasks.add(task); + break scheduleTasks; + } + } + } + } + return assignedTasks; + } + + /** + * Determine the maximum number of maps or reduces that we are willing to run + * on a taskTracker which accept a maximum of localMaxMapLoad maps and + * localMaxReduceLoad reduces + * @param localMaxMapLoad The local maximum number of map tasks for a host + * @param localMaxReduceLoad The local maximum number of reduce tasks for a + * host + * @return An array of the two maximums: map then reduce. + */ + protected synchronized int[] getMaxMapAndReduceLoad(int localMaxMapLoad, + int localMaxReduceLoad) { + // Approximate because of concurrency + final int numTaskTrackers = + taskTrackerManager.getClusterStatus().getTaskTrackers(); + /* Hold the result */ + int maxMapLoad = 0; + int maxReduceLoad = 0; + int neededMaps = 0; + int neededReduces = 0; + Collection jobQueue = + jobQueueJobInProgressListener.getJobQueue(); + synchronized (jobQueue) { + for (JobInProgress job : jobQueue) { + if (job.getStatus().getRunState() == JobStatus.RUNNING) { + neededMaps += job.desiredMaps() - job.finishedMaps(); + neededReduces += job.desiredReduces() - job.finishedReduces(); + } + } + } + if (numTaskTrackers > 0) { + maxMapLoad = Math.min(localMaxMapLoad, (int) Math + .ceil((double) neededMaps / numTaskTrackers)); + maxReduceLoad = Math.min(localMaxReduceLoad, (int) Math + .ceil((double) neededReduces / numTaskTrackers)); + } + return new int[] { maxMapLoad, maxReduceLoad }; + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/LineRecordReader.java b/src/mapred/org/apache/hadoop/mapred/LineRecordReader.java new file mode 100644 index 0000000..3355e1d --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/LineRecordReader.java @@ -0,0 +1,174 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.io.InputStream; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.compress.CompressionCodec; +import org.apache.hadoop.io.compress.CompressionCodecFactory; +import org.apache.commons.logging.LogFactory; +import org.apache.commons.logging.Log; + +/** + * Treats keys as offset in file and value as line. + * @deprecated Use + * {@link org.apache.hadoop.mapreduce.lib.input.LineRecordReader} instead. + */ +@Deprecated +public class LineRecordReader implements RecordReader { + private static final Log LOG + = LogFactory.getLog(LineRecordReader.class.getName()); + + private CompressionCodecFactory compressionCodecs = null; + private long start; + private long pos; + private long end; + private LineReader in; + int maxLineLength; + + /** + * A class that provides a line reader from an input stream. + * @deprecated Use {@link org.apache.hadoop.util.LineReader} instead. + */ + @Deprecated + public static class LineReader extends org.apache.hadoop.util.LineReader { + LineReader(InputStream in) { + super(in); + } + LineReader(InputStream in, int bufferSize) { + super(in, bufferSize); + } + public LineReader(InputStream in, Configuration conf) throws IOException { + super(in, conf); + } + } + + public LineRecordReader(Configuration job, + FileSplit split) throws IOException { + this.maxLineLength = job.getInt("mapred.linerecordreader.maxlength", + Integer.MAX_VALUE); + start = split.getStart(); + end = start + split.getLength(); + final Path file = split.getPath(); + compressionCodecs = new CompressionCodecFactory(job); + final CompressionCodec codec = compressionCodecs.getCodec(file); + + // open the file and seek to the start of the split + FileSystem fs = file.getFileSystem(job); + FSDataInputStream fileIn = fs.open(split.getPath()); + boolean skipFirstLine = false; + if (codec != null) { + in = new LineReader(codec.createInputStream(fileIn), job); + end = Long.MAX_VALUE; + } else { + if (start != 0) { + skipFirstLine = true; + --start; + fileIn.seek(start); + } + in = new LineReader(fileIn, job); + } + if (skipFirstLine) { // skip first line and re-establish "start". 
+ start += in.readLine(new Text(), 0, + (int)Math.min((long)Integer.MAX_VALUE, end - start)); + } + this.pos = start; + } + + public LineRecordReader(InputStream in, long offset, long endOffset, + int maxLineLength) { + this.maxLineLength = maxLineLength; + this.in = new LineReader(in); + this.start = offset; + this.pos = offset; + this.end = endOffset; + } + + public LineRecordReader(InputStream in, long offset, long endOffset, + Configuration job) + throws IOException{ + this.maxLineLength = job.getInt("mapred.linerecordreader.maxlength", + Integer.MAX_VALUE); + this.in = new LineReader(in, job); + this.start = offset; + this.pos = offset; + this.end = endOffset; + } + + public LongWritable createKey() { + return new LongWritable(); + } + + public Text createValue() { + return new Text(); + } + + /** Read a line. */ + public synchronized boolean next(LongWritable key, Text value) + throws IOException { + + while (pos < end) { + key.set(pos); + + int newSize = in.readLine(value, maxLineLength, + Math.max((int)Math.min(Integer.MAX_VALUE, end-pos), + maxLineLength)); + if (newSize == 0) { + return false; + } + pos += newSize; + if (newSize < maxLineLength) { + return true; + } + + // line too long. try again + LOG.info("Skipped line of size " + newSize + " at pos " + (pos - newSize)); + } + + return false; + } + + /** + * Get the progress within the split + */ + public float getProgress() { + if (start == end) { + return 0.0f; + } else { + return Math.min(1.0f, (pos - start) / (float)(end - start)); + } + } + + public synchronized long getPos() throws IOException { + return pos; + } + + public synchronized void close() throws IOException { + if (in != null) { + in.close(); + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/LinuxTaskController.java b/src/mapred/org/apache/hadoop/mapred/LinuxTaskController.java new file mode 100644 index 0000000..c0e938f --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/LinuxTaskController.java @@ -0,0 +1,607 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+*/ +package org.apache.hadoop.mapred; + +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.io.PrintWriter; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.mapred.JvmManager.JvmEnv; +import org.apache.hadoop.mapred.CleanupQueue.PathDeletionContext; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.Shell.ShellCommandExecutor; + +/** + * A {@link TaskController} that runs the task JVMs as the user + * who submits the job. + * + * This class executes a setuid executable to implement methods + * of the {@link TaskController}, including launching the task + * JVM and killing it when needed, and also initializing and + * finalizing the task environment. + *

+ * <p>The setuid executable is launched using the command line:</p>
+ * <p>task-controller user-name command command-args, where</p>
+ * <p>user-name is the name of the owner who submits the job</p>
+ * <p>command is one of the cardinal value of the
+ * {@link LinuxTaskController.TaskCommands} enumeration</p>
+ * <p>command-args depends on the command being launched.</p>
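+ * <p>For example (the user name, directory and IDs here are purely
+ * illustrative), launching a task JVM corresponds to an invocation such as
+ * {@code task-controller alice 0 /disk1/mapred job_201012010000_0001
+ * attempt_201012010000_0001_m_000000_0}.</p>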

+ * + * In addition to running and killing tasks, the class also + * sets up appropriate access for the directories and files + * that will be used by the tasks. + */ +class LinuxTaskController extends TaskController { + + private static final Log LOG = + LogFactory.getLog(LinuxTaskController.class); + + // Name of the executable script that will contain the child + // JVM command line. See writeCommand for details. + private static final String COMMAND_FILE = "taskjvm.sh"; + + // Path to the setuid executable. + private static String taskControllerExe; + + static { + // the task-controller is expected to be under the $HADOOP_HOME/bin + // directory. + File hadoopBin = new File(System.getenv("HADOOP_HOME"), "bin"); + taskControllerExe = + new File(hadoopBin, "task-controller").getAbsolutePath(); + } + + // The list of directory paths specified in the + // variable mapred.local.dir. This is used to determine + // which among the list of directories is picked up + // for storing data for a particular task. + private String[] mapredLocalDirs; + + // permissions to set on files and directories created. + // When localized files are handled securely, this string + // will change to something more restrictive. Until then, + // it opens up the permissions for all, so that the tasktracker + // and job owners can access files together. + private static final String FILE_PERMISSIONS = "ugo+rwx"; + + // permissions to set on components of the path leading to + // localized files and directories. Read and execute permissions + // are required for different users to be able to access the + // files. + private static final String PATH_PERMISSIONS = "go+rx"; + + public LinuxTaskController() { + super(); + } + + @Override + public void setConf(Configuration conf) { + super.setConf(conf); + mapredLocalDirs = conf.getStrings("mapred.local.dir"); + //Setting of the permissions of the local directory is done in + //setup() + } + + /** + * List of commands that the setuid script will execute. + */ + enum TaskCommands { + LAUNCH_TASK_JVM, + TERMINATE_TASK_JVM, + KILL_TASK_JVM, + ENABLE_TASK_FOR_CLEANUP + } + + /** + * Launch a task JVM that will run as the owner of the job. + * + * This method launches a task JVM by executing a setuid + * executable that will switch to the user and run the + * task. + */ + @Override + void launchTaskJVM(TaskController.TaskControllerContext context) + throws IOException { + JvmEnv env = context.env; + // get the JVM command line. + String cmdLine = + TaskLog.buildCommandLine(env.setup, env.vargs, env.stdout, env.stderr, + env.logSize, true); + + StringBuffer sb = new StringBuffer(); + //export out all the environment variable before child command as + //the setuid/setgid binaries would not be getting, any environmental + //variables which begin with LD_*. + for(Entry entry : env.env.entrySet()) { + sb.append("export "); + sb.append(entry.getKey()); + sb.append("="); + sb.append(entry.getValue()); + sb.append("\n"); + } + sb.append(cmdLine); + // write the command to a file in the + // task specific cache directory + writeCommand(sb.toString(), getTaskCacheDirectory(context)); + + // Call the taskcontroller with the right parameters. 
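+ // (the tt-root directory chosen for this task, the job id and the task id,
+ // assembled by buildLaunchTaskArgs below).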
+ List launchTaskJVMArgs = buildLaunchTaskArgs(context); + ShellCommandExecutor shExec = buildTaskControllerExecutor( + TaskCommands.LAUNCH_TASK_JVM, + env.conf.getUser(), + launchTaskJVMArgs, env.workDir, env.env); + context.shExec = shExec; + try { + shExec.execute(); + } catch (Exception e) { + LOG.warn("Exception thrown while launching task JVM : " + + StringUtils.stringifyException(e)); + LOG.warn("Exit code from task is : " + shExec.getExitCode()); + LOG.warn("Output from task-contoller is : " + shExec.getOutput()); + throw new IOException(e); + } + if(LOG.isDebugEnabled()) { + LOG.debug("output after executing task jvm = " + shExec.getOutput()); + } + } + + /** + * Helper method that runs a LinuxTaskController command + * + * @param taskCommand + * @param user + * @param cmdArgs + * @param env + * @throws IOException + */ + private void runCommand(TaskCommands taskCommand, String user, + List cmdArgs, File workDir, Map env) + throws IOException { + + ShellCommandExecutor shExec = + buildTaskControllerExecutor(taskCommand, user, cmdArgs, workDir, env); + try { + shExec.execute(); + } catch (Exception e) { + LOG.warn("Exit code from " + taskCommand.toString() + " is : " + + shExec.getExitCode()); + LOG.warn("Exception thrown by " + taskCommand.toString() + " : " + + StringUtils.stringifyException(e)); + LOG.info("Output from LinuxTaskController's " + taskCommand.toString() + + " follows:"); + logOutput(shExec.getOutput()); + throw new IOException(e); + } + if (LOG.isDebugEnabled()) { + LOG.info("Output from LinuxTaskController's " + taskCommand.toString() + + " follows:"); + logOutput(shExec.getOutput()); + } + } + + /** + * Returns list of arguments to be passed while launching task VM. + * See {@code buildTaskControllerExecutor(TaskCommands, + * String, List, JvmEnv)} documentation. 
+ * @param context + * @return Argument to be used while launching Task VM + */ + private List buildLaunchTaskArgs(TaskControllerContext context) { + List commandArgs = new ArrayList(3); + String taskId = context.task.getTaskID().toString(); + String jobId = getJobId(context); + LOG.debug("getting the task directory as: " + + getTaskCacheDirectory(context)); + commandArgs.add(getDirectoryChosenForTask( + new File(getTaskCacheDirectory(context)), + context)); + commandArgs.add(jobId); + if(!context.task.isTaskCleanupTask()) { + commandArgs.add(taskId); + }else { + commandArgs.add(taskId + TaskTracker.TASK_CLEANUP_SUFFIX); + } + return commandArgs; + } + + private List buildTaskCleanupArgs( + TaskControllerPathDeletionContext context) { + List commandArgs = new ArrayList(3); + commandArgs.add(context.mapredLocalDir.toUri().getPath()); + commandArgs.add(context.task.getJobID().toString()); + + String workDir = ""; + if (context.isWorkDir) { + workDir = "/work"; + } + if (context.task.isTaskCleanupTask()) { + commandArgs.add(context.task.getTaskID() + TaskTracker.TASK_CLEANUP_SUFFIX + + workDir); + } else { + commandArgs.add(context.task.getTaskID() + workDir); + } + + return commandArgs; + } + + /** + * Enables the task for cleanup by changing permissions of the specified path + * in the local filesystem + */ + @Override + void enableTaskForCleanup(PathDeletionContext context) + throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("Going to do " + TaskCommands.ENABLE_TASK_FOR_CLEANUP.toString() + + " for " + context.fullPath); + } + + if (context instanceof TaskControllerPathDeletionContext) { + TaskControllerPathDeletionContext tContext = + (TaskControllerPathDeletionContext) context; + + if (tContext.task.getUser() != null && + tContext.fs instanceof LocalFileSystem) { + try { + runCommand(TaskCommands.ENABLE_TASK_FOR_CLEANUP, + tContext.task.getUser(), + buildTaskCleanupArgs(tContext), null, null); + } catch(IOException e) { + LOG.warn("Uanble to change permissions for " + tContext.fullPath); + } + } + else { + throw new IllegalArgumentException("Either user is null or the " + + "file system is not local file system."); + } + } + else { + throw new IllegalArgumentException("PathDeletionContext provided is not " + + "TaskControllerPathDeletionContext."); + } + } + + private void logOutput(String output) { + String shExecOutput = output; + if (shExecOutput != null) { + for (String str : shExecOutput.split("\n")) { + LOG.info(str); + } + } + } + + // get the Job ID from the information in the TaskControllerContext + private String getJobId(TaskControllerContext context) { + String taskId = context.task.getTaskID().toString(); + TaskAttemptID tId = TaskAttemptID.forName(taskId); + String jobId = tId.getJobID().toString(); + return jobId; + } + + // Get the directory from the list of directories configured + // in mapred.local.dir chosen for storing data pertaining to + // this task. 
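+ // For example (paths illustrative), with mapred.local.dir set to
+ // /disk1/mapred,/disk2/mapred and this task's cache directory living under
+ // /disk2/mapred, the method returns "/disk2/mapred".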
+ private String getDirectoryChosenForTask(File directory, + TaskControllerContext context) { + String jobId = getJobId(context); + String taskId = context.task.getTaskID().toString(); + for (String dir : mapredLocalDirs) { + File mapredDir = new File(dir); + File taskDir = new File(mapredDir, TaskTracker.getLocalTaskDir( + jobId, taskId, context.task.isTaskCleanupTask())); + if (directory.equals(taskDir)) { + return dir; + } + } + + LOG.error("Couldn't parse task cache directory correctly"); + throw new IllegalArgumentException("invalid task cache directory " + + directory.getAbsolutePath()); + } + + /** + * Setup appropriate permissions for directories and files that + * are used by the task. + * + * As the LinuxTaskController launches tasks as a user, different + * from the daemon, all directories and files that are potentially + * used by the tasks are setup with appropriate permissions that + * will allow access. + * + * Until secure data handling is implemented (see HADOOP-4491 and + * HADOOP-4493, for e.g.), the permissions are set up to allow + * read, write and execute access for everyone. This will be + * changed to restricted access as data is handled securely. + */ + void initializeTask(TaskControllerContext context) { + // Setup permissions for the job and task cache directories. + setupTaskCacheFileAccess(context); + // setup permissions for task log directory + setupTaskLogFileAccess(context); + } + + // Allows access for the task to create log files under + // the task log directory + private void setupTaskLogFileAccess(TaskControllerContext context) { + TaskAttemptID taskId = context.task.getTaskID(); + File f = TaskLog.getTaskLogFile(taskId, TaskLog.LogName.SYSLOG); + String taskAttemptLogDir = f.getParentFile().getAbsolutePath(); + changeDirectoryPermissions(taskAttemptLogDir, FILE_PERMISSIONS, false); + } + + // Allows access for the task to read, write and execute + // the files under the job and task cache directories + private void setupTaskCacheFileAccess(TaskControllerContext context) { + String taskId = context.task.getTaskID().toString(); + JobID jobId = JobID.forName(getJobId(context)); + //Change permission for the task across all the disks + for(String localDir : mapredLocalDirs) { + File f = new File(localDir); + File taskCacheDir = new File(f,TaskTracker.getLocalTaskDir( + jobId.toString(), taskId, context.task.isTaskCleanupTask())); + if(taskCacheDir.exists()) { + changeDirectoryPermissions(taskCacheDir.getPath(), + FILE_PERMISSIONS, true); + } + }//end of local directory Iteration + } + + // convenience method to execute chmod. + private void changeDirectoryPermissions(String dir, String mode, + boolean isRecursive) { + int ret = 0; + try { + ret = FileUtil.chmod(dir, mode, isRecursive); + } catch (Exception e) { + LOG.warn("Exception in changing permissions for directory " + dir + + ". Exception: " + e.getMessage()); + } + if (ret != 0) { + LOG.warn("Could not change permissions for directory " + dir); + } + } + /** + * Builds the command line for launching/terminating/killing task JVM. + * Following is the format for launching/terminating/killing task JVM + *
+ * For launching following is command line argument: + *
+ * {@code user-name command tt-root job_id task_id} + *
+ * For terminating/killing task jvm. + * {@code user-name command tt-root task-pid} + * + * @param command command to be executed. + * @param userName user name + * @param cmdArgs list of extra arguments + * @param env JVM environment variables. + * @return {@link ShellCommandExecutor} + * @throws IOException + */ + private ShellCommandExecutor buildTaskControllerExecutor( + TaskCommands command, String userName, List cmdArgs, + File workDir, Map env) + throws IOException { + String[] taskControllerCmd = new String[3 + cmdArgs.size()]; + taskControllerCmd[0] = getTaskControllerExecutablePath(); + taskControllerCmd[1] = userName; + taskControllerCmd[2] = String.valueOf(command.ordinal()); + int i = 3; + for (String cmdArg : cmdArgs) { + taskControllerCmd[i++] = cmdArg; + } + if (LOG.isDebugEnabled()) { + for (String cmd : taskControllerCmd) { + LOG.debug("taskctrl command = " + cmd); + } + } + ShellCommandExecutor shExec = null; + if(workDir != null && workDir.exists()) { + shExec = new ShellCommandExecutor(taskControllerCmd, + workDir, env); + } else { + shExec = new ShellCommandExecutor(taskControllerCmd); + } + + return shExec; + } + + // Return the task specific directory under the cache. + private String getTaskCacheDirectory(TaskControllerContext context) { + // In the case of JVM reuse, the task specific directory + // is different from what is set with respect with + // env.workDir. Hence building this from the taskId everytime. + String taskId = context.task.getTaskID().toString(); + File cacheDirForJob = context.env.workDir.getParentFile().getParentFile(); + if(context.task.isTaskCleanupTask()) { + taskId = taskId + TaskTracker.TASK_CLEANUP_SUFFIX; + } + return new File(cacheDirForJob, taskId).getAbsolutePath(); + } + + // Write the JVM command line to a file under the specified directory + // Note that the JVM will be launched using a setuid executable, and + // could potentially contain strings defined by a user. Hence, to + // prevent special character attacks, we write the command line to + // a file and execute it. + private void writeCommand(String cmdLine, + String directory) throws IOException { + + PrintWriter pw = null; + String commandFile = directory + File.separator + COMMAND_FILE; + LOG.info("Writing commands to " + commandFile); + try { + FileWriter fw = new FileWriter(commandFile); + BufferedWriter bw = new BufferedWriter(fw); + pw = new PrintWriter(bw); + pw.write(cmdLine); + } catch (IOException ioe) { + LOG.error("Caught IOException while writing JVM command line to file. " + + ioe.getMessage()); + } finally { + if (pw != null) { + pw.close(); + } + // set execute permissions for all on the file. 
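+ // (The script is written by the tasktracker daemon's user but executed as
+ // the job owner through the setuid task-controller, so it needs to be
+ // readable and executable by other users.)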
+ File f = new File(commandFile); + if (f.exists()) { + f.setReadable(true, false); + f.setExecutable(true, false); + } + } + } + + protected String getTaskControllerExecutablePath() { + return taskControllerExe; + } + + /** + * Sets up the permissions of the following directories: + * + * Job cache directory + * Archive directory + * Hadoop log directories + * + */ + @Override + void setup() { + //set up job cache directory and associated permissions + String localDirs[] = this.mapredLocalDirs; + for(String localDir : localDirs) { + //Cache root + File cacheDirectory = new File(localDir,TaskTracker.getCacheSubdir()); + File jobCacheDirectory = new File(localDir,TaskTracker.getJobCacheSubdir()); + if(!cacheDirectory.exists()) { + if(!cacheDirectory.mkdirs()) { + LOG.warn("Unable to create cache directory : " + + cacheDirectory.getPath()); + } + } + if(!jobCacheDirectory.exists()) { + if(!jobCacheDirectory.mkdirs()) { + LOG.warn("Unable to create job cache directory : " + + jobCacheDirectory.getPath()); + } + } + //Give world writable permission for every directory under + //mapred-local-dir. + //Child tries to write files under it when executing. + changeDirectoryPermissions(localDir, FILE_PERMISSIONS, true); + }//end of local directory manipulations + //setting up perms for user logs + File taskLog = TaskLog.getUserLogDir(); + changeDirectoryPermissions(taskLog.getPath(), FILE_PERMISSIONS,false); + } + + /* + * Create Job directories across disks and set their permissions to 777 + * This way when tasks are run we just need to setup permissions for + * task folder. + */ + @Override + void initializeJob(JobID jobid) { + for(String localDir : this.mapredLocalDirs) { + File jobDirectory = new File(localDir, + TaskTracker.getLocalJobDir(jobid.toString())); + if(!jobDirectory.exists()) { + if(!jobDirectory.mkdir()) { + LOG.warn("Unable to create job cache directory : " + + jobDirectory.getPath()); + continue; + } + } + //Should be recursive because the jar and work folders might be + //present under the job cache directory + changeDirectoryPermissions( + jobDirectory.getPath(), FILE_PERMISSIONS, true); + } + } + + /** + * API which builds the command line to be pass to LinuxTaskController + * binary to terminate/kill the task. See + * {@code buildTaskControllerExecutor(TaskCommands, + * String, List, JvmEnv)} documentation. + * + * + * @param context context of task which has to be passed kill signal. 
+ * + */ + private List buildKillTaskCommandArgs(TaskControllerContext + context){ + List killTaskJVMArgs = new ArrayList(); + killTaskJVMArgs.add(context.pid); + return killTaskJVMArgs; + } + + /** + * Convenience method used to sending appropriate Kill signal to the task + * VM + * @param context + * @param command + * @throws IOException + */ + private void finishTask(TaskControllerContext context, + TaskCommands command) throws IOException{ + if(context.task == null) { + LOG.info("Context task null not killing the JVM"); + return; + } + ShellCommandExecutor shExec = buildTaskControllerExecutor( + command, context.env.conf.getUser(), + buildKillTaskCommandArgs(context), context.env.workDir, + context.env.env); + try { + shExec.execute(); + } catch (Exception e) { + LOG.warn("Output from task-contoller is : " + shExec.getOutput()); + throw new IOException(e); + } + } + + @Override + void terminateTask(TaskControllerContext context) { + try { + finishTask(context, TaskCommands.TERMINATE_TASK_JVM); + } catch (Exception e) { + LOG.warn("Exception thrown while sending kill to the Task VM " + + StringUtils.stringifyException(e)); + } + } + + @Override + void killTask(TaskControllerContext context) { + try { + finishTask(context, TaskCommands.KILL_TASK_JVM); + } catch (Exception e) { + LOG.warn("Exception thrown while sending destroy to the Task VM " + + StringUtils.stringifyException(e)); + } + } +} + diff --git a/src/mapred/org/apache/hadoop/mapred/LocalJobRunner.java b/src/mapred/org/apache/hadoop/mapred/LocalJobRunner.java new file mode 100644 index 0000000..a4e1ac0 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/LocalJobRunner.java @@ -0,0 +1,483 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.BytesWritable; +import org.apache.hadoop.io.DataOutputBuffer; +import org.apache.hadoop.io.serializer.SerializationFactory; +import org.apache.hadoop.io.serializer.Serializer; +import org.apache.hadoop.mapred.JobTrackerMetricsInst; +import org.apache.hadoop.mapred.JvmTask; +import org.apache.hadoop.mapred.JobClient.RawSplit; +import org.apache.hadoop.util.ReflectionUtils; + +/** Implements MapReduce locally, in-process, for debugging. 
*/ +class LocalJobRunner implements JobSubmissionProtocol { + public static final Log LOG = + LogFactory.getLog(LocalJobRunner.class); + + private FileSystem fs; + private HashMap jobs = new HashMap(); + private JobConf conf; + private int map_tasks = 0; + private int reduce_tasks = 0; + + private JobTrackerInstrumentation myMetrics = null; + + private static final String jobDir = "localRunner/"; + + public long getProtocolVersion(String protocol, long clientVersion) { + return JobSubmissionProtocol.versionID; + } + + private class Job extends Thread + implements TaskUmbilicalProtocol { + private Path file; + private JobID id; + private JobConf job; + + private JobStatus status; + private ArrayList mapIds = new ArrayList(); + private MapOutputFile mapoutputFile; + private JobProfile profile; + private Path localFile; + private FileSystem localFs; + boolean killed = false; + + // Counters summed over all the map/reduce tasks which + // have successfully completed + private Counters completedTaskCounters = new Counters(); + + // Current counters, including incomplete task(s) + private Counters currentCounters = new Counters(); + + public long getProtocolVersion(String protocol, long clientVersion) { + return TaskUmbilicalProtocol.versionID; + } + + public Job(JobID jobid, JobConf conf) throws IOException { + this.file = new Path(getSystemDir(), jobid + "/job.xml"); + this.id = jobid; + this.mapoutputFile = new MapOutputFile(jobid); + this.mapoutputFile.setConf(conf); + + this.localFile = new JobConf(conf).getLocalPath(jobDir+id+".xml"); + this.localFs = FileSystem.getLocal(conf); + + file.getFileSystem(conf).copyToLocalFile(file, localFile); + this.job = new JobConf(localFile); + profile = new JobProfile(job.getUser(), id, file.toString(), + "http://localhost:8080/", job.getJobName()); + status = new JobStatus(id, 0.0f, 0.0f, JobStatus.RUNNING); + + jobs.put(id, this); + + this.start(); + } + + JobProfile getProfile() { + return profile; + } + + @Override + public void run() { + JobID jobId = profile.getJobID(); + JobContext jContext = new JobContext(conf, jobId); + OutputCommitter outputCommitter = job.getOutputCommitter(); + try { + // split input into minimum number of splits + RawSplit[] rawSplits; + if (job.getUseNewMapper()) { + org.apache.hadoop.mapreduce.InputFormat input = + ReflectionUtils.newInstance(jContext.getInputFormatClass(), jContext.getJobConf()); + + List splits = input.getSplits(jContext); + rawSplits = new RawSplit[splits.size()]; + DataOutputBuffer buffer = new DataOutputBuffer(); + SerializationFactory factory = new SerializationFactory(conf); + Serializer serializer = + factory.getSerializer(splits.get(0).getClass()); + serializer.open(buffer); + for (int i = 0; i < splits.size(); i++) { + buffer.reset(); + serializer.serialize(splits.get(i)); + RawSplit rawSplit = new RawSplit(); + rawSplit.setClassName(splits.get(i).getClass().getName()); + rawSplit.setDataLength(splits.get(i).getLength()); + rawSplit.setBytes(buffer.getData(), 0, buffer.getLength()); + rawSplit.setLocations(splits.get(i).getLocations()); + rawSplits[i] = rawSplit; + } + + } else { + InputSplit[] splits = job.getInputFormat().getSplits(job, 1); + rawSplits = new RawSplit[splits.length]; + DataOutputBuffer buffer = new DataOutputBuffer(); + for (int i = 0; i < splits.length; i++) { + buffer.reset(); + splits[i].write(buffer); + RawSplit rawSplit = new RawSplit(); + rawSplit.setClassName(splits[i].getClass().getName()); + rawSplit.setDataLength(splits[i].getLength()); + 
rawSplit.setBytes(buffer.getData(), 0, buffer.getLength()); + rawSplit.setLocations(splits[i].getLocations()); + rawSplits[i] = rawSplit; + } + } + + int numReduceTasks = job.getNumReduceTasks(); + if (numReduceTasks > 1 || numReduceTasks < 0) { + // we only allow 0 or 1 reducer in local mode + numReduceTasks = 1; + job.setNumReduceTasks(1); + } + outputCommitter.setupJob(jContext); + status.setSetupProgress(1.0f); + + for (int i = 0; i < rawSplits.length; i++) { + if (!this.isInterrupted()) { + TaskAttemptID mapId = new TaskAttemptID(new TaskID(jobId, true, i),0); + mapIds.add(mapId); + MapTask map = new MapTask(file.toString(), + mapId, i, + rawSplits[i].getClassName(), + rawSplits[i].getBytes(), 1, + job.getUser()); + JobConf localConf = new JobConf(job); + map.setJobFile(localFile.toString()); + map.localizeConfiguration(localConf); + map.setConf(localConf); + map_tasks += 1; + myMetrics.launchMap(mapId); + map.run(localConf, this); + myMetrics.completeMap(mapId); + map_tasks -= 1; + updateCounters(map); + } else { + throw new InterruptedException(); + } + } + TaskAttemptID reduceId = + new TaskAttemptID(new TaskID(jobId, false, 0), 0); + try { + if (numReduceTasks > 0) { + // move map output to reduce input + for (int i = 0; i < mapIds.size(); i++) { + if (!this.isInterrupted()) { + TaskAttemptID mapId = mapIds.get(i); + Path mapOut = this.mapoutputFile.getOutputFile(mapId); + Path reduceIn = this.mapoutputFile.getInputFileForWrite( + mapId.getTaskID(),reduceId, + localFs.getLength(mapOut)); + if (!localFs.mkdirs(reduceIn.getParent())) { + throw new IOException("Mkdirs failed to create " + + reduceIn.getParent().toString()); + } + if (!localFs.rename(mapOut, reduceIn)) + throw new IOException("Couldn't rename " + mapOut); + } else { + throw new InterruptedException(); + } + } + if (!this.isInterrupted()) { + ReduceTask reduce = new ReduceTask(file.toString(), + reduceId, 0, mapIds.size(), + 1, job.getUser()); + JobConf localConf = new JobConf(job); + reduce.setJobFile(localFile.toString()); + reduce.localizeConfiguration(localConf); + reduce.setConf(localConf); + reduce_tasks += 1; + myMetrics.launchReduce(reduce.getTaskID()); + reduce.run(localConf, this); + myMetrics.completeReduce(reduce.getTaskID()); + reduce_tasks -= 1; + updateCounters(reduce); + } else { + throw new InterruptedException(); + } + } + } finally { + for (TaskAttemptID mapId: mapIds) { + this.mapoutputFile.removeAll(mapId); + } + if (numReduceTasks == 1) { + this.mapoutputFile.removeAll(reduceId); + } + } + // delete the temporary directory in output directory + outputCommitter.commitJob(jContext); + status.setCleanupProgress(1.0f); + + if (killed) { + this.status.setRunState(JobStatus.KILLED); + } else { + this.status.setRunState(JobStatus.SUCCEEDED); + } + + JobEndNotifier.localRunnerNotification(job, status); + + } catch (Throwable t) { + try { + outputCommitter.abortJob(jContext, JobStatus.FAILED); + } catch (IOException ioe) { + LOG.info("Error cleaning up job:" + id); + } + status.setCleanupProgress(1.0f); + if (killed) { + this.status.setRunState(JobStatus.KILLED); + } else { + this.status.setRunState(JobStatus.FAILED); + } + LOG.warn(id, t); + + JobEndNotifier.localRunnerNotification(job, status); + + } finally { + try { + file.getFileSystem(job).delete(file.getParent(), true); // delete submit dir + localFs.delete(localFile, true); // delete local copy + } catch (IOException e) { + LOG.warn("Error cleaning up "+id+": "+e); + } + } + } + + // TaskUmbilicalProtocol methods + + public JvmTask 
getTask(JvmContext context) { return null; } + + public boolean statusUpdate(TaskAttemptID taskId, TaskStatus taskStatus) + throws IOException, InterruptedException { + LOG.info(taskStatus.getStateString()); + float taskIndex = mapIds.indexOf(taskId); + if (taskIndex >= 0) { // mapping + float numTasks = mapIds.size(); + status.setMapProgress(taskIndex/numTasks + taskStatus.getProgress()/numTasks); + } else { + status.setReduceProgress(taskStatus.getProgress()); + } + currentCounters = Counters.sum(completedTaskCounters, taskStatus.getCounters()); + + // ignore phase + + return true; + } + + /** + * Task is reporting that it is in commit_pending + * and it is waiting for the commit Response + */ + public void commitPending(TaskAttemptID taskid, + TaskStatus taskStatus) + throws IOException, InterruptedException { + statusUpdate(taskid, taskStatus); + } + + /** + * Updates counters corresponding to completed tasks. + * @param task A map or reduce task which has just been + * successfully completed + */ + private void updateCounters(Task task) { + completedTaskCounters.incrAllCounters(task.getCounters()); + } + + public void reportDiagnosticInfo(TaskAttemptID taskid, String trace) { + // Ignore for now + } + + public void reportNextRecordRange(TaskAttemptID taskid, + SortedRanges.Range range) throws IOException { + LOG.info("Task " + taskid + " reportedNextRecordRange " + range); + } + + public boolean ping(TaskAttemptID taskid) throws IOException { + return true; + } + + public boolean canCommit(TaskAttemptID taskid) + throws IOException { + return true; + } + + public void done(TaskAttemptID taskId) throws IOException { + int taskIndex = mapIds.indexOf(taskId); + if (taskIndex >= 0) { // mapping + status.setMapProgress(1.0f); + } else { + status.setReduceProgress(1.0f); + } + } + + public synchronized void fsError(TaskAttemptID taskId, String message) + throws IOException { + LOG.fatal("FSError: "+ message + "from task: " + taskId); + } + + public void shuffleError(TaskAttemptID taskId, String message) throws IOException { + LOG.fatal("shuffleError: "+ message + "from task: " + taskId); + } + + public synchronized void fatalError(TaskAttemptID taskId, String msg) + throws IOException { + LOG.fatal("Fatal: "+ msg + "from task: " + taskId); + } + + public MapTaskCompletionEventsUpdate getMapCompletionEvents(JobID jobId, + int fromEventId, int maxLocs, TaskAttemptID id) throws IOException { + return new MapTaskCompletionEventsUpdate(TaskCompletionEvent.EMPTY_ARRAY, + false); + } + + } + + public LocalJobRunner(JobConf conf) throws IOException { + this.fs = FileSystem.getLocal(conf); + this.conf = conf; + myMetrics = new JobTrackerMetricsInst(null, new JobConf(conf)); + } + + // JobSubmissionProtocol methods + + private static int jobid = 0; + public synchronized JobID getNewJobId() { + return new JobID("local", ++jobid); + } + + public JobStatus submitJob(JobID jobid) throws IOException { + return new Job(jobid, this.conf).status; + } + + public void killJob(JobID id) { + jobs.get(id).killed = true; + jobs.get(id).interrupt(); + } + + public void setJobPriority(JobID id, String jp) throws IOException { + throw new UnsupportedOperationException("Changing job priority " + + "in LocalJobRunner is not supported."); + } + + /** Throws {@link UnsupportedOperationException} */ + public boolean killTask(TaskAttemptID taskId, boolean shouldFail) throws IOException { + throw new UnsupportedOperationException("Killing tasks in " + + "LocalJobRunner is not supported"); + } + + public JobProfile 
getJobProfile(JobID id) { + Job job = jobs.get(id); + if(job != null) + return job.getProfile(); + else + return null; + } + + public TaskReport[] getMapTaskReports(JobID id) { + return new TaskReport[0]; + } + public TaskReport[] getReduceTaskReports(JobID id) { + return new TaskReport[0]; + } + public TaskReport[] getCleanupTaskReports(JobID id) { + return new TaskReport[0]; + } + public TaskReport[] getSetupTaskReports(JobID id) { + return new TaskReport[0]; + } + + public JobStatus getJobStatus(JobID id) { + Job job = jobs.get(id); + if(job != null) + return job.status; + else + return null; + } + + public Counters getJobCounters(JobID id) { + Job job = jobs.get(id); + return job.currentCounters; + } + + public String getFilesystemName() throws IOException { + return fs.getUri().toString(); + } + + public ClusterStatus getClusterStatus(boolean detailed) { + return new ClusterStatus(1, 0, 0, map_tasks, reduce_tasks, 1, 1, + JobTracker.State.RUNNING); + } + + public JobStatus[] jobsToComplete() {return null;} + + public TaskCompletionEvent[] getTaskCompletionEvents(JobID jobid + , int fromEventId, int maxEvents) throws IOException { + return TaskCompletionEvent.EMPTY_ARRAY; + } + + public JobStatus[] getAllJobs() {return null;} + + + /** + * Returns the diagnostic information for a particular task in the given job. + * To be implemented + */ + public String[] getTaskDiagnostics(TaskAttemptID taskid) + throws IOException{ + return new String [0]; + } + + /** + * @see org.apache.hadoop.mapred.JobSubmissionProtocol#getSystemDir() + */ + public String getSystemDir() { + Path sysDir = new Path(conf.get("mapred.system.dir", "/tmp/hadoop/mapred/system")); + return fs.makeQualified(sysDir).toString(); + } + + @Override + public JobStatus[] getJobsFromQueue(String queue) throws IOException { + return null; + } + + @Override + public JobQueueInfo[] getQueues() throws IOException { + return null; + } + + + @Override + public JobQueueInfo getQueueInfo(String queue) throws IOException { + return null; + } + + @Override + public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException{ + return null; +} +} diff --git a/src/mapred/org/apache/hadoop/mapred/MRConstants.java b/src/mapred/org/apache/hadoop/mapred/MRConstants.java new file mode 100644 index 0000000..fc60649 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/MRConstants.java @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.mapred; + +/******************************* + * Some handy constants + * + *******************************/ +interface MRConstants { + // + // Timeouts, constants + // + public static final int HEARTBEAT_INTERVAL_MIN = 3 * 1000; + + public static final long COUNTER_UPDATE_INTERVAL = 60 * 1000; + + // + // Result codes + // + public static int SUCCESS = 0; + public static int FILE_NOT_FOUND = -1; + + /** + * The custom http header used for the map output length. + */ + public static final String MAP_OUTPUT_LENGTH = "Map-Output-Length"; + + /** + * The custom http header used for the "raw" map output length. + */ + public static final String RAW_MAP_OUTPUT_LENGTH = "Raw-Map-Output-Length"; + + /** + * The map task from which the map output data is being transferred + */ + public static final String FROM_MAP_TASK = "from-map-task"; + + /** + * The reduce task number for which this map output is being transferred + */ + public static final String FOR_REDUCE_TASK = "for-reduce-task"; + + public static final String WORKDIR = "work"; +} diff --git a/src/mapred/org/apache/hadoop/mapred/MapFileOutputFormat.java b/src/mapred/org/apache/hadoop/mapred/MapFileOutputFormat.java new file mode 100644 index 0000000..7085723 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/MapFileOutputFormat.java @@ -0,0 +1,109 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.util.Arrays; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.FileUtil; + +import org.apache.hadoop.io.MapFile; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.SequenceFile.CompressionType; +import org.apache.hadoop.io.compress.CompressionCodec; +import org.apache.hadoop.io.compress.DefaultCodec; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.Progressable; +import org.apache.hadoop.util.ReflectionUtils; + +/** An {@link OutputFormat} that writes {@link MapFile}s. 
*/ +public class MapFileOutputFormat +extends FileOutputFormat { + + public RecordWriter getRecordWriter(FileSystem ignored, JobConf job, + String name, Progressable progress) + throws IOException { + // get the path of the temporary output file + Path file = FileOutputFormat.getTaskOutputPath(job, name); + + FileSystem fs = file.getFileSystem(job); + CompressionCodec codec = null; + CompressionType compressionType = CompressionType.NONE; + if (getCompressOutput(job)) { + // find the kind of compression to do + compressionType = SequenceFileOutputFormat.getOutputCompressionType(job); + + // find the right codec + Class codecClass = getOutputCompressorClass(job, + DefaultCodec.class); + codec = ReflectionUtils.newInstance(codecClass, job); + } + + // ignore the progress parameter, since MapFile is local + final MapFile.Writer out = + new MapFile.Writer(job, fs, file.toString(), + job.getOutputKeyClass().asSubclass(WritableComparable.class), + job.getOutputValueClass().asSubclass(Writable.class), + compressionType, codec, + progress); + + return new RecordWriter() { + + public void write(WritableComparable key, Writable value) + throws IOException { + + out.append(key, value); + } + + public void close(Reporter reporter) throws IOException { out.close();} + }; + } + + /** Open the output generated by this format. */ + public static MapFile.Reader[] getReaders(FileSystem ignored, Path dir, + Configuration conf) + throws IOException { + FileSystem fs = dir.getFileSystem(conf); + Path[] names = FileUtil.stat2Paths(fs.listStatus(dir)); + + // sort names, so that hash partitioning works + Arrays.sort(names); + + MapFile.Reader[] parts = new MapFile.Reader[names.length]; + for (int i = 0; i < names.length; i++) { + parts[i] = new MapFile.Reader(fs, names[i].toString(), conf); + } + return parts; + } + + /** Get an entry from output generated by this class. */ + public static + Writable getEntry(MapFile.Reader[] readers, + Partitioner partitioner, + K key, + V value) throws IOException { + int part = partitioner.getPartition(key, value, readers.length); + return readers[part].get(key, value); + } + +} + diff --git a/src/mapred/org/apache/hadoop/mapred/MapOutputFile.java b/src/mapred/org/apache/hadoop/mapred/MapOutputFile.java new file mode 100644 index 0000000..f8a25e0 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/MapOutputFile.java @@ -0,0 +1,204 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocalDirAllocator; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.MRAsyncDiskService; +import org.apache.hadoop.util.StringUtils; + +/** + * Manipulate the working area for the transient store for maps and reduces. + */ +class MapOutputFile { + + private JobConf conf; + private JobID jobId; + public static final Log LOG = LogFactory.getLog(MapOutputFile.class); + + MapOutputFile() { + } + + MapOutputFile(JobID jobId, MRAsyncDiskService asyncDiskService) { + this.jobId = jobId; + this.asyncDiskService = asyncDiskService; + } + + MapOutputFile(JobID jobId) { + this(jobId, null); + } + + private MRAsyncDiskService asyncDiskService = null; + private LocalDirAllocator lDirAlloc = + new LocalDirAllocator("mapred.local.dir"); + + /** Return the path to local map output file created earlier + * @param mapTaskId a map task id + */ + public Path getOutputFile(TaskAttemptID mapTaskId) + throws IOException { + return lDirAlloc.getLocalPathToRead(TaskTracker.getIntermediateOutputDir( + jobId.toString(), mapTaskId.toString()) + + "/file.out", conf); + } + + /** Create a local map output file name. + * @param mapTaskId a map task id + * @param size the size of the file + */ + public Path getOutputFileForWrite(TaskAttemptID mapTaskId, long size) + throws IOException { + return lDirAlloc.getLocalPathForWrite(TaskTracker.getIntermediateOutputDir( + jobId.toString(), mapTaskId.toString()) + + "/file.out", size, conf); + } + + /** Return the path to a local map output index file created earlier + * @param mapTaskId a map task id + */ + public Path getOutputIndexFile(TaskAttemptID mapTaskId) + throws IOException { + return lDirAlloc.getLocalPathToRead(TaskTracker.getIntermediateOutputDir( + jobId.toString(), mapTaskId.toString()) + + "/file.out.index", conf); + } + + /** Create a local map output index file name. + * @param mapTaskId a map task id + * @param size the size of the file + */ + public Path getOutputIndexFileForWrite(TaskAttemptID mapTaskId, long size) + throws IOException { + return lDirAlloc.getLocalPathForWrite(TaskTracker.getIntermediateOutputDir( + jobId.toString(), mapTaskId.toString()) + + "/file.out.index", + size, conf); + } + + /** Return a local map spill file created earlier. + * @param mapTaskId a map task id + * @param spillNumber the number + */ + public Path getSpillFile(TaskAttemptID mapTaskId, int spillNumber) + throws IOException { + return lDirAlloc.getLocalPathToRead(TaskTracker.getIntermediateOutputDir( + jobId.toString(), mapTaskId.toString()) + + "/spill" + + spillNumber + ".out", conf); + } + + /** Create a local map spill file name. 
+ * @param mapTaskId a map task id + * @param spillNumber the number + * @param size the size of the file + */ + public Path getSpillFileForWrite(TaskAttemptID mapTaskId, int spillNumber, + long size) throws IOException { + return lDirAlloc.getLocalPathForWrite(TaskTracker.getIntermediateOutputDir( + jobId.toString(), mapTaskId.toString()) + + "/spill" + + spillNumber + ".out", size, conf); + } + + /** Return a local map spill index file created earlier + * @param mapTaskId a map task id + * @param spillNumber the number + */ + public Path getSpillIndexFile(TaskAttemptID mapTaskId, int spillNumber) + throws IOException { + return lDirAlloc.getLocalPathToRead(TaskTracker.getIntermediateOutputDir( + jobId.toString(), mapTaskId.toString()) + + "/spill" + + spillNumber + ".out.index", conf); + } + + /** Create a local map spill index file name. + * @param mapTaskId a map task id + * @param spillNumber the number + * @param size the size of the file + */ + public Path getSpillIndexFileForWrite(TaskAttemptID mapTaskId, int spillNumber, + long size) throws IOException { + return lDirAlloc.getLocalPathForWrite(TaskTracker.getIntermediateOutputDir( + jobId.toString(), mapTaskId.toString()) + + "/spill" + spillNumber + + ".out.index", size, conf); + } + + /** Return a local reduce input file created earlier + * @param mapTaskId a map task id + * @param reduceTaskId a reduce task id + */ + public Path getInputFile(int mapId, TaskAttemptID reduceTaskId) + throws IOException { + // TODO *oom* should use a format here + return lDirAlloc.getLocalPathToRead(TaskTracker.getIntermediateOutputDir( + jobId.toString(), reduceTaskId.toString()) + + "/map_" + mapId + ".out", + conf); + } + + /** Create a local reduce input file name. + * @param mapTaskId a map task id + * @param reduceTaskId a reduce task id + * @param size the size of the file + */ + public Path getInputFileForWrite(TaskID mapId, TaskAttemptID reduceTaskId, + long size) + throws IOException { + // TODO *oom* should use a format here + return lDirAlloc.getLocalPathForWrite(TaskTracker.getIntermediateOutputDir( + jobId.toString(), reduceTaskId.toString()) + + "/map_" + mapId.getId() + ".out", + size, conf); + } + + /** Removes all of the files related to a task. */ + public void removeAll(TaskAttemptID taskId) throws IOException { + String toBeDeleted = + TaskTracker.getIntermediateOutputDir(jobId.toString(), taskId.toString()); + if (asyncDiskService != null) { + asyncDiskService.moveAndDeleteFromEachVolume(toBeDeleted); + LOG.info("Move and then delete map ouput " + + toBeDeleted + " for task " + taskId); + return; + } + LOG.info("Delete map ouput " + toBeDeleted + " for task " + taskId); + conf.deleteLocalFiles(toBeDeleted); + } + + public void setConf(Configuration conf) { + if (conf instanceof JobConf) { + this.conf = (JobConf) conf; + } else { + this.conf = new JobConf(conf); + } + } + + public void setJobId(JobID jobId) { + this.jobId = jobId; + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/MapReduceBase.java b/src/mapred/org/apache/hadoop/mapred/MapReduceBase.java new file mode 100644 index 0000000..0c71aae --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/MapReduceBase.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; + +import org.apache.hadoop.io.Closeable; +import org.apache.hadoop.mapred.JobConfigurable; + +/** + * Base class for {@link Mapper} and {@link Reducer} implementations. + * + *

<p>Provides default no-op implementations for a few methods; most non-trivial + * applications need to override some of them.</p>
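A minimal sketch of how application code typically builds on MapReduceBase in the old org.apache.hadoop.mapred API: the subclass inherits the no-op close() and overrides configure() to read a job setting. The class name WordTokenMapper, the example.lowercase key and the LongWritable/Text input types are illustrative assumptions, not part of this patch.

    import java.io.IOException;

    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.MapReduceBase;
    import org.apache.hadoop.mapred.Mapper;
    import org.apache.hadoop.mapred.OutputCollector;
    import org.apache.hadoop.mapred.Reporter;

    // Illustrative mapper: keeps the inherited no-op close() and overrides
    // configure() to pick up a single (hypothetical) job knob.
    public class WordTokenMapper extends MapReduceBase
        implements Mapper<LongWritable, Text, Text, IntWritable> {

      private static final IntWritable ONE = new IntWritable(1);
      private boolean lowerCase;

      @Override
      public void configure(JobConf job) {
        lowerCase = job.getBoolean("example.lowercase", true);
      }

      public void map(LongWritable offset, Text line,
                      OutputCollector<Text, IntWritable> out,
                      Reporter reporter) throws IOException {
        String text = lowerCase ? line.toString().toLowerCase() : line.toString();
        for (String token : text.split("\\s+")) {
          if (token.length() > 0) {
            out.collect(new Text(token), ONE);
          }
        }
      }
    }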

+ */ +@Deprecated +public class MapReduceBase implements Closeable, JobConfigurable { + + /** Default implementation that does nothing. */ + public void close() throws IOException { + } + + /** Default implementation that does nothing. */ + public void configure(JobConf job) { + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/MapReducePolicyProvider.java b/src/mapred/org/apache/hadoop/mapred/MapReducePolicyProvider.java new file mode 100644 index 0000000..bbffa98 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/MapReducePolicyProvider.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import org.apache.hadoop.security.authorize.PolicyProvider; +import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol; +import org.apache.hadoop.security.authorize.Service; + +/** + * {@link PolicyProvider} for Map-Reduce protocols. + */ +public class MapReducePolicyProvider extends PolicyProvider { + private static final Service[] mapReduceServices = + new Service[] { + new Service("security.inter.tracker.protocol.acl", + InterTrackerProtocol.class), + new Service("security.job.submission.protocol.acl", + JobSubmissionProtocol.class), + new Service("security.task.umbilical.protocol.acl", + TaskUmbilicalProtocol.class), + new Service("security.refresh.policy.protocol.acl", + RefreshAuthorizationPolicyProtocol.class), + }; + + @Override + public Service[] getServices() { + return mapReduceServices; + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/MapRunnable.java b/src/mapred/org/apache/hadoop/mapred/MapRunnable.java new file mode 100644 index 0000000..6b112b3 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/MapRunnable.java @@ -0,0 +1,50 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; + +/** + * Expert: Generic interface for {@link Mapper}s. + * + *

<p>Custom implementations of <code>MapRunnable</code> can exert greater + * control over map processing, e.g. multi-threaded or asynchronous mappers.</p>
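The runner is pluggable per job. A minimal sketch of swapping in the bundled multi-threaded runner through JobConf; the thread-count key and its value are assumptions based on how MultithreadedMapRunner is conventionally configured, not something this patch prescribes.

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.lib.MultithreadedMapRunner;

    public class RunnerSetup {
      // Sketch: replace the default MapRunner with the multi-threaded runner,
      // useful when map() mostly blocks on I/O rather than CPU.
      public static void useMultithreadedRunner(JobConf job) {
        job.setMapRunnerClass(MultithreadedMapRunner.class);
        // Concurrent map() invocations per task (illustrative value; the key
        // is assumed to be the one read by MultithreadedMapRunner).
        job.setInt("mapred.map.multithreadedrunner.threads", 10);
      }
    }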

+ * + * @see Mapper + * @deprecated Use {@link org.apache.hadoop.mapreduce.Mapper} instead. + */ +@Deprecated +public interface MapRunnable + extends JobConfigurable { + + /** + * Start mapping input <key, value> pairs. + * + *

<p>Mapping of input records to output records is complete when this method + * returns.</p>

+ * + * @param input the {@link RecordReader} to read the input records. + * @param output the {@link OutputCollector} to collect the outputrecords. + * @param reporter {@link Reporter} to report progress, status-updates etc. + * @throws IOException + */ + void run(RecordReader input, OutputCollector output, + Reporter reporter) + throws IOException; +} diff --git a/src/mapred/org/apache/hadoop/mapred/MapRunner.java b/src/mapred/org/apache/hadoop/mapred/MapRunner.java new file mode 100644 index 0000000..dcb7e7c --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/MapRunner.java @@ -0,0 +1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; + +import org.apache.hadoop.util.ReflectionUtils; + +/** Default {@link MapRunnable} implementation.*/ +public class MapRunner + implements MapRunnable { + + private Mapper mapper; + private boolean incrProcCount; + + @SuppressWarnings("unchecked") + public void configure(JobConf job) { + this.mapper = ReflectionUtils.newInstance(job.getMapperClass(), job); + //increment processed counter only if skipping feature is enabled + this.incrProcCount = SkipBadRecords.getMapperMaxSkipRecords(job)>0 && + SkipBadRecords.getAutoIncrMapperProcCount(job); + } + + public void run(RecordReader input, OutputCollector output, + Reporter reporter) + throws IOException { + try { + // allocate key & value instances that are re-used for all entries + K1 key = input.createKey(); + V1 value = input.createValue(); + + while (input.next(key, value)) { + // map pair to output + mapper.map(key, value, output, reporter); + if(incrProcCount) { + reporter.incrCounter(SkipBadRecords.COUNTER_GROUP, + SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS, 1); + } + } + } finally { + mapper.close(); + } + } + + protected Mapper getMapper() { + return mapper; + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/MapTask.java b/src/mapred/org/apache/hadoop/mapred/MapTask.java new file mode 100644 index 0000000..49b36e7 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/MapTask.java @@ -0,0 +1,1572 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import static org.apache.hadoop.mapred.Task.Counter.COMBINE_INPUT_RECORDS; +import static org.apache.hadoop.mapred.Task.Counter.COMBINE_OUTPUT_RECORDS; +import static org.apache.hadoop.mapred.Task.Counter.MAP_INPUT_BYTES; +import static org.apache.hadoop.mapred.Task.Counter.MAP_INPUT_RECORDS; +import static org.apache.hadoop.mapred.Task.Counter.MAP_OUTPUT_BYTES; +import static org.apache.hadoop.mapred.Task.Counter.MAP_OUTPUT_RECORDS; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.DataOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.ReentrantLock; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.BytesWritable; +import org.apache.hadoop.io.DataInputBuffer; +import org.apache.hadoop.io.RawComparator; +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.SequenceFile.CompressionType; +import org.apache.hadoop.io.compress.CompressionCodec; +import org.apache.hadoop.io.compress.DefaultCodec; +import org.apache.hadoop.io.serializer.Deserializer; +import org.apache.hadoop.io.serializer.SerializationFactory; +import org.apache.hadoop.io.serializer.Serializer; +import org.apache.hadoop.mapred.IFile.Writer; +import org.apache.hadoop.mapred.Merger.Segment; +import org.apache.hadoop.mapred.SortedRanges.SkipRangeIterator; +import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.hadoop.util.IndexedSortable; +import org.apache.hadoop.util.IndexedSorter; +import org.apache.hadoop.util.Progress; +import org.apache.hadoop.util.QuickSort; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.StringUtils; + +/** A Map task. */ +class MapTask extends Task { + /** + * The size of each record in the index file for the map-outputs. 
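(For reference: the 24 bytes are the three long fields recorded per partition later in this file, IndexRecord.startOffset, rawLength and partLength, at 8 bytes each; the same constant is used below to account for the in-memory spill-index cache and to size the spill index files.)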
+ */ + public static final int MAP_OUTPUT_INDEX_RECORD_LENGTH = 24; + + + private BytesWritable split = new BytesWritable(); + private String splitClass; + private final static int APPROX_HEADER_LENGTH = 150; + + private static final Log LOG = LogFactory.getLog(MapTask.class.getName()); + + { // set phase for this task + setPhase(TaskStatus.Phase.MAP); + } + + public MapTask() { + super(); + } + + public MapTask(String jobFile, TaskAttemptID taskId, + int partition, String splitClass, BytesWritable split, + int numSlotsRequired, String username) { + super(jobFile, taskId, partition, numSlotsRequired, username); + this.splitClass = splitClass; + this.split = split; + } + + @Override + public boolean isMapTask() { + return true; + } + + @Override + public void localizeConfiguration(JobConf conf) throws IOException { + super.localizeConfiguration(conf); + if (isMapOrReduce()) { + Path localSplit = new Path(new Path(getJobFile()).getParent(), + "split.dta"); + LOG.debug("Writing local split to " + localSplit); + DataOutputStream out = FileSystem.getLocal(conf).create(localSplit); + Text.writeString(out, splitClass); + split.write(out); + out.close(); + } + } + + @Override + public TaskRunner createRunner(TaskTracker tracker, + TaskTracker.TaskInProgress tip) { + return new MapTaskRunner(tip, tracker, this.conf); + } + + @Override + public void write(DataOutput out) throws IOException { + super.write(out); + if (isMapOrReduce()) { + Text.writeString(out, splitClass); + split.write(out); + split = null; + } + } + + @Override + public void readFields(DataInput in) throws IOException { + super.readFields(in); + if (isMapOrReduce()) { + splitClass = Text.readString(in); + split.readFields(in); + } + } + + /** + * This class wraps the user's record reader to update the counters and progress + * as records are read. + * @param + * @param + */ + class TrackedRecordReader + implements RecordReader { + private RecordReader rawIn; + private Counters.Counter inputByteCounter; + private Counters.Counter inputRecordCounter; + private TaskReporter reporter; + private long beforePos = -1; + private long afterPos = -1; + + TrackedRecordReader(RecordReader raw, TaskReporter reporter) + throws IOException{ + rawIn = raw; + inputRecordCounter = reporter.getCounter(MAP_INPUT_RECORDS); + inputByteCounter = reporter.getCounter(MAP_INPUT_BYTES); + this.reporter = reporter; + } + + public K createKey() { + return rawIn.createKey(); + } + + public V createValue() { + return rawIn.createValue(); + } + + public synchronized boolean next(K key, V value) + throws IOException { + boolean ret = moveToNext(key, value); + if (ret) { + incrCounters(); + } + return ret; + } + + protected void incrCounters() { + inputRecordCounter.increment(1); + inputByteCounter.increment(afterPos - beforePos); + } + + protected synchronized boolean moveToNext(K key, V value) + throws IOException { + reporter.setProgress(getProgress()); + beforePos = getPos(); + boolean ret = rawIn.next(key, value); + afterPos = getPos(); + return ret; + } + + public long getPos() throws IOException { return rawIn.getPos(); } + public void close() throws IOException { rawIn.close(); } + public float getProgress() throws IOException { + return rawIn.getProgress(); + } + TaskReporter getTaskReporter() { + return reporter; + } + } + + /** + * This class skips the records based on the failed ranges from previous + * attempts. 
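Skipping mode is opt-in: this reader is only installed when the job enables it through SkipBadRecords (see runOldMapper below, which otherwise uses TrackedRecordReader). A minimal sketch of the job-side configuration, with illustrative values and an assumed skip-output path:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.SkipBadRecords;

    public class SkipModeSetup {
      // Sketch: turn on record skipping for the map phase.
      public static void enableMapSkipping(JobConf job) {
        // Start skipping after two failed attempts (illustrative value).
        SkipBadRecords.setAttemptsToStartSkipping(job, 2);
        // Accept losing at most one record around each bad record.
        SkipBadRecords.setMapperMaxSkipRecords(job, 1);
        // Skipped records are written here as a SequenceFile (assumed path).
        SkipBadRecords.setSkipOutputPath(job, new Path("/tmp/skip-output"));
      }
    }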
+ */ + class SkippingRecordReader extends TrackedRecordReader { + private SkipRangeIterator skipIt; + private SequenceFile.Writer skipWriter; + private boolean toWriteSkipRecs; + private TaskUmbilicalProtocol umbilical; + private Counters.Counter skipRecCounter; + private long recIndex = -1; + + SkippingRecordReader(RecordReader raw, TaskUmbilicalProtocol umbilical, + TaskReporter reporter) throws IOException{ + super(raw, reporter); + this.umbilical = umbilical; + this.skipRecCounter = reporter.getCounter(Counter.MAP_SKIPPED_RECORDS); + this.toWriteSkipRecs = toWriteSkipRecs() && + SkipBadRecords.getSkipOutputPath(conf)!=null; + skipIt = getSkipRanges().skipRangeIterator(); + } + + public synchronized boolean next(K key, V value) + throws IOException { + if(!skipIt.hasNext()) { + LOG.warn("Further records got skipped."); + return false; + } + boolean ret = moveToNext(key, value); + long nextRecIndex = skipIt.next(); + long skip = 0; + while(recIndex0 && skipIt.skippedAllRanges() && skipWriter!=null) { + skipWriter.close(); + } + skipRecCounter.increment(skip); + reportNextRecordRange(umbilical, recIndex); + if (ret) { + incrCounters(); + } + return ret; + } + + protected synchronized boolean moveToNext(K key, V value) + throws IOException { + recIndex++; + return super.moveToNext(key, value); + } + + @SuppressWarnings("unchecked") + private void writeSkippedRec(K key, V value) throws IOException{ + if(skipWriter==null) { + Path skipDir = SkipBadRecords.getSkipOutputPath(conf); + Path skipFile = new Path(skipDir, getTaskID().toString()); + skipWriter = + SequenceFile.createWriter( + skipFile.getFileSystem(conf), conf, skipFile, + (Class) createKey().getClass(), + (Class) createValue().getClass(), + CompressionType.BLOCK, getTaskReporter()); + } + skipWriter.append(key, value); + } + } + + @Override + public void run(final JobConf job, final TaskUmbilicalProtocol umbilical) + throws IOException, ClassNotFoundException, InterruptedException { + this.umbilical = umbilical; + + // start thread that will handle communication with parent + TaskReporter reporter = new TaskReporter(getProgress(), umbilical); + reporter.startCommunicationThread(); + boolean useNewApi = job.getUseNewMapper(); + initialize(job, getJobID(), reporter, useNewApi); + + // check if it is a cleanupJobTask + if (jobCleanup) { + runJobCleanupTask(umbilical, reporter); + return; + } + if (jobSetup) { + runJobSetupTask(umbilical, reporter); + return; + } + if (taskCleanup) { + runTaskCleanupTask(umbilical, reporter); + return; + } + + if (useNewApi) { + runNewMapper(job, split, umbilical, reporter); + } else { + runOldMapper(job, split, umbilical, reporter); + } + done(umbilical, reporter); + } + + @SuppressWarnings("unchecked") + private + void runOldMapper(final JobConf job, + final BytesWritable rawSplit, + final TaskUmbilicalProtocol umbilical, + TaskReporter reporter + ) throws IOException, InterruptedException, + ClassNotFoundException { + InputSplit inputSplit = null; + // reinstantiate the split + try { + inputSplit = (InputSplit) + ReflectionUtils.newInstance(job.getClassByName(splitClass), job); + } catch (ClassNotFoundException exp) { + IOException wrap = new IOException("Split class " + splitClass + + " not found"); + wrap.initCause(exp); + throw wrap; + } + DataInputBuffer splitBuffer = new DataInputBuffer(); + splitBuffer.reset(split.getBytes(), 0, split.getLength()); + inputSplit.readFields(splitBuffer); + + updateJobWithSplit(job, inputSplit); + reporter.setInputSplit(inputSplit); + + RecordReader rawIn = // 
open input + job.getInputFormat().getRecordReader(inputSplit, job, reporter); + RecordReader in = isSkipping() ? + new SkippingRecordReader(rawIn, umbilical, reporter) : + new TrackedRecordReader(rawIn, reporter); + job.setBoolean("mapred.skip.on", isSkipping()); + + + int numReduceTasks = conf.getNumReduceTasks(); + LOG.info("numReduceTasks: " + numReduceTasks); + MapOutputCollector collector = null; + if (numReduceTasks > 0) { + collector = new MapOutputBuffer(umbilical, job, reporter); + } else { + collector = new DirectMapOutputCollector(umbilical, job, reporter); + } + MapRunnable runner = + ReflectionUtils.newInstance(job.getMapRunnerClass(), job); + + try { + runner.run(in, new OldOutputCollector(collector, conf), reporter); + collector.flush(); + } finally { + //close + in.close(); // close input + collector.close(); + } + } + + /** + * Update the job with details about the file split + * @param job the job configuration to update + * @param inputSplit the file split + */ + private void updateJobWithSplit(final JobConf job, InputSplit inputSplit) { + if (inputSplit instanceof FileSplit) { + FileSplit fileSplit = (FileSplit) inputSplit; + job.set("map.input.file", fileSplit.getPath().toString()); + job.setLong("map.input.start", fileSplit.getStart()); + job.setLong("map.input.length", fileSplit.getLength()); + } + LOG.info("split: " + inputSplit.toString()); + } + + static class NewTrackingRecordReader + extends org.apache.hadoop.mapreduce.RecordReader { + private final org.apache.hadoop.mapreduce.RecordReader real; + private final org.apache.hadoop.mapreduce.Counter inputRecordCounter; + private final TaskReporter reporter; + + NewTrackingRecordReader(org.apache.hadoop.mapreduce.RecordReader real, + TaskReporter reporter) { + this.real = real; + this.reporter = reporter; + this.inputRecordCounter = reporter.getCounter(MAP_INPUT_RECORDS); + } + + @Override + public void close() throws IOException { + real.close(); + } + + @Override + public K getCurrentKey() throws IOException, InterruptedException { + return real.getCurrentKey(); + } + + @Override + public V getCurrentValue() throws IOException, InterruptedException { + return real.getCurrentValue(); + } + + @Override + public float getProgress() throws IOException, InterruptedException { + return real.getProgress(); + } + + @Override + public void initialize(org.apache.hadoop.mapreduce.InputSplit split, + org.apache.hadoop.mapreduce.TaskAttemptContext context + ) throws IOException, InterruptedException { + real.initialize(split, context); + } + + @Override + public boolean nextKeyValue() throws IOException, InterruptedException { + boolean result = real.nextKeyValue(); + if (result) { + inputRecordCounter.increment(1); + } + reporter.setProgress(getProgress()); + return result; + } + } + + /** + * Since the mapred and mapreduce Partitioners don't share a common interface + * (JobConfigurable is deprecated and a subtype of mapred.Partitioner), the + * partitioner lives in Old/NewOutputCollector. Note that, for map-only jobs, + * the configured partitioner should not be called. 
It's common for + * partitioners to compute a result mod numReduces, which causes a div0 error + */ + private static class OldOutputCollector implements OutputCollector { + private final Partitioner partitioner; + private final MapOutputCollector collector; + private final int numPartitions; + + @SuppressWarnings("unchecked") + OldOutputCollector(MapOutputCollector collector, JobConf conf) { + numPartitions = conf.getNumReduceTasks(); + if (numPartitions > 0) { + partitioner = (Partitioner) + ReflectionUtils.newInstance(conf.getPartitionerClass(), conf); + } else { + partitioner = new Partitioner() { + @Override + public void configure(JobConf job) { } + @Override + public int getPartition(K key, V value, int numPartitions) { + return -1; + } + }; + } + this.collector = collector; + } + + @Override + public void collect(K key, V value) throws IOException { + try { + collector.collect(key, value, + partitioner.getPartition(key, value, numPartitions)); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + throw new IOException("interrupt exception", ie); + } + } + } + + private class NewDirectOutputCollector + extends org.apache.hadoop.mapreduce.RecordWriter { + private final org.apache.hadoop.mapreduce.RecordWriter out; + + private final TaskReporter reporter; + + private final Counters.Counter mapOutputRecordCounter; + + @SuppressWarnings("unchecked") + NewDirectOutputCollector(org.apache.hadoop.mapreduce.JobContext jobContext, + JobConf job, TaskUmbilicalProtocol umbilical, TaskReporter reporter) + throws IOException, ClassNotFoundException, InterruptedException { + this.reporter = reporter; + out = outputFormat.getRecordWriter(taskContext); + mapOutputRecordCounter = + reporter.getCounter(MAP_OUTPUT_RECORDS); + } + + @Override + @SuppressWarnings("unchecked") + public void write(K key, V value) + throws IOException, InterruptedException { + reporter.progress(); + out.write(key, value); + mapOutputRecordCounter.increment(1); + } + + @Override + public void close(TaskAttemptContext context) + throws IOException,InterruptedException { + reporter.progress(); + if (out != null) { + out.close(context); + } + } + } + + private class NewOutputCollector + extends org.apache.hadoop.mapreduce.RecordWriter { + private final MapOutputCollector collector; + private final org.apache.hadoop.mapreduce.Partitioner partitioner; + private final int partitions; + + @SuppressWarnings("unchecked") + NewOutputCollector(org.apache.hadoop.mapreduce.JobContext jobContext, + JobConf job, + TaskUmbilicalProtocol umbilical, + TaskReporter reporter + ) throws IOException, ClassNotFoundException { + collector = new MapOutputBuffer(umbilical, job, reporter); + partitions = jobContext.getNumReduceTasks(); + if (partitions > 0) { + partitioner = (org.apache.hadoop.mapreduce.Partitioner) + ReflectionUtils.newInstance(jobContext.getPartitionerClass(), job); + } else { + partitioner = new org.apache.hadoop.mapreduce.Partitioner() { + @Override + public int getPartition(K key, V value, int numPartitions) { + return -1; + } + }; + } + } + + @Override + public void write(K key, V value) throws IOException, InterruptedException { + collector.collect(key, value, + partitioner.getPartition(key, value, partitions)); + } + + @Override + public void close(TaskAttemptContext context + ) throws IOException,InterruptedException { + try { + collector.flush(); + } catch (ClassNotFoundException cnf) { + throw new IOException("can't find class ", cnf); + } + collector.close(); + } + } + + 
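To make the div0 remark in the OldOutputCollector comment above concrete: a conventional old-API hash partitioner looks like the sketch below (it mirrors org.apache.hadoop.mapred.lib.HashPartitioner), so calling it with numReduceTasks == 0 would divide by zero. That is why both collectors above install a dummy partitioner returning -1 for map-only jobs instead of ever invoking the configured one.

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.Partitioner;

    // Sketch of a typical hash partitioner; the modulo by numPartitions is
    // what fails when a map-only job has zero reduces.
    public class SimpleHashPartitioner<K, V> implements Partitioner<K, V> {
      public void configure(JobConf job) { }

      public int getPartition(K key, V value, int numPartitions) {
        return (key.hashCode() & Integer.MAX_VALUE) % numPartitions;
      }
    }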
@SuppressWarnings("unchecked") + private + void runNewMapper(final JobConf job, + final BytesWritable rawSplit, + final TaskUmbilicalProtocol umbilical, + TaskReporter reporter + ) throws IOException, ClassNotFoundException, + InterruptedException { + // make a task context so we can get the classes + org.apache.hadoop.mapreduce.TaskAttemptContext taskContext = + new org.apache.hadoop.mapreduce.TaskAttemptContext(job, getTaskID()); + // make a mapper + org.apache.hadoop.mapreduce.Mapper mapper = + (org.apache.hadoop.mapreduce.Mapper) + ReflectionUtils.newInstance(taskContext.getMapperClass(), job); + // make the input format + org.apache.hadoop.mapreduce.InputFormat inputFormat = + (org.apache.hadoop.mapreduce.InputFormat) + ReflectionUtils.newInstance(taskContext.getInputFormatClass(), job); + // rebuild the input split + org.apache.hadoop.mapreduce.InputSplit split = null; + DataInputBuffer splitBuffer = new DataInputBuffer(); + splitBuffer.reset(rawSplit.getBytes(), 0, rawSplit.getLength()); + SerializationFactory factory = new SerializationFactory(job); + Deserializer + deserializer = + (Deserializer) + factory.getDeserializer(job.getClassByName(splitClass)); + deserializer.open(splitBuffer); + split = deserializer.deserialize(null); + + org.apache.hadoop.mapreduce.RecordReader input = + new NewTrackingRecordReader + (inputFormat.createRecordReader(split, taskContext), reporter); + + job.setBoolean("mapred.skip.on", isSkipping()); + org.apache.hadoop.mapreduce.RecordWriter output = null; + org.apache.hadoop.mapreduce.Mapper.Context + mapperContext = null; + try { + Constructor contextConstructor = + org.apache.hadoop.mapreduce.Mapper.Context.class.getConstructor + (new Class[]{org.apache.hadoop.mapreduce.Mapper.class, + Configuration.class, + org.apache.hadoop.mapreduce.TaskAttemptID.class, + org.apache.hadoop.mapreduce.RecordReader.class, + org.apache.hadoop.mapreduce.RecordWriter.class, + org.apache.hadoop.mapreduce.OutputCommitter.class, + org.apache.hadoop.mapreduce.StatusReporter.class, + org.apache.hadoop.mapreduce.InputSplit.class}); + + // get an output object + if (job.getNumReduceTasks() == 0) { + output = + new NewDirectOutputCollector(taskContext, job, umbilical, reporter); + } else { + output = new NewOutputCollector(taskContext, job, umbilical, reporter); + } + + mapperContext = contextConstructor.newInstance(mapper, job, getTaskID(), + input, output, committer, + reporter, split); + + input.initialize(split, mapperContext); + mapper.run(mapperContext); + input.close(); + output.close(mapperContext); + } catch (NoSuchMethodException e) { + throw new IOException("Can't find Context constructor", e); + } catch (InstantiationException e) { + throw new IOException("Can't create Context", e); + } catch (InvocationTargetException e) { + throw new IOException("Can't invoke Context constructor", e); + } catch (IllegalAccessException e) { + throw new IOException("Can't invoke Context constructor", e); + } + } + + interface MapOutputCollector { + + public void collect(K key, V value, int partition + ) throws IOException, InterruptedException; + public void close() throws IOException, InterruptedException; + + public void flush() throws IOException, InterruptedException, + ClassNotFoundException; + + } + + class DirectMapOutputCollector + implements MapOutputCollector { + + private RecordWriter out = null; + + private TaskReporter reporter = null; + + private final Counters.Counter mapOutputRecordCounter; + + @SuppressWarnings("unchecked") + public 
DirectMapOutputCollector(TaskUmbilicalProtocol umbilical, + JobConf job, TaskReporter reporter) throws IOException { + this.reporter = reporter; + String finalName = getOutputName(getPartition()); + FileSystem fs = FileSystem.get(job); + + out = job.getOutputFormat().getRecordWriter(fs, job, finalName, reporter); + + mapOutputRecordCounter = reporter.getCounter(MAP_OUTPUT_RECORDS); + } + + public void close() throws IOException { + if (this.out != null) { + out.close(this.reporter); + } + + } + + public void flush() throws IOException, InterruptedException, + ClassNotFoundException { + } + + public void collect(K key, V value, int partition) throws IOException { + reporter.progress(); + out.write(key, value); + mapOutputRecordCounter.increment(1); + } + + } + + class MapOutputBuffer + implements MapOutputCollector, IndexedSortable { + private final int partitions; + private final JobConf job; + private final TaskReporter reporter; + private final Class keyClass; + private final Class valClass; + private final RawComparator comparator; + private final SerializationFactory serializationFactory; + private final Serializer keySerializer; + private final Serializer valSerializer; + private final CombinerRunner combinerRunner; + private final CombineOutputCollector combineCollector; + + // Compression for map-outputs + private CompressionCodec codec = null; + + // k/v accounting + private volatile int kvstart = 0; // marks beginning of spill + private volatile int kvend = 0; // marks beginning of collectable + private int kvindex = 0; // marks end of collected + private final int[] kvoffsets; // indices into kvindices + private final int[] kvindices; // partition, k/v offsets into kvbuffer + private volatile int bufstart = 0; // marks beginning of spill + private volatile int bufend = 0; // marks beginning of collectable + private volatile int bufvoid = 0; // marks the point where we should stop + // reading at the end of the buffer + private int bufindex = 0; // marks end of collected + private int bufmark = 0; // marks end of record + private byte[] kvbuffer; // main output buffer + private static final int PARTITION = 0; // partition offset in acct + private static final int KEYSTART = 1; // key offset in acct + private static final int VALSTART = 2; // val offset in acct + private static final int ACCTSIZE = 3; // total #fields in acct + private static final int RECSIZE = + (ACCTSIZE + 1) * 4; // acct bytes per record + + // spill accounting + private volatile int numSpills = 0; + private volatile Throwable sortSpillException = null; + private final int softRecordLimit; + private final int softBufferLimit; + private final int minSpillsForCombine; + private final IndexedSorter sorter; + private final ReentrantLock spillLock = new ReentrantLock(); + private final Condition spillDone = spillLock.newCondition(); + private final Condition spillReady = spillLock.newCondition(); + private final BlockingBuffer bb = new BlockingBuffer(); + private volatile boolean spillThreadRunning = false; + private final SpillThread spillThread = new SpillThread(); + + private final FileSystem localFs; + private final FileSystem rfs; + + private final Counters.Counter mapOutputByteCounter; + private final Counters.Counter mapOutputRecordCounter; + private final Counters.Counter combineOutputCounter; + + private ArrayList indexCacheList; + private int totalIndexCacheMemory; + private static final int INDEX_CACHE_MEMORY_LIMIT = 1024 * 1024; + + @SuppressWarnings("unchecked") + public 
MapOutputBuffer(TaskUmbilicalProtocol umbilical, JobConf job, + TaskReporter reporter + ) throws IOException, ClassNotFoundException { + this.job = job; + this.reporter = reporter; + localFs = FileSystem.getLocal(job); + partitions = job.getNumReduceTasks(); + + rfs = ((LocalFileSystem)localFs).getRaw(); + + indexCacheList = new ArrayList(); + + //sanity checks + final float spillper = job.getFloat("io.sort.spill.percent",(float)0.8); + final float recper = job.getFloat("io.sort.record.percent",(float)0.05); + final int sortmb = job.getInt("io.sort.mb", 100); + if (spillper > (float)1.0 || spillper < (float)0.0) { + throw new IOException("Invalid \"io.sort.spill.percent\": " + spillper); + } + if (recper > (float)1.0 || recper < (float)0.01) { + throw new IOException("Invalid \"io.sort.record.percent\": " + recper); + } + if ((sortmb & 0x7FF) != sortmb) { + throw new IOException("Invalid \"io.sort.mb\": " + sortmb); + } + sorter = ReflectionUtils.newInstance( + job.getClass("map.sort.class", QuickSort.class, IndexedSorter.class), job); + LOG.info("io.sort.mb = " + sortmb); + // buffers and accounting + int maxMemUsage = sortmb << 20; + int recordCapacity = (int)(maxMemUsage * recper); + recordCapacity -= recordCapacity % RECSIZE; + kvbuffer = new byte[maxMemUsage - recordCapacity]; + bufvoid = kvbuffer.length; + recordCapacity /= RECSIZE; + kvoffsets = new int[recordCapacity]; + kvindices = new int[recordCapacity * ACCTSIZE]; + softBufferLimit = (int)(kvbuffer.length * spillper); + softRecordLimit = (int)(kvoffsets.length * spillper); + LOG.info("data buffer = " + softBufferLimit + "/" + kvbuffer.length); + LOG.info("record buffer = " + softRecordLimit + "/" + kvoffsets.length); + // k/v serialization + comparator = job.getOutputKeyComparator(); + keyClass = (Class)job.getMapOutputKeyClass(); + valClass = (Class)job.getMapOutputValueClass(); + serializationFactory = new SerializationFactory(job); + keySerializer = serializationFactory.getSerializer(keyClass); + keySerializer.open(bb); + valSerializer = serializationFactory.getSerializer(valClass); + valSerializer.open(bb); + // counters + mapOutputByteCounter = reporter.getCounter(MAP_OUTPUT_BYTES); + mapOutputRecordCounter = reporter.getCounter(MAP_OUTPUT_RECORDS); + Counters.Counter combineInputCounter = + reporter.getCounter(COMBINE_INPUT_RECORDS); + combineOutputCounter = reporter.getCounter(COMBINE_OUTPUT_RECORDS); + // compression + if (job.getCompressMapOutput()) { + Class codecClass = + job.getMapOutputCompressorClass(DefaultCodec.class); + codec = ReflectionUtils.newInstance(codecClass, job); + } + // combiner + combinerRunner = CombinerRunner.create(job, getTaskID(), + combineInputCounter, + reporter, null); + if (combinerRunner != null) { + combineCollector= new CombineOutputCollector(combineOutputCounter); + } else { + combineCollector = null; + } + minSpillsForCombine = job.getInt("min.num.spills.for.combine", 3); + spillThread.setDaemon(true); + spillThread.setName("SpillThread"); + spillLock.lock(); + try { + spillThread.start(); + while (!spillThreadRunning) { + spillDone.await(); + } + } catch (InterruptedException e) { + throw (IOException)new IOException("Spill thread failed to initialize" + ).initCause(sortSpillException); + } finally { + spillLock.unlock(); + } + if (sortSpillException != null) { + throw (IOException)new IOException("Spill thread failed to initialize" + ).initCause(sortSpillException); + } + } + + public synchronized void collect(K key, V value, int partition + ) throws IOException { + 
reporter.progress(); + if (key.getClass() != keyClass) { + throw new IOException("Type mismatch in key from map: expected " + + keyClass.getName() + ", recieved " + + key.getClass().getName()); + } + if (value.getClass() != valClass) { + throw new IOException("Type mismatch in value from map: expected " + + valClass.getName() + ", recieved " + + value.getClass().getName()); + } + final int kvnext = (kvindex + 1) % kvoffsets.length; + spillLock.lock(); + try { + boolean kvfull; + do { + if (sortSpillException != null) { + throw (IOException)new IOException("Spill failed" + ).initCause(sortSpillException); + } + // sufficient acct space + kvfull = kvnext == kvstart; + final boolean kvsoftlimit = ((kvnext > kvend) + ? kvnext - kvend > softRecordLimit + : kvend - kvnext <= kvoffsets.length - softRecordLimit); + if (kvstart == kvend && kvsoftlimit) { + LOG.info("Spilling map output: record full = " + kvsoftlimit); + startSpill(); + } + if (kvfull) { + try { + while (kvstart != kvend) { + reporter.progress(); + spillDone.await(); + } + } catch (InterruptedException e) { + throw (IOException)new IOException( + "Collector interrupted while waiting for the writer" + ).initCause(e); + } + } + } while (kvfull); + } finally { + spillLock.unlock(); + } + + try { + // serialize key bytes into buffer + int keystart = bufindex; + keySerializer.serialize(key); + if (bufindex < keystart) { + // wrapped the key; reset required + bb.reset(); + keystart = 0; + } + // serialize value bytes into buffer + final int valstart = bufindex; + valSerializer.serialize(value); + int valend = bb.markRecord(); + + if (partition < 0 || partition >= partitions) { + throw new IOException("Illegal partition for " + key + " (" + + partition + ")"); + } + + mapOutputRecordCounter.increment(1); + mapOutputByteCounter.increment(valend >= keystart + ? valend - keystart + : (bufvoid - keystart) + valend); + + // update accounting info + int ind = kvindex * ACCTSIZE; + kvoffsets[kvindex] = ind; + kvindices[ind + PARTITION] = partition; + kvindices[ind + KEYSTART] = keystart; + kvindices[ind + VALSTART] = valstart; + kvindex = kvnext; + } catch (MapBufferTooSmallException e) { + LOG.info("Record too large for in-memory buffer: " + e.getMessage()); + spillSingleRecord(key, value, partition); + mapOutputRecordCounter.increment(1); + return; + } + + } + + /** + * Compare logical range, st i, j MOD offset capacity. + * Compare by partition, then by key. + * @see IndexedSortable#compare + */ + public int compare(int i, int j) { + final int ii = kvoffsets[i % kvoffsets.length]; + final int ij = kvoffsets[j % kvoffsets.length]; + // sort by partition + if (kvindices[ii + PARTITION] != kvindices[ij + PARTITION]) { + return kvindices[ii + PARTITION] - kvindices[ij + PARTITION]; + } + // sort by key + return comparator.compare(kvbuffer, + kvindices[ii + KEYSTART], + kvindices[ii + VALSTART] - kvindices[ii + KEYSTART], + kvbuffer, + kvindices[ij + KEYSTART], + kvindices[ij + VALSTART] - kvindices[ij + KEYSTART]); + } + + /** + * Swap logical indices st i, j MOD offset capacity. + * @see IndexedSortable#swap + */ + public void swap(int i, int j) { + i %= kvoffsets.length; + j %= kvoffsets.length; + int tmp = kvoffsets[i]; + kvoffsets[i] = kvoffsets[j]; + kvoffsets[j] = tmp; + } + + /** + * Inner class managing the spill of serialized records to disk. 
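A worked example of the sizing arithmetic in the MapOutputBuffer constructor above, using the defaults it reads (io.sort.mb=100, io.sort.record.percent=0.05, io.sort.spill.percent=0.8): of the 100 MB sort space, roughly 5 MB goes to the kvoffsets/kvindices accounting arrays at RECSIZE = 16 bytes per record (about 327,680 records) and the remaining ~95 MB becomes kvbuffer for serialized key/value bytes; a background sort-and-spill is triggered once either side passes 80% of its capacity, i.e. softRecordLimit of about 262,144 records or softBufferLimit of 80% of the byte buffer.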
+ */ + protected class BlockingBuffer extends DataOutputStream { + + public BlockingBuffer() { + this(new Buffer()); + } + + private BlockingBuffer(OutputStream out) { + super(out); + } + + /** + * Mark end of record. Note that this is required if the buffer is to + * cut the spill in the proper place. + */ + public int markRecord() { + bufmark = bufindex; + return bufindex; + } + + /** + * Set position from last mark to end of writable buffer, then rewrite + * the data between last mark and kvindex. + * This handles a special case where the key wraps around the buffer. + * If the key is to be passed to a RawComparator, then it must be + * contiguous in the buffer. This recopies the data in the buffer back + * into itself, but starting at the beginning of the buffer. Note that + * reset() should only be called immediately after detecting + * this condition. To call it at any other time is undefined and would + * likely result in data loss or corruption. + * @see #markRecord() + */ + protected synchronized void reset() throws IOException { + // spillLock unnecessary; If spill wraps, then + // bufindex < bufstart < bufend so contention is impossible + // a stale value for bufstart does not affect correctness, since + // we can only get false negatives that force the more + // conservative path + int headbytelen = bufvoid - bufmark; + bufvoid = bufmark; + if (bufindex + headbytelen < bufstart) { + System.arraycopy(kvbuffer, 0, kvbuffer, headbytelen, bufindex); + System.arraycopy(kvbuffer, bufvoid, kvbuffer, 0, headbytelen); + bufindex += headbytelen; + } else { + byte[] keytmp = new byte[bufindex]; + System.arraycopy(kvbuffer, 0, keytmp, 0, bufindex); + bufindex = 0; + out.write(kvbuffer, bufmark, headbytelen); + out.write(keytmp); + } + } + } + + public class Buffer extends OutputStream { + private final byte[] scratch = new byte[1]; + + @Override + public synchronized void write(int v) + throws IOException { + scratch[0] = (byte)v; + write(scratch, 0, 1); + } + + /** + * Attempt to write a sequence of bytes to the collection buffer. + * This method will block if the spill thread is running and it + * cannot write. + * @throws MapBufferTooSmallException if record is too large to + * deserialize into the collection buffer. + */ + @Override + public synchronized void write(byte b[], int off, int len) + throws IOException { + boolean buffull = false; + boolean wrap = false; + spillLock.lock(); + try { + do { + if (sortSpillException != null) { + throw (IOException)new IOException("Spill failed" + ).initCause(sortSpillException); + } + + // sufficient buffer space? + if (bufstart <= bufend && bufend <= bufindex) { + buffull = bufindex + len > bufvoid; + wrap = (bufvoid - bufindex) + bufstart > len; + } else { + // bufindex <= bufstart <= bufend + // bufend <= bufindex <= bufstart + wrap = false; + buffull = bufindex + len > bufstart; + } + + if (kvstart == kvend) { + // spill thread not running + if (kvend != kvindex) { + // we have records we can spill + final boolean bufsoftlimit = (bufindex > bufend) + ? bufindex - bufend > softBufferLimit + : bufend - bufindex < bufvoid - softBufferLimit; + if (bufsoftlimit || (buffull && !wrap)) { + LOG.info("Spilling map output: buffer full= " + bufsoftlimit); + startSpill(); + } + } else if (buffull && !wrap) { + // We have no buffered records, and this record is too large + // to write into kvbuffer. We must spill it directly from + // collect + final int size = ((bufend <= bufindex) + ? 
bufindex - bufend + : (bufvoid - bufend) + bufindex) + len; + bufstart = bufend = bufindex = bufmark = 0; + kvstart = kvend = kvindex = 0; + bufvoid = kvbuffer.length; + throw new MapBufferTooSmallException(size + " bytes"); + } + } + + if (buffull && !wrap) { + try { + while (kvstart != kvend) { + reporter.progress(); + spillDone.await(); + } + } catch (InterruptedException e) { + throw (IOException)new IOException( + "Buffer interrupted while waiting for the writer" + ).initCause(e); + } + } + } while (buffull && !wrap); + } finally { + spillLock.unlock(); + } + // here, we know that we have sufficient space to write + if (buffull) { + final int gaplen = bufvoid - bufindex; + System.arraycopy(b, off, kvbuffer, bufindex, gaplen); + len -= gaplen; + off += gaplen; + bufindex = 0; + } + System.arraycopy(b, off, kvbuffer, bufindex, len); + bufindex += len; + } + } + + public synchronized void flush() throws IOException, ClassNotFoundException, + InterruptedException { + LOG.info("Starting flush of map output"); + spillLock.lock(); + try { + while (kvstart != kvend) { + reporter.progress(); + spillDone.await(); + } + if (sortSpillException != null) { + throw (IOException)new IOException("Spill failed" + ).initCause(sortSpillException); + } + if (kvend != kvindex) { + kvend = kvindex; + bufend = bufmark; + sortAndSpill(); + } + } catch (InterruptedException e) { + throw (IOException)new IOException( + "Buffer interrupted while waiting for the writer" + ).initCause(e); + } finally { + spillLock.unlock(); + } + assert !spillLock.isHeldByCurrentThread(); + // shut down spill thread and wait for it to exit. Since the preceding + // ensures that it is finished with its work (and sortAndSpill did not + // throw), we elect to use an interrupt instead of setting a flag. + // Spilling simultaneously from this thread while the spill thread + // finishes its work might be both a useful way to extend this and also + // sufficient motivation for the latter approach. 
+ try { + spillThread.interrupt(); + spillThread.join(); + } catch (InterruptedException e) { + throw (IOException)new IOException("Spill failed" + ).initCause(e); + } + // release sort buffer before the merge + kvbuffer = null; + mergeParts(); + } + + public void close() { } + + protected class SpillThread extends Thread { + + @Override + public void run() { + spillLock.lock(); + spillThreadRunning = true; + try { + while (true) { + spillDone.signal(); + while (kvstart == kvend) { + spillReady.await(); + } + try { + spillLock.unlock(); + sortAndSpill(); + } catch (Exception e) { + sortSpillException = e; + } catch (Throwable t) { + sortSpillException = t; + String logMsg = "Task " + getTaskID() + " failed : " + + StringUtils.stringifyException(t); + reportFatalError(getTaskID(), t, logMsg); + } finally { + spillLock.lock(); + if (bufend < bufindex && bufindex < bufstart) { + bufvoid = kvbuffer.length; + } + kvstart = kvend; + bufstart = bufend; + } + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } finally { + spillLock.unlock(); + spillThreadRunning = false; + } + } + } + + private synchronized void startSpill() { + LOG.info("bufstart = " + bufstart + "; bufend = " + bufmark + + "; bufvoid = " + bufvoid); + LOG.info("kvstart = " + kvstart + "; kvend = " + kvindex + + "; length = " + kvoffsets.length); + kvend = kvindex; + bufend = bufmark; + spillReady.signal(); + } + + private void sortAndSpill() throws IOException, ClassNotFoundException, + InterruptedException { + //approximate the length of the output file to be the length of the + //buffer + header lengths for the partitions + long size = (bufend >= bufstart + ? bufend - bufstart + : (bufvoid - bufend) + bufstart) + + partitions * APPROX_HEADER_LENGTH; + FSDataOutputStream out = null; + try { + // create spill file + final SpillRecord spillRec = new SpillRecord(partitions); + final Path filename = mapOutputFile.getSpillFileForWrite(getTaskID(), + numSpills, size); + out = rfs.create(filename); + + final int endPosition = (kvend > kvstart) + ? 
kvend + : kvoffsets.length + kvend; + sorter.sort(MapOutputBuffer.this, kvstart, endPosition, reporter); + int spindex = kvstart; + IndexRecord rec = new IndexRecord(); + InMemValBytes value = new InMemValBytes(); + for (int i = 0; i < partitions; ++i) { + IFile.Writer writer = null; + try { + long segmentStart = out.getPos(); + writer = new Writer(job, out, keyClass, valClass, codec, + spilledRecordsCounter); + if (combinerRunner == null) { + // spill directly + DataInputBuffer key = new DataInputBuffer(); + while (spindex < endPosition && + kvindices[kvoffsets[spindex % kvoffsets.length] + + PARTITION] == i) { + final int kvoff = kvoffsets[spindex % kvoffsets.length]; + getVBytesForOffset(kvoff, value); + key.reset(kvbuffer, kvindices[kvoff + KEYSTART], + (kvindices[kvoff + VALSTART] - + kvindices[kvoff + KEYSTART])); + writer.append(key, value); + ++spindex; + } + } else { + int spstart = spindex; + while (spindex < endPosition && + kvindices[kvoffsets[spindex % kvoffsets.length] + + PARTITION] == i) { + ++spindex; + } + // Note: we would like to avoid the combiner if we've fewer + // than some threshold of records for a partition + if (spstart != spindex) { + combineCollector.setWriter(writer); + RawKeyValueIterator kvIter = + new MRResultIterator(spstart, spindex); + combinerRunner.combine(kvIter, combineCollector); + } + } + + // close the writer + writer.close(); + + // record offsets + rec.startOffset = segmentStart; + rec.rawLength = writer.getRawLength(); + rec.partLength = writer.getCompressedLength(); + spillRec.putIndex(rec, i); + + writer = null; + } finally { + if (null != writer) writer.close(); + } + } + + if (totalIndexCacheMemory >= INDEX_CACHE_MEMORY_LIMIT) { + // create spill index file + Path indexFilename = mapOutputFile.getSpillIndexFileForWrite( + getTaskID(), numSpills, + partitions * MAP_OUTPUT_INDEX_RECORD_LENGTH); + spillRec.writeToFile(indexFilename, job); + } else { + indexCacheList.add(spillRec); + totalIndexCacheMemory += + spillRec.size() * MAP_OUTPUT_INDEX_RECORD_LENGTH; + } + LOG.info("Finished spill " + numSpills); + ++numSpills; + } finally { + if (out != null) out.close(); + } + } + + /** + * Handles the degenerate case where serialization fails to fit in + * the in-memory buffer, so we must spill the record from collect + * directly to a spill file. Consider this "losing". + */ + private void spillSingleRecord(final K key, final V value, + int partition) throws IOException { + long size = kvbuffer.length + partitions * APPROX_HEADER_LENGTH; + FSDataOutputStream out = null; + try { + // create spill file + final SpillRecord spillRec = new SpillRecord(partitions); + final Path filename = mapOutputFile.getSpillFileForWrite(getTaskID(), + numSpills, size); + out = rfs.create(filename); + + // we don't run the combiner for a single record + IndexRecord rec = new IndexRecord(); + for (int i = 0; i < partitions; ++i) { + IFile.Writer writer = null; + try { + long segmentStart = out.getPos(); + // Create a new codec, don't care! 
+ writer = new IFile.Writer(job, out, keyClass, valClass, codec, + spilledRecordsCounter); + + if (i == partition) { + final long recordStart = out.getPos(); + writer.append(key, value); + // Note that our map byte count will not be accurate with + // compression + mapOutputByteCounter.increment(out.getPos() - recordStart); + } + writer.close(); + + // record offsets + rec.startOffset = segmentStart; + rec.rawLength = writer.getRawLength(); + rec.partLength = writer.getCompressedLength(); + spillRec.putIndex(rec, i); + + writer = null; + } catch (IOException e) { + if (null != writer) writer.close(); + throw e; + } + } + if (totalIndexCacheMemory >= INDEX_CACHE_MEMORY_LIMIT) { + // create spill index file + Path indexFilename = mapOutputFile.getSpillIndexFileForWrite( + getTaskID(), numSpills, + partitions * MAP_OUTPUT_INDEX_RECORD_LENGTH); + spillRec.writeToFile(indexFilename, job); + } else { + indexCacheList.add(spillRec); + totalIndexCacheMemory += + spillRec.size() * MAP_OUTPUT_INDEX_RECORD_LENGTH; + } + ++numSpills; + } finally { + if (out != null) out.close(); + } + } + + /** + * Given an offset, populate vbytes with the associated set of + * deserialized value bytes. Should only be called during a spill. + */ + private void getVBytesForOffset(int kvoff, InMemValBytes vbytes) { + final int nextindex = (kvoff / ACCTSIZE == + (kvend - 1 + kvoffsets.length) % kvoffsets.length) + ? bufend + : kvindices[(kvoff + ACCTSIZE + KEYSTART) % kvindices.length]; + int vallen = (nextindex >= kvindices[kvoff + VALSTART]) + ? nextindex - kvindices[kvoff + VALSTART] + : (bufvoid - kvindices[kvoff + VALSTART]) + nextindex; + vbytes.reset(kvbuffer, kvindices[kvoff + VALSTART], vallen); + } + + /** + * Inner class wrapping valuebytes, used for appendRaw. + */ + protected class InMemValBytes extends DataInputBuffer { + private byte[] buffer; + private int start; + private int length; + + public void reset(byte[] buffer, int start, int length) { + this.buffer = buffer; + this.start = start; + this.length = length; + + if (start + length > bufvoid) { + this.buffer = new byte[this.length]; + final int taillen = bufvoid - start; + System.arraycopy(buffer, start, this.buffer, 0, taillen); + System.arraycopy(buffer, 0, this.buffer, taillen, length-taillen); + this.start = 0; + } + + super.reset(this.buffer, this.start, this.length); + } + } + + protected class MRResultIterator implements RawKeyValueIterator { + private final DataInputBuffer keybuf = new DataInputBuffer(); + private final InMemValBytes vbytes = new InMemValBytes(); + private final int end; + private int current; + public MRResultIterator(int start, int end) { + this.end = end; + current = start - 1; + } + public boolean next() throws IOException { + return ++current < end; + } + public DataInputBuffer getKey() throws IOException { + final int kvoff = kvoffsets[current % kvoffsets.length]; + keybuf.reset(kvbuffer, kvindices[kvoff + KEYSTART], + kvindices[kvoff + VALSTART] - kvindices[kvoff + KEYSTART]); + return keybuf; + } + public DataInputBuffer getValue() throws IOException { + getVBytesForOffset(kvoffsets[current % kvoffsets.length], vbytes); + return vbytes; + } + public Progress getProgress() { + return null; + } + public void close() { } + } + + private void mergeParts() throws IOException, InterruptedException, + ClassNotFoundException { + // get the approximate size of the final output/index files + long finalOutFileSize = 0; + long finalIndexFileSize = 0; + final Path[] filename = new Path[numSpills]; + final TaskAttemptID mapId = 
getTaskID(); + + for(int i = 0; i < numSpills; i++) { + filename[i] = mapOutputFile.getSpillFile(mapId, i); + finalOutFileSize += rfs.getFileStatus(filename[i]).getLen(); + } + if (numSpills == 1) { //the spill is the final output + rfs.rename(filename[0], + new Path(filename[0].getParent(), "file.out")); + if (indexCacheList.size() == 0) { + rfs.rename(mapOutputFile.getSpillIndexFile(mapId, 0), + new Path(filename[0].getParent(),"file.out.index")); + } else { + indexCacheList.get(0).writeToFile( + new Path(filename[0].getParent(),"file.out.index"), job); + } + return; + } + + // read in paged indices + for (int i = indexCacheList.size(); i < numSpills; ++i) { + Path indexFileName = mapOutputFile.getSpillIndexFile(mapId, i); + indexCacheList.add(new SpillRecord(indexFileName, job)); + } + + //make correction in the length to include the sequence file header + //lengths for each partition + finalOutFileSize += partitions * APPROX_HEADER_LENGTH; + finalIndexFileSize = partitions * MAP_OUTPUT_INDEX_RECORD_LENGTH; + Path finalOutputFile = mapOutputFile.getOutputFileForWrite(mapId, + finalOutFileSize); + Path finalIndexFile = mapOutputFile.getOutputIndexFileForWrite( + mapId, finalIndexFileSize); + + //The output stream for the final single output file + FSDataOutputStream finalOut = rfs.create(finalOutputFile, true, 4096); + + if (numSpills == 0) { + //create dummy files + IndexRecord rec = new IndexRecord(); + SpillRecord sr = new SpillRecord(partitions); + try { + for (int i = 0; i < partitions; i++) { + long segmentStart = finalOut.getPos(); + Writer writer = + new Writer(job, finalOut, keyClass, valClass, codec, null); + writer.close(); + rec.startOffset = segmentStart; + rec.rawLength = writer.getRawLength(); + rec.partLength = writer.getCompressedLength(); + sr.putIndex(rec, i); + } + sr.writeToFile(finalIndexFile, job); + } finally { + finalOut.close(); + } + return; + } + { + IndexRecord rec = new IndexRecord(); + final SpillRecord spillRec = new SpillRecord(partitions); + for (int parts = 0; parts < partitions; parts++) { + //create the segments to be merged + List> segmentList = + new ArrayList>(numSpills); + for(int i = 0; i < numSpills; i++) { + IndexRecord indexRecord = indexCacheList.get(i).getIndex(parts); + + Segment s = + new Segment(job, rfs, filename[i], indexRecord.startOffset, + indexRecord.partLength, codec, true); + segmentList.add(i, s); + + if (LOG.isDebugEnabled()) { + LOG.debug("MapId=" + mapId + " Reducer=" + parts + + "Spill =" + i + "(" + indexRecord.startOffset + "," + + indexRecord.rawLength + ", " + indexRecord.partLength + ")"); + } + } + + //merge + @SuppressWarnings("unchecked") + RawKeyValueIterator kvIter = Merger.merge(job, rfs, + keyClass, valClass, codec, + segmentList, job.getInt("io.sort.factor", 100), + new Path(mapId.toString()), + job.getOutputKeyComparator(), reporter, + null, spilledRecordsCounter); + + //write merged output to disk + long segmentStart = finalOut.getPos(); + Writer writer = + new Writer(job, finalOut, keyClass, valClass, codec, + spilledRecordsCounter); + if (combinerRunner == null || numSpills < minSpillsForCombine) { + Merger.writeFile(kvIter, writer, reporter, job); + } else { + combineCollector.setWriter(writer); + combinerRunner.combine(kvIter, combineCollector); + } + + //close + writer.close(); + + // record offsets + rec.startOffset = segmentStart; + rec.rawLength = writer.getRawLength(); + rec.partLength = writer.getCompressedLength(); + spillRec.putIndex(rec, parts); + } + spillRec.writeToFile(finalIndexFile, job); + 
finalOut.close(); + for(int i = 0; i < numSpills; i++) { + rfs.delete(filename[i],true); + } + } + } + + } // MapOutputBuffer + + /** + * Exception indicating that the allocated sort buffer is insufficient + * to hold the current record. + */ + @SuppressWarnings("serial") + private static class MapBufferTooSmallException extends IOException { + public MapBufferTooSmallException(String s) { + super(s); + } + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/MapTaskCompletionEventsUpdate.java b/src/mapred/org/apache/hadoop/mapred/MapTaskCompletionEventsUpdate.java new file mode 100644 index 0000000..8bb4d14 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/MapTaskCompletionEventsUpdate.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import org.apache.hadoop.io.Writable; + +/** + * A class that represents the communication between the tasktracker and child + * tasks w.r.t the map task completion events. It also indicates whether the + * child task should reset its events index. + */ +class MapTaskCompletionEventsUpdate implements Writable { + TaskCompletionEvent[] events; + boolean reset; + + public MapTaskCompletionEventsUpdate() { } + + public MapTaskCompletionEventsUpdate(TaskCompletionEvent[] events, + boolean reset) { + this.events = events; + this.reset = reset; + } + + public boolean shouldReset() { + return reset; + } + + public TaskCompletionEvent[] getMapTaskCompletionEvents() { + return events; + } + + public void write(DataOutput out) throws IOException { + out.writeBoolean(reset); + out.writeInt(events.length); + for (TaskCompletionEvent event : events) { + event.write(out); + } + } + + public void readFields(DataInput in) throws IOException { + reset = in.readBoolean(); + events = new TaskCompletionEvent[in.readInt()]; + for (int i = 0; i < events.length; ++i) { + events[i] = new TaskCompletionEvent(); + events[i].readFields(in); + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/MapTaskRunner.java b/src/mapred/org/apache/hadoop/mapred/MapTaskRunner.java new file mode 100644 index 0000000..d027712 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/MapTaskRunner.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.*; + +import org.apache.hadoop.mapred.TaskTracker.TaskInProgress; + +/** Runs a map task. */ +class MapTaskRunner extends TaskRunner { + + public MapTaskRunner(TaskInProgress task, TaskTracker tracker, JobConf conf) { + super(task, tracker, conf); + } + + /** Delete any temporary files from previous failed attempts. */ + public boolean prepare() throws IOException { + if (!super.prepare()) { + return false; + } + + mapOutputFile.removeAll(getTask().getTaskID()); + return true; + } + + /** Delete all of the temporary map output files. */ + public void close() throws IOException { + LOG.info(getTask()+" done; removing files."); + mapOutputFile.removeAll(getTask().getTaskID()); + } + + @Override + public String getChildJavaOpts(JobConf jobConf, String defaultValue) { + return jobConf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS, + super.getChildJavaOpts(jobConf, + JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS)); + } + + @Override + public int getChildUlimit(JobConf jobConf) { + return jobConf.getInt(JobConf.MAPRED_MAP_TASK_ULIMIT, + super.getChildUlimit(jobConf)); + } + + @Override + public String getChildEnv(JobConf jobConf) { + return jobConf.get(JobConf.MAPRED_MAP_TASK_ENV, super.getChildEnv(jobConf)); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/MapTaskStatus.java b/src/mapred/org/apache/hadoop/mapred/MapTaskStatus.java new file mode 100644 index 0000000..8d5cf00 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/MapTaskStatus.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred; + + +class MapTaskStatus extends TaskStatus { + + public MapTaskStatus() {} + + public MapTaskStatus(TaskAttemptID taskid, float progress, int numSlots, + State runState, String diagnosticInfo, String stateString, + String taskTracker, Phase phase, Counters counters) { + super(taskid, progress, numSlots, runState, diagnosticInfo, stateString, + taskTracker, phase, counters); + } + + @Override + public boolean getIsMap() { + return true; + } + + @Override + public long getShuffleFinishTime() { + throw new UnsupportedOperationException("getShuffleFinishTime() not supported for MapTask"); + } + + @Override + void setShuffleFinishTime(long shuffleFinishTime) { + throw new UnsupportedOperationException("setShuffleFinishTime() not supported for MapTask"); + } + + @Override + public long getSortFinishTime() { + throw new UnsupportedOperationException("getSortFinishTime() not supported for MapTask"); + } + + @Override + void setSortFinishTime(long sortFinishTime) { + throw new UnsupportedOperationException("setSortFinishTime() not supported for MapTask"); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/Mapper.java b/src/mapred/org/apache/hadoop/mapred/Mapper.java new file mode 100644 index 0000000..7fa44a9 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/Mapper.java @@ -0,0 +1,159 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.io.Closeable; +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.compress.CompressionCodec; + +/** + * Maps input key/value pairs to a set of intermediate key/value pairs. + * + *

Maps are the individual tasks which transform input records into + * intermediate records. The transformed intermediate records need not be of + * the same type as the input records. A given input pair may map to zero or + * many output pairs.

+ * + *

The Hadoop Map-Reduce framework spawns one map task for each + * {@link InputSplit} generated by the {@link InputFormat} for the job. + * Mapper implementations can access the {@link JobConf} for the + * job via the {@link JobConfigurable#configure(JobConf)} and initialize + * themselves. Similarly they can use the {@link Closeable#close()} method for + * de-initialization.

+ * + *

The framework then calls + * {@link #map(Object, Object, OutputCollector, Reporter)} + * for each key/value pair in the InputSplit for that task.

+ * + *

All intermediate values associated with a given output key are + * subsequently grouped by the framework, and passed to a {@link Reducer} to + * determine the final output. Users can control the grouping by specifying + * a Comparator via + * {@link JobConf#setOutputKeyComparatorClass(Class)}.

+ * + *

The grouped Mapper outputs are partitioned per + * Reducer. Users can control which keys (and hence records) go to + * which Reducer by implementing a custom {@link Partitioner}. + * + *

Users can optionally specify a combiner, via + * {@link JobConf#setCombinerClass(Class)}, to perform local aggregation of the + * intermediate outputs, which helps to cut down the amount of data transferred + * from the Mapper to the Reducer. + * + *
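+ *
+ * <p>As an illustrative sketch (the class names are placeholders, not part of
+ * this interface), a job driver typically wires these pieces together on the
+ * {@link JobConf}:</p>
+ * <pre>
+ *     JobConf job = new JobConf(MyJob.class);
+ *     job.setMapperClass(MyMapper.class);                   // the Mapper implementation
+ *     job.setCombinerClass(MyReducer.class);                // optional local aggregation
+ *     job.setPartitionerClass(MyPartitioner.class);         // optional custom partitioning
+ *     job.setOutputKeyComparatorClass(MyComparator.class);  // optional grouping control
+ * </pre>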

The intermediate, grouped outputs are always stored in + * {@link SequenceFile}s. Applications can specify if and how the intermediate + * outputs are to be compressed and which {@link CompressionCodec}s are to be + * used via the JobConf.

+ * + *

If the job has + * zero + * reduces then the output of the Mapper is directly written + * to the {@link FileSystem} without grouping by keys.
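+ *
+ * <p>(A map-only job of this kind is requested, for example, by calling
+ * <code>job.setNumReduceTasks(0)</code> on the {@link JobConf}.)</p>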

+ * + *

Example:

+ *

+ *     public class MyMapper<K extends WritableComparable, V extends Writable> 
+ *     extends MapReduceBase implements Mapper<K, V, K, V> {
+ *     
+ *       static enum MyCounters { NUM_RECORDS }
+ *       
+ *       private String mapTaskId;
+ *       private String inputFile;
+ *       private int noRecords = 0;
+ *       
+ *       public void configure(JobConf job) {
+ *         mapTaskId = job.get("mapred.task.id");
+ *         inputFile = job.get("map.input.file");
+ *       }
+ *       
+ *       public void map(K key, V val,
+ *                       OutputCollector<K, V> output, Reporter reporter)
+ *       throws IOException {
+ *         // Process the <key, value> pair (assume this takes a while)
+ *         // ...
+ *         // ...
+ *         
+ *         // Let the framework know that we are alive, and kicking!
+ *         // reporter.progress();
+ *         
+ *         // Process some more
+ *         // ...
+ *         // ...
+ *         
+ *         // Increment the no. of <key, value> pairs processed
+ *         ++noRecords;
+ *
+ *         // Increment counters
+ *         reporter.incrCounter(NUM_RECORDS, 1);
+ *        
+ *         // Every 100 records update application-level status
+ *         if ((noRecords%100) == 0) {
+ *           reporter.setStatus(mapTaskId + " processed " + noRecords + 
+ *                              " from input-file: " + inputFile); 
+ *         }
+ *         
+ *         // Output the result
+ *         output.collect(key, val);
+ *       }
+ *     }
+ * 

+ * + *

Applications may write a custom {@link MapRunnable} to exert greater + * control on map processing e.g. multi-threaded Mappers etc.

+ * + * @see JobConf + * @see InputFormat + * @see Partitioner + * @see Reducer + * @see MapReduceBase + * @see MapRunnable + * @see SequenceFile + * @deprecated Use {@link org.apache.hadoop.mapreduce.Mapper} instead. + */ +@Deprecated +public interface Mapper extends JobConfigurable, Closeable { + + /** + * Maps a single input key/value pair into an intermediate key/value pair. + * + *

Output pairs need not be of the same types as input pairs. A given + * input pair may map to zero or many output pairs. Output pairs are + * collected with calls to + * {@link OutputCollector#collect(Object,Object)}.

+ * + *

Applications can use the {@link Reporter} provided to report progress + * or just indicate that they are alive. In scenarios where the application + * takes a significant amount of time to process individual key/value + * pairs, this is crucial since the framework might assume that the task has + * timed-out and kill that task. The other way of avoiding this is to set + * mapred.task.timeout to a high-enough value (or even zero for no + * time-outs).
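+ *
+ * <p>For example (a minimal sketch; the surrounding job set-up is assumed), the
+ * time-out can be relaxed on the job configuration instead of reporting
+ * progress from within <code>map()</code>:</p>
+ * <pre>
+ *     JobConf job = new JobConf(MyJob.class);
+ *     job.setLong("mapred.task.timeout", 0);   // zero disables the task time-out
+ * </pre>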

+ * + * @param key the input key. + * @param value the input value. + * @param output collects mapped keys and values. + * @param reporter facility to report progress. + */ + void map(K1 key, V1 value, OutputCollector output, Reporter reporter) + throws IOException; +} diff --git a/src/mapred/org/apache/hadoop/mapred/MergeSorter.java b/src/mapred/org/apache/hadoop/mapred/MergeSorter.java new file mode 100644 index 0000000..5e3fbe7 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/MergeSorter.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.util.Comparator; +import org.apache.hadoop.io.IntWritable; +import org.apache.hadoop.util.MergeSort; +import org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator; + +/** This class implements the sort method from BasicTypeSorterBase class as + * MergeSort. Note that this class is really a wrapper over the actual + * mergesort implementation that is there in the util package. The main intent + * of providing this class is to setup the input data for the util.MergeSort + * algo so that the latter doesn't need to bother about the various data + * structures that have been created for the Map output but rather concentrate + * on the core algorithm (thereby allowing easy integration of a mergesort + * implementation). The bridge between this class and the util.MergeSort class + * is the Comparator. + */ +class MergeSorter extends BasicTypeSorterBase +implements Comparator { + private static int progressUpdateFrequency = 10000; + private int progressCalls = 0; + + /** The sort method derived from BasicTypeSorterBase and overridden here*/ + public RawKeyValueIterator sort() { + MergeSort m = new MergeSort(this); + int count = super.count; + if (count == 0) return null; + int [] pointers = super.pointers; + int [] pointersCopy = new int[count]; + System.arraycopy(pointers, 0, pointersCopy, 0, count); + m.mergeSort(pointers, pointersCopy, 0, count); + return new MRSortResultIterator(super.keyValBuffer, pointersCopy, + super.startOffsets, super.keyLengths, super.valueLengths); + } + /** The implementation of the compare method from Comparator. 
Note that + * Comparator.compare takes objects as inputs and so the int values are + * wrapped in (reusable) IntWritables from the class util.MergeSort + * @param i + * @param j + * @return int as per the specification of Comparator.compare + */ + public int compare (IntWritable i, IntWritable j) { + // indicate we're making progress but do a batch update + if (progressCalls < progressUpdateFrequency) { + progressCalls++; + } else { + progressCalls = 0; + reporter.progress(); + } + return comparator.compare(keyValBuffer.getData(), startOffsets[i.get()], + keyLengths[i.get()], + keyValBuffer.getData(), startOffsets[j.get()], + keyLengths[j.get()]); + } + + /** Add the extra memory that will be utilized by the sort method */ + public long getMemoryUtilized() { + //this is memory that will be actually utilized (considering the temp + //array that will be allocated by the sort() method (mergesort)) + return super.getMemoryUtilized() + super.count * 4; + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/Merger.java b/src/mapred/org/apache/hadoop/mapred/Merger.java new file mode 100644 index 0000000..8774cc1 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/Merger.java @@ -0,0 +1,559 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.ChecksumFileSystem; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocalDirAllocator; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.DataInputBuffer; +import org.apache.hadoop.io.RawComparator; +import org.apache.hadoop.io.compress.CompressionCodec; +import org.apache.hadoop.mapred.IFile.Reader; +import org.apache.hadoop.mapred.IFile.Writer; +import org.apache.hadoop.util.PriorityQueue; +import org.apache.hadoop.util.Progress; +import org.apache.hadoop.util.Progressable; + +class Merger { + private static final Log LOG = LogFactory.getLog(Merger.class); + + // Local directories + private static LocalDirAllocator lDirAlloc = + new LocalDirAllocator("mapred.local.dir"); + + public static + RawKeyValueIterator merge(Configuration conf, FileSystem fs, + Class keyClass, Class valueClass, + CompressionCodec codec, + Path[] inputs, boolean deleteInputs, + int mergeFactor, Path tmpDir, + RawComparator comparator, Progressable reporter, + Counters.Counter readsCounter, + Counters.Counter writesCounter) + throws IOException { + return + new MergeQueue(conf, fs, inputs, deleteInputs, codec, comparator, + reporter).merge(keyClass, valueClass, + mergeFactor, tmpDir, + readsCounter, writesCounter); + } + + public static + RawKeyValueIterator merge(Configuration conf, FileSystem fs, + Class keyClass, Class valueClass, + CompressionCodec codec, + List> segments, + int mergeFactor, Path tmpDir, + RawComparator comparator, Progressable reporter, + Counters.Counter readsCounter, + Counters.Counter writesCounter) + throws IOException { + return new MergeQueue(conf, fs, segments, comparator, reporter, + false, codec).merge(keyClass, valueClass, + mergeFactor, tmpDir, + readsCounter, writesCounter); + + } + + public static + RawKeyValueIterator merge(Configuration conf, FileSystem fs, + Class keyClass, Class valueClass, + List> segments, + int mergeFactor, Path tmpDir, + RawComparator comparator, Progressable reporter, + Counters.Counter readsCounter, + Counters.Counter writesCounter) + throws IOException { + return merge(conf, fs, keyClass, valueClass, segments, mergeFactor, tmpDir, + comparator, reporter, false, readsCounter, writesCounter); + } + + public static + RawKeyValueIterator merge(Configuration conf, FileSystem fs, + Class keyClass, Class valueClass, + List> segments, + int mergeFactor, Path tmpDir, + RawComparator comparator, Progressable reporter, + boolean sortSegments, + Counters.Counter readsCounter, + Counters.Counter writesCounter) + throws IOException { + return new MergeQueue(conf, fs, segments, comparator, reporter, + sortSegments).merge(keyClass, valueClass, + mergeFactor, tmpDir, + readsCounter, writesCounter); + } + + static + RawKeyValueIterator merge(Configuration conf, FileSystem fs, + Class keyClass, Class valueClass, + List> segments, + int mergeFactor, int inMemSegments, Path tmpDir, + RawComparator comparator, Progressable reporter, + boolean sortSegments, + Counters.Counter readsCounter, + Counters.Counter writesCounter) + throws IOException { + return new MergeQueue(conf, fs, segments, comparator, reporter, + sortSegments).merge(keyClass, 
valueClass, + mergeFactor, inMemSegments, + tmpDir, + readsCounter, writesCounter); + } + + + static + RawKeyValueIterator merge(Configuration conf, FileSystem fs, + Class keyClass, Class valueClass, + CompressionCodec codec, + List> segments, + int mergeFactor, int inMemSegments, Path tmpDir, + RawComparator comparator, Progressable reporter, + boolean sortSegments, + Counters.Counter readsCounter, + Counters.Counter writesCounter) + throws IOException { + return new MergeQueue(conf, fs, segments, comparator, reporter, + sortSegments, codec).merge(keyClass, valueClass, + mergeFactor, inMemSegments, + tmpDir, + readsCounter, writesCounter); +} + + public static + void writeFile(RawKeyValueIterator records, Writer writer, + Progressable progressable, Configuration conf) + throws IOException { + long progressBar = conf.getLong("mapred.merge.recordsBeforeProgress", + 10000); + long recordCtr = 0; + while(records.next()) { + writer.append(records.getKey(), records.getValue()); + + if (((recordCtr++) % progressBar) == 0) { + progressable.progress(); + } + } +} + + public static class Segment { + Reader reader = null; + DataInputBuffer key = new DataInputBuffer(); + DataInputBuffer value = new DataInputBuffer(); + + Configuration conf = null; + FileSystem fs = null; + Path file = null; + boolean preserve = false; + CompressionCodec codec = null; + long segmentOffset = 0; + long segmentLength = -1; + + public Segment(Configuration conf, FileSystem fs, Path file, + CompressionCodec codec, boolean preserve) throws IOException { + this(conf, fs, file, 0, fs.getFileStatus(file).getLen(), codec, preserve); + } + + public Segment(Configuration conf, FileSystem fs, Path file, + long segmentOffset, long segmentLength, CompressionCodec codec, + boolean preserve) throws IOException { + this.conf = conf; + this.fs = fs; + this.file = file; + this.codec = codec; + this.preserve = preserve; + + this.segmentOffset = segmentOffset; + this.segmentLength = segmentLength; + } + + public Segment(Reader reader, boolean preserve) { + this.reader = reader; + this.preserve = preserve; + + this.segmentLength = reader.getLength(); + } + + private void init(Counters.Counter readsCounter) throws IOException { + if (reader == null) { + FSDataInputStream in = fs.open(file); + in.seek(segmentOffset); + reader = new Reader(conf, in, segmentLength, codec, readsCounter); + } + } + + DataInputBuffer getKey() { return key; } + DataInputBuffer getValue() { return value; } + + long getLength() { + return (reader == null) ? + segmentLength : reader.getLength(); + } + + boolean next() throws IOException { + return reader.next(key, value); + } + + void close() throws IOException { + reader.close(); + + if (!preserve && fs != null) { + fs.delete(file, false); + } + } + + public long getPosition() throws IOException { + return reader.getPosition(); + } + } + + private static class MergeQueue + extends PriorityQueue> implements RawKeyValueIterator { + Configuration conf; + FileSystem fs; + CompressionCodec codec; + + List> segments = new ArrayList>(); + + RawComparator comparator; + + private long totalBytesProcessed; + private float progPerByte; + private Progress mergeProgress = new Progress(); + + Progressable reporter; + + DataInputBuffer key; + DataInputBuffer value; + + Segment minSegment; + Comparator> segmentComparator = + new Comparator>() { + public int compare(Segment o1, Segment o2) { + if (o1.getLength() == o2.getLength()) { + return 0; + } + + return o1.getLength() < o2.getLength() ? 
-1 : 1; + } + }; + + + public MergeQueue(Configuration conf, FileSystem fs, + Path[] inputs, boolean deleteInputs, + CompressionCodec codec, RawComparator comparator, + Progressable reporter) + throws IOException { + this.conf = conf; + this.fs = fs; + this.codec = codec; + this.comparator = comparator; + this.reporter = reporter; + + for (Path file : inputs) { + segments.add(new Segment(conf, fs, file, codec, !deleteInputs)); + } + + // Sort segments on file-lengths + Collections.sort(segments, segmentComparator); + } + + public MergeQueue(Configuration conf, FileSystem fs, + List> segments, RawComparator comparator, + Progressable reporter) { + this(conf, fs, segments, comparator, reporter, false); + } + + public MergeQueue(Configuration conf, FileSystem fs, + List> segments, RawComparator comparator, + Progressable reporter, boolean sortSegments) { + this.conf = conf; + this.fs = fs; + this.comparator = comparator; + this.segments = segments; + this.reporter = reporter; + if (sortSegments) { + Collections.sort(segments, segmentComparator); + } + } + + public MergeQueue(Configuration conf, FileSystem fs, + List> segments, RawComparator comparator, + Progressable reporter, boolean sortSegments, CompressionCodec codec) { + this(conf, fs, segments, comparator, reporter, sortSegments); + this.codec = codec; + } + + public void close() throws IOException { + Segment segment; + while((segment = pop()) != null) { + segment.close(); + } + } + + public DataInputBuffer getKey() throws IOException { + return key; + } + + public DataInputBuffer getValue() throws IOException { + return value; + } + + private void adjustPriorityQueue(Segment reader) throws IOException{ + long startPos = reader.getPosition(); + boolean hasNext = reader.next(); + long endPos = reader.getPosition(); + totalBytesProcessed += endPos - startPos; + mergeProgress.set(totalBytesProcessed * progPerByte); + if (hasNext) { + adjustTop(); + } else { + pop(); + reader.close(); + } + } + + public boolean next() throws IOException { + if (size() == 0) + return false; + + if (minSegment != null) { + //minSegment is non-null for all invocations of next except the first + //one. 
For the first invocation, the priority queue is ready for use + //but for the subsequent invocations, first adjust the queue + adjustPriorityQueue(minSegment); + if (size() == 0) { + minSegment = null; + return false; + } + } + minSegment = top(); + + key = minSegment.getKey(); + value = minSegment.getValue(); + + return true; + } + + @SuppressWarnings("unchecked") + protected boolean lessThan(Object a, Object b) { + DataInputBuffer key1 = ((Segment)a).getKey(); + DataInputBuffer key2 = ((Segment)b).getKey(); + int s1 = key1.getPosition(); + int l1 = key1.getLength() - s1; + int s2 = key2.getPosition(); + int l2 = key2.getLength() - s2; + + return comparator.compare(key1.getData(), s1, l1, key2.getData(), s2, l2) < 0; + } + + public RawKeyValueIterator merge(Class keyClass, Class valueClass, + int factor, Path tmpDir, + Counters.Counter readsCounter, + Counters.Counter writesCounter) + throws IOException { + return merge(keyClass, valueClass, factor, 0, tmpDir, + readsCounter, writesCounter); + } + + RawKeyValueIterator merge(Class keyClass, Class valueClass, + int factor, int inMem, Path tmpDir, + Counters.Counter readsCounter, + Counters.Counter writesCounter) + throws IOException { + LOG.info("Merging " + segments.size() + " sorted segments"); + + //create the MergeStreams from the sorted map created in the constructor + //and dump the final output to a file + int numSegments = segments.size(); + int origFactor = factor; + int passNo = 1; + do { + //get the factor for this pass of merge. We assume in-memory segments + //are the first entries in the segment list and that the pass factor + //doesn't apply to them + factor = getPassFactor(factor, passNo, numSegments - inMem); + if (1 == passNo) { + factor += inMem; + } + List> segmentsToMerge = + new ArrayList>(); + int segmentsConsidered = 0; + int numSegmentsToConsider = factor; + long startBytes = 0; // starting bytes of segments of this merge + while (true) { + //extract the smallest 'factor' number of segments + //Call cleanup on the empty segments (no key/value data) + List> mStream = + getSegmentDescriptors(numSegmentsToConsider); + for (Segment segment : mStream) { + // Initialize the segment at the last possible moment; + // this helps in ensuring we don't use buffers until we need them + segment.init(readsCounter); + long startPos = segment.getPosition(); + boolean hasNext = segment.next(); + long endPos = segment.getPosition(); + startBytes += endPos - startPos; + + if (hasNext) { + segmentsToMerge.add(segment); + segmentsConsidered++; + } + else { + segment.close(); + numSegments--; //we ignore this segment for the merge + } + } + //if we have the desired number of segments + //or looked at all available segments, we break + if (segmentsConsidered == factor || + segments.size() == 0) { + break; + } + + numSegmentsToConsider = factor - segmentsConsidered; + } + + //feed the streams to the priority queue + initialize(segmentsToMerge.size()); + clear(); + for (Segment segment : segmentsToMerge) { + put(segment); + } + + //if we have lesser number of segments remaining, then just return the + //iterator, else do another single level merge + if (numSegments <= factor) { + // Reset totalBytesProcessed to track the progress of the final merge. + // This is considered the progress of the reducePhase, the 3rd phase + // of reduce task. Currently totalBytesProcessed is not used in sort + // phase of reduce task(i.e. when intermediate merges happen). + totalBytesProcessed = startBytes; + + //calculate the length of the remaining segments. 
Required for + //calculating the merge progress + long totalBytes = 0; + for (int i = 0; i < segmentsToMerge.size(); i++) { + totalBytes += segmentsToMerge.get(i).getLength(); + } + if (totalBytes != 0) //being paranoid + progPerByte = 1.0f / (float)totalBytes; + + if (totalBytes != 0) + mergeProgress.set(totalBytesProcessed * progPerByte); + else + mergeProgress.set(1.0f); // Last pass and no segments left - we're done + + LOG.info("Down to the last merge-pass, with " + numSegments + + " segments left of total size: " + totalBytes + " bytes"); + return this; + } else { + LOG.info("Merging " + segmentsToMerge.size() + + " intermediate segments out of a total of " + + (segments.size()+segmentsToMerge.size())); + + //we want to spread the creation of temp files on multiple disks if + //available under the space constraints + long approxOutputSize = 0; + for (Segment s : segmentsToMerge) { + approxOutputSize += s.getLength() + + ChecksumFileSystem.getApproxChkSumLength( + s.getLength()); + } + Path tmpFilename = + new Path(tmpDir, "intermediate").suffix("." + passNo); + + Path outputFile = lDirAlloc.getLocalPathForWrite( + tmpFilename.toString(), + approxOutputSize, conf); + + Writer writer = + new Writer(conf, fs, outputFile, keyClass, valueClass, codec, + writesCounter); + writeFile(this, writer, reporter, conf); + writer.close(); + + //we finished one single level merge; now clean up the priority + //queue + this.close(); + + // Add the newly create segment to the list of segments to be merged + Segment tempSegment = + new Segment(conf, fs, outputFile, codec, false); + segments.add(tempSegment); + numSegments = segments.size(); + Collections.sort(segments, segmentComparator); + + passNo++; + } + //we are worried about only the first pass merge factor. So reset the + //factor to what it originally was + factor = origFactor; + } while(true); + } + + /** + * Determine the number of segments to merge in a given pass. Assuming more + * than factor segments, the first pass should attempt to bring the total + * number of segments - 1 to be divisible by the factor - 1 (each pass + * takes X segments and produces 1) to minimize the number of merges. + */ + private int getPassFactor(int factor, int passNo, int numSegments) { + if (passNo > 1 || numSegments <= factor || factor == 1) + return factor; + int mod = (numSegments - 1) % (factor - 1); + if (mod == 0) + return factor; + return mod + 1; + } + + /** Return (& remove) the requested number of segment descriptors from the + * sorted map. + */ + private List> getSegmentDescriptors(int numDescriptors) { + if (numDescriptors > segments.size()) { + List> subList = new ArrayList>(segments); + segments.clear(); + return subList; + } + + List> subList = + new ArrayList>(segments.subList(0, numDescriptors)); + for (int i=0; i < numDescriptors; ++i) { + segments.remove(0); + } + return subList; + } + + public Progress getProgress() { + return mergeProgress; + } + + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/MultiFileInputFormat.java b/src/mapred/org/apache/hadoop/mapred/MultiFileInputFormat.java new file mode 100644 index 0000000..aad4138 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/MultiFileInputFormat.java @@ -0,0 +1,105 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; + +/** + * An abstract {@link InputFormat} that returns {@link MultiFileSplit}'s + * in {@link #getSplits(JobConf, int)} method. Splits are constructed from + * the files under the input paths. Each split returned contains nearly + * equal content length.
+ * Subclasses implement {@link #getRecordReader(InputSplit, JobConf, Reporter)} + * to construct RecordReader's for MultiFileSplit's. + * @see MultiFileSplit + * @deprecated Use {@link org.apache.hadoop.mapred.lib.CombineFileInputFormat} instead + */ +@Deprecated +public abstract class MultiFileInputFormat + extends FileInputFormat { + + @Override + public InputSplit[] getSplits(JobConf job, int numSplits) + throws IOException { + + Path[] paths = FileUtil.stat2Paths(FileInputFormat.listStatus(job)); + List splits = new ArrayList(Math.min(numSplits, paths.length)); + if (paths.length != 0) { + // HADOOP-1818: Manage splits only if there are paths + long[] lengths = new long[paths.length]; + long totLength = 0; + for(int i=0; i= goalLength) { + return i - startIndex + 1; + } + } + return lengths.length - startIndex; + } + + @Override + public abstract RecordReader getRecordReader(InputSplit split, + JobConf job, Reporter reporter) + throws IOException; +} diff --git a/src/mapred/org/apache/hadoop/mapred/MultiFileSplit.java b/src/mapred/org/apache/hadoop/mapred/MultiFileSplit.java new file mode 100644 index 0000000..d6d5b4e --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/MultiFileSplit.java @@ -0,0 +1,86 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.HashSet; +import java.util.Set; + +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.BlockLocation; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.lib.CombineFileSplit; + +/** + * A sub-collection of input files. Unlike {@link FileSplit}, MultiFileSplit + * class does not represent a split of a file, but a split of input files + * into smaller sets. The atomic unit of split is a file.
+ * MultiFileSplit can be used to implement {@link RecordReader}'s, with + * reading one record per file. + * @see FileSplit + * @see MultiFileInputFormat + * @deprecated Use {@link org.apache.hadoop.mapred.lib.CombineFileSplit} instead + */ +@Deprecated +public class MultiFileSplit extends CombineFileSplit { + + MultiFileSplit() {} + + public MultiFileSplit(JobConf job, Path[] files, long[] lengths) { + super(job, files, lengths); + } + + public String[] getLocations() throws IOException { + HashSet hostSet = new HashSet(); + for (Path file : getPaths()) { + FileSystem fs = file.getFileSystem(getJob()); + FileStatus status = fs.getFileStatus(file); + BlockLocation[] blkLocations = fs.getFileBlockLocations(status, + 0, status.getLen()); + if (blkLocations != null && blkLocations.length > 0) { + addToSet(hostSet, blkLocations[0].getHosts()); + } + } + return hostSet.toArray(new String[hostSet.size()]); + } + + private void addToSet(Set set, String[] array) { + for(String s:array) + set.add(s); + } + + @Override + public String toString() { + StringBuffer sb = new StringBuffer(); + for(int i=0; i < getPaths().length; i++) { + sb.append(getPath(i).toUri().getPath() + ":0+" + getLength(i)); + if (i < getPaths().length -1) { + sb.append("\n"); + } + } + + return sb.toString(); + } +} + diff --git a/src/mapred/org/apache/hadoop/mapred/NodeHealthCheckerService.java b/src/mapred/org/apache/hadoop/mapred/NodeHealthCheckerService.java new file mode 100644 index 0000000..25f0679 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/NodeHealthCheckerService.java @@ -0,0 +1,367 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Timer; +import java.util.TimerTask; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapred.TaskTrackerStatus.TaskTrackerHealthStatus; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.Shell.ExitCodeException; +import org.apache.hadoop.util.Shell.ShellCommandExecutor; + +/** + * + * The class which provides functionality of checking the health of the node and + * reporting back to the service for which the health checker has been asked to + * report. + */ +class NodeHealthCheckerService { + + private static Log LOG = LogFactory.getLog(NodeHealthCheckerService.class); + + /** Absolute path to the health script. 
*/ + private String nodeHealthScript; + /** Delay after which node health script to be executed */ + private long intervalTime; + /** Time after which the script should be timedout */ + private long scriptTimeout; + /** Timer used to schedule node health monitoring script execution */ + private Timer nodeHealthScriptScheduler; + + /** ShellCommandExecutor used to execute monitoring script */ + ShellCommandExecutor shexec = null; + + /** Configuration used by the checker */ + private Configuration conf; + + /** Pattern used for searching in the output of the node health script */ + static private final String ERROR_PATTERN = "ERROR"; + + /* Configuration keys */ + static final String HEALTH_CHECK_SCRIPT_PROPERTY = "mapred.healthChecker.script.path"; + + static final String HEALTH_CHECK_INTERVAL_PROPERTY = "mapred.healthChecker.interval"; + + static final String HEALTH_CHECK_FAILURE_INTERVAL_PROPERTY = "mapred.healthChecker.script.timeout"; + + static final String HEALTH_CHECK_SCRIPT_ARGUMENTS_PROPERTY = "mapred.healthChecker.script.args"; + /* end of configuration keys */ + /** Time out error message */ + static final String NODE_HEALTH_SCRIPT_TIMED_OUT_MSG = "Node health script timed out"; + + /** Default frequency of running node health script */ + private static final long DEFAULT_HEALTH_CHECK_INTERVAL = 10 * 60 * 1000; + /** Default script time out period */ + private static final long DEFAULT_HEALTH_SCRIPT_FAILURE_INTERVAL = 2 * DEFAULT_HEALTH_CHECK_INTERVAL; + + private boolean isHealthy; + + private String healthReport; + + private long lastReportedTime; + + private TimerTask timer; + + + private enum HealthCheckerExitStatus { + SUCCESS, + TIMED_OUT, + FAILED_WITH_EXIT_CODE, + FAILED_WITH_EXCEPTION, + FAILED + } + + + /** + * Class which is used by the {@link Timer} class to periodically execute the + * node health script. + * + */ + private class NodeHealthMonitorExecutor extends TimerTask { + + String exceptionStackTrace = ""; + + public NodeHealthMonitorExecutor(String[] args) { + ArrayList execScript = new ArrayList(); + execScript.add(nodeHealthScript); + if (args != null) { + execScript.addAll(Arrays.asList(args)); + } + shexec = new ShellCommandExecutor((String[]) execScript + .toArray(new String[execScript.size()]), null, null, scriptTimeout); + } + + @Override + public void run() { + HealthCheckerExitStatus status = HealthCheckerExitStatus.SUCCESS; + try { + shexec.execute(); + } catch (ExitCodeException e) { + // ignore the exit code of the script + status = HealthCheckerExitStatus.FAILED_WITH_EXIT_CODE; + } catch (Exception e) { + LOG.warn("Caught exception : " + e.getMessage()); + if (!shexec.isTimedOut()) { + status = HealthCheckerExitStatus.FAILED_WITH_EXCEPTION; + } else { + status = HealthCheckerExitStatus.TIMED_OUT; + } + exceptionStackTrace = StringUtils.stringifyException(e); + } finally { + if (status == HealthCheckerExitStatus.SUCCESS) { + if (hasErrors(shexec.getOutput())) { + status = HealthCheckerExitStatus.FAILED; + } + } + reportHealthStatus(status); + } + } + + /** + * Method which is used to parse output from the node health monitor and + * send to the report address. + * + * The timed out script or script which causes IOException output is + * ignored. + * + * The node is marked unhealthy if + *
+ * <ol>
+ * <li>The node health script times out</li>
+ * <li>The node health script's output has a line which begins with ERROR</li>
+ * <li>An exception is thrown while executing the script</li>
+ * </ol>
+ * If the script throws {@link IOException} or {@link ExitCodeException} the + * output is ignored and node is left remaining healthy, as script might + * have syntax error. + * + * @param status + */ + void reportHealthStatus(HealthCheckerExitStatus status) { + long now = System.currentTimeMillis(); + switch (status) { + case SUCCESS: + setHealthStatus(true, "", now); + break; + case TIMED_OUT: + setHealthStatus(false, NODE_HEALTH_SCRIPT_TIMED_OUT_MSG); + break; + case FAILED_WITH_EXCEPTION: + setHealthStatus(false, exceptionStackTrace); + break; + case FAILED_WITH_EXIT_CODE: + setHealthStatus(true, "", now); + break; + case FAILED: + setHealthStatus(false, shexec.getOutput()); + break; + } + } + + /** + * Method to check if the output string has line which begins with ERROR. + * + * @param output + * string + * @return true if output string has error pattern in it. + */ + private boolean hasErrors(String output) { + String[] splits = output.split("\n"); + for (String split : splits) { + if (split.startsWith(ERROR_PATTERN)) { + return true; + } + } + return false; + } + } + + public NodeHealthCheckerService(Configuration conf) { + this.conf = conf; + this.lastReportedTime = System.currentTimeMillis(); + this.isHealthy = true; + this.healthReport = ""; + initialize(conf); + } + + /* + * Method which initializes the values for the script path and interval time. + */ + private void initialize(Configuration conf) { + this.nodeHealthScript = conf.get(HEALTH_CHECK_SCRIPT_PROPERTY); + this.intervalTime = conf.getLong(HEALTH_CHECK_INTERVAL_PROPERTY, + DEFAULT_HEALTH_CHECK_INTERVAL); + this.scriptTimeout = conf.getLong(HEALTH_CHECK_FAILURE_INTERVAL_PROPERTY, + DEFAULT_HEALTH_SCRIPT_FAILURE_INTERVAL); + String[] args = conf.getStrings(HEALTH_CHECK_SCRIPT_ARGUMENTS_PROPERTY, + new String[] {}); + timer = new NodeHealthMonitorExecutor(args); + } + + /** + * Method used to start the Node health monitoring. + * + */ + void start() { + // if health script path is not configured don't start the thread. + if (!shouldRun(conf)) { + LOG.info("Not starting node health monitor"); + return; + } + nodeHealthScriptScheduler = new Timer("NodeHealthMonitor-Timer", true); + // Start the timer task immediately and + // then periodically at interval time. + nodeHealthScriptScheduler.scheduleAtFixedRate(timer, 0, intervalTime); + } + + /** + * Method used to terminate the node health monitoring service. + * + */ + void stop() { + if (!shouldRun(conf)) { + return; + } + nodeHealthScriptScheduler.cancel(); + if (shexec != null) { + Process p = shexec.getProcess(); + if (p != null) { + p.destroy(); + } + } + } + + /** + * Gets the if the node is healthy or not + * + * @return true if node is healthy + */ + private boolean isHealthy() { + return isHealthy; + } + + /** + * Sets if the node is healhty or not. + * + * @param isHealthy + * if or not node is healthy + */ + private synchronized void setHealthy(boolean isHealthy) { + this.isHealthy = isHealthy; + } + + /** + * Returns output from health script. if node is healthy then an empty string + * is returned. + * + * @return output from health script + */ + private String getHealthReport() { + return healthReport; + } + + /** + * Sets the health report from the node health script. + * + * @param healthReport + */ + private synchronized void setHealthReport(String healthReport) { + this.healthReport = healthReport; + } + + /** + * Returns time stamp when node health script was last run. 
+ * + * @return timestamp when node health script was last run + */ + private long getLastReportedTime() { + return lastReportedTime; + } + + /** + * Sets the last run time of the node health script. + * + * @param lastReportedTime + */ + private synchronized void setLastReportedTime(long lastReportedTime) { + this.lastReportedTime = lastReportedTime; + } + + /** + * Method used to determine if or not node health monitoring service should be + * started or not. Returns true if following conditions are met: + * + *
+ * <ol>
+ * <li>Path to Node health check script is not empty</li>
+ * <li>Node health check script file exists</li>
+ * </ol>
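+ *
+ * <p>For illustration (the script path and intervals below are example values
+ * only), the service is driven by the configuration keys defined above:</p>
+ * <pre>
+ *     Configuration conf = new Configuration();
+ *     conf.set("mapred.healthChecker.script.path", "/usr/local/bin/node_health.sh");
+ *     conf.setLong("mapred.healthChecker.interval", 60 * 1000L);        // run once a minute
+ *     conf.setLong("mapred.healthChecker.script.timeout", 30 * 1000L);  // kill the script after 30s
+ *     if (NodeHealthCheckerService.shouldRun(conf)) {
+ *       new NodeHealthCheckerService(conf).start();
+ *     }
+ * </pre>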
+ * + * @param conf + * @return true if node health monitoring service can be started. + */ + static boolean shouldRun(Configuration conf) { + String nodeHealthScript = conf.get(HEALTH_CHECK_SCRIPT_PROPERTY); + if (nodeHealthScript == null || nodeHealthScript.trim().isEmpty()) { + return false; + } + File f = new File(nodeHealthScript); + return f.exists() && f.canExecute(); + } + + private synchronized void setHealthStatus(boolean isHealthy, String output) { + this.setHealthy(isHealthy); + this.setHealthReport(output); + } + + private synchronized void setHealthStatus(boolean isHealthy, String output, + long time) { + this.setHealthStatus(isHealthy, output); + this.setLastReportedTime(time); + } + + /** + * Method to populate the fields for the {@link TaskTrackerHealthStatus} + * + * @param healthStatus + */ + synchronized void setHealthStatus(TaskTrackerHealthStatus healthStatus) { + healthStatus.setNodeHealthy(this.isHealthy()); + healthStatus.setHealthReport(this.getHealthReport()); + healthStatus.setLastReported(this.getLastReportedTime()); + } + + /** + * Test method to directly access the timer which node + * health checker would use. + * + * + * @return Timer task + */ + //XXX:Not to be used directly. + TimerTask getTimer() { + return timer; + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/OutputCollector.java b/src/mapred/org/apache/hadoop/mapred/OutputCollector.java new file mode 100644 index 0000000..c8d709d --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/OutputCollector.java @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; + +/** + * Collects the <key, value> pairs output by {@link Mapper}s + * and {@link Reducer}s. + * + *

OutputCollector is the generalization of the facility + * provided by the Map-Reduce framework to collect data output by either the + * Mapper or the Reducer i.e. intermediate outputs + * or the output of the job.
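+ *
+ * <p>For example, inside a {@link Mapper} (the key/value types here are only
+ * illustrative):</p>
+ * <pre>
+ *     public void map(LongWritable key, Text value,
+ *                     OutputCollector<Text, IntWritable> output, Reporter reporter)
+ *     throws IOException {
+ *       output.collect(new Text(value.toString()), new IntWritable(1));
+ *     }
+ * </pre>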

+ */ +public interface OutputCollector { + + /** Adds a key/value pair to the output. + * + * @param key the key to collect. + * @param value to value to collect. + * @throws IOException + */ + void collect(K key, V value) throws IOException; +} diff --git a/src/mapred/org/apache/hadoop/mapred/OutputCommitter.java b/src/mapred/org/apache/hadoop/mapred/OutputCommitter.java new file mode 100644 index 0000000..3c63f9f --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/OutputCommitter.java @@ -0,0 +1,237 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; + +/** + * OutputCommitter describes the commit of task output for a + * Map-Reduce job. + * + *

The Map-Reduce framework relies on the OutputCommitter of + * the job to:

+ *

+ * <ol>
+ * <li>
+ * Setup the job during initialization. For example, create the temporary
+ * output directory for the job during the initialization of the job.
+ * </li>
+ * <li>
+ * Cleanup the job after the job completion. For example, remove the
+ * temporary output directory after the job completion.
+ * </li>
+ * <li>
+ * Setup the task temporary output.
+ * </li>
+ * <li>
+ * Check whether a task needs a commit. This is to avoid the commit
+ * procedure if a task does not need commit.
+ * </li>
+ * <li>
+ * Commit of the task output.
+ * </li>
+ * <li>
+ * Discard the task commit.
+ * </li>
+ * </ol>
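+ *
+ * <p>A minimal no-op committer might look roughly like the sketch below; it is
+ * shown only to illustrate the call points listed above:</p>
+ * <pre>
+ *     public class NoOpOutputCommitter extends OutputCommitter {
+ *       public void setupJob(JobContext jobContext) { }
+ *       public void setupTask(TaskAttemptContext taskContext) { }
+ *       public boolean needsTaskCommit(TaskAttemptContext taskContext) {
+ *         return false;   // nothing to promote, so commitTask is never invoked
+ *       }
+ *       public void commitTask(TaskAttemptContext taskContext) { }
+ *       public void abortTask(TaskAttemptContext taskContext) { }
+ *     }
+ * </pre>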
+ * + * @see FileOutputCommitter + * @see JobContext + * @see TaskAttemptContext + * @deprecated Use {@link org.apache.hadoop.mapreduce.OutputCommitter} instead. + */ +@Deprecated +public abstract class OutputCommitter + extends org.apache.hadoop.mapreduce.OutputCommitter { + /** + * For the framework to setup the job output during initialization + * + * @param jobContext Context of the job whose output is being written. + * @throws IOException if temporary output could not be created + */ + public abstract void setupJob(JobContext jobContext) throws IOException; + + /** + * For cleaning up the job's output after job completion + * @deprecated use {@link #commitJob(JobContext)} or + * {@link #abortJob(JobContext, int)} instead + */ + @Deprecated + public void cleanupJob(JobContext jobContext) throws IOException { } + + /** + * For committing job's output after successful job completion. Note that this + * is invoked for jobs with final run state as {@link JobStatus#SUCCEEDED}. + * + * @param jobContext Context of the job whose output is being written. + * @throws IOException + */ + public void commitJob(JobContext jobContext) throws IOException { + cleanupJob(jobContext); + } + + /** + * For cleaning up the job's output after job failure. + * + * @param jobContext Context of the job whose output is being written. + * @param status Final run state of the job, should be + * {@link JobStatus#KILLED} or {@link JobStatus#FAILED} + * @throws IOException + */ + public void abortJob(JobContext jobContext, int status) + throws IOException { + cleanupJob(jobContext); + } + + /** + * Sets up output for the task. + * + * @param taskContext Context of the task whose output is being written. + * @throws IOException + */ + public abstract void setupTask(TaskAttemptContext taskContext) + throws IOException; + + /** + * Check whether task needs a commit + * + * @param taskContext + * @return true/false + * @throws IOException + */ + public abstract boolean needsTaskCommit(TaskAttemptContext taskContext) + throws IOException; + + /** + * To promote the task's temporary output to final output location + * + * The task's output is moved to the job's output directory. + * + * @param taskContext Context of the task whose output is being written. + * @throws IOException if commit is not + */ + public abstract void commitTask(TaskAttemptContext taskContext) + throws IOException; + + /** + * Discard the task output + * + * @param taskContext + * @throws IOException + */ + public abstract void abortTask(TaskAttemptContext taskContext) + throws IOException; + + /** + * This method implements the new interface by calling the old method. Note + * that the input types are different between the new and old apis and this + * is a bridge between the two. + */ + @Override + public final void setupJob(org.apache.hadoop.mapreduce.JobContext jobContext + ) throws IOException { + setupJob((JobContext) jobContext); + } + + /** + * This method implements the new interface by calling the old method. Note + * that the input types are different between the new and old apis and this + * is a bridge between the two. + */ + @Override + @Deprecated + public final void cleanupJob(org.apache.hadoop.mapreduce.JobContext context + ) throws IOException { + cleanupJob((JobContext) context); + } + + /** + * This method implements the new interface by calling the old method. Note + * that the input types are different between the new and old apis and this + * is a bridge between the two. 
+ */ + @Override + public final void commitJob(org.apache.hadoop.mapreduce.JobContext context + ) throws IOException { + commitJob((JobContext) context); + } + + /** + * This method implements the new interface by calling the old method. Note + * that the input types are different between the new and old apis and this + * is a bridge between the two. + */ + @Override + public final void abortJob(org.apache.hadoop.mapreduce.JobContext context, + org.apache.hadoop.mapreduce.JobStatus.State runState) + throws IOException { + int state = JobStatus.getOldNewJobRunState(runState); + if (state != JobStatus.FAILED && state != JobStatus.KILLED) { + throw new IOException ("Invalid job run state : " + runState.name()); + } + abortJob((JobContext) context, state); + } + + /** + * This method implements the new interface by calling the old method. Note + * that the input types are different between the new and old apis and this + * is a bridge between the two. + */ + @Override + public final + void setupTask(org.apache.hadoop.mapreduce.TaskAttemptContext taskContext + ) throws IOException { + setupTask((TaskAttemptContext) taskContext); + } + + /** + * This method implements the new interface by calling the old method. Note + * that the input types are different between the new and old apis and this + * is a bridge between the two. + */ + @Override + public final boolean + needsTaskCommit(org.apache.hadoop.mapreduce.TaskAttemptContext taskContext + ) throws IOException { + return needsTaskCommit((TaskAttemptContext) taskContext); + } + + /** + * This method implements the new interface by calling the old method. Note + * that the input types are different between the new and old apis and this + * is a bridge between the two. + */ + @Override + public final + void commitTask(org.apache.hadoop.mapreduce.TaskAttemptContext taskContext + ) throws IOException { + commitTask((TaskAttemptContext) taskContext); + } + + /** + * This method implements the new interface by calling the old method. Note + * that the input types are different between the new and old apis and this + * is a bridge between the two. + */ + @Override + public final + void abortTask(org.apache.hadoop.mapreduce.TaskAttemptContext taskContext + ) throws IOException { + abortTask((TaskAttemptContext) taskContext); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/OutputFormat.java b/src/mapred/org/apache/hadoop/mapred/OutputFormat.java new file mode 100644 index 0000000..bf5bec1 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/OutputFormat.java @@ -0,0 +1,78 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.util.Progressable; + +/** + * OutputFormat describes the output-specification for a + * Map-Reduce job. + * + *

The Map-Reduce framework relies on the OutputFormat of the
+ * job to:
+ *
+ *   1. Validate the output-specification of the job, e.g. check that the
+ *      output directory doesn't already exist.
+ *   2. Provide the {@link RecordWriter} implementation to be used to write
+ *      out the output files of the job. Output files are stored in a
+ *      {@link FileSystem}.
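+ *
+ * A minimal implementation sketch is shown below. It is illustrative only:
+ * the class name and the tab-separated record format are assumptions, not
+ * part of this API, and error handling is elided.
+ * <pre>
+ *   public class TabSeparatedOutputFormat implements OutputFormat<Text, Text> {
+ *
+ *     public RecordWriter<Text, Text> getRecordWriter(FileSystem ignored,
+ *         JobConf job, String name, Progressable progress) throws IOException {
+ *       // write this task's output to the per-task file named by the framework
+ *       Path file = FileOutputFormat.getTaskOutputPath(job, name);
+ *       final FSDataOutputStream out =
+ *           file.getFileSystem(job).create(file, progress);
+ *       return new RecordWriter<Text, Text>() {
+ *         public void write(Text key, Text value) throws IOException {
+ *           // one key TAB value record per line
+ *           out.write((key + "\t" + value + "\n").getBytes("UTF-8"));
+ *         }
+ *         public void close(Reporter reporter) throws IOException {
+ *           out.close();
+ *         }
+ *       };
+ *     }
+ *
+ *     public void checkOutputSpecs(FileSystem ignored, JobConf job)
+ *         throws IOException {
+ *       // typically: fail fast if the configured output directory already exists
+ *     }
+ *   }
+ * </pre>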
+ * + * @see RecordWriter + * @see JobConf + * @deprecated Use {@link org.apache.hadoop.mapreduce.OutputFormat} instead. + */ +@Deprecated +public interface OutputFormat { + + /** + * Get the {@link RecordWriter} for the given job. + * + * @param ignored + * @param job configuration for the job whose output is being written. + * @param name the unique name for this part of the output. + * @param progress mechanism for reporting progress while writing to file. + * @return a {@link RecordWriter} to write the output for the job. + * @throws IOException + */ + RecordWriter getRecordWriter(FileSystem ignored, JobConf job, + String name, Progressable progress) + throws IOException; + + /** + * Check for validity of the output-specification for the job. + * + *

This is to validate the output specification for the job when it is
+ * submitted. Typically this checks that the output directory does not already
+ * exist, throwing an exception when it does, so that existing output is not
+ * overwritten.
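+ *
+ * For example, a file-based implementation typically does something along
+ * these lines (a sketch only; {@link FileOutputFormat} performs the real
+ * check, which also verifies that an output path has been configured):
+ * <pre>
+ *   Path outDir = FileOutputFormat.getOutputPath(job);
+ *   if (outDir != null && outDir.getFileSystem(job).exists(outDir)) {
+ *     throw new FileAlreadyExistsException(
+ *         "Output directory " + outDir + " already exists");
+ *   }
+ * </pre>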

+ * + * @param ignored + * @param job job configuration. + * @throws IOException when output should not be attempted + */ + void checkOutputSpecs(FileSystem ignored, JobConf job) throws IOException; +} + diff --git a/src/mapred/org/apache/hadoop/mapred/OutputLogFilter.java b/src/mapred/org/apache/hadoop/mapred/OutputLogFilter.java new file mode 100644 index 0000000..05dd8f3 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/OutputLogFilter.java @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; + +/** + * This class filters log files from directory given + * It doesnt accept paths having _logs. + * This can be used to list paths of output directory as follows: + * Path[] fileList = FileUtil.stat2Paths(fs.listStatus(outDir, + * new OutputLogFilter())); + * @deprecated Use + * {@link org.apache.hadoop.mapred.Utils.OutputFileUtils.OutputLogFilter} + * instead. + */ +public class OutputLogFilter implements PathFilter { + private static final PathFilter LOG_FILTER = + new Utils.OutputFileUtils.OutputLogFilter(); + + public boolean accept(Path path) { + return LOG_FILTER.accept(path); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/Partitioner.java b/src/mapred/org/apache/hadoop/mapred/Partitioner.java new file mode 100644 index 0000000..00f47e6 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/Partitioner.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +/** + * Partitions the key space. + * + *

Partitioner controls the partitioning of the keys of the + * intermediate map-outputs. The key (or a subset of the key) is used to derive + * the partition, typically by a hash function. The total number of partitions + * is the same as the number of reduce tasks for the job. Hence this controls + * which of the m reduce tasks the intermediate key (and hence the + * record) is sent for reduction.
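+ *
+ * For example, a partitioner that spreads records across reducers purely by
+ * a hash of the key could be written as below. This is a sketch: the class
+ * name is illustrative, and {@link org.apache.hadoop.mapred.lib.HashPartitioner}
+ * already provides this behaviour.
+ * <pre>
+ *   public class KeyHashPartitioner implements Partitioner<Text, Text> {
+ *     public void configure(JobConf job) { }       // nothing to configure
+ *     public int getPartition(Text key, Text value, int numPartitions) {
+ *       // mask the sign bit so the result is always in [0, numPartitions)
+ *       return (key.hashCode() & Integer.MAX_VALUE) % numPartitions;
+ *     }
+ *   }
+ * </pre>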

+ * + * @see Reducer + * @deprecated Use {@link org.apache.hadoop.mapreduce.Partitioner} instead. + */ +@Deprecated +public interface Partitioner<K2, V2> extends JobConfigurable { + + /** + * Get the partition number for a given key (hence record) given the total + * number of partitions i.e. number of reduce-tasks for the job. + * + *

Typically a hash function on all or a subset of the key.

+ * + * @param key the key to be paritioned. + * @param value the entry value. + * @param numPartitions the total number of partitions. + * @return the partition number for the key. + */ + int getPartition(K2 key, V2 value, int numPartitions); +} diff --git a/src/mapred/org/apache/hadoop/mapred/QueueAclsInfo.java b/src/mapred/org/apache/hadoop/mapred/QueueAclsInfo.java new file mode 100644 index 0000000..f9056e1 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/QueueAclsInfo.java @@ -0,0 +1,80 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableUtils; + +/** + * Class to encapsulate Queue ACLs for a particular + * user. + * + */ +class QueueAclsInfo implements Writable { + + private String queueName; + private String[] operations; + /** + * Default constructor for QueueAclsInfo. + * + */ + QueueAclsInfo() { + + } + + /** + * Construct a new QueueAclsInfo object using the queue name and the + * queue operations array + * + * @param queueName Name of the job queue + * @param queue operations + * + */ + QueueAclsInfo(String queueName, String[] operations) { + this.queueName = queueName; + this.operations = operations; + } + + String getQueueName() { + return queueName; + } + + void setQueueName(String queueName) { + this.queueName = queueName; + } + + String[] getOperations() { + return operations; + } + + @Override + public void readFields(DataInput in) throws IOException { + queueName = Text.readString(in); + operations = WritableUtils.readStringArray(in); + } + + @Override + public void write(DataOutput out) throws IOException { + Text.writeString(out, queueName); + WritableUtils.writeStringArray(out, operations); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/QueueManager.java b/src/mapred/org/apache/hadoop/mapred/QueueManager.java new file mode 100644 index 0000000..9c10b42 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/QueueManager.java @@ -0,0 +1,383 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.io.PrintWriter; +import java.io.Writer; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Set; +import java.util.TreeSet; +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.SecurityUtil.AccessControlList; +import org.apache.hadoop.util.StringUtils; + +/** + * Class that exposes information about queues maintained by the Hadoop + * Map/Reduce framework. + * + * The Map/Reduce framework can be configured with one or more queues, + * depending on the scheduler it is configured with. While some + * schedulers work only with one queue, some schedulers support multiple + * queues. + * + * Queues can be configured with various properties. Some of these + * properties are common to all schedulers, and those are handled by this + * class. Schedulers might also associate several custom properties with + * queues. Where such a case exists, the queue name must be used to link + * the common properties with the scheduler specific ones. + */ +class QueueManager { + + private static final Log LOG = LogFactory.getLog(QueueManager.class); + + // Prefix in configuration for queue related keys + private static final String QUEUE_CONF_PROPERTY_NAME_PREFIX + = "mapred.queue."; + // Configured queues + private Set queueNames; + // Map of a queue and ACL property name with an ACL + private HashMap aclsMap; + // Map of a queue name to any generic object that represents + // scheduler information + private HashMap schedulerInfoObjects; + // Whether ACLs are enabled in the system or not. + private boolean aclsEnabled; + + //Resource in which queue acls are configured. + static final String QUEUE_ACLS_FILE_NAME = "mapred-queue-acls.xml"; + + /** + * Enum representing an operation that can be performed on a queue. + */ + static enum QueueOperation { + SUBMIT_JOB ("acl-submit-job", false), + ADMINISTER_JOBS ("acl-administer-jobs", true); + // TODO: Add ACL for LIST_JOBS when we have ability to authenticate + // users in UI + // TODO: Add ACL for CHANGE_ACL when we have an admin tool for + // configuring queues. + + private final String aclName; + private final boolean jobOwnerAllowed; + + QueueOperation(String aclName, boolean jobOwnerAllowed) { + this.aclName = aclName; + this.jobOwnerAllowed = jobOwnerAllowed; + } + + final String getAclName() { + return aclName; + } + + final boolean isJobOwnerAllowed() { + return jobOwnerAllowed; + } + } + + /** + * Construct a new QueueManager using configuration specified in the passed + * in {@link org.apache.hadoop.conf.Configuration} object. + * + * @param conf Configuration object where queue configuration is specified. + */ + public QueueManager(Configuration conf) { + queueNames = new TreeSet(); + aclsMap = new HashMap(); + schedulerInfoObjects = new HashMap(); + initialize(conf); + } + + /** + * Return the set of queues configured in the system. 
+ * + * The number of queues configured should be dependent on the Scheduler + * configured. Note that some schedulers work with only one queue, whereas + * others can support multiple queues. + * + * @return Set of queue names. + */ + public synchronized Set getQueues() { + return queueNames; + } + + /** + * Return true if the given {@link QueueManager.QueueOperation} can be + * performed by the specified user on the given queue. + * + * An operation is allowed if all users are provided access for this + * operation, or if either the user or any of the groups specified is + * provided access. + * + * @param queueName Queue on which the operation needs to be performed. + * @param oper The operation to perform + * @param ugi The user and groups who wish to perform the operation. + * + * @return true if the operation is allowed, false otherwise. + */ + public synchronized boolean hasAccess(String queueName, QueueOperation oper, + UserGroupInformation ugi) { + return hasAccess(queueName, null, oper, ugi); + } + + /** + * Return true if the given {@link QueueManager.QueueOperation} can be + * performed by the specified user on the specified job in the given queue. + * + * An operation is allowed either if the owner of the job is the user + * performing the task, all users are provided access for this + * operation, or if either the user or any of the groups specified is + * provided access. + * + * If the {@link QueueManager.QueueOperation} is not job specific then the + * job parameter is ignored. + * + * @param queueName Queue on which the operation needs to be performed. + * @param job The {@link JobInProgress} on which the operation is being + * performed. + * @param oper The operation to perform + * @param ugi The user and groups who wish to perform the operation. + * + * @return true if the operation is allowed, false otherwise. + */ + public synchronized boolean hasAccess(String queueName, JobInProgress job, + QueueOperation oper, + UserGroupInformation ugi) { + if (!aclsEnabled) { + return true; + } + + if (LOG.isDebugEnabled()) { + LOG.debug("checking access for : " + toFullPropertyName(queueName, + oper.getAclName())); + } + + if (oper.isJobOwnerAllowed()) { + if (job != null && job.getJobConf().getUser().equals(ugi.getUserName())) { + return true; + } + } + + AccessControlList acl = aclsMap.get(toFullPropertyName(queueName, oper.getAclName())); + if (acl == null) { + return false; + } + + // Check the ACL list + boolean allowed = acl.allAllowed(); + if (!allowed) { + // Check the allowed users list + if (acl.getUsers().contains(ugi.getUserName())) { + allowed = true; + } else { + // Check the allowed groups list + Set allowedGroups = acl.getGroups(); + for (String group : ugi.getGroupNames()) { + if (allowedGroups.contains(group)) { + allowed = true; + break; + } + } + } + } + + return allowed; + } + + /** + * Set a generic Object that represents scheduling information relevant + * to a queue. + * + * A string representation of this Object will be used by the framework + * to display in user facing applications like the JobTracker web UI and + * the hadoop CLI. + * + * @param queueName queue for which the scheduling information is to be set. + * @param queueInfo scheduling information for this queue. + */ + public synchronized void setSchedulerInfo(String queueName, + Object queueInfo) { + schedulerInfoObjects.put(queueName, queueInfo); + } + + /** + * Return the scheduler information configured for this queue. 
+ * + * @param queueName queue for which the scheduling information is required. + * @return The scheduling information for this queue. + * + * @see #setSchedulerInfo(String, Object) + */ + public synchronized Object getSchedulerInfo(String queueName) { + return schedulerInfoObjects.get(queueName); + } + + /** + * Refresh the acls for the configured queues in the system by reading + * it from mapred-queue-acls.xml. + * + * The previous acls are removed. Previously configured queues and + * if or not acl is disabled is retained. + * + * @throws IOException when queue ACL configuration file is invalid. + */ + synchronized void refreshAcls(Configuration conf) throws IOException { + try { + HashMap newAclsMap = + getQueueAcls(conf); + aclsMap = newAclsMap; + } catch (Throwable t) { + String exceptionString = StringUtils.stringifyException(t); + LOG.warn("Queue ACLs could not be refreshed because there was an " + + "exception in parsing the configuration: "+ exceptionString + + ". Existing ACLs are retained."); + throw new IOException(exceptionString); + } + + } + + private void checkDeprecation(Configuration conf) { + for(String queue: queueNames) { + for (QueueOperation oper : QueueOperation.values()) { + String key = toFullPropertyName(queue, oper.getAclName()); + String aclString = conf.get(key); + if(aclString != null) { + LOG.warn("Configuring queue ACLs in mapred-site.xml or " + + "hadoop-site.xml is deprecated. Configure queue ACLs in " + + QUEUE_ACLS_FILE_NAME); + return; + } + } + } + } + + private HashMap getQueueAcls(Configuration conf) { + checkDeprecation(conf); + conf.addResource(QUEUE_ACLS_FILE_NAME); + HashMap aclsMap = + new HashMap(); + for (String queue : queueNames) { + for (QueueOperation oper : QueueOperation.values()) { + String key = toFullPropertyName(queue, oper.getAclName()); + String aclString = conf.get(key, "*"); + aclsMap.put(key, new AccessControlList(aclString)); + } + } + return aclsMap; + } + + private void initialize(Configuration conf) { + aclsEnabled = conf.getBoolean("mapred.acls.enabled", false); + String[] queues = conf.getStrings("mapred.queue.names", + new String[] {JobConf.DEFAULT_QUEUE_NAME}); + addToSet(queueNames, queues); + aclsMap = getQueueAcls(conf); + } + + private static final String toFullPropertyName(String queue, + String property) { + return QUEUE_CONF_PROPERTY_NAME_PREFIX + queue + "." + property; + } + + private static final void addToSet(Set set, String[] elems) { + for (String elem : elems) { + set.add(elem); + } + } + + synchronized JobQueueInfo[] getJobQueueInfos() { + ArrayList queueInfoList = new ArrayList(); + for(String queue : queueNames) { + Object schedulerInfo = schedulerInfoObjects.get(queue); + if(schedulerInfo != null) { + queueInfoList.add(new JobQueueInfo(queue,schedulerInfo.toString())); + }else { + queueInfoList.add(new JobQueueInfo(queue,null)); + } + } + return (JobQueueInfo[]) queueInfoList.toArray(new JobQueueInfo[queueInfoList + .size()]); + } + + JobQueueInfo getJobQueueInfo(String queue) { + Object schedulingInfo = schedulerInfoObjects.get(queue); + if(schedulingInfo!=null){ + return new JobQueueInfo(queue,schedulingInfo.toString()); + }else { + return new JobQueueInfo(queue,null); + } + } + + /** + * Generates the array of QueueAclsInfo object. 
The array consists of only those queues + * for which user has acls + * + * @return QueueAclsInfo[] + * @throws java.io.IOException + */ + synchronized QueueAclsInfo[] getQueueAcls(UserGroupInformation + ugi) throws IOException { + //List of all QueueAclsInfo objects , this list is returned + ArrayList queueAclsInfolist = + new ArrayList(); + QueueOperation[] operations = QueueOperation.values(); + for (String queueName : queueNames) { + QueueAclsInfo queueAclsInfo = null; + ArrayList operationsAllowed = null; + for (QueueOperation operation : operations) { + if (hasAccess(queueName, operation, ugi)) { + if (operationsAllowed == null) { + operationsAllowed = new ArrayList(); + } + operationsAllowed.add(operation.getAclName()); + } + } + if (operationsAllowed != null) { + //There is atleast 1 operation supported for queue + //, hence initialize queueAclsInfo + queueAclsInfo = new QueueAclsInfo(queueName, operationsAllowed.toArray + (new String[operationsAllowed.size()])); + queueAclsInfolist.add(queueAclsInfo); + } + } + return queueAclsInfolist.toArray(new QueueAclsInfo[ + queueAclsInfolist.size()]); + } + + /** + * prints the configuration of QueueManager in Json format. + * The method should be modified accordingly whenever + * QueueManager(Configuration) constructor is modified. + * @param writer {@link}Writer object to which the configuration properties + * are printed in json format + * @throws IOException + */ + static void dumpConfiguration(Writer writer) throws IOException { + Configuration conf = new Configuration(false); + conf.addResource(QUEUE_ACLS_FILE_NAME); + Configuration.dumpConfiguration(conf, writer); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/RamManager.java b/src/mapred/org/apache/hadoop/mapred/RamManager.java new file mode 100644 index 0000000..38baf6b --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/RamManager.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.InputStream; + +/** + * RamManager manages a memory pool of a configured limit. + */ +interface RamManager { + /** + * Reserve memory for data coming through the given input-stream. + * + * @param requestedSize size of memory requested + * @param in input stream + * @throws InterruptedException + * @return true if memory was allocated immediately, + * else false + */ + boolean reserve(int requestedSize, InputStream in) + throws InterruptedException; + + /** + * Return memory to the pool. 
+ * + * @param requestedSize size of memory returned to the pool + */ + void unreserve(int requestedSize); +} diff --git a/src/mapred/org/apache/hadoop/mapred/RawKeyValueIterator.java b/src/mapred/org/apache/hadoop/mapred/RawKeyValueIterator.java new file mode 100644 index 0000000..e8f0185 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/RawKeyValueIterator.java @@ -0,0 +1,66 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.IOException; + +import org.apache.hadoop.io.DataInputBuffer; +import org.apache.hadoop.util.Progress; + +/** + * RawKeyValueIterator is an iterator used to iterate over + * the raw keys and values during sort/merge of intermediate data. + */ +public interface RawKeyValueIterator { + /** + * Gets the current raw key. + * + * @return Gets the current raw key as a DataInputBuffer + * @throws IOException + */ + DataInputBuffer getKey() throws IOException; + + /** + * Gets the current raw value. + * + * @return Gets the current raw value as a DataInputBuffer + * @throws IOException + */ + DataInputBuffer getValue() throws IOException; + + /** + * Sets up the current key and value (for getKey and getValue). + * + * @return true if there exists a key/value, + * false otherwise. + * @throws IOException + */ + boolean next() throws IOException; + + /** + * Closes the iterator so that the underlying streams can be closed. + * + * @throws IOException + */ + void close() throws IOException; + + /** Gets the Progress object; this has a float (0.0 - 1.0) + * indicating the bytes processed by the iterator so far + */ + Progress getProgress(); +} diff --git a/src/mapred/org/apache/hadoop/mapred/RecordReader.java b/src/mapred/org/apache/hadoop/mapred/RecordReader.java new file mode 100644 index 0000000..cf8fcd5 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/RecordReader.java @@ -0,0 +1,84 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.io.DataInput; + +/** + * RecordReader reads <key, value> pairs from an + * {@link InputSplit}. + * + *

RecordReader, typically, converts the byte-oriented view of + * the input, provided by the InputSplit, and presents a + * record-oriented view for the {@link Mapper} & {@link Reducer} tasks for + * processing. It thus assumes the responsibility of processing record + * boundaries and presenting the tasks with keys and values.
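+ *
+ * A typical consumer drives the reader with the object-reuse pattern below.
+ * This is a sketch only: inside a job the framework itself runs this loop for
+ * each {@link InputSplit} and feeds the pairs to the {@link Mapper}.
+ * <pre>
+ *   RecordReader<LongWritable, Text> reader = ...;  // e.g. from TextInputFormat
+ *   LongWritable key = reader.createKey();
+ *   Text value = reader.createValue();
+ *   while (reader.next(key, value)) {
+ *     // process (key, value); the same objects are refilled on every call
+ *   }
+ *   reader.close();
+ * </pre>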

+ * + * @see InputSplit + * @see InputFormat + */ +public interface RecordReader { + /** + * Reads the next key/value pair from the input for processing. + * + * @param key the key to read data into + * @param value the value to read data into + * @return true iff a key/value was read, false if at EOF + */ + boolean next(K key, V value) throws IOException; + + /** + * Create an object of the appropriate type to be used as a key. + * + * @return a new key object. + */ + K createKey(); + + /** + * Create an object of the appropriate type to be used as a value. + * + * @return a new value object. + */ + V createValue(); + + /** + * Returns the current position in the input. + * + * @return the current position in the input. + * @throws IOException + */ + long getPos() throws IOException; + + /** + * Close this {@link InputSplit} to future operations. + * + * @throws IOException + */ + public void close() throws IOException; + + /** + * How much of the input has the {@link RecordReader} consumed i.e. + * has been processed by? + * + * @return progress from 0.0 to 1.0. + * @throws IOException + */ + float getProgress() throws IOException; +} diff --git a/src/mapred/org/apache/hadoop/mapred/RecordWriter.java b/src/mapred/org/apache/hadoop/mapred/RecordWriter.java new file mode 100644 index 0000000..aace8ee --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/RecordWriter.java @@ -0,0 +1,51 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; + +import org.apache.hadoop.fs.FileSystem; + +/** + * RecordWriter writes the output <key, value> pairs + * to an output file. + + *

RecordWriter implementations write the job outputs to the + * {@link FileSystem}. + * + * @see OutputFormat + */ +public interface RecordWriter { + /** + * Writes a key/value pair. + * + * @param key the key to write. + * @param value the value to write. + * @throws IOException + */ + void write(K key, V value) throws IOException; + + /** + * Close this RecordWriter to future operations. + * + * @param reporter facility to report progress. + * @throws IOException + */ + void close(Reporter reporter) throws IOException; +} diff --git a/src/mapred/org/apache/hadoop/mapred/ReduceTask.java b/src/mapred/org/apache/hadoop/mapred/ReduceTask.java new file mode 100644 index 0000000..7e5674d --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/ReduceTask.java @@ -0,0 +1,2826 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.lang.Math; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.net.URI; +import java.net.URL; +import java.net.URLClassLoader; +import java.net.URLConnection; +import java.text.DecimalFormat; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.concurrent.ConcurrentHashMap; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.ChecksumFileSystem; +import org.apache.hadoop.fs.FSError; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.DataInputBuffer; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.io.IntWritable; +import org.apache.hadoop.io.RawComparator; +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableFactories; +import org.apache.hadoop.io.WritableFactory; +import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.io.SequenceFile.CompressionType; +import org.apache.hadoop.io.compress.CodecPool; +import org.apache.hadoop.io.compress.CompressionCodec; +import org.apache.hadoop.io.compress.Decompressor; +import org.apache.hadoop.io.compress.DefaultCodec; +import 
org.apache.hadoop.mapred.IFile.*; +import org.apache.hadoop.mapred.Merger.Segment; +import org.apache.hadoop.mapred.SortedRanges.SkipRangeIterator; +import org.apache.hadoop.mapred.TaskTracker.TaskInProgress; +import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.hadoop.metrics.MetricsContext; +import org.apache.hadoop.metrics.MetricsRecord; +import org.apache.hadoop.metrics.MetricsUtil; +import org.apache.hadoop.metrics.Updater; +import org.apache.hadoop.util.Progress; +import org.apache.hadoop.util.Progressable; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.StringUtils; + +/** A Reduce task. */ +class ReduceTask extends Task { + + static { // register a ctor + WritableFactories.setFactory + (ReduceTask.class, + new WritableFactory() { + public Writable newInstance() { return new ReduceTask(); } + }); + } + + private static final Log LOG = LogFactory.getLog(ReduceTask.class.getName()); + private int numMaps; + private ReduceCopier reduceCopier; + + private CompressionCodec codec; + + + { + getProgress().setStatus("reduce"); + setPhase(TaskStatus.Phase.SHUFFLE); // phase to start with + } + + private Progress copyPhase; + private Progress sortPhase; + private Progress reducePhase; + private Counters.Counter reduceShuffleBytes = + getCounters().findCounter(Counter.REDUCE_SHUFFLE_BYTES); + private Counters.Counter reduceInputKeyCounter = + getCounters().findCounter(Counter.REDUCE_INPUT_GROUPS); + private Counters.Counter reduceInputValueCounter = + getCounters().findCounter(Counter.REDUCE_INPUT_RECORDS); + private Counters.Counter reduceOutputCounter = + getCounters().findCounter(Counter.REDUCE_OUTPUT_RECORDS); + private Counters.Counter reduceCombineOutputCounter = + getCounters().findCounter(Counter.COMBINE_OUTPUT_RECORDS); + + // A custom comparator for map output files. Here the ordering is determined + // by the file's size and path. In case of files with same size and different + // file paths, the first parameter is considered smaller than the second one. + // In case of files with same size and path are considered equal. + private Comparator mapOutputFileComparator = + new Comparator() { + public int compare(FileStatus a, FileStatus b) { + if (a.getLen() < b.getLen()) + return -1; + else if (a.getLen() == b.getLen()) + if (a.getPath().toString().equals(b.getPath().toString())) + return 0; + else + return -1; + else + return 1; + } + }; + + // A sorted set for keeping a set of map output files on disk + private final SortedSet mapOutputFilesOnDisk = + new TreeSet(mapOutputFileComparator); + + public ReduceTask() { + super(); + } + + public ReduceTask(String jobFile, TaskAttemptID taskId, + int partition, int numMaps, int numSlotsRequired, + String username) { + super(jobFile, taskId, partition, numSlotsRequired, username); + this.numMaps = numMaps; + } + + private CompressionCodec initCodec() { + // check if map-outputs are to be compressed + if (conf.getCompressMapOutput()) { + Class codecClass = + conf.getMapOutputCompressorClass(DefaultCodec.class); + return ReflectionUtils.newInstance(codecClass, conf); + } + + return null; + } + + @Override + public TaskRunner createRunner(TaskTracker tracker, TaskInProgress tip) + throws IOException { + return new ReduceTaskRunner(tip, tracker, this.conf); + } + + @Override + public boolean isMapTask() { + return false; + } + + public int getNumMaps() { return numMaps; } + + /** + * Localize the given JobConf to be specific for this task. 
+ */ + @Override + public void localizeConfiguration(JobConf conf) throws IOException { + super.localizeConfiguration(conf); + conf.setNumMapTasks(numMaps); + } + + @Override + public void write(DataOutput out) throws IOException { + super.write(out); + + out.writeInt(numMaps); // write the number of maps + } + + @Override + public void readFields(DataInput in) throws IOException { + super.readFields(in); + + numMaps = in.readInt(); + } + + // Get the input files for the reducer. + private Path[] getMapFiles(FileSystem fs, boolean isLocal) + throws IOException { + List fileList = new ArrayList(); + if (isLocal) { + // for local jobs + for(int i = 0; i < numMaps; ++i) { + fileList.add(mapOutputFile.getInputFile(i, getTaskID())); + } + } else { + // for non local jobs + for (FileStatus filestatus : mapOutputFilesOnDisk) { + fileList.add(filestatus.getPath()); + } + } + return fileList.toArray(new Path[0]); + } + + private class ReduceValuesIterator + extends ValuesIterator { + public ReduceValuesIterator (RawKeyValueIterator in, + RawComparator comparator, + Class keyClass, + Class valClass, + Configuration conf, Progressable reporter) + throws IOException { + super(in, comparator, keyClass, valClass, conf, reporter); + } + + @Override + public VALUE next() { + reduceInputValueCounter.increment(1); + return moveToNext(); + } + + protected VALUE moveToNext() { + return super.next(); + } + + public void informReduceProgress() { + reducePhase.set(super.in.getProgress().get()); // update progress + reporter.progress(); + } + } + + private class SkippingReduceValuesIterator + extends ReduceValuesIterator { + private SkipRangeIterator skipIt; + private TaskUmbilicalProtocol umbilical; + private Counters.Counter skipGroupCounter; + private Counters.Counter skipRecCounter; + private long grpIndex = -1; + private Class keyClass; + private Class valClass; + private SequenceFile.Writer skipWriter; + private boolean toWriteSkipRecs; + private boolean hasNext; + private TaskReporter reporter; + + public SkippingReduceValuesIterator(RawKeyValueIterator in, + RawComparator comparator, Class keyClass, + Class valClass, Configuration conf, TaskReporter reporter, + TaskUmbilicalProtocol umbilical) throws IOException { + super(in, comparator, keyClass, valClass, conf, reporter); + this.umbilical = umbilical; + this.skipGroupCounter = + reporter.getCounter(Counter.REDUCE_SKIPPED_GROUPS); + this.skipRecCounter = + reporter.getCounter(Counter.REDUCE_SKIPPED_RECORDS); + this.toWriteSkipRecs = toWriteSkipRecs() && + SkipBadRecords.getSkipOutputPath(conf)!=null; + this.keyClass = keyClass; + this.valClass = valClass; + this.reporter = reporter; + skipIt = getSkipRanges().skipRangeIterator(); + mayBeSkip(); + } + + void nextKey() throws IOException { + super.nextKey(); + mayBeSkip(); + } + + boolean more() { + return super.more() && hasNext; + } + + private void mayBeSkip() throws IOException { + hasNext = skipIt.hasNext(); + if(!hasNext) { + LOG.warn("Further groups got skipped."); + return; + } + grpIndex++; + long nextGrpIndex = skipIt.next(); + long skip = 0; + long skipRec = 0; + while(grpIndex0 && skipIt.skippedAllRanges() && skipWriter!=null) { + skipWriter.close(); + } + skipGroupCounter.increment(skip); + skipRecCounter.increment(skipRec); + reportNextRecordRange(umbilical, grpIndex); + } + + @SuppressWarnings("unchecked") + private void writeSkippedRec(KEY key, VALUE value) throws IOException{ + if(skipWriter==null) { + Path skipDir = SkipBadRecords.getSkipOutputPath(conf); + Path skipFile = new 
Path(skipDir, getTaskID().toString()); + skipWriter = SequenceFile.createWriter( + skipFile.getFileSystem(conf), conf, skipFile, + keyClass, valClass, + CompressionType.BLOCK, reporter); + } + skipWriter.append(key, value); + } + } + + @Override + @SuppressWarnings("unchecked") + public void run(JobConf job, final TaskUmbilicalProtocol umbilical) + throws IOException, InterruptedException, ClassNotFoundException { + this.umbilical = umbilical; + job.setBoolean("mapred.skip.on", isSkipping()); + + if (isMapOrReduce()) { + copyPhase = getProgress().addPhase("copy"); + sortPhase = getProgress().addPhase("sort"); + reducePhase = getProgress().addPhase("reduce"); + } + // start thread that will handle communication with parent + TaskReporter reporter = new TaskReporter(getProgress(), umbilical); + reporter.startCommunicationThread(); + boolean useNewApi = job.getUseNewReducer(); + initialize(job, getJobID(), reporter, useNewApi); + + // check if it is a cleanupJobTask + if (jobCleanup) { + runJobCleanupTask(umbilical, reporter); + return; + } + if (jobSetup) { + runJobSetupTask(umbilical, reporter); + return; + } + if (taskCleanup) { + runTaskCleanupTask(umbilical, reporter); + return; + } + + // Initialize the codec + codec = initCodec(); + + boolean isLocal = "local".equals(job.get("mapred.job.tracker", "local")); + if (!isLocal) { + reduceCopier = new ReduceCopier(umbilical, job, reporter); + if (!reduceCopier.fetchOutputs()) { + if(reduceCopier.mergeThrowable instanceof FSError) { + throw (FSError)reduceCopier.mergeThrowable; + } + throw new IOException("Task: " + getTaskID() + + " - The reduce copier failed", reduceCopier.mergeThrowable); + } + } + copyPhase.complete(); // copy is already complete + setPhase(TaskStatus.Phase.SORT); + statusUpdate(umbilical); + + final FileSystem rfs = FileSystem.getLocal(job).getRaw(); + RawKeyValueIterator rIter = isLocal + ? 
Merger.merge(job, rfs, job.getMapOutputKeyClass(), + job.getMapOutputValueClass(), codec, getMapFiles(rfs, true), + !conf.getKeepFailedTaskFiles(), job.getInt("io.sort.factor", 100), + new Path(getTaskID().toString()), job.getOutputKeyComparator(), + reporter, spilledRecordsCounter, null) + : reduceCopier.createKVIterator(job, rfs, reporter); + + // free up the data structures + mapOutputFilesOnDisk.clear(); + + sortPhase.complete(); // sort is complete + setPhase(TaskStatus.Phase.REDUCE); + statusUpdate(umbilical); + Class keyClass = job.getMapOutputKeyClass(); + Class valueClass = job.getMapOutputValueClass(); + RawComparator comparator = job.getOutputValueGroupingComparator(); + + if (useNewApi) { + runNewReducer(job, umbilical, reporter, rIter, comparator, + keyClass, valueClass); + } else { + runOldReducer(job, umbilical, reporter, rIter, comparator, + keyClass, valueClass); + } + done(umbilical, reporter); + } + + @SuppressWarnings("unchecked") + private + void runOldReducer(JobConf job, + TaskUmbilicalProtocol umbilical, + final TaskReporter reporter, + RawKeyValueIterator rIter, + RawComparator comparator, + Class keyClass, + Class valueClass) throws IOException { + Reducer reducer = + ReflectionUtils.newInstance(job.getReducerClass(), job); + // make output collector + String finalName = getOutputName(getPartition()); + + FileSystem fs = FileSystem.get(job); + + final RecordWriter out = + job.getOutputFormat().getRecordWriter(fs, job, finalName, reporter); + + OutputCollector collector = + new OutputCollector() { + public void collect(OUTKEY key, OUTVALUE value) + throws IOException { + out.write(key, value); + reduceOutputCounter.increment(1); + // indicate that progress update needs to be sent + reporter.progress(); + } + }; + + // apply reduce function + try { + //increment processed counter only if skipping feature is enabled + boolean incrProcCount = SkipBadRecords.getReducerMaxSkipGroups(job)>0 && + SkipBadRecords.getAutoIncrReducerProcCount(job); + + ReduceValuesIterator values = isSkipping() ? + new SkippingReduceValuesIterator(rIter, + comparator, keyClass, valueClass, + job, reporter, umbilical) : + new ReduceValuesIterator(rIter, + job.getOutputValueGroupingComparator(), keyClass, valueClass, + job, reporter); + values.informReduceProgress(); + while (values.more()) { + reduceInputKeyCounter.increment(1); + reducer.reduce(values.getKey(), values, collector, reporter); + if(incrProcCount) { + reporter.incrCounter(SkipBadRecords.COUNTER_GROUP, + SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS, 1); + } + values.nextKey(); + values.informReduceProgress(); + } + + //Clean up: repeated in catch block below + reducer.close(); + out.close(reporter); + //End of clean up. 
+ } catch (IOException ioe) { + try { + reducer.close(); + } catch (IOException ignored) {} + + try { + out.close(reporter); + } catch (IOException ignored) {} + + throw ioe; + } + } + + static class NewTrackingRecordWriter + extends org.apache.hadoop.mapreduce.RecordWriter { + private final org.apache.hadoop.mapreduce.RecordWriter real; + private final org.apache.hadoop.mapreduce.Counter outputRecordCounter; + + NewTrackingRecordWriter(org.apache.hadoop.mapreduce.RecordWriter real, + org.apache.hadoop.mapreduce.Counter recordCounter) { + this.real = real; + this.outputRecordCounter = recordCounter; + } + + @Override + public void close(TaskAttemptContext context) throws IOException, + InterruptedException { + real.close(context); + } + + @Override + public void write(K key, V value) throws IOException, InterruptedException { + real.write(key,value); + outputRecordCounter.increment(1); + } + } + + @SuppressWarnings("unchecked") + private + void runNewReducer(JobConf job, + final TaskUmbilicalProtocol umbilical, + final TaskReporter reporter, + RawKeyValueIterator rIter, + RawComparator comparator, + Class keyClass, + Class valueClass + ) throws IOException,InterruptedException, + ClassNotFoundException { + // wrap value iterator to report progress. + final RawKeyValueIterator rawIter = rIter; + rIter = new RawKeyValueIterator() { + public void close() throws IOException { + rawIter.close(); + } + public DataInputBuffer getKey() throws IOException { + return rawIter.getKey(); + } + public Progress getProgress() { + return rawIter.getProgress(); + } + public DataInputBuffer getValue() throws IOException { + return rawIter.getValue(); + } + public boolean next() throws IOException { + boolean ret = rawIter.next(); + reducePhase.set(rawIter.getProgress().get()); + reporter.progress(); + return ret; + } + }; + // make a task context so we can get the classes + org.apache.hadoop.mapreduce.TaskAttemptContext taskContext = + new org.apache.hadoop.mapreduce.TaskAttemptContext(job, getTaskID()); + // make a reducer + org.apache.hadoop.mapreduce.Reducer reducer = + (org.apache.hadoop.mapreduce.Reducer) + ReflectionUtils.newInstance(taskContext.getReducerClass(), job); + org.apache.hadoop.mapreduce.RecordWriter output = + (org.apache.hadoop.mapreduce.RecordWriter) + outputFormat.getRecordWriter(taskContext); + org.apache.hadoop.mapreduce.RecordWriter trackedRW = + new NewTrackingRecordWriter(output, reduceOutputCounter); + job.setBoolean("mapred.skip.on", isSkipping()); + org.apache.hadoop.mapreduce.Reducer.Context + reducerContext = createReduceContext(reducer, job, getTaskID(), + rIter, reduceInputKeyCounter, + reduceInputValueCounter, + trackedRW, committer, + reporter, comparator, keyClass, + valueClass); + reducer.run(reducerContext); + output.close(reducerContext); + } + + private static enum CopyOutputErrorType { + NO_ERROR, + READ_ERROR, + OTHER_ERROR + }; + + class ReduceCopier implements MRConstants { + + /** Reference to the umbilical object */ + private TaskUmbilicalProtocol umbilical; + private final TaskReporter reporter; + + /** Reference to the task object */ + + /** Number of ms before timing out a copy */ + private static final int STALLED_COPY_TIMEOUT = 3 * 60 * 1000; + + /** Max events to fetch in one go from the tasktracker */ + private static final int MAX_EVENTS_TO_FETCH = 10000; + + /** + * our reduce task instance + */ + private ReduceTask reduceTask; + + /** + * the list of map outputs currently being copied + */ + private List scheduledCopies; + + /** + * the results of 
dispatched copy attempts + */ + private List copyResults; + + /** + * the number of outputs to copy in parallel + */ + private int numCopiers; + + /** + * a number that is set to the max #fetches we'd schedule and then + * pause the schduling + */ + private int maxInFlight; + + /** + * the amount of time spent on fetching one map output before considering + * it as failed and notifying the jobtracker about it. + */ + private int maxBackoff; + + /** + * busy hosts from which copies are being backed off + * Map of host -> next contact time + */ + private Map penaltyBox; + + /** + * the set of unique hosts from which we are copying + */ + private Set uniqueHosts; + + /** + * A reference to the RamManager for writing the map outputs to. + */ + + private ShuffleRamManager ramManager; + + /** + * A reference to the local file system for writing the map outputs to. + */ + private FileSystem localFileSys; + + private FileSystem rfs; + /** + * Number of files to merge at a time + */ + private int ioSortFactor; + + /** + * A reference to the throwable object (if merge throws an exception) + */ + private volatile Throwable mergeThrowable; + + /** + * A flag to indicate when to exit localFS merge + */ + private volatile boolean exitLocalFSMerge = false; + + /** + * A flag to indicate when to exit getMapEvents thread + */ + private volatile boolean exitGetMapEvents = false; + + /** + * When we accumulate maxInMemOutputs number of files in ram, we merge/spill + */ + private final int maxInMemOutputs; + + /** + * Usage threshold for in-memory output accumulation. + */ + private final float maxInMemCopyPer; + + /** + * Maximum memory usage of map outputs to merge from memory into + * the reduce, in bytes. + */ + private final long maxInMemReduce; + + /** + * The threads for fetching the files. + */ + private List copiers = null; + + /** + * The object for metrics reporting. + */ + private ShuffleClientMetrics shuffleClientMetrics = null; + + /** + * the minimum interval between tasktracker polls + */ + private static final long MIN_POLL_INTERVAL = 1000; + + /** + * a list of map output locations for fetch retrials + */ + private List retryFetches = + new ArrayList(); + + /** + * The set of required map outputs + */ + private Set copiedMapOutputs = + Collections.synchronizedSet(new TreeSet()); + + /** + * The set of obsolete map taskids. + */ + private Set obsoleteMapIds = + Collections.synchronizedSet(new TreeSet()); + + private Random random = null; + + /** + * the max of all the map completion times + */ + private int maxMapRuntime; + + /** + * Maximum number of fetch-retries per-map. + */ + private volatile int maxFetchRetriesPerMap; + + /** + * Combiner runner, if a combiner is needed + */ + private CombinerRunner combinerRunner; + + /** + * Resettable collector used for combine. + */ + private CombineOutputCollector combineCollector = null; + + /** + * Maximum percent of failed fetch attempt before killing the reduce task. + */ + private static final float MAX_ALLOWED_FAILED_FETCH_ATTEMPT_PERCENT = 0.5f; + + /** + * Minimum percent of progress required to keep the reduce alive. + */ + private static final float MIN_REQUIRED_PROGRESS_PERCENT = 0.5f; + + /** + * Maximum percent of shuffle execution time required to keep the reducer alive. + */ + private static final float MAX_ALLOWED_STALL_TIME_PERCENT = 0.5f; + + /** + * Minimum number of map fetch retries. 
+ */ + private static final int MIN_FETCH_RETRIES_PER_MAP = 2; + + /** + * The minimum percentage of maps yet to be copied, + * which indicates end of shuffle + */ + private static final float MIN_PENDING_MAPS_PERCENT = 0.25f; + /** + * Maximum no. of unique maps from which we failed to fetch map-outputs + * even after {@link #maxFetchRetriesPerMap} retries; after this the + * reduce task is failed. + */ + private int maxFailedUniqueFetches = 5; + + /** + * The maps from which we fail to fetch map-outputs + * even after {@link #maxFetchRetriesPerMap} retries. + */ + Set fetchFailedMaps = new TreeSet(); + + /** + * A map of taskId -> no. of failed fetches + */ + Map mapTaskToFailedFetchesMap = + new HashMap(); + + /** + * Initial backoff interval (milliseconds) + */ + private static final int BACKOFF_INIT = 4000; + + /** + * The interval for logging in the shuffle + */ + private static final int MIN_LOG_TIME = 60000; + + /** + * List of in-memory map-outputs. + */ + private final List mapOutputsFilesInMemory = + Collections.synchronizedList(new LinkedList()); + + /** + * The map for (Hosts, List of MapIds from this Host) maintaining + * map output locations + */ + private final Map> mapLocations = + new ConcurrentHashMap>(); + + /** + * This class contains the methods that should be used for metrics-reporting + * the specific metrics for shuffle. This class actually reports the + * metrics for the shuffle client (the ReduceTask), and hence the name + * ShuffleClientMetrics. + */ + class ShuffleClientMetrics implements Updater { + private MetricsRecord shuffleMetrics = null; + private int numFailedFetches = 0; + private int numSuccessFetches = 0; + private long numBytes = 0; + private int numThreadsBusy = 0; + ShuffleClientMetrics(JobConf conf) { + MetricsContext metricsContext = MetricsUtil.getContext("mapred"); + this.shuffleMetrics = + MetricsUtil.createRecord(metricsContext, "shuffleInput"); + this.shuffleMetrics.setTag("user", conf.getUser()); + this.shuffleMetrics.setTag("jobName", conf.getJobName()); + this.shuffleMetrics.setTag("jobId", ReduceTask.this.getJobID().toString()); + this.shuffleMetrics.setTag("taskId", getTaskID().toString()); + this.shuffleMetrics.setTag("sessionId", conf.getSessionId()); + metricsContext.registerUpdater(this); + } + public synchronized void inputBytes(long numBytes) { + this.numBytes += numBytes; + } + public synchronized void failedFetch() { + ++numFailedFetches; + } + public synchronized void successFetch() { + ++numSuccessFetches; + } + public synchronized void threadBusy() { + ++numThreadsBusy; + } + public synchronized void threadFree() { + --numThreadsBusy; + } + public void doUpdates(MetricsContext unused) { + synchronized (this) { + shuffleMetrics.incrMetric("shuffle_input_bytes", numBytes); + shuffleMetrics.incrMetric("shuffle_failed_fetches", + numFailedFetches); + shuffleMetrics.incrMetric("shuffle_success_fetches", + numSuccessFetches); + if (numCopiers != 0) { + shuffleMetrics.setMetric("shuffle_fetchers_busy_percent", + 100*((float)numThreadsBusy/numCopiers)); + } else { + shuffleMetrics.setMetric("shuffle_fetchers_busy_percent", 0); + } + numBytes = 0; + numSuccessFetches = 0; + numFailedFetches = 0; + } + shuffleMetrics.update(); + } + } + + /** Represents the result of an attempt to copy a map output */ + private class CopyResult { + + // the map output location against which a copy attempt was made + private final MapOutputLocation loc; + + // the size of the file copied, -1 if the transfer failed + private final long size; + + //a 
flag signifying whether a copy result is obsolete + private static final int OBSOLETE = -2; + + private CopyOutputErrorType error = CopyOutputErrorType.NO_ERROR; + CopyResult(MapOutputLocation loc, long size) { + this.loc = loc; + this.size = size; + } + + CopyResult(MapOutputLocation loc, long size, CopyOutputErrorType error) { + this.loc = loc; + this.size = size; + this.error = error; + } + + public boolean getSuccess() { return size >= 0; } + public boolean isObsolete() { + return size == OBSOLETE; + } + public long getSize() { return size; } + public String getHost() { return loc.getHost(); } + public MapOutputLocation getLocation() { return loc; } + public CopyOutputErrorType getError() { return error; } + } + + private int nextMapOutputCopierId = 0; + private boolean reportReadErrorImmediately; + + /** + * Abstraction to track a map-output. + */ + private class MapOutputLocation { + TaskAttemptID taskAttemptId; + TaskID taskId; + String ttHost; + URL taskOutput; + + public MapOutputLocation(TaskAttemptID taskAttemptId, + String ttHost, URL taskOutput) { + this.taskAttemptId = taskAttemptId; + this.taskId = this.taskAttemptId.getTaskID(); + this.ttHost = ttHost; + this.taskOutput = taskOutput; + } + + public TaskAttemptID getTaskAttemptId() { + return taskAttemptId; + } + + public TaskID getTaskId() { + return taskId; + } + + public String getHost() { + return ttHost; + } + + public URL getOutputLocation() { + return taskOutput; + } + } + + /** Describes the output of a map; could either be on disk or in-memory. */ + private class MapOutput { + final TaskID mapId; + final TaskAttemptID mapAttemptId; + + final Path file; + final Configuration conf; + + byte[] data; + final boolean inMemory; + long compressedSize; + + public MapOutput(TaskID mapId, TaskAttemptID mapAttemptId, + Configuration conf, Path file, long size) { + this.mapId = mapId; + this.mapAttemptId = mapAttemptId; + + this.conf = conf; + this.file = file; + this.compressedSize = size; + + this.data = null; + + this.inMemory = false; + } + + public MapOutput(TaskID mapId, TaskAttemptID mapAttemptId, byte[] data, int compressedLength) { + this.mapId = mapId; + this.mapAttemptId = mapAttemptId; + + this.file = null; + this.conf = null; + + this.data = data; + this.compressedSize = compressedLength; + + this.inMemory = true; + } + + public void discard() throws IOException { + if (inMemory) { + data = null; + } else { + FileSystem fs = file.getFileSystem(conf); + fs.delete(file, true); + } + } + } + + class ShuffleRamManager implements RamManager { + /* Maximum percentage of the in-memory limit that a single shuffle can + * consume*/ + private static final float MAX_SINGLE_SHUFFLE_SEGMENT_FRACTION = 0.25f; + + /* Maximum percentage of shuffle-threads which can be stalled + * simultaneously after which a merge is triggered. 
*/ + private static final float MAX_STALLED_SHUFFLE_THREADS_FRACTION = 0.75f; + + private final long maxSize; + private final long maxSingleShuffleLimit; + + private long size = 0; + + private Object dataAvailable = new Object(); + private long fullSize = 0; + private int numPendingRequests = 0; + private int numRequiredMapOutputs = 0; + private int numClosed = 0; + private boolean closed = false; + + public ShuffleRamManager(Configuration conf) throws IOException { + final float maxInMemCopyUse = + conf.getFloat("mapred.job.shuffle.input.buffer.percent", 0.70f); + if (maxInMemCopyUse > 1.0 || maxInMemCopyUse < 0.0) { + throw new IOException("mapred.job.shuffle.input.buffer.percent" + + maxInMemCopyUse); + } + maxSize = (long)Math.min( + Runtime.getRuntime().maxMemory() * maxInMemCopyUse, + Integer.MAX_VALUE); + maxSingleShuffleLimit = (long)(maxSize * MAX_SINGLE_SHUFFLE_SEGMENT_FRACTION); + LOG.info("ShuffleRamManager: MemoryLimit=" + maxSize + + ", MaxSingleShuffleLimit=" + maxSingleShuffleLimit); + } + + public synchronized boolean reserve(int requestedSize, InputStream in) + throws InterruptedException { + // Wait till the request can be fulfilled... + while ((size + requestedSize) > maxSize) { + + // Close the input... + if (in != null) { + try { + in.close(); + } catch (IOException ie) { + LOG.info("Failed to close connection with: " + ie); + } finally { + in = null; + } + } + + // Track pending requests + synchronized (dataAvailable) { + ++numPendingRequests; + dataAvailable.notify(); + } + + // Wait for memory to free up + wait(); + + // Track pending requests + synchronized (dataAvailable) { + --numPendingRequests; + } + } + + size += requestedSize; + + return (in != null); + } + + public synchronized void unreserve(int requestedSize) { + size -= requestedSize; + + synchronized (dataAvailable) { + fullSize -= requestedSize; + --numClosed; + } + + // Notify the threads blocked on RamManager.reserve + notifyAll(); + } + + public boolean waitForDataToMerge() throws InterruptedException { + boolean done = false; + synchronized (dataAvailable) { + // Start in-memory merge if manager has been closed or... + while (!closed + && + // In-memory threshold exceeded and at least two segments + // have been fetched + (getPercentUsed() < maxInMemCopyPer || numClosed < 2) + && + // More than "mapred.inmem.merge.threshold" map outputs + // have been fetched into memory + (maxInMemOutputs <= 0 || numClosed < maxInMemOutputs) + && + // More than MAX... threads are blocked on the RamManager + // or the blocked threads are the last map outputs to be + // fetched. If numRequiredMapOutputs is zero, either + // setNumCopiedMapOutputs has not been called (no map ouputs + // have been fetched, so there is nothing to merge) or the + // last map outputs being transferred without + // contention, so a merge would be premature. 
+ (numPendingRequests < + numCopiers*MAX_STALLED_SHUFFLE_THREADS_FRACTION && + (0 == numRequiredMapOutputs || + numPendingRequests < numRequiredMapOutputs))) { + dataAvailable.wait(); + } + done = closed; + } + return done; + } + + public void closeInMemoryFile(int requestedSize) { + synchronized (dataAvailable) { + fullSize += requestedSize; + ++numClosed; + dataAvailable.notify(); + } + } + + public void setNumCopiedMapOutputs(int numRequiredMapOutputs) { + synchronized (dataAvailable) { + this.numRequiredMapOutputs = numRequiredMapOutputs; + dataAvailable.notify(); + } + } + + public void close() { + synchronized (dataAvailable) { + closed = true; + LOG.info("Closed ram manager"); + dataAvailable.notify(); + } + } + + private float getPercentUsed() { + return (float)fullSize/maxSize; + } + + boolean canFitInMemory(long requestedSize) { + return (requestedSize < Integer.MAX_VALUE && + requestedSize < maxSingleShuffleLimit); + } + } + + /** Copies map outputs as they become available */ + private class MapOutputCopier extends Thread { + // basic/unit connection timeout (in milliseconds) + private final static int UNIT_CONNECT_TIMEOUT = 30 * 1000; + // default read timeout (in milliseconds) + private final static int DEFAULT_READ_TIMEOUT = 3 * 60 * 1000; + private final int shuffleConnectionTimeout; + private final int shuffleReadTimeout; + + private MapOutputLocation currentLocation = null; + private int id = nextMapOutputCopierId++; + private Reporter reporter; + private boolean readError = false; + + // Decompression of map-outputs + private CompressionCodec codec = null; + private Decompressor decompressor = null; + private volatile boolean shutdown = false; + + public MapOutputCopier(JobConf job, Reporter reporter) { + setName("MapOutputCopier " + reduceTask.getTaskID() + "." + id); + LOG.debug(getName() + " created"); + this.reporter = reporter; + + shuffleConnectionTimeout = + job.getInt("mapreduce.reduce.shuffle.connect.timeout", STALLED_COPY_TIMEOUT); + shuffleReadTimeout = + job.getInt("mapreduce.reduce.shuffle.read.timeout", DEFAULT_READ_TIMEOUT); + + if (job.getCompressMapOutput()) { + Class codecClass = + job.getMapOutputCompressorClass(DefaultCodec.class); + codec = ReflectionUtils.newInstance(codecClass, job); + decompressor = CodecPool.getDecompressor(codec); + } + setDaemon(true); + } + + public void stopCopier() { + shutdown = true; + this.interrupt(); + } + + + /** + * Fail the current file that we are fetching + * @return were we currently fetching? + */ + public synchronized boolean fail() { + if (currentLocation != null) { + finish(-1, CopyOutputErrorType.OTHER_ERROR); + return true; + } else { + return false; + } + } + + /** + * Get the current map output location. + */ + public synchronized MapOutputLocation getLocation() { + return currentLocation; + } + + private synchronized void start(MapOutputLocation loc) { + currentLocation = loc; + } + + private synchronized void finish(long size, CopyOutputErrorType error) { + if (currentLocation != null) { + LOG.debug(getName() + " finishing " + currentLocation + " =" + size); + synchronized (copyResults) { + copyResults.add(new CopyResult(currentLocation, size, error)); + copyResults.notify(); + } + currentLocation = null; + } + } + + /** Loop forever and fetch map outputs as they become available. 
+     * The thread exits when it is interrupted by {@link ReduceTaskRunner}
+     */
+    @Override
+    public void run() {
+      while (!shutdown) {
+        try {
+          MapOutputLocation loc = null;
+          long size = -1;
+
+          synchronized (scheduledCopies) {
+            while (scheduledCopies.isEmpty()) {
+              scheduledCopies.wait();
+            }
+            loc = scheduledCopies.remove(0);
+          }
+          CopyOutputErrorType error = CopyOutputErrorType.OTHER_ERROR;
+          readError = false;
+          try {
+            shuffleClientMetrics.threadBusy();
+            start(loc);
+            size = copyOutput(loc);
+            shuffleClientMetrics.successFetch();
+            error = CopyOutputErrorType.NO_ERROR;
+          } catch (IOException e) {
+            LOG.warn(reduceTask.getTaskID() + " copy failed: " +
+                     loc.getTaskAttemptId() + " from " + loc.getHost());
+            LOG.warn(StringUtils.stringifyException(e));
+            shuffleClientMetrics.failedFetch();
+            if (readError) {
+              error = CopyOutputErrorType.READ_ERROR;
+            }
+            // Reset
+            size = -1;
+          } finally {
+            shuffleClientMetrics.threadFree();
+            finish(size, error);
+          }
+        } catch (InterruptedException e) {
+          if (shutdown)
+            break; // ALL DONE
+        } catch (FSError e) {
+          LOG.error("Task: " + reduceTask.getTaskID() + " - FSError: " +
+                    StringUtils.stringifyException(e));
+          try {
+            umbilical.fsError(reduceTask.getTaskID(), e.getMessage());
+          } catch (IOException io) {
+            LOG.error("Could not notify TT of FSError: " +
+                      StringUtils.stringifyException(io));
+          }
+        } catch (Throwable th) {
+          String msg = getTaskID() + " : Map output copy failure : " +
+                       StringUtils.stringifyException(th);
+          reportFatalError(getTaskID(), th, msg);
+        }
+      }
+
+      if (decompressor != null) {
+        CodecPool.returnDecompressor(decompressor);
+      }
+
+    }
+
+    /** Copies a map output from a remote host, via HTTP.
+     * @param loc the map output location to be copied
+     * @return the size of the copied map output, or CopyResult.OBSOLETE
+     *         if the output no longer needed to be copied
+     * @throws IOException if there is an error copying the file
+     * @throws InterruptedException if the copier should give up
+     */
+    private long copyOutput(MapOutputLocation loc
+                            ) throws IOException, InterruptedException {
+      // check if we still need to copy the output from this location
+      if (copiedMapOutputs.contains(loc.getTaskId()) ||
+          obsoleteMapIds.contains(loc.getTaskAttemptId())) {
+        return CopyResult.OBSOLETE;
+      }
+
+      // a temp filename.
If this file gets created in ramfs, we're fine, + // else, we will check the localFS to find a suitable final location + // for this path + TaskAttemptID reduceId = reduceTask.getTaskID(); + Path filename = new Path("/" + TaskTracker.getIntermediateOutputDir( + reduceId.getJobID().toString(), + reduceId.toString()) + + "/map_" + + loc.getTaskId().getId() + ".out"); + + // Copy the map output to a temp file whose name is unique to this attempt + Path tmpMapOutput = new Path(filename+"-"+id); + + // Copy the map output + MapOutput mapOutput = getMapOutput(loc, tmpMapOutput, + reduceId.getTaskID().getId()); + if (mapOutput == null) { + throw new IOException("Failed to fetch map-output for " + + loc.getTaskAttemptId() + " from " + + loc.getHost()); + } + + // The size of the map-output + long bytes = mapOutput.compressedSize; + + // lock the ReduceTask while we do the rename + synchronized (ReduceTask.this) { + if (copiedMapOutputs.contains(loc.getTaskId())) { + mapOutput.discard(); + return CopyResult.OBSOLETE; + } + + // Special case: discard empty map-outputs + if (bytes == 0) { + try { + mapOutput.discard(); + } catch (IOException ioe) { + LOG.info("Couldn't discard output of " + loc.getTaskId()); + } + + // Note that we successfully copied the map-output + noteCopiedMapOutput(loc.getTaskId()); + + return bytes; + } + + // Process map-output + if (mapOutput.inMemory) { + // Save it in the synchronized list of map-outputs + mapOutputsFilesInMemory.add(mapOutput); + } else { + // Rename the temporary file to the final file; + // ensure it is on the same partition + tmpMapOutput = mapOutput.file; + filename = new Path(tmpMapOutput.getParent(), filename.getName()); + if (!localFileSys.rename(tmpMapOutput, filename)) { + localFileSys.delete(tmpMapOutput, true); + bytes = -1; + throw new IOException("Failed to rename map output " + + tmpMapOutput + " to " + filename); + } + + synchronized (mapOutputFilesOnDisk) { + addToMapOutputFilesOnDisk(localFileSys.getFileStatus(filename)); + } + } + + // Note that we successfully copied the map-output + noteCopiedMapOutput(loc.getTaskId()); + } + + return bytes; + } + + /** + * Save the map taskid whose output we just copied. + * This function assumes that it has been synchronized on ReduceTask.this. + * + * @param taskId map taskid + */ + private void noteCopiedMapOutput(TaskID taskId) { + copiedMapOutputs.add(taskId); + ramManager.setNumCopiedMapOutputs(numMaps - copiedMapOutputs.size()); + } + + /** + * Get the map output into a local file (either in the inmemory fs or on the + * local fs) from the remote server. + * We use the file system so that we generate checksum files on the data. 
+ * @param mapOutputLoc map-output to be fetched + * @param filename the filename to write the data into + * @param connectionTimeout number of milliseconds for connection timeout + * @param readTimeout number of milliseconds for read timeout + * @return the path of the file that got created + * @throws IOException when something goes wrong + */ + private MapOutput getMapOutput(MapOutputLocation mapOutputLoc, + Path filename, int reduce) + throws IOException, InterruptedException { + // Connect + URLConnection connection = + mapOutputLoc.getOutputLocation().openConnection(); + InputStream input = getInputStream(connection, shuffleConnectionTimeout, + shuffleReadTimeout); + + // Validate header from map output + TaskAttemptID mapId = null; + try { + mapId = + TaskAttemptID.forName(connection.getHeaderField(FROM_MAP_TASK)); + } catch (IllegalArgumentException ia) { + LOG.warn("Invalid map id ", ia); + return null; + } + TaskAttemptID expectedMapId = mapOutputLoc.getTaskAttemptId(); + if (!mapId.equals(expectedMapId)) { + LOG.warn("data from wrong map:" + mapId + + " arrived to reduce task " + reduce + + ", where as expected map output should be from " + expectedMapId); + return null; + } + + long decompressedLength = + Long.parseLong(connection.getHeaderField(RAW_MAP_OUTPUT_LENGTH)); + long compressedLength = + Long.parseLong(connection.getHeaderField(MAP_OUTPUT_LENGTH)); + + if (compressedLength < 0 || decompressedLength < 0) { + LOG.warn(getName() + " invalid lengths in map output header: id: " + + mapId + " compressed len: " + compressedLength + + ", decompressed len: " + decompressedLength); + return null; + } + int forReduce = + (int)Integer.parseInt(connection.getHeaderField(FOR_REDUCE_TASK)); + + if (forReduce != reduce) { + LOG.warn("data for the wrong reduce: " + forReduce + + " with compressed len: " + compressedLength + + ", decompressed len: " + decompressedLength + + " arrived to reduce task " + reduce); + return null; + } + LOG.info("header: " + mapId + ", compressed len: " + compressedLength + + ", decompressed len: " + decompressedLength); + + //We will put a file in memory if it meets certain criteria: + //1. The size of the (decompressed) file should be less than 25% of + // the total inmem fs + //2. There is space available in the inmem fs + + // Check if this map-output can be saved in-memory + boolean shuffleInMemory = ramManager.canFitInMemory(decompressedLength); + + // Shuffle + MapOutput mapOutput = null; + if (shuffleInMemory) { + LOG.info("Shuffling " + decompressedLength + " bytes (" + + compressedLength + " raw bytes) " + + "into RAM from " + mapOutputLoc.getTaskAttemptId()); + + mapOutput = shuffleInMemory(mapOutputLoc, connection, input, + (int)decompressedLength, + (int)compressedLength); + } else { + LOG.info("Shuffling " + decompressedLength + " bytes (" + + compressedLength + " raw bytes) " + + "into Local-FS from " + mapOutputLoc.getTaskAttemptId()); + + mapOutput = shuffleToDisk(mapOutputLoc, input, filename, + compressedLength); + } + + return mapOutput; + } + + /** + * The connection establishment is attempted multiple times and is given up + * only on the last failure. Instead of connecting with a timeout of + * X, we try connecting with a timeout of x < X but multiple times. 
+ */ + private InputStream getInputStream(URLConnection connection, + int connectionTimeout, + int readTimeout) + throws IOException { + int unit = 0; + if (connectionTimeout < 0) { + throw new IOException("Invalid timeout " + + "[timeout = " + connectionTimeout + " ms]"); + } else if (connectionTimeout > 0) { + unit = (UNIT_CONNECT_TIMEOUT > connectionTimeout) + ? connectionTimeout + : UNIT_CONNECT_TIMEOUT; + } + // set the read timeout to the total timeout + connection.setReadTimeout(readTimeout); + // set the connect timeout to the unit-connect-timeout + connection.setConnectTimeout(unit); + while (true) { + try { + connection.connect(); + break; + } catch (IOException ioe) { + // update the total remaining connect-timeout + connectionTimeout -= unit; + + // throw an exception if we have waited for timeout amount of time + // note that the updated value if timeout is used here + if (connectionTimeout == 0) { + throw ioe; + } + + // reset the connect timeout for the last try + if (connectionTimeout < unit) { + unit = connectionTimeout; + // reset the connect time out for the final connect + connection.setConnectTimeout(unit); + } + } + } + try { + return connection.getInputStream(); + } catch (IOException ioe) { + readError = true; + throw ioe; + } + } + + private MapOutput shuffleInMemory(MapOutputLocation mapOutputLoc, + URLConnection connection, + InputStream input, + int mapOutputLength, + int compressedLength) + throws IOException, InterruptedException { + // Reserve ram for the map-output + boolean createdNow = ramManager.reserve(mapOutputLength, input); + + // Reconnect if we need to + if (!createdNow) { + // Reconnect + try { + connection = mapOutputLoc.getOutputLocation().openConnection(); + input = getInputStream(connection, shuffleConnectionTimeout, + shuffleReadTimeout); + } catch (IOException ioe) { + LOG.info("Failed reopen connection to fetch map-output from " + + mapOutputLoc.getHost()); + + // Inform the ram-manager + ramManager.closeInMemoryFile(mapOutputLength); + ramManager.unreserve(mapOutputLength); + + throw ioe; + } + } + + IFileInputStream checksumIn = + new IFileInputStream(input,compressedLength); + + input = checksumIn; + + // Are map-outputs compressed? 
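/*
 * A small standalone sketch, not part of the original patch, of the staged
 * connect that getInputStream() above implements: rather than a single
 * connect() with the full timeout X, the copier retries connect() with a
 * smaller unit timeout until the total budget is spent, so a stalled
 * tasktracker is detected in roughly unit-sized steps. The class, method and
 * parameter names below are illustrative only; for simplicity the sketch
 * opens a fresh URLConnection per attempt, whereas the method above reuses
 * the connection it was given.
 */
import java.io.IOException;
import java.net.URL;
import java.net.URLConnection;

class StagedConnectSketch {
  /** Connect in unit-sized slices until totalTimeoutMs is used up. */
  static URLConnection connect(URL url, int totalTimeoutMs, int unitTimeoutMs)
      throws IOException {
    if (totalTimeoutMs <= 0 || unitTimeoutMs <= 0) {
      throw new IllegalArgumentException("timeouts must be positive");
    }
    int remaining = totalTimeoutMs;
    while (true) {
      int unit = Math.min(unitTimeoutMs, remaining);
      URLConnection connection = url.openConnection();
      connection.setConnectTimeout(unit);       // budget for this attempt only
      try {
        connection.connect();
        return connection;                      // connected within this slice
      } catch (IOException ioe) {
        remaining -= unit;                      // charge the whole slice, as above
        if (remaining <= 0) {
          throw ioe;                            // overall budget exhausted
        }
      }
    }
  }
  // e.g. connect(url, 3 * 60 * 1000, 30 * 1000) uses the same 30-second
  // UNIT_CONNECT_TIMEOUT slice as the copier above.
}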
+ if (codec != null) { + decompressor.reset(); + input = codec.createInputStream(input, decompressor); + } + + // Copy map-output into an in-memory buffer + byte[] shuffleData = new byte[mapOutputLength]; + MapOutput mapOutput = + new MapOutput(mapOutputLoc.getTaskId(), + mapOutputLoc.getTaskAttemptId(), shuffleData, compressedLength); + + int bytesRead = 0; + try { + int n = input.read(shuffleData, 0, shuffleData.length); + while (n > 0) { + bytesRead += n; + shuffleClientMetrics.inputBytes(n); + + // indicate we're making progress + reporter.progress(); + n = input.read(shuffleData, bytesRead, + (shuffleData.length-bytesRead)); + } + + LOG.info("Read " + bytesRead + " bytes from map-output for " + + mapOutputLoc.getTaskAttemptId()); + + input.close(); + } catch (IOException ioe) { + LOG.info("Failed to shuffle from " + mapOutputLoc.getTaskAttemptId(), + ioe); + + // Inform the ram-manager + ramManager.closeInMemoryFile(mapOutputLength); + ramManager.unreserve(mapOutputLength); + + // Discard the map-output + try { + mapOutput.discard(); + } catch (IOException ignored) { + LOG.info("Failed to discard map-output from " + + mapOutputLoc.getTaskAttemptId(), ignored); + } + mapOutput = null; + + // Close the streams + IOUtils.cleanup(LOG, input); + + // Re-throw + readError = true; + throw ioe; + } + + // Close the in-memory file + ramManager.closeInMemoryFile(mapOutputLength); + + // Sanity check + if (bytesRead != mapOutputLength) { + // Inform the ram-manager + ramManager.unreserve(mapOutputLength); + + // Discard the map-output + try { + mapOutput.discard(); + } catch (IOException ignored) { + // IGNORED because we are cleaning up + LOG.info("Failed to discard map-output from " + + mapOutputLoc.getTaskAttemptId(), ignored); + } + mapOutput = null; + + throw new IOException("Incomplete map output received for " + + mapOutputLoc.getTaskAttemptId() + " from " + + mapOutputLoc.getOutputLocation() + " (" + + bytesRead + " instead of " + + mapOutputLength + ")" + ); + } + + // TODO: Remove this after a 'fix' for HADOOP-3647 + if (mapOutputLength > 0) { + DataInputBuffer dib = new DataInputBuffer(); + dib.reset(shuffleData, 0, shuffleData.length); + LOG.info("Rec #1 from " + mapOutputLoc.getTaskAttemptId() + " -> (" + + WritableUtils.readVInt(dib) + ", " + + WritableUtils.readVInt(dib) + ") from " + + mapOutputLoc.getHost()); + } + + return mapOutput; + } + + private MapOutput shuffleToDisk(MapOutputLocation mapOutputLoc, + InputStream input, + Path filename, + long mapOutputLength) + throws IOException { + // Find out a suitable location for the output on local-filesystem + Path localFilename = + lDirAlloc.getLocalPathForWrite(filename.toUri().getPath(), + mapOutputLength, conf); + + MapOutput mapOutput = + new MapOutput(mapOutputLoc.getTaskId(), mapOutputLoc.getTaskAttemptId(), + conf, localFileSys.makeQualified(localFilename), + mapOutputLength); + + + // Copy data to local-disk + OutputStream output = null; + long bytesRead = 0; + try { + output = rfs.create(localFilename); + + byte[] buf = new byte[64 * 1024]; + int n = -1; + try { + n = input.read(buf, 0, buf.length); + } catch (IOException ioe) { + readError = true; + throw ioe; + } + while (n > 0) { + bytesRead += n; + shuffleClientMetrics.inputBytes(n); + output.write(buf, 0, n); + + // indicate we're making progress + reporter.progress(); + try { + n = input.read(buf, 0, buf.length); + } catch (IOException ioe) { + readError = true; + throw ioe; + } + } + + LOG.info("Read " + bytesRead + " bytes from map-output for " + + 
mapOutputLoc.getTaskAttemptId()); + + output.close(); + input.close(); + } catch (IOException ioe) { + LOG.info("Failed to shuffle from " + mapOutputLoc.getTaskAttemptId(), + ioe); + + // Discard the map-output + try { + mapOutput.discard(); + } catch (IOException ignored) { + LOG.info("Failed to discard map-output from " + + mapOutputLoc.getTaskAttemptId(), ignored); + } + mapOutput = null; + + // Close the streams + IOUtils.cleanup(LOG, input, output); + + // Re-throw + throw ioe; + } + + // Sanity check + if (bytesRead != mapOutputLength) { + try { + mapOutput.discard(); + } catch (Exception ioe) { + // IGNORED because we are cleaning up + LOG.info("Failed to discard map-output from " + + mapOutputLoc.getTaskAttemptId(), ioe); + } catch (Throwable t) { + String msg = getTaskID() + " : Failed in shuffle to disk :" + + StringUtils.stringifyException(t); + reportFatalError(getTaskID(), t, msg); + } + mapOutput = null; + + throw new IOException("Incomplete map output received for " + + mapOutputLoc.getTaskAttemptId() + " from " + + mapOutputLoc.getOutputLocation() + " (" + + bytesRead + " instead of " + + mapOutputLength + ")" + ); + } + + return mapOutput; + + } + + } // MapOutputCopier + + private void configureClasspath(JobConf conf) + throws IOException { + + // get the task and the current classloader which will become the parent + Task task = ReduceTask.this; + ClassLoader parent = conf.getClassLoader(); + + // get the work directory which holds the elements we are dynamically + // adding to the classpath + File workDir = new File(task.getJobFile()).getParentFile(); + ArrayList urllist = new ArrayList(); + + // add the jars and directories to the classpath + String jar = conf.getJar(); + if (jar != null) { + File jobCacheDir = new File(new Path(jar).getParent().toString()); + + File[] libs = new File(jobCacheDir, "lib").listFiles(); + if (libs != null) { + for (int i = 0; i < libs.length; i++) { + urllist.add(libs[i].toURL()); + } + } + urllist.add(new File(jobCacheDir, "classes").toURL()); + urllist.add(jobCacheDir.toURL()); + + } + urllist.add(workDir.toURL()); + + // create a new classloader with the old classloader as its parent + // then set that classloader as the one used by the current jobconf + URL[] urls = urllist.toArray(new URL[urllist.size()]); + URLClassLoader loader = new URLClassLoader(urls, parent); + conf.setClassLoader(loader); + } + + public ReduceCopier(TaskUmbilicalProtocol umbilical, JobConf conf, + TaskReporter reporter + )throws ClassNotFoundException, IOException { + + configureClasspath(conf); + this.reporter = reporter; + this.shuffleClientMetrics = new ShuffleClientMetrics(conf); + this.umbilical = umbilical; + this.reduceTask = ReduceTask.this; + + this.scheduledCopies = new ArrayList(100); + this.copyResults = new ArrayList(100); + this.numCopiers = conf.getInt("mapred.reduce.parallel.copies", 5); + this.maxInFlight = 4 * numCopiers; + this.maxBackoff = conf.getInt("mapred.reduce.copy.backoff", 300); + Counters.Counter combineInputCounter = + reporter.getCounter(Task.Counter.COMBINE_INPUT_RECORDS); + this.combinerRunner = CombinerRunner.create(conf, getTaskID(), + combineInputCounter, + reporter, null); + if (combinerRunner != null) { + combineCollector = + new CombineOutputCollector(reduceCombineOutputCounter); + } + + this.ioSortFactor = conf.getInt("io.sort.factor", 10); + // the exponential backoff formula + // backoff (t) = init * base^(t-1) + // so for max retries we get + // backoff(1) + .... 
+ backoff(max_fetch_retries) ~ max + // solving which we get + // max_fetch_retries ~ log((max * (base - 1) / init) + 1) / log(base) + // for the default value of max = 300 (5min) we get max_fetch_retries = 6 + // the order is 4,8,16,32,64,128. sum of which is 252 sec = 4.2 min + + // optimizing for the base 2 + this.maxFetchRetriesPerMap = Math.max(MIN_FETCH_RETRIES_PER_MAP, + getClosestPowerOf2((this.maxBackoff * 1000 / BACKOFF_INIT) + 1)); + this.maxFailedUniqueFetches = Math.min(numMaps, + this.maxFailedUniqueFetches); + this.maxInMemOutputs = conf.getInt("mapred.inmem.merge.threshold", 1000); + this.maxInMemCopyPer = + conf.getFloat("mapred.job.shuffle.merge.percent", 0.66f); + final float maxRedPer = + conf.getFloat("mapred.job.reduce.input.buffer.percent", 0f); + if (maxRedPer > 1.0 || maxRedPer < 0.0) { + throw new IOException("mapred.job.reduce.input.buffer.percent" + + maxRedPer); + } + this.maxInMemReduce = (int)Math.min( + Runtime.getRuntime().maxMemory() * maxRedPer, Integer.MAX_VALUE); + + // Setup the RamManager + ramManager = new ShuffleRamManager(conf); + + localFileSys = FileSystem.getLocal(conf); + + rfs = ((LocalFileSystem)localFileSys).getRaw(); + + // hosts -> next contact time + this.penaltyBox = new LinkedHashMap(); + + // hostnames + this.uniqueHosts = new HashSet(); + + // Seed the random number generator with a reasonably globally unique seed + long randomSeed = System.nanoTime() + + (long)Math.pow(this.reduceTask.getPartition(), + (this.reduceTask.getPartition()%10) + ); + this.random = new Random(randomSeed); + this.maxMapRuntime = 0; + this.reportReadErrorImmediately = + conf.getBoolean("mapreduce.reduce.shuffle.notify.readerror", true); + } + + private boolean busyEnough(int numInFlight) { + return numInFlight > maxInFlight; + } + + + public boolean fetchOutputs() throws IOException { + int totalFailures = 0; + int numInFlight = 0, numCopied = 0; + DecimalFormat mbpsFormat = new DecimalFormat("0.00"); + final Progress copyPhase = + reduceTask.getProgress().phase(); + LocalFSMerger localFSMergerThread = null; + InMemFSMergeThread inMemFSMergeThread = null; + GetMapEventsThread getMapEventsThread = null; + + for (int i = 0; i < numMaps; i++) { + copyPhase.addPhase(); // add sub-phase per file + } + + copiers = new ArrayList(numCopiers); + + // start all the copying threads + for (int i=0; i < numCopiers; i++) { + MapOutputCopier copier = new MapOutputCopier(conf, reporter); + copiers.add(copier); + copier.start(); + } + + //start the on-disk-merge thread + localFSMergerThread = new LocalFSMerger((LocalFileSystem)localFileSys); + //start the in memory merger thread + inMemFSMergeThread = new InMemFSMergeThread(); + localFSMergerThread.start(); + inMemFSMergeThread.start(); + + // start the map events thread + getMapEventsThread = new GetMapEventsThread(); + getMapEventsThread.start(); + + // start the clock for bandwidth measurement + long startTime = System.currentTimeMillis(); + long currentTime = startTime; + long lastProgressTime = startTime; + long lastOutputTime = 0; + + // loop until we get all required outputs + while (copiedMapOutputs.size() < numMaps && mergeThrowable == null) { + + currentTime = System.currentTimeMillis(); + boolean logNow = false; + if (currentTime - lastOutputTime > MIN_LOG_TIME) { + lastOutputTime = currentTime; + logNow = true; + } + if (logNow) { + LOG.info(reduceTask.getTaskID() + " Need another " + + (numMaps - copiedMapOutputs.size()) + " map output(s) " + + "where " + numInFlight + " is already in progress"); + } + + // Put 
the hash entries for the failed fetches. + Iterator locItr = retryFetches.iterator(); + + while (locItr.hasNext()) { + MapOutputLocation loc = locItr.next(); + List locList = + mapLocations.get(loc.getHost()); + + // Check if the list exists. Map output location mapping is cleared + // once the jobtracker restarts and is rebuilt from scratch. + // Note that map-output-location mapping will be recreated and hence + // we continue with the hope that we might find some locations + // from the rebuild map. + if (locList != null) { + // Add to the beginning of the list so that this map is + //tried again before the others and we can hasten the + //re-execution of this map should there be a problem + locList.add(0, loc); + } + } + + if (retryFetches.size() > 0) { + LOG.info(reduceTask.getTaskID() + ": " + + "Got " + retryFetches.size() + + " map-outputs from previous failures"); + } + // clear the "failed" fetches hashmap + retryFetches.clear(); + + // now walk through the cache and schedule what we can + int numScheduled = 0; + int numDups = 0; + + synchronized (scheduledCopies) { + + // Randomize the map output locations to prevent + // all reduce-tasks swamping the same tasktracker + List hostList = new ArrayList(); + hostList.addAll(mapLocations.keySet()); + + Collections.shuffle(hostList, this.random); + + Iterator hostsItr = hostList.iterator(); + + while (hostsItr.hasNext()) { + + String host = hostsItr.next(); + + List knownOutputsByLoc = + mapLocations.get(host); + + // Check if the list exists. Map output location mapping is + // cleared once the jobtracker restarts and is rebuilt from + // scratch. + // Note that map-output-location mapping will be recreated and + // hence we continue with the hope that we might find some + // locations from the rebuild map and add then for fetching. 
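/*
 * A standalone sketch, not taken from the patch, of the scheduling pass being
 * performed around this point: hosts are visited in random order so that all
 * reduces do not swamp the same tasktracker, hosts we are already copying
 * from or that are still in the penalty box are skipped, and at most one
 * pending map output is scheduled per host per pass. The generic type T
 * stands in for MapOutputLocation; the method and parameter names are made up
 * for illustration.
 */
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;

class SchedulingPassSketch {
  /** Pick at most one pending output per eligible host. */
  static <T> List<T> scheduleOnePerHost(Map<String, List<T>> pendingByHost,
                                        Set<String> hostsInFlight,
                                        Map<String, Long> penaltyBox,
                                        long now, Random random) {
    List<String> hosts = new ArrayList<String>(pendingByHost.keySet());
    Collections.shuffle(hosts, random);           // randomize host order
    List<T> scheduled = new ArrayList<T>();
    for (String host : hosts) {
      List<T> pending = pendingByHost.get(host);
      if (pending == null || pending.isEmpty()) {
        continue;                                 // nothing known for this host
      }
      if (hostsInFlight.contains(host)) {
        continue;                                 // already copying from it
      }
      Long penaltyEnd = penaltyBox.get(host);
      if (penaltyEnd != null) {
        if (now < penaltyEnd) {
          continue;                               // still penalized (slow host)
        }
        penaltyBox.remove(host);                  // penalty expired
      }
      scheduled.add(pending.remove(0));           // one output per host per pass
      hostsInFlight.add(host);
    }
    return scheduled;
  }
}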
+ if (knownOutputsByLoc == null || knownOutputsByLoc.size() == 0) { + continue; + } + + //Identify duplicate hosts here + if (uniqueHosts.contains(host)) { + numDups += knownOutputsByLoc.size(); + continue; + } + + Long penaltyEnd = penaltyBox.get(host); + boolean penalized = false; + + if (penaltyEnd != null) { + if (currentTime < penaltyEnd.longValue()) { + penalized = true; + } else { + penaltyBox.remove(host); + } + } + + if (penalized) + continue; + + synchronized (knownOutputsByLoc) { + + locItr = knownOutputsByLoc.iterator(); + + while (locItr.hasNext()) { + + MapOutputLocation loc = locItr.next(); + + // Do not schedule fetches from OBSOLETE maps + if (obsoleteMapIds.contains(loc.getTaskAttemptId())) { + locItr.remove(); + continue; + } + + uniqueHosts.add(host); + scheduledCopies.add(loc); + locItr.remove(); // remove from knownOutputs + numInFlight++; numScheduled++; + + break; //we have a map from this host + } + } + } + scheduledCopies.notifyAll(); + } + + if (numScheduled > 0 || logNow) { + LOG.info(reduceTask.getTaskID() + " Scheduled " + numScheduled + + " outputs (" + penaltyBox.size() + + " slow hosts and" + numDups + " dup hosts)"); + } + + if (penaltyBox.size() > 0 && logNow) { + LOG.info("Penalized(slow) Hosts: "); + for (String host : penaltyBox.keySet()) { + LOG.info(host + " Will be considered after: " + + ((penaltyBox.get(host) - currentTime)/1000) + " seconds."); + } + } + + // if we have no copies in flight and we can't schedule anything + // new, just wait for a bit + try { + if (numInFlight == 0 && numScheduled == 0) { + // we should indicate progress as we don't want TT to think + // we're stuck and kill us + reporter.progress(); + Thread.sleep(5000); + } + } catch (InterruptedException e) { } // IGNORE + + while (numInFlight > 0 && mergeThrowable == null) { + LOG.debug(reduceTask.getTaskID() + " numInFlight = " + + numInFlight); + //the call to getCopyResult will either + //1) return immediately with a null or a valid CopyResult object, + // or + //2) if the numInFlight is above maxInFlight, return with a + // CopyResult object after getting a notification from a + // fetcher thread, + //So, when getCopyResult returns null, we can be sure that + //we aren't busy enough and we should go and get more mapcompletion + //events from the tasktracker + CopyResult cr = getCopyResult(numInFlight); + + if (cr == null) { + break; + } + + if (cr.getSuccess()) { // a successful copy + numCopied++; + lastProgressTime = System.currentTimeMillis(); + reduceShuffleBytes.increment(cr.getSize()); + + long secsSinceStart = + (System.currentTimeMillis()-startTime)/1000+1; + float mbs = ((float)reduceShuffleBytes.getCounter())/(1024*1024); + float transferRate = mbs/secsSinceStart; + + copyPhase.startNextPhase(); + copyPhase.setStatus("copy (" + numCopied + " of " + numMaps + + " at " + + mbpsFormat.format(transferRate) + " MB/s)"); + + // Note successful fetch for this mapId to invalidate + // (possibly) old fetch-failures + fetchFailedMaps.remove(cr.getLocation().getTaskId()); + } else if (cr.isObsolete()) { + //ignore + LOG.info(reduceTask.getTaskID() + + " Ignoring obsolete copy result for Map Task: " + + cr.getLocation().getTaskAttemptId() + " from host: " + + cr.getHost()); + } else { + retryFetches.add(cr.getLocation()); + + // note the failed-fetch + TaskAttemptID mapTaskId = cr.getLocation().getTaskAttemptId(); + TaskID mapId = cr.getLocation().getTaskId(); + + totalFailures++; + Integer noFailedFetches = + mapTaskToFailedFetchesMap.get(mapTaskId); + noFailedFetches = + 
(noFailedFetches == null) ? 1 : (noFailedFetches + 1); + mapTaskToFailedFetchesMap.put(mapTaskId, noFailedFetches); + LOG.info("Task " + getTaskID() + ": Failed fetch #" + + noFailedFetches + " from " + mapTaskId); + + // half the number of max fetch retries per map during + // the end of shuffle + int fetchRetriesPerMap = maxFetchRetriesPerMap; + int pendingCopies = numMaps - numCopied; + + // The check noFailedFetches != maxFetchRetriesPerMap is + // required to make sure of the notification in case of a + // corner case : + // when noFailedFetches reached maxFetchRetriesPerMap and + // reducer reached the end of shuffle, then we may miss sending + // a notification if the difference between + // noFailedFetches and fetchRetriesPerMap is not divisible by 2 + if (pendingCopies <= numMaps * MIN_PENDING_MAPS_PERCENT && + noFailedFetches != maxFetchRetriesPerMap) { + fetchRetriesPerMap = fetchRetriesPerMap >> 1; + } + + // did the fetch fail too many times? + // using a hybrid technique for notifying the jobtracker. + // a. the first notification is sent after max-retries + // b. subsequent notifications are sent after 2 retries. + // c. send notification immediately if it is a read error and + // "mapreduce.reduce.shuffle.notify.readerror" set true. + if ((reportReadErrorImmediately && cr.getError().equals( + CopyOutputErrorType.READ_ERROR)) || + ((noFailedFetches >= fetchRetriesPerMap) + && ((noFailedFetches - fetchRetriesPerMap) % 2) == 0)) { + synchronized (ReduceTask.this) { + taskStatus.addFetchFailedMap(mapTaskId); + reporter.progress(); + LOG.info("Failed to fetch map-output from " + mapTaskId + + " even after MAX_FETCH_RETRIES_PER_MAP retries... " + + " or it is a read error, " + + " reporting to the JobTracker"); + } + } + // note unique failed-fetch maps + if (noFailedFetches == maxFetchRetriesPerMap) { + fetchFailedMaps.add(mapId); + + // did we have too many unique failed-fetch maps? + // and did we fail on too many fetch attempts? + // and did we progress enough + // or did we wait for too long without any progress? + + // check if the reducer is healthy + boolean reducerHealthy = + (((float)totalFailures / (totalFailures + numCopied)) + < MAX_ALLOWED_FAILED_FETCH_ATTEMPT_PERCENT); + + // check if the reducer has progressed enough + boolean reducerProgressedEnough = + (((float)numCopied / numMaps) + >= MIN_REQUIRED_PROGRESS_PERCENT); + + // check if the reducer is stalled for a long time + // duration for which the reducer is stalled + int stallDuration = + (int)(System.currentTimeMillis() - lastProgressTime); + // duration for which the reducer ran with progress + int shuffleProgressDuration = + (int)(lastProgressTime - startTime); + // min time the reducer should run without getting killed + int minShuffleRunDuration = + (shuffleProgressDuration > maxMapRuntime) + ? shuffleProgressDuration + : maxMapRuntime; + boolean reducerStalled = + (((float)stallDuration / minShuffleRunDuration) + >= MAX_ALLOWED_STALL_TIME_PERCENT); + + // kill if not healthy and has insufficient progress + if ((fetchFailedMaps.size() >= maxFailedUniqueFetches || + fetchFailedMaps.size() == (numMaps - copiedMapOutputs.size())) + && !reducerHealthy + && (!reducerProgressedEnough || reducerStalled)) { + LOG.fatal("Shuffle failed with too many fetch failures " + + "and insufficient progress!" 
+ + "Killing task " + getTaskID() + "."); + umbilical.shuffleError(getTaskID(), + "Exceeded MAX_FAILED_UNIQUE_FETCHES " + maxFailedUniqueFetches + ";" + + " bailing-out."); + } + } + + // back off exponentially until num_retries <= max_retries + // back off by max_backoff/2 on subsequent failed attempts + currentTime = System.currentTimeMillis(); + int currentBackOff = noFailedFetches <= fetchRetriesPerMap + ? BACKOFF_INIT + * (1 << (noFailedFetches - 1)) + : (this.maxBackoff * 1000 / 2); + // If it is read error, + // back off for maxMapRuntime/2 + // during end of shuffle, + // backoff for min(maxMapRuntime/2, currentBackOff) + if (cr.getError().equals(CopyOutputErrorType.READ_ERROR)) { + int backOff = maxMapRuntime >> 1; + if (pendingCopies <= numMaps * MIN_PENDING_MAPS_PERCENT) { + backOff = Math.min(backOff, currentBackOff); + } + currentBackOff = backOff; + } + + penaltyBox.put(cr.getHost(), currentTime + currentBackOff); + LOG.warn(reduceTask.getTaskID() + " adding host " + + cr.getHost() + " to penalty box, next contact in " + + (currentBackOff/1000) + " seconds"); + } + uniqueHosts.remove(cr.getHost()); + numInFlight--; + } + } + + // all done, inform the copiers to exit + exitGetMapEvents= true; + try { + getMapEventsThread.join(); + LOG.info("getMapsEventsThread joined."); + } catch (InterruptedException ie) { + LOG.info("getMapsEventsThread threw an exception: " + + StringUtils.stringifyException(ie)); + } + + synchronized (copiers) { + synchronized (scheduledCopies) { + for (MapOutputCopier copier : copiers) { + copier.stopCopier(); + } + copiers.clear(); + } + } + + // copiers are done, exit and notify the waiting merge threads + synchronized (mapOutputFilesOnDisk) { + exitLocalFSMerge = true; + mapOutputFilesOnDisk.notify(); + } + + ramManager.close(); + + //Do a merge of in-memory files (if there are any) + if (mergeThrowable == null) { + try { + // Wait for the on-disk merge to complete + localFSMergerThread.join(); + LOG.info("Interleaved on-disk merge complete: " + + mapOutputFilesOnDisk.size() + " files left."); + + //wait for an ongoing merge (if it is in flight) to complete + inMemFSMergeThread.join(); + LOG.info("In-memory merge complete: " + + mapOutputsFilesInMemory.size() + " files left."); + } catch (InterruptedException ie) { + LOG.warn(reduceTask.getTaskID() + + " Final merge of the inmemory files threw an exception: " + + StringUtils.stringifyException(ie)); + // check if the last merge generated an error + if (mergeThrowable != null) { + mergeThrowable = ie; + } + return false; + } + } + return mergeThrowable == null && copiedMapOutputs.size() == numMaps; + } + + private long createInMemorySegments( + List> inMemorySegments, long leaveBytes) + throws IOException { + long totalSize = 0L; + synchronized (mapOutputsFilesInMemory) { + // fullSize could come from the RamManager, but files can be + // closed but not yet present in mapOutputsFilesInMemory + long fullSize = 0L; + for (MapOutput mo : mapOutputsFilesInMemory) { + fullSize += mo.data.length; + } + while(fullSize > leaveBytes) { + MapOutput mo = mapOutputsFilesInMemory.remove(0); + totalSize += mo.data.length; + fullSize -= mo.data.length; + Reader reader = + new InMemoryReader(ramManager, mo.mapAttemptId, + mo.data, 0, mo.data.length); + Segment segment = + new Segment(reader, true); + inMemorySegments.add(segment); + } + } + return totalSize; + } + + /** + * Create a RawKeyValueIterator from copied map outputs. 
All copying + * threads have exited, so all of the map outputs are available either in + * memory or on disk. We also know that no merges are in progress, so + * synchronization is more lax, here. + * + * The iterator returned must satisfy the following constraints: + * 1. Fewer than io.sort.factor files may be sources + * 2. No more than maxInMemReduce bytes of map outputs may be resident + * in memory when the reduce begins + * + * If we must perform an intermediate merge to satisfy (1), then we can + * keep the excluded outputs from (2) in memory and include them in the + * first merge pass. If not, then said outputs must be written to disk + * first. + */ + @SuppressWarnings("unchecked") + private RawKeyValueIterator createKVIterator( + JobConf job, FileSystem fs, Reporter reporter) throws IOException { + + // merge config params + Class keyClass = (Class)job.getMapOutputKeyClass(); + Class valueClass = (Class)job.getMapOutputValueClass(); + boolean keepInputs = job.getKeepFailedTaskFiles(); + final Path tmpDir = new Path(getTaskID().toString()); + final RawComparator comparator = + (RawComparator)job.getOutputKeyComparator(); + + // segments required to vacate memory + List> memDiskSegments = new ArrayList>(); + long inMemToDiskBytes = 0; + if (mapOutputsFilesInMemory.size() > 0) { + TaskID mapId = mapOutputsFilesInMemory.get(0).mapId; + inMemToDiskBytes = createInMemorySegments(memDiskSegments, + maxInMemReduce); + final int numMemDiskSegments = memDiskSegments.size(); + if (numMemDiskSegments > 0 && + ioSortFactor > mapOutputFilesOnDisk.size()) { + // must spill to disk, but can't retain in-mem for intermediate merge + final Path outputPath = mapOutputFile.getInputFileForWrite(mapId, + reduceTask.getTaskID(), inMemToDiskBytes); + final RawKeyValueIterator rIter = Merger.merge(job, fs, + keyClass, valueClass, memDiskSegments, numMemDiskSegments, + tmpDir, comparator, reporter, spilledRecordsCounter, null); + final Writer writer = new Writer(job, fs, outputPath, + keyClass, valueClass, codec, null); + try { + Merger.writeFile(rIter, writer, reporter, job); + addToMapOutputFilesOnDisk(fs.getFileStatus(outputPath)); + } catch (Exception e) { + if (null != outputPath) { + fs.delete(outputPath, true); + } + throw new IOException("Final merge failed", e); + } finally { + if (null != writer) { + writer.close(); + } + } + LOG.info("Merged " + numMemDiskSegments + " segments, " + + inMemToDiskBytes + " bytes to disk to satisfy " + + "reduce memory limit"); + inMemToDiskBytes = 0; + memDiskSegments.clear(); + } else if (inMemToDiskBytes != 0) { + LOG.info("Keeping " + numMemDiskSegments + " segments, " + + inMemToDiskBytes + " bytes in memory for " + + "intermediate, on-disk merge"); + } + } + + // segments on disk + List> diskSegments = new ArrayList>(); + long onDiskBytes = inMemToDiskBytes; + Path[] onDisk = getMapFiles(fs, false); + for (Path file : onDisk) { + onDiskBytes += fs.getFileStatus(file).getLen(); + diskSegments.add(new Segment(job, fs, file, codec, keepInputs)); + } + LOG.info("Merging " + onDisk.length + " files, " + + onDiskBytes + " bytes from disk"); + Collections.sort(diskSegments, new Comparator>() { + public int compare(Segment o1, Segment o2) { + if (o1.getLength() == o2.getLength()) { + return 0; + } + return o1.getLength() < o2.getLength() ? 
-1 : 1; + } + }); + + // build final list of segments from merged backed by disk + in-mem + List> finalSegments = new ArrayList>(); + long inMemBytes = createInMemorySegments(finalSegments, 0); + LOG.info("Merging " + finalSegments.size() + " segments, " + + inMemBytes + " bytes from memory into reduce"); + if (0 != onDiskBytes) { + final int numInMemSegments = memDiskSegments.size(); + diskSegments.addAll(0, memDiskSegments); + memDiskSegments.clear(); + RawKeyValueIterator diskMerge = Merger.merge( + job, fs, keyClass, valueClass, codec, diskSegments, + ioSortFactor, numInMemSegments, tmpDir, comparator, + reporter, false, spilledRecordsCounter, null); + diskSegments.clear(); + if (0 == finalSegments.size()) { + return diskMerge; + } + finalSegments.add(new Segment( + new RawKVIteratorReader(diskMerge, onDiskBytes), true)); + } + return Merger.merge(job, fs, keyClass, valueClass, + finalSegments, finalSegments.size(), tmpDir, + comparator, reporter, spilledRecordsCounter, null); + } + + class RawKVIteratorReader extends IFile.Reader { + + private final RawKeyValueIterator kvIter; + + public RawKVIteratorReader(RawKeyValueIterator kvIter, long size) + throws IOException { + super(null, null, size, null, spilledRecordsCounter); + this.kvIter = kvIter; + } + + public boolean next(DataInputBuffer key, DataInputBuffer value) + throws IOException { + if (kvIter.next()) { + final DataInputBuffer kb = kvIter.getKey(); + final DataInputBuffer vb = kvIter.getValue(); + final int kp = kb.getPosition(); + final int klen = kb.getLength() - kp; + key.reset(kb.getData(), kp, klen); + final int vp = vb.getPosition(); + final int vlen = vb.getLength() - vp; + value.reset(vb.getData(), vp, vlen); + bytesRead += klen + vlen; + return true; + } + return false; + } + + public long getPosition() throws IOException { + return bytesRead; + } + + public void close() throws IOException { + kvIter.close(); + } + } + + private CopyResult getCopyResult(int numInFlight) { + synchronized (copyResults) { + while (copyResults.isEmpty()) { + try { + //The idea is that if we have scheduled enough, we can wait until + //we hear from one of the copiers. + if (busyEnough(numInFlight)) { + copyResults.wait(); + } else { + return null; + } + } catch (InterruptedException e) { } + } + return copyResults.remove(0); + } + } + + private void addToMapOutputFilesOnDisk(FileStatus status) { + synchronized (mapOutputFilesOnDisk) { + mapOutputFilesOnDisk.add(status); + mapOutputFilesOnDisk.notify(); + } + } + + + + /** Starts merging the local copy (on disk) of the map's output so that + * most of the reducer's input is sorted i.e overlapping shuffle + * and merge phases. 
+ */ + private class LocalFSMerger extends Thread { + private LocalFileSystem localFileSys; + + public LocalFSMerger(LocalFileSystem fs) { + this.localFileSys = fs; + setName("Thread for merging on-disk files"); + setDaemon(true); + } + + @SuppressWarnings("unchecked") + public void run() { + try { + LOG.info(reduceTask.getTaskID() + " Thread started: " + getName()); + while(!exitLocalFSMerge){ + synchronized (mapOutputFilesOnDisk) { + while (!exitLocalFSMerge && + mapOutputFilesOnDisk.size() < (2 * ioSortFactor - 1)) { + LOG.info(reduceTask.getTaskID() + " Thread waiting: " + getName()); + mapOutputFilesOnDisk.wait(); + } + } + if(exitLocalFSMerge) {//to avoid running one extra time in the end + break; + } + List mapFiles = new ArrayList(); + long approxOutputSize = 0; + int bytesPerSum = + reduceTask.getConf().getInt("io.bytes.per.checksum", 512); + LOG.info(reduceTask.getTaskID() + "We have " + + mapOutputFilesOnDisk.size() + " map outputs on disk. " + + "Triggering merge of " + ioSortFactor + " files"); + // 1. Prepare the list of files to be merged. This list is prepared + // using a list of map output files on disk. Currently we merge + // io.sort.factor files into 1. + synchronized (mapOutputFilesOnDisk) { + for (int i = 0; i < ioSortFactor; ++i) { + FileStatus filestatus = mapOutputFilesOnDisk.first(); + mapOutputFilesOnDisk.remove(filestatus); + mapFiles.add(filestatus.getPath()); + approxOutputSize += filestatus.getLen(); + } + } + + // sanity check + if (mapFiles.size() == 0) { + return; + } + + // add the checksum length + approxOutputSize += ChecksumFileSystem + .getChecksumLength(approxOutputSize, + bytesPerSum); + + // 2. Start the on-disk merge process + Path outputPath = + lDirAlloc.getLocalPathForWrite(mapFiles.get(0).toString(), + approxOutputSize, conf) + .suffix(".merged"); + Writer writer = + new Writer(conf,rfs, outputPath, + conf.getMapOutputKeyClass(), + conf.getMapOutputValueClass(), + codec, null); + RawKeyValueIterator iter = null; + Path tmpDir = new Path(reduceTask.getTaskID().toString()); + try { + iter = Merger.merge(conf, rfs, + conf.getMapOutputKeyClass(), + conf.getMapOutputValueClass(), + codec, mapFiles.toArray(new Path[mapFiles.size()]), + true, ioSortFactor, tmpDir, + conf.getOutputKeyComparator(), reporter, + spilledRecordsCounter, null); + + Merger.writeFile(iter, writer, reporter, conf); + writer.close(); + } catch (Exception e) { + localFileSys.delete(outputPath, true); + throw new IOException (StringUtils.stringifyException(e)); + } + + synchronized (mapOutputFilesOnDisk) { + addToMapOutputFilesOnDisk(localFileSys.getFileStatus(outputPath)); + } + + LOG.info(reduceTask.getTaskID() + + " Finished merging " + mapFiles.size() + + " map output files on disk of total-size " + + approxOutputSize + "." 
+ + " Local output file is " + outputPath + " of size " + + localFileSys.getFileStatus(outputPath).getLen()); + } + } catch (Exception e) { + LOG.warn(reduceTask.getTaskID() + + " Merging of the local FS files threw an exception: " + + StringUtils.stringifyException(e)); + if (mergeThrowable == null) { + mergeThrowable = e; + } + } catch (Throwable t) { + String msg = getTaskID() + " : Failed to merge on the local FS" + + StringUtils.stringifyException(t); + reportFatalError(getTaskID(), t, msg); + } + } + } + + private class InMemFSMergeThread extends Thread { + + public InMemFSMergeThread() { + setName("Thread for merging in memory files"); + setDaemon(true); + } + + public void run() { + LOG.info(reduceTask.getTaskID() + " Thread started: " + getName()); + try { + boolean exit = false; + do { + exit = ramManager.waitForDataToMerge(); + if (!exit) { + doInMemMerge(); + } + } while (!exit); + } catch (Exception e) { + LOG.warn(reduceTask.getTaskID() + + " Merge of the inmemory files threw an exception: " + + StringUtils.stringifyException(e)); + ReduceCopier.this.mergeThrowable = e; + } catch (Throwable t) { + String msg = getTaskID() + " : Failed to merge in memory" + + StringUtils.stringifyException(t); + reportFatalError(getTaskID(), t, msg); + } + } + + @SuppressWarnings("unchecked") + private void doInMemMerge() throws IOException{ + if (mapOutputsFilesInMemory.size() == 0) { + return; + } + + //name this output file same as the name of the first file that is + //there in the current list of inmem files (this is guaranteed to + //be absent on the disk currently. So we don't overwrite a prev. + //created spill). Also we need to create the output file now since + //it is not guaranteed that this file will be present after merge + //is called (we delete empty files as soon as we see them + //in the merge method) + + //figure out the mapId + TaskID mapId = mapOutputsFilesInMemory.get(0).mapId; + + List> inMemorySegments = new ArrayList>(); + long mergeOutputSize = createInMemorySegments(inMemorySegments, 0); + int noInMemorySegments = inMemorySegments.size(); + + Path outputPath = mapOutputFile.getInputFileForWrite(mapId, + reduceTask.getTaskID(), mergeOutputSize); + + Writer writer = + new Writer(conf, rfs, outputPath, + conf.getMapOutputKeyClass(), + conf.getMapOutputValueClass(), + codec, null); + + RawKeyValueIterator rIter = null; + try { + LOG.info("Initiating in-memory merge with " + noInMemorySegments + + " segments..."); + + rIter = Merger.merge(conf, rfs, + (Class)conf.getMapOutputKeyClass(), + (Class)conf.getMapOutputValueClass(), + inMemorySegments, inMemorySegments.size(), + new Path(reduceTask.getTaskID().toString()), + conf.getOutputKeyComparator(), reporter, + spilledRecordsCounter, null); + + if (combinerRunner == null) { + Merger.writeFile(rIter, writer, reporter, conf); + } else { + combineCollector.setWriter(writer); + combinerRunner.combine(rIter, combineCollector); + } + writer.close(); + + LOG.info(reduceTask.getTaskID() + + " Merge of the " + noInMemorySegments + + " files in-memory complete." 
+ + " Local file is " + outputPath + " of size " + + localFileSys.getFileStatus(outputPath).getLen()); + } catch (Exception e) { + //make sure that we delete the ondisk file that we created + //earlier when we invoked cloneFileAttributes + localFileSys.delete(outputPath, true); + throw (IOException)new IOException + ("Intermediate merge failed").initCause(e); + } + + // Note the output of the merge + FileStatus status = localFileSys.getFileStatus(outputPath); + synchronized (mapOutputFilesOnDisk) { + addToMapOutputFilesOnDisk(status); + } + } + } + + private class GetMapEventsThread extends Thread { + + private IntWritable fromEventId = new IntWritable(0); + private static final long SLEEP_TIME = 1000; + + public GetMapEventsThread() { + setName("Thread for polling Map Completion Events"); + setDaemon(true); + } + + @Override + public void run() { + + LOG.info(reduceTask.getTaskID() + " Thread started: " + getName()); + + do { + try { + int numNewMaps = getMapCompletionEvents(); + if (numNewMaps > 0) { + LOG.info(reduceTask.getTaskID() + ": " + + "Got " + numNewMaps + " new map-outputs"); + } + Thread.sleep(SLEEP_TIME); + } + catch (InterruptedException e) { + // ignore. if we are shutting down - the while condition + // will check for it and exit. otherwise this could be a + // spurious interrupt due to log4j interaction + } + catch (Throwable t) { + String msg = reduceTask.getTaskID() + + " GetMapEventsThread Ignoring exception : " + + StringUtils.stringifyException(t); + reportFatalError(getTaskID(), t, msg); + } + } while (!exitGetMapEvents); + + LOG.info("GetMapEventsThread exiting"); + + } + + /** + * Queries the {@link TaskTracker} for a set of map-completion events + * from a given event ID. + * @throws IOException + */ + private int getMapCompletionEvents() throws IOException { + + int numNewMaps = 0; + + MapTaskCompletionEventsUpdate update = + umbilical.getMapCompletionEvents(reduceTask.getJobID(), + fromEventId.get(), + MAX_EVENTS_TO_FETCH, + reduceTask.getTaskID()); + TaskCompletionEvent events[] = update.getMapTaskCompletionEvents(); + + // Check if the reset is required. + // Since there is no ordering of the task completion events at the + // reducer, the only option to sync with the new jobtracker is to reset + // the events index + if (update.shouldReset()) { + fromEventId.set(0); + obsoleteMapIds.clear(); // clear the obsolete map + mapLocations.clear(); // clear the map locations mapping + } + + // Update the last seen event ID + fromEventId.set(fromEventId.get() + events.length); + + // Process the TaskCompletionEvents: + // 1. Save the SUCCEEDED maps in knownOutputs to fetch the outputs. + // 2. Save the OBSOLETE/FAILED/KILLED maps in obsoleteOutputs to stop + // fetching from those maps. + // 3. Remove TIPFAILED maps from neededOutputs since we don't need their + // outputs at all. 
+ for (TaskCompletionEvent event : events) { + switch (event.getTaskStatus()) { + case SUCCEEDED: + { + URI u = URI.create(event.getTaskTrackerHttp()); + String host = u.getHost(); + TaskAttemptID taskId = event.getTaskAttemptId(); + int duration = event.getTaskRunTime(); + if (duration > maxMapRuntime) { + maxMapRuntime = duration; + // adjust max-fetch-retries based on max-map-run-time + maxFetchRetriesPerMap = Math.max(MIN_FETCH_RETRIES_PER_MAP, + getClosestPowerOf2((maxMapRuntime / BACKOFF_INIT) + 1)); + } + URL mapOutputLocation = new URL(event.getTaskTrackerHttp() + + "/mapOutput?job=" + taskId.getJobID() + + "&map=" + taskId + + "&reduce=" + getPartition()); + List loc = mapLocations.get(host); + if (loc == null) { + loc = Collections.synchronizedList + (new LinkedList()); + mapLocations.put(host, loc); + } + loc.add(new MapOutputLocation(taskId, host, mapOutputLocation)); + numNewMaps ++; + } + break; + case FAILED: + case KILLED: + case OBSOLETE: + { + obsoleteMapIds.add(event.getTaskAttemptId()); + LOG.info("Ignoring obsolete output of " + event.getTaskStatus() + + " map-task: '" + event.getTaskAttemptId() + "'"); + } + break; + case TIPFAILED: + { + copiedMapOutputs.add(event.getTaskAttemptId().getTaskID()); + LOG.info("Ignoring output of failed map TIP: '" + + event.getTaskAttemptId() + "'"); + } + break; + } + } + return numNewMaps; + } + } + } + + /** + * Return the exponent of the power of two closest to the given + * positive value, or zero if value leq 0. + * This follows the observation that the msb of a given value is + * also the closest power of two, unless the bit following it is + * set. + */ + private static int getClosestPowerOf2(int value) { + if (value <= 0) + throw new IllegalArgumentException("Undefined for " + value); + final int hob = Integer.highestOneBit(value); + return Integer.numberOfTrailingZeros(hob) + + (((hob >>> 1) & value) == 0 ? 0 : 1); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/ReduceTaskRunner.java b/src/mapred/org/apache/hadoop/mapred/ReduceTaskRunner.java new file mode 100644 index 0000000..f8cc2bd --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/ReduceTaskRunner.java @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.*; + +import org.apache.hadoop.mapred.TaskTracker.TaskInProgress; + +/** Runs a reduce task. 
*/ +class ReduceTaskRunner extends TaskRunner { + + public ReduceTaskRunner(TaskInProgress task, TaskTracker tracker, + JobConf conf) throws IOException { + + super(task, tracker, conf); + } + + /** Assemble all of the map output files */ + public boolean prepare() throws IOException { + if (!super.prepare()) { + return false; + } + + // cleanup from failures + mapOutputFile.removeAll(getTask().getTaskID()); + return true; + } + + + /** Delete all of the temporary map output files. */ + public void close() throws IOException { + LOG.info(getTask()+" done; removing files."); + getTask().getProgress().setStatus("closed"); + mapOutputFile.removeAll(getTask().getTaskID()); + } + + @Override + public String getChildJavaOpts(JobConf jobConf, String defaultValue) { + return jobConf.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, + super.getChildJavaOpts(jobConf, + JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS)); + } + + @Override + public int getChildUlimit(JobConf jobConf) { + return jobConf.getInt(JobConf.MAPRED_REDUCE_TASK_ULIMIT, + super.getChildUlimit(jobConf)); + } + + @Override + public String getChildEnv(JobConf jobConf) { + return jobConf.get(JobConf.MAPRED_REDUCE_TASK_ENV, + super.getChildEnv(jobConf)); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/ReduceTaskStatus.java b/src/mapred/org/apache/hadoop/mapred/ReduceTaskStatus.java new file mode 100644 index 0000000..31d3831 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/ReduceTaskStatus.java @@ -0,0 +1,151 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + + + +class ReduceTaskStatus extends TaskStatus { + + private long shuffleFinishTime; + private long sortFinishTime; + private List failedFetchTasks = new ArrayList(1); + + public ReduceTaskStatus() {} + + public ReduceTaskStatus(TaskAttemptID taskid, float progress, int numSlots, + State runState, String diagnosticInfo, String stateString, + String taskTracker, Phase phase, Counters counters) { + super(taskid, progress, numSlots, runState, diagnosticInfo, stateString, + taskTracker, phase, counters); + } + + @Override + public Object clone() { + ReduceTaskStatus myClone = (ReduceTaskStatus)super.clone(); + myClone.failedFetchTasks = new ArrayList(failedFetchTasks); + return myClone; + } + + @Override + public boolean getIsMap() { + return false; + } + + @Override + void setFinishTime(long finishTime) { + if (shuffleFinishTime == 0) { + this.shuffleFinishTime = finishTime; + } + if (sortFinishTime == 0){ + this.sortFinishTime = finishTime; + } + super.setFinishTime(finishTime); + } + + @Override + public long getShuffleFinishTime() { + return shuffleFinishTime; + } + + @Override + void setShuffleFinishTime(long shuffleFinishTime) { + this.shuffleFinishTime = shuffleFinishTime; + } + + @Override + public long getSortFinishTime() { + return sortFinishTime; + } + + @Override + void setSortFinishTime(long sortFinishTime) { + this.sortFinishTime = sortFinishTime; + if (0 == this.shuffleFinishTime){ + this.shuffleFinishTime = sortFinishTime; + } + } + + @Override + public List getFetchFailedMaps() { + return failedFetchTasks; + } + + @Override + void addFetchFailedMap(TaskAttemptID mapTaskId) { + failedFetchTasks.add(mapTaskId); + } + + @Override + synchronized void statusUpdate(TaskStatus status) { + super.statusUpdate(status); + + if (status.getShuffleFinishTime() != 0) { + this.shuffleFinishTime = status.getShuffleFinishTime(); + } + + if (status.getSortFinishTime() != 0) { + sortFinishTime = status.getSortFinishTime(); + } + + List newFetchFailedMaps = status.getFetchFailedMaps(); + if (failedFetchTasks == null) { + failedFetchTasks = newFetchFailedMaps; + } else if (newFetchFailedMaps != null){ + failedFetchTasks.addAll(newFetchFailedMaps); + } + } + + @Override + synchronized void clearStatus() { + super.clearStatus(); + failedFetchTasks.clear(); + } + + @Override + public void readFields(DataInput in) throws IOException { + super.readFields(in); + shuffleFinishTime = in.readLong(); + sortFinishTime = in.readLong(); + int noFailedFetchTasks = in.readInt(); + failedFetchTasks = new ArrayList(noFailedFetchTasks); + for (int i=0; i < noFailedFetchTasks; ++i) { + TaskAttemptID id = new TaskAttemptID(); + id.readFields(in); + failedFetchTasks.add(id); + } + } + + @Override + public void write(DataOutput out) throws IOException { + super.write(out); + out.writeLong(shuffleFinishTime); + out.writeLong(sortFinishTime); + out.writeInt(failedFetchTasks.size()); + for (TaskAttemptID taskId : failedFetchTasks) { + taskId.write(out); + } + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/Reducer.java b/src/mapred/org/apache/hadoop/mapred/Reducer.java new file mode 100644 index 0000000..e87cdef --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/Reducer.java @@ -0,0 +1,201 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; + +import java.util.Iterator; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.io.Closeable; + +/** + * Reduces a set of intermediate values which share a key to a smaller set of + * values. + * + *

+ * The number of Reducers for the job is set by the user via
+ * {@link JobConf#setNumReduceTasks(int)}. Reducer implementations
+ * can access the {@link JobConf} for the job via the
+ * {@link JobConfigurable#configure(JobConf)} method and initialize themselves.
+ * Similarly they can use the {@link Closeable#close()} method for
+ * de-initialization.
+ *
+ * Reducer has 3 primary phases:
+ *
+ *   1. Shuffle
+ *
+ *   Reducer is input the grouped output of a {@link Mapper}. In this phase
+ *   the framework, for each Reducer, fetches the relevant partition of the
+ *   output of all the Mappers, via HTTP.
+ *
+ *   2. Sort
+ *
+ *   The framework groups Reducer inputs by keys (since different Mappers
+ *   may have output the same key) in this stage.
+ *
+ *   The shuffle and sort phases occur simultaneously, i.e. while outputs are
+ *   being fetched they are merged.
+ *
+ *   SecondarySort
+ *
+ *   If equivalence rules for keys while grouping the intermediates are
+ *   different from those for grouping keys before reduction, then one may
+ *   specify a Comparator via
+ *   {@link JobConf#setOutputValueGroupingComparator(Class)}. Since
+ *   {@link JobConf#setOutputKeyComparatorClass(Class)} can be used to
+ *   control how intermediate keys are grouped, these can be used in
+ *   conjunction to simulate secondary sort on values.
+ *
+ *   For example, say that you want to find duplicate web pages and tag them
+ *   all with the url of the "best" known example. You would set up the job
+ *   like this (a JobConf sketch of this setup follows the example below):
+ *     - Map Input Key: url
+ *     - Map Input Value: document
+ *     - Map Output Key: document checksum, url pagerank
+ *     - Map Output Value: url
+ *     - Partitioner: by checksum
+ *     - OutputKeyComparator: by checksum and then decreasing pagerank
+ *     - OutputValueGroupingComparator: by checksum
+ *
+ *   3. Reduce
+ *
+ *   In this phase the
+ *   {@link #reduce(Object, Iterator, OutputCollector, Reporter)}
+ *   method is called for each <key, (list of values)> pair in
+ *   the grouped inputs.
+ *
+ *   The output of the reduce task is typically written to the
+ *   {@link FileSystem} via
+ *   {@link OutputCollector#collect(Object, Object)}.
+ *
+ * The output of the Reducer is not re-sorted.
+ *
+ * Example:
+ *

+ *     public class MyReducer<K extends WritableComparable, V extends Writable> 
+ *     extends MapReduceBase implements Reducer<K, V, K, V> {
+ *     
+ *       static enum MyCounters { NUM_RECORDS }
+ *        
+ *       private String reduceTaskId;
+ *       private int noKeys = 0;
+ *       
+ *       public void configure(JobConf job) {
+ *         reduceTaskId = job.get("mapred.task.id");
+ *       }
+ *       
+ *       public void reduce(K key, Iterator<V> values,
+ *                          OutputCollector<K, V> output, 
+ *                          Reporter reporter)
+ *       throws IOException {
+ *       
+ *         // Process
+ *         int noValues = 0;
+ *         while (values.hasNext()) {
+ *           V value = values.next();
+ *           
+ *           // Increment the no. of values for this key
+ *           ++noValues;
+ *           
+ *           // Process the <key, value> pair (assume this takes a while)
+ *           // ...
+ *           // ...
+ *           
+ *           // Let the framework know that we are alive, and kicking!
+ *           if ((noValues%10) == 0) {
+ *             reporter.progress();
+ *           }
+ *         
+ *           // Process some more
+ *           // ...
+ *           // ...
+ *           
+ *           // Output the <key, value> 
+ *           output.collect(key, value);
+ *         }
+ *         
+ *         // Increment the no. of <key, list of values> pairs processed
+ *         ++noKeys;
+ *         
+ *         // Increment counters
+ *         reporter.incrCounter(NUM_RECORDS, 1);
+ *         
+ *         // Every 100 keys update application-level status
+ *         if ((noKeys%100) == 0) {
+ *           reporter.setStatus(reduceTaskId + " processed " + noKeys);
+ *         }
+ *       }
+ *     }
+ * 

+ * + * @see Mapper + * @see Partitioner + * @see Reporter + * @see MapReduceBase + * @deprecated Use {@link org.apache.hadoop.mapreduce.Reducer} instead. + */ +@Deprecated +public interface Reducer extends JobConfigurable, Closeable { + + /** + * Reduces values for a given key. + * + *

+ * The framework calls this method for each
+ * <key, (list of values)> pair in the grouped inputs.
+ * Output values must be of the same type as input values. Input keys must
+ * not be altered. The framework will reuse the key and value objects
+ * that are passed into the reduce, therefore the application should clone
+ * the objects they want to keep a copy of. In many cases, all values are
+ * combined into zero or one value.
+ *
+ * Output pairs are collected with calls to
+ * {@link OutputCollector#collect(Object,Object)}.
+ *
+ * Applications can use the {@link Reporter} provided to report progress
+ * or just indicate that they are alive. In scenarios where the application
+ * takes an insignificant amount of time to process individual key/value
+ * pairs, this is crucial since the framework might assume that the task has
+ * timed-out and kill that task. The other way of avoiding this is to set
+ * mapred.task.timeout to a high-enough value (or even zero for no
+ * time-outs).
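+ *
+ * A minimal sketch (not from the original source) of the object-reuse rule
+ * above, assuming the values are Text: copy any value you intend to keep,
+ * because the framework hands the same object back on each next() call.
+ *
+ *     List<Text> buffered = new ArrayList<Text>();
+ *     while (values.hasNext()) {
+ *       buffered.add(new Text(values.next()));   // copy; do not keep the reference
+ *     }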

+ * + * @param key the key. + * @param values the list of values to reduce. + * @param output to collect keys and combined values. + * @param reporter facility to report progress. + */ + void reduce(K2 key, Iterator values, + OutputCollector output, Reporter reporter) + throws IOException; + +} diff --git a/src/mapred/org/apache/hadoop/mapred/ReinitTrackerAction.java b/src/mapred/org/apache/hadoop/mapred/ReinitTrackerAction.java new file mode 100644 index 0000000..9a750c2 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/ReinitTrackerAction.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * Represents a directive from the {@link org.apache.hadoop.mapred.JobTracker} + * to the {@link org.apache.hadoop.mapred.TaskTracker} to reinitialize itself. + * + */ +class ReinitTrackerAction extends TaskTrackerAction { + + public ReinitTrackerAction() { + super(ActionType.REINIT_TRACKER); + } + + public void write(DataOutput out) throws IOException {} + + public void readFields(DataInput in) throws IOException {} + +} diff --git a/src/mapred/org/apache/hadoop/mapred/Reporter.java b/src/mapred/org/apache/hadoop/mapred/Reporter.java new file mode 100644 index 0000000..f2e5697 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/Reporter.java @@ -0,0 +1,119 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import org.apache.hadoop.mapred.Counters.Counter; +import org.apache.hadoop.util.Progressable; + +/** + * A facility for Map-Reduce applications to report progress and update + * counters, status information etc. + * + *

+ * {@link Mapper} and {@link Reducer} can use the Reporter
+ * provided to report progress or just indicate that they are alive. In
+ * scenarios where the application takes an insignificant amount of time to
+ * process individual key/value pairs, this is crucial since the framework
+ * might assume that the task has timed-out and kill that task.
+ *
+ * Applications can also update {@link Counters} via the provided
+ * Reporter.
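+ *
+ * A minimal sketch (not from the original source) of both counter flavours
+ * from inside a task; BadRecords is a hypothetical user-defined enum.
+ *
+ *     // inside map() or reduce():
+ *     reporter.incrCounter(BadRecords.MALFORMED, 1);          // enum-backed counter
+ *     reporter.incrCounter("Parser", "lines-skipped", 1);     // dynamically named counter
+ *     reporter.setStatus("skipped a malformed record");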

+ * + * @see Progressable + * @see Counters + */ +public interface Reporter extends Progressable { + + /** + * A constant of Reporter type that does nothing. + */ + public static final Reporter NULL = new Reporter() { + public void setStatus(String s) { + } + public void progress() { + } + public Counter getCounter(Enum name) { + return null; + } + public Counter getCounter(String group, String name) { + return null; + } + public void incrCounter(Enum key, long amount) { + } + public void incrCounter(String group, String counter, long amount) { + } + public InputSplit getInputSplit() throws UnsupportedOperationException { + throw new UnsupportedOperationException("NULL reporter has no input"); + } + }; + + /** + * Set the status description for the task. + * + * @param status brief description of the current status. + */ + public abstract void setStatus(String status); + + /** + * Get the {@link Counter} of the given group with the given name. + * + * @param name counter name + * @return the Counter of the given group/name. + */ + public abstract Counter getCounter(Enum name); + + /** + * Get the {@link Counter} of the given group with the given name. + * + * @param group counter group + * @param name counter name + * @return the Counter of the given group/name. + */ + public abstract Counter getCounter(String group, String name); + + /** + * Increments the counter identified by the key, which can be of + * any {@link Enum} type, by the specified amount. + * + * @param key key to identify the counter to be incremented. The key can be + * be any Enum. + * @param amount A non-negative amount by which the counter is to + * be incremented. + */ + public abstract void incrCounter(Enum key, long amount); + + /** + * Increments the counter identified by the group and counter name + * by the specified amount. + * + * @param group name to identify the group of the counter to be incremented. + * @param counter name to identify the counter within the group. + * @param amount A non-negative amount by which the counter is to + * be incremented. + */ + public abstract void incrCounter(String group, String counter, long amount); + + /** + * Get the {@link InputSplit} object for a map. + * + * @return the InputSplit that the map is reading from. + * @throws UnsupportedOperationException if called outside a mapper + */ + public abstract InputSplit getInputSplit() + throws UnsupportedOperationException; +} diff --git a/src/mapred/org/apache/hadoop/mapred/ResourceEstimator.java b/src/mapred/org/apache/hadoop/mapred/ResourceEstimator.java new file mode 100644 index 0000000..673defc --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/ResourceEstimator.java @@ -0,0 +1,110 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.mapred; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +/** + * Class responsible for modeling the resource consumption of running tasks. + * + * For now, we just do temp space for maps + * + * There is one ResourceEstimator per JobInProgress + * + */ +class ResourceEstimator { + + //Log with JobInProgress + private static final Log LOG = LogFactory.getLog( + "org.apache.hadoop.mapred.ResourceEstimator"); + + private long completedMapsInputSize; + private long completedMapsOutputSize; + + private int completedMapsUpdates; + final private JobInProgress job; + final private int threshholdToUse; + + public ResourceEstimator(JobInProgress job) { + this.job = job; + threshholdToUse = job.desiredMaps()/ 10; + } + + protected synchronized void updateWithCompletedTask(TaskStatus ts, + TaskInProgress tip) { + + //-1 indicates error, which we don't average in. + if(tip.isMapTask() && ts.getOutputSize() != -1) { + completedMapsUpdates++; + + completedMapsInputSize+=(tip.getMapInputSize()+1); + completedMapsOutputSize+=ts.getOutputSize(); + + if(LOG.isDebugEnabled()) { + LOG.debug("completedMapsUpdates:"+completedMapsUpdates+" "+ + "completedMapsInputSize:"+completedMapsInputSize+" " + + "completedMapsOutputSize:"+completedMapsOutputSize); + } + } + } + + /** + * @return estimated length of this job's total map output + */ + protected synchronized long getEstimatedTotalMapOutputSize() { + if(completedMapsUpdates < threshholdToUse) { + return 0; + } else { + long inputSize = job.getInputLength() + job.desiredMaps(); + //add desiredMaps() so that randomwriter case doesn't blow up + long estimate = Math.round((inputSize * + completedMapsOutputSize * 2.0)/completedMapsInputSize); + if (LOG.isDebugEnabled()) { + LOG.debug("estimate total map output will be " + estimate); + } + return estimate; + } + } + + /** + * @return estimated length of this job's average map output + */ + long getEstimatedMapOutputSize() { + long estimate = 0L; + if (job.desiredMaps() > 0) { + estimate = getEstimatedTotalMapOutputSize() / job.desiredMaps(); + } + return estimate; + } + + /** + * + * @return estimated length of this job's average reduce input + */ + long getEstimatedReduceInputSize() { + if(job.desiredReduces() == 0) {//no reduce output, so no size + return 0; + } else { + return getEstimatedTotalMapOutputSize() / job.desiredReduces(); + //estimate that each reduce gets an equal share of total map output + } + } + + +} diff --git a/src/mapred/org/apache/hadoop/mapred/ResourceReporter.java b/src/mapred/org/apache/hadoop/mapred/ResourceReporter.java new file mode 100644 index 0000000..ec1f992 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/ResourceReporter.java @@ -0,0 +1,114 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapreduce.JobID; + +/** + * A pluggable for obtaining cluster and job resource information + */ +abstract class ResourceReporter implements Configurable { + static public int UNAVAILABLE = -1; + + protected Configuration conf; + + + public Configuration getConf() { + return conf; + } + + public void setConf(Configuration conf) { + this.conf = conf; + } + + /** + * @param jobid Job Id + * @return CPU percentage of this job on the cluster + */ + public abstract double getJobCpuPercentageOnCluster(JobID jobid); + + /** + * @param jobid Job Id + * @return Memory percentage of this job on the cluster + */ + public abstract double getJobMemPercentageOnCluster(JobID jobid); + + /** + * @param jobid Job Id + * @return Maximum current value of CPU percentage per node + */ + public abstract double getJobCpuMaxPercentageOnBox(JobID jobid); + + /** + * @param jobid Job Id + * @return Maximum current value of Memory percentage per node + */ + public abstract double getJobMemMaxPercentageOnBox(JobID jobid); + + /** + * @param jobid Job Id + * @return Maximum current value of Memory percentage per node + */ + public abstract double getJobMemMaxPercentageOnBoxAllTime(JobID jobid); + + /** + * @param jobid Job Id + * @return Cumulated CPU time (cluster-millisecond) + */ + public abstract double getJobCpuCumulatedUsageTime(JobID jobid); + + /** + * @param jobid Job Id + * @return Cumulated memory usage time (cluster-millisecond) + */ + public abstract double getJobMemCumulatedUsageTime(JobID jobid); + + /** + * @param jobid Job Id + * @return Total CPU giga-cycle used by this job + */ + public abstract double getJobCpuCumulatedGigaCycles(JobID jobid); + + /** + * @return Total CPU clock in GHz of the cluster + */ + public abstract double getClusterCpuTotalGHz(); + + /** + * @return Total memory in GB of the cluster + */ + public abstract double getClusterMemTotalGB(); + + /** + * @return Total CPU usage in GHz of the cluster + */ + public abstract double getClusterCpuUsageGHz(); + + /** + * @return Total memory usage in GB of the cluster + */ + public abstract double getClusterMemUsageGB(); + + /** + * @return Total memory usage in GB of the cluster + */ + public abstract int getReportedTaskTrackers(); +} diff --git a/src/mapred/org/apache/hadoop/mapred/RunningJob.java b/src/mapred/org/apache/hadoop/mapred/RunningJob.java new file mode 100644 index 0000000..91cd202 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/RunningJob.java @@ -0,0 +1,192 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; + + +/** + * RunningJob is the user-interface to query for details on a + * running Map-Reduce job. + * + *

+ * Clients can get hold of RunningJob via the {@link JobClient}
+ * and then query the running-job for details such as name, configuration,
+ * progress etc.
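+ *
+ * A minimal sketch (not from the original source) of a polling loop built on
+ * this interface; MyJob is a hypothetical job class.
+ *
+ *     public static void runAndWatch(JobConf conf) throws Exception {
+ *       JobClient client = new JobClient(conf);
+ *       RunningJob running = client.submitJob(conf);
+ *       while (!running.isComplete()) {
+ *         System.out.printf("map %.0f%% reduce %.0f%%%n",
+ *             running.mapProgress() * 100, running.reduceProgress() * 100);
+ *         Thread.sleep(5000);                       // poll every few seconds
+ *       }
+ *       if (!running.isSuccessful()) {
+ *         System.err.println("Job failed: " + running.getID());
+ *       }
+ *     }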

+ * + * @see JobClient + */ +public interface RunningJob { + /** + * Get the job identifier. + * + * @return the job identifier. + */ + public JobID getID(); + + /** @deprecated This method is deprecated and will be removed. Applications should + * rather use {@link #getID()}. + */ + @Deprecated + public String getJobID(); + + /** + * Get the name of the job. + * + * @return the name of the job. + */ + public String getJobName(); + + /** + * Get the path of the submitted job configuration. + * + * @return the path of the submitted job configuration. + */ + public String getJobFile(); + + /** + * Get the URL where some job progress information will be displayed. + * + * @return the URL where some job progress information will be displayed. + */ + public String getTrackingURL(); + + /** + * Get the progress of the job's map-tasks, as a float between 0.0 + * and 1.0. When all map tasks have completed, the function returns 1.0. + * + * @return the progress of the job's map-tasks. + * @throws IOException + */ + public float mapProgress() throws IOException; + + /** + * Get the progress of the job's reduce-tasks, as a float between 0.0 + * and 1.0. When all reduce tasks have completed, the function returns 1.0. + * + * @return the progress of the job's reduce-tasks. + * @throws IOException + */ + public float reduceProgress() throws IOException; + + /** + * Get the progress of the job's cleanup-tasks, as a float between 0.0 + * and 1.0. When all cleanup tasks have completed, the function returns 1.0. + * + * @return the progress of the job's cleanup-tasks. + * @throws IOException + */ + public float cleanupProgress() throws IOException; + + /** + * Get the progress of the job's setup-tasks, as a float between 0.0 + * and 1.0. When all setup tasks have completed, the function returns 1.0. + * + * @return the progress of the job's setup-tasks. + * @throws IOException + */ + public float setupProgress() throws IOException; + + /** + * Check if the job is finished or not. + * This is a non-blocking call. + * + * @return true if the job is complete, else false. + * @throws IOException + */ + public boolean isComplete() throws IOException; + + /** + * Check if the job completed successfully. + * + * @return true if the job succeeded, else false. + * @throws IOException + */ + public boolean isSuccessful() throws IOException; + + /** + * Blocks until the job is complete. + * + * @throws IOException + */ + public void waitForCompletion() throws IOException; + + /** + * Returns the current state of the Job. + * {@link JobStatus} + * + * @throws IOException + */ + public int getJobState() throws IOException; + + /** + * Kill the running job. Blocks until all job tasks have been + * killed as well. If the job is no longer running, it simply returns. + * + * @throws IOException + */ + public void killJob() throws IOException; + + /** + * Set the priority of a running job. + * @param priority the new priority for the job. + * @throws IOException + */ + public void setJobPriority(String priority) throws IOException; + + /** + * Get events indicating completion (success/failure) of component tasks. + * + * @param startFrom index to start fetching events from + * @return an array of {@link TaskCompletionEvent}s + * @throws IOException + */ + public TaskCompletionEvent[] getTaskCompletionEvents(int startFrom) + throws IOException; + + /** + * Kill indicated task attempt. + * + * @param taskId the id of the task to be terminated. 
+ * @param shouldFail if true the task is failed and added to failed tasks + * list, otherwise it is just killed, w/o affecting + * job failure status. + * @throws IOException + */ + public void killTask(TaskAttemptID taskId, boolean shouldFail) throws IOException; + + /** @deprecated Applications should rather use {@link #killTask(TaskAttemptID, boolean)}*/ + @Deprecated + public void killTask(String taskId, boolean shouldFail) throws IOException; + + /** + * Gets the counters for this job. + * + * @return the counters for this job. + * @throws IOException + */ + public Counters getCounters() throws IOException; + + /** + * Gets the diagnostic messages for a given task attempt. + * @param taskid + * @return the list of diagnostic messages for the task + * @throws IOException + */ + public String[] getTaskDiagnostics(TaskAttemptID taskid) throws IOException; +} diff --git a/src/mapred/org/apache/hadoop/mapred/SequenceFileAsBinaryInputFormat.java b/src/mapred/org/apache/hadoop/mapred/SequenceFileAsBinaryInputFormat.java new file mode 100644 index 0000000..c96c45c --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/SequenceFileAsBinaryInputFormat.java @@ -0,0 +1,140 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.BytesWritable; +import org.apache.hadoop.io.DataOutputBuffer; +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.mapred.InputSplit; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.RecordReader; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.mapred.SequenceFileInputFormat; + +/** + * InputFormat reading keys, values from SequenceFiles in binary (raw) + * format. + */ +public class SequenceFileAsBinaryInputFormat + extends SequenceFileInputFormat { + + public SequenceFileAsBinaryInputFormat() { + super(); + } + + public RecordReader getRecordReader( + InputSplit split, JobConf job, Reporter reporter) + throws IOException { + return new SequenceFileAsBinaryRecordReader(job, (FileSplit)split); + } + + /** + * Read records from a SequenceFile as binary (raw) bytes. 
+ */ + public static class SequenceFileAsBinaryRecordReader + implements RecordReader { + private SequenceFile.Reader in; + private long start; + private long end; + private boolean done = false; + private DataOutputBuffer buffer = new DataOutputBuffer(); + private SequenceFile.ValueBytes vbytes; + + public SequenceFileAsBinaryRecordReader(Configuration conf, FileSplit split) + throws IOException { + Path path = split.getPath(); + FileSystem fs = path.getFileSystem(conf); + this.in = new SequenceFile.Reader(fs, path, conf); + this.end = split.getStart() + split.getLength(); + if (split.getStart() > in.getPosition()) + in.sync(split.getStart()); // sync to start + this.start = in.getPosition(); + vbytes = in.createValueBytes(); + done = start >= end; + } + + public BytesWritable createKey() { + return new BytesWritable(); + } + + public BytesWritable createValue() { + return new BytesWritable(); + } + + /** + * Retrieve the name of the key class for this SequenceFile. + * @see org.apache.hadoop.io.SequenceFile.Reader#getKeyClassName + */ + public String getKeyClassName() { + return in.getKeyClassName(); + } + + /** + * Retrieve the name of the value class for this SequenceFile. + * @see org.apache.hadoop.io.SequenceFile.Reader#getValueClassName + */ + public String getValueClassName() { + return in.getValueClassName(); + } + + /** + * Read raw bytes from a SequenceFile. + */ + public synchronized boolean next(BytesWritable key, BytesWritable val) + throws IOException { + if (done) return false; + long pos = in.getPosition(); + boolean eof = -1 == in.nextRawKey(buffer); + if (!eof) { + key.set(buffer.getData(), 0, buffer.getLength()); + buffer.reset(); + in.nextRawValue(vbytes); + vbytes.writeUncompressedBytes(buffer); + val.set(buffer.getData(), 0, buffer.getLength()); + buffer.reset(); + } + return !(done = (eof || (pos >= end && in.syncSeen()))); + } + + public long getPos() throws IOException { + return in.getPosition(); + } + + public void close() throws IOException { + in.close(); + } + + /** + * Return the progress within the input split + * @return 0.0 to 1.0 of the input byte range + */ + public float getProgress() throws IOException { + if (end == start) { + return 0.0f; + } else { + return Math.min(1.0f, (float)((in.getPosition() - start) / + (double)(end - start))); + } + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/SequenceFileAsBinaryOutputFormat.java b/src/mapred/org/apache/hadoop/mapred/SequenceFileAsBinaryOutputFormat.java new file mode 100644 index 0000000..3330c89 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/SequenceFileAsBinaryOutputFormat.java @@ -0,0 +1,187 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.io.DataOutputStream; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; + +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.BytesWritable; +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.SequenceFile.CompressionType; +import org.apache.hadoop.io.SequenceFile.ValueBytes; +import org.apache.hadoop.io.compress.CompressionCodec; +import org.apache.hadoop.io.compress.DefaultCodec; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.Progressable; + +/** + * An {@link OutputFormat} that writes keys, values to + * {@link SequenceFile}s in binary(raw) format + */ +public class SequenceFileAsBinaryOutputFormat + extends SequenceFileOutputFormat { + + /** + * Inner class used for appendRaw + */ + static protected class WritableValueBytes implements ValueBytes { + private BytesWritable value; + + public WritableValueBytes() { + this.value = null; + } + public WritableValueBytes(BytesWritable value) { + this.value = value; + } + + public void reset(BytesWritable value) { + this.value = value; + } + + public void writeUncompressedBytes(DataOutputStream outStream) + throws IOException { + outStream.write(value.getBytes(), 0, value.getLength()); + } + + public void writeCompressedBytes(DataOutputStream outStream) + throws IllegalArgumentException, IOException { + throw + new UnsupportedOperationException("WritableValueBytes doesn't support " + + "RECORD compression"); + } + public int getSize(){ + return value.getLength(); + } + } + + /** + * Set the key class for the {@link SequenceFile} + *

+ * This allows the user to specify the key class to be different
+ * from the actual class ({@link BytesWritable}) used for writing.

+ * + * @param conf the {@link JobConf} to modify + * @param theClass the SequenceFile output key class. + */ + static public void setSequenceFileOutputKeyClass(JobConf conf, + Class theClass) { + conf.setClass("mapred.seqbinary.output.key.class", theClass, Object.class); + } + + /** + * Set the value class for the {@link SequenceFile} + *

+ * This allows the user to specify the value class to be different
+ * from the actual class ({@link BytesWritable}) used for writing.
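+ *
+ * A minimal sketch (not from the original source) of configuring a job to
+ * emit raw bytes while declaring Text / IntWritable as the logical key and
+ * value classes for readers of the output; MyBinaryJob is hypothetical.
+ *
+ *     JobConf job = new JobConf(MyBinaryJob.class);
+ *     job.setOutputFormat(SequenceFileAsBinaryOutputFormat.class);
+ *     SequenceFileAsBinaryOutputFormat.setSequenceFileOutputKeyClass(job, Text.class);
+ *     SequenceFileAsBinaryOutputFormat.setSequenceFileOutputValueClass(job, IntWritable.class);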

+ * + * @param conf the {@link JobConf} to modify + * @param theClass the SequenceFile output key class. + */ + static public void setSequenceFileOutputValueClass(JobConf conf, + Class theClass) { + conf.setClass("mapred.seqbinary.output.value.class", + theClass, Object.class); + } + + /** + * Get the key class for the {@link SequenceFile} + * + * @return the key class of the {@link SequenceFile} + */ + static public Class getSequenceFileOutputKeyClass(JobConf conf) { + return conf.getClass("mapred.seqbinary.output.key.class", + conf.getOutputKeyClass().asSubclass(WritableComparable.class), + WritableComparable.class); + } + + /** + * Get the value class for the {@link SequenceFile} + * + * @return the value class of the {@link SequenceFile} + */ + static public Class getSequenceFileOutputValueClass(JobConf conf) { + return conf.getClass("mapred.seqbinary.output.value.class", + conf.getOutputValueClass().asSubclass(Writable.class), + Writable.class); + } + + @Override + public RecordWriter + getRecordWriter(FileSystem ignored, JobConf job, + String name, Progressable progress) + throws IOException { + // get the path of the temporary output file + Path file = FileOutputFormat.getTaskOutputPath(job, name); + + FileSystem fs = file.getFileSystem(job); + CompressionCodec codec = null; + CompressionType compressionType = CompressionType.NONE; + if (getCompressOutput(job)) { + // find the kind of compression to do + compressionType = getOutputCompressionType(job); + + // find the right codec + Class codecClass = getOutputCompressorClass(job, + DefaultCodec.class); + codec = ReflectionUtils.newInstance(codecClass, job); + } + final SequenceFile.Writer out = + SequenceFile.createWriter(fs, job, file, + getSequenceFileOutputKeyClass(job), + getSequenceFileOutputValueClass(job), + compressionType, + codec, + progress); + + return new RecordWriter() { + + private WritableValueBytes wvaluebytes = new WritableValueBytes(); + + public void write(BytesWritable bkey, BytesWritable bvalue) + throws IOException { + + wvaluebytes.reset(bvalue); + out.appendRaw(bkey.getBytes(), 0, bkey.getLength(), wvaluebytes); + wvaluebytes.reset(null); + } + + public void close(Reporter reporter) throws IOException { + out.close(); + } + + }; + + } + + @Override + public void checkOutputSpecs(FileSystem ignored, JobConf job) + throws IOException { + super.checkOutputSpecs(ignored, job); + if (getCompressOutput(job) && + getOutputCompressionType(job) == CompressionType.RECORD ){ + throw new InvalidJobConfException("SequenceFileAsBinaryOutputFormat " + + "doesn't support Record Compression" ); + } + + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/SequenceFileAsTextInputFormat.java b/src/mapred/org/apache/hadoop/mapred/SequenceFileAsTextInputFormat.java new file mode 100644 index 0000000..78eb1c7 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/SequenceFileAsTextInputFormat.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; + +import org.apache.hadoop.io.Text; + +/** + * This class is similar to SequenceFileInputFormat, except it generates SequenceFileAsTextRecordReader + * which converts the input keys and values to their String forms by calling toString() method. + */ +public class SequenceFileAsTextInputFormat + extends SequenceFileInputFormat { + + public SequenceFileAsTextInputFormat() { + super(); + } + + public RecordReader getRecordReader(InputSplit split, + JobConf job, + Reporter reporter) + throws IOException { + + reporter.setStatus(split.toString()); + + return new SequenceFileAsTextRecordReader(job, (FileSplit) split); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/SequenceFileAsTextRecordReader.java b/src/mapred/org/apache/hadoop/mapred/SequenceFileAsTextRecordReader.java new file mode 100644 index 0000000..e3b92a1 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/SequenceFileAsTextRecordReader.java @@ -0,0 +1,82 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; + +/** + * This class converts the input keys and values to their String forms by calling toString() + * method. This class to SequenceFileAsTextInputFormat class is as LineRecordReader + * class to TextInputFormat class. + */ +public class SequenceFileAsTextRecordReader + implements RecordReader { + + private final SequenceFileRecordReader + sequenceFileRecordReader; + + private WritableComparable innerKey; + private Writable innerValue; + + public SequenceFileAsTextRecordReader(Configuration conf, FileSplit split) + throws IOException { + sequenceFileRecordReader = + new SequenceFileRecordReader(conf, split); + innerKey = sequenceFileRecordReader.createKey(); + innerValue = sequenceFileRecordReader.createValue(); + } + + public Text createKey() { + return new Text(); + } + + public Text createValue() { + return new Text(); + } + + /** Read key/value pair in a line. 
*/ + public synchronized boolean next(Text key, Text value) throws IOException { + Text tKey = key; + Text tValue = value; + if (!sequenceFileRecordReader.next(innerKey, innerValue)) { + return false; + } + tKey.set(innerKey.toString()); + tValue.set(innerValue.toString()); + return true; + } + + public float getProgress() throws IOException { + return sequenceFileRecordReader.getProgress(); + } + + public synchronized long getPos() throws IOException { + return sequenceFileRecordReader.getPos(); + } + + public synchronized void close() throws IOException { + sequenceFileRecordReader.close(); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/SequenceFileInputFilter.java b/src/mapred/org/apache/hadoop/mapred/SequenceFileInputFilter.java new file mode 100644 index 0000000..52d51f9 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/SequenceFileInputFilter.java @@ -0,0 +1,305 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.security.DigestException; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.regex.Pattern; +import java.util.regex.PatternSyntaxException; + +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.BytesWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.util.ReflectionUtils; + +/** + * A class that allows a map/red job to work on a sample of sequence files. + * The sample is decided by the filter class set by the job. 
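+ *
+ * A minimal sketch (not from the original source) of sampling roughly one
+ * record in ten from the input SequenceFiles; SamplingJob is a hypothetical
+ * job class.
+ *
+ *     JobConf job = new JobConf(SamplingJob.class);
+ *     job.setInputFormat(SequenceFileInputFilter.class);
+ *     SequenceFileInputFilter.setFilterClass(job,
+ *         SequenceFileInputFilter.PercentFilter.class);
+ *     SequenceFileInputFilter.PercentFilter.setFrequency(job, 10);   // keep 1 in 10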
+ * + */ + +public class SequenceFileInputFilter + extends SequenceFileInputFormat { + + final private static String FILTER_CLASS = "sequencefile.filter.class"; + final private static String FILTER_FREQUENCY + = "sequencefile.filter.frequency"; + final private static String FILTER_REGEX = "sequencefile.filter.regex"; + + public SequenceFileInputFilter() { + } + + /** Create a record reader for the given split + * @param split file split + * @param job job configuration + * @param reporter reporter who sends report to task tracker + * @return RecordReader + */ + public RecordReader getRecordReader(InputSplit split, + JobConf job, Reporter reporter) + throws IOException { + + reporter.setStatus(split.toString()); + + return new FilterRecordReader(job, (FileSplit) split); + } + + + /** set the filter class + * + * @param conf application configuration + * @param filterClass filter class + */ + public static void setFilterClass(Configuration conf, Class filterClass) { + conf.set(FILTER_CLASS, filterClass.getName()); + } + + + /** + * filter interface + */ + public interface Filter extends Configurable { + /** filter function + * Decide if a record should be filtered or not + * @param key record key + * @return true if a record is accepted; return false otherwise + */ + public abstract boolean accept(Object key); + } + + /** + * base class for Filters + */ + public static abstract class FilterBase implements Filter { + Configuration conf; + + public Configuration getConf() { + return conf; + } + } + + /** Records filter by matching key to regex + */ + public static class RegexFilter extends FilterBase { + private Pattern p; + /** Define the filtering regex and stores it in conf + * @param conf where the regex is set + * @param regex regex used as a filter + */ + public static void setPattern(Configuration conf, String regex) + throws PatternSyntaxException { + try { + Pattern.compile(regex); + } catch (PatternSyntaxException e) { + throw new IllegalArgumentException("Invalid pattern: "+regex); + } + conf.set(FILTER_REGEX, regex); + } + + public RegexFilter() { } + + /** configure the Filter by checking the configuration + */ + public void setConf(Configuration conf) { + String regex = conf.get(FILTER_REGEX); + if (regex==null) + throw new RuntimeException(FILTER_REGEX + "not set"); + this.p = Pattern.compile(regex); + this.conf = conf; + } + + + /** Filtering method + * If key matches the regex, return true; otherwise return false + * @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object) + */ + public boolean accept(Object key) { + return p.matcher(key.toString()).matches(); + } + } + + /** This class returns a percentage of records + * The percentage is determined by a filtering frequency f using + * the criteria record# % f == 0. + * For example, if the frequency is 10, one out of 10 records is returned. 
+ */ + public static class PercentFilter extends FilterBase { + private int frequency; + private int count; + + /** set the frequency and stores it in conf + * @param conf configuration + * @param frequency filtering frequencey + */ + public static void setFrequency(Configuration conf, int frequency){ + if (frequency<=0) + throw new IllegalArgumentException( + "Negative " + FILTER_FREQUENCY + ": "+frequency); + conf.setInt(FILTER_FREQUENCY, frequency); + } + + public PercentFilter() { } + + /** configure the filter by checking the configuration + * + * @param conf configuration + */ + public void setConf(Configuration conf) { + this.frequency = conf.getInt("sequencefile.filter.frequency", 10); + if (this.frequency <=0) { + throw new RuntimeException( + "Negative "+FILTER_FREQUENCY+": "+this.frequency); + } + this.conf = conf; + } + + /** Filtering method + * If record# % frequency==0, return true; otherwise return false + * @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object) + */ + public boolean accept(Object key) { + boolean accepted = false; + if (count == 0) + accepted = true; + if (++count == frequency) { + count = 0; + } + return accepted; + } + } + + /** This class returns a set of records by examing the MD5 digest of its + * key against a filtering frequency f. The filtering criteria is + * MD5(key) % f == 0. + */ + public static class MD5Filter extends FilterBase { + private int frequency; + private static final MessageDigest DIGESTER; + public static final int MD5_LEN = 16; + private byte [] digest = new byte[MD5_LEN]; + + static { + try { + DIGESTER = MessageDigest.getInstance("MD5"); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } + } + + + /** set the filtering frequency in configuration + * + * @param conf configuration + * @param frequency filtering frequency + */ + public static void setFrequency(Configuration conf, int frequency){ + if (frequency<=0) + throw new IllegalArgumentException( + "Negative " + FILTER_FREQUENCY + ": "+frequency); + conf.setInt(FILTER_FREQUENCY, frequency); + } + + public MD5Filter() { } + + /** configure the filter according to configuration + * + * @param conf configuration + */ + public void setConf(Configuration conf) { + this.frequency = conf.getInt(FILTER_FREQUENCY, 10); + if (this.frequency <=0) { + throw new RuntimeException( + "Negative "+FILTER_FREQUENCY+": "+this.frequency); + } + this.conf = conf; + } + + /** Filtering method + * If MD5(key) % frequency==0, return true; otherwise return false + * @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object) + */ + public boolean accept(Object key) { + try { + long hashcode; + if (key instanceof Text) { + hashcode = MD5Hashcode((Text)key); + } else if (key instanceof BytesWritable) { + hashcode = MD5Hashcode((BytesWritable)key); + } else { + ByteBuffer bb; + bb = Text.encode(key.toString()); + hashcode = MD5Hashcode(bb.array(), 0, bb.limit()); + } + if (hashcode/frequency*frequency==hashcode) + return true; + } catch(Exception e) { + LOG.warn(e); + throw new RuntimeException(e); + } + return false; + } + + private long MD5Hashcode(Text key) throws DigestException { + return MD5Hashcode(key.getBytes(), 0, key.getLength()); + } + + private long MD5Hashcode(BytesWritable key) throws DigestException { + return MD5Hashcode(key.getBytes(), 0, key.getLength()); + } + synchronized private long MD5Hashcode(byte[] bytes, + int start, int length) throws DigestException { + DIGESTER.update(bytes, 0, length); + 
DIGESTER.digest(digest, 0, MD5_LEN); + long hashcode=0; + for (int i = 0; i < 8; i++) + hashcode |= ((digest[i] & 0xffL) << (8*(7-i))); + return hashcode; + } + } + + private static class FilterRecordReader + extends SequenceFileRecordReader { + + private Filter filter; + + public FilterRecordReader(Configuration conf, FileSplit split) + throws IOException { + super(conf, split); + // instantiate filter + filter = (Filter)ReflectionUtils.newInstance( + conf.getClass(FILTER_CLASS, PercentFilter.class), + conf); + } + + public synchronized boolean next(K key, V value) throws IOException { + while (next(key)) { + if (filter.accept(key)) { + getCurrentValue(value); + return true; + } + } + + return false; + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/SequenceFileInputFormat.java b/src/mapred/org/apache/hadoop/mapred/SequenceFileInputFormat.java new file mode 100644 index 0000000..7504d7a --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/SequenceFileInputFormat.java @@ -0,0 +1,68 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; + +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Path; + +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.MapFile; + +/** An {@link InputFormat} for {@link SequenceFile}s. + * @deprecated Use + * {@link org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat} + * instead. 
+ */ +@Deprecated +public class SequenceFileInputFormat extends FileInputFormat { + + public SequenceFileInputFormat() { + setMinSplitSize(SequenceFile.SYNC_INTERVAL); + } + + @Override + protected LocatedFileStatus[] listLocatedStatus(JobConf job) throws IOException { + LocatedFileStatus[] files = super.listLocatedStatus(job); + for (int i = 0; i < files.length; i++) { + FileStatus file = files[i]; + if (file.isDir()) { // it's a MapFile + Path dataFile = new Path(file.getPath(), MapFile.DATA_FILE_NAME); + FileSystem fs = file.getPath().getFileSystem(job); + // use the data file + files[i] = fs.listLocatedStatus(dataFile).next(); + } + } + return files; + } + + public RecordReader getRecordReader(InputSplit split, + JobConf job, Reporter reporter) + throws IOException { + + reporter.setStatus(split.toString()); + + return new SequenceFileRecordReader(job, (FileSplit) split); + } + +} + diff --git a/src/mapred/org/apache/hadoop/mapred/SequenceFileOutputFormat.java b/src/mapred/org/apache/hadoop/mapred/SequenceFileOutputFormat.java new file mode 100644 index 0000000..2c9348a --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/SequenceFileOutputFormat.java @@ -0,0 +1,123 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.util.Arrays; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.FileUtil; + +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.SequenceFile.CompressionType; +import org.apache.hadoop.io.compress.CompressionCodec; +import org.apache.hadoop.io.compress.DefaultCodec; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.*; + +/** An {@link OutputFormat} that writes {@link SequenceFile}s. + * @deprecated Use + * {@link org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat} + * instead. 
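+ *
+ * <p>For illustration only, a minimal old-API configuration sketch; the job
+ * class, output path and compression type are assumptions, not part of this
+ * file:
+ * <pre>
+ *   JobConf job = new JobConf(MyJob.class);
+ *   job.setOutputFormat(SequenceFileOutputFormat.class);
+ *   FileOutputFormat.setOutputPath(job, new Path("/data/out"));
+ *   SequenceFileOutputFormat.setOutputCompressionType(job,
+ *       SequenceFile.CompressionType.BLOCK);
+ * </pre>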
+ */ +@Deprecated +public class SequenceFileOutputFormat extends FileOutputFormat { + + public RecordWriter getRecordWriter( + FileSystem ignored, JobConf job, + String name, Progressable progress) + throws IOException { + // get the path of the temporary output file + Path file = FileOutputFormat.getTaskOutputPath(job, name); + + FileSystem fs = file.getFileSystem(job); + CompressionCodec codec = null; + CompressionType compressionType = CompressionType.NONE; + if (getCompressOutput(job)) { + // find the kind of compression to do + compressionType = getOutputCompressionType(job); + + // find the right codec + Class codecClass = getOutputCompressorClass(job, + DefaultCodec.class); + codec = ReflectionUtils.newInstance(codecClass, job); + } + final SequenceFile.Writer out = + SequenceFile.createWriter(fs, job, file, + job.getOutputKeyClass(), + job.getOutputValueClass(), + compressionType, + codec, + progress); + + return new RecordWriter() { + + public void write(K key, V value) + throws IOException { + + out.append(key, value); + } + + public void close(Reporter reporter) throws IOException { out.close();} + }; + } + + /** Open the output generated by this format. */ + public static SequenceFile.Reader[] getReaders(Configuration conf, Path dir) + throws IOException { + FileSystem fs = dir.getFileSystem(conf); + Path[] names = FileUtil.stat2Paths(fs.listStatus(dir)); + + // sort names, so that hash partitioning works + Arrays.sort(names); + + SequenceFile.Reader[] parts = new SequenceFile.Reader[names.length]; + for (int i = 0; i < names.length; i++) { + parts[i] = new SequenceFile.Reader(fs, names[i], conf); + } + return parts; + } + + /** + * Get the {@link CompressionType} for the output {@link SequenceFile}. + * @param conf the {@link JobConf} + * @return the {@link CompressionType} for the output {@link SequenceFile}, + * defaulting to {@link CompressionType#RECORD} + */ + public static CompressionType getOutputCompressionType(JobConf conf) { + String val = conf.get("mapred.output.compression.type", + CompressionType.RECORD.toString()); + return CompressionType.valueOf(val); + } + + /** + * Set the {@link CompressionType} for the output {@link SequenceFile}. + * @param conf the {@link JobConf} to modify + * @param style the {@link CompressionType} for the output + * {@link SequenceFile} + */ + public static void setOutputCompressionType(JobConf conf, + CompressionType style) { + setCompressOutput(conf, true); + conf.set("mapred.output.compression.type", style.toString()); + } + +} + diff --git a/src/mapred/org/apache/hadoop/mapred/SequenceFileRecordReader.java b/src/mapred/org/apache/hadoop/mapred/SequenceFileRecordReader.java new file mode 100644 index 0000000..704508e --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/SequenceFileRecordReader.java @@ -0,0 +1,128 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; + + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.*; +import org.apache.hadoop.util.ReflectionUtils; + +/** An {@link RecordReader} for {@link SequenceFile}s. */ +public class SequenceFileRecordReader implements RecordReader { + + private SequenceFile.Reader in; + private long start; + private long end; + private boolean more = true; + protected Configuration conf; + + public SequenceFileRecordReader(Configuration conf, FileSplit split) + throws IOException { + Path path = split.getPath(); + FileSystem fs = path.getFileSystem(conf); + this.in = new SequenceFile.Reader(fs, path, conf); + this.end = split.getStart() + split.getLength(); + this.conf = conf; + + if (split.getStart() > in.getPosition()) + in.sync(split.getStart()); // sync to start + + this.start = in.getPosition(); + more = start < end; + } + + + /** The class of key that must be passed to {@link + * #next(Object, Object)}.. */ + public Class getKeyClass() { return in.getKeyClass(); } + + /** The class of value that must be passed to {@link + * #next(Object, Object)}.. */ + public Class getValueClass() { return in.getValueClass(); } + + @SuppressWarnings("unchecked") + public K createKey() { + return (K) ReflectionUtils.newInstance(getKeyClass(), conf); + } + + @SuppressWarnings("unchecked") + public V createValue() { + return (V) ReflectionUtils.newInstance(getValueClass(), conf); + } + + public synchronized boolean next(K key, V value) throws IOException { + if (!more) return false; + long pos = in.getPosition(); + boolean remaining = (in.next(key) != null); + if (remaining) { + getCurrentValue(value); + } + if (pos >= end && in.syncSeen()) { + more = false; + } else { + more = remaining; + } + return more; + } + + protected synchronized boolean next(K key) + throws IOException { + if (!more) return false; + long pos = in.getPosition(); + boolean remaining = (in.next(key) != null); + if (pos >= end && in.syncSeen()) { + more = false; + } else { + more = remaining; + } + return more; + } + + protected synchronized void getCurrentValue(V value) + throws IOException { + in.getCurrentValue(value); + } + + /** + * Return the progress within the input split + * @return 0.0 to 1.0 of the input byte range + */ + public float getProgress() throws IOException { + if (end == start) { + return 0.0f; + } else { + return Math.min(1.0f, (in.getPosition() - start) / (float)(end - start)); + } + } + + public synchronized long getPos() throws IOException { + return in.getPosition(); + } + + protected synchronized void seek(long pos) throws IOException { + in.seek(pos); + } + public synchronized void close() throws IOException { in.close(); } + +} + diff --git a/src/mapred/org/apache/hadoop/mapred/SkipBadRecords.java b/src/mapred/org/apache/hadoop/mapred/SkipBadRecords.java new file mode 100644 index 0000000..fd24e4e --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/SkipBadRecords.java @@ -0,0 +1,308 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; + +/** + * Utility class for skip bad records functionality. It contains various + * settings related to skipping of bad records. + * + *

<p>Hadoop provides an optional mode of execution in which the bad records
+ * are detected and skipped in further attempts.
+ *
+ * <p>This feature can be used when map/reduce tasks crash deterministically
+ * on certain input. This happens due to bugs in the map/reduce function. The
+ * usual course would be to fix these bugs, but sometimes that is not
+ * possible; perhaps the bug is in third-party libraries for which the source
+ * code is not available. In such cases the task never reaches completion even
+ * with multiple attempts, and the complete data for that task is lost.
+ *
+ * <p>With this feature, only a small portion of data surrounding the bad
+ * record is lost, which may be acceptable for some user applications.
+ * See {@link SkipBadRecords#setMapperMaxSkipRecords(Configuration, long)}.
+ *
+ * <p>The skipping mode kicks in after a certain number of task failures.
+ * See {@link SkipBadRecords#setAttemptsToStartSkipping(Configuration, int)}.
+ *
+ * <p>In skipping mode, the map/reduce task maintains the record range which
+ * is being processed at all times. Before giving the input to the map/reduce
+ * function, it reports this record range to the TaskTracker. If the task
+ * crashes, the TaskTracker knows which range was last reported, and on
+ * further attempts that range is skipped.
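+ *
+ * <p>A configuration sketch, for illustration only; the threshold values and
+ * the job class are assumptions:
+ * <pre>
+ *   JobConf job = new JobConf(MyJob.class);
+ *   SkipBadRecords.setAttemptsToStartSkipping(job, 2);
+ *   SkipBadRecords.setMapperMaxSkipRecords(job, 1);
+ *   SkipBadRecords.setReducerMaxSkipGroups(job, 1);
+ * </pre>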

+ */ +public class SkipBadRecords { + + /** + * Special counters which are written by the application and are + * used by the framework for detecting bad records. For detecting bad records + * these counters must be incremented by the application. + */ + public static final String COUNTER_GROUP = "SkippingTaskCounters"; + + /** + * Number of processed map records. + * @see SkipBadRecords#getAutoIncrMapperProcCount(Configuration) + */ + public static final String COUNTER_MAP_PROCESSED_RECORDS = + "MapProcessedRecords"; + + /** + * Number of processed reduce groups. + * @see SkipBadRecords#getAutoIncrReducerProcCount(Configuration) + */ + public static final String COUNTER_REDUCE_PROCESSED_GROUPS = + "ReduceProcessedGroups"; + + private static final String ATTEMPTS_TO_START_SKIPPING = + "mapred.skip.attempts.to.start.skipping"; + private static final String AUTO_INCR_MAP_PROC_COUNT = + "mapred.skip.map.auto.incr.proc.count"; + private static final String AUTO_INCR_REDUCE_PROC_COUNT = + "mapred.skip.reduce.auto.incr.proc.count"; + private static final String OUT_PATH = "mapred.skip.out.dir"; + private static final String MAPPER_MAX_SKIP_RECORDS = + "mapred.skip.map.max.skip.records"; + private static final String REDUCER_MAX_SKIP_GROUPS = + "mapred.skip.reduce.max.skip.groups"; + + /** + * Get the number of Task attempts AFTER which skip mode + * will be kicked off. When skip mode is kicked off, the + * tasks reports the range of records which it will process + * next to the TaskTracker. So that on failures, TT knows which + * ones are possibly the bad records. On further executions, + * those are skipped. + * Default value is 2. + * + * @param conf the configuration + * @return attemptsToStartSkipping no of task attempts + */ + public static int getAttemptsToStartSkipping(Configuration conf) { + return conf.getInt(ATTEMPTS_TO_START_SKIPPING, 2); + } + + /** + * Set the number of Task attempts AFTER which skip mode + * will be kicked off. When skip mode is kicked off, the + * tasks reports the range of records which it will process + * next to the TaskTracker. So that on failures, TT knows which + * ones are possibly the bad records. On further executions, + * those are skipped. + * Default value is 2. + * + * @param conf the configuration + * @param attemptsToStartSkipping no of task attempts + */ + public static void setAttemptsToStartSkipping(Configuration conf, + int attemptsToStartSkipping) { + conf.setInt(ATTEMPTS_TO_START_SKIPPING, attemptsToStartSkipping); + } + + /** + * Get the flag which if set to true, + * {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS} is incremented + * by MapRunner after invoking the map function. This value must be set to + * false for applications which process the records asynchronously + * or buffer the input records. For example streaming. + * In such cases applications should increment this counter on their own. + * Default value is true. + * + * @param conf the configuration + * @return true if auto increment + * {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS}. + * false otherwise. + */ + public static boolean getAutoIncrMapperProcCount(Configuration conf) { + return conf.getBoolean(AUTO_INCR_MAP_PROC_COUNT, true); + } + + /** + * Set the flag which if set to true, + * {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS} is incremented + * by MapRunner after invoking the map function. This value must be set to + * false for applications which process the records asynchronously + * or buffer the input records. For example streaming. 
+ * In such cases applications should increment this counter on their own. + * Default value is true. + * + * @param conf the configuration + * @param autoIncr whether to auto increment + * {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS}. + */ + public static void setAutoIncrMapperProcCount(Configuration conf, + boolean autoIncr) { + conf.setBoolean(AUTO_INCR_MAP_PROC_COUNT, autoIncr); + } + + /** + * Get the flag which if set to true, + * {@link SkipBadRecords#COUNTER_REDUCE_PROCESSED_GROUPS} is incremented + * by framework after invoking the reduce function. This value must be set to + * false for applications which process the records asynchronously + * or buffer the input records. For example streaming. + * In such cases applications should increment this counter on their own. + * Default value is true. + * + * @param conf the configuration + * @return true if auto increment + * {@link SkipBadRecords#COUNTER_REDUCE_PROCESSED_GROUPS}. + * false otherwise. + */ + public static boolean getAutoIncrReducerProcCount(Configuration conf) { + return conf.getBoolean(AUTO_INCR_REDUCE_PROC_COUNT, true); + } + + /** + * Set the flag which if set to true, + * {@link SkipBadRecords#COUNTER_REDUCE_PROCESSED_GROUPS} is incremented + * by framework after invoking the reduce function. This value must be set to + * false for applications which process the records asynchronously + * or buffer the input records. For example streaming. + * In such cases applications should increment this counter on their own. + * Default value is true. + * + * @param conf the configuration + * @param autoIncr whether to auto increment + * {@link SkipBadRecords#COUNTER_REDUCE_PROCESSED_GROUPS}. + */ + public static void setAutoIncrReducerProcCount(Configuration conf, + boolean autoIncr) { + conf.setBoolean(AUTO_INCR_REDUCE_PROC_COUNT, autoIncr); + } + + /** + * Get the directory to which skipped records are written. By default it is + * the sub directory of the output _logs directory. + * User can stop writing skipped records by setting the value null. + * + * @param conf the configuration. + * @return path skip output directory. Null is returned if this is not set + * and output directory is also not set. + */ + public static Path getSkipOutputPath(Configuration conf) { + String name = conf.get(OUT_PATH); + if(name!=null) { + if("none".equals(name)) { + return null; + } + return new Path(name); + } + Path outPath = FileOutputFormat.getOutputPath(new JobConf(conf)); + return outPath==null ? null : new Path(outPath, + "_logs"+Path.SEPARATOR+"skip"); + } + + /** + * Set the directory to which skipped records are written. By default it is + * the sub directory of the output _logs directory. + * User can stop writing skipped records by setting the value null. + * + * @param conf the configuration. + * @param path skip output directory path + */ + public static void setSkipOutputPath(JobConf conf, Path path) { + String pathStr = null; + if(path==null) { + pathStr = "none"; + } else { + pathStr = path.toString(); + } + conf.set(OUT_PATH, pathStr); + } + + /** + * Get the number of acceptable skip records surrounding the bad record PER + * bad record in mapper. The number includes the bad record as well. + * To turn the feature of detection/skipping of bad records off, set the + * value to 0. + * The framework tries to narrow down the skipped range by retrying + * until this threshold is met OR all attempts get exhausted for this task. 
+ * Set the value to Long.MAX_VALUE to indicate that framework need not try to + * narrow down. Whatever records(depends on application) get skipped are + * acceptable. + * Default value is 0. + * + * @param conf the configuration + * @return maxSkipRecs acceptable skip records. + */ + public static long getMapperMaxSkipRecords(Configuration conf) { + return conf.getLong(MAPPER_MAX_SKIP_RECORDS, 0); + } + + /** + * Set the number of acceptable skip records surrounding the bad record PER + * bad record in mapper. The number includes the bad record as well. + * To turn the feature of detection/skipping of bad records off, set the + * value to 0. + * The framework tries to narrow down the skipped range by retrying + * until this threshold is met OR all attempts get exhausted for this task. + * Set the value to Long.MAX_VALUE to indicate that framework need not try to + * narrow down. Whatever records(depends on application) get skipped are + * acceptable. + * Default value is 0. + * + * @param conf the configuration + * @param maxSkipRecs acceptable skip records. + */ + public static void setMapperMaxSkipRecords(Configuration conf, + long maxSkipRecs) { + conf.setLong(MAPPER_MAX_SKIP_RECORDS, maxSkipRecs); + } + + /** + * Get the number of acceptable skip groups surrounding the bad group PER + * bad group in reducer. The number includes the bad group as well. + * To turn the feature of detection/skipping of bad groups off, set the + * value to 0. + * The framework tries to narrow down the skipped range by retrying + * until this threshold is met OR all attempts get exhausted for this task. + * Set the value to Long.MAX_VALUE to indicate that framework need not try to + * narrow down. Whatever groups(depends on application) get skipped are + * acceptable. + * Default value is 0. + * + * @param conf the configuration + * @return maxSkipGrps acceptable skip groups. + */ + public static long getReducerMaxSkipGroups(Configuration conf) { + return conf.getLong(REDUCER_MAX_SKIP_GROUPS, 0); + } + + /** + * Set the number of acceptable skip groups surrounding the bad group PER + * bad group in reducer. The number includes the bad group as well. + * To turn the feature of detection/skipping of bad groups off, set the + * value to 0. + * The framework tries to narrow down the skipped range by retrying + * until this threshold is met OR all attempts get exhausted for this task. + * Set the value to Long.MAX_VALUE to indicate that framework need not try to + * narrow down. Whatever groups(depends on application) get skipped are + * acceptable. + * Default value is 0. + * + * @param conf the configuration + * @param maxSkipGrps acceptable skip groups. + */ + public static void setReducerMaxSkipGroups(Configuration conf, + long maxSkipGrps) { + conf.setLong(REDUCER_MAX_SKIP_GROUPS, maxSkipGrps); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/SortedRanges.java b/src/mapred/org/apache/hadoop/mapred/SortedRanges.java new file mode 100644 index 0000000..a6531fc --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/SortedRanges.java @@ -0,0 +1,383 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.Iterator; +import java.util.SortedSet; +import java.util.TreeSet; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.io.Writable; + +/** + * Keeps the Ranges sorted by startIndex. + * The added ranges are always ensured to be non-overlapping. + * Provides the SkipRangeIterator, which skips the Ranges + * stored in this object. + */ +class SortedRanges implements Writable{ + + private static final Log LOG = + LogFactory.getLog(SortedRanges.class); + + private TreeSet ranges = new TreeSet(); + private long indicesCount; + + /** + * Get Iterator which skips the stored ranges. + * The Iterator.next() call return the index starting from 0. + * @return SkipRangeIterator + */ + synchronized SkipRangeIterator skipRangeIterator(){ + return new SkipRangeIterator(ranges.iterator()); + } + + /** + * Get the no of indices stored in the ranges. + * @return indices count + */ + synchronized long getIndicesCount() { + return indicesCount; + } + + /** + * Get the sorted set of ranges. + * @return ranges + */ + synchronized SortedSet getRanges() { + return ranges; + } + + /** + * Add the range indices. It is ensured that the added range + * doesn't overlap the existing ranges. If it overlaps, the + * existing overlapping ranges are removed and a single range + * having the superset of all the removed ranges and this range + * is added. + * If the range is of 0 length, doesn't do anything. + * @param range Range to be added. + */ + synchronized void add(Range range){ + if(range.isEmpty()) { + return; + } + + long startIndex = range.getStartIndex(); + long endIndex = range.getEndIndex(); + //make sure that there are no overlapping ranges + SortedSet headSet = ranges.headSet(range); + if(headSet.size()>0) { + Range previousRange = headSet.last(); + LOG.debug("previousRange "+previousRange); + if(startIndex=previousRange.getEndIndex() ? 
+ endIndex : previousRange.getEndIndex(); + } + } + + Iterator tailSetIt = ranges.tailSet(range).iterator(); + while(tailSetIt.hasNext()) { + Range nextRange = tailSetIt.next(); + LOG.debug("nextRange "+nextRange +" startIndex:"+startIndex+ + " endIndex:"+endIndex); + if(endIndex>=nextRange.getStartIndex()) { + //nextRange overlaps this range + //remove the nextRange + tailSetIt.remove(); + indicesCount-=nextRange.getLength(); + if(endIndex headSet = ranges.headSet(range); + if(headSet.size()>0) { + Range previousRange = headSet.last(); + LOG.debug("previousRange "+previousRange); + if(startIndex tailSetIt = ranges.tailSet(range).iterator(); + while(tailSetIt.hasNext()) { + Range nextRange = tailSetIt.next(); + LOG.debug("nextRange "+nextRange +" startIndex:"+startIndex+ + " endIndex:"+endIndex); + if(endIndex>nextRange.getStartIndex()) { + //nextRange overlaps this range + //narrow down the nextRange + tailSetIt.remove(); + indicesCount-=nextRange.getLength(); + if(endIndexstart) { + Range recRange = new Range(start, end-start); + ranges.add(recRange); + indicesCount+=recRange.getLength(); + LOG.debug("added "+recRange); + } + } + + public synchronized void readFields(DataInput in) throws IOException { + indicesCount = in.readLong(); + ranges = new TreeSet(); + int size = in.readInt(); + for(int i=0;i it = ranges.iterator(); + while(it.hasNext()) { + Range range = it.next(); + range.write(out); + } + } + + public String toString() { + StringBuffer sb = new StringBuffer(); + Iterator it = ranges.iterator(); + while(it.hasNext()) { + Range range = it.next(); + sb.append(range.toString()+"\n"); + } + return sb.toString(); + } + + /** + * Index Range. Comprises of start index and length. + * A Range can be of 0 length also. The Range stores indices + * of type long. + */ + static class Range implements Comparable, Writable{ + private long startIndex; + private long length; + + Range(long startIndex, long length) { + if(length<0) { + throw new RuntimeException("length can't be negative"); + } + this.startIndex = startIndex; + this.length = length; + } + + Range() { + this(0,0); + } + + /** + * Get the start index. Start index in inclusive. + * @return startIndex. + */ + long getStartIndex() { + return startIndex; + } + + /** + * Get the end index. End index is exclusive. + * @return endIndex. + */ + long getEndIndex() { + return startIndex + length; + } + + /** + * Get Length. + * @return length + */ + long getLength() { + return length; + } + + /** + * Range is empty if its length is zero. + * @return true if empty + * false otherwise. + */ + boolean isEmpty() { + return length==0; + } + + public boolean equals(Object o) { + if(o!=null && o instanceof Range) { + Range range = (Range)o; + return startIndex==range.startIndex && + length==range.length; + } + return false; + } + + public int hashCode() { + return Long.valueOf(startIndex).hashCode() + + Long.valueOf(length).hashCode(); + } + + public int compareTo(Range o) { + if(this.equals(o)) { + return 0; + } + return (this.startIndex > o.startIndex) ? 1:-1; + } + + public void readFields(DataInput in) throws IOException { + startIndex = in.readLong(); + length = in.readLong(); + } + + public void write(DataOutput out) throws IOException { + out.writeLong(startIndex); + out.writeLong(length); + } + + public String toString() { + return startIndex +":" + length; + } + } + + /** + * Index Iterator which skips the stored ranges. 
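+ *
+ * For illustration, with a single stored range 3:2 this iterator yields
+ * 0, 1, 2, 5, 6, ... (indices 3 and 4 are skipped). A sketch:
+ * <pre>
+ *   SortedRanges ranges = new SortedRanges();
+ *   ranges.add(new SortedRanges.Range(3, 2));
+ *   SortedRanges.SkipRangeIterator it = ranges.skipRangeIterator();
+ * </pre>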
+ */ + static class SkipRangeIterator implements Iterator { + Iterator rangeIterator; + Range range = new Range(); + long next = -1; + + /** + * Constructor + * @param rangeIterator the iterator which gives the ranges. + */ + SkipRangeIterator(Iterator rangeIterator) { + this.rangeIterator = rangeIterator; + doNext(); + } + + /** + * Returns true till the index reaches Long.MAX_VALUE. + * @return true next index exists. + * false otherwise. + */ + public synchronized boolean hasNext() { + return next=range.getEndIndex() && rangeIterator.hasNext()) { + range = rangeIterator.next(); + skipIfInRange(); + } + } + + private void skipIfInRange() { + if(next>=range.getStartIndex() && + nexttrue if all ranges have been skipped. + * false otherwise. + */ + synchronized boolean skippedAllRanges() { + return !rangeIterator.hasNext() && next>range.getEndIndex(); + } + + /** + * Remove is not supported. Doesn't apply. + */ + public void remove() { + throw new UnsupportedOperationException("remove not supported."); + } + + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/SpillRecord.java b/src/mapred/org/apache/hadoop/mapred/SpillRecord.java new file mode 100644 index 0000000..7595898 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/SpillRecord.java @@ -0,0 +1,153 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.LongBuffer; +import java.util.zip.CRC32; +import java.util.zip.CheckedInputStream; +import java.util.zip.CheckedOutputStream; +import java.util.zip.Checksum; + +import org.apache.hadoop.fs.ChecksumException; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.IOUtils; + +import static org.apache.hadoop.mapred.MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH; + +class SpillRecord { + + /** Backing store */ + private final ByteBuffer buf; + /** View of backing storage as longs */ + private final LongBuffer entries; + + public SpillRecord(int numPartitions) { + buf = ByteBuffer.allocate( + numPartitions * MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH); + entries = buf.asLongBuffer(); + } + + public SpillRecord(Path indexFileName, JobConf job) throws IOException { + this(indexFileName, job, new CRC32()); + } + + public SpillRecord(Path indexFileName, JobConf job, Checksum crc) + throws IOException { + + final FileSystem rfs = FileSystem.getLocal(job).getRaw(); + final FSDataInputStream in = rfs.open(indexFileName); + try { + final long length = rfs.getFileStatus(indexFileName).getLen(); + final int partitions = (int) length / MAP_OUTPUT_INDEX_RECORD_LENGTH; + final int size = partitions * MAP_OUTPUT_INDEX_RECORD_LENGTH; + + buf = ByteBuffer.allocate(size); + if (crc != null) { + crc.reset(); + CheckedInputStream chk = new CheckedInputStream(in, crc); + IOUtils.readFully(chk, buf.array(), 0, size); + if (chk.getChecksum().getValue() != in.readLong()) { + throw new ChecksumException("Checksum error reading spill index: " + + indexFileName, -1); + } + } else { + IOUtils.readFully(in, buf.array(), 0, size); + } + entries = buf.asLongBuffer(); + } finally { + in.close(); + } + } + + /** + * Return number of IndexRecord entries in this spill. + */ + public int size() { + return entries.capacity() / (MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH / 8); + } + + /** + * Get spill offsets for given partition. + */ + public IndexRecord getIndex(int partition) { + final int pos = partition * MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH / 8; + return new IndexRecord(entries.get(pos), entries.get(pos + 1), + entries.get(pos + 2)); + } + + /** + * Set spill offsets for given partition. + */ + public void putIndex(IndexRecord rec, int partition) { + final int pos = partition * MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH / 8; + entries.put(pos, rec.startOffset); + entries.put(pos + 1, rec.rawLength); + entries.put(pos + 2, rec.partLength); + } + + /** + * Write this spill record to the location provided. 
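+ *
+ * For illustration, a round trip under assumed names (numPartitions, rawLen,
+ * partLen, indexPath and job are not defined here):
+ * <pre>
+ *   SpillRecord rec = new SpillRecord(numPartitions);
+ *   rec.putIndex(new IndexRecord(0L, rawLen, partLen), 0);
+ *   rec.writeToFile(indexPath, job);
+ *   SpillRecord readBack = new SpillRecord(indexPath, job); // checks the CRC
+ * </pre>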
+ */ + public void writeToFile(Path loc, JobConf job) + throws IOException { + writeToFile(loc, job, new CRC32()); + } + + public void writeToFile(Path loc, JobConf job, Checksum crc) + throws IOException { + final FileSystem rfs = FileSystem.getLocal(job).getRaw(); + CheckedOutputStream chk = null; + final FSDataOutputStream out = rfs.create(loc); + try { + if (crc != null) { + crc.reset(); + chk = new CheckedOutputStream(out, crc); + chk.write(buf.array()); + out.writeLong(chk.getChecksum().getValue()); + } else { + out.write(buf.array()); + } + } finally { + if (chk != null) { + chk.close(); + } else { + out.close(); + } + } + } + +} + +class IndexRecord { + long startOffset; + long rawLength; + long partLength; + + public IndexRecord() { } + + public IndexRecord(long startOffset, long rawLength, long partLength) { + this.startOffset = startOffset; + this.rawLength = rawLength; + this.partLength = partLength; + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/StatisticsCollector.java b/src/mapred/org/apache/hadoop/mapred/StatisticsCollector.java new file mode 100644 index 0000000..dc557ad --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/StatisticsCollector.java @@ -0,0 +1,294 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.Map; +import java.util.Timer; +import java.util.TimerTask; + +import org.apache.hadoop.mapred.StatisticsCollector.Stat.TimeStat; + +/** + * Collects the statistics in time windows. 
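+ *
+ * A usage sketch, for illustration only (the stat name is arbitrary):
+ * <pre>
+ *   StatisticsCollector collector = new StatisticsCollector();
+ *   collector.start();
+ *   StatisticsCollector.Stat maps = collector.createStat("maps_launched");
+ *   maps.inc();
+ * </pre>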
+ */ +class StatisticsCollector { + + private static final int DEFAULT_PERIOD = 5; + + static final TimeWindow + SINCE_START = new TimeWindow("Since Start", -1, -1); + + static final TimeWindow + LAST_WEEK = new TimeWindow("Last Week", 7 * 24 * 60 * 60, 60 * 60); + + static final TimeWindow + LAST_DAY = new TimeWindow("Last Day", 24 * 60 * 60, 60 * 60); + + static final TimeWindow + LAST_HOUR = new TimeWindow("Last Hour", 60 * 60, 60); + + static final TimeWindow + LAST_MINUTE = new TimeWindow("Last Minute", 60, 10); + + static final TimeWindow[] DEFAULT_COLLECT_WINDOWS = { + StatisticsCollector.SINCE_START, + StatisticsCollector.LAST_DAY, + StatisticsCollector.LAST_HOUR + }; + + private final int period; + private boolean started; + + private final Map updaters = + new LinkedHashMap(); + private final Map statistics = new HashMap(); + + StatisticsCollector() { + this(DEFAULT_PERIOD); + } + + StatisticsCollector(int period) { + this.period = period; + } + + synchronized void start() { + if (started) { + return; + } + Timer timer = new Timer("Timer thread for monitoring ", true); + TimerTask task = new TimerTask() { + public void run() { + update(); + } + }; + long millis = period * 1000; + timer.scheduleAtFixedRate(task, millis, millis); + started = true; + } + + protected synchronized void update() { + for (StatUpdater c : updaters.values()) { + c.update(); + } + } + + Map getUpdaters() { + return Collections.unmodifiableMap(updaters); + } + + Map getStatistics() { + return Collections.unmodifiableMap(statistics); + } + + synchronized Stat createStat(String name) { + return createStat(name, DEFAULT_COLLECT_WINDOWS); + } + + synchronized Stat createStat(String name, TimeWindow[] windows) { + if (statistics.get(name) != null) { + throw new RuntimeException("Stat with name "+ name + + " is already defined"); + } + Map timeStats = + new LinkedHashMap(); + for (TimeWindow window : windows) { + StatUpdater collector = updaters.get(window); + if (collector == null) { + if(SINCE_START.equals(window)) { + collector = new StatUpdater(); + } else { + collector = new TimeWindowStatUpdater(window, period); + } + updaters.put(window, collector); + } + TimeStat timeStat = new TimeStat(); + collector.addTimeStat(name, timeStat); + timeStats.put(window, timeStat); + } + + Stat stat = new Stat(name, timeStats); + statistics.put(name, stat); + return stat; + } + + synchronized Stat removeStat(String name) { + Stat stat = statistics.remove(name); + if (stat != null) { + for (StatUpdater collector : updaters.values()) { + collector.removeTimeStat(name); + } + } + return stat; + } + + static class TimeWindow { + final String name; + final int windowSize; + final int updateGranularity; + TimeWindow(String name, int windowSize, int updateGranularity) { + if (updateGranularity > windowSize) { + throw new RuntimeException( + "Invalid TimeWindow: updateGranularity > windowSize"); + } + this.name = name; + this.windowSize = windowSize; + this.updateGranularity = updateGranularity; + } + + public int hashCode() { + return name.hashCode() + updateGranularity + windowSize; + } + + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + final TimeWindow other = (TimeWindow) obj; + if (name == null) { + if (other.name != null) + return false; + } else if (!name.equals(other.name)) + return false; + if (updateGranularity != other.updateGranularity) + return false; + if (windowSize != other.windowSize) + return false; + 
return true; + } + } + + static class Stat { + final String name; + private Map timeStats; + + private Stat(String name, Map timeStats) { + this.name = name; + this.timeStats = timeStats; + } + + public synchronized void inc(int incr) { + for (TimeStat ts : timeStats.values()) { + ts.inc(incr); + } + } + + public synchronized void inc() { + inc(1); + } + + public synchronized Map getValues() { + return Collections.unmodifiableMap(timeStats); + } + + static class TimeStat { + private final LinkedList buckets = new LinkedList(); + private int value; + private int currentValue; + + public synchronized int getValue() { + return value; + } + + private synchronized void inc(int i) { + currentValue += i; + } + + private synchronized void addBucket() { + buckets.addLast(currentValue); + setValueToCurrent(); + } + + private synchronized void setValueToCurrent() { + value += currentValue; + currentValue = 0; + } + + private synchronized void removeBucket() { + int removed = buckets.removeFirst(); + value -= removed; + } + } + } + + private static class StatUpdater { + + protected final Map statToCollect = + new HashMap(); + + synchronized void addTimeStat(String name, TimeStat s) { + statToCollect.put(name, s); + } + + synchronized TimeStat removeTimeStat(String name) { + return statToCollect.remove(name); + } + + synchronized void update() { + for (TimeStat stat : statToCollect.values()) { + stat.setValueToCurrent(); + } + } + } + + /** + * Updates TimeWindow statistics in buckets. + * + */ + private static class TimeWindowStatUpdater extends StatUpdater{ + + final int collectBuckets; + final int updatesPerBucket; + + private int updates; + private int buckets; + + TimeWindowStatUpdater(TimeWindow w, int updatePeriod) { + if (updatePeriod > w.updateGranularity) { + throw new RuntimeException( + "Invalid conf: updatePeriod > updateGranularity"); + } + collectBuckets = w.windowSize / w.updateGranularity; + updatesPerBucket = w.updateGranularity / updatePeriod; + } + + synchronized void update() { + updates++; + if (updates == updatesPerBucket) { + for(TimeStat stat : statToCollect.values()) { + stat.addBucket(); + } + updates = 0; + buckets++; + if (buckets > collectBuckets) { + for (TimeStat stat : statToCollect.values()) { + stat.removeBucket(); + } + buckets--; + } + } + } + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/TIPStatus.java b/src/mapred/org/apache/hadoop/mapred/TIPStatus.java new file mode 100644 index 0000000..0ce4424 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TIPStatus.java @@ -0,0 +1,24 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +/** The states of a {@link TaskInProgress} as seen by the JobTracker. 
+ */ +public enum TIPStatus { + PENDING, RUNNING, COMPLETE, KILLED, FAILED; +} \ No newline at end of file diff --git a/src/mapred/org/apache/hadoop/mapred/Task.java b/src/mapred/org/apache/hadoop/mapred/Task.java new file mode 100644 index 0000000..a98fc7d --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/Task.java @@ -0,0 +1,1328 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.text.NumberFormat; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocalDirAllocator; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.FileSystem.Statistics; +import org.apache.hadoop.io.BytesWritable; +import org.apache.hadoop.io.DataInputBuffer; +import org.apache.hadoop.io.RawComparator; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.io.serializer.Deserializer; +import org.apache.hadoop.io.serializer.SerializationFactory; +import org.apache.hadoop.mapred.IFile.Writer; +import org.apache.hadoop.mapreduce.JobStatus; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.util.Progress; +import org.apache.hadoop.util.Progressable; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.ResourceCalculatorPlugin; +import org.apache.hadoop.util.ResourceCalculatorPlugin.*; + +/** + * Base class for tasks. + * + * This is NOT a public interface. + */ +abstract public class Task implements Writable, Configurable { + private static final Log LOG = + LogFactory.getLog(Task.class); + + // Counters used by Task subclasses + public static enum Counter { + MAP_INPUT_RECORDS, + MAP_OUTPUT_RECORDS, + MAP_SKIPPED_RECORDS, + MAP_INPUT_BYTES, + MAP_OUTPUT_BYTES, + COMBINE_INPUT_RECORDS, + COMBINE_OUTPUT_RECORDS, + REDUCE_INPUT_GROUPS, + REDUCE_SHUFFLE_BYTES, + REDUCE_INPUT_RECORDS, + REDUCE_OUTPUT_RECORDS, + REDUCE_SKIPPED_GROUPS, + REDUCE_SKIPPED_RECORDS, + SPILLED_RECORDS, + CPU_MILLISECONDS, + PHYSICAL_MEMORY_BYTES, + VIRTUAL_MEMORY_BYTES + } + + /** + * Counters to measure the usage of the different file systems. + * Always return the String array with two elements. 
First one is the name of + * BYTES_READ counter and second one is of the BYTES_WRITTEN counter. + */ + protected static String[] getFileSystemCounterNames(String uriScheme) { + String scheme = uriScheme.toUpperCase(); + return new String[]{scheme+"_BYTES_READ", scheme+"_BYTES_WRITTEN"}; + } + + /** + * Name of the FileSystem counters' group + */ + protected static final String FILESYSTEM_COUNTER_GROUP = "FileSystemCounters"; + + /////////////////////////////////////////////////////////// + // Helper methods to construct task-output paths + /////////////////////////////////////////////////////////// + + /** Construct output file names so that, when an output directory listing is + * sorted lexicographically, positions correspond to output partitions.*/ + private static final NumberFormat NUMBER_FORMAT = NumberFormat.getInstance(); + static { + NUMBER_FORMAT.setMinimumIntegerDigits(5); + NUMBER_FORMAT.setGroupingUsed(false); + } + + static synchronized String getOutputName(int partition) { + return "part-" + NUMBER_FORMAT.format(partition); + } + + //////////////////////////////////////////// + // Fields + //////////////////////////////////////////// + + private String jobFile; // job configuration file + private TaskAttemptID taskId; // unique, includes job id + private int partition; // id within job + TaskStatus taskStatus; // current status of the task + protected JobStatus.State jobRunStateForCleanup; + protected boolean jobCleanup = false; + protected boolean jobSetup = false; + protected boolean taskCleanup = false; + + //skip ranges based on failed ranges from previous attempts + private SortedRanges skipRanges = new SortedRanges(); + private boolean skipping = false; + private boolean writeSkipRecs = true; + + //currently processing record start index + private volatile long currentRecStartIndex; + private Iterator currentRecIndexIterator = + skipRanges.skipRangeIterator(); + + protected JobConf conf; + protected MapOutputFile mapOutputFile = new MapOutputFile(); + protected LocalDirAllocator lDirAlloc; + private final static int MAX_RETRIES = 10; + protected JobContext jobContext; + protected TaskAttemptContext taskContext; + protected org.apache.hadoop.mapreduce.OutputFormat outputFormat; + protected org.apache.hadoop.mapreduce.OutputCommitter committer; + protected String username; + protected final Counters.Counter spilledRecordsCounter; + private int numSlotsRequired; + private String pidFile = ""; + protected TaskUmbilicalProtocol umbilical; + private ResourceCalculatorPlugin resourceCalculator = null; + private long initCpuCumulativeTime = 0; + + // An opaque data field used to attach extra data to each task. This is used + // by the Hadoop scheduler for Mesos to associate a Mesos task ID with each + // task and recover these IDs on the TaskTracker. 
+ protected BytesWritable extraData = new BytesWritable(); + + //////////////////////////////////////////// + // Constructors + //////////////////////////////////////////// + + public Task() { + taskStatus = TaskStatus.createTaskStatus(isMapTask()); + taskId = new TaskAttemptID(); + spilledRecordsCounter = counters.findCounter(Counter.SPILLED_RECORDS); + } + + public Task(String jobFile, TaskAttemptID taskId, int partition, + int numSlotsRequired, String username) { + this.username = username; + this.jobFile = jobFile; + this.taskId = taskId; + + this.partition = partition; + this.numSlotsRequired = numSlotsRequired; + this.taskStatus = TaskStatus.createTaskStatus(isMapTask(), this.taskId, + 0.0f, numSlotsRequired, + TaskStatus.State.UNASSIGNED, + "", "", "", + isMapTask() ? + TaskStatus.Phase.MAP : + TaskStatus.Phase.SHUFFLE, + counters); + this.mapOutputFile.setJobId(taskId.getJobID()); + spilledRecordsCounter = counters.findCounter(Counter.SPILLED_RECORDS); + } + + //////////////////////////////////////////// + // Accessors + //////////////////////////////////////////// + public void setJobFile(String jobFile) { this.jobFile = jobFile; } + public String getJobFile() { return jobFile; } + public TaskAttemptID getTaskID() { return taskId; } + public int getNumSlotsRequired() { + return numSlotsRequired; + } + + Counters getCounters() { return counters; } + + + /** + * Get the job name for this task. + * @return the job name + */ + public JobID getJobID() { + return taskId.getJobID(); + } + + /** + * Get the index of this task within the job. + * @return the integer part of the task id + */ + public int getPartition() { + return partition; + } + /** + * Return current phase of the task. + * needs to be synchronized as communication thread sends the phase every second + * @return the curent phase of the task + */ + public synchronized TaskStatus.Phase getPhase(){ + return this.taskStatus.getPhase(); + } + /** + * Set current phase of the task. + * @param phase task phase + */ + protected synchronized void setPhase(TaskStatus.Phase phase){ + this.taskStatus.setPhase(phase); + } + + /** + * Get whether to write skip records. + */ + protected boolean toWriteSkipRecs() { + return writeSkipRecs; + } + + /** + * Set whether to write skip records. + */ + protected void setWriteSkipRecs(boolean writeSkipRecs) { + this.writeSkipRecs = writeSkipRecs; + } + + /** + * Report a fatal error to the parent (task) tracker. + */ + protected void reportFatalError(TaskAttemptID id, Throwable throwable, + String logMsg) { + LOG.fatal(logMsg); + Throwable tCause = throwable.getCause(); + String cause = tCause == null + ? StringUtils.stringifyException(throwable) + : StringUtils.stringifyException(tCause); + try { + umbilical.fatalError(id, cause); + } catch (IOException ioe) { + LOG.fatal("Failed to contact the tasktracker", ioe); + System.exit(-1); + } + } + + /** + * Get skipRanges. + */ + public SortedRanges getSkipRanges() { + return skipRanges; + } + + /** + * Set skipRanges. + */ + public void setSkipRanges(SortedRanges skipRanges) { + this.skipRanges = skipRanges; + } + + /** + * Is Task in skipping mode. + */ + public boolean isSkipping() { + return skipping; + } + + /** + * Sets whether to run Task in skipping mode. + * @param skipping + */ + public void setSkipping(boolean skipping) { + this.skipping = skipping; + } + + /** + * Return current state of the task. 
+ * needs to be synchronized as communication thread + * sends the state every second + * @return + */ + synchronized TaskStatus.State getState(){ + return this.taskStatus.getRunState(); + } + /** + * Set current state of the task. + * @param state + */ + synchronized void setState(TaskStatus.State state){ + this.taskStatus.setRunState(state); + } + + void setTaskCleanupTask() { + taskCleanup = true; + } + + boolean isTaskCleanupTask() { + return taskCleanup; + } + + boolean isJobCleanupTask() { + return jobCleanup; + } + + boolean isJobAbortTask() { + // the task is an abort task if its marked for cleanup and the final + // expected state is either failed or killed. + return isJobCleanupTask() + && (jobRunStateForCleanup == JobStatus.State.KILLED + || jobRunStateForCleanup == JobStatus.State.FAILED); + } + + boolean isJobSetupTask() { + return jobSetup; + } + + void setJobSetupTask() { + jobSetup = true; + } + + void setJobCleanupTask() { + jobCleanup = true; + } + + /** + * Sets the task to do job abort in the cleanup. + * @param status the final runstate of the job + */ + void setJobCleanupTaskState(JobStatus.State status) { + jobRunStateForCleanup = status; + } + + boolean isMapOrReduce() { + return !jobSetup && !jobCleanup && !taskCleanup; + } + + String getUser() { + return username; + } + //////////////////////////////////////////// + // Writable methods + //////////////////////////////////////////// + + public void write(DataOutput out) throws IOException { + Text.writeString(out, jobFile); + taskId.write(out); + out.writeInt(partition); + out.writeInt(numSlotsRequired); + taskStatus.write(out); + skipRanges.write(out); + out.writeBoolean(skipping); + out.writeBoolean(jobCleanup); + if (jobCleanup) { + WritableUtils.writeEnum(out, jobRunStateForCleanup); + } + out.writeBoolean(jobSetup); + Text.writeString(out, username); + out.writeBoolean(writeSkipRecs); + out.writeBoolean(taskCleanup); + extraData.write(out); + } + + public void readFields(DataInput in) throws IOException { + jobFile = Text.readString(in); + taskId = TaskAttemptID.read(in); + partition = in.readInt(); + numSlotsRequired = in.readInt(); + taskStatus.readFields(in); + this.mapOutputFile.setJobId(taskId.getJobID()); + skipRanges.readFields(in); + currentRecIndexIterator = skipRanges.skipRangeIterator(); + currentRecStartIndex = currentRecIndexIterator.next(); + skipping = in.readBoolean(); + jobCleanup = in.readBoolean(); + if (jobCleanup) { + jobRunStateForCleanup = + WritableUtils.readEnum(in, JobStatus.State.class); + } + jobSetup = in.readBoolean(); + username = Text.readString(in); + writeSkipRecs = in.readBoolean(); + taskCleanup = in.readBoolean(); + if (taskCleanup) { + setPhase(TaskStatus.Phase.CLEANUP); + } + extraData.readFields(in); + } + + @Override + public String toString() { return taskId.toString(); } + + /** + * Localize the given JobConf to be specific for this task. + */ + public void localizeConfiguration(JobConf conf) throws IOException { + conf.set("mapred.tip.id", taskId.getTaskID().toString()); + conf.set("mapred.task.id", taskId.toString()); + conf.setBoolean("mapred.task.is.map", isMapTask()); + conf.setInt("mapred.task.partition", partition); + conf.set("mapred.job.id", taskId.getJobID().toString()); + } + + /** Run this task as a part of the named job. This method is executed in the + * child process and is what invokes user-supplied map, reduce, etc. methods. 
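+ *
+ * <p>A skeletal override, shown only as a sketch of the expected flow; real
+ * subclasses such as MapTask and ReduceTask do considerably more:
+ * <pre>
+ *   public void run(JobConf job, TaskUmbilicalProtocol umbilical)
+ *       throws IOException, ClassNotFoundException, InterruptedException {
+ *     TaskReporter reporter = new TaskReporter(getProgress(), umbilical);
+ *     reporter.startCommunicationThread();
+ *     initialize(job, getJobID(), reporter, job.getUseNewMapper());
+ *     // ... do the actual work, calling reporter.progress() as it goes ...
+ *     done(umbilical, reporter);
+ *   }
+ * </pre>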
+ * @param umbilical for progress reports + */ + public abstract void run(JobConf job, TaskUmbilicalProtocol umbilical) + throws IOException, ClassNotFoundException, InterruptedException; + + + /** Return an approprate thread runner for this task. + * @param tip TODO*/ + public abstract TaskRunner createRunner(TaskTracker tracker, + TaskTracker.TaskInProgress tip) throws IOException; + + /** The number of milliseconds between progress reports. */ + public static final int PROGRESS_INTERVAL = 3000; + + private transient Progress taskProgress = new Progress(); + + // Current counters + private transient Counters counters = new Counters(); + + /* flag to track whether task is done */ + private AtomicBoolean taskDone = new AtomicBoolean(false); + + public abstract boolean isMapTask(); + + public Progress getProgress() { return taskProgress; } + + public void initialize(JobConf job, JobID id, + Reporter reporter, + boolean useNewApi) throws IOException, + ClassNotFoundException, + InterruptedException { + jobContext = new JobContext(job, id, reporter); + taskContext = new TaskAttemptContext(job, taskId, reporter); + if (getState() == TaskStatus.State.UNASSIGNED) { + setState(TaskStatus.State.RUNNING); + } + if (useNewApi) { + LOG.debug("using new api for output committer"); + outputFormat = + ReflectionUtils.newInstance(taskContext.getOutputFormatClass(), job); + committer = outputFormat.getOutputCommitter(taskContext); + } else { + committer = conf.getOutputCommitter(); + } + Path outputPath = FileOutputFormat.getOutputPath(conf); + if (outputPath != null) { + if ((committer instanceof FileOutputCommitter)) { + FileOutputFormat.setWorkOutputPath(conf, + ((FileOutputCommitter)committer).getTempTaskOutputPath(taskContext)); + } else { + FileOutputFormat.setWorkOutputPath(conf, outputPath); + } + } + committer.setupTask(taskContext); + Class clazz = conf.getClass( + TaskTracker.MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY, + null, ResourceCalculatorPlugin.class); + resourceCalculator = ResourceCalculatorPlugin + .getResourceCalculatorPlugin(clazz, conf); + LOG.info(" Using ResourceCalculatorPlugin : " + resourceCalculator); + if (resourceCalculator != null) { + initCpuCumulativeTime = + resourceCalculator.getProcResourceValues().getCumulativeCpuTime(); + } + } + + protected class TaskReporter + extends org.apache.hadoop.mapreduce.StatusReporter + implements Runnable, Reporter { + private TaskUmbilicalProtocol umbilical; + private InputSplit split = null; + private Progress taskProgress; + private Thread pingThread = null; + /** + * flag that indicates whether progress update needs to be sent to parent. + * If true, it has been set. If false, it has been reset. + * Using AtomicBoolean since we need an atomic read & reset method. 
+ */ + private AtomicBoolean progressFlag = new AtomicBoolean(false); + + TaskReporter(Progress taskProgress, + TaskUmbilicalProtocol umbilical) { + this.umbilical = umbilical; + this.taskProgress = taskProgress; + } + // getters and setters for flag + void setProgressFlag() { + progressFlag.set(true); + } + boolean resetProgressFlag() { + return progressFlag.getAndSet(false); + } + public void setStatus(String status) { + taskProgress.setStatus(status); + // indicate that progress update needs to be sent + setProgressFlag(); + } + public void setProgress(float progress) { + taskProgress.set(progress); + // indicate that progress update needs to be sent + setProgressFlag(); + } + public void progress() { + // indicate that progress update needs to be sent + setProgressFlag(); + } + public Counters.Counter getCounter(String group, String name) { + Counters.Counter counter = null; + if (counters != null) { + counter = counters.findCounter(group, name); + } + return counter; + } + public Counters.Counter getCounter(Enum name) { + return counters == null ? null : counters.findCounter(name); + } + public void incrCounter(Enum key, long amount) { + if (counters != null) { + counters.incrCounter(key, amount); + } + setProgressFlag(); + } + public void incrCounter(String group, String counter, long amount) { + if (counters != null) { + counters.incrCounter(group, counter, amount); + } + if(skipping && SkipBadRecords.COUNTER_GROUP.equals(group) && ( + SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS.equals(counter) || + SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS.equals(counter))) { + //if application reports the processed records, move the + //currentRecStartIndex to the next. + //currentRecStartIndex is the start index which has not yet been + //finished and is still in task's stomach. + for(int i=0;i URIScheme and value->FileSystemStatisticUpdater + */ + private Map statisticUpdaters = + new HashMap(); + + private synchronized void updateCounters() { + for(Statistics stat: FileSystem.getAllStatistics()) { + String uriScheme = stat.getScheme(); + FileSystemStatisticUpdater updater = statisticUpdaters.get(uriScheme); + if(updater==null) {//new FileSystem has been found in the cache + updater = new FileSystemStatisticUpdater(uriScheme, stat); + statisticUpdaters.put(uriScheme, updater); + } + updater.updateCounters(); + } + updateResourceCounters(); + } + + public void done(TaskUmbilicalProtocol umbilical, + TaskReporter reporter + ) throws IOException, InterruptedException { + LOG.info("Task:" + taskId + " is done." + + " And is in the process of commiting"); + updateCounters(); + + // check whether the commit is required. 
+ boolean commitRequired = committer.needsTaskCommit(taskContext); + if (commitRequired) { + int retries = MAX_RETRIES; + setState(TaskStatus.State.COMMIT_PENDING); + // say the task tracker that task is commit pending + while (true) { + try { + umbilical.commitPending(taskId, taskStatus); + break; + } catch (InterruptedException ie) { + // ignore + } catch (IOException ie) { + LOG.warn("Failure sending commit pending: " + + StringUtils.stringifyException(ie)); + if (--retries == 0) { + System.exit(67); + } + } + } + //wait for commit approval and commit + commit(umbilical, reporter, committer); + } + taskDone.set(true); + reporter.stopCommunicationThread(); + sendLastUpdate(umbilical); + //signal the tasktracker that we are done + sendDone(umbilical); + } + + protected void statusUpdate(TaskUmbilicalProtocol umbilical) + throws IOException { + int retries = MAX_RETRIES; + while (true) { + try { + if (!umbilical.statusUpdate(getTaskID(), taskStatus)) { + LOG.warn("Parent died. Exiting "+taskId); + System.exit(66); + } + taskStatus.clearStatus(); + return; + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); // interrupt ourself + } catch (IOException ie) { + LOG.warn("Failure sending status update: " + + StringUtils.stringifyException(ie)); + if (--retries == 0) { + throw ie; + } + } + } + } + + private void sendLastUpdate(TaskUmbilicalProtocol umbilical) + throws IOException { + // send a final status report + taskStatus.statusUpdate(taskProgress.get(), + taskProgress.toString(), + counters); + statusUpdate(umbilical); + } + + private void sendDone(TaskUmbilicalProtocol umbilical) throws IOException { + int retries = MAX_RETRIES; + while (true) { + try { + umbilical.done(getTaskID()); + LOG.info("Task '" + taskId + "' done."); + return; + } catch (IOException ie) { + LOG.warn("Failure signalling completion: " + + StringUtils.stringifyException(ie)); + if (--retries == 0) { + throw ie; + } + } + } + } + + private void commit(TaskUmbilicalProtocol umbilical, + TaskReporter reporter, + org.apache.hadoop.mapreduce.OutputCommitter committer + ) throws IOException { + int retries = MAX_RETRIES; + while (true) { + try { + while (!umbilical.canCommit(taskId)) { + try { + Thread.sleep(1000); + } catch(InterruptedException ie) { + //ignore + } + reporter.setProgressFlag(); + } + break; + } catch (IOException ie) { + LOG.warn("Failure asking whether task can commit: " + + StringUtils.stringifyException(ie)); + if (--retries == 0) { + //if it couldn't query successfully then delete the output + discardOutput(taskContext); + System.exit(68); + } + } + } + + // task can Commit now + try { + LOG.info("Task " + taskId + " is allowed to commit now"); + committer.commitTask(taskContext); + return; + } catch (IOException iee) { + LOG.warn("Failure committing: " + + StringUtils.stringifyException(iee)); + //if it couldn't commit a successfully then delete the output + discardOutput(taskContext); + throw iee; + } + } + + private + void discardOutput(TaskAttemptContext taskContext) { + try { + committer.abortTask(taskContext); + } catch (IOException ioe) { + LOG.warn("Failure cleaning up: " + + StringUtils.stringifyException(ioe)); + } + } + + protected void runTaskCleanupTask(TaskUmbilicalProtocol umbilical, + TaskReporter reporter) + throws IOException, InterruptedException { + taskCleanup(umbilical); + done(umbilical, reporter); + } + + void taskCleanup(TaskUmbilicalProtocol umbilical) + throws IOException { + // set phase for this task + setPhase(TaskStatus.Phase.CLEANUP); + 
getProgress().setStatus("cleanup"); + statusUpdate(umbilical); + LOG.info("Runnning cleanup for the task"); + // do the cleanup + committer.abortTask(taskContext); + } + + protected void runJobCleanupTask(TaskUmbilicalProtocol umbilical, + TaskReporter reporter + ) throws IOException, InterruptedException { + // set phase for this task + setPhase(TaskStatus.Phase.CLEANUP); + getProgress().setStatus("cleanup"); + statusUpdate(umbilical); + // do the cleanup + LOG.info("Cleaning up job"); + if (jobRunStateForCleanup == JobStatus.State.FAILED + || jobRunStateForCleanup == JobStatus.State.KILLED) { + LOG.info("Aborting job with runstate : " + jobRunStateForCleanup); + committer.abortJob(jobContext, jobRunStateForCleanup); + } else if (jobRunStateForCleanup == JobStatus.State.SUCCEEDED){ + LOG.info("Committing job"); + committer.commitJob(jobContext); + } else { + throw new IOException("Invalid state of the job for cleanup. State found " + + jobRunStateForCleanup + " expecting " + + JobStatus.State.SUCCEEDED + ", " + + JobStatus.State.FAILED + " or " + + JobStatus.State.KILLED); + } + done(umbilical, reporter); + } + + protected void runJobSetupTask(TaskUmbilicalProtocol umbilical, + TaskReporter reporter + ) throws IOException, InterruptedException { + // do the setup + getProgress().setStatus("setup"); + committer.setupJob(jobContext); + done(umbilical, reporter); + } + + public void setConf(Configuration conf) { + if (conf instanceof JobConf) { + this.conf = (JobConf) conf; + } else { + this.conf = new JobConf(conf); + } + this.mapOutputFile.setConf(this.conf); + this.lDirAlloc = new LocalDirAllocator("mapred.local.dir"); + // add the static resolutions (this is required for the junit to + // work on testcases that simulate multiple nodes on a single physical + // node. + String hostToResolved[] = conf.getStrings("hadoop.net.static.resolutions"); + if (hostToResolved != null) { + for (String str : hostToResolved) { + String name = str.substring(0, str.indexOf('=')); + String resolvedName = str.substring(str.indexOf('=') + 1); + NetUtils.addStaticResolution(name, resolvedName); + } + } + } + + public Configuration getConf() { + return this.conf; + } + + /** + * OutputCollector for the combiner. + */ + protected static class CombineOutputCollector + implements OutputCollector { + private Writer writer; + private Counters.Counter outCounter; + public CombineOutputCollector(Counters.Counter outCounter) { + this.outCounter = outCounter; + } + public synchronized void setWriter(Writer writer) { + this.writer = writer; + } + public synchronized void collect(K key, V value) + throws IOException { + outCounter.increment(1); + writer.append(key, value); + } + } + + /** Iterates values while keys match in sorted input. 
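+ * Typical use, as in the combiner runners below: while {@link #more()} is
+ * true, read {@link #getKey()} and consume its values through the standard
+ * {@link java.util.Iterator} methods, then advance with {@link #nextKey()}.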
*/ + static class ValuesIterator implements Iterator { + protected RawKeyValueIterator in; //input iterator + private KEY key; // current key + private KEY nextKey; + private VALUE value; // current value + private boolean hasNext; // more w/ this key + private boolean more; // more in file + private RawComparator comparator; + protected Progressable reporter; + private Deserializer keyDeserializer; + private Deserializer valDeserializer; + private DataInputBuffer keyIn = new DataInputBuffer(); + private DataInputBuffer valueIn = new DataInputBuffer(); + + public ValuesIterator (RawKeyValueIterator in, + RawComparator comparator, + Class keyClass, + Class valClass, Configuration conf, + Progressable reporter) + throws IOException { + this.in = in; + this.comparator = comparator; + this.reporter = reporter; + SerializationFactory serializationFactory = new SerializationFactory(conf); + this.keyDeserializer = serializationFactory.getDeserializer(keyClass); + this.keyDeserializer.open(keyIn); + this.valDeserializer = serializationFactory.getDeserializer(valClass); + this.valDeserializer.open(this.valueIn); + readNextKey(); + key = nextKey; + nextKey = null; // force new instance creation + hasNext = more; + } + + RawKeyValueIterator getRawIterator() { return in; } + + /// Iterator methods + + public boolean hasNext() { return hasNext; } + + private int ctr = 0; + public VALUE next() { + if (!hasNext) { + throw new NoSuchElementException("iterate past last value"); + } + try { + readNextValue(); + readNextKey(); + } catch (IOException ie) { + throw new RuntimeException("problem advancing post rec#"+ctr, ie); + } + reporter.progress(); + return value; + } + + public void remove() { throw new RuntimeException("not implemented"); } + + /// Auxiliary methods + + /** Start processing next unique key. */ + void nextKey() throws IOException { + // read until we find a new key + while (hasNext) { + readNextKey(); + } + ++ctr; + + // move the next key to the current one + KEY tmpKey = key; + key = nextKey; + nextKey = tmpKey; + hasNext = more; + } + + /** True iff more keys remain. */ + boolean more() { + return more; + } + + /** The current key. 
*/ + KEY getKey() { + return key; + } + + /** + * read the next key + */ + private void readNextKey() throws IOException { + more = in.next(); + if (more) { + DataInputBuffer nextKeyBytes = in.getKey(); + keyIn.reset(nextKeyBytes.getData(), nextKeyBytes.getPosition(), nextKeyBytes.getLength()); + nextKey = keyDeserializer.deserialize(nextKey); + hasNext = key != null && (comparator.compare(key, nextKey) == 0); + } else { + hasNext = false; + } + } + + /** + * Read the next value + * @throws IOException + */ + private void readNextValue() throws IOException { + DataInputBuffer nextValueBytes = in.getValue(); + valueIn.reset(nextValueBytes.getData(), nextValueBytes.getPosition(), nextValueBytes.getLength()); + value = valDeserializer.deserialize(value); + } + } + + protected static class CombineValuesIterator + extends ValuesIterator { + + private final Counters.Counter combineInputCounter; + + public CombineValuesIterator(RawKeyValueIterator in, + RawComparator comparator, Class keyClass, + Class valClass, Configuration conf, Reporter reporter, + Counters.Counter combineInputCounter) throws IOException { + super(in, comparator, keyClass, valClass, conf, reporter); + this.combineInputCounter = combineInputCounter; + } + + public VALUE next() { + combineInputCounter.increment(1); + return super.next(); + } + } + + private static final Constructor + contextConstructor; + static { + try { + contextConstructor = + org.apache.hadoop.mapreduce.Reducer.Context.class.getConstructor + (new Class[]{org.apache.hadoop.mapreduce.Reducer.class, + Configuration.class, + org.apache.hadoop.mapreduce.TaskAttemptID.class, + RawKeyValueIterator.class, + org.apache.hadoop.mapreduce.Counter.class, + org.apache.hadoop.mapreduce.Counter.class, + org.apache.hadoop.mapreduce.RecordWriter.class, + org.apache.hadoop.mapreduce.OutputCommitter.class, + org.apache.hadoop.mapreduce.StatusReporter.class, + RawComparator.class, + Class.class, + Class.class}); + } catch (NoSuchMethodException nme) { + throw new IllegalArgumentException("Can't find constructor"); + } + } + + @SuppressWarnings("unchecked") + protected static + org.apache.hadoop.mapreduce.Reducer.Context + createReduceContext(org.apache.hadoop.mapreduce.Reducer + reducer, + Configuration job, + org.apache.hadoop.mapreduce.TaskAttemptID taskId, + RawKeyValueIterator rIter, + org.apache.hadoop.mapreduce.Counter inputKeyCounter, + org.apache.hadoop.mapreduce.Counter inputValueCounter, + org.apache.hadoop.mapreduce.RecordWriter output, + org.apache.hadoop.mapreduce.OutputCommitter committer, + org.apache.hadoop.mapreduce.StatusReporter reporter, + RawComparator comparator, + Class keyClass, Class valueClass + ) throws IOException, ClassNotFoundException { + try { + + return contextConstructor.newInstance(reducer, job, taskId, + rIter, inputKeyCounter, + inputValueCounter, output, + committer, reporter, comparator, + keyClass, valueClass); + } catch (InstantiationException e) { + throw new IOException("Can't create Context", e); + } catch (InvocationTargetException e) { + throw new IOException("Can't invoke Context constructor", e); + } catch (IllegalAccessException e) { + throw new IOException("Can't invoke Context constructor", e); + } + } + + protected static abstract class CombinerRunner { + protected final Counters.Counter inputCounter; + protected final JobConf job; + protected final TaskReporter reporter; + + CombinerRunner(Counters.Counter inputCounter, + JobConf job, + TaskReporter reporter) { + this.inputCounter = inputCounter; + this.job = job; + 
this.reporter = reporter; + } + + /** + * Run the combiner over a set of inputs. + * @param iterator the key/value pairs to use as input + * @param collector the output collector + */ + abstract void combine(RawKeyValueIterator iterator, + OutputCollector collector + ) throws IOException, InterruptedException, + ClassNotFoundException; + + static + CombinerRunner create(JobConf job, + TaskAttemptID taskId, + Counters.Counter inputCounter, + TaskReporter reporter, + org.apache.hadoop.mapreduce.OutputCommitter committer + ) throws ClassNotFoundException { + Class> cls = + (Class>) job.getCombinerClass(); + if (cls != null) { + return new OldCombinerRunner(cls, job, inputCounter, reporter); + } + // make a task context so we can get the classes + org.apache.hadoop.mapreduce.TaskAttemptContext taskContext = + new org.apache.hadoop.mapreduce.TaskAttemptContext(job, taskId); + Class> newcls = + (Class>) + taskContext.getCombinerClass(); + if (newcls != null) { + return new NewCombinerRunner(newcls, job, taskId, taskContext, + inputCounter, reporter, committer); + } + + return null; + } + } + + protected static class OldCombinerRunner extends CombinerRunner { + private final Class> combinerClass; + private final Class keyClass; + private final Class valueClass; + private final RawComparator comparator; + + protected OldCombinerRunner(Class> cls, + JobConf conf, + Counters.Counter inputCounter, + TaskReporter reporter) { + super(inputCounter, conf, reporter); + combinerClass = cls; + keyClass = (Class) job.getMapOutputKeyClass(); + valueClass = (Class) job.getMapOutputValueClass(); + comparator = (RawComparator) job.getOutputKeyComparator(); + } + + @SuppressWarnings("unchecked") + protected void combine(RawKeyValueIterator kvIter, + OutputCollector combineCollector + ) throws IOException { + Reducer combiner = + ReflectionUtils.newInstance(combinerClass, job); + try { + CombineValuesIterator values = + new CombineValuesIterator(kvIter, comparator, keyClass, + valueClass, job, Reporter.NULL, + inputCounter); + while (values.more()) { + combiner.reduce(values.getKey(), values, combineCollector, + Reporter.NULL); + values.nextKey(); + } + } finally { + combiner.close(); + } + } + } + + protected static class NewCombinerRunner extends CombinerRunner { + private final Class> + reducerClass; + private final org.apache.hadoop.mapreduce.TaskAttemptID taskId; + private final RawComparator comparator; + private final Class keyClass; + private final Class valueClass; + private final org.apache.hadoop.mapreduce.OutputCommitter committer; + + NewCombinerRunner(Class reducerClass, + JobConf job, + org.apache.hadoop.mapreduce.TaskAttemptID taskId, + org.apache.hadoop.mapreduce.TaskAttemptContext context, + Counters.Counter inputCounter, + TaskReporter reporter, + org.apache.hadoop.mapreduce.OutputCommitter committer) { + super(inputCounter, job, reporter); + this.reducerClass = reducerClass; + this.taskId = taskId; + keyClass = (Class) context.getMapOutputKeyClass(); + valueClass = (Class) context.getMapOutputValueClass(); + comparator = (RawComparator) context.getSortComparator(); + this.committer = committer; + } + + private static class OutputConverter + extends org.apache.hadoop.mapreduce.RecordWriter { + OutputCollector output; + OutputConverter(OutputCollector output) { + this.output = output; + } + + @Override + public void close(org.apache.hadoop.mapreduce.TaskAttemptContext context){ + } + + @Override + public void write(K key, V value + ) throws IOException, InterruptedException { + 
output.collect(key,value); + } + } + + @Override + void combine(RawKeyValueIterator iterator, + OutputCollector collector + ) throws IOException, InterruptedException, + ClassNotFoundException { + // make a reducer + org.apache.hadoop.mapreduce.Reducer reducer = + (org.apache.hadoop.mapreduce.Reducer) + ReflectionUtils.newInstance(reducerClass, job); + org.apache.hadoop.mapreduce.Reducer.Context + reducerContext = createReduceContext(reducer, job, taskId, + iterator, null, inputCounter, + new OutputConverter(collector), + committer, + reporter, comparator, keyClass, + valueClass); + reducer.run(reducerContext); + } + } + + BytesWritable getExtraData() { + return extraData; + } + + void setExtraData(BytesWritable extraData) { + this.extraData = extraData; + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/TaskAttemptContext.java b/src/mapred/org/apache/hadoop/mapred/TaskAttemptContext.java new file mode 100644 index 0000000..6a409b6 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TaskAttemptContext.java @@ -0,0 +1,62 @@ +/* Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import org.apache.hadoop.util.Progressable; + +/** + * @deprecated Use {@link org.apache.hadoop.mapreduce.TaskAttemptContext} + * instead. + */ +@Deprecated +public class TaskAttemptContext + extends org.apache.hadoop.mapreduce.TaskAttemptContext { + private Progressable progress; + + TaskAttemptContext(JobConf conf, TaskAttemptID taskid) { + this(conf, taskid, Reporter.NULL); + } + + TaskAttemptContext(JobConf conf, TaskAttemptID taskid, + Progressable progress) { + super(conf, taskid); + this.progress = progress; + } + + /** + * Get the taskAttemptID. + * + * @return TaskAttemptID + */ + public TaskAttemptID getTaskAttemptID() { + return (TaskAttemptID) super.getTaskAttemptID(); + } + + public Progressable getProgressible() { + return progress; + } + + public JobConf getJobConf() { + return (JobConf) getConfiguration(); + } + + @Override + public void progress() { + progress.progress(); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/TaskAttemptID.java b/src/mapred/org/apache/hadoop/mapred/TaskAttemptID.java new file mode 100644 index 0000000..a1c5f93 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TaskAttemptID.java @@ -0,0 +1,149 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.IOException; + +/** + * TaskAttemptID represents the immutable and unique identifier for + * a task attempt. Each task attempt is one particular instance of a Map or + * Reduce Task identified by its TaskID. + * + * TaskAttemptID consists of 2 parts. First part is the + * {@link TaskID}, that this TaskAttemptID belongs to. + * Second part is the task attempt number.
+ * An example TaskAttemptID is : + * attempt_200707121733_0003_m_000005_0 , which represents the + * zeroth task attempt for the fifth map task in the third job + * running at the jobtracker started at 200707121733. + *
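+ * For example, the string form above can be parsed back into an id with
+ * {@link #forName(String)}:
+ * <pre>
+ * TaskAttemptID attempt =
+ *     TaskAttemptID.forName("attempt_200707121733_0003_m_000005_0");
+ * // attempt.getTaskID().getId() == 5, attempt.getId() == 0
+ * </pre>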

+ * Applications should never construct or parse TaskAttemptID strings + * , but rather use appropriate constructors or {@link #forName(String)} + * method. + * + * @see JobID + * @see TaskID + */ +@Deprecated +public class TaskAttemptID extends org.apache.hadoop.mapreduce.TaskAttemptID { + + /** + * Constructs a TaskAttemptID object from given {@link TaskID}. + * @param taskId TaskID that this task belongs to + * @param id the task attempt number + */ + public TaskAttemptID(TaskID taskId, int id) { + super(taskId, id); + } + + /** + * Constructs a TaskId object from given parts. + * @param jtIdentifier jobTracker identifier + * @param jobId job number + * @param isMap whether the tip is a map + * @param taskId taskId number + * @param id the task attempt number + */ + public TaskAttemptID(String jtIdentifier, int jobId, boolean isMap, + int taskId, int id) { + this(new TaskID(jtIdentifier, jobId, isMap, taskId), id); + } + + public TaskAttemptID() { + super(new TaskID(), 0); + } + + /** + * Downgrade a new TaskAttemptID to an old one + * @param old the new id + * @return either old or a new TaskAttemptID constructed to match old + */ + public static + TaskAttemptID downgrade(org.apache.hadoop.mapreduce.TaskAttemptID old) { + if (old instanceof TaskAttemptID) { + return (TaskAttemptID) old; + } else { + return new TaskAttemptID(TaskID.downgrade(old.getTaskID()), old.getId()); + } + } + + public TaskID getTaskID() { + return (TaskID) super.getTaskID(); + } + + public JobID getJobID() { + return (JobID) super.getJobID(); + } + + @Deprecated + public static TaskAttemptID read(DataInput in) throws IOException { + TaskAttemptID taskId = new TaskAttemptID(); + taskId.readFields(in); + return taskId; + } + + /** Construct a TaskAttemptID object from given string + * @return constructed TaskAttemptID object or null if the given String is null + * @throws IllegalArgumentException if the given string is malformed + */ + public static TaskAttemptID forName(String str + ) throws IllegalArgumentException { + return (TaskAttemptID) + org.apache.hadoop.mapreduce.TaskAttemptID.forName(str); + } + + /** + * Returns a regex pattern which matches task attempt IDs. Arguments can + * be given null, in which case that part of the regex will be generic. + * For example to obtain a regex matching all task attempt IDs + * of any jobtracker, in any job, of the first + * map task, we would use : + *

 
+   * <pre>
+   * TaskAttemptID.getTaskAttemptIDsPattern(null, null, true, 1, null);
+   * </pre>
+   * which will return :
+   * <pre> "attempt_[^_]*_[0-9]*_m_000001_[0-9]*" </pre>
+ * @param jtIdentifier jobTracker identifier, or null + * @param jobId job number, or null + * @param isMap whether the tip is a map, or null + * @param taskId taskId number, or null + * @param attemptId the task attempt number, or null + * @return a regex pattern matching TaskAttemptIDs + */ + @Deprecated + public static String getTaskAttemptIDsPattern(String jtIdentifier, + Integer jobId, Boolean isMap, Integer taskId, Integer attemptId) { + StringBuilder builder = new StringBuilder(ATTEMPT).append(SEPARATOR); + builder.append(getTaskAttemptIDsPatternWOPrefix(jtIdentifier, jobId, + isMap, taskId, attemptId)); + return builder.toString(); + } + + @Deprecated + static StringBuilder getTaskAttemptIDsPatternWOPrefix(String jtIdentifier + , Integer jobId, Boolean isMap, Integer taskId, Integer attemptId) { + StringBuilder builder = new StringBuilder(); + builder.append(TaskID.getTaskIDsPatternWOPrefix(jtIdentifier + , jobId, isMap, taskId)) + .append(SEPARATOR) + .append(attemptId != null ? attemptId : "[0-9]*"); + return builder; + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/TaskCompletionEvent.java b/src/mapred/org/apache/hadoop/mapred/TaskCompletionEvent.java new file mode 100644 index 0000000..7ce86a4 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TaskCompletionEvent.java @@ -0,0 +1,234 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableUtils; + +/** + * This is used to track task completion events on + * job tracker. + */ +public class TaskCompletionEvent implements Writable{ + static public enum Status {FAILED, KILLED, SUCCEEDED, OBSOLETE, TIPFAILED}; + + private int eventId; + private String taskTrackerHttp; + private int taskRunTime; // using int since runtime is the time difference + private TaskAttemptID taskId; + Status status; + boolean isMap = false; + private int idWithinJob; + public static final TaskCompletionEvent[] EMPTY_ARRAY = + new TaskCompletionEvent[0]; + /** + * Default constructor for Writable. + * + */ + public TaskCompletionEvent(){ + taskId = new TaskAttemptID(); + } + + /** + * Constructor. eventId should be created externally and incremented + * per event for each job. + * @param eventId event id, event id should be unique and assigned in + * incrementally, starting from 0. + * @param taskId task id + * @param status task's status + * @param taskTrackerHttp task tracker's host:port for http. 
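+ * @param idWithinJob index of the task within the job
+ * @param isMap whether the completed attempt is for a map task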
+ */ + public TaskCompletionEvent(int eventId, + TaskAttemptID taskId, + int idWithinJob, + boolean isMap, + Status status, + String taskTrackerHttp){ + + this.taskId = taskId; + this.idWithinJob = idWithinJob; + this.isMap = isMap; + this.eventId = eventId; + this.status =status; + this.taskTrackerHttp = taskTrackerHttp; + } + /** + * Returns event Id. + * @return event id + */ + public int getEventId() { + return eventId; + } + /** + * Returns task id. + * @return task id + * @deprecated use {@link #getTaskAttemptId()} instead. + */ + @Deprecated + public String getTaskId() { + return taskId.toString(); + } + + /** + * Returns task id. + * @return task id + */ + public TaskAttemptID getTaskAttemptId() { + return taskId; + } + + /** + * Returns enum Status.SUCESS or Status.FAILURE. + * @return task tracker status + */ + public Status getTaskStatus() { + return status; + } + /** + * http location of the tasktracker where this task ran. + * @return http location of tasktracker user logs + */ + public String getTaskTrackerHttp() { + return taskTrackerHttp; + } + + /** + * Returns time (in millisec) the task took to complete. + */ + public int getTaskRunTime() { + return taskRunTime; + } + + /** + * Set the task completion time + * @param taskCompletionTime time (in millisec) the task took to complete + */ + public void setTaskRunTime(int taskCompletionTime) { + this.taskRunTime = taskCompletionTime; + } + + /** + * set event Id. should be assigned incrementally starting from 0. + * @param eventId + */ + public void setEventId( + int eventId) { + this.eventId = eventId; + } + /** + * Sets task id. + * @param taskId + * @deprecated use {@link #setTaskID(TaskAttemptID)} instead. + */ + @Deprecated + public void setTaskId(String taskId) { + this.taskId = TaskAttemptID.forName(taskId); + } + + /** + * Sets task id. + * @param taskId + */ + public void setTaskID(TaskAttemptID taskId) { + this.taskId = taskId; + } + + /** + * Set task status. + * @param status + */ + public void setTaskStatus( + Status status) { + this.status = status; + } + /** + * Set task tracker http location. 
+ * @param taskTrackerHttp + */ + public void setTaskTrackerHttp( + String taskTrackerHttp) { + this.taskTrackerHttp = taskTrackerHttp; + } + + @Override + public String toString(){ + StringBuffer buf = new StringBuffer(); + buf.append("Task Id : "); + buf.append(taskId); + buf.append(", Status : "); + buf.append(status.name()); + return buf.toString(); + } + + @Override + public boolean equals(Object o) { + if(o == null) + return false; + if(o.getClass().equals(TaskCompletionEvent.class)) { + TaskCompletionEvent event = (TaskCompletionEvent) o; + return this.isMap == event.isMapTask() + && this.eventId == event.getEventId() + && this.idWithinJob == event.idWithinJob() + && this.status.equals(event.getTaskStatus()) + && this.taskId.equals(event.getTaskAttemptId()) + && this.taskRunTime == event.getTaskRunTime() + && this.taskTrackerHttp.equals(event.getTaskTrackerHttp()); + } + return false; + } + + @Override + public int hashCode() { + return toString().hashCode(); + } + + public boolean isMapTask() { + return isMap; + } + + public int idWithinJob() { + return idWithinJob; + } + ////////////////////////////////////////////// + // Writable + ////////////////////////////////////////////// + public void write(DataOutput out) throws IOException { + taskId.write(out); + WritableUtils.writeVInt(out, idWithinJob); + out.writeBoolean(isMap); + WritableUtils.writeEnum(out, status); + WritableUtils.writeString(out, taskTrackerHttp); + WritableUtils.writeVInt(out, taskRunTime); + WritableUtils.writeVInt(out, eventId); + } + + public void readFields(DataInput in) throws IOException { + taskId.readFields(in); + idWithinJob = WritableUtils.readVInt(in); + isMap = in.readBoolean(); + status = WritableUtils.readEnum(in, Status.class); + taskTrackerHttp = WritableUtils.readString(in); + taskRunTime = WritableUtils.readVInt(in); + eventId = WritableUtils.readVInt(in); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/TaskController.java b/src/mapred/org/apache/hadoop/mapred/TaskController.java new file mode 100644 index 0000000..80872ed --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TaskController.java @@ -0,0 +1,235 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+*/ +package org.apache.hadoop.mapred; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.mapred.CleanupQueue.PathDeletionContext; +import org.apache.hadoop.mapred.JvmManager.JvmEnv; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.Shell.ShellCommandExecutor; + +/** + * Controls initialization, finalization and clean up of tasks, and + * also the launching and killing of task JVMs. + * + * This class defines the API for initializing, finalizing and cleaning + * up of tasks, as also the launching and killing task JVMs. + * Subclasses of this class will implement the logic required for + * performing the actual actions. + */ +abstract class TaskController implements Configurable { + + private Configuration conf; + + public static final Log LOG = LogFactory.getLog(TaskController.class); + + public Configuration getConf() { + return conf; + } + + public void setConf(Configuration conf) { + this.conf = conf; + } + + /** + * Setup task controller component. + * + */ + abstract void setup(); + + + /** + * Launch a task JVM + * + * This method defines how a JVM will be launched to run a task. + * @param context the context associated to the task + */ + abstract void launchTaskJVM(TaskControllerContext context) + throws IOException; + + /** + * Top level cleanup a task JVM method. + * + * The current implementation does the following. + *
+ * 1. Sends a graceful terminate signal to the task JVM, allowing its
+ *    sub-processes to clean up.
+ * 2. Waits for the stipulated period.
+ * 3. Sends a forceful kill signal to the task JVM, terminating all its
+ *    sub-processes forcefully.
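+ * The length of the wait in step 2 is the {@code sleeptimeBeforeSigkill}
+ * value carried in the task's {@link TaskControllerContext}.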
+ **/ + private class DestoryJVMTaskRunnable implements Runnable { + TaskControllerContext context; + /** + * @param context the task for which kill signal has to be sent. + */ + public DestoryJVMTaskRunnable(TaskControllerContext context) { + this.context = context; + } + @Override + public void run() { + terminateTask(context); + try { + Thread.sleep(context.sleeptimeBeforeSigkill); + } catch (InterruptedException e) { + LOG.warn("Sleep interrupted : " + + StringUtils.stringifyException(e)); + } + killTask(context); + } + } + + /** + * Use DestoryJVMTaskRunnable to kill task JVM asynchronously. + */ + final void destroyTaskJVM(TaskControllerContext context) { + Thread taskJVMDestoryer = new Thread(new DestoryJVMTaskRunnable(context)); + taskJVMDestoryer.start(); + } + + /** + * Perform initializing actions required before a task can run. + * + * For instance, this method can be used to setup appropriate + * access permissions for files and directories that will be + * used by tasks. Tasks use the job cache, log, PID and distributed cache + * directories and files as part of their functioning. Typically, + * these files are shared between the daemon and the tasks + * themselves. So, a TaskController that is launching tasks + * as different users can implement this method to setup + * appropriate ownership and permissions for these directories + * and files. + */ + abstract void initializeTask(TaskControllerContext context); + + + /** + * Contains task information required for the task controller. + */ + static class TaskControllerContext { + // task being executed + Task task; + // the JVM environment for the task + JvmEnv env; + // the Shell executor executing the JVM for this task + ShellCommandExecutor shExec; + // process handle of task JVM + String pid; + // waiting time before sending SIGKILL to task JVM after sending SIGTERM + long sleeptimeBeforeSigkill; + } + + /** + * Contains info related to the path of the file/dir to be deleted. This info + * is needed by task-controller to build the full path of the file/dir + */ + static class TaskControllerPathDeletionContext extends PathDeletionContext { + Task task; + boolean isWorkDir; + TaskController taskController; + + /** + * mapredLocalDir is the base dir under which to-be-deleted taskWorkDir or + * taskAttemptDir exists. fullPath of taskAttemptDir or taskWorkDir + * is built using mapredLocalDir, jobId, taskId, etc. 
+ */ + Path mapredLocalDir; + + public TaskControllerPathDeletionContext(FileSystem fs, Path mapredLocalDir, + Task task, boolean isWorkDir, TaskController taskController) { + super(fs, null); + this.task = task; + this.isWorkDir = isWorkDir; + this.taskController = taskController; + this.mapredLocalDir = mapredLocalDir; + } + + @Override + protected String getPathForCleanup() { + if (fullPath == null) { + fullPath = buildPathForDeletion(); + } + return fullPath; + } + + /** + * Builds the path of taskAttemptDir OR taskWorkDir based on + * mapredLocalDir, jobId, taskId, etc + */ + String buildPathForDeletion() { + String subDir = TaskTracker.getLocalTaskDir(task.getJobID().toString(), + task.getTaskID().toString(), task.isTaskCleanupTask()); + if (isWorkDir) { + subDir = subDir + Path.SEPARATOR + "work"; + } + return mapredLocalDir.toUri().getPath() + Path.SEPARATOR + subDir; + } + + /** + * Makes the path(and its subdirectories recursively) fully deletable by + * setting proper permissions(777) by task-controller + */ + @Override + protected void enablePathForCleanup() throws IOException { + getPathForCleanup();// allow init of fullPath + if (fs.exists(new Path(fullPath))) { + taskController.enableTaskForCleanup(this); + } + } + } + + /** + * Method which is called after the job is localized so that task controllers + * can implement their own job localization logic. + * + * @param tip Task of job for which localization happens. + */ + abstract void initializeJob(JobID jobId); + + /** + * Sends a graceful terminate signal to taskJVM and it sub-processes. + * + * @param context task context + */ + abstract void terminateTask(TaskControllerContext context); + + /** + * Sends a KILL signal to forcefully terminate the taskJVM and its + * sub-processes. + * + * @param context task context + */ + + abstract void killTask(TaskControllerContext context); + + /** + * Enable the task for cleanup by changing permissions of the path + * @param context path deletion context + * @throws IOException + */ + abstract void enableTaskForCleanup(PathDeletionContext context) + throws IOException; +} diff --git a/src/mapred/org/apache/hadoop/mapred/TaskGraphServlet.java b/src/mapred/org/apache/hadoop/mapred/TaskGraphServlet.java new file mode 100644 index 0000000..01a9690 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TaskGraphServlet.java @@ -0,0 +1,235 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.io.PrintWriter; + +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +/** The servlet that outputs svg graphics for map / reduce task + * statuses + */ +public class TaskGraphServlet extends HttpServlet { + + private static final long serialVersionUID = -1365683739392460020L; + + /**height of the graph w/o margins*/ + public static final int width = 600; + + /**height of the graph w/o margins*/ + public static final int height = 200; + + /**margin space on y axis */ + public static final int ymargin = 20; + + /**margin space on x axis */ + public static final int xmargin = 80; + + private static final float oneThird = 1f / 3f; + + @Override + public void doGet(HttpServletRequest request, HttpServletResponse response) + throws ServletException, IOException { + + response.setContentType("image/svg+xml"); + + JobTracker tracker = + (JobTracker) getServletContext().getAttribute("job.tracker"); + + String jobIdStr = request.getParameter("jobid"); + if(jobIdStr == null) + return; + JobID jobId = JobID.forName(jobIdStr); + + final boolean isMap = "map".equalsIgnoreCase(request.getParameter("type")); + final TaskReport[] reports = isMap? tracker.getMapTaskReports(jobId) + : tracker.getReduceTaskReports(jobId); + if(reports == null || reports.length == 0) { + return; + } + + final int numTasks = reports.length; + int tasksPerBar = (int)Math.ceil(numTasks / 600d); + int numBars = (int) Math.ceil((double)numTasks / tasksPerBar); + int w = Math.max(600, numBars); + int barWidth = Math.min(10, w / numBars); //min 1px, max 10px + int barsPerNotch = (int)Math.ceil(10d / barWidth); + w = w + numBars / barsPerNotch; + int totalWidth = w + 2 * xmargin; + + //draw a white rectangle + final PrintWriter out = response.getWriter(); + out.print("\n" + + "\n" + + "\n\n"+ + "\n\n"); + + //axes + printLine(out, xmargin - 1, xmargin - 1, height + ymargin + 1 + , ymargin - 1, "black" ); + printLine(out, xmargin - 1, w + xmargin + 1 ,height + ymargin + 1 + , height + ymargin + 1, "black" ); + + //borderlines + printLine(out, w + xmargin + 1 , w + xmargin +1 + , height + ymargin + 1,ymargin - 1, "#CCCCCC" ); + printLine(out, xmargin - 1, w + xmargin + 1 + , ymargin - 1 , ymargin - 1, "#CCCCCC" ); + + String[] colors = new String[] {"#00DD00", "#E50000", "#AAAAFF"}; + + //determine the notch interval using the number of digits for numTasks + int xNotchInterval = (int)(Math.ceil( numTasks / 10d)); + + int xOffset = -1; + int xNotchCount = 0; + //task bar graph + for(int i=0, barCnt=0; ;i+=tasksPerBar, barCnt++) { + if(barCnt % barsPerNotch == 0) { + xOffset++; + } + int x = barCnt * barWidth + xmargin + xOffset; + //x axis notches + if(i >= xNotchInterval * xNotchCount) { + printLine(out, x, x, height + ymargin + 3 + , height + ymargin - 2, "black"); + printText(out, x, height + ymargin + 15 + , String.valueOf(xNotchInterval * xNotchCount++ ), "middle"); + } + if(i >= reports.length) break; + + if(isMap) { + float progress = getMapAvarageProgress(tasksPerBar, i, reports); + int barHeight = (int)Math.ceil(height * progress); + int y = height - barHeight + ymargin; + printRect(out, barWidth, barHeight,x , y , colors[2]); + } + else { + float[] progresses + = getReduceAvarageProgresses(tasksPerBar, i, reports); + //draw three bars stacked, for copy, sort, reduce + + int prevHeight =0; + for(int j=0; j < 3 ; j++) { + 
int barHeight = (int)((height / 3) * progresses[j]); + if(barHeight > height/ 3 - 3)//fix rounding error + barHeight = height / 3 + 1; + + int y = height - barHeight + ymargin - prevHeight; + prevHeight += barHeight; + printRect(out, barWidth, barHeight, x, y, colors[j] ); + } + } + } + + //y axis notches + for(int i=0;i<=10;i++) { + printLine(out, xmargin-3 , xmargin+2 , ymargin + (i * height) / 10 + , ymargin + (i * height) / 10 , "black"); + printText(out, xmargin - 10 , ymargin + 4 + (i * height) / 10 + , String.valueOf(100 - i * 10), "end"); + } + + if(!isMap) { + //print color codes for copy, sort, reduce + printRect(out, 14, 14, xmargin + w + 4, ymargin + 20, colors[0]); + printText(out, xmargin + w + 24, ymargin + 30, "copy", "start"); + printRect(out, 14, 14, xmargin + w + 4, ymargin + 50, colors[1]); + printText(out, xmargin + w + 24, ymargin + 60, "sort", "start"); + printRect(out, 14, 14, xmargin + w + 4, ymargin + 80, colors[2]); + printText(out, xmargin + w + 24, ymargin + 90, "reduce", "start"); + } + + + //firefox curently does not support vertical text + //out.print("" + //+"Percent\n"); + + out.print(""); + } + + /**Computes average progress per bar*/ + private float getMapAvarageProgress(int tasksPerBar, int index + , TaskReport[] reports ) { + float progress = 0f; + int k=0; + for(;k < tasksPerBar && index + k < reports.length; k++) { + progress += reports[index + k].getProgress(); + } + progress /= k; + return progress; + } + + /**Computes average progresses per bar*/ + private float[] getReduceAvarageProgresses(int tasksPerBar, int index + , TaskReport[] reports ) { + float[] progresses = new float[] {0,0,0}; + int k=0; + for(;k < tasksPerBar && index + k < reports.length; k++) { + float progress = reports[index+k].getProgress(); + for(int j=0; progress > 0 ; j++, progress -= oneThird) { + if(progress > oneThird) + progresses[j] += 1f; + else + progresses[j] += progress * 3 ; + } + } + for(int j=0; j<3; j++) { progresses[j] /= k;} + + return progresses; + } + + private void printRect(PrintWriter out, int width, int height + , int x, int y, String color) throws IOException { + if(height > 0) { + out.print("\n"); + } + } + private void printLine(PrintWriter out, int x1, int x2 + , int y1, int y2, String color) throws IOException { + out.print("\n"); + } + private void printText(PrintWriter out, int x, int y, String text + , String anchor) throws IOException { + out.print(""); + out.print(text); out.print("\n"); + } +} + diff --git a/src/mapred/org/apache/hadoop/mapred/TaskID.java b/src/mapred/org/apache/hadoop/mapred/TaskID.java new file mode 100644 index 0000000..5f276a8 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TaskID.java @@ -0,0 +1,139 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.IOException; + +/** + * TaskID represents the immutable and unique identifier for + * a Map or Reduce Task. Each TaskID encompasses multiple attempts made to + * execute the Map or Reduce Task, each of which are uniquely indentified by + * their TaskAttemptID. + * + * TaskID consists of 3 parts. First part is the {@link JobID}, that this + * TaskInProgress belongs to. Second part of the TaskID is either 'm' or 'r' + * representing whether the task is a map task or a reduce task. + * And the third part is the task number.
+ * An example TaskID is : + * task_200707121733_0003_m_000005 , which represents the + * fifth map task in the third job running at the jobtracker + * started at 200707121733. + *
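+ * For example, the id above could also be constructed directly from its parts:
+ * <pre>
+ * TaskID tip = new TaskID("200707121733", 3, true, 5);
+ * // tip.toString() is "task_200707121733_0003_m_000005"
+ * </pre>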

+ * Applications should never construct or parse TaskID strings + * , but rather use appropriate constructors or {@link #forName(String)} + * method. + * + * @see JobID + * @see TaskAttemptID + */ +@Deprecated +public class TaskID extends org.apache.hadoop.mapreduce.TaskID { + + /** + * Constructs a TaskID object from given {@link JobID}. + * @param jobId JobID that this tip belongs to + * @param isMap whether the tip is a map + * @param id the tip number + */ + public TaskID(org.apache.hadoop.mapreduce.JobID jobId, boolean isMap,int id) { + super(jobId, isMap, id); + } + + /** + * Constructs a TaskInProgressId object from given parts. + * @param jtIdentifier jobTracker identifier + * @param jobId job number + * @param isMap whether the tip is a map + * @param id the tip number + */ + public TaskID(String jtIdentifier, int jobId, boolean isMap, int id) { + this(new JobID(jtIdentifier, jobId), isMap, id); + } + + public TaskID() { + super(new JobID(), false, 0); + } + + /** + * Downgrade a new TaskID to an old one + * @param old a new or old TaskID + * @return either old or a new TaskID build to match old + */ + public static TaskID downgrade(org.apache.hadoop.mapreduce.TaskID old) { + if (old instanceof TaskID) { + return (TaskID) old; + } else { + return new TaskID(JobID.downgrade(old.getJobID()), old.isMap(), + old.getId()); + } + } + + @Deprecated + public static TaskID read(DataInput in) throws IOException { + TaskID tipId = new TaskID(); + tipId.readFields(in); + return tipId; + } + + public JobID getJobID() { + return (JobID) super.getJobID(); + } + + /** + * Returns a regex pattern which matches task IDs. Arguments can + * be given null, in which case that part of the regex will be generic. + * For example to obtain a regex matching the first map task + * of any jobtracker, of any job, we would use : + *

 
+   * <pre>
+   * TaskID.getTaskIDsPattern(null, null, true, 1);
+   * </pre>
+   * which will return :
+   * <pre> "task_[^_]*_[0-9]*_m_000001*" </pre>
+ * @param jtIdentifier jobTracker identifier, or null + * @param jobId job number, or null + * @param isMap whether the tip is a map, or null + * @param taskId taskId number, or null + * @return a regex pattern matching TaskIDs + */ + @Deprecated + public static String getTaskIDsPattern(String jtIdentifier, Integer jobId + , Boolean isMap, Integer taskId) { + StringBuilder builder = new StringBuilder(TASK).append(SEPARATOR) + .append(getTaskIDsPatternWOPrefix(jtIdentifier, jobId, isMap, taskId)); + return builder.toString(); + } + + @Deprecated + static StringBuilder getTaskIDsPatternWOPrefix(String jtIdentifier + , Integer jobId, Boolean isMap, Integer taskId) { + StringBuilder builder = new StringBuilder(); + builder.append(JobID.getJobIDsPatternWOPrefix(jtIdentifier, jobId)) + .append(SEPARATOR) + .append(isMap != null ? (isMap ? "m" : "r") : "(m|r)").append(SEPARATOR) + .append(taskId != null ? idFormat.format(taskId) : "[0-9]*"); + return builder; + } + + public static TaskID forName(String str + ) throws IllegalArgumentException { + return (TaskID) org.apache.hadoop.mapreduce.TaskID.forName(str); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/TaskInProgress.java b/src/mapred/org/apache/hadoop/mapred/TaskInProgress.java new file mode 100644 index 0000000..bd7811d --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TaskInProgress.java @@ -0,0 +1,1430 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Comparator; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.TreeSet; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.io.BytesWritable; +import org.apache.hadoop.mapred.JobClient.RawSplit; +import org.apache.hadoop.mapred.JobInProgress.DataStatistics; +import org.apache.hadoop.mapred.SortedRanges.Range; +import org.apache.hadoop.mapreduce.TaskType; +import org.apache.hadoop.net.Node; +import org.apache.hadoop.util.StringUtils; + +/************************************************************* + * TaskInProgress maintains all the info needed for a + * Task in the lifetime of its owning Job. A given Task + * might be speculatively executed or reexecuted, so we + * need a level of indirection above the running-id itself. + *
+ * A given TaskInProgress contains multiple taskids, + * 0 or more of which might be executing at any one time. + * (That's what allows speculative execution.) A taskid + * is now *never* recycled. A TIP allocates enough taskids + * to account for all the speculation and failures it will + * ever have to handle. Once those are up, the TIP is dead. + * ************************************************************** + */ +class TaskInProgress { + static final int MAX_TASK_EXECS = 1; + int maxTaskAttempts = 4; + double speculativeGap; + long speculativeLag; + double maxProgressRateForSpeculation; + private boolean speculativeForced = false; + private static final int NUM_ATTEMPTS_PER_RESTART = 1000; + + public static final Log LOG = LogFactory.getLog(TaskInProgress.class); + + // Defines the TIP + private String jobFile = null; + private RawSplit rawSplit; + private int numMaps; + private int partition; + private JobTracker jobtracker; + private TaskID id; + private JobInProgress job; + private final int numSlotsRequired; + + // Status of the TIP + private int successEventNumber = -1; + private int numTaskFailures = 0; + private int numKilledTasks = 0; + private double progress = 0; + private double oldProgressRate; + private String state = ""; + private long startTime = 0; + private long lastDispatchTime = 0; // most recent time task given to TT + private long execStartTime = 0; + private long execFinishTime = 0; + private int completes = 0; + private boolean failed = false; + private boolean killed = false; + private long maxSkipRecords = 0; + private FailedRanges failedRanges = new FailedRanges(); + private volatile boolean skipping = false; + private boolean jobCleanup = false; + private boolean jobSetup = false; + + // The 'next' usable taskid of this tip + int nextTaskId = 0; + + // The taskid that took this TIP to SUCCESS + private TaskAttemptID successfulTaskId; + + // The first taskid of this tip + private TaskAttemptID firstTaskId; + + // The taskid of speculative task + private TaskAttemptID speculativeTaskId; + + // Map from task Id -> TaskTracker Id, contains tasks that are + // currently runnings + private TreeMap activeTasks = new TreeMap(); + // All attempt Ids of this TIP + private TreeSet tasks = new TreeSet(); + private JobConf conf; + private Map> taskDiagnosticData = + new TreeMap>(); + /** + * Map from taskId -> TaskStatus + */ + TreeMap taskStatuses = + new TreeMap(); + + // Map from taskId -> TaskTracker Id, + // contains cleanup attempts and where they ran, if any + private TreeMap cleanupTasks = + new TreeMap(); + + private TreeSet machinesWhereFailed = new TreeSet(); + private TreeSet tasksReportedClosed = new TreeSet(); + + //list of tasks to kill, -> + private TreeMap tasksToKill = new TreeMap(); + + //task to commit, + private TaskAttemptID taskToCommit; + + private volatile Counters counters = new Counters(); + + private HashMap dispatchTimeMap = + new HashMap(); + + /** + * Constructor for MapTask + */ + public TaskInProgress(JobID jobid, String jobFile, + RawSplit rawSplit, + JobTracker jobtracker, JobConf conf, + JobInProgress job, int partition, + int numSlotsRequired) { + this.jobFile = jobFile; + this.rawSplit = rawSplit; + this.jobtracker = jobtracker; + this.job = job; + this.conf = conf; + this.partition = partition; + this.maxSkipRecords = SkipBadRecords.getMapperMaxSkipRecords(conf); + this.numSlotsRequired = numSlotsRequired; + setMaxTaskAttempts(); + init(jobid); + } + + /** + * Constructor for ReduceTask + */ + public TaskInProgress(JobID jobid, 
String jobFile, + int numMaps, + int partition, JobTracker jobtracker, JobConf conf, + JobInProgress job, int numSlotsRequired) { + this.jobFile = jobFile; + this.numMaps = numMaps; + this.partition = partition; + this.jobtracker = jobtracker; + this.job = job; + this.conf = conf; + this.maxSkipRecords = SkipBadRecords.getReducerMaxSkipGroups(conf); + this.numSlotsRequired = numSlotsRequired; + setMaxTaskAttempts(); + init(jobid); + } + + /** + * Set the max number of attempts before we declare a TIP as "failed" + */ + private void setMaxTaskAttempts() { + if (isMapTask()) { + this.maxTaskAttempts = conf.getMaxMapAttempts(); + } else { + this.maxTaskAttempts = conf.getMaxReduceAttempts(); + } + } + + /** + * Return the index of the tip within the job, so + * "task_200707121733_1313_0002_m_012345" would return 12345; + * @return int the tip index + */ + public int idWithinJob() { + return partition; + } + + public boolean isJobCleanupTask() { + return jobCleanup; + } + + public void setJobCleanupTask() { + jobCleanup = true; + } + + public boolean isJobSetupTask() { + return jobSetup; + } + + public void setJobSetupTask() { + jobSetup = true; + } + + public boolean isOnlyCommitPending() { + for (TaskStatus t : taskStatuses.values()) { + if (t.getRunState() == TaskStatus.State.COMMIT_PENDING) { + return true; + } + } + return false; + } + + public boolean isCommitPending(TaskAttemptID taskId) { + TaskStatus t = taskStatuses.get(taskId); + if (t == null) { + return false; + } + return t.getRunState() == TaskStatus.State.COMMIT_PENDING; + } + + /** + * Initialization common to Map and Reduce + */ + void init(JobID jobId) { + this.startTime = JobTracker.getClock().getTime(); + this.id = new TaskID(jobId, isMapTask(), partition); + this.skipping = startSkipping(); + long speculativeDuration; + if (isMapTask()) { + this.speculativeGap = conf.getMapSpeculativeGap(); + this.speculativeLag = conf.getMapSpeculativeLag(); + speculativeDuration = conf.getMapSpeculativeDuration(); + } else { + this.speculativeGap = conf.getReduceSpeculativeGap(); + this.speculativeLag = conf.getReduceSpeculativeLag(); + speculativeDuration = conf.getReduceSpeculativeDuration(); + } + + // speculate only if 1/(1000 * progress_rate) > speculativeDuration + // ie. : + // speculate only if progress_rate < 1/(1000 * speculativeDuration) + + if (speculativeDuration > 0) { + this.maxProgressRateForSpeculation = 1.0/(1000.0*speculativeDuration); + } else { + // disable this check for durations <= 0 + this.maxProgressRateForSpeculation = -1.0; + } + } + + //////////////////////////////////// + // Accessors, info, profiles, etc. 
+ //////////////////////////////////// + + + /** + * Return the dispatch time + */ + public long getDispatchTime(TaskAttemptID taskid){ + Long l = dispatchTimeMap.get(taskid); + if (l != null) { + return l.longValue(); + } + return 0; + } + + public long getLastDispatchTime(){ + return this.lastDispatchTime; + } + + /** + * Set the dispatch time + */ + public void setDispatchTime(TaskAttemptID taskid, long disTime){ + dispatchTimeMap.put(taskid, disTime); + this.lastDispatchTime = disTime; + } + /** + * Return the start time + */ + public long getStartTime() { + return startTime; + } + + /** + * Return the exec start time + */ + public long getExecStartTime() { + return execStartTime; + } + + /** + * Set the exec start time + */ + public void setExecStartTime(long startTime) { + execStartTime = startTime; + } + + /** + * Return the exec finish time + */ + public long getExecFinishTime() { + return execFinishTime; + } + + /** + * Set the exec finish time + */ + public void setExecFinishTime(long finishTime) { + execFinishTime = finishTime; + JobHistory.Task.logUpdates(id, execFinishTime); // log the update + } + + /** + * Return the parent job + */ + public JobInProgress getJob() { + return job; + } + /** + * Return an ID for this task, not its component taskid-threads + */ + public TaskID getTIPId() { + return this.id; + } + /** + * Whether this is a map task + */ + public boolean isMapTask() { + return rawSplit != null; + } + + /** + * Returns the type of the {@link TaskAttemptID} passed. + * The type of an attempt is determined by the nature of the task and not its + * id. + * For example, + * - Attempt 'attempt_123_01_m_01_0' might be a job-setup task even though it + * has a _m_ in its id. Hence the task type of this attempt is JOB_SETUP + * instead of MAP. + * - Similarly reduce attempt 'attempt_123_01_r_01_0' might have failed and is + * now supposed to do the task-level cleanup. In such a case this attempt + * will be of type TASK_CLEANUP instead of REDUCE. + */ + TaskType getAttemptType (TaskAttemptID id) { + if (isCleanupAttempt(id)) { + return TaskType.TASK_CLEANUP; + } else if (isJobSetupTask()) { + return TaskType.JOB_SETUP; + } else if (isJobCleanupTask()) { + return TaskType.JOB_CLEANUP; + } else if (isMapTask()) { + return TaskType.MAP; + } else { + return TaskType.REDUCE; + } + } + + /** + * Is the Task associated with taskid is the first attempt of the tip? + * @param taskId + * @return Returns true if the Task is the first attempt of the tip + */ + public boolean isFirstAttempt(TaskAttemptID taskId) { + return firstTaskId == null ? false : firstTaskId.equals(taskId); + } + + /** + * Is the Task associated with taskid is the speculative attempt of the tip? + * @param taskId + * @return Returns true if the Task is the speculative attempt of the tip + */ + public boolean isSpeculativeAttempt(TaskAttemptID taskId) { + return speculativeTaskId == null ? false : + speculativeTaskId.equals(taskId); + } + + /** + * Is this tip currently running any tasks? + * @return true if any tasks are running + */ + public boolean isRunning() { + return !activeTasks.isEmpty(); + } + + /** + * Is this attempt currently running ? + * @param taskId task attempt id. 
+ * @return true if attempt taskId is running + */ + boolean isAttemptRunning(TaskAttemptID taskId) { + return activeTasks.containsKey(taskId); + } + + TaskAttemptID getSuccessfulTaskid() { + return successfulTaskId; + } + + private void setSuccessfulTaskid(TaskAttemptID successfulTaskId) { + this.successfulTaskId = successfulTaskId; + } + + private void resetSuccessfulTaskid() { + this.successfulTaskId = null; + } + + /** + * Is this tip complete? + * + * @return true if the tip is complete, else false + */ + public synchronized boolean isComplete() { + return (completes > 0); + } + + /** + * Is the given taskid the one that took this tip to completion? + * + * @param taskid taskid of attempt to check for completion + * @return true if taskid is complete, else false + */ + public boolean isComplete(TaskAttemptID taskid) { + return ((completes > 0) + && taskid.equals(getSuccessfulTaskid())); + } + + /** + * Is the tip a failure? + * + * @return true if tip has failed, else false + */ + public boolean isFailed() { + return failed; + } + + /** + * Number of times the TaskInProgress has failed. + */ + public int numTaskFailures() { + return numTaskFailures; + } + + /** + * Number of times the TaskInProgress has been killed by the framework. + */ + public int numKilledTasks() { + return numKilledTasks; + } + + /** + * Get the overall progress (from 0 to 1.0) for this TIP + */ + public double getProgress() { + return progress; + } + + /** + * Get the task's counters + */ + public Counters getCounters() { + return counters; + } + + /** + * Returns whether a component task-thread should be + * closed because the containing JobInProgress has completed + * or the task is killed by the user + */ + public boolean shouldClose(TaskAttemptID taskid) { + /** + * If the task hasn't been closed yet, and it belongs to a completed + * TaskInProgress close it. + * + * However, for completed map tasks we do not close the task which + * actually was the one responsible for _completing_ the TaskInProgress. + */ + + if (tasksReportedClosed.contains(taskid)) { + if (tasksToKill.keySet().contains(taskid)) + return true; + else + return false; + } + + boolean close = false; + TaskStatus ts = taskStatuses.get(taskid); + + if ((ts != null) && + ((this.failed) || + ((job.getStatus().getRunState() != JobStatus.RUNNING && + (job.getStatus().getRunState() != JobStatus.PREP))))) { + tasksReportedClosed.add(taskid); + close = true; + } else if ((completes > 0) && // isComplete() is synchronized! + !(isMapTask() && !jobSetup && + !jobCleanup && isComplete(taskid))) { + tasksReportedClosed.add(taskid); + close = true; + } else if (isCommitPending(taskid) && !shouldCommit(taskid)) { + tasksReportedClosed.add(taskid); + close = true; + } else { + close = tasksToKill.keySet().contains(taskid); + } + return close; + } + + /** + * Commit this task attempt for the tip. + * @param taskid + */ + public void doCommit(TaskAttemptID taskid) { + taskToCommit = taskid; + } + + /** + * Returns whether the task attempt should be committed or not + */ + public boolean shouldCommit(TaskAttemptID taskid) { + return !isComplete() && isCommitPending(taskid) && + taskToCommit.equals(taskid); + } + + /** + * Creates a "status report" for this task. Includes the + * task ID and overall status, plus reports for all the + * component task-threads that have ever been started. 
+ */ + synchronized TaskReport generateSingleReport() { + ArrayList diagnostics = new ArrayList(); + for (List l : taskDiagnosticData.values()) { + diagnostics.addAll(l); + } + TIPStatus currentStatus = null; + if (isRunning() && !isComplete()) { + currentStatus = TIPStatus.RUNNING; + } else if (isComplete()) { + currentStatus = TIPStatus.COMPLETE; + } else if (wasKilled()) { + currentStatus = TIPStatus.KILLED; + } else if (isFailed()) { + currentStatus = TIPStatus.FAILED; + } else if (!(isComplete() || isRunning() || wasKilled())) { + currentStatus = TIPStatus.PENDING; + } + + TaskReport report = new TaskReport + (getTIPId(), (float)progress, state, + diagnostics.toArray(new String[diagnostics.size()]), + currentStatus, execStartTime, execFinishTime, counters); + if (currentStatus == TIPStatus.RUNNING) { + report.setRunningTaskAttempts(activeTasks.keySet()); + } else if (currentStatus == TIPStatus.COMPLETE) { + report.setSuccessfulAttempt(getSuccessfulTaskid()); + } + return report; + } + + /** + * Get the diagnostic messages for a given task within this tip. + * + * @param taskId the id of the required task + * @return the list of diagnostics for that task + */ + synchronized List getDiagnosticInfo(TaskAttemptID taskId) { + return taskDiagnosticData.get(taskId); + } + + //////////////////////////////////////////////// + // Update methods, usually invoked by the owning + // job. + //////////////////////////////////////////////// + + /** + * Save diagnostic information for a given task. + * + * @param taskId id of the task + * @param diagInfo diagnostic information for the task + */ + public void addDiagnosticInfo(TaskAttemptID taskId, String diagInfo) { + List diagHistory = taskDiagnosticData.get(taskId); + if (diagHistory == null) { + diagHistory = new ArrayList(); + taskDiagnosticData.put(taskId, diagHistory); + } + diagHistory.add(diagInfo); + } + + /** + * A status message from a client has arrived. + * It updates the status of a single component-thread-task, + * which might result in an overall TaskInProgress status update. + * @return has the task changed its state noticeably? + */ + synchronized boolean updateStatus(TaskStatus status) { + TaskAttemptID taskid = status.getTaskID(); + String taskTracker = status.getTaskTracker(); + String diagInfo = status.getDiagnosticInfo(); + TaskStatus oldStatus = taskStatuses.get(taskid); + boolean changed = true; + if (diagInfo != null && diagInfo.length() > 0) { + LOG.info("Error from " + taskid + " on " + taskTracker + ": "+diagInfo); + addDiagnosticInfo(taskid, diagInfo); + } + + if(skipping) { + failedRanges.updateState(status); + } + + if (oldStatus != null) { + TaskStatus.State oldState = oldStatus.getRunState(); + TaskStatus.State newState = status.getRunState(); + + // We should never recieve a duplicate success/failure/killed + // status update for the same taskid! This is a safety check, + // and is addressed better at the TaskTracker to ensure this. 
+ // @see {@link TaskTracker.transmitHeartbeat()} + if ((newState != TaskStatus.State.RUNNING && + newState != TaskStatus.State.COMMIT_PENDING && + newState != TaskStatus.State.FAILED_UNCLEAN && + newState != TaskStatus.State.KILLED_UNCLEAN && + newState != TaskStatus.State.UNASSIGNED) && + (oldState == newState)) { + LOG.warn("Recieved duplicate status update of '" + newState + + "' for '" + taskid + "' of TIP '" + getTIPId() + "'" + + "oldTT=" + oldStatus.getTaskTracker() + + " while newTT=" + status.getTaskTracker()); + return false; + } + + // The task is not allowed to move from completed back to running. + // We have seen out of order status messagesmoving tasks from complete + // to running. This is a spot fix, but it should be addressed more + // globally. + if ((newState == TaskStatus.State.RUNNING || + newState == TaskStatus.State.UNASSIGNED) && + (oldState == TaskStatus.State.FAILED || + oldState == TaskStatus.State.KILLED || + oldState == TaskStatus.State.FAILED_UNCLEAN || + oldState == TaskStatus.State.KILLED_UNCLEAN || + oldState == TaskStatus.State.SUCCEEDED || + oldState == TaskStatus.State.COMMIT_PENDING)) { + return false; + } + + //Do not accept any status once the task is marked FAILED/KILLED + //This is to handle the case of the JobTracker timing out a task + //due to launch delay, but the TT comes back with any state or + //TT got expired + if (oldState == TaskStatus.State.FAILED || + oldState == TaskStatus.State.KILLED) { + tasksToKill.put(taskid, true); + return false; + } + + changed = oldState != newState; + } + // if task is a cleanup attempt, do not replace the complete status, + // update only specific fields. + // For example, startTime should not be updated, + // but finishTime has to be updated. + if (!isCleanupAttempt(taskid)) { + taskStatuses.put(taskid, status); + //we don't want to include setup tasks in the task execution stats + if (!isJobSetupTask() && ((isMapTask() && job.hasSpeculativeMaps()) || + (!isMapTask() && job.hasSpeculativeReduces()))) { + long now = JobTracker.getClock().getTime(); + + DataStatistics taskStats = job.getRunningTaskStatistics(isMapTask()); + updateProgressRate(now, taskStats); + } + } else { + taskStatuses.get(taskid).statusUpdate(status.getRunState(), + status.getProgress(), status.getStateString(), status.getPhase(), + status.getFinishTime()); + } + + // Recompute progress + recomputeProgress(); + return changed; + } + + /** + * Indicate that one of the taskids in this TaskInProgress + * has failed. + */ + public void incompleteSubTask(TaskAttemptID taskid, + JobStatus jobStatus) { + // + // Note the failure and its location + // + TaskStatus status = taskStatuses.get(taskid); + String trackerName; + String trackerHostName = null; + TaskStatus.State taskState = TaskStatus.State.FAILED; + if (status != null) { + trackerName = status.getTaskTracker(); + trackerHostName = + JobInProgress.convertTrackerNameToHostName(trackerName); + // Check if the user manually KILLED/FAILED this task-attempt... + Boolean shouldFail = tasksToKill.remove(taskid); + if (shouldFail != null) { + if (status.getRunState() == TaskStatus.State.FAILED || + status.getRunState() == TaskStatus.State.KILLED) { + taskState = (shouldFail) ? TaskStatus.State.FAILED : + TaskStatus.State.KILLED; + } else { + taskState = (shouldFail) ? 
TaskStatus.State.FAILED_UNCLEAN : + TaskStatus.State.KILLED_UNCLEAN; + + } + status.setRunState(taskState); + addDiagnosticInfo(taskid, "Task has been " + taskState + " by the user" ); + } + + taskState = status.getRunState(); + if (taskState != TaskStatus.State.FAILED && + taskState != TaskStatus.State.KILLED && + taskState != TaskStatus.State.FAILED_UNCLEAN && + taskState != TaskStatus.State.KILLED_UNCLEAN) { + LOG.info("Task '" + taskid + "' running on '" + trackerName + + "' in state: '" + taskState + "' being failed!"); + status.setRunState(TaskStatus.State.FAILED); + taskState = TaskStatus.State.FAILED; + } + + // tasktracker went down and failed time was not reported. + if (0 == status.getFinishTime()){ + status.setFinishTime(JobTracker.getClock().getTime()); + } + } + + this.activeTasks.remove(taskid); + + // Since we do not fail completed reduces (whose outputs go to hdfs), we + // should note this failure only for completed maps, only if this taskid; + // completed this map. however if the job is done, there is no need to + // manipulate completed maps + if (this.isMapTask() && !jobSetup && !jobCleanup && isComplete(taskid) && + jobStatus.getRunState() != JobStatus.SUCCEEDED) { + this.completes--; + + // Reset the successfulTaskId since we don't have a SUCCESSFUL task now + resetSuccessfulTaskid(); + } + + // Note that there can be failures of tasks that are hosted on a machine + // that has not yet registered with restarted jobtracker + // recalculate the counts only if its a genuine failure + if (tasks.contains(taskid)) { + if (taskState == TaskStatus.State.FAILED) { + numTaskFailures++; + machinesWhereFailed.add(trackerHostName); + if(maxSkipRecords>0) { + //skipping feature enabled + LOG.debug("TaskInProgress adding" + status.getNextRecordRange()); + failedRanges.add(status.getNextRecordRange()); + skipping = startSkipping(); + } + + } else if (taskState == TaskStatus.State.KILLED) { + numKilledTasks++; + } + } + + if (numTaskFailures >= maxTaskAttempts) { + LOG.info("TaskInProgress " + getTIPId() + " has failed " + numTaskFailures + " times."); + kill(); + } + } + + /** + * Get whether to start skipping mode. + */ + private boolean startSkipping() { + if(maxSkipRecords>0 && + numTaskFailures>=SkipBadRecords.getAttemptsToStartSkipping(conf)) { + return true; + } + return false; + } + + /** + * Finalize the completed task; note that this might not be the first + * task-attempt of the {@link TaskInProgress} and hence might be declared + * {@link TaskStatus.State.SUCCEEDED} or {@link TaskStatus.State.KILLED} + * + * @param taskId id of the completed task-attempt + * @param finalTaskState final {@link TaskStatus.State} of the task-attempt + */ + private void completedTask(TaskAttemptID taskId, TaskStatus.State finalTaskState) { + TaskStatus status = taskStatuses.get(taskId); + status.setRunState(finalTaskState); + activeTasks.remove(taskId); + } + + /** + * Indicate that one of the taskids in this already-completed + * TaskInProgress has successfully completed; hence we mark this + * taskid as {@link TaskStatus.State.KILLED}. + */ + void alreadyCompletedTask(TaskAttemptID taskid) { + // 'KILL' the task + completedTask(taskid, TaskStatus.State.KILLED); + + // Note the reason for the task being 'KILLED' + addDiagnosticInfo(taskid, "Already completed TIP"); + + LOG.info("Already complete TIP " + getTIPId() + + " has completed task " + taskid); + } + + /** + * Indicate that one of the taskids in this TaskInProgress + * has successfully completed! 
+ */ + public void completed(TaskAttemptID taskid) { + // + // Record that this taskid is complete + // + completedTask(taskid, TaskStatus.State.SUCCEEDED); + + // Note the successful taskid + setSuccessfulTaskid(taskid); + + // + // Now that the TIP is complete, the other speculative + // subtasks will be closed when the owning tasktracker + // reports in and calls shouldClose() on this object. + // + + this.completes++; + this.execFinishTime = JobTracker.getClock().getTime(); + recomputeProgress(); + + } + + /** + * Get the split locations + */ + public String[] getSplitLocations() { + if (isMapTask() && !jobSetup && !jobCleanup) { + return rawSplit.getLocations(); + } + return new String[0]; + } + + /** + * Get the Status of the tasks managed by this TIP + */ + public TaskStatus[] getTaskStatuses() { + return taskStatuses.values().toArray(new TaskStatus[taskStatuses.size()]); + } + + /** + * Get all the {@link TaskAttemptID}s in this {@link TaskInProgress} + */ + TaskAttemptID[] getAllTaskAttemptIDs() { + return tasks.toArray(new TaskAttemptID[tasks.size()]); + } + + /** + * Get the status of the specified task + * @param taskid + * @return + */ + public TaskStatus getTaskStatus(TaskAttemptID taskid) { + return taskStatuses.get(taskid); + } + /** + * The TIP's been ordered kill()ed. + */ + public void kill() { + if (isComplete() || failed) { + return; + } + this.failed = true; + killed = true; + this.execFinishTime = JobTracker.getClock().getTime(); + recomputeProgress(); + } + + /** + * Was the task killed? + * @return true if the task killed + */ + public boolean wasKilled() { + return killed; + } + + /** + * Kill the given task + */ + boolean killTask(TaskAttemptID taskId, boolean shouldFail) { + TaskStatus st = taskStatuses.get(taskId); + if(st != null && (st.getRunState() == TaskStatus.State.RUNNING + || st.getRunState() == TaskStatus.State.COMMIT_PENDING || + st.inTaskCleanupPhase() || + st.getRunState() == TaskStatus.State.UNASSIGNED) + && tasksToKill.put(taskId, shouldFail) == null ) { + String logStr = "Request received to " + (shouldFail ? "fail" : "kill") + + " task '" + taskId + "' by user"; + addDiagnosticInfo(taskId, logStr); + LOG.info(logStr); + return true; + } + return false; + } + + /** + * This method is called whenever there's a status change + * for one of the TIP's sub-tasks. It recomputes the overall + * progress for the TIP. We examine all sub-tasks and find + * the one that's most advanced (and non-failed). 
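+ * For example, a TIP with two RUNNING attempts reporting 0.3 and 0.7 progress
+ * takes 0.7 (along with that attempt's state string and counters); a
+ * SUCCEEDED attempt pins the progress at 1.0, and a failed TIP is reset to 0.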
+ */ + void recomputeProgress() { + if (isComplete()) { + this.progress = 1; + // update the counters and the state + TaskStatus completedStatus = taskStatuses.get(getSuccessfulTaskid()); + this.counters = completedStatus.getCounters(); + this.state = completedStatus.getStateString(); + } else if (failed) { + this.progress = 0; + // reset the counters and the state + this.state = ""; + this.counters = new Counters(); + } else { + double bestProgress = 0; + String bestState = ""; + Counters bestCounters = new Counters(); + for (Iterator it = taskStatuses.keySet().iterator(); it.hasNext();) { + TaskAttemptID taskid = it.next(); + TaskStatus status = taskStatuses.get(taskid); + if (status.getRunState() == TaskStatus.State.SUCCEEDED) { + bestProgress = 1; + bestState = status.getStateString(); + bestCounters = status.getCounters(); + break; + } else if (status.getRunState() == TaskStatus.State.COMMIT_PENDING) { + //for COMMIT_PENDING, we take the last state that we recorded + //when the task was RUNNING + bestProgress = this.progress; + bestState = this.state; + bestCounters = this.counters; + } else if (status.getRunState() == TaskStatus.State.RUNNING) { + if (status.getProgress() >= bestProgress) { + bestProgress = status.getProgress(); + bestState = status.getStateString(); + if (status.getIncludeCounters()) { + bestCounters = status.getCounters(); + } else { + bestCounters = this.counters; + } + } + } + } + this.progress = bestProgress; + this.state = bestState; + this.counters = bestCounters; + } + } + + ///////////////////////////////////////////////// + // "Action" methods that actually require the TIP + // to do something. + ///////////////////////////////////////////////// + + /** + * Return whether this TIP still needs to run + */ + boolean isRunnable() { + return !failed && (completes == 0); + } + + + + /** + * Can this task be speculated? This requires that it isn't done or almost + * done and that it isn't already being speculatively executed. + * + * Added for use by queue scheduling algorithms. + * @param currentTime + */ + boolean canBeSpeculated(long currentTime) { + if (skipping || !isRunnable() || !isRunning() || + completes != 0 || isOnlyCommitPending() || + activeTasks.size() > MAX_TASK_EXECS) { + return false; + } + + if (isSpeculativeForced()) { + return true; + } + + // no speculation for first few seconds + if (currentTime - lastDispatchTime < speculativeLag) { + return false; + } + + DataStatistics taskStats = job.getRunningTaskStatistics(isMapTask()); + updateProgressRate(currentTime, taskStats); + double currProgRate = getProgressRate(); + + if (LOG.isDebugEnabled()) { + LOG.debug("activeTasks.size(): " + activeTasks.size() + " " + + activeTasks.firstKey() + " task's progressrate: " + + currProgRate + + " taskStats : " + taskStats); + } + + // if the task is making progress fast enough to complete within + // the acceptable duration allowed for each task - do not speculate + if ((maxProgressRateForSpeculation > 0) && + (currProgRate > maxProgressRateForSpeculation)) { + return false; + } + + // the max difference allowed between the tasks's progress rate + // and the mean progress rate of sibling tasks. + double max_diff = (taskStats.std() == 0 ? + taskStats.mean()/3 : + job.getSlowTaskThreshold() * taskStats.std()); + + return (taskStats.mean() - currProgRate > max_diff); + } + + /** + * Return a Task that can be sent to a TaskTracker for execution. 
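+ * Attempt ids are offset by job.getNumRestarts() * NUM_ATTEMPTS_PER_RESTART,
+ * so, for instance, the first attempt handed out after a single jobtracker
+ * restart gets attempt number 1000 instead of reusing 0, keeping attempt ids
+ * unique across restarts.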
+ */ + public Task getTaskToRun(String taskTracker) throws IOException { + + // Create the 'taskid'; do not count the 'killed' tasks against the job! + TaskAttemptID taskid = null; + if (nextTaskId < (MAX_TASK_EXECS + maxTaskAttempts + numKilledTasks)) { + // Make sure that the attempts are unqiue across restarts + int attemptId = job.getNumRestarts() * NUM_ATTEMPTS_PER_RESTART + nextTaskId; + taskid = new TaskAttemptID( id, attemptId); + ++nextTaskId; + } else { + LOG.warn("Exceeded limit of " + (MAX_TASK_EXECS + maxTaskAttempts) + + " (plus " + numKilledTasks + " killed)" + + " attempts for the tip '" + getTIPId() + "'"); + return null; + } + //keep track of the last time we started an attempt at this TIP + //used to calculate the progress rate of this TIP + setDispatchTime(taskid, JobTracker.getClock().getTime()); + if (0 == execStartTime){ + // assume task starts running now + execStartTime = JobTracker.getClock().getTime(); + } + return addRunningTask(taskid, taskTracker); + } + + public Task addRunningTask(TaskAttemptID taskid, String taskTracker) { + return addRunningTask(taskid, taskTracker, false); + } + + /** + * Adds a previously running task to this tip. This is used in case of + * jobtracker restarts. + */ + public Task addRunningTask(TaskAttemptID taskid, + String taskTracker, + boolean taskCleanup) { + // 1 slot is enough for taskCleanup task + int numSlotsNeeded = taskCleanup ? 1 : numSlotsRequired; + // create the task + Task t = null; + if (isMapTask()) { + LOG.debug("attempt " + numTaskFailures + " sending skippedRecords " + + failedRanges.getIndicesCount()); + String splitClass = null; + BytesWritable split; + if (!jobSetup && !jobCleanup) { + splitClass = rawSplit.getClassName(); + split = rawSplit.getBytes(); + } else { + split = new BytesWritable(); + } + t = new MapTask(jobFile, taskid, partition, splitClass, split, + numSlotsNeeded, job.getUser()); + } else { + t = new ReduceTask(jobFile, taskid, partition, numMaps, + numSlotsNeeded, job.getUser()); + } + if (jobCleanup) { + t.setJobCleanupTask(); + } + if (jobSetup) { + t.setJobSetupTask(); + } + if (taskCleanup) { + t.setTaskCleanupTask(); + t.setState(taskStatuses.get(taskid).getRunState()); + cleanupTasks.put(taskid, taskTracker); + } + t.setConf(conf); + LOG.debug("Launching task with skipRanges:"+failedRanges.getSkipRanges()); + t.setSkipRanges(failedRanges.getSkipRanges()); + t.setSkipping(skipping); + if(failedRanges.isTestAttempt()) { + t.setWriteSkipRecs(false); + } + + if (activeTasks.size() > 1) { + speculativeTaskId = taskid; + } else { + speculativeTaskId = null; + } + activeTasks.put(taskid, taskTracker); + tasks.add(taskid); + + // Ask JobTracker to note that the task exists + // jobtracker.createTaskEntry(taskid, taskTracker, this); + + /* + // code to find call paths to createTaskEntry + StackTraceElement[] stackTraceElements = Thread.currentThread().getStackTrace(); + boolean found = false; + for (StackTraceElement s: stackTraceElements) { + if (s.getMethodName().indexOf("heartbeat") != -1 || + s.getMethodName().indexOf("findTask") != -1 || + s.getMethodName().indexOf("createAndAddAttempt") != -1 || + s.getMethodName().indexOf("processTaskAttempt") != -1) { + found = true; + break; + } + } + + if (!found) { + RuntimeException e = new RuntimeException ("calling addRunningTask from outside heartbeat"); + LOG.info(StringUtils.stringifyException(e)); + throw (e); + } + */ + + // check and set the first attempt + if (firstTaskId == null) { + firstTaskId = taskid; + } + return t; + } + + boolean 
isRunningTask(TaskAttemptID taskid) { + TaskStatus status = taskStatuses.get(taskid); + return status != null && status.getRunState() == TaskStatus.State.RUNNING; + } + + boolean isCleanupAttempt(TaskAttemptID taskid) { + return cleanupTasks.containsKey(taskid); + } + + String machineWhereCleanupRan(TaskAttemptID taskid) { + return cleanupTasks.get(taskid); + } + + String machineWhereTaskRan(TaskAttemptID taskid) { + return taskStatuses.get(taskid).getTaskTracker(); + } + + boolean wasKilled(TaskAttemptID taskid) { + return tasksToKill.containsKey(taskid); + } + + /** + * Has this task already failed on this machine? + * @param trackerHost The task tracker hostname + * @return Has it failed? + */ + public boolean hasFailedOnMachine(String trackerHost) { + return machinesWhereFailed.contains(trackerHost); + } + + /** + * Was this task ever scheduled to run on this machine? + * @param trackerHost The task tracker hostname + * @param trackerName The tracker name + * @return Was task scheduled on the tracker? + */ + public boolean hasRunOnMachine(String trackerHost, String trackerName) { + return this.activeTasks.values().contains(trackerName) || + hasFailedOnMachine(trackerHost); + } + /** + * Get the number of machines where this task has failed. + * @return the size of the failed machine set + */ + public int getNumberOfFailedMachines() { + return machinesWhereFailed.size(); + } + + /** + * Get the id of this map or reduce task. + * @return The index of this tip in the maps/reduces lists. + */ + public int getIdWithinJob() { + return partition; + } + + /** + * Set the event number that was raised for this tip + */ + public void setSuccessEventNumber(int eventNumber) { + successEventNumber = eventNumber; + } + + /** + * Get the event number that was raised for this tip + */ + public int getSuccessEventNumber() { + return successEventNumber; + } + + /** + * Gets the Node list of input split locations sorted in rack order. + */ + public String getSplitNodes() { + if (!isMapTask() || jobSetup || jobCleanup) { + return ""; + } + String[] splits = rawSplit.getLocations(); + Node[] nodes = new Node[splits.length]; + for (int i = 0; i < splits.length; i++) { + nodes[i] = jobtracker.getNode(splits[i]); + } + // sort nodes on rack location + Arrays.sort(nodes, new Comparator() { + public int compare(Node a, Node b) { + String left = a.getNetworkLocation(); + String right = b.getNetworkLocation(); + return left.compareTo(right); + } + }); + return nodeToString(nodes); + } + + private static String nodeToString(Node[] nodes) { + if (nodes == null || nodes.length == 0) { + return ""; + } + StringBuffer ret = new StringBuffer(nodes[0].toString()); + for(int i = 1; i < nodes.length;i++) { + ret.append(","); + ret.append(nodes[i].toString()); + } + return ret.toString(); + } + + public long getMapInputSize() { + if(isMapTask() && !jobSetup && !jobCleanup) { + return rawSplit.getDataLength(); + } else { + return 0; + } + } + + public void clearSplit() { + rawSplit.clearBytes(); + } + + /** + * Compare most recent task attempts dispatch time to current system time so + * that task progress rate will slow down as time proceeds even if no progress + * is reported for the task. This allows speculative tasks to be launched for + * tasks on slow/dead TT's before we realize the TT is dead/slow. Skew isn't + * an issue since both times are from the JobTrackers perspective. 
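+ * The rate for an attempt is progress / max(1, currentTime - dispatchTime);
+ * for illustration, an attempt reporting 0.5 progress 100,000 ms after being
+ * dispatched scores 5.0e-6 per ms. canBeSpeculated() then speculates the TIP
+ * only when this best rate falls more than
+ * slowTaskThreshold * stddev (or mean/3 when stddev is 0) below the mean rate
+ * of the job's sibling tasks.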
+ * @return the progress rate from the active task that is doing best + */ + public double getCurrentProgressRate(long currentTime) { + double bestProgressRate = 0; + for (TaskStatus ts : taskStatuses.values()){ + if (ts.getRunState() == TaskStatus.State.RUNNING || + ts.getRunState() == TaskStatus.State.SUCCEEDED || + ts.getRunState() == TaskStatus.State.COMMIT_PENDING) { + double progressRate = ts.getProgress()/Math.max(1, + currentTime - getDispatchTime(ts.getTaskID())); + if (progressRate > bestProgressRate){ + bestProgressRate = progressRate; + } + } + } + return bestProgressRate; + } + + /** + * update the task's progress rate and roll it up into the job level + * summary in one transaction. + */ + synchronized private void updateProgressRate(long currentTime, DataStatistics jobStats) { + double currProgRate = getCurrentProgressRate(currentTime); + jobStats.updateStatistics(oldProgressRate, currProgRate); + oldProgressRate = currProgRate; + } + + private double getProgressRate() { + return oldProgressRate; + } + + /** + * This class keeps the records to be skipped during further executions + * based on failed records from all the previous attempts. + * It also narrow down the skip records if it is more than the + * acceptable value by dividing the failed range into half. In this case one + * half is executed in the next attempt (test attempt). + * In the test attempt, only the test range gets executed, others get skipped. + * Based on the success/failure of the test attempt, the range is divided + * further. + */ + private class FailedRanges { + private SortedRanges skipRanges = new SortedRanges(); + private Divide divide; + + synchronized SortedRanges getSkipRanges() { + if(divide!=null) { + return divide.skipRange; + } + return skipRanges; + } + + synchronized boolean isTestAttempt() { + return divide!=null; + } + + synchronized long getIndicesCount() { + if(isTestAttempt()) { + return divide.skipRange.getIndicesCount(); + } + return skipRanges.getIndicesCount(); + } + + synchronized void updateState(TaskStatus status){ + if (isTestAttempt() && + (status.getRunState() == TaskStatus.State.SUCCEEDED)) { + divide.testPassed = true; + //since it was the test attempt we need to set it to failed + //as it worked only on the test range + status.setRunState(TaskStatus.State.FAILED); + + } + } + + synchronized void add(Range failedRange) { + LOG.warn("FailedRange:"+ failedRange); + if(divide!=null) { + LOG.warn("FailedRange:"+ failedRange +" test:"+divide.test + + " pass:"+divide.testPassed); + if(divide.testPassed) { + //test range passed + //other range would be bad. test it + failedRange = divide.other; + } + else { + //test range failed + //other range would be good. 
+ failedRange = divide.test; + } + //reset + divide = null; + } + + if(maxSkipRecords==0 || failedRange.getLength()<=maxSkipRecords) { + skipRanges.add(failedRange); + } else { + //start dividing the range to narrow down the skipped + //records until maxSkipRecords are met OR all attempts + //get exhausted + divide = new Divide(failedRange); + } + } + + class Divide { + private final SortedRanges skipRange; + private final Range test; + private final Range other; + private boolean testPassed; + Divide(Range range){ + long half = range.getLength()/2; + test = new Range(range.getStartIndex(), half); + other = new Range(test.getEndIndex(), range.getLength()-half); + //construct the skip range from the skipRanges + skipRange = new SortedRanges(); + for(Range r : skipRanges.getRanges()) { + skipRange.add(r); + } + skipRange.add(new Range(0,test.getStartIndex())); + skipRange.add(new Range(test.getEndIndex(), + (Long.MAX_VALUE-test.getEndIndex()))); + } + } + + } + + TreeMap getActiveTasks() { + return activeTasks; + } + + int getNumSlotsRequired() { + return numSlotsRequired; + } + + /** + * Force speculative execution if speculation is allowed in JobInProgress + */ + public void setSpeculativeForced(boolean speculativeForced) { + this.speculativeForced = speculativeForced; + } + + /** + * Is forced speculative execution enabled? + */ + public boolean isSpeculativeForced() { + return speculativeForced; + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/TaskLog.java b/src/mapred/org/apache/hadoop/mapred/TaskLog.java new file mode 100644 index 0000000..791ec81 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TaskLog.java @@ -0,0 +1,694 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.BufferedOutputStream; +import java.io.BufferedReader; +import java.io.DataOutputStream; +import java.io.File; +import java.io.FileFilter; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.ProcessTree; +import org.apache.hadoop.util.Shell; +import org.apache.log4j.Appender; +import org.apache.log4j.LogManager; +import org.apache.log4j.Logger; + +/** + * A simple logger to handle the task-specific user logs. 
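+ * Logs for an attempt typically live in a per-attempt directory under the
+ * userlogs directory, e.g.
+ * <pre>
+ *   ${hadoop.log.dir}/userlogs/attempt_200707121733_0003_m_000005_0/stdout
+ *   ${hadoop.log.dir}/userlogs/attempt_200707121733_0003_m_000005_0/syslog
+ *   ${hadoop.log.dir}/userlogs/attempt_200707121733_0003_m_000005_0/log.index
+ * </pre>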
+ * This class uses the system property hadoop.log.dir. + * + */ +public class TaskLog { + private static final Log LOG = + LogFactory.getLog(TaskLog.class); + + private static final File LOG_DIR = + new File(System.getProperty("hadoop.log.dir"), + "userlogs").getAbsoluteFile(); + + // localFS is set in (and used by) writeToIndexFile() + static LocalFileSystem localFS = null; + static { + if (!LOG_DIR.exists()) { + LOG_DIR.mkdirs(); + } + } + + public static File getTaskLogFile(TaskAttemptID taskid, LogName filter) { + return new File(getBaseDir(taskid.toString()), filter.toString()); + } + + /** + * @deprecated Instead use + * {@link #getAllLogsFileDetails(TaskAttemptID, boolean)} to get + * the details of all log-files and then use the particular + * log-type's detail to call getRealTaskLogFileLocation(String, + * LogName) real log-location + */ + @Deprecated + public static File getRealTaskLogFileLocation(TaskAttemptID taskid, + LogName filter) { + LogFileDetail l; + try { + Map allFilesDetails = + getAllLogsFileDetails(taskid, false); + l = allFilesDetails.get(filter); + } catch (IOException ie) { + LOG.error("getTaskLogFileDetailgetAllLogsFileDetails threw an exception " + + ie); + return null; + } + return new File(getBaseDir(l.location), filter.toString()); + } + + /** + * Get the real task-log file-path + * + * @param location Location of the log-file. This should point to an + * attempt-directory. + * @param filter + * @return + * @throws IOException + */ + static String getRealTaskLogFilePath(String location, LogName filter) + throws IOException { + return FileUtil.makeShellPath(new File(getBaseDir(location), + filter.toString())); + } + + static class LogFileDetail { + final static String LOCATION = "LOG_DIR:"; + String location; + long start; + long length; + } + + static Map getAllLogsFileDetails( + TaskAttemptID taskid, boolean isCleanup) throws IOException { + + Map allLogsFileDetails = + new HashMap(); + + File indexFile = getIndexFile(taskid.toString(), isCleanup); + BufferedReader fis = new BufferedReader(new java.io.FileReader(indexFile)); + //the format of the index file is + //LOG_DIR: + //stdout: + //stderr: + //syslog: + String str = fis.readLine(); + if (str == null) { //the file doesn't have anything + throw new IOException ("Index file for the log of " + taskid+" doesn't exist."); + } + String loc = str.substring(str.indexOf(LogFileDetail.LOCATION)+ + LogFileDetail.LOCATION.length()); + //special cases are the debugout and profile.out files. 
They are guaranteed + //to be associated with each task attempt since jvm reuse is disabled + //when profiling/debugging is enabled + for (LogName filter : new LogName[] { LogName.DEBUGOUT, LogName.PROFILE }) { + LogFileDetail l = new LogFileDetail(); + l.location = loc; + l.length = new File(getBaseDir(l.location), filter.toString()).length(); + l.start = 0; + allLogsFileDetails.put(filter, l); + } + str = fis.readLine(); + while (str != null) { + LogFileDetail l = new LogFileDetail(); + l.location = loc; + int idx = str.indexOf(':'); + LogName filter = LogName.valueOf(str.substring(0, idx).toUpperCase()); + str = str.substring(idx + 1); + String[] startAndLen = str.split(" "); + l.start = Long.parseLong(startAndLen[0]); + l.length = Long.parseLong(startAndLen[1]); + allLogsFileDetails.put(filter, l); + str = fis.readLine(); + } + fis.close(); + return allLogsFileDetails; + } + + private static File getTmpIndexFile(String taskid) { + return new File(getBaseDir(taskid), "log.tmp"); + } + public static File getIndexFile(String taskid) { + return getIndexFile(taskid, false); + } + + public static File getIndexFile(String taskid, boolean isCleanup) { + if (isCleanup) { + return new File(getBaseDir(taskid), "log.index.cleanup"); + } else { + return new File(getBaseDir(taskid), "log.index"); + } + } + + static File getBaseDir(String taskid) { + return new File(LOG_DIR, taskid); + } + + static final List LOGS_TRACKED_BY_INDEX_FILES = + Arrays.asList(LogName.STDOUT, LogName.STDERR, LogName.SYSLOG); + + private static TaskAttemptID currentTaskid; + + /** + * Map to store previous and current lengths. + */ + private static Map logLengths = + new HashMap(); + static { + for (LogName logName : LOGS_TRACKED_BY_INDEX_FILES) { + logLengths.put(logName, new Long[] { Long.valueOf(0L), + Long.valueOf(0L) }); + } + } + + static void writeToIndexFile(TaskAttemptID firstTaskid, + TaskAttemptID currentTaskid, boolean isCleanup, + Map lengths) throws IOException { + // To ensure atomicity of updates to index file, write to temporary index + // file first and then rename. 
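+    // For example, an attempt that wrote 1 KB of stdout, nothing to stderr
+    // and 8 KB of syslog (no JVM reuse, so all offsets start at 0) ends up
+    // with an index file along the lines of:
+    //   LOG_DIR:attempt_200707121733_0003_m_000005_0
+    //   stdout:0 1024
+    //   stderr:0 0
+    //   syslog:0 8192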
+ File tmpIndexFile = getTmpIndexFile(currentTaskid.toString()); + + BufferedOutputStream bos = + new BufferedOutputStream(new FileOutputStream(tmpIndexFile,false)); + DataOutputStream dos = new DataOutputStream(bos); + //the format of the index file is + //LOG_DIR: + //STDOUT: + //STDERR: + //SYSLOG: + dos.writeBytes(LogFileDetail.LOCATION + + firstTaskid.toString() + + "\n"); + for (LogName logName : LOGS_TRACKED_BY_INDEX_FILES) { + Long[] lens = lengths.get(logName); + dos.writeBytes(logName.toString() + ":" + + lens[0].toString() + " " + + Long.toString(lens[1].longValue() - lens[0].longValue()) + + "\n");} + dos.close(); + + File indexFile = getIndexFile(currentTaskid.toString(), isCleanup); + Path indexFilePath = new Path(indexFile.getAbsolutePath()); + Path tmpIndexFilePath = new Path(tmpIndexFile.getAbsolutePath()); + + if (localFS == null) {// set localFS once + localFS = FileSystem.getLocal(new Configuration()); + } + localFS.rename (tmpIndexFilePath, indexFilePath); + } + + public synchronized static void syncLogs(TaskAttemptID firstTaskid, + TaskAttemptID taskid) + throws IOException { + syncLogs(firstTaskid, taskid, false); + } + + @SuppressWarnings("unchecked") + public synchronized static void syncLogs(TaskAttemptID firstTaskid, + TaskAttemptID taskid, + boolean isCleanup) + throws IOException { + System.out.flush(); + System.err.flush(); + Enumeration allLoggers = LogManager.getCurrentLoggers(); + while (allLoggers.hasMoreElements()) { + Logger l = allLoggers.nextElement(); + Enumeration allAppenders = l.getAllAppenders(); + while (allAppenders.hasMoreElements()) { + Appender a = allAppenders.nextElement(); + if (a instanceof TaskLogAppender) { + ((TaskLogAppender)a).flush(); + } + } + } + // set start and end + for (LogName logName : LOGS_TRACKED_BY_INDEX_FILES) { + if (currentTaskid != taskid) { + // Set start = current-end + logLengths.get(logName)[0] = + Long.valueOf(getTaskLogFile(firstTaskid, logName).length()); + } + // Set current end + logLengths.get(logName)[1] = + Long.valueOf(getTaskLogFile(firstTaskid, logName).length()); + } + if (currentTaskid != taskid) { + if (currentTaskid != null) { + LOG.info("Starting logging for a new task " + taskid + + " in the same JVM as that of the first task " + firstTaskid); + } + currentTaskid = taskid; + } + writeToIndexFile(firstTaskid, taskid, isCleanup, logLengths); + } + + /** + * The filter for userlogs. + */ + public static enum LogName { + /** Log on the stdout of the task. */ + STDOUT ("stdout"), + + /** Log on the stderr of the task. */ + STDERR ("stderr"), + + /** Log on the map-reduce system logs of the task. */ + SYSLOG ("syslog"), + + /** The java profiler information. */ + PROFILE ("profile.out"), + + /** Log the debug script's stdout */ + DEBUGOUT ("debugout"); + + private String prefix; + + private LogName(String prefix) { + this.prefix = prefix; + } + + @Override + public String toString() { + return prefix; + } + } + + private static class TaskLogsPurgeFilter implements FileFilter { + long purgeTimeStamp; + + TaskLogsPurgeFilter(long purgeTimeStamp) { + this.purgeTimeStamp = purgeTimeStamp; + } + + public boolean accept(File file) { + LOG.debug("PurgeFilter - file: " + file + ", mtime: " + file.lastModified() + ", purge: " + purgeTimeStamp); + return file.lastModified() < purgeTimeStamp; + } + } + + /** + * Purge old user logs. 
+ * + * @throws IOException + */ + public static synchronized void cleanup(int logsRetainHours + ) throws IOException { + // Purge logs of tasks on this tasktracker if their + // mtime has exceeded "mapred.task.log.retain" hours + long purgeTimeStamp = System.currentTimeMillis() - + (logsRetainHours*60L*60*1000); + File[] oldTaskLogs = LOG_DIR.listFiles + (new TaskLogsPurgeFilter(purgeTimeStamp)); + if (oldTaskLogs != null) { + for (int i=0; i < oldTaskLogs.length; ++i) { + FileUtil.fullyDelete(oldTaskLogs[i]); + } + } + } + + static class Reader extends InputStream { + private long bytesRemaining; + private FileInputStream file; + + public Reader(TaskAttemptID taskid, LogName kind, + long start, long end) throws IOException { + this(taskid, kind, start, end, false); + } + + /** + * Read a log file from start to end positions. The offsets may be negative, + * in which case they are relative to the end of the file. For example, + * Reader(taskid, kind, 0, -1) is the entire file and + * Reader(taskid, kind, -4197, -1) is the last 4196 bytes. + * @param taskid the id of the task to read the log file for + * @param kind the kind of log to read + * @param start the offset to read from (negative is relative to tail) + * @param end the offset to read upto (negative is relative to tail) + * @param isCleanup whether the attempt is cleanup attempt or not + * @throws IOException + */ + public Reader(TaskAttemptID taskid, LogName kind, + long start, long end, boolean isCleanup) throws IOException { + // find the right log file + Map allFilesDetails = + getAllLogsFileDetails(taskid, isCleanup); + LogFileDetail fileDetail = allFilesDetails.get(kind); + // calculate the start and stop + long size = fileDetail.length; + if (start < 0) { + start += size + 1; + } + if (end < 0) { + end += size + 1; + } + start = Math.max(0, Math.min(start, size)); + end = Math.max(0, Math.min(end, size)); + start += fileDetail.start; + end += fileDetail.start; + bytesRemaining = end - start; + file = new FileInputStream(new File(getBaseDir(fileDetail.location), + kind.toString())); + // skip upto start + long pos = 0; + while (pos < start) { + long result = file.skip(start - pos); + if (result < 0) { + bytesRemaining = 0; + break; + } + pos += result; + } + } + + @Override + public int read() throws IOException { + int result = -1; + if (bytesRemaining > 0) { + bytesRemaining -= 1; + result = file.read(); + } + return result; + } + + @Override + public int read(byte[] buffer, int offset, int length) throws IOException { + length = (int) Math.min(length, bytesRemaining); + int bytes = file.read(buffer, offset, length); + if (bytes > 0) { + bytesRemaining -= bytes; + } + return bytes; + } + + @Override + public int available() throws IOException { + return (int) Math.min(bytesRemaining, file.available()); + } + + @Override + public void close() throws IOException { + file.close(); + } + } + + private static final String bashCommand = "bash"; + private static final String tailCommand = "tail"; + + /** + * Get the desired maximum length of task's logs. + * @param conf the job to look in + * @return the number of bytes to cap the log files at + */ + public static long getTaskLogLength(JobConf conf) { + return conf.getLong("mapred.userlog.limit.kb", 100) * 1024; + } + + /** + * Wrap a command in a shell to capture stdout and stderr to files. + * If the tailLength is 0, the entire output will be saved. 
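+ * For example, on Linux with tailLength 0 and no setup commands, a cmd of
+ * [/bin/ls, -l] is wrapped roughly as
+ * bash -c "export JVM_PID=`echo $$` ; exec '/bin/ls' '-l' < /dev/null
+ * 1>> (stdout file) 2>> (stderr file)"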
+ * @param cmd The command and the arguments that should be run + * @param stdoutFilename The filename that stdout should be saved to + * @param stderrFilename The filename that stderr should be saved to + * @param tailLength The length of the tail to be saved. + * @return the modified command that should be run + */ + public static List captureOutAndError(List cmd, + File stdoutFilename, + File stderrFilename, + long tailLength + ) throws IOException { + return captureOutAndError(null, cmd, stdoutFilename, + stderrFilename, tailLength, false); + } + + /** + * Wrap a command in a shell to capture stdout and stderr to files. + * Setup commands such as setting memory limit can be passed which + * will be executed before exec. + * If the tailLength is 0, the entire output will be saved. + * @param setup The setup commands for the execed process. + * @param cmd The command and the arguments that should be run + * @param stdoutFilename The filename that stdout should be saved to + * @param stderrFilename The filename that stderr should be saved to + * @param tailLength The length of the tail to be saved. + * @return the modified command that should be run + */ + public static List captureOutAndError(List setup, + List cmd, + File stdoutFilename, + File stderrFilename, + long tailLength + ) throws IOException { + return captureOutAndError(setup, cmd, stdoutFilename, stderrFilename, + tailLength, false); + } + + /** + * Wrap a command in a shell to capture stdout and stderr to files. + * Setup commands such as setting memory limit can be passed which + * will be executed before exec. + * If the tailLength is 0, the entire output will be saved. + * @param setup The setup commands for the execed process. + * @param cmd The command and the arguments that should be run + * @param stdoutFilename The filename that stdout should be saved to + * @param stderrFilename The filename that stderr should be saved to + * @param tailLength The length of the tail to be saved. + * @deprecated pidFiles are no more used. Instead pid is exported to + * env variable JVM_PID. + * @return the modified command that should be run + */ + @Deprecated + public static List captureOutAndError(List setup, + List cmd, + File stdoutFilename, + File stderrFilename, + long tailLength, + String pidFileName + ) throws IOException { + return captureOutAndError(setup, cmd, stdoutFilename, stderrFilename, + tailLength, false, pidFileName); + } + + /** + * Wrap a command in a shell to capture stdout and stderr to files. + * Setup commands such as setting memory limit can be passed which + * will be executed before exec. + * If the tailLength is 0, the entire output will be saved. + * @param setup The setup commands for the execed process. + * @param cmd The command and the arguments that should be run + * @param stdoutFilename The filename that stdout should be saved to + * @param stderrFilename The filename that stderr should be saved to + * @param tailLength The length of the tail to be saved. + * @param useSetsid Should setsid be used in the command or not. + * @deprecated pidFiles are no more used. Instead pid is exported to + * env variable JVM_PID. 
+ * @return the modified command that should be run + * + */ + @Deprecated + public static List captureOutAndError(List setup, + List cmd, + File stdoutFilename, + File stderrFilename, + long tailLength, + boolean useSetsid, + String pidFileName + ) throws IOException { + return captureOutAndError(setup,cmd, stdoutFilename, stderrFilename, tailLength, + useSetsid); + } + + /** + * Wrap a command in a shell to capture stdout and stderr to files. + * Setup commands such as setting memory limit can be passed which + * will be executed before exec. + * If the tailLength is 0, the entire output will be saved. + * @param setup The setup commands for the execed process. + * @param cmd The command and the arguments that should be run + * @param stdoutFilename The filename that stdout should be saved to + * @param stderrFilename The filename that stderr should be saved to + * @param tailLength The length of the tail to be saved. + * @param useSetsid Should setsid be used in the command or not. + * @return the modified command that should be run + */ + public static List captureOutAndError(List setup, + List cmd, + File stdoutFilename, + File stderrFilename, + long tailLength, + boolean useSetsid + ) throws IOException { + List result = new ArrayList(3); + result.add(bashCommand); + result.add("-c"); + String mergedCmd = buildCommandLine(setup, cmd, + stdoutFilename, + stderrFilename, tailLength, + useSetsid); + result.add(mergedCmd.toString()); + return result; + } + + + static String buildCommandLine(List setup, + List cmd, + File stdoutFilename, + File stderrFilename, + long tailLength, + boolean useSetSid) throws IOException { + + String stdout = FileUtil.makeShellPath(stdoutFilename); + String stderr = FileUtil.makeShellPath(stderrFilename); + StringBuffer mergedCmd = new StringBuffer(); + + if (!Shell.WINDOWS) { + mergedCmd.append(" export JVM_PID=`echo $$` ; "); + } + + if (setup != null && setup.size() > 0) { + mergedCmd.append(addCommand(setup, false)); + mergedCmd.append(";"); + } + if (tailLength > 0) { + mergedCmd.append("("); + } else if (ProcessTree.isSetsidAvailable && useSetSid + && !Shell.WINDOWS) { + mergedCmd.append("exec setsid "); + } else { + mergedCmd.append("exec "); + } + mergedCmd.append(addCommand(cmd, true)); + mergedCmd.append(" < /dev/null "); + if (tailLength > 0) { + mergedCmd.append(" | "); + mergedCmd.append(tailCommand); + mergedCmd.append(" -c "); + mergedCmd.append(tailLength); + mergedCmd.append(" >> "); + mergedCmd.append(stdout); + mergedCmd.append(" ; exit $PIPESTATUS ) 2>&1 | "); + mergedCmd.append(tailCommand); + mergedCmd.append(" -c "); + mergedCmd.append(tailLength); + mergedCmd.append(" >> "); + mergedCmd.append(stderr); + mergedCmd.append(" ; exit $PIPESTATUS"); + } else { + mergedCmd.append(" 1>> "); + mergedCmd.append(stdout); + mergedCmd.append(" 2>> "); + mergedCmd.append(stderr); + } + return mergedCmd.toString(); + } + + /** + * Add quotes to each of the command strings and + * return as a single string + * @param cmd The command to be quoted + * @param isExecutable makes shell path if the first + * argument is executable + * @return returns The quoted string. + * @throws IOException + */ + public static String addCommand(List cmd, boolean isExecutable) + throws IOException { + StringBuffer command = new StringBuffer(); + for(String s: cmd) { + command.append('\''); + if (isExecutable) { + // the executable name needs to be expressed as a shell path for the + // shell to find it. 
+ command.append(FileUtil.makeShellPath(new File(s))); + isExecutable = false; + } else { + command.append(s); + } + command.append('\''); + command.append(" "); + } + return command.toString(); + } + + /** + * Wrap a command in a shell to capture debug script's + * stdout and stderr to debugout. + * @param cmd The command and the arguments that should be run + * @param debugoutFilename The filename that stdout and stderr + * should be saved to. + * @return the modified command that should be run + * @throws IOException + */ + public static List captureDebugOut(List cmd, + File debugoutFilename + ) throws IOException { + String debugout = FileUtil.makeShellPath(debugoutFilename); + List result = new ArrayList(3); + result.add(bashCommand); + result.add("-c"); + StringBuffer mergedCmd = new StringBuffer(); + mergedCmd.append("exec "); + boolean isExecutable = true; + for(String s: cmd) { + if (isExecutable) { + // the executable name needs to be expressed as a shell path for the + // shell to find it. + mergedCmd.append(FileUtil.makeShellPath(new File(s))); + isExecutable = false; + } else { + mergedCmd.append(s); + } + mergedCmd.append(" "); + } + mergedCmd.append(" < /dev/null "); + mergedCmd.append(" >"); + mergedCmd.append(debugout); + mergedCmd.append(" 2>&1 "); + result.add(mergedCmd.toString()); + return result; + } + + public static File getUserLogDir() { + return LOG_DIR; + } + +} // TaskLog diff --git a/src/mapred/org/apache/hadoop/mapred/TaskLogAppender.java b/src/mapred/org/apache/hadoop/mapred/TaskLogAppender.java new file mode 100644 index 0000000..bbacdef --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TaskLogAppender.java @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.util.LinkedList; +import java.util.Queue; + +import org.apache.log4j.FileAppender; +import org.apache.log4j.spi.LoggingEvent; + +/** + * A simple log4j-appender for the task child's + * map-reduce system logs. + * + */ +public class TaskLogAppender extends FileAppender { + private String taskId; //taskId should be managed as String rather than TaskID object + //so that log4j can configure it from the configuration(log4j.properties). 
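+  // The appender is typically wired up from conf/log4j.properties through the
+  // taskId/totalLogFileSize setters below, along these (assumed) lines:
+  //   log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+  //   log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+  //   log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}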
+ private int maxEvents; + private Queue tail = null; + + @Override + public void activateOptions() { + synchronized (this) { + if (maxEvents > 0) { + tail = new LinkedList(); + } + setFile(TaskLog.getTaskLogFile(TaskAttemptID.forName(taskId), + TaskLog.LogName.SYSLOG).toString()); + setAppend(true); + super.activateOptions(); + } + } + + @Override + public void append(LoggingEvent event) { + synchronized (this) { + if (tail == null) { + super.append(event); + } else { + if (tail.size() >= maxEvents) { + tail.remove(); + } + tail.add(event); + } + } + } + + public void flush() { + qw.flush(); + } + + @Override + public synchronized void close() { + if (tail != null) { + for(LoggingEvent event: tail) { + super.append(event); + } + } + super.close(); + } + + /** + * Getter/Setter methods for log4j. + */ + + public String getTaskId() { + return taskId; + } + + public void setTaskId(String taskId) { + this.taskId = taskId; + } + + private static final int EVENT_SIZE = 100; + + public long getTotalLogFileSize() { + return maxEvents * EVENT_SIZE; + } + + public void setTotalLogFileSize(long logSize) { + maxEvents = (int) logSize / EVENT_SIZE; + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/TaskLogServlet.java b/src/mapred/org/apache/hadoop/mapred/TaskLogServlet.java new file mode 100644 index 0000000..17fe8eb --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TaskLogServlet.java @@ -0,0 +1,241 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.hadoop.util.StringUtils; + +/** + * A servlet that is run by the TaskTrackers to provide the task logs via http. + */ +public class TaskLogServlet extends HttpServlet { + private static final long serialVersionUID = -6615764817774487321L; + + private boolean haveTaskLog(TaskAttemptID taskId, TaskLog.LogName type) { + File f = TaskLog.getTaskLogFile(taskId, type); + return f.canRead(); + } + + /** + * Construct the taskLogUrl + * @param taskTrackerHostName + * @param httpPort + * @param taskAttemptID + * @return the taskLogUrl + */ + public static String getTaskLogUrl(String taskTrackerHostName, + String httpPort, String taskAttemptID) { + return ("http://" + taskTrackerHostName + ":" + httpPort + + "/tasklog?taskid=" + taskAttemptID); + } + + /** + * Find the next quotable character in the given array. 
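+   * For example, in the bytes for "a<b" the first quotable character is the '<' at index 1.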
+   * @param data the bytes to look in
+   * @param offset the first index to look in
+   * @param end the index after the last one to look in
+   * @return the index of the quotable character or end if none was found
+   */
+  private static int findFirstQuotable(byte[] data, int offset, int end) {
+    while (offset < end) {
+      switch (data[offset]) {
+      case '<':
+      case '>':
+      case '&':
+        return offset;
+      default:
+        offset += 1;
+      }
+    }
+    return offset;
+  }
+
+  private static void quotedWrite(OutputStream out, byte[] data, int offset,
+                                  int length) throws IOException {
+    int end = offset + length;
+    while (offset < end) {
+      int next = findFirstQuotable(data, offset, end);
+      out.write(data, offset, next - offset);
+      offset = next;
+      if (offset < end) {
+        switch (data[offset]) {
+        case '<':
+          out.write("&lt;".getBytes());
+          break;
+        case '>':
+          out.write("&gt;".getBytes());
+          break;
+        case '&':
+          out.write("&amp;".getBytes());
+          break;
+        default:
+          out.write(data[offset]);
+          break;
+        }
+        offset += 1;
+      }
+    }
+  }
+
+  private void printTaskLog(HttpServletResponse response,
+                            OutputStream out, TaskAttemptID taskId,
+                            long start, long end, boolean plainText,
+                            TaskLog.LogName filter, boolean isCleanup)
+  throws IOException {
+    if (!plainText) {
+      out.write(("<br><b><u>" + filter + " logs</u></b><br>\n" +
+                 "<pre>\n").getBytes());
+    }
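+    // What follows either streams the raw bytes (plainText) or HTML-escapes them through
+    // quotedWrite(), so a log line such as "x < y" reaches the browser as "x &lt; y".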
+
+    try {
+      InputStream taskLogReader = 
+        new TaskLog.Reader(taskId, filter, start, end, isCleanup);
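+      // Offsets follow TaskLog.Reader's convention that negative values count back from the
+      // end of the log, so the defaults used by doGet() (start=0, end=-1) cover the whole file.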
+      byte[] b = new byte[65536];
+      int result;
+      while (true) {
+        result = taskLogReader.read(b);
+        if (result > 0) {
+          if (plainText) {
+            out.write(b, 0, result); 
+          } else {
+            quotedWrite(out, b, 0, result);
+          }
+        } else {
+          break;
+        }
+      }
+      taskLogReader.close();
+      if( !plainText ) {
+        out.write("</pre><hr><br>\n".getBytes());
+      }
+    } catch (IOException ioe) {
+      if (filter == TaskLog.LogName.DEBUGOUT) {
+        if (!plainText) {
+          out.write("</pre><hr><br>
\n".getBytes()); + } + // do nothing + } + else { + response.sendError(HttpServletResponse.SC_GONE, + "Failed to retrieve " + filter + " log for task: " + + taskId); + out.write(("TaskLogServlet exception:\n" + + StringUtils.stringifyException(ioe) + "\n").getBytes()); + } + } + } + + /** + * Get the logs via http. + */ + @Override + public void doGet(HttpServletRequest request, + HttpServletResponse response + ) throws ServletException, IOException { + long start = 0; + long end = -1; + boolean plainText = false; + TaskLog.LogName filter = null; + boolean isCleanup = false; + + String taskIdStr = request.getParameter("taskid"); + if (taskIdStr == null) { + response.sendError(HttpServletResponse.SC_BAD_REQUEST, + "Argument taskid is required"); + return; + } + TaskAttemptID taskId = TaskAttemptID.forName(taskIdStr); + String logFilter = request.getParameter("filter"); + if (logFilter != null) { + try { + filter = TaskLog.LogName.valueOf(TaskLog.LogName.class, + logFilter.toUpperCase()); + } catch (IllegalArgumentException iae) { + response.sendError(HttpServletResponse.SC_BAD_REQUEST, + "Illegal value for filter: " + logFilter); + return; + } + } + + String sLogOff = request.getParameter("start"); + if (sLogOff != null) { + start = Long.valueOf(sLogOff).longValue(); + } + + String sLogEnd = request.getParameter("end"); + if (sLogEnd != null) { + end = Long.valueOf(sLogEnd).longValue(); + } + + String sPlainText = request.getParameter("plaintext"); + if (sPlainText != null) { + plainText = Boolean.valueOf(sPlainText); + } + + String sCleanup = request.getParameter("cleanup"); + if (sCleanup != null) { + isCleanup = Boolean.valueOf(sCleanup); + } + + OutputStream out = response.getOutputStream(); + if( !plainText ) { + out.write(("\n" + + "Task Logs: '" + taskId + "'\n" + + "\n" + + "

<h1>Task Logs: '" + taskId + "'</h1><br>
\n").getBytes()); + + if (filter == null) { + printTaskLog(response, out, taskId, start, end, plainText, + TaskLog.LogName.STDOUT, isCleanup); + printTaskLog(response, out, taskId, start, end, plainText, + TaskLog.LogName.STDERR, isCleanup); + printTaskLog(response, out, taskId, start, end, plainText, + TaskLog.LogName.SYSLOG, isCleanup); + if (haveTaskLog(taskId, TaskLog.LogName.DEBUGOUT)) { + printTaskLog(response, out, taskId, start, end, plainText, + TaskLog.LogName.DEBUGOUT, isCleanup); + } + if (haveTaskLog(taskId, TaskLog.LogName.PROFILE)) { + printTaskLog(response, out, taskId, start, end, plainText, + TaskLog.LogName.PROFILE, isCleanup); + } + } else { + printTaskLog(response, out, taskId, start, end, plainText, filter, + isCleanup); + } + + out.write("\n".getBytes()); + out.close(); + } else if (filter == null) { + response.sendError(HttpServletResponse.SC_BAD_REQUEST, + "You must supply a value for `filter' (STDOUT, STDERR, or SYSLOG) if you set plainText = true"); + } else { + printTaskLog(response, out, taskId, start, end, plainText, filter, + isCleanup); + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/TaskLogsMonitor.java b/src/mapred/org/apache/hadoop/mapred/TaskLogsMonitor.java new file mode 100644 index 0000000..cf57b9d --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TaskLogsMonitor.java @@ -0,0 +1,449 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.FileWriter; +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import org.apache.hadoop.mapred.TaskLog; +import org.apache.hadoop.mapred.TaskLog.LogName; +import org.apache.hadoop.mapred.TaskLog.LogFileDetail; +import org.apache.hadoop.util.StringUtils; + +class TaskLogsMonitor extends Thread { + static final Log LOG = LogFactory.getLog(TaskLogsMonitor.class); + + long mapRetainSize, reduceRetainSize; + + public TaskLogsMonitor(long mapRetSize, long reduceRetSize) { + mapRetainSize = mapRetSize; + reduceRetainSize = reduceRetSize; + LOG.info("Starting logs' monitor with mapRetainSize=" + mapRetainSize + + " and reduceRetainSize=" + reduceRetSize); + } + + /** + * The list of tasks that have finished and so need their logs to be + * truncated. 
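+   * The map is keyed by the first attempt that ran in the JVM, as passed to
+   * addProcessForLogTruncation().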
+ */ + private Map finishedJVMs = + new HashMap(); + + private static final int DEFAULT_BUFFER_SIZE = 4 * 1024; + + static final int MINIMUM_RETAIN_SIZE_FOR_TRUNCATION = 0; + + private static class PerJVMInfo { + + List allAttempts; + + public PerJVMInfo(List allAtmpts) { + this.allAttempts = allAtmpts; + } + } + + /** + * Process(JVM/debug script) has finished. Asynchronously truncate the logs of + * all the corresponding tasks to the configured limit. In case of JVM, both + * the firstAttempt as well as the list of all attempts that ran in the same + * JVM have to be passed. For debug script, the (only) attempt itself should + * be passed as both the firstAttempt as well as the list of attempts. + * + * @param firstAttempt + * @param isTaskCleanup + */ + void addProcessForLogTruncation(TaskAttemptID firstAttempt, + List allAttempts) { + LOG.info("Adding the jvm with first-attempt " + firstAttempt + + " for logs' truncation"); + PerJVMInfo lInfo = new PerJVMInfo(allAttempts); + synchronized (finishedJVMs) { + finishedJVMs.put(firstAttempt, lInfo); + finishedJVMs.notify(); + } + } + + /** + * Process the removed task's logs. This involves truncating them to + * retainSize. + */ + void truncateLogs(TaskAttemptID firstAttempt, PerJVMInfo lInfo) { + + // Read the log-file details for all the attempts that ran in this JVM + Map> taskLogFileDetails; + try { + taskLogFileDetails = getAllLogsFileDetails(lInfo.allAttempts); + } catch (IOException e) { + LOG.warn( + "Exception in truncateLogs while getting allLogsFileDetails()." + + " Ignoring the truncation of logs of this process.", e); + return; + } + + Map> updatedTaskLogFileDetails = + new HashMap>(); + + File attemptLogDir = TaskLog.getBaseDir(firstAttempt.toString()); + + FileWriter tmpFileWriter; + FileReader logFileReader; + // Now truncate file by file + logNameLoop: for (LogName logName : LogName.values()) { + + File logFile = TaskLog.getTaskLogFile(firstAttempt, logName); + + // //// Optimization: if no task is over limit, just skip truncation-code + if (logFile.exists() + && !isTruncationNeeded(lInfo, taskLogFileDetails, logName)) { + LOG.debug("Truncation is not needed for " + + logFile.getAbsolutePath()); + continue; + } + // //// End of optimization + + // Truncation is needed for this log-file. Go ahead now. + File tmpFile = new File(attemptLogDir, "truncate.tmp"); + try { + tmpFileWriter = new FileWriter(tmpFile); + } catch (IOException ioe) { + LOG.warn("Cannot open " + tmpFile.getAbsolutePath() + + " for writing truncated log-file " + + logFile.getAbsolutePath() + + ". Continuing with other log files. ", ioe); + continue; + } + + try { + logFileReader = new FileReader(logFile); + } catch (FileNotFoundException fe) { + LOG.warn("Cannot open " + logFile.getAbsolutePath() + + " for reading. Continuing with other log files"); + if (!tmpFile.delete()) { + LOG.warn("Cannot delete tmpFile " + tmpFile.getAbsolutePath()); + } + continue; + } + + long newCurrentOffset = 0; + // Process each attempt from the ordered list passed. + for (Task task : lInfo.allAttempts) { + + // Truncate the log files of this task-attempt so that only the last + // retainSize many bytes of this log file is retained and the log + // file is reduced in size saving disk space. + long retainSize = + (task.isMapTask() ? 
mapRetainSize : reduceRetainSize); + LogFileDetail newLogFileDetail = new LogFileDetail(); + try { + newLogFileDetail = + truncateALogFileOfAnAttempt(task.getTaskID(), + taskLogFileDetails.get(task).get(logName), retainSize, + tmpFileWriter, logFileReader); + } catch (IOException ioe) { + LOG.warn("Cannot truncate the log file " + + logFile.getAbsolutePath() + + ". Caught exception while handling " + task.getTaskID(), + ioe); + // revert back updatedTaskLogFileDetails + revertIndexFileInfo(lInfo, taskLogFileDetails, + updatedTaskLogFileDetails, logName); + if (!tmpFile.delete()) { + LOG.warn("Cannot delete tmpFile " + tmpFile.getAbsolutePath()); + } + continue logNameLoop; + } + + // Track information for updating the index file properly. + // Index files don't track DEBUGOUT and PROFILE logs, so skip'em. + if (TaskLog.LOGS_TRACKED_BY_INDEX_FILES.contains(logName)) { + if (!updatedTaskLogFileDetails.containsKey(task)) { + updatedTaskLogFileDetails.put(task, + new HashMap()); + } + // newLogFileDetail already has the location and length set, just + // set the start offset now. + newLogFileDetail.start = newCurrentOffset; + updatedTaskLogFileDetails.get(task).put(logName, newLogFileDetail); + newCurrentOffset += newLogFileDetail.length; + } + } + + try { + tmpFileWriter.close(); + } catch (IOException ioe) { + LOG.warn("Couldn't close the tmp file " + tmpFile.getAbsolutePath() + + ". Deleting it.", ioe); + revertIndexFileInfo(lInfo, taskLogFileDetails, + updatedTaskLogFileDetails, logName); + if (!tmpFile.delete()) { + LOG.warn("Cannot delete tmpFile " + tmpFile.getAbsolutePath()); + } + continue; + } + + if (!tmpFile.renameTo(logFile)) { + // If the tmpFile cannot be renamed revert back + // updatedTaskLogFileDetails to maintain the consistency of the + // original log file + revertIndexFileInfo(lInfo, taskLogFileDetails, + updatedTaskLogFileDetails, logName); + if (!tmpFile.delete()) { + LOG.warn("Cannot delete tmpFile " + tmpFile.getAbsolutePath()); + } + } + } + + // Update the index files + updateIndicesAfterLogTruncation(firstAttempt, updatedTaskLogFileDetails); + } + + /** + * @param lInfo + * @param taskLogFileDetails + * @param updatedTaskLogFileDetails + * @param logName + */ + private void revertIndexFileInfo(PerJVMInfo lInfo, + Map> taskLogFileDetails, + Map> updatedTaskLogFileDetails, + LogName logName) { + if (TaskLog.LOGS_TRACKED_BY_INDEX_FILES.contains(logName)) { + for (Task task : lInfo.allAttempts) { + if (!updatedTaskLogFileDetails.containsKey(task)) { + updatedTaskLogFileDetails.put(task, + new HashMap()); + } + updatedTaskLogFileDetails.get(task).put(logName, + taskLogFileDetails.get(task).get(logName)); + } + } + } + + /** + * Get the logFileDetails of all the list of attempts passed. + * + * @param lInfo + * @return a map of task to the log-file detail + * @throws IOException + */ + private Map> getAllLogsFileDetails( + final List allAttempts) throws IOException { + Map> taskLogFileDetails = + new HashMap>(); + for (Task task : allAttempts) { + Map allLogsFileDetails; + allLogsFileDetails = + TaskLog.getAllLogsFileDetails(task.getTaskID(), + task.isTaskCleanupTask()); + taskLogFileDetails.put(task, allLogsFileDetails); + } + return taskLogFileDetails; + } + + /** + * Check if truncation of logs is needed for the given jvmInfo. If all the + * tasks that ran in a JVM are within the log-limits, then truncation is not + * needed. Otherwise it is needed. 
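+   * For example, with a map retain size of 10KB, a JVM whose map attempts wrote 2KB and 50KB
+   * of syslog needs truncation because of the 50KB attempt.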
+ * + * @param lInfo + * @param taskLogFileDetails + * @param logName + * @return true if truncation is needed, false otherwise + */ + private boolean isTruncationNeeded(PerJVMInfo lInfo, + Map> taskLogFileDetails, + LogName logName) { + boolean truncationNeeded = false; + LogFileDetail logFileDetail = null; + for (Task task : lInfo.allAttempts) { + long taskRetainSize = + (task.isMapTask() ? mapRetainSize : reduceRetainSize); + Map allLogsFileDetails = + taskLogFileDetails.get(task); + logFileDetail = allLogsFileDetails.get(logName); + if (taskRetainSize > MINIMUM_RETAIN_SIZE_FOR_TRUNCATION + && logFileDetail.length > taskRetainSize) { + truncationNeeded = true; + break; + } + } + return truncationNeeded; + } + + /** + * Truncate the log file of this task-attempt so that only the last retainSize + * many bytes of each log file is retained and the log file is reduced in size + * saving disk space. + * + * @param taskID Task whose logs need to be truncated + * @param oldLogFileDetail contains the original log details for the attempt + * @param taskRetainSize retain-size + * @param tmpFileWriter New log file to write to. Already opened in append + * mode. + * @param logFileReader Original log file to read from. + * @return + * @throws IOException + */ + private LogFileDetail truncateALogFileOfAnAttempt( + final TaskAttemptID taskID, final LogFileDetail oldLogFileDetail, + final long taskRetainSize, final FileWriter tmpFileWriter, + final FileReader logFileReader) throws IOException { + LogFileDetail newLogFileDetail = new LogFileDetail(); + + // ///////////// Truncate log file /////////////////////// + + // New location of log file is same as the old + newLogFileDetail.location = oldLogFileDetail.location; + if (taskRetainSize > MINIMUM_RETAIN_SIZE_FOR_TRUNCATION + && oldLogFileDetail.length > taskRetainSize) { + LOG.info("Truncating logs for " + taskID + " from " + + oldLogFileDetail.length + "bytes to " + taskRetainSize + + "bytes."); + newLogFileDetail.length = taskRetainSize; + } else { + LOG.info("No truncation needed for " + taskID + " length is " + + oldLogFileDetail.length + " retain size " + taskRetainSize + + "bytes."); + newLogFileDetail.length = oldLogFileDetail.length; + } + long charsSkipped = + logFileReader.skip(oldLogFileDetail.length + - newLogFileDetail.length); + if (charsSkipped != oldLogFileDetail.length - newLogFileDetail.length) { + throw new IOException("Erroneously skipped " + charsSkipped + + " instead of the expected " + + (oldLogFileDetail.length - newLogFileDetail.length)); + } + long alreadyRead = 0; + while (alreadyRead < newLogFileDetail.length) { + char tmpBuf[]; // Temporary buffer to read logs + if (newLogFileDetail.length - alreadyRead >= DEFAULT_BUFFER_SIZE) { + tmpBuf = new char[DEFAULT_BUFFER_SIZE]; + } else { + tmpBuf = new char[(int) (newLogFileDetail.length - alreadyRead)]; + } + int bytesRead = logFileReader.read(tmpBuf); + if (bytesRead < 0) { + break; + } else { + alreadyRead += bytesRead; + } + tmpFileWriter.write(tmpBuf); + } + // ////// End of truncating log file /////////////////////// + + return newLogFileDetail; + } + + /** + * Truncation of logs is done. Now sync the index files to reflect the + * truncated sizes. 
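+   * Roughly, each tracked log's index entry is rewritten with the byte range
+   * [start, start + length) computed from the truncated file.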
+ * + * @param firstAttempt + * @param updatedTaskLogFileDetails + */ + private void updateIndicesAfterLogTruncation(TaskAttemptID firstAttempt, + Map> updatedTaskLogFileDetails) { + for (Entry> entry : + updatedTaskLogFileDetails.entrySet()) { + Task task = entry.getKey(); + Map logFileDetails = entry.getValue(); + Map logLengths = new HashMap(); + // set current and previous lengths + for (LogName logName : TaskLog.LOGS_TRACKED_BY_INDEX_FILES) { + logLengths.put(logName, new Long[] { Long.valueOf(0L), + Long.valueOf(0L) }); + LogFileDetail lfd = logFileDetails.get(logName); + if (lfd != null) { + // Set previous lengths + logLengths.get(logName)[0] = Long.valueOf(lfd.start); + // Set current lengths + logLengths.get(logName)[1] = Long.valueOf(lfd.start + lfd.length); + } + } + try { + TaskLog.writeToIndexFile(firstAttempt, task.getTaskID(), + task.isTaskCleanupTask(), logLengths); + } catch (IOException ioe) { + LOG.warn("Exception in updateIndicesAfterLogTruncation : " + + StringUtils.stringifyException(ioe)); + LOG.warn("Exception encountered while updating index file of task " + + task.getTaskID() + + ". Ignoring and continuing with other tasks."); + } + } + } + + /** + * + * @throws IOException + */ + void monitorTaskLogs() throws IOException { + + Map tasksBeingTruncated = + new HashMap(); + + // Start monitoring newly added finishedJVMs + synchronized (finishedJVMs) { + tasksBeingTruncated.clear(); + tasksBeingTruncated.putAll(finishedJVMs); + finishedJVMs.clear(); + } + + for (Entry entry : + tasksBeingTruncated.entrySet()) { + truncateLogs(entry.getKey(), entry.getValue()); + } + } + + @Override + public void run() { + + while (true) { + try { + monitorTaskLogs(); + try { + synchronized (finishedJVMs) { + while (finishedJVMs.isEmpty()) { + finishedJVMs.wait(); + } + } + } catch (InterruptedException e) { + LOG.warn(getName() + " is interrupted. Returning"); + return; + } + } catch (Throwable e) { + LOG.warn(getName() + + " encountered an exception while monitoring : " + + StringUtils.stringifyException(e)); + LOG.info("Ingoring the exception and continuing monitoring."); + } + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/TaskMemoryManagerThread.java b/src/mapred/org/apache/hadoop/mapred/TaskMemoryManagerThread.java new file mode 100644 index 0000000..a7c6419 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TaskMemoryManagerThread.java @@ -0,0 +1,550 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred; + +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.ArrayList; + +import java.util.Collections; +import java.util.Comparator; +import java.util.Map; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapred.TaskTracker.TaskInProgress; +import org.apache.hadoop.util.ProcessTree; +import org.apache.hadoop.util.ProcfsBasedProcessTree; +import org.apache.hadoop.util.StringUtils; + +/** + * Manages memory usage of tasks running under this TT. Kills any task-trees + * that overflow and over-step memory limits. + */ +class TaskMemoryManagerThread extends Thread { + + private static Log LOG = LogFactory.getLog(TaskMemoryManagerThread.class); + + private TaskTracker taskTracker; + private long monitoringInterval; + + private long maxMemoryAllowedForAllTasks; + private long maxRssMemoryAllowedForAllTasks; + private int maxRssMemoryAllowedUpdateCounter; + static private boolean doUpdateReservedPhysicalMemory = true; + static public final String TT_MEMORY_MANAGER_MONITORING_INTERVAL = + "mapred.tasktracker.taskmemorymanager.monitoring-interval"; + // The amount of memory which will not be used for running tasks + // If this is violated, task with largest memory will be killed. + static public final String TT_RESERVED_PHYSCIALMEMORY_MB = + "mapred.tasktracker.reserved.physicalmemory.mb"; + + private Map processTreeInfoMap; + private Map tasksToBeAdded; + private List tasksToBeRemoved; + + private static final String MEMORY_USAGE_STRING = + "Memory usage of ProcessTree %s for task-id %s : %d bytes, " + + "limit : %d bytes"; + + public TaskMemoryManagerThread(TaskTracker taskTracker) { + + this(taskTracker.getTotalMemoryAllottedForTasksOnTT() * 1024 * 1024L, + taskTracker.getJobConf().getLong(TT_MEMORY_MANAGER_MONITORING_INTERVAL, + 5000L)); + + this.taskTracker = taskTracker; + long reservedRssMemory = taskTracker.getJobConf(). + getLong(TaskMemoryManagerThread.TT_RESERVED_PHYSCIALMEMORY_MB, + JobConf.DISABLED_MEMORY_LIMIT); + long totalPhysicalMemoryOnTT = taskTracker.getTotalPhysicalMemoryOnTT(); + if (reservedRssMemory == JobConf.DISABLED_MEMORY_LIMIT || + totalPhysicalMemoryOnTT == JobConf.DISABLED_MEMORY_LIMIT) { + maxRssMemoryAllowedForAllTasks = JobConf.DISABLED_MEMORY_LIMIT; + } else { + maxRssMemoryAllowedForAllTasks = + totalPhysicalMemoryOnTT - reservedRssMemory * 1024 * 1024L; + } + } + + // mainly for test purposes. note that the tasktracker variable is + // not set here. + TaskMemoryManagerThread(long maxMemoryAllowedForAllTasks, + long monitoringInterval) { + setName(this.getClass().getName()); + + processTreeInfoMap = new HashMap(); + tasksToBeAdded = new HashMap(); + tasksToBeRemoved = new ArrayList(); + + this.maxMemoryAllowedForAllTasks = maxMemoryAllowedForAllTasks > 0 ? 
+ maxMemoryAllowedForAllTasks : JobConf.DISABLED_MEMORY_LIMIT; + + this.monitoringInterval = monitoringInterval; + } + + public void addTask(TaskAttemptID tid, long memLimit) { + synchronized (tasksToBeAdded) { + LOG.debug("Tracking ProcessTree " + tid + " for the first time"); + ProcessTreeInfo ptInfo = new ProcessTreeInfo(tid, null, null, memLimit); + tasksToBeAdded.put(tid, ptInfo); + } + } + + public void removeTask(TaskAttemptID tid) { + synchronized (tasksToBeRemoved) { + tasksToBeRemoved.add(tid); + } + } + + private static class ProcessTreeInfo { + private TaskAttemptID tid; + private String pid; + private ProcfsBasedProcessTree pTree; + private long memLimit; + private String pidFile; + + public ProcessTreeInfo(TaskAttemptID tid, String pid, + ProcfsBasedProcessTree pTree, long memLimit) { + this.tid = tid; + this.pid = pid; + this.pTree = pTree; + this.memLimit = memLimit; + } + + public TaskAttemptID getTID() { + return tid; + } + + public String getPID() { + return pid; + } + + public void setPid(String pid) { + this.pid = pid; + } + + public ProcfsBasedProcessTree getProcessTree() { + return pTree; + } + + public void setProcessTree(ProcfsBasedProcessTree pTree) { + this.pTree = pTree; + } + + public long getMemLimit() { + return memLimit; + } + } + + @Override + public void run() { + + LOG.info("Starting thread: " + this.getClass()); + + while (true) { + // Print the processTrees for debugging. + if (LOG.isDebugEnabled()) { + StringBuffer tmp = new StringBuffer("[ "); + for (ProcessTreeInfo p : processTreeInfoMap.values()) { + tmp.append(p.getPID()); + tmp.append(" "); + } + LOG.debug("Current ProcessTree list : " + + tmp.substring(0, tmp.length()) + "]"); + } + + //Add new Tasks + synchronized (tasksToBeAdded) { + processTreeInfoMap.putAll(tasksToBeAdded); + tasksToBeAdded.clear(); + } + + //Remove finished Tasks + synchronized (tasksToBeRemoved) { + for (TaskAttemptID tid : tasksToBeRemoved) { + processTreeInfoMap.remove(tid); + } + tasksToBeRemoved.clear(); + } + + long memoryStillInUsage = 0; + long rssMemoryStillInUsage = 0; + // Now, check memory usage and kill any overflowing tasks + for (Iterator> it = processTreeInfoMap + .entrySet().iterator(); it.hasNext();) { + Map.Entry entry = it.next(); + TaskAttemptID tid = entry.getKey(); + ProcessTreeInfo ptInfo = entry.getValue(); + try { + String pId = ptInfo.getPID(); + + // Initialize any uninitialized processTrees + if (pId == null) { + // get pid from taskAttemptId + pId = taskTracker.getPid(ptInfo.getTID()); + if (pId != null) { + // PID will be null, either if the pid file is yet to be created + // or if the tip is finished and we removed pidFile, but the TIP + // itself is still retained in runningTasks till successful + // transmission to JT + + // create process tree object + long sleeptimeBeforeSigkill = taskTracker.getJobConf().getLong( + "mapred.tasktracker.tasks.sleeptime-before-sigkill", + ProcessTree.DEFAULT_SLEEPTIME_BEFORE_SIGKILL); + + ProcfsBasedProcessTree pt = new ProcfsBasedProcessTree( + pId,ProcessTree.isSetsidAvailable, sleeptimeBeforeSigkill); + LOG.debug("Tracking ProcessTree " + pId + " for the first time"); + + ptInfo.setPid(pId); + ptInfo.setProcessTree(pt); + } + } + // End of initializing any uninitialized processTrees + + if (pId == null) { + continue; // processTree cannot be tracked + } + + LOG.debug("Constructing ProcessTree for : PID = " + pId + " TID = " + + tid); + ProcfsBasedProcessTree pTree = ptInfo.getProcessTree(); + pTree = pTree.getProcessTree(); // get the updated process-tree 
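+          // The refreshed tree feeds both checks below: cumulative virtual memory against the
+          // per-task limit, and cumulative RSS against maxRssMemoryAllowedForAllTasks after
+          // this loop.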
+ ptInfo.setProcessTree(pTree); // update ptInfo with process-tree of + // updated state + long currentMemUsage = pTree.getCumulativeVmem(); + long currentRssMemUsage = pTree.getCumulativeRssmem(); + // as processes begin with an age 1, we want to see if there + // are processes more than 1 iteration old. + long curMemUsageOfAgedProcesses = pTree.getCumulativeVmem(1); + long limit = ptInfo.getMemLimit(); + String user = taskTracker.getUserName(ptInfo.tid); + // Log RSS and virtual memory usage of all tasks + LOG.info((String.format("Memory usage of ProcessTree %s : " + + "[USER,TID,RSS,VMEM,VLimit,TotalRSSLimit]" + + "=[%s,%s,%s,%s,%s,%s]", + pId, user, ptInfo.tid, currentRssMemUsage, + currentMemUsage, limit, maxRssMemoryAllowedForAllTasks))); + + if (doCheckVirtualMemory() && + isProcessTreeOverLimit(tid.toString(), currentMemUsage, + curMemUsageOfAgedProcesses, limit)) { + // Task (the root process) is still alive and overflowing memory. + // Dump the process-tree and then clean it up. + String msg = + "TaskTree [pid=" + pId + ",tipID=" + tid + + "] is running beyond memory-limits. Current usage : " + + currentMemUsage + "bytes. Limit : " + limit + + "bytes. Killing task. \nDump of the process-tree for " + + tid + " : \n" + pTree.getProcessTreeDump(); + LOG.warn(msg); + taskTracker.cleanUpOverMemoryTask(tid, true, msg); + + // Now destroy the ProcessTree, remove it from monitoring map. + pTree.destroy(true/*in the background*/); + it.remove(); + LOG.info("Removed ProcessTree with root " + pId); + } else { + // Accounting the total memory in usage for all tasks that are still + // alive and within limits. + memoryStillInUsage += currentMemUsage; + rssMemoryStillInUsage += currentRssMemUsage; + } + } catch (Exception e) { + // Log the exception and proceed to the next task. + LOG.warn("Uncaught exception in TaskMemoryManager " + + "while managing memory of " + tid + " : " + + StringUtils.stringifyException(e)); + } + } + + if (doCheckVirtualMemory() && + memoryStillInUsage > maxMemoryAllowedForAllTasks) { + LOG.warn("The total memory in usage " + memoryStillInUsage + + " is still overflowing TTs limits " + + maxMemoryAllowedForAllTasks + + ". Trying to kill a few tasks with the least progress."); + killTasksWithLeastProgress(memoryStillInUsage); + } + + updateMaxRssMemory(); + if (doCheckPhysicalMemory() && + rssMemoryStillInUsage > maxRssMemoryAllowedForAllTasks) { + LOG.warn("The total physical memory in usage " + rssMemoryStillInUsage + + " is still overflowing TTs limits " + + maxRssMemoryAllowedForAllTasks + + ". Trying to kill a few tasks with the highest memory."); + killTasksWithMaxRssMemory(rssMemoryStillInUsage); + } + + // Sleep for some time before beginning next cycle + try { + LOG.debug(this.getClass() + " : Sleeping for " + monitoringInterval + + " ms"); + Thread.sleep(monitoringInterval); + } catch (InterruptedException ie) { + LOG.warn(this.getClass() + + " interrupted. Finishing the thread and returning."); + return; + } + } + } + + /** + * Is the total physical memory check enabled? + * @return true if total physical memory check is enabled. + */ + private boolean doCheckPhysicalMemory() { + return !(maxRssMemoryAllowedForAllTasks == JobConf.DISABLED_MEMORY_LIMIT); + } + + /** + * Is the total virtual memory check enabled? + * @return true if total virtual memory check is enabled. + */ + private boolean doCheckVirtualMemory() { + return !(maxMemoryAllowedForAllTasks == JobConf.DISABLED_MEMORY_LIMIT); + } + + /** + * Disable updating the reserved physical memory. 
Used only for tests. + */ + static public void disableUpdateReservedPhysicalMemory() { + doUpdateReservedPhysicalMemory = false; + } + + /** + * Read the reserved physical memory configuration and update the maximum + * physical memory allowed periodically. This allows us to change the + * physcial memory limit configuration without starting TaskTracker + */ + private void updateMaxRssMemory() { + if (!doUpdateReservedPhysicalMemory) { + return; + } + final int MEM_CONFIGURATION_READ_PERIOD = 100; + maxRssMemoryAllowedUpdateCounter++; + if (maxRssMemoryAllowedUpdateCounter > MEM_CONFIGURATION_READ_PERIOD) { + maxRssMemoryAllowedUpdateCounter = 0; + Configuration conf = new Configuration(); + long reservedRssMemory = + conf.getLong(TaskMemoryManagerThread.TT_RESERVED_PHYSCIALMEMORY_MB, + JobConf.DISABLED_MEMORY_LIMIT); + if (reservedRssMemory == JobConf.DISABLED_MEMORY_LIMIT) { + maxRssMemoryAllowedForAllTasks = JobConf.DISABLED_MEMORY_LIMIT; + } else { + maxRssMemoryAllowedForAllTasks = + taskTracker.getTotalPhysicalMemoryOnTT() - + reservedRssMemory * 1024 * 1024L; + } + } + } + + /** + * Check whether a task's process tree's current memory usage is over limit. + * + * When a java process exec's a program, it could momentarily account for + * double the size of it's memory, because the JVM does a fork()+exec() + * which at fork time creates a copy of the parent's memory. If the + * monitoring thread detects the memory used by the task tree at the same + * instance, it could assume it is over limit and kill the tree, for no + * fault of the process itself. + * + * We counter this problem by employing a heuristic check: + * - if a process tree exceeds the memory limit by more than twice, + * it is killed immediately + * - if a process tree has processes older than the monitoring interval + * exceeding the memory limit by even 1 time, it is killed. Else it is given + * the benefit of doubt to lie around for one more iteration. + * + * @param tId Task Id for the task tree + * @param currentMemUsage Memory usage of a task tree + * @param curMemUsageOfAgedProcesses Memory usage of processes older than + * an iteration in a task tree + * @param limit The limit specified for the task + * @return true if the memory usage is more than twice the specified limit, + * or if processes in the tree, older than this thread's + * monitoring interval, exceed the memory limit. False, + * otherwise. + */ + boolean isProcessTreeOverLimit(String tId, + long currentMemUsage, + long curMemUsageOfAgedProcesses, + long limit) { + boolean isOverLimit = false; + + if (currentMemUsage > (2*limit)) { + LOG.warn("Process tree for task: " + tId + " running over twice " + + "the configured limit. Limit=" + limit + + ", current usage = " + currentMemUsage); + isOverLimit = true; + } else if (curMemUsageOfAgedProcesses > limit) { + LOG.warn("Process tree for task: " + tId + " has processes older than 1 " + + "iteration running over the configured limit. Limit=" + limit + + ", current usage = " + curMemUsageOfAgedProcesses); + isOverLimit = true; + } + + return isOverLimit; + } + + // method provided just for easy testing purposes + boolean isProcessTreeOverLimit(ProcfsBasedProcessTree pTree, + String tId, long limit) { + long currentMemUsage = pTree.getCumulativeVmem(); + // as processes begin with an age 1, we want to see if there are processes + // more than 1 iteration old. 
+ long curMemUsageOfAgedProcesses = pTree.getCumulativeVmem(1); + return isProcessTreeOverLimit(tId, currentMemUsage, + curMemUsageOfAgedProcesses, limit); + } + + private void killTasksWithLeastProgress(long memoryStillInUsage) { + + List tasksToKill = new ArrayList(); + List tasksToExclude = new ArrayList(); + // Find tasks to kill so as to get memory usage under limits. + while (memoryStillInUsage > maxMemoryAllowedForAllTasks) { + // Exclude tasks that are already marked for + // killing. + TaskInProgress task = taskTracker.findTaskToKill(tasksToExclude); + if (task == null) { + break; // couldn't find any more tasks to kill. + } + + TaskAttemptID tid = task.getTask().getTaskID(); + if (processTreeInfoMap.containsKey(tid)) { + ProcessTreeInfo ptInfo = processTreeInfoMap.get(tid); + ProcfsBasedProcessTree pTree = ptInfo.getProcessTree(); + memoryStillInUsage -= pTree.getCumulativeVmem(); + tasksToKill.add(tid); + } + // Exclude this task from next search because it is already + // considered. + tasksToExclude.add(tid); + } + + // Now kill the tasks. + if (!tasksToKill.isEmpty()) { + for (TaskAttemptID tid : tasksToKill) { + String msg = + "Killing one of the least progress tasks - " + tid + + ", as the cumulative memory usage of all the tasks on " + + "the TaskTracker exceeds virtual memory limit " + + maxMemoryAllowedForAllTasks + "."; + LOG.warn(msg); + killTask(tid, msg); + } + } else { + LOG.info("The total memory usage is overflowing TTs limits. " + + "But found no alive task to kill for freeing memory."); + } + } + + /** + * Return the cumulative rss memory used by a task + * @param tid the task attempt ID of the task + * @return rss memory usage in bytes. 0 if the process tree is not available + */ + private long getTaskCumulativeRssmem(TaskAttemptID tid) { + ProcessTreeInfo ptInfo = processTreeInfoMap.get(tid); + ProcfsBasedProcessTree pTree = ptInfo.getProcessTree(); + return pTree == null ? 0 : pTree.getCumulativeVmem(); + } + + /** + * Starting from the tasks use the highest amount of RSS memory, + * kill the tasks until the RSS memory meets the requirement + * @param rssMemoryInUsage + */ + private void killTasksWithMaxRssMemory(long rssMemoryInUsage) { + + List tasksToKill = new ArrayList(); + List allTasks = new ArrayList(); + allTasks.addAll(processTreeInfoMap.keySet()); + // Sort the tasks descendingly according to RSS memory usage + Collections.sort(allTasks, new Comparator() { + public int compare(TaskAttemptID tid1, TaskAttemptID tid2) { + return getTaskCumulativeRssmem(tid2) > getTaskCumulativeRssmem(tid1) ? + 1 : -1; + }}); + + // Kill the tasks one by one until the memory requirement is met + while (rssMemoryInUsage > maxRssMemoryAllowedForAllTasks && + !allTasks.isEmpty()) { + TaskAttemptID tid = allTasks.remove(0); + if (!isKillable(tid)) { + continue; + } + long rssmem = getTaskCumulativeRssmem(tid); + if (rssmem == 0) { + break; // Skip tasks without process tree information currently + } + tasksToKill.add(tid); + rssMemoryInUsage -= rssmem; + } + + // Now kill the tasks. + if (!tasksToKill.isEmpty()) { + for (TaskAttemptID tid : tasksToKill) { + String msg = + "Killing one of the memory-consuming tasks - " + tid + + ", as the cumulative RSS memory usage of all the tasks on " + + "the TaskTracker exceeds physical memory limit " + + maxRssMemoryAllowedForAllTasks + "."; + LOG.warn(msg); + killTask(tid, msg); + } + } else { + LOG.info("The total physical memory usage is overflowing TTs limits. 
" + + "But found no alive task to kill for freeing memory."); + } + } + + /** + * Kill the task and clean up ProcessTreeInfo + * @param tid task attempt ID of the task to be killed. + * @param msg diagonostic message + */ + private void killTask(TaskAttemptID tid, String msg) { + // Kill the task and mark it as killed. + taskTracker.cleanUpOverMemoryTask(tid, false, msg); + // Now destroy the ProcessTree, remove it from monitoring map. + ProcessTreeInfo ptInfo = processTreeInfoMap.get(tid); + ProcfsBasedProcessTree pTree = ptInfo.getProcessTree(); + pTree.destroy(true/*in the background*/); + processTreeInfoMap.remove(tid); + LOG.info("Removed ProcessTree with root " + ptInfo.getPID()); + } + + /** + * Check if a task can be killed to increase free memory + * @param tid task attempt ID + * @return true if the task can be killed + */ + private boolean isKillable(TaskAttemptID tid) { + TaskInProgress tip = taskTracker.runningTasks.get(tid); + return tip != null && !tip.wasKilled() && + (tip.getRunState() == TaskStatus.State.RUNNING || + tip.getRunState() == TaskStatus.State.COMMIT_PENDING); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/TaskReport.java b/src/mapred/org/apache/hadoop/mapred/TaskReport.java new file mode 100644 index 0000000..a48dd01 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TaskReport.java @@ -0,0 +1,239 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; + +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableUtils; + +/** A report on the state of a task. 
*/ +public class TaskReport implements Writable { + private TaskID taskid; + private float progress; + private String state; + private String[] diagnostics; + private long startTime; + private long finishTime; + private Counters counters; + private TIPStatus currentStatus; + + private Collection runningAttempts = + new ArrayList(); + private TaskAttemptID successfulAttempt = new TaskAttemptID(); + public TaskReport() { + taskid = new TaskID(); + } + + /** + * Creates a new TaskReport object + * @param taskid + * @param progress + * @param state + * @param diagnostics + * @param startTime + * @param finishTime + * @param counters + * @deprecated + */ + @Deprecated + TaskReport(TaskID taskid, float progress, String state, + String[] diagnostics, long startTime, long finishTime, + Counters counters) { + this(taskid, progress, state, diagnostics, null, startTime, finishTime, + counters); + } + + /** + * Creates a new TaskReport object + * @param taskid + * @param progress + * @param state + * @param diagnostics + * @param currentStatus + * @param startTime + * @param finishTime + * @param counters + */ + TaskReport(TaskID taskid, float progress, String state, + String[] diagnostics, TIPStatus currentStatus, + long startTime, long finishTime, + Counters counters) { + this.taskid = taskid; + this.progress = progress; + this.state = state; + this.diagnostics = diagnostics; + this.currentStatus = currentStatus; + this.startTime = startTime; + this.finishTime = finishTime; + this.counters = counters; + } + + /** @deprecated use {@link #getTaskID()} instead */ + @Deprecated + public String getTaskId() { return taskid.toString(); } + /** The id of the task. */ + public TaskID getTaskID() { return taskid; } + /** The amount completed, between zero and one. */ + public float getProgress() { return progress; } + /** The most recent state, reported by a {@link Reporter}. */ + public String getState() { return state; } + /** A list of error messages. */ + public String[] getDiagnostics() { return diagnostics; } + /** A table of counters. */ + public Counters getCounters() { return counters; } + /** The current status */ + public TIPStatus getCurrentStatus() { + return currentStatus; + } + + /** + * Get finish time of task. + * @return 0, if finish time was not set else returns finish time. + */ + public long getFinishTime() { + return finishTime; + } + + /** + * set finish time of task. + * @param finishTime finish time of task. + */ + void setFinishTime(long finishTime) { + this.finishTime = finishTime; + } + + /** + * Get start time of task. + * @return 0 if start time was not set, else start time. + */ + public long getStartTime() { + return startTime; + } + + /** + * set start time of the task. + */ + void setStartTime(long startTime) { + this.startTime = startTime; + } + + /** + * set successful attempt ID of the task. + */ + public void setSuccessfulAttempt(TaskAttemptID t) { + successfulAttempt = t; + } + /** + * Get the attempt ID that took this task to completion + */ + public TaskAttemptID getSuccessfulTaskAttempt() { + return successfulAttempt; + } + /** + * set running attempt(s) of the task. 
+ */ + public void setRunningTaskAttempts( + Collection runningAttempts) { + this.runningAttempts = runningAttempts; + } + /** + * Get the running task attempt IDs for this task + */ + public Collection getRunningTaskAttempts() { + return runningAttempts; + } + + + @Override + public boolean equals(Object o) { + if(o == null) + return false; + if(o.getClass().equals(TaskReport.class)) { + TaskReport report = (TaskReport) o; + return counters.equals(report.getCounters()) + && Arrays.toString(this.diagnostics) + .equals(Arrays.toString(report.getDiagnostics())) + && this.finishTime == report.getFinishTime() + && this.progress == report.getProgress() + && this.startTime == report.getStartTime() + && this.state.equals(report.getState()) + && this.taskid.equals(report.getTaskID()); + } + return false; + } + + @Override + public int hashCode() { + return (counters.toString() + Arrays.toString(this.diagnostics) + + this.finishTime + this.progress + this.startTime + this.state + + this.taskid.toString()).hashCode(); + } + ////////////////////////////////////////////// + // Writable + ////////////////////////////////////////////// + public void write(DataOutput out) throws IOException { + taskid.write(out); + out.writeFloat(progress); + Text.writeString(out, state); + out.writeLong(startTime); + out.writeLong(finishTime); + WritableUtils.writeStringArray(out, diagnostics); + counters.write(out); + WritableUtils.writeEnum(out, currentStatus); + if (currentStatus == TIPStatus.RUNNING) { + WritableUtils.writeVInt(out, runningAttempts.size()); + TaskAttemptID t[] = new TaskAttemptID[0]; + t = runningAttempts.toArray(t); + for (int i = 0; i < t.length; i++) { + t[i].write(out); + } + } else if (currentStatus == TIPStatus.COMPLETE) { + successfulAttempt.write(out); + } + } + + public void readFields(DataInput in) throws IOException { + this.taskid.readFields(in); + this.progress = in.readFloat(); + this.state = Text.readString(in); + this.startTime = in.readLong(); + this.finishTime = in.readLong(); + + diagnostics = WritableUtils.readStringArray(in); + counters = new Counters(); + counters.readFields(in); + currentStatus = WritableUtils.readEnum(in, TIPStatus.class); + if (currentStatus == TIPStatus.RUNNING) { + int num = WritableUtils.readVInt(in); + for (int i = 0; i < num; i++) { + TaskAttemptID t = new TaskAttemptID(); + t.readFields(in); + runningAttempts.add(t); + } + } else if (currentStatus == TIPStatus.COMPLETE) { + successfulAttempt.readFields(in); + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/TaskRunner.java b/src/mapred/org/apache/hadoop/mapred/TaskRunner.java new file mode 100644 index 0000000..924d41d --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TaskRunner.java @@ -0,0 +1,823 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import org.apache.commons.logging.*; + +import org.apache.hadoop.fs.*; +import org.apache.hadoop.filecache.*; +import org.apache.hadoop.util.*; +import org.apache.hadoop.io.MD5Hash; + +import java.io.*; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Vector; +import java.net.URI; +import java.util.Arrays; + +/** Base class that runs a task in a separate process. Tasks are run in a + * separate process in order to isolate the map/reduce system code from bugs in + * user supplied map and reduce functions. + */ +abstract class TaskRunner extends Thread { + public static final Log LOG = + LogFactory.getLog(TaskRunner.class); + + volatile boolean killed = false; + private TaskTracker.TaskInProgress tip; + private Task t; + private Object lock = new Object(); + private volatile boolean done = false; + private int exitCode = -1; + private boolean exitCodeSet = false; + + private TaskTracker tracker; + + protected JobConf conf; + JvmManager jvmManager; + + /** + * for cleaning up old map outputs + */ + protected MapOutputFile mapOutputFile; + + public TaskRunner(TaskTracker.TaskInProgress tip, TaskTracker tracker, + JobConf conf) { + this.tip = tip; + this.t = tip.getTask(); + this.tracker = tracker; + this.conf = conf; + this.mapOutputFile = + new MapOutputFile(t.getJobID(), tracker.getAsyncDiskService()); + this.mapOutputFile.setConf(conf); + this.jvmManager = tracker.getJvmManagerInstance(); + } + + public Task getTask() { return t; } + public TaskTracker.TaskInProgress getTaskInProgress() { return tip; } + public TaskTracker getTracker() { return tracker; } + + /** Called to assemble this task's input. This method is run in the parent + * process before the child is spawned. It should not execute user code, + * only system code. */ + public boolean prepare() throws IOException { + return true; + } + + /** Called when this task's output is no longer needed. + * This method is run in the parent process after the child exits. It should + * not execute user code, only system code. + */ + public void close() throws IOException {} + + private static String stringifyPathArray(Path[] p){ + if (p == null){ + return null; + } + StringBuffer str = new StringBuffer(p[0].toString()); + for (int i = 1; i < p.length; i++){ + str.append(","); + str.append(p[i].toString()); + } + return str.toString(); + } + + /** + * Get the java command line options for the child map/reduce tasks. + * @param jobConf job configuration + * @param defaultValue default value + * @return the java command line options for child map/reduce tasks + * @deprecated Use command line options specific to map or reduce tasks set + * via {@link JobConf#MAPRED_MAP_TASK_JAVA_OPTS} or + * {@link JobConf#MAPRED_REDUCE_TASK_JAVA_OPTS} + */ + @Deprecated + public String getChildJavaOpts(JobConf jobConf, String defaultValue) { + return jobConf.get(JobConf.MAPRED_TASK_JAVA_OPTS, defaultValue); + } + + /** + * Get the maximum virtual memory of the child map/reduce tasks. 
+ * @param jobConf job configuration + * @return the maximum virtual memory of the child task or -1 if + * none is specified + * @deprecated Use limits specific to the map or reduce tasks set via + * {@link JobConf#MAPRED_MAP_TASK_ULIMIT} or + * {@link JobConf#MAPRED_REDUCE_TASK_ULIMIT} + */ + @Deprecated + public int getChildUlimit(JobConf jobConf) { + return jobConf.getInt(JobConf.MAPRED_TASK_ULIMIT, -1); + } + + /** + * Get the environment variables for the child map/reduce tasks. + * @param jobConf job configuration + * @return the environment variables for the child map/reduce tasks or + * null if unspecified + * @deprecated Use environment variables specific to the map or reduce tasks + * set via {@link JobConf#MAPRED_MAP_TASK_ENV} or + * {@link JobConf#MAPRED_REDUCE_TASK_ENV} + */ + public String getChildEnv(JobConf jobConf) { + return jobConf.get(JobConf.MAPRED_TASK_ENV); + } + + private static class CacheFile { + URI uri; + long timeStamp; + CacheFile (URI uri, long timeStamp) { + this.uri = uri; + this.timeStamp = timeStamp; + } + CacheFile(URI uri) { + this.uri = uri; + this.timeStamp = 0; + } + } + + /** + * Given the path to the localized job jar file, add it's constituents to + * the classpath + */ + private void addJobJarToClassPath(String localJarFile, StringBuffer classPath) { + File jobCacheDir = new File + (new Path(localJarFile).getParent().toString()); + File[] libs = new File(jobCacheDir, "lib").listFiles(); + String sep = System.getProperty("path.separator"); + + if (libs != null) { + for (int i = 0; i < libs.length; i++) { + classPath.append(sep); // add libs from jar to classpath + classPath.append(libs[i]); + } + } + classPath.append(sep); + classPath.append(new File(jobCacheDir, "classes")); + classPath.append(sep); + classPath.append(jobCacheDir); + } + + @Override + public final void run() { + String errorInfo = "Child Error"; + List localizedCacheFiles = new ArrayList(); + try { + //before preparing the job localize + //all the archives + TaskAttemptID taskid = t.getTaskID(); + LocalDirAllocator lDirAlloc = new LocalDirAllocator("mapred.local.dir"); + + File workDir = new File(lDirAlloc.getLocalPathToRead( + TaskTracker.getLocalTaskDir( + t.getJobID().toString(), + t.getTaskID().toString(), + t.isTaskCleanupTask()) + + Path.SEPARATOR + MRConstants.WORKDIR, + conf). 
toString()); + + URI[] archives = DistributedCache.getCacheArchives(conf); + URI[] files = DistributedCache.getCacheFiles(conf); + URI[] sharedArchives = DistributedCache.getSharedCacheArchives(conf); + URI[] sharedFiles = DistributedCache.getSharedCacheFiles(conf); + FileStatus fileStatus; + FileSystem fileSystem; + Path localPath; + String baseDir; + + if ((archives != null) || (files != null) || + (sharedArchives != null) || (sharedFiles != null)) { + + if (archives != null) { + String[] archivesTimestamps = + DistributedCache.getArchiveTimestamps(conf); + Path[] p = new Path[archives.length]; + for (int i = 0; i < archives.length;i++){ + fileSystem = FileSystem.get(archives[i], conf); + fileStatus = fileSystem.getFileStatus( + new Path(archives[i].getPath())); + p[i] = DistributedCache.getLocalCacheFromTimestamps( + archives[i], conf, new Path(TaskTracker.getCacheSubdir()), + fileStatus, true, Long.parseLong(archivesTimestamps[i]), + fileStatus.getLen(), + new Path(workDir.getAbsolutePath()), false, + tracker.getAsyncDiskService(), lDirAlloc); + localizedCacheFiles.add(new CacheFile(archives[i], Long + .parseLong(archivesTimestamps[i]))); + + } + DistributedCache.setLocalArchives(conf, stringifyPathArray(p)); + } + + if (sharedArchives != null) { + String[] archiveLength + = DistributedCache.getSharedArchiveLength(conf); + + Path[] p = new Path[sharedArchives.length]; + for (int i = 0; i < sharedArchives.length;i++){ + p[i] = DistributedCache.getLocalCacheFromURI( + sharedArchives[i], // cache + conf, // conf + new Path(TaskTracker.getCacheSubdir()), // subDir + true, // isArchive + Long.parseLong(archiveLength[i]), // fileLength + new Path(workDir.getAbsolutePath()), // currentWorkDir + false, // honorSymLinkConf + tracker.getAsyncDiskService(), // asyncDiskService + lDirAlloc); // lDirAllocator + localizedCacheFiles.add(new CacheFile( + sharedArchives[i])); + + } + DistributedCache.setLocalSharedArchives(conf, stringifyPathArray(p)); + } + + if ((files != null)) { + String[] fileTimestamps = DistributedCache.getFileTimestamps(conf); + Path[] p = new Path[files.length]; + for (int i = 0; i < files.length;i++){ + fileSystem = FileSystem.get(files[i], conf); + fileStatus = fileSystem.getFileStatus( + new Path(files[i].getPath())); + p[i] = DistributedCache.getLocalCacheFromTimestamps( + files[i], conf, new Path(TaskTracker.getCacheSubdir()), + fileStatus, false, Long.parseLong(fileTimestamps[i]), + fileStatus.getLen(), + new Path(workDir.getAbsolutePath()), false, + tracker.getAsyncDiskService(), lDirAlloc); + localizedCacheFiles.add(new CacheFile(files[i], Long + .parseLong(fileTimestamps[i]))); + } + DistributedCache.setLocalFiles(conf, stringifyPathArray(p)); + } + + if ((sharedFiles != null)) { + String[] fileLength = DistributedCache.getSharedFileLength(conf); + + Path[] p = new Path[sharedFiles.length]; + for (int i = 0; i < sharedFiles.length;i++){ + p[i] = DistributedCache.getLocalCacheFromURI( + sharedFiles[i], conf, new Path(TaskTracker.getCacheSubdir()), + false, Long.parseLong(fileLength[i]), + new Path(workDir.getAbsolutePath()), false, + tracker.getAsyncDiskService(), lDirAlloc); + localizedCacheFiles.add(new CacheFile(sharedFiles[i])); + } + DistributedCache.setLocalSharedFiles(conf, stringifyPathArray(p)); + } + + Path localTaskFile = new Path(t.getJobFile()); + FileSystem localFs = FileSystem.getLocal(conf); + localFs.delete(localTaskFile, true); + OutputStream out = localFs.create(localTaskFile); + try { + conf.writeXml(out); + } finally { + out.close(); + } + } + + if 
(!prepare()) { + return; + } + + String sep = System.getProperty("path.separator"); + StringBuffer classPath = new StringBuffer(); + // start with same classpath as parent process + classPath.append(System.getProperty("java.class.path")); + classPath.append(sep); + if (!workDir.mkdirs()) { + if (!workDir.isDirectory()) { + LOG.fatal("Mkdirs failed to create " + workDir.toString()); + } + } + + boolean shared = conf.getBoolean("mapred.cache.shared.enabled", false); + String localJar = conf.getJar(); + + // handle job jar file for the non shared case + if (!shared && (localJar != null)) { + addJobJarToClassPath(localJar, classPath); + } + + // include the user specified classpath + + //archive paths + Path[] archiveClasspaths = DistributedCache.getArchiveClassPaths(conf); + if (archiveClasspaths != null && archives != null) { + Path[] localArchives = DistributedCache + .getLocalCacheArchives(conf); + if (localArchives != null){ + for (int i=0;i vargs = new Vector(8); + File jvm = // use same jvm as parent + new File(new File(System.getProperty("java.home"), "bin"), "java"); + + vargs.add(jvm.toString()); + + // Add child (task) java-vm options. + // + // The following symbols if present in mapred.{map|reduce}.child.java.opts + // value are replaced: + // + @taskid@ is interpolated with value of TaskID. + // Other occurrences of @ will not be altered. + // + // Example with multiple arguments and substitutions, showing + // jvm GC logging, and start of a passwordless JVM JMX agent so can + // connect with jconsole and the likes to watch child memory, threads + // and get thread dumps. + // + // + // mapred.map.child.java.opts + // -Xmx 512M -verbose:gc -Xloggc:/tmp/@taskid@.gc \ + // -Dcom.sun.management.jmxremote.authenticate=false \ + // -Dcom.sun.management.jmxremote.ssl=false \ + // + // + // + // + // mapred.reduce.child.java.opts + // -Xmx 1024M -verbose:gc -Xloggc:/tmp/@taskid@.gc \ + // -Dcom.sun.management.jmxremote.authenticate=false \ + // -Dcom.sun.management.jmxremote.ssl=false \ + // + // + // + String javaOpts = getChildJavaOpts(conf, + JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS); + javaOpts = javaOpts.replace("@taskid@", taskid.toString()); + String [] javaOptsSplit = javaOpts.split(" "); + + // Add java.library.path; necessary for loading native libraries. + // + // 1. To support native-hadoop library i.e. libhadoop.so, we add the + // parent processes' java.library.path to the child. + // 2. We also add the 'cwd' of the task to it's java.library.path to help + // users distribute native libraries via the DistributedCache. + // 3. The user can also specify extra paths to be added to the + // java.library.path via mapred.{map|reduce}.child.java.opts. + // + String libraryPath = System.getProperty("java.library.path"); + if (libraryPath == null) { + libraryPath = workDir.getAbsolutePath(); + } else { + libraryPath += sep + workDir; + } + boolean hasUserLDPath = false; + for(int i=0; i 0) { + String childEnvs[] = mapredChildEnv.split(","); + for (String cEnv : childEnvs) { + try { + String[] parts = cEnv.split("="); // split on '=' + String value = env.get(parts[0]); + if (value != null) { + // replace $env with the child's env constructed by tt's + // example LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp + value = parts[1].replace("$" + parts[0], value); + } else { + // this key is not configured by the tt for the child .. 
get it + // from the tt's env + // example PATH=$PATH:/tmp + value = System.getenv(parts[0]); + if (value != null) { + // the env key is present in the tt's env + value = parts[1].replace("$" + parts[0], value); + } else { + // the env key is note present anywhere .. simply set it + // example X=$X:/tmp or X=/tmp + value = parts[1].replace("$" + parts[0], ""); + } + } + env.put(parts[0], value); + } catch (Throwable t) { + // set the error msg + errorInfo = "Invalid User environment settings : " + mapredChildEnv + + ". Failed to parse user-passed environment param." + + " Expecting : env1=value1,env2=value2..."; + LOG.warn(errorInfo); + throw t; + } + } + } + + jvmManager.launchJvm(this, + jvmManager.constructJvmEnv(setup,vargs,stdout,stderr,logSize, + workDir, env, conf)); + synchronized (lock) { + while (!done) { + lock.wait(); + } + } + tracker.getTaskTrackerInstrumentation().reportTaskEnd(t.getTaskID()); + if (exitCodeSet) { + if (!killed && exitCode != 0) { + if (exitCode == 65) { + tracker.getTaskTrackerInstrumentation().taskFailedPing(t.getTaskID()); + } + throw new IOException("Task process exit with nonzero status of " + + exitCode + "."); + } + } + } catch (FSError e) { + LOG.fatal("FSError", e); + try { + tracker.fsError(t.getTaskID(), e.getMessage()); + } catch (IOException ie) { + LOG.fatal(t.getTaskID()+" reporting FSError", ie); + } + } catch (Throwable throwable) { + LOG.warn(t.getTaskID() + errorInfo, throwable); + Throwable causeThrowable = new Throwable(errorInfo, throwable); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + causeThrowable.printStackTrace(new PrintStream(baos)); + try { + tracker.reportDiagnosticInfo(t.getTaskID(), baos.toString()); + } catch (IOException e) { + LOG.warn(t.getTaskID()+" Reporting Diagnostics", e); + } + } finally { + try{ + for (CacheFile cf : localizedCacheFiles) { + DistributedCache.releaseCache(cf.uri, conf, cf.timeStamp); + } + }catch(IOException ie){ + LOG.warn("Error releasing caches : " + + "Cache files might not have been cleaned up"); + } + + // It is safe to call TaskTracker.TaskInProgress.reportTaskFinished with + // *false* since the task has either + // a) SUCCEEDED - which means commit has been done + // b) FAILED - which means we do not need to commit + tip.reportTaskFinished(false); + } + } + + /** + * Makes dir empty directory(does not delete dir itself). + */ + static void deleteDirContents(JobConf conf, File dir) throws IOException { + FileSystem fs = FileSystem.getLocal(conf); + if (fs.exists(new Path(dir.getAbsolutePath()))) { + File contents[] = dir.listFiles(); + if (contents != null) { + for (int i = 0; i < contents.length; i++) { + if (!fs.delete(new Path(contents[i].getAbsolutePath()), true)) { + LOG.warn("Unable to delete "+ contents[i]); + } + } + } + } + else { + LOG.warn(dir + " does not exist."); + } + } + + //Mostly for setting up the symlinks. Note that when we setup the distributed + //cache, we didn't create the symlinks. This is done on a per task basis + //by the currently executing task. + public static void setupWorkDir(JobConf conf) throws IOException { + File workDir = new File(".").getAbsoluteFile(); + if (LOG.isDebugEnabled()) { + LOG.debug("Fully deleting contents of " + workDir); + } + + /** delete only the contents of workDir leaving the directory empty. We + * can't delete the workDir as it is the current working directory. 
+ */ + deleteDirContents(conf, workDir); + + if (DistributedCache.getSymlink(conf)) { + URI[] archives = DistributedCache.getCacheArchives(conf); + URI[] files = DistributedCache.getCacheFiles(conf); + URI[] sharedArchives = DistributedCache.getSharedCacheArchives(conf); + URI[] sharedFiles = DistributedCache.getSharedCacheFiles(conf); + + Path[] localArchives = DistributedCache.getLocalCacheArchives(conf); + Path[] localFiles = DistributedCache.getLocalCacheFiles(conf); + Path[] localSharedArchives = DistributedCache.getLocalSharedCacheArchives(conf); + Path[] localSharedFiles = DistributedCache.getLocalSharedCacheFiles(conf); + + if (archives != null) { + for (int i = 0; i < archives.length; i++) { + String link = archives[i].getFragment(); + if (link != null) { + link = workDir.toString() + Path.SEPARATOR + link; + File flink = new File(link); + if (!flink.exists()) { + FileUtil.symLink(localArchives[i].toString(), link); + } + } + } + } + if (sharedArchives != null) { + for (int i = 0; i < sharedArchives.length; i++) { + String link = sharedArchives[i].getFragment(); + if (link != null) { + // Remove md5 prefix: 2 chars per byte of MD5, plus 1 underscore + link = link.substring(MD5Hash.MD5_LEN * 2 + 1); + link = workDir.toString() + Path.SEPARATOR + link; + File flink = new File(link); + if (!flink.exists()) { + FileUtil.symLink(localSharedArchives[i].toString(), link); + } + } + } + } + if (files != null) { + for (int i = 0; i < files.length; i++) { + String link = files[i].getFragment(); + if (link != null) { + link = workDir.toString() + Path.SEPARATOR + link; + File flink = new File(link); + if (!flink.exists()) { + FileUtil.symLink(localFiles[i].toString(), link); + } + } + } + } + if (sharedFiles != null) { + for (int i = 0; i < sharedFiles.length; i++) { + String link = sharedFiles[i].getFragment(); + if (link != null) { + // Remove md5 prefix: 2 chars per byte of MD5, plus 1 underscore + link = link.substring(MD5Hash.MD5_LEN * 2 + 1); + link = workDir.toString() + Path.SEPARATOR + link; + File flink = new File(link); + if (!flink.exists()) { + FileUtil.symLink(localSharedFiles[i].toString(), link); + } + } + } + } + } + File jobCacheDir = null; + if (conf.getJar() != null) { + jobCacheDir = new File( + new Path(conf.getJar()).getParent().toString()); + } + + // create symlinks for all the files in job cache dir in current + // workingdir for streaming + try{ + DistributedCache.createAllSymlink(conf, jobCacheDir, + workDir); + } catch(IOException ie){ + // Do not exit even if symlinks have not been created. + LOG.warn(StringUtils.stringifyException(ie)); + } + // add java.io.tmpdir given by mapred.child.tmp + String tmp = conf.get("mapred.child.tmp", "./tmp"); + Path tmpDir = new Path(tmp); + + // if temp directory path is not absolute + // prepend it with workDir. 
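The shared-cache handling above strips a fixed-width MD5 prefix from the URI fragment before creating the symlink. A minimal sketch of that naming convention, assuming fragments of the form "<32-hex-md5>_<name>" (the class name and sample value below are invented, not part of this patch):

    import org.apache.hadoop.io.MD5Hash;

    /** Recovers the user-visible symlink name from a shared-cache URI fragment. */
    class SharedCacheNameExample {                           // invented class name
      static String linkName(String fragment) {
        // Fragments are assumed to look like "<32-hex-md5>_<name>": MD5Hash.MD5_LEN
        // is 16 bytes, so the prefix is 32 hex characters plus one underscore.
        return fragment.substring(MD5Hash.MD5_LEN * 2 + 1);
      }
      // linkName("0123456789abcdef0123456789abcdef_mylib.jar") returns "mylib.jar"
    }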
+ if (!tmpDir.isAbsolute()) { + tmpDir = new Path(workDir.toString(), tmp); + FileSystem localFs = FileSystem.getLocal(conf); + if (!localFs.mkdirs(tmpDir) && !localFs.getFileStatus(tmpDir).isDir()){ + throw new IOException("Mkdirs failed to create " + tmpDir.toString()); + } + } + } + + /** + * Kill the child process + */ + public void kill() { + killed = true; + jvmManager.taskKilled(this); + signalDone(); + } + public void signalDone() { + synchronized (lock) { + done = true; + lock.notify(); + } + } + public void setExitCode(int exitCode) { + this.exitCodeSet = true; + this.exitCode = exitCode; + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/TaskScheduler.java b/src/mapred/org/apache/hadoop/mapred/TaskScheduler.java new file mode 100644 index 0000000..59b495b --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TaskScheduler.java @@ -0,0 +1,95 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; + +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; + +/** + * Used by a {@link JobTracker} to schedule {@link Task}s on + * {@link TaskTracker}s. + *
+ * <p>
+ * {@link TaskScheduler}s typically use one or more + * {@link JobInProgressListener}s to receive notifications about jobs. + *
+ * <p>
+ * It is the responsibility of the {@link TaskScheduler} + * to initialize tasks for a job, by calling {@link JobInProgress#initTasks()} + * between the job being added (when + * {@link JobInProgressListener#jobAdded(JobInProgress)} is called) + * and tasks for that job being assigned (by + * {@link #assignTasks(TaskTracker)}). + * @see EagerTaskInitializationListener + */ +abstract class TaskScheduler implements Configurable { + + protected Configuration conf; + protected TaskTrackerManager taskTrackerManager; + + public Configuration getConf() { + return conf; + } + + public void setConf(Configuration conf) { + this.conf = conf; + } + + public synchronized void setTaskTrackerManager( + TaskTrackerManager taskTrackerManager) { + this.taskTrackerManager = taskTrackerManager; + } + + /** + * Lifecycle method to allow the scheduler to start any work in separate + * threads. + * @throws IOException + */ + public void start() throws IOException { + // do nothing + } + + /** + * Lifecycle method to allow the scheduler to stop any work it is doing. + * @throws IOException + */ + public void terminate() throws IOException { + // do nothing + } + + /** + * Returns the tasks we'd like the TaskTracker to execute right now. + * + * @param taskTracker The TaskTracker for which we're looking for tasks. + * @return A list of tasks to run on that TaskTracker, possibly empty. + */ + public abstract List assignTasks(TaskTracker taskTracker) + throws IOException; + + /** + * Returns a collection of jobs in an order which is specific to + * the particular scheduler. + * @param queueName + * @return + */ + public abstract Collection getJobs(String queueName); + +} diff --git a/src/mapred/org/apache/hadoop/mapred/TaskStatus.java b/src/mapred/org/apache/hadoop/mapred/TaskStatus.java new file mode 100644 index 0000000..fc678a9 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TaskStatus.java @@ -0,0 +1,470 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.util.StringUtils; +/************************************************** + * Describes the current status of a task. This is + * not intended to be a comprehensive piece of data. + * + **************************************************/ +public abstract class TaskStatus implements Writable, Cloneable { + static final Log LOG = + LogFactory.getLog(TaskStatus.class.getName()); + + //enumeration for reporting current phase of a task. 
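The scheduler contract above is intentionally small: a pluggable scheduler overrides assignTasks() and getJobs(), and may hook the optional start()/terminate() lifecycle methods. A minimal do-nothing sketch, assuming the List<Task>/Collection<JobInProgress> signatures used elsewhere in this package (the class name is invented and is not part of this patch):

    package org.apache.hadoop.mapred;

    import java.io.IOException;
    import java.util.Collection;
    import java.util.Collections;
    import java.util.List;

    import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker;

    /** Illustrative scheduler that never hands out work (name invented). */
    class NoOpTaskScheduler extends TaskScheduler {

      @Override
      public List<Task> assignTasks(TaskTracker taskTracker) throws IOException {
        // A real scheduler would consult taskTrackerManager and its
        // JobInProgressListener(s) here before building a task list.
        return Collections.emptyList();
      }

      @Override
      public Collection<JobInProgress> getJobs(String queueName) {
        return Collections.emptyList();
      }
    }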
+ public static enum Phase{STARTING, MAP, SHUFFLE, SORT, REDUCE, CLEANUP} + + // what state is the task in? + public static enum State {RUNNING, SUCCEEDED, FAILED, UNASSIGNED, KILLED, + COMMIT_PENDING, FAILED_UNCLEAN, KILLED_UNCLEAN} + + private final TaskAttemptID taskid; + private float progress; + private volatile State runState; + private String diagnosticInfo; + private String stateString; + private String taskTracker; + private int numSlots; + + private long startTime; + private long finishTime; + private long outputSize; + + private volatile Phase phase = Phase.STARTING; + private Counters counters; + private boolean includeCounters; + private SortedRanges.Range nextRecordRange = new SortedRanges.Range(); + + public TaskStatus() { + taskid = new TaskAttemptID(); + numSlots = 0; + } + + public TaskStatus(TaskAttemptID taskid, float progress, int numSlots, + State runState, String diagnosticInfo, + String stateString, String taskTracker, + Phase phase, Counters counters) { + this.taskid = taskid; + this.progress = progress; + this.numSlots = numSlots; + this.runState = runState; + this.diagnosticInfo = diagnosticInfo; + this.stateString = stateString; + this.taskTracker = taskTracker; + this.phase = phase; + this.counters = counters; + this.includeCounters = true; + } + + public TaskAttemptID getTaskID() { return taskid; } + public abstract boolean getIsMap(); + public int getNumSlots() { + return numSlots; + } + + public float getProgress() { return progress; } + public void setProgress(float progress) { this.progress = progress; } + public State getRunState() { return runState; } + public String getTaskTracker() {return taskTracker;} + public void setTaskTracker(String tracker) { this.taskTracker = tracker;} + public void setRunState(State runState) { this.runState = runState; } + public String getDiagnosticInfo() { return diagnosticInfo; } + public void setDiagnosticInfo(String info) { + diagnosticInfo = + ((diagnosticInfo == null) ? info : diagnosticInfo.concat(info)); + } + public String getStateString() { return stateString; } + public void setStateString(String stateString) { this.stateString = stateString; } + + /** + * Get the next record range which is going to be processed by Task. + * @return nextRecordRange + */ + public SortedRanges.Range getNextRecordRange() { + return nextRecordRange; + } + + /** + * Set the next record range which is going to be processed by Task. + * @param nextRecordRange + */ + public void setNextRecordRange(SortedRanges.Range nextRecordRange) { + this.nextRecordRange = nextRecordRange; + } + + /** + * Get task finish time. if shuffleFinishTime and sortFinishTime + * are not set before, these are set to finishTime. It takes care of + * the case when shuffle, sort and finish are completed with in the + * heartbeat interval and are not reported separately. if task state is + * TaskStatus.FAILED then finish time represents when the task failed. + * @return finish time of the task. + */ + public long getFinishTime() { + return finishTime; + } + + /** + * Sets finishTime for the task status if and only if the + * start time is set and passed finish time is greater than + * zero. + * + * @param finishTime finish time of task. + */ + void setFinishTime(long finishTime) { + if(this.getStartTime() > 0 && finishTime > 0) { + this.finishTime = finishTime; + } else { + //Using String utils to get the stack trace. 
+ LOG.error("Trying to set finish time for task " + taskid + + " when no start time is set, stackTrace is : " + + StringUtils.stringifyException(new Exception())); + } + } + + /** + * Get shuffle finish time for the task. If shuffle finish time was + * not set due to shuffle/sort/finish phases ending within same + * heartbeat interval, it is set to finish time of next phase i.e. sort + * or task finish when these are set. + * @return 0 if shuffleFinishTime, sortFinishTime and finish time are not set. else + * it returns approximate shuffle finish time. + */ + public long getShuffleFinishTime() { + return 0; + } + + /** + * Set shuffle finish time. + * @param shuffleFinishTime + */ + void setShuffleFinishTime(long shuffleFinishTime) {} + + /** + * Get sort finish time for the task,. If sort finish time was not set + * due to sort and reduce phase finishing in same heartebat interval, it is + * set to finish time, when finish time is set. + * @return 0 if sort finish time and finish time are not set, else returns sort + * finish time if that is set, else it returns finish time. + */ + public long getSortFinishTime() { + return 0; + } + + /** + * Sets sortFinishTime, if shuffleFinishTime is not set before + * then its set to sortFinishTime. + * @param sortFinishTime + */ + void setSortFinishTime(long sortFinishTime) {} + + /** + * Get start time of the task. + * @return 0 is start time is not set, else returns start time. + */ + public long getStartTime() { + return startTime; + } + + /** + * Set startTime of the task if start time is greater than zero. + * @param startTime start time + */ + void setStartTime(long startTime) { + //Making the assumption of passed startTime to be a positive + //long value explicit. + if (startTime > 0) { + this.startTime = startTime; + } else { + //Using String utils to get the stack trace. + LOG.error("Trying to set illegal startTime for task : " + taskid + + ".Stack trace is : " + + StringUtils.stringifyException(new Exception())); + } + } + + /** + * Get current phase of this task. Phase.Map in case of map tasks, + * for reduce one of Phase.SHUFFLE, Phase.SORT or Phase.REDUCE. + * @return . + */ + public Phase getPhase(){ + return this.phase; + } + /** + * Set current phase of this task. + * @param phase phase of this task + */ + void setPhase(Phase phase){ + TaskStatus.Phase oldPhase = getPhase(); + if (oldPhase != phase){ + // sort phase started + if (phase == TaskStatus.Phase.SORT){ + setShuffleFinishTime(System.currentTimeMillis()); + }else if (phase == TaskStatus.Phase.REDUCE){ + setSortFinishTime(System.currentTimeMillis()); + } + } + this.phase = phase; + } + + boolean inTaskCleanupPhase() { + return (this.phase == TaskStatus.Phase.CLEANUP && + (this.runState == TaskStatus.State.FAILED_UNCLEAN || + this.runState == TaskStatus.State.KILLED_UNCLEAN)); + } + + public boolean getIncludeCounters() { + return includeCounters; + } + + public void setIncludeCounters(boolean send) { + includeCounters = send; + } + + /** + * Get task's counters. + */ + public Counters getCounters() { + return counters; + } + /** + * Set the task's counters. + * @param counters + */ + public void setCounters(Counters counters) { + this.counters = counters; + } + + /** + * Returns the number of bytes of output from this map. + */ + public long getOutputSize() { + return outputSize; + } + + /** + * Set the size on disk of this task's output. 
+ * @param l the number of map output bytes + */ + void setOutputSize(long l) { + outputSize = l; + } + + /** + * Get the list of maps from which output-fetches failed. + * + * @return the list of maps from which output-fetches failed. + */ + public List getFetchFailedMaps() { + return null; + } + + /** + * Add to the list of maps from which output-fetches failed. + * + * @param mapTaskId map from which fetch failed + */ + synchronized void addFetchFailedMap(TaskAttemptID mapTaskId) {} + + /** + * Update the status of the task. + * + * This update is done by ping thread before sending the status. + * + * @param progress + * @param state + * @param counters + */ + synchronized void statusUpdate(float progress, + String state, + Counters counters) { + setProgress(progress); + setStateString(state); + setCounters(counters); + } + + /** + * Update the status of the task. + * + * @param status updated status + */ + synchronized void statusUpdate(TaskStatus status) { + this.progress = status.getProgress(); + this.runState = status.getRunState(); + this.stateString = status.getStateString(); + this.nextRecordRange = status.getNextRecordRange(); + + setDiagnosticInfo(status.getDiagnosticInfo()); + + if (status.getStartTime() > 0) { + this.startTime = status.getStartTime(); + } + if (status.getFinishTime() > 0) { + setFinishTime(status.getFinishTime()); + } + + this.phase = status.getPhase(); + this.counters = status.getCounters(); + this.outputSize = status.outputSize; + } + + /** + * Update specific fields of task status + * + * This update is done in JobTracker when a cleanup attempt of task + * reports its status. Then update only specific fields, not all. + * + * @param runState + * @param progress + * @param state + * @param phase + * @param finishTime + */ + synchronized void statusUpdate(State runState, + float progress, + String state, + Phase phase, + long finishTime) { + setRunState(runState); + setProgress(progress); + setStateString(state); + setPhase(phase); + if (finishTime > 0) { + setFinishTime(finishTime); + } + } + + /** + * Clear out transient information after sending out a status-update + * from either the {@link Task} to the {@link TaskTracker} or from the + * {@link TaskTracker} to the {@link JobTracker}. 
+ */ + synchronized void clearStatus() { + // Clear diagnosticInfo + diagnosticInfo = ""; + } + + @Override + public Object clone() { + try { + return super.clone(); + } catch (CloneNotSupportedException cnse) { + // Shouldn't happen since we do implement Clonable + throw new InternalError(cnse.toString()); + } + } + + ////////////////////////////////////////////// + // Writable + ////////////////////////////////////////////// + public void write(DataOutput out) throws IOException { + taskid.write(out); + out.writeFloat(progress); + out.writeInt(numSlots); + WritableUtils.writeEnum(out, runState); + Text.writeString(out, diagnosticInfo); + Text.writeString(out, stateString); + WritableUtils.writeEnum(out, phase); + out.writeLong(startTime); + out.writeLong(finishTime); + out.writeBoolean(includeCounters); + out.writeLong(outputSize); + if (includeCounters) { + counters.write(out); + } + nextRecordRange.write(out); + } + + public void readFields(DataInput in) throws IOException { + this.taskid.readFields(in); + this.progress = in.readFloat(); + this.numSlots = in.readInt(); + this.runState = WritableUtils.readEnum(in, State.class); + this.diagnosticInfo = Text.readString(in); + this.stateString = Text.readString(in); + this.phase = WritableUtils.readEnum(in, Phase.class); + this.startTime = in.readLong(); + this.finishTime = in.readLong(); + counters = new Counters(); + this.includeCounters = in.readBoolean(); + this.outputSize = in.readLong(); + if (includeCounters) { + counters.readFields(in); + } + nextRecordRange.readFields(in); + } + + ////////////////////////////////////////////////////////////////////////////// + // Factory-like methods to create/read/write appropriate TaskStatus objects + ////////////////////////////////////////////////////////////////////////////// + + static TaskStatus createTaskStatus(DataInput in, TaskAttemptID taskId, + float progress, int numSlots, + State runState, String diagnosticInfo, + String stateString, String taskTracker, + Phase phase, Counters counters) + throws IOException { + boolean isMap = in.readBoolean(); + return createTaskStatus(isMap, taskId, progress, numSlots, runState, + diagnosticInfo, stateString, taskTracker, phase, + counters); + } + + static TaskStatus createTaskStatus(boolean isMap, TaskAttemptID taskId, + float progress, int numSlots, + State runState, String diagnosticInfo, + String stateString, String taskTracker, + Phase phase, Counters counters) { + return (isMap) ? new MapTaskStatus(taskId, progress, numSlots, runState, + diagnosticInfo, stateString, taskTracker, + phase, counters) : + new ReduceTaskStatus(taskId, progress, numSlots, runState, + diagnosticInfo, stateString, + taskTracker, phase, counters); + } + + static TaskStatus createTaskStatus(boolean isMap) { + return (isMap) ? 
new MapTaskStatus() : new ReduceTaskStatus(); + } + + static TaskStatus readTaskStatus(DataInput in) throws IOException { + boolean isMap = in.readBoolean(); + TaskStatus taskStatus = createTaskStatus(isMap); + taskStatus.readFields(in); + return taskStatus; + } + + static void writeTaskStatus(DataOutput out, TaskStatus taskStatus) + throws IOException { + out.writeBoolean(taskStatus.getIsMap()); + taskStatus.write(out); + } +} + diff --git a/src/mapred/org/apache/hadoop/mapred/TaskTracker.java b/src/mapred/org/apache/hadoop/mapred/TaskTracker.java new file mode 100644 index 0000000..76fbd4e --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TaskTracker.java @@ -0,0 +1,3675 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.OutputStream; +import java.io.RandomAccessFile; +import java.net.InetSocketAddress; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; +import java.util.TreeMap; +import java.util.Vector; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.regex.Pattern; + +import javax.servlet.ServletContext; +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.filecache.DistributedCache; +import org.apache.hadoop.fs.DF; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.LocalDirAllocator; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.http.HttpServer; +import org.apache.hadoop.io.IntWritable; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.mapred.TaskLog.LogFileDetail; +import org.apache.hadoop.mapred.TaskLog.LogName; +import org.apache.hadoop.mapred.CleanupQueue.PathDeletionContext; +import org.apache.hadoop.mapred.TaskController.TaskControllerPathDeletionContext; +import org.apache.hadoop.mapred.TaskStatus.Phase; 
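The factory and Writable plumbing above prefix each record with an isMap flag so readTaskStatus() can rebuild the right concrete subclass. A hedged round-trip sketch (the class name, attempt id, and sample values are invented; it assumes the org.apache.hadoop.mapred package because the helpers are package-private):

    package org.apache.hadoop.mapred;

    import java.io.IOException;

    import org.apache.hadoop.io.DataInputBuffer;
    import org.apache.hadoop.io.DataOutputBuffer;

    /** Round-trips a map-task status through the factory and the wire format. */
    class TaskStatusRoundTripExample {                       // invented class name
      static TaskStatus roundTrip() throws IOException {
        TaskAttemptID id = new TaskAttemptID("jt", 1, true, 3, 0);   // invented ids
        TaskStatus sent = TaskStatus.createTaskStatus(true, id, 0.5f, 1,
            TaskStatus.State.RUNNING, "", "map > map", "tracker_host:1234",
            TaskStatus.Phase.MAP, new Counters());

        DataOutputBuffer out = new DataOutputBuffer();
        TaskStatus.writeTaskStatus(out, sent);               // writes the isMap flag first

        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        return TaskStatus.readTaskStatus(in);                // comes back as a MapTaskStatus
      }
    }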
+import org.apache.hadoop.mapred.TaskTrackerStatus.TaskTrackerHealthStatus; +import org.apache.hadoop.mapred.pipes.Submitter; +import org.apache.hadoop.mapreduce.TaskType; +import org.apache.hadoop.metrics.MetricsContext; +import org.apache.hadoop.metrics.MetricsException; +import org.apache.hadoop.metrics.MetricsRecord; +import org.apache.hadoop.metrics.MetricsUtil; +import org.apache.hadoop.metrics.Updater; +import org.apache.hadoop.net.DNS; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.authorize.ConfiguredPolicy; +import org.apache.hadoop.security.authorize.PolicyProvider; +import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; +import org.apache.hadoop.util.DiskChecker; +import org.apache.hadoop.util.MRAsyncDiskService; +import org.apache.hadoop.util.ResourceCalculatorPlugin; +import org.apache.hadoop.util.ProcfsBasedProcessTree; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.RunJar; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.util.DiskChecker.DiskErrorException; +import org.apache.hadoop.util.Shell.ShellCommandExecutor; + +/******************************************************* + * TaskTracker is a process that starts and tracks MR Tasks + * in a networked environment. It contacts the JobTracker + * for Task assignments and reporting results. + * + *******************************************************/ +public class TaskTracker + implements MRConstants, TaskUmbilicalProtocol, Runnable { + + /** + * @deprecated + */ + @Deprecated + static final String MAPRED_TASKTRACKER_VMEM_RESERVED_PROPERTY = + "mapred.tasktracker.vmem.reserved"; + /** + * @deprecated + */ + @Deprecated + static final String MAPRED_TASKTRACKER_PMEM_RESERVED_PROPERTY = + "mapred.tasktracker.pmem.reserved"; + + static final String MAP_USERLOG_RETAIN_SIZE = + "mapreduce.cluster.map.userlog.retain-size"; + static final String REDUCE_USERLOG_RETAIN_SIZE = + "mapreduce.cluster.reduce.userlog.retain-size"; + static final String CHECK_TASKTRACKER_BUILD_VERSION = + "mapreduce.tasktracker.build.version.check"; + + static final long WAIT_FOR_DONE = 3 * 1000; + private int httpPort; + + static enum State {NORMAL, STALE, INTERRUPTED, DENIED} + + static{ + Configuration.addDefaultResource("mapred-default.xml"); + Configuration.addDefaultResource("mapred-site.xml"); + } + + public static final Log LOG = + LogFactory.getLog(TaskTracker.class); + + public static final String MR_CLIENTTRACE_FORMAT = + "src: %s" + // src IP + ", dest: %s" + // dst IP + ", bytes: %s" + // byte count + ", op: %s" + // operation + ", cliID: %s" + // task id + ", duration: %s"; // duration + public static final Log ClientTraceLog = + LogFactory.getLog(TaskTracker.class.getName() + ".clienttrace"); + + volatile boolean running = true; + + private LocalDirAllocator localDirAllocator; + String taskTrackerName; + String localHostname; + InetSocketAddress jobTrackAddr; + + InetSocketAddress taskReportAddress; + + Server taskReportServer = null; + InterTrackerProtocol jobClient; + + // last heartbeat response recieved + short heartbeatResponseId = -1; + + static final String TASK_CLEANUP_SUFFIX = ".cleanup"; + + /* + * This is the last 'status' report sent by this tracker to the JobTracker. 
+ * + * If the rpc call succeeds, this 'status' is cleared-out by this tracker; + * indicating that a 'fresh' status report be generated; in the event the + * rpc calls fails for whatever reason, the previous status report is sent + * again. + */ + TaskTrackerStatus status = null; + + // The system-directory on HDFS where job files are stored + Path systemDirectory = null; + + // The filesystem where job files are stored + FileSystem systemFS = null; + + private final HttpServer server; + + volatile boolean shuttingDown = false; + + Map tasks = new HashMap(); + /** + * Map from taskId -> TaskInProgress. + */ + Map runningTasks = null; + Map runningJobs = null; + volatile int mapTotal = 0; + volatile int reduceTotal = 0; + boolean justStarted = true; + boolean justInited = true; + // Mark reduce tasks that are shuffling to rollback their events index + Set shouldReset = new HashSet(); + + //dir -> DF + Map localDirsDf = new HashMap(); + long minSpaceStart = 0; + //must have this much space free to start new tasks + boolean acceptNewTasks = true; + long minSpaceKill = 0; + //if we run under this limit, kill one task + //and make sure we never receive any new jobs + //until all the old tasks have been cleaned up. + //this is if a machine is so full it's only good + //for serving map output to the other nodes + + static Random r = new Random(); + private static final String SUBDIR = "taskTracker"; + private static final String CACHEDIR = "archive"; + private static final String JOBCACHE = "jobcache"; + private static final String OUTPUT = "output"; + private JobConf originalConf; + private JobConf fConf; + private int maxMapSlots; + private int maxReduceSlots; + private int failures; + + private FileSystem localFs; + + // Performance-related config knob to send an out-of-band heartbeat + // on task completion + static final String TT_OUTOFBAND_HEARBEAT = + "mapreduce.tasktracker.outofband.heartbeat"; + private volatile boolean oobHeartbeatOnTaskCompletion; + + // Track number of completed tasks to send an out-of-band heartbeat + private IntWritable finishedCount = new IntWritable(0); + + private MapEventsFetcherThread mapEventsFetcher; + int workerThreads; + CleanupQueue directoryCleanupThread; + volatile JvmManager jvmManager; + + private TaskMemoryManagerThread taskMemoryManager; + private boolean taskMemoryManagerEnabled = true; + private long totalVirtualMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT; + private long totalPhysicalMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT; + private long mapSlotMemorySizeOnTT = JobConf.DISABLED_MEMORY_LIMIT; + private long reduceSlotSizeMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT; + private long totalMemoryAllottedForTasks = JobConf.DISABLED_MEMORY_LIMIT; + + private TaskLogsMonitor taskLogsMonitor; + + public static final String MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY = + "mapred.tasktracker.memory_calculator_plugin"; + private ResourceCalculatorPlugin resourceCalculatorPlugin = null; + + /** + * the minimum interval between jobtracker polls + */ + private volatile int heartbeatInterval = HEARTBEAT_INTERVAL_MIN; + /** + * Number of maptask completion events locations to poll for at one time + */ + private int probe_sample_size = 500; + + private IndexCache indexCache; + + private MRAsyncDiskService asyncDiskService; + + /** + * Handle to the specific instance of the {@link TaskController} class + */ + private TaskController taskController; + + /** + * Handle to the specific instance of the {@link NodeHealthCheckerService} + */ + private 
NodeHealthCheckerService healthChecker; + + /* + * A list of commitTaskActions for whom commit response has been received + */ + private List commitResponses = + Collections.synchronizedList(new ArrayList()); + + private ShuffleServerMetrics shuffleServerMetrics; + /** This class contains the methods that should be used for metrics-reporting + * the specific metrics for shuffle. The TaskTracker is actually a server for + * the shuffle and hence the name ShuffleServerMetrics. + */ + private class ShuffleServerMetrics implements Updater { + private MetricsRecord shuffleMetricsRecord = null; + private int serverHandlerBusy = 0; + private long outputBytes = 0; + private int failedOutputs = 0; + private int successOutputs = 0; + ShuffleServerMetrics(JobConf conf) { + MetricsContext context = MetricsUtil.getContext("mapred"); + shuffleMetricsRecord = + MetricsUtil.createRecord(context, "shuffleOutput"); + this.shuffleMetricsRecord.setTag("sessionId", conf.getSessionId()); + context.registerUpdater(this); + } + synchronized void serverHandlerBusy() { + ++serverHandlerBusy; + } + synchronized void serverHandlerFree() { + --serverHandlerBusy; + } + synchronized void outputBytes(long bytes) { + outputBytes += bytes; + } + synchronized void failedOutput() { + ++failedOutputs; + } + synchronized void successOutput() { + ++successOutputs; + } + public void doUpdates(MetricsContext unused) { + synchronized (this) { + if (workerThreads != 0) { + shuffleMetricsRecord.setMetric("shuffle_handler_busy_percent", + 100*((float)serverHandlerBusy/workerThreads)); + } else { + shuffleMetricsRecord.setMetric("shuffle_handler_busy_percent", 0); + } + shuffleMetricsRecord.incrMetric("shuffle_output_bytes", + outputBytes); + shuffleMetricsRecord.incrMetric("shuffle_failed_outputs", + failedOutputs); + shuffleMetricsRecord.incrMetric("shuffle_success_outputs", + successOutputs); + outputBytes = 0; + failedOutputs = 0; + successOutputs = 0; + } + shuffleMetricsRecord.update(); + } + } + + + + + + private TaskTrackerInstrumentation myInstrumentation = null; + + public TaskTrackerInstrumentation getTaskTrackerInstrumentation() { + return myInstrumentation; + } + + /** + * A list of tips that should be cleaned up. + */ + private BlockingQueue tasksToCleanup = + new LinkedBlockingQueue(); + + /** + * A daemon-thread that pulls tips off the list of things to cleanup. 
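+ * Only {@link KillJobAction} and {@link KillTaskAction} entries are expected here; any other action type is logged as an error and dropped.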
+ */ + private Thread taskCleanupThread = + new Thread(new Runnable() { + public void run() { + while (true) { + try { + TaskTrackerAction action = tasksToCleanup.take(); + if (action instanceof KillJobAction) { + purgeJob((KillJobAction) action); + } else if (action instanceof KillTaskAction) { + TaskInProgress tip; + KillTaskAction killAction = (KillTaskAction) action; + synchronized (TaskTracker.this) { + tip = tasks.get(killAction.getTaskID()); + } + LOG.info("Received KillTaskAction for task: " + + killAction.getTaskID()); + purgeTask(tip, false); + } else { + LOG.error("Non-delete action given to cleanup thread: " + + action); + } + } catch (Throwable except) { + LOG.warn(StringUtils.stringifyException(except)); + } + } + } + }, "taskCleanup"); + + TaskController getTaskController() { + return taskController; + } + + private RunningJob addTaskToJob(JobID jobId, + TaskInProgress tip) { + synchronized (runningJobs) { + RunningJob rJob = null; + if (!runningJobs.containsKey(jobId)) { + rJob = new RunningJob(jobId); + rJob.localized = false; + rJob.tasks = new HashSet(); + runningJobs.put(jobId, rJob); + } else { + rJob = runningJobs.get(jobId); + } + synchronized (rJob) { + rJob.tasks.add(tip); + } + runningJobs.notify(); //notify the fetcher thread + return rJob; + } + } + + private void removeTaskFromJob(JobID jobId, TaskInProgress tip) { + synchronized (runningJobs) { + RunningJob rjob = runningJobs.get(jobId); + if (rjob == null) { + LOG.warn("Unknown job " + jobId + " being deleted."); + } else { + synchronized (rjob) { + rjob.tasks.remove(tip); + } + } + } + } + + TaskLogsMonitor getTaskLogsMonitor() { + return this.taskLogsMonitor; + } + + void setTaskLogsMonitor(TaskLogsMonitor t) { + this.taskLogsMonitor = t; + } + + static String getCacheSubdir() { + return TaskTracker.SUBDIR + Path.SEPARATOR + TaskTracker.CACHEDIR; + } + + static String getJobCacheSubdir() { + return TaskTracker.SUBDIR + Path.SEPARATOR + TaskTracker.JOBCACHE; + } + + static String getLocalJobDir(String jobid) { + return getJobCacheSubdir() + Path.SEPARATOR + jobid; + } + + static String getLocalTaskDir(String jobid, String taskid) { + return getLocalTaskDir(jobid, taskid, false) ; + } + + static String getIntermediateOutputDir(String jobid, String taskid) { + return getLocalTaskDir(jobid, taskid) + + Path.SEPARATOR + TaskTracker.OUTPUT ; + } + + static String getLocalTaskDir(String jobid, + String taskid, + boolean isCleanupAttempt) { + String taskDir = getLocalJobDir(jobid) + Path.SEPARATOR + taskid; + if (isCleanupAttempt) { + taskDir = taskDir + TASK_CLEANUP_SUFFIX; + } + return taskDir; + } + + String getPid(TaskAttemptID tid) { + TaskInProgress tip = tasks.get(tid); + if (tip != null) { + return jvmManager.getPid(tip.getTaskRunner()); + } + return null; + } + + public long getProtocolVersion(String protocol, + long clientVersion) throws IOException { + if (protocol.equals(TaskUmbilicalProtocol.class.getName())) { + return TaskUmbilicalProtocol.versionID; + } else { + throw new IOException("Unknown protocol for task tracker: " + + protocol); + } + } + + /** + * Do the real constructor work here. It's in a separate method + * so we can call it again and "recycle" the object after calling + * close(). 
+ */ + synchronized void initialize() throws IOException { + // use configured nameserver & interface to get local hostname + this.fConf = new JobConf(originalConf); + localFs = FileSystem.getLocal(fConf); + if (fConf.get("slave.host.name") != null) { + this.localHostname = fConf.get("slave.host.name"); + } + if (localHostname == null) { + this.localHostname = + DNS.getDefaultHost + (fConf.get("mapred.tasktracker.dns.interface","default"), + fConf.get("mapred.tasktracker.dns.nameserver","default")); + } + + // Check local disk, start async disk service, and clean up all + // local directories. + checkLocalDirs(this.fConf.getLocalDirs()); + asyncDiskService = new MRAsyncDiskService(FileSystem.getLocal(fConf), + fConf.getLocalDirs(), fConf); + asyncDiskService.cleanupAllVolumes(); + + // Clear out state tables + this.tasks.clear(); + this.runningTasks = new LinkedHashMap(); + this.runningJobs = new TreeMap(); + this.mapTotal = 0; + this.reduceTotal = 0; + this.acceptNewTasks = true; + this.status = null; + + this.minSpaceStart = this.fConf.getLong("mapred.local.dir.minspacestart", 0L); + this.minSpaceKill = this.fConf.getLong("mapred.local.dir.minspacekill", 0L); + //tweak the probe sample size (make it a function of numCopiers) + probe_sample_size = this.fConf.getInt("mapred.tasktracker.events.batchsize", 500); + + // Set up TaskTracker instrumentation + this.myInstrumentation = createInstrumentation(this, fConf); + + // bind address + String address = + NetUtils.getServerAddress(fConf, + "mapred.task.tracker.report.bindAddress", + "mapred.task.tracker.report.port", + "mapred.task.tracker.report.address"); + InetSocketAddress socAddr = NetUtils.createSocketAddr(address); + String bindAddress = socAddr.getHostName(); + int tmpPort = socAddr.getPort(); + + this.jvmManager = new JvmManager(this); + + // Set service-level authorization security policy + if (this.fConf.getBoolean( + ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) { + PolicyProvider policyProvider = + (PolicyProvider)(ReflectionUtils.newInstance( + this.fConf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG, + MapReducePolicyProvider.class, PolicyProvider.class), + this.fConf)); + SecurityUtil.setPolicy(new ConfiguredPolicy(this.fConf, policyProvider)); + } + + // RPC initialization + int max = maxMapSlots > maxReduceSlots ? 
+ maxMapSlots : maxReduceSlots; + //set the num handlers to max*2 since canCommit may wait for the duration + //of a heartbeat RPC + this.taskReportServer = + RPC.getServer(this, bindAddress, tmpPort, 2 * max, false, this.fConf); + this.taskReportServer.start(); + + // get the assigned address + this.taskReportAddress = taskReportServer.getListenerAddress(); + this.fConf.set("mapred.task.tracker.report.address", + taskReportAddress.getHostName() + ":" + taskReportAddress.getPort()); + LOG.info("TaskTracker up at: " + this.taskReportAddress); + + this.taskTrackerName = "tracker_" + localHostname + ":" + taskReportAddress; + LOG.info("Starting tracker " + taskTrackerName); + + this.jobClient = (InterTrackerProtocol) + RPC.waitForProxy(InterTrackerProtocol.class, + InterTrackerProtocol.versionID, + jobTrackAddr, this.fConf); + this.justInited = true; + this.running = true; + // start the thread that will fetch map task completion events + this.mapEventsFetcher = new MapEventsFetcherThread(); + mapEventsFetcher.setDaemon(true); + mapEventsFetcher.setName( + "Map-events fetcher for all reduce tasks " + "on " + + taskTrackerName); + mapEventsFetcher.start(); + + initializeMemoryManagement(); + + setTaskLogsMonitor(new TaskLogsMonitor(getMapUserLogRetainSize(), + getReduceUserLogRetainSize())); + getTaskLogsMonitor().start(); + + this.indexCache = new IndexCache(this.fConf); + + mapLauncher = new TaskLauncher(TaskType.MAP, maxMapSlots); + reduceLauncher = new TaskLauncher(TaskType.REDUCE, maxReduceSlots); + mapLauncher.start(); + reduceLauncher.start(); + Class taskControllerClass + = fConf.getClass("mapred.task.tracker.task-controller", + DefaultTaskController.class, + TaskController.class); + taskController = (TaskController)ReflectionUtils.newInstance( + taskControllerClass, fConf); + + //setup and create jobcache directory with appropriate permissions + taskController.setup(); + + //Start up node health checker service. 
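The task controller above is resolved reflectively from configuration. A small sketch of that lookup, mirroring the key and default used in initialize() (the class name is invented; it assumes the org.apache.hadoop.mapred package because TaskController and DefaultTaskController are package-private):

    package org.apache.hadoop.mapred;

    import java.io.IOException;

    import org.apache.hadoop.util.ReflectionUtils;

    /** Resolves the pluggable task controller the same way initialize() does. */
    class TaskControllerLookupExample {                      // invented class name
      static TaskController resolve(JobConf conf) throws IOException {
        Class<? extends TaskController> clazz =
            conf.getClass("mapred.task.tracker.task-controller",
                          DefaultTaskController.class, TaskController.class);
        TaskController controller = ReflectionUtils.newInstance(clazz, conf);
        controller.setup();   // prepares the local job-cache area and its permissions
        return controller;
      }
    }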
+ if (shouldStartHealthMonitor(this.fConf)) { + startHealthMonitor(this.fConf); + } + + oobHeartbeatOnTaskCompletion = + fConf.getBoolean(TT_OUTOFBAND_HEARBEAT, false); + } + + public static Class[] getInstrumentationClasses(Configuration conf) { + return conf.getClasses("mapred.tasktracker.instrumentation", + TaskTrackerMetricsInst.class); + } + + public static void setInstrumentationClass( + Configuration conf, Class t) { + conf.setClass("mapred.tasktracker.instrumentation", + t, TaskTrackerInstrumentation.class); + } + + public static TaskTrackerInstrumentation createInstrumentation( + TaskTracker tt, Configuration conf) { + try { + Class[] instrumentationClasses = getInstrumentationClasses(conf); + if (instrumentationClasses.length == 0) { + LOG.error("Empty string given for mapred.tasktracker.instrumentation" + + " property -- will use default instrumentation class instead"); + return new TaskTrackerMetricsInst(tt); + } else if (instrumentationClasses.length == 1) { + // Just one instrumentation class given; create it directly + Class cls = instrumentationClasses[0]; + java.lang.reflect.Constructor c = + cls.getConstructor(new Class[] {TaskTracker.class} ); + return (TaskTrackerInstrumentation) c.newInstance(tt); + } else { + // Multiple instrumentation classes given; use a composite object + List instrumentations = + new ArrayList(); + for (Class cls: instrumentationClasses) { + java.lang.reflect.Constructor c = + cls.getConstructor(new Class[] {TaskTracker.class} ); + TaskTrackerInstrumentation inst = + (TaskTrackerInstrumentation) c.newInstance(tt); + instrumentations.add(inst); + } + return new CompositeTaskTrackerInstrumentation(tt, instrumentations); + } + } catch(Exception e) { + // Reflection can throw lots of exceptions -- handle them all by + // falling back on the default. + LOG.error("Failed to initialize TaskTracker metrics", e); + return new TaskTrackerMetricsInst(tt); + } + } + + /** + * Removes all contents of temporary storage. Called upon + * startup, to remove any leftovers from previous run. + * + * Use MRAsyncDiskService.moveAndDeleteAllVolumes instead. + * @see org.apache.hadoop.util.MRAsyncDiskService#cleanupAllVolumes() + */ + @Deprecated + public void cleanupStorage() throws IOException { + this.fConf.deleteLocalFiles(); + } + + // Object on wait which MapEventsFetcherThread is going to wait. + private Object waitingOn = new Object(); + + private class MapEventsFetcherThread extends Thread { + + public List reducesInShuffle() { + List fList = new ArrayList(); + for (Map.Entry item : runningJobs.entrySet()) { + RunningJob rjob = item.getValue(); + JobID jobId = item.getKey(); + FetchStatus f; + synchronized (rjob) { + f = rjob.getFetchStatus(); + for (TaskInProgress tip : rjob.tasks) { + Task task = tip.getTask(); + if (!task.isMapTask()) { + if (((ReduceTask)task).getPhase() == + TaskStatus.Phase.SHUFFLE) { + if (rjob.getFetchStatus() == null) { + //this is a new job; we start fetching its map events + f = new FetchStatus(jobId, + ((ReduceTask)task).getNumMaps()); + rjob.setFetchStatus(f); + } + f = rjob.getFetchStatus(); + fList.add(f); + break; //no need to check any more tasks belonging to this + } + } + } + } + } + //at this point, we have information about for which of + //the running jobs do we need to query the jobtracker for map + //outputs (actually map events). 
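createInstrumentation() above accepts a comma-separated list under mapred.tasktracker.instrumentation and wraps multiple entries in a CompositeTaskTrackerInstrumentation. A hedged illustration of that wiring (the class name is invented; the stock TaskTrackerMetricsInst is simply listed twice to exercise the composite path):

    package org.apache.hadoop.mapred;   // TaskTrackerMetricsInst is package-private

    /** Wires more than one instrumentation class; createInstrumentation() composes them. */
    class InstrumentationWiringExample {                     // invented class name
      static TaskTrackerInstrumentation wire(TaskTracker tracker) {
        JobConf conf = new JobConf();
        // Two entries make createInstrumentation() build a composite wrapper.
        conf.setStrings("mapred.tasktracker.instrumentation",
            TaskTrackerMetricsInst.class.getName(),
            TaskTrackerMetricsInst.class.getName());
        return TaskTracker.createInstrumentation(tracker, conf);
      }
    }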
+ return fList; + } + + @Override + public void run() { + LOG.info("Starting thread: " + this.getName()); + + while (running) { + try { + List fList = null; + synchronized (runningJobs) { + while (((fList = reducesInShuffle()).size()) == 0) { + try { + runningJobs.wait(); + } catch (InterruptedException e) { + LOG.info("Shutting down: " + this.getName()); + return; + } + } + } + // now fetch all the map task events for all the reduce tasks + // possibly belonging to different jobs + boolean fetchAgain = false; //flag signifying whether we want to fetch + //immediately again. + for (FetchStatus f : fList) { + long currentTime = System.currentTimeMillis(); + try { + //the method below will return true when we have not + //fetched all available events yet + if (f.fetchMapCompletionEvents(currentTime)) { + fetchAgain = true; + } + } catch (Exception e) { + LOG.warn( + "Ignoring exception that fetch for map completion" + + " events threw for " + f.jobId + " threw: " + + StringUtils.stringifyException(e)); + } + if (!running) { + break; + } + } + synchronized (waitingOn) { + try { + if (!fetchAgain) { + waitingOn.wait(heartbeatInterval); + } + } catch (InterruptedException ie) { + LOG.info("Shutting down: " + this.getName()); + return; + } + } + } catch (Exception e) { + LOG.info("Ignoring exception " + e.getMessage()); + } + } + } + } + + public class FetchStatus { + /** The next event ID that we will start querying the JobTracker from*/ + public IntWritable fromEventId; + /** This is the cache of map events for a given job */ + private List allMapEvents; + /** What jobid this fetchstatus object is for*/ + private JobID jobId; + private long lastFetchTime; + private boolean fetchAgain; + + public FetchStatus(JobID jobId, int numMaps) { + this.fromEventId = new IntWritable(0); + this.jobId = jobId; + this.allMapEvents = new ArrayList(numMaps); + } + + /** + * Reset the events obtained so far. + */ + public void reset() { + // Note that the sync is first on fromEventId and then on allMapEvents + synchronized (fromEventId) { + synchronized (allMapEvents) { + fromEventId.set(0); // set the new index for TCE + allMapEvents.clear(); + } + } + } + + public TaskCompletionEvent[] getMapEvents(int fromId, int max) { + + TaskCompletionEvent[] mapEvents = + TaskCompletionEvent.EMPTY_ARRAY; + boolean notifyFetcher = false; + synchronized (allMapEvents) { + if (allMapEvents.size() > fromId) { + int actualMax = Math.min(max, (allMapEvents.size() - fromId)); + List eventSublist = + allMapEvents.subList(fromId, actualMax + fromId); + mapEvents = eventSublist.toArray(mapEvents); + } else { + // Notify Fetcher thread. 
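getMapEvents() above returns at most max cached events starting at fromId and never blocks; an empty or short result simply nudges the fetcher thread. A hedged sketch of a caller paging through the cache (the class name and batch size are invented):

    package org.apache.hadoop.mapred;   // so TaskTracker.FetchStatus is reachable

    /** Pages through the map-completion events currently cached by a FetchStatus. */
    class MapEventDrainExample {                             // invented class name
      static int drain(TaskTracker.FetchStatus fetch) {
        int from = 0;
        final int batch = 100;                               // invented batch size
        TaskCompletionEvent[] events;
        do {
          events = fetch.getMapEvents(from, batch);          // non-blocking slice of the cache
          from += events.length;
          // ... hand the events to the shuffle copier here ...
        } while (events.length == batch);
        return from;                                         // number of events consumed
      }
    }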
+ notifyFetcher = true; + } + } + if (notifyFetcher) { + synchronized (waitingOn) { + waitingOn.notify(); + } + } + return mapEvents; + } + + public boolean fetchMapCompletionEvents(long currTime) throws IOException { + if (!fetchAgain && (currTime - lastFetchTime) < heartbeatInterval) { + return false; + } + int currFromEventId = 0; + synchronized (fromEventId) { + currFromEventId = fromEventId.get(); + List recentMapEvents = + queryJobTracker(fromEventId, jobId, jobClient); + synchronized (allMapEvents) { + allMapEvents.addAll(recentMapEvents); + } + lastFetchTime = currTime; + if (fromEventId.get() - currFromEventId >= probe_sample_size) { + //return true when we have fetched the full payload, indicating + //that we should fetch again immediately (there might be more to + //fetch + fetchAgain = true; + return true; + } + } + fetchAgain = false; + return false; + } + } + + private static LocalDirAllocator lDirAlloc = + new LocalDirAllocator("mapred.local.dir"); + + // intialize the job directory + private void localizeJob(TaskInProgress tip) throws IOException { + Path localJarFile = null; + Task t = tip.getTask(); + JobID jobId = t.getJobID(); + Path jobFile = new Path(t.getJobFile()); + // Get sizes of JobFile and JarFile + // sizes are -1 if they are not present. + FileStatus status = null; + long jobFileSize = -1; + try { + status = systemFS.getFileStatus(jobFile); + jobFileSize = status.getLen(); + } catch(FileNotFoundException fe) { + jobFileSize = -1; + } + Path localJobFile = lDirAlloc.getLocalPathForWrite( + getLocalJobDir(jobId.toString()) + + Path.SEPARATOR + "job.xml", + jobFileSize, fConf); + RunningJob rjob = addTaskToJob(jobId, tip); + synchronized (rjob.localizationLock) { + if (rjob.localized == false) { + // Actually start the job localization IO. + FileSystem localFs = FileSystem.getLocal(fConf); + // this will happen on a partial execution of localizeJob. + // Sometimes the job.xml gets copied but copying job.jar + // might throw out an exception + // we should clean up and then try again + Path jobDir = localJobFile.getParent(); + if (localFs.exists(jobDir)){ + LOG.warn("Deleting pre-existing jobDir: " + jobDir + + " when localizeJob for tip " + tip); + localFs.delete(jobDir, true); + boolean b = localFs.mkdirs(jobDir); + if (!b) + throw new IOException("Not able to create job directory " + + jobDir.toString()); + } + systemFS.copyToLocalFile(jobFile, localJobFile); + JobConf localJobConf = new JobConf(localJobFile); + + // create the 'work' directory + // job-specific shared directory for use as scratch space + Path workDir = lDirAlloc.getLocalPathForWrite( + (getLocalJobDir(jobId.toString()) + + Path.SEPARATOR + "work"), fConf); + if (!localFs.mkdirs(workDir)) { + throw new IOException("Mkdirs failed to create " + + workDir.toString()); + } + System.setProperty("job.local.dir", workDir.toString()); + localJobConf.set("job.local.dir", workDir.toString()); + + // copy Jar file to the local FS and unjar it. + String jarFile = localJobConf.getJar(); + long jarFileSize = -1; + if (jarFile != null) { + boolean shared = + localJobConf.getBoolean("mapred.cache.shared.enabled", false); + + // If sharing is turned on, we already have the jarFileSize, so we + // don't have to make another RPC call to NameNode + Path jarFilePath = new Path(jarFile); + if (shared) { + try { + jarFileSize = + Long.parseLong(DistributedCache. 
+ getSharedArchiveLength(localJobConf)[0]); + } catch (NullPointerException npe) { + jarFileSize = -1; + } + } else { + try { + status = systemFS.getFileStatus(jarFilePath); + jarFileSize = status.getLen(); + } catch(FileNotFoundException fe) { + jarFileSize = -1; + } + } + // Here we check for and we check five times the size of jarFileSize + // to accommodate for unjarring the jar file in work directory + localJarFile = new Path(lDirAlloc.getLocalPathForWrite( + getLocalJobDir(jobId.toString()) + + Path.SEPARATOR + "jars", + 5 * jarFileSize, fConf), "job.jar"); + if (!localFs.mkdirs(localJarFile.getParent())) { + throw new IOException("Mkdirs failed to create jars directory "); + } + + if (!shared) { + // we copy the job jar to the local disk and unjar it + // for the shared case - this is done inside TaskRunner + systemFS.copyToLocalFile(jarFilePath, localJarFile); + RunJar.unJar(new File(localJarFile.toString()), + new File(localJarFile.getParent().toString())); + } + localJobConf.setJar(localJarFile.toString()); + + OutputStream out = localFs.create(localJobFile); + try { + localJobConf.writeXml(out); + } finally { + out.close(); + } + } + synchronized (rjob) { + rjob.keepJobFiles = ((localJobConf.getKeepTaskFilesPattern() != null) || + localJobConf.getKeepFailedTaskFiles()); + rjob.jobConf = localJobConf; + taskController.initializeJob(jobId); + rjob.localized = true; + } + } + } + launchTaskForJob(tip, new JobConf(rjob.jobConf)); + } + + private void launchTaskForJob(TaskInProgress tip, JobConf jobConf) throws IOException{ + synchronized (tip) { + tip.setJobConf(jobConf); + tip.launchTask(); + } + } + + public synchronized void shutdown() throws IOException { + shuttingDown = true; + close(); + if (this.server != null) { + try { + LOG.info("Shutting down StatusHttpServer"); + this.server.stop(); + } catch (Exception e) { + LOG.warn("Exception shutting down TaskTracker", e); + } + } + } + /** + * Close down the TaskTracker and all its components. We must also shutdown + * any running tasks or threads, and cleanup disk space. A new TaskTracker + * within the same process space might be restarted, so everything must be + * clean. + */ + public synchronized void close() throws IOException { + // + // Kill running tasks. Do this in a 2nd vector, called 'tasksToClose', + // because calling jobHasFinished() may result in an edit to 'tasks'. + // + TreeMap tasksToClose = + new TreeMap(); + tasksToClose.putAll(tasks); + for (TaskInProgress tip : tasksToClose.values()) { + tip.jobHasFinished(false); + } + + this.running = false; + + // Clear local storage + if (asyncDiskService != null) { + // Clear local storage + asyncDiskService.cleanupAllVolumes(); + + // Shutdown all async deletion threads with up to 10 seconds of delay + asyncDiskService.shutdown(); + try { + if (!asyncDiskService.awaitTermination(10000)) { + asyncDiskService.shutdownNow(); + asyncDiskService = null; + } + } catch (InterruptedException e) { + asyncDiskService.shutdownNow(); + asyncDiskService = null; + } + } + + // Shutdown the fetcher thread + this.mapEventsFetcher.interrupt(); + + //stop the launchers + this.mapLauncher.interrupt(); + this.reduceLauncher.interrupt(); + + // All tasks are killed. So, they are removed from TaskLog monitoring also. + // Interrupt the monitor. 
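/* Editor's note: the surrounding close() method tears the tracker down in a
 * deliberate order: kill the running tasks, drain the async disk service,
 * interrupt the fetcher, the launchers and the log monitor, stop the JVM
 * manager and the RPC proxy, and only then join the fetcher so its exit is
 * actually observed. A minimal sketch of that interrupt-then-join step
 * (illustrative, not part of the patch):
 *
 *   void stopWorker(Thread worker) {
 *     worker.interrupt();              // ask the thread to leave its loop
 *     boolean joined = false;
 *     while (!joined) {
 *       try {
 *         worker.join();               // retry if we are interrupted ourselves
 *         joined = true;
 *       } catch (InterruptedException ignored) {
 *       }
 *     }
 *   }
 */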
+ getTaskLogsMonitor().interrupt(); + + jvmManager.stop(); + + // shutdown RPC connections + RPC.stopProxy(jobClient); + + // wait for the fetcher thread to exit + for (boolean done = false; !done; ) { + try { + this.mapEventsFetcher.join(); + done = true; + } catch (InterruptedException e) { + } + } + + if (taskReportServer != null) { + taskReportServer.stop(); + taskReportServer = null; + } + if (healthChecker != null) { + //stop node health checker service + healthChecker.stop(); + healthChecker = null; + } + } + + /** + * Start with the local machine name, and the default JobTracker + */ + public TaskTracker(JobConf conf) throws IOException { + originalConf = conf; + + Class clazz = + conf.getClass(MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY, + null, ResourceCalculatorPlugin.class); + resourceCalculatorPlugin = + (ResourceCalculatorPlugin) ResourceCalculatorPlugin + .getResourceCalculatorPlugin(clazz, conf); + LOG.info("Using ResourceCalculatorPlugin : " + resourceCalculatorPlugin); + int numCpuOnTT = resourceCalculatorPlugin.getNumProcessors(); + maxMapSlots = getMaxSlots(conf, numCpuOnTT, TaskType.MAP); + maxReduceSlots = getMaxSlots(conf, numCpuOnTT, TaskType.REDUCE); + + this.jobTrackAddr = JobTracker.getAddress(conf); + String infoAddr = + NetUtils.getServerAddress(conf, + "tasktracker.http.bindAddress", + "tasktracker.http.port", + "mapred.task.tracker.http.address"); + InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr); + String httpBindAddress = infoSocAddr.getHostName(); + int httpPort = infoSocAddr.getPort(); + this.server = new HttpServer("task", httpBindAddress, httpPort, + httpPort == 0, conf); + workerThreads = conf.getInt("tasktracker.http.threads", 40); + this.shuffleServerMetrics = new ShuffleServerMetrics(conf); + server.setThreads(1, workerThreads); + // let the jsp pages get to the task tracker, config, and other relevant + // objects + FileSystem local = FileSystem.getLocal(conf); + this.localDirAllocator = new LocalDirAllocator("mapred.local.dir"); + server.setAttribute("task.tracker", this); + server.setAttribute("local.file.system", local); + server.setAttribute("conf", conf); + server.setAttribute("log", LOG); + server.setAttribute("localDirAllocator", localDirAllocator); + server.setAttribute("shuffleServerMetrics", shuffleServerMetrics); + server.addInternalServlet("mapOutput", "/mapOutput", MapOutputServlet.class); + server.addInternalServlet("taskLog", "/tasklog", TaskLogServlet.class); + server.start(); + this.httpPort = server.getPort(); + checkJettyPort(httpPort); + initialize(); + } + + /** + * Blank constructor. Only usable by tests. + */ + TaskTracker() { + server = null; + } + + /** + * Configuration setter method for use by tests. + */ + void setConf(JobConf conf) { + fConf = conf; + } + + private void checkJettyPort(int port) throws IOException { + //See HADOOP-4744 + if (port < 0) { + shuttingDown = true; + throw new IOException("Jetty problem. Jetty didn't bind to a " + + "valid port"); + } + } + + private void startCleanupThreads() throws IOException { + taskCleanupThread.setDaemon(true); + taskCleanupThread.start(); + directoryCleanupThread = new CleanupQueue(); + } + + /** + * The connection to the JobTracker, used by the TaskRunner + * for locating remote files. 
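 * <p>Editor's note: elsewhere in this class the proxy is used for RPCs such
 * as {@code getSystemDir()}, {@code heartbeat(...)} and
 * {@code getTaskCompletionEvents(...)}. An illustrative use:
 * <pre>{@code
 *   String sysDir = tracker.getJobClient().getSystemDir();  // 'tracker' is illustrative
 * }</pre>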
+ */ + public InterTrackerProtocol getJobClient() { + return jobClient; + } + + /** Return the port at which the tasktracker bound to */ + public synchronized InetSocketAddress getTaskTrackerReportAddress() { + return taskReportAddress; + } + + /** Queries the job tracker for a set of outputs ready to be copied + * @param fromEventId the first event ID we want to start from, this is + * modified by the call to this method + * @param jobClient the job tracker + * @return a set of locations to copy outputs from + * @throws IOException + */ + private List queryJobTracker(IntWritable fromEventId, + JobID jobId, + InterTrackerProtocol jobClient) + throws IOException { + + TaskCompletionEvent t[] = jobClient.getTaskCompletionEvents( + jobId, + fromEventId.get(), + probe_sample_size); + //we are interested in map task completion events only. So store + //only those + List recentMapEvents = + new ArrayList(); + for (int i = 0; i < t.length; i++) { + if (t[i].isMap) { + recentMapEvents.add(t[i]); + } + } + fromEventId.set(fromEventId.get() + t.length); + return recentMapEvents; + } + + /** + * Main service loop. Will stay in this loop forever. + */ + State offerService() throws Exception { + long lastHeartbeat = 0; + + while (running && !shuttingDown) { + try { + long now = System.currentTimeMillis(); + + long waitTime = heartbeatInterval - (now - lastHeartbeat); + if (waitTime > 0) { + // sleeps for the wait time or + // until there are empty slots to schedule tasks + synchronized (finishedCount) { + if (finishedCount.get() == 0) { + finishedCount.wait(waitTime); + } + finishedCount.set(0); + } + } + + // If the TaskTracker is just starting up: + // 1. Verify the buildVersion + // 2. Get the system directory & filesystem + if(justInited) { + String jobTrackerBV = jobClient.getBuildVersion(); + if(doCheckBuildVersion() && + !VersionInfo.getBuildVersion().equals(jobTrackerBV)) { + String msg = "Shutting down. Incompatible buildVersion." 
+ + "\nJobTracker's: " + jobTrackerBV + + "\nTaskTracker's: "+ VersionInfo.getBuildVersion(); + LOG.error(msg); + try { + jobClient.reportTaskTrackerError(taskTrackerName, null, msg); + } catch(Exception e ) { + LOG.info("Problem reporting to jobtracker: " + e); + } + return State.DENIED; + } + + String dir = jobClient.getSystemDir(); + if (dir == null) { + throw new IOException("Failed to get system directory"); + } + systemDirectory = new Path(dir); + systemFS = systemDirectory.getFileSystem(fConf); + } + + // Send the heartbeat and process the jobtracker's directives + HeartbeatResponse heartbeatResponse = transmitHeartBeat(now); + + // Note the time when the heartbeat returned, use this to decide when to send the + // next heartbeat + lastHeartbeat = System.currentTimeMillis(); + + + // Check if the map-event list needs purging + Set jobs = heartbeatResponse.getRecoveredJobs(); + if (jobs.size() > 0) { + synchronized (this) { + // purge the local map events list + for (JobID job : jobs) { + RunningJob rjob; + synchronized (runningJobs) { + rjob = runningJobs.get(job); + if (rjob != null) { + synchronized (rjob) { + FetchStatus f = rjob.getFetchStatus(); + if (f != null) { + f.reset(); + } + } + } + } + } + + // Mark the reducers in shuffle for rollback + synchronized (shouldReset) { + for (Map.Entry entry + : runningTasks.entrySet()) { + if (entry.getValue().getStatus().getPhase() == Phase.SHUFFLE) { + this.shouldReset.add(entry.getKey()); + } + } + } + } + } + + TaskTrackerAction[] actions = heartbeatResponse.getActions(); + if(LOG.isDebugEnabled()) { + LOG.debug("Got heartbeatResponse from JobTracker with responseId: " + + heartbeatResponse.getResponseId() + " and " + + ((actions != null) ? actions.length : 0) + " actions"); + } + if (reinitTaskTracker(actions)) { + return State.STALE; + } + + // resetting heartbeat interval from the response. + heartbeatInterval = heartbeatResponse.getHeartbeatInterval(); + justStarted = false; + justInited = false; + if (actions != null){ + for(TaskTrackerAction action: actions) { + if (action instanceof LaunchTaskAction) { + addToTaskQueue((LaunchTaskAction)action); + } else if (action instanceof CommitTaskAction) { + CommitTaskAction commitAction = (CommitTaskAction)action; + if (!commitResponses.contains(commitAction.getTaskID())) { + LOG.info("Received commit task action for " + + commitAction.getTaskID()); + commitResponses.add(commitAction.getTaskID()); + } + } else { + tasksToCleanup.put(action); + } + } + } + markUnresponsiveTasks(); + killOverflowingTasks(); + + //we've cleaned up, resume normal operation + if (!acceptNewTasks && isIdle()) { + acceptNewTasks=true; + } + //The check below may not be required every iteration but we are + //erring on the side of caution here. We have seen many cases where + //the call to jetty's getLocalPort() returns different values at + //different times. Being a real paranoid here. + checkJettyPort(server.getPort()); + } catch (InterruptedException ie) { + LOG.info("Interrupted. 
Closing down."); + return State.INTERRUPTED; + } catch (DiskErrorException de) { + String msg = "Exiting task tracker for disk error:\n" + + StringUtils.stringifyException(de); + LOG.error(msg); + synchronized (this) { + jobClient.reportTaskTrackerError(taskTrackerName, + "DiskErrorException", msg); + } + return State.STALE; + } catch (RemoteException re) { + String reClass = re.getClassName(); + if (DisallowedTaskTrackerException.class.getName().equals(reClass)) { + LOG.info("Tasktracker disallowed by JobTracker."); + return State.DENIED; + } + } catch (Exception except) { + String msg = "Caught exception: " + + StringUtils.stringifyException(except); + LOG.error(msg); + } + } + + return State.NORMAL; + } + + private long previousUpdate = 0; + + /** + * Build and transmit the heart beat to the JobTracker + * @param now current time + * @return false if the tracker was unknown + * @throws IOException + */ + HeartbeatResponse transmitHeartBeat(long now) throws IOException { + // Send Counters in the status once every COUNTER_UPDATE_INTERVAL + boolean sendCounters; + if (now > (previousUpdate + COUNTER_UPDATE_INTERVAL)) { + sendCounters = true; + previousUpdate = now; + } + else { + sendCounters = false; + } + + // + // Check if the last heartbeat got through... + // if so then build the heartbeat information for the JobTracker; + // else resend the previous status information. + // + if (status == null) { + synchronized (this) { + status = new TaskTrackerStatus(taskTrackerName, localHostname, + httpPort, + cloneAndResetRunningTaskStatuses( + sendCounters), + failures, + maxMapSlots, + maxReduceSlots); + } + } else { + LOG.info("Resending 'status' to '" + jobTrackAddr.getHostName() + + "' with reponseId '" + heartbeatResponseId); + } + + // + // Check if we should ask for a new Task + // + boolean askForNewTask; + long localMinSpaceStart; + synchronized (this) { + askForNewTask = + ((status.countOccupiedMapSlots() < maxMapSlots || + status.countOccupiedReduceSlots() < maxReduceSlots) && + acceptNewTasks); + localMinSpaceStart = minSpaceStart; + } + if (askForNewTask) { + checkLocalDirs(fConf.getLocalDirs()); + askForNewTask = enoughFreeSpace(localMinSpaceStart); + long freeDiskSpace = getFreeSpace(); + long totVmem = getTotalVirtualMemoryOnTT(); + long totPmem = getTotalPhysicalMemoryOnTT(); + long availableVmem = getAvailableVirtualMemoryOnTT(); + long availablePmem = getAvailablePhysicalMemoryOnTT(); + long cumuCpuTime = getCumulativeCpuTimeOnTT(); + long cpuFreq = getCpuFrequencyOnTT(); + int numCpu = getNumProcessorsOnTT(); + float cpuUsage = getCpuUsageOnTT(); + + status.getResourceStatus().setAvailableSpace(freeDiskSpace); + status.getResourceStatus().setTotalVirtualMemory(totVmem); + status.getResourceStatus().setTotalPhysicalMemory(totPmem); + status.getResourceStatus().setAvailableVirtualMemory(availableVmem); + status.getResourceStatus().setAvailablePhysicalMemory(availablePmem); + status.getResourceStatus().setMapSlotMemorySizeOnTT( + mapSlotMemorySizeOnTT); + status.getResourceStatus().setReduceSlotMemorySizeOnTT( + reduceSlotSizeMemoryOnTT); + status.getResourceStatus().setCumulativeCpuTime(cumuCpuTime); + status.getResourceStatus().setCpuFrequency(cpuFreq); + status.getResourceStatus().setNumProcessors(numCpu); + status.getResourceStatus().setCpuUsage(cpuUsage); + } + //add node health information + + TaskTrackerHealthStatus healthStatus = status.getHealthStatus(); + synchronized (this) { + if (healthChecker != null) { + healthChecker.setHealthStatus(healthStatus); + } else { + 
healthStatus.setNodeHealthy(true); + healthStatus.setLastReported(0L); + healthStatus.setHealthReport(""); + } + } + // + // Xmit the heartbeat + // + HeartbeatResponse heartbeatResponse = jobClient.heartbeat(status, + justStarted, + justInited, + askForNewTask, + heartbeatResponseId); + + // + // The heartbeat got through successfully! + // + heartbeatResponseId = heartbeatResponse.getResponseId(); + + synchronized (this) { + for (TaskStatus taskStatus : status.getTaskReports()) { + if (taskStatus.getRunState() != TaskStatus.State.RUNNING && + taskStatus.getRunState() != TaskStatus.State.UNASSIGNED && + taskStatus.getRunState() != TaskStatus.State.COMMIT_PENDING && + !taskStatus.inTaskCleanupPhase()) { + if (taskStatus.getIsMap()) { + mapTotal--; + } else { + reduceTotal--; + } + try { + myInstrumentation.completeTask(taskStatus.getTaskID()); + } catch (MetricsException me) { + LOG.warn("Caught: " + StringUtils.stringifyException(me)); + } + runningTasks.remove(taskStatus.getTaskID()); + } + } + + // Clear transient status information which should only + // be sent once to the JobTracker + for (TaskInProgress tip: runningTasks.values()) { + tip.getStatus().clearStatus(); + } + } + + // Force a rebuild of 'status' on the next iteration + status = null; + + return heartbeatResponse; + } + + private boolean doCheckBuildVersion() { + return fConf.getBoolean(CHECK_TASKTRACKER_BUILD_VERSION, true); + } + + long getMapUserLogRetainSize() { + return fConf.getLong(MAP_USERLOG_RETAIN_SIZE, -1); + } + + void setMapUserLogRetainSize(long retainSize) { + fConf.setLong(MAP_USERLOG_RETAIN_SIZE, retainSize); + } + + long getReduceUserLogRetainSize() { + return fConf.getLong(REDUCE_USERLOG_RETAIN_SIZE, -1); + } + + void setReduceUserLogRetainSize(long retainSize) { + fConf.setLong(REDUCE_USERLOG_RETAIN_SIZE, retainSize); + } + + /** + * Returns the MRAsyncDiskService object for async deletions. + */ + public MRAsyncDiskService getAsyncDiskService() { + return asyncDiskService; + } + + /** + * Return the total virtual memory available on this TaskTracker. + * @return total size of virtual memory. + */ + long getTotalVirtualMemoryOnTT() { + return totalVirtualMemoryOnTT; + } + + /** + * Return the total physical memory available on this TaskTracker. + * @return total size of physical memory. + */ + long getTotalPhysicalMemoryOnTT() { + return totalPhysicalMemoryOnTT; + } + + /** + * Return the free virtual memory available on this TaskTracker. + * @return total size of free virtual memory. + */ + long getAvailableVirtualMemoryOnTT() { + long availableVirtualMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT; + if (resourceCalculatorPlugin != null) { + availableVirtualMemoryOnTT = + resourceCalculatorPlugin.getAvailableVirtualMemorySize(); + if (availableVirtualMemoryOnTT <= 0) { + LOG.warn("TaskTracker's freeVmem could not be calculated. " + + "Setting it to " + JobConf.DISABLED_MEMORY_LIMIT); + availableVirtualMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT; + } + } + return availableVirtualMemoryOnTT; + } + + /** + * Return the free physical memory available on this TaskTracker. + * @return total size of free physical memory. + */ + long getAvailablePhysicalMemoryOnTT() { + long availablePhysicalMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT; + if (resourceCalculatorPlugin != null) { + availablePhysicalMemoryOnTT = + resourceCalculatorPlugin.getAvailablePhysicalMemorySize(); + if (availablePhysicalMemoryOnTT <= 0) { + LOG.warn("TaskTracker's freePmem could not be calculated. 
" + + "Setting it to " + JobConf.DISABLED_MEMORY_LIMIT); + availablePhysicalMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT; + } + } + return availablePhysicalMemoryOnTT; + } + + /** + * Return the cumulative CPU used time on this TaskTracker since system is on + * @return cumulative CPU used time in millisecond + */ + long getCumulativeCpuTimeOnTT() { + long cumulativeCpuTime = TaskTrackerStatus.UNAVAILABLE; + if (resourceCalculatorPlugin != null) { + cumulativeCpuTime = resourceCalculatorPlugin.getCumulativeCpuTime(); + } + return cumulativeCpuTime; + } + + /** + * Return the number of Processors on this TaskTracker + * @return number of processors + */ + int getNumProcessorsOnTT() { + int numProcessors = TaskTrackerStatus.UNAVAILABLE; + if (resourceCalculatorPlugin != null) { + numProcessors = resourceCalculatorPlugin.getNumProcessors(); + } + return numProcessors; + } + + /** + * Return the CPU frequency of this TaskTracker + * @return CPU frequency in kHz + */ + long getCpuFrequencyOnTT() { + long cpuFrequency = TaskTrackerStatus.UNAVAILABLE; + if (resourceCalculatorPlugin != null) { + cpuFrequency = resourceCalculatorPlugin.getCpuFrequency(); + } + return cpuFrequency; + } + + /** + * Return the CPU usage in % of this TaskTracker + * @return CPU usage in % + */ + float getCpuUsageOnTT() { + float cpuUsage = TaskTrackerStatus.UNAVAILABLE; + if (resourceCalculatorPlugin != null) { + cpuUsage = resourceCalculatorPlugin.getCpuUsage(); + } + return cpuUsage; + } + + long getTotalMemoryAllottedForTasksOnTT() { + return totalMemoryAllottedForTasks; + } + + /** + * Check if the jobtracker directed a 'reset' of the tasktracker. + * + * @param actions the directives of the jobtracker for the tasktracker. + * @return true if tasktracker is to be reset, + * false otherwise. + */ + private boolean reinitTaskTracker(TaskTrackerAction[] actions) { + if (actions != null) { + for (TaskTrackerAction action : actions) { + if (action.getActionId() == + TaskTrackerAction.ActionType.REINIT_TRACKER) { + LOG.info("Recieved RenitTrackerAction from JobTracker"); + return true; + } + } + } + return false; + } + + /** + * Kill any tasks that have not reported progress in the last X seconds. + */ + private synchronized void markUnresponsiveTasks() throws IOException { + long now = System.currentTimeMillis(); + for (TaskInProgress tip: runningTasks.values()) { + if (tip.getRunState() == TaskStatus.State.RUNNING || + tip.getRunState() == TaskStatus.State.COMMIT_PENDING || + tip.isCleaningup()) { + // Check the per-job timeout interval for tasks; + // an interval of '0' implies it is never timed-out + long jobTaskTimeout = tip.getTaskTimeout(); + if (jobTaskTimeout == 0) { + continue; + } + + // Check if the task has not reported progress for a + // time-period greater than the configured time-out + long timeSinceLastReport = now - tip.getLastProgressReport(); + if (timeSinceLastReport > jobTaskTimeout && !tip.wasKilled) { + String msg = + "Task " + tip.getTask().getTaskID() + " failed to report status for " + + (timeSinceLastReport / 1000) + " seconds. 
Killing!"; + LOG.info(tip.getTask().getTaskID() + ": " + msg); + ReflectionUtils.logThreadInfo(LOG, "lost task", 30); + tip.reportDiagnosticInfo(msg); + myInstrumentation.timedoutTask(tip.getTask().getTaskID()); + purgeTask(tip, true); + } + } + } + } + + private static PathDeletionContext[] buildPathDeletionContexts(FileSystem fs, + Path[] paths) { + int i = 0; + PathDeletionContext[] contexts = new PathDeletionContext[paths.length]; + + for (Path p : paths) { + contexts[i++] = new PathDeletionContext(fs, p.toUri().getPath()); + } + return contexts; + } + + static PathDeletionContext[] buildTaskControllerPathDeletionContexts( + FileSystem fs, Path[] paths, Task task, boolean isWorkDir, + TaskController taskController) + throws IOException { + int i = 0; + PathDeletionContext[] contexts = + new TaskControllerPathDeletionContext[paths.length]; + + for (Path p : paths) { + contexts[i++] = new TaskControllerPathDeletionContext(fs, p, task, + isWorkDir, taskController); + } + return contexts; + } + + /** + * The task tracker is done with this job, so we need to clean up. + * @param action The action with the job + * @throws IOException + */ + private synchronized void purgeJob(KillJobAction action) throws IOException { + JobID jobId = action.getJobID(); + LOG.info("Received 'KillJobAction' for job: " + jobId); + RunningJob rjob = null; + synchronized (runningJobs) { + rjob = runningJobs.get(jobId); + } + + if (rjob == null) { + LOG.warn("Unknown job " + jobId + " being deleted."); + } else { + synchronized (rjob) { + // Add this tips of this job to queue of tasks to be purged + for (TaskInProgress tip : rjob.tasks) { + tip.jobHasFinished(false); + Task t = tip.getTask(); + if (t.isMapTask()) { + indexCache.removeMap(tip.getTask().getTaskID().toString()); + } + } + // Delete the job directory for this + // task if the job is done/failed + if (!rjob.keepJobFiles){ + PathDeletionContext[] contexts = buildPathDeletionContexts(localFs, + getLocalFiles(fConf, getLocalJobDir(rjob.getJobID().toString()))); + directoryCleanupThread.addToQueue(contexts); + } + // Remove this job + rjob.tasks.clear(); + } + } + + synchronized(runningJobs) { + runningJobs.remove(jobId); + } + + } + + + /** + * Remove the tip and update all relevant state. + * + * @param tip {@link TaskInProgress} to be removed. + * @param wasFailure did the task fail or was it killed? + */ + private void purgeTask(TaskInProgress tip, boolean wasFailure) + throws IOException { + if (tip != null) { + LOG.info("About to purge task: " + tip.getTask().getTaskID()); + + // Remove the task from running jobs, + // removing the job if it's the last task + removeTaskFromJob(tip.getTask().getJobID(), tip); + tip.jobHasFinished(wasFailure); + if (tip.getTask().isMapTask()) { + indexCache.removeMap(tip.getTask().getTaskID().toString()); + } + } + } + + /** Check if we're dangerously low on disk space + * If so, kill jobs to free up space and make sure + * we don't accept any new tasks + * Try killing the reduce jobs first, since I believe they + * use up most space + * Then pick the one with least progress + */ + private void killOverflowingTasks() throws IOException { + long localMinSpaceKill; + synchronized(this){ + localMinSpaceKill = minSpaceKill; + } + if (!enoughFreeSpace(localMinSpaceKill)) { + acceptNewTasks=false; + //we give up! 
do not accept new tasks until + //all the ones running have finished and they're all cleared up + synchronized (this) { + TaskInProgress killMe = findTaskToKill(null); + + if (killMe!=null) { + String msg = "Tasktracker running out of space." + + " Killing task."; + LOG.info(killMe.getTask().getTaskID() + ": " + msg); + killMe.reportDiagnosticInfo(msg); + purgeTask(killMe, false); + } + } + } + } + + /** + * Pick a task to kill to free up memory/disk-space + * @param tasksToExclude tasks that are to be excluded while trying to find a + * task to kill. If null, all runningTasks will be searched. + * @return the task to kill or null, if one wasn't found + */ + synchronized TaskInProgress findTaskToKill(List tasksToExclude) { + TaskInProgress killMe = null; + for (Iterator it = runningTasks.values().iterator(); it.hasNext();) { + TaskInProgress tip = (TaskInProgress) it.next(); + + if (tasksToExclude != null + && tasksToExclude.contains(tip.getTask().getTaskID())) { + // exclude this task + continue; + } + + if ((tip.getRunState() == TaskStatus.State.RUNNING || + tip.getRunState() == TaskStatus.State.COMMIT_PENDING) && + !tip.wasKilled) { + + if (killMe == null) { + killMe = tip; + + } else if (!tip.getTask().isMapTask()) { + //reduce task, give priority + if (killMe.getTask().isMapTask() || + (tip.getTask().getProgress().get() < + killMe.getTask().getProgress().get())) { + + killMe = tip; + } + + } else if (killMe.getTask().isMapTask() && + tip.getTask().getProgress().get() < + killMe.getTask().getProgress().get()) { + //map task, only add if the progress is lower + + killMe = tip; + } + } + } + return killMe; + } + + /** + * Check if any of the local directories has enough + * free space (more than minSpace) + * + * If not, do not try to get a new task assigned + * @return + * @throws IOException + */ + private boolean enoughFreeSpace(long minSpace) throws IOException { + if (minSpace == 0) { + return true; + } + return minSpace < getFreeSpace(); + } + + private long getFreeSpace() throws IOException { + long biggestSeenSoFar = 0; + String[] localDirs = fConf.getLocalDirs(); + for (int i = 0; i < localDirs.length; i++) { + DF df = null; + if (localDirsDf.containsKey(localDirs[i])) { + df = localDirsDf.get(localDirs[i]); + } else { + df = new DF(new File(localDirs[i]), fConf); + localDirsDf.put(localDirs[i], df); + } + + long availOnThisVol = df.getAvailable(); + if (availOnThisVol > biggestSeenSoFar) { + biggestSeenSoFar = availOnThisVol; + } + } + + //Should ultimately hold back the space we expect running tasks to use but + //that estimate isn't currently being passed down to the TaskTrackers + return biggestSeenSoFar; + } + + /** + * Try to get the size of output for this task. + * Returns -1 if it can't be found. 
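 * <p>Editor's note: only successful map attempts report a size; the value is
 * attached to every heartbeat via
 * {@code status.setOutputSize(tryToGetOutputSize(status.getTaskID(), fConf))}
 * in cloneAndResetRunningTaskStatuses(), so the JobTracker can tell how much
 * map output this tracker is holding.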
+ * @return + */ + long tryToGetOutputSize(TaskAttemptID taskId, JobConf conf) { + + try{ + TaskInProgress tip; + synchronized(this) { + tip = tasks.get(taskId); + } + if(tip == null) + return -1; + + if (!tip.getTask().isMapTask() || + tip.getRunState() != TaskStatus.State.SUCCEEDED) { + return -1; + } + + MapOutputFile mapOutputFile = new MapOutputFile(); + mapOutputFile.setJobId(taskId.getJobID()); + mapOutputFile.setConf(conf); + + Path tmp_output = mapOutputFile.getOutputFile(taskId); + if(tmp_output == null) + return 0; + FileSystem localFS = FileSystem.getLocal(conf); + FileStatus stat = localFS.getFileStatus(tmp_output); + if(stat == null) + return 0; + else + return stat.getLen(); + } catch(IOException e) { + LOG.info(e); + return -1; + } + } + + private TaskLauncher mapLauncher; + private TaskLauncher reduceLauncher; + public JvmManager getJvmManagerInstance() { + return jvmManager; + } + + private void addToTaskQueue(LaunchTaskAction action) { + if (action.getTask().isMapTask()) { + mapLauncher.addToTaskQueue(action); + } else { + reduceLauncher.addToTaskQueue(action); + } + } + + private class TaskLauncher extends Thread { + private IntWritable numFreeSlots; + private final int maxSlots; + private List tasksToLaunch; + + public TaskLauncher(TaskType taskType, int numSlots) { + this.maxSlots = numSlots; + this.numFreeSlots = new IntWritable(numSlots); + this.tasksToLaunch = new LinkedList(); + setDaemon(true); + setName("TaskLauncher for " + taskType + " tasks"); + } + + public void addToTaskQueue(LaunchTaskAction action) { + synchronized (tasksToLaunch) { + TaskInProgress tip = registerTask(action, this); + tasksToLaunch.add(tip); + tasksToLaunch.notifyAll(); + } + } + + public void cleanTaskQueue() { + tasksToLaunch.clear(); + } + + public void addFreeSlots(int numSlots) { + synchronized (numFreeSlots) { + numFreeSlots.set(numFreeSlots.get() + numSlots); + assert (numFreeSlots.get() <= maxSlots); + LOG.info("addFreeSlot : current free slots : " + numFreeSlots.get()); + numFreeSlots.notifyAll(); + } + } + + public void run() { + while (running) { + try { + TaskInProgress tip; + Task task; + synchronized (tasksToLaunch) { + while (tasksToLaunch.isEmpty()) { + tasksToLaunch.wait(); + } + //get the TIP + tip = tasksToLaunch.remove(0); + task = tip.getTask(); + LOG.info("Trying to launch : " + tip.getTask().getTaskID() + + " which needs " + task.getNumSlotsRequired() + " slots"); + } + //wait for free slots to run + synchronized (numFreeSlots) { + while (numFreeSlots.get() < task.getNumSlotsRequired()) { + LOG.info("TaskLauncher : Waiting for " + task.getNumSlotsRequired() + + " to launch " + task.getTaskID() + ", currently we have " + + numFreeSlots.get() + " free slots"); + numFreeSlots.wait(); + } + LOG.info("In TaskLauncher, current free slots : " + numFreeSlots.get()+ + " and trying to launch "+tip.getTask().getTaskID() + + " which needs " + task.getNumSlotsRequired() + " slots"); + numFreeSlots.set(numFreeSlots.get() - task.getNumSlotsRequired()); + assert (numFreeSlots.get() >= 0); + } + synchronized (tip) { + //to make sure that there is no kill task action for this + if (tip.getRunState() != TaskStatus.State.UNASSIGNED && + tip.getRunState() != TaskStatus.State.FAILED_UNCLEAN && + tip.getRunState() != TaskStatus.State.KILLED_UNCLEAN) { + //got killed externally while still in the launcher queue + addFreeSlots(task.getNumSlotsRequired()); + continue; + } + tip.slotTaken = true; + } + //got a free slot. 
launch the task + startNewTask(tip); + } catch (InterruptedException e) { + if (!running) + return; // ALL DONE + LOG.warn ("Unexpected InterruptedException"); + } catch (Throwable th) { + LOG.error("TaskLauncher error " + + StringUtils.stringifyException(th)); + } + } + } + } + private TaskInProgress registerTask(LaunchTaskAction action, + TaskLauncher launcher) { + Task t = action.getTask(); + LOG.info("LaunchTaskAction (registerTask): " + t.getTaskID() + + " task's state:" + t.getState()); + TaskInProgress tip = new TaskInProgress(t, this.fConf, launcher); + synchronized (this) { + tasks.put(t.getTaskID(), tip); + runningTasks.put(t.getTaskID(), tip); + boolean isMap = t.isMapTask(); + if (isMap) { + mapTotal++; + } else { + reduceTotal++; + } + } + return tip; + } + /** + * Start a new task. + * All exceptions are handled locally, so that we don't mess up the + * task tracker. + */ + private void startNewTask(TaskInProgress tip) { + try { + localizeJob(tip); + } catch (Throwable e) { + String msg = ("Error initializing " + tip.getTask().getTaskID() + + ":\n" + StringUtils.stringifyException(e)); + LOG.warn(msg); + tip.reportDiagnosticInfo(msg); + try { + tip.kill(true); + tip.cleanup(true); + } catch (IOException ie2) { + LOG.info("Error cleaning up " + tip.getTask().getTaskID() + ":\n" + + StringUtils.stringifyException(ie2)); + } + + // Careful! + // This might not be an 'Exception' - don't handle 'Error' here! + if (e instanceof Error) { + throw ((Error) e); + } + } + } + + void addToMemoryManager(TaskAttemptID attemptId, boolean isMap, + JobConf conf) { + if (isTaskMemoryManagerEnabled()) { + taskMemoryManager.addTask(attemptId, + isMap ? conf + .getMemoryForMapTask() * 1024 * 1024L : conf + .getMemoryForReduceTask() * 1024 * 1024L); + } + } + + void removeFromMemoryManager(TaskAttemptID attemptId) { + // Remove the entry from taskMemoryManagerThread's data structures. + if (isTaskMemoryManagerEnabled()) { + taskMemoryManager.removeTask(attemptId); + } + } + + /** + * Notify the tasktracker to send an out-of-band heartbeat. + */ + private void notifyTTAboutTaskCompletion() { + if (oobHeartbeatOnTaskCompletion) { + synchronized (finishedCount) { + int value = finishedCount.get(); + finishedCount.set(value+1); + finishedCount.notify(); + } + } + } + + /** + * The server retry loop. + * This while-loop attempts to connect to the JobTracker. It only + * loops when the old TaskTracker has gone bad (its state is + * stale somehow) and we need to reinitialize everything. + */ + public void run() { + try { + startCleanupThreads(); + boolean denied = false; + while (running && !shuttingDown && !denied) { + boolean staleState = false; + try { + // This while-loop attempts reconnects if we get network errors + while (running && !staleState && !shuttingDown && !denied) { + try { + State osState = offerService(); + if (osState == State.STALE) { + staleState = true; + } else if (osState == State.DENIED) { + denied = true; + } + } catch (Exception ex) { + if (!shuttingDown) { + LOG.info("Lost connection to JobTracker [" + + jobTrackAddr + "]. 
Retrying...", ex); + try { + Thread.sleep(5000); + } catch (InterruptedException ie) { + } + } + } + } + } finally { + close(); + } + if (shuttingDown) { return; } + LOG.warn("Reinitializing local state"); + initialize(); + } + if (denied) { + shutdown(); + } + } catch (IOException iex) { + LOG.error("Got fatal exception while reinitializing TaskTracker: " + + StringUtils.stringifyException(iex)); + return; + } + } + + /////////////////////////////////////////////////////// + // TaskInProgress maintains all the info for a Task that + // lives at this TaskTracker. It maintains the Task object, + // its TaskStatus, and the TaskRunner. + /////////////////////////////////////////////////////// + class TaskInProgress { + Task task; + long lastProgressReport; + StringBuffer diagnosticInfo = new StringBuffer(); + private TaskRunner runner; + volatile boolean done = false; + volatile boolean wasKilled = false; + private JobConf defaultJobConf; + private JobConf localJobConf; + private boolean keepFailedTaskFiles; + private boolean alwaysKeepTaskFiles; + private TaskStatus taskStatus; + private long taskTimeout; + private String debugCommand; + private volatile boolean slotTaken = false; + private TaskLauncher launcher; + + /** + */ + public TaskInProgress(Task task, JobConf conf) { + this(task, conf, null); + } + + public TaskInProgress(Task task, JobConf conf, TaskLauncher launcher) { + this.task = task; + this.launcher = launcher; + this.lastProgressReport = System.currentTimeMillis(); + this.defaultJobConf = conf; + localJobConf = null; + taskStatus = TaskStatus.createTaskStatus(task.isMapTask(), task.getTaskID(), + 0.0f, + task.getNumSlotsRequired(), + task.getState(), + diagnosticInfo.toString(), + "initializing", + getName(), + task.isTaskCleanupTask() ? + TaskStatus.Phase.CLEANUP : + task.isMapTask()? 
TaskStatus.Phase.MAP: + TaskStatus.Phase.SHUFFLE, + task.getCounters()); + taskTimeout = (10 * 60 * 1000); + } + + private void localizeTask(Task task) throws IOException{ + + Path localTaskDir = + lDirAlloc.getLocalPathForWrite( + TaskTracker.getLocalTaskDir(task.getJobID().toString(), + task.getTaskID().toString(), task.isTaskCleanupTask()), + defaultJobConf ); + + FileSystem localFs = FileSystem.getLocal(fConf); + if (!localFs.mkdirs(localTaskDir)) { + throw new IOException("Mkdirs failed to create " + + localTaskDir.toString()); + } + + // create symlink for ../work if it already doesnt exist + String workDir = lDirAlloc.getLocalPathToRead( + TaskTracker.getLocalJobDir(task.getJobID().toString()) + + Path.SEPARATOR + + "work", defaultJobConf).toString(); + String link = localTaskDir.getParent().toString() + + Path.SEPARATOR + "work"; + File flink = new File(link); + if (!flink.exists()) + FileUtil.symLink(workDir, link); + + // create the working-directory of the task + Path cwd = lDirAlloc.getLocalPathForWrite( + getLocalTaskDir(task.getJobID().toString(), + task.getTaskID().toString(), task.isTaskCleanupTask()) + + Path.SEPARATOR + MRConstants.WORKDIR, + defaultJobConf); + if (!localFs.mkdirs(cwd)) { + throw new IOException("Mkdirs failed to create " + + cwd.toString()); + } + + Path localTaskFile = new Path(localTaskDir, "job.xml"); + task.setJobFile(localTaskFile.toString()); + localJobConf.set("mapred.local.dir", + fConf.get("mapred.local.dir")); + if (fConf.get("slave.host.name") != null) { + localJobConf.set("slave.host.name", + fConf.get("slave.host.name")); + } + + localJobConf.set("mapred.task.id", task.getTaskID().toString()); + keepFailedTaskFiles = localJobConf.getKeepFailedTaskFiles(); + + task.localizeConfiguration(localJobConf); + + List staticResolutions = NetUtils.getAllStaticResolutions(); + if (staticResolutions != null && staticResolutions.size() > 0) { + StringBuffer str = new StringBuffer(); + + for (int i = 0; i < staticResolutions.size(); i++) { + String[] hostToResolved = staticResolutions.get(i); + str.append(hostToResolved[0]+"="+hostToResolved[1]); + if (i != staticResolutions.size() - 1) { + str.append(','); + } + } + localJobConf.set("hadoop.net.static.resolutions", str.toString()); + } + if (task.isMapTask()) { + debugCommand = localJobConf.getMapDebugScript(); + } else { + debugCommand = localJobConf.getReduceDebugScript(); + } + String keepPattern = localJobConf.getKeepTaskFilesPattern(); + if (keepPattern != null) { + alwaysKeepTaskFiles = + Pattern.matches(keepPattern, task.getTaskID().toString()); + } else { + alwaysKeepTaskFiles = false; + } + if (debugCommand != null || localJobConf.getProfileEnabled() || + alwaysKeepTaskFiles || keepFailedTaskFiles) { + //disable jvm reuse + localJobConf.setNumTasksToExecutePerJvm(1); + } + if (isTaskMemoryManagerEnabled()) { + localJobConf.setBoolean("task.memory.mgmt.enabled", true); + } + OutputStream out = localFs.create(localTaskFile); + try { + localJobConf.writeXml(out); + } finally { + out.close(); + } + task.setConf(localJobConf); + } + + /** + */ + public Task getTask() { + return task; + } + + public TaskRunner getTaskRunner() { + return runner; + } + + public synchronized void setJobConf(JobConf lconf){ + this.localJobConf = lconf; + keepFailedTaskFiles = localJobConf.getKeepFailedTaskFiles(); + taskTimeout = localJobConf.getLong("mapred.task.timeout", + 10 * 60 * 1000); + } + + public synchronized JobConf getJobConf() { + return localJobConf; + } + + /** + */ + public synchronized TaskStatus 
getStatus() { + taskStatus.setDiagnosticInfo(diagnosticInfo.toString()); + if (diagnosticInfo.length() > 0) { + diagnosticInfo = new StringBuffer(); + } + + return taskStatus; + } + + /** + * Kick off the task execution + */ + public synchronized void launchTask() throws IOException { + if (this.taskStatus.getRunState() == TaskStatus.State.UNASSIGNED || + this.taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN || + this.taskStatus.getRunState() == TaskStatus.State.KILLED_UNCLEAN) { + localizeTask(task); + if (this.taskStatus.getRunState() == TaskStatus.State.UNASSIGNED) { + this.taskStatus.setRunState(TaskStatus.State.RUNNING); + } + this.runner = task.createRunner(TaskTracker.this, this); + this.runner.start(); + this.taskStatus.setStartTime(System.currentTimeMillis()); + } else { + LOG.info("Not launching task: " + task.getTaskID() + + " since it's state is " + this.taskStatus.getRunState()); + } + } + + boolean isCleaningup() { + return this.taskStatus.inTaskCleanupPhase(); + } + + /** + * The task is reporting its progress + */ + public synchronized void reportProgress(TaskStatus taskStatus) + { + LOG.info(task.getTaskID() + " " + taskStatus.getProgress() + + "% " + taskStatus.getStateString()); + // task will report its state as + // COMMIT_PENDING when it is waiting for commit response and + // when it is committing. + // cleanup attempt will report its state as FAILED_UNCLEAN/KILLED_UNCLEAN + if (this.done || + (this.taskStatus.getRunState() != TaskStatus.State.RUNNING && + this.taskStatus.getRunState() != TaskStatus.State.COMMIT_PENDING && + !isCleaningup()) || + ((this.taskStatus.getRunState() == TaskStatus.State.COMMIT_PENDING || + this.taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN || + this.taskStatus.getRunState() == TaskStatus.State.KILLED_UNCLEAN) && + taskStatus.getRunState() == TaskStatus.State.RUNNING)) { + //make sure we ignore progress messages after a task has + //invoked TaskUmbilicalProtocol.done() or if the task has been + //KILLED/FAILED/FAILED_UNCLEAN/KILLED_UNCLEAN + //Also ignore progress update if the state change is from + //COMMIT_PENDING/FAILED_UNCLEAN/KILLED_UNCLEA to RUNNING + LOG.info(task.getTaskID() + " Ignoring status-update since " + + ((this.done) ? "task is 'done'" : + ("runState: " + this.taskStatus.getRunState())) + ); + return; + } + + this.taskStatus.statusUpdate(taskStatus); + this.lastProgressReport = System.currentTimeMillis(); + } + + /** + */ + public long getLastProgressReport() { + return lastProgressReport; + } + + /** + */ + public TaskStatus.State getRunState() { + return taskStatus.getRunState(); + } + + /** + * The task's configured timeout. + * + * @return the task's configured timeout. 
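 * <p>Editor's note: the value is read from {@code mapred.task.timeout}
 * (milliseconds, default 10 minutes) in {@code setJobConf(JobConf)}; a value
 * of 0 makes markUnresponsiveTasks() skip the check entirely. For example:
 * <pre>{@code
 *   conf.setLong("mapred.task.timeout", 20 * 60 * 1000L); // illustrative: 20 minutes
 * }</pre>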
+ */ + public long getTaskTimeout() { + return taskTimeout; + } + + /** + * The task has reported some diagnostic info about its status + */ + public synchronized void reportDiagnosticInfo(String info) { + this.diagnosticInfo.append(info); + } + + public synchronized void reportNextRecordRange(SortedRanges.Range range) { + this.taskStatus.setNextRecordRange(range); + } + + /** + * The task is reporting that it's done running + */ + public synchronized void reportDone() { + if (isCleaningup()) { + if (this.taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN) { + this.taskStatus.setRunState(TaskStatus.State.FAILED); + } else if (this.taskStatus.getRunState() == + TaskStatus.State.KILLED_UNCLEAN) { + this.taskStatus.setRunState(TaskStatus.State.KILLED); + } + } else { + this.taskStatus.setRunState(TaskStatus.State.SUCCEEDED); + } + this.taskStatus.setProgress(1.0f); + this.taskStatus.setFinishTime(System.currentTimeMillis()); + this.done = true; + jvmManager.taskFinished(runner); + runner.signalDone(); + LOG.info("Task " + task.getTaskID() + " is done."); + LOG.info("reported output size for " + task.getTaskID() + " was " + taskStatus.getOutputSize()); + myInstrumentation.statusUpdate(task, taskStatus); + // Checking the existence of map output directory + if (task.isMapTask()) { + LocalDirAllocator lDirAlloc = TaskTracker.this.localDirAllocator; + String jobId = task.getJobID().toString(); + String taskId = task.getTaskID().toString(); + try { + String outputDir = + TaskTracker.getIntermediateOutputDir(jobId, taskId); + Path mapOutput = lDirAlloc.getLocalPathToRead( + outputDir + "/file.out", TaskTracker.this.fConf); + Path mapOutputIndex = lDirAlloc.getLocalPathToRead( + outputDir + "/file.out.index", TaskTracker.this.fConf); + LOG.info("Map output for " + taskId + " is in " + mapOutput); + LOG.info("Map output index for " + taskId + + " is in " + mapOutputIndex); + } catch (IOException e) { + LOG.warn("Cannot find map output for " + taskId, e); + } + } + } + + public boolean wasKilled() { + return wasKilled; + } + + /** + * A task is reporting in as 'done'. + * + * We need to notify the tasktracker to send an out-of-band heartbeat. + * If isn't commitPending, we need to finalize the task + * and release the slot it's occupied. + * + * @param commitPending is the task-commit pending? + */ + void reportTaskFinished(boolean commitPending) { + if (!commitPending) { + taskFinished(); + releaseSlot(); + } + notifyTTAboutTaskCompletion(); + } + + /* State changes: + * RUNNING/COMMIT_PENDING -> FAILED_UNCLEAN/FAILED/KILLED_UNCLEAN/KILLED + * FAILED_UNCLEAN -> FAILED + * KILLED_UNCLEAN -> KILLED + */ + private void setTaskFailState(boolean wasFailure) { + // go FAILED_UNCLEAN -> FAILED and KILLED_UNCLEAN -> KILLED always + if (taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN) { + taskStatus.setRunState(TaskStatus.State.FAILED); + } else if (taskStatus.getRunState() == + TaskStatus.State.KILLED_UNCLEAN) { + taskStatus.setRunState(TaskStatus.State.KILLED); + } else if (task.isMapOrReduce() && + taskStatus.getPhase() != TaskStatus.Phase.CLEANUP) { + if (wasFailure) { + taskStatus.setRunState(TaskStatus.State.FAILED_UNCLEAN); + } else { + taskStatus.setRunState(TaskStatus.State.KILLED_UNCLEAN); + } + } else { + if (wasFailure) { + taskStatus.setRunState(TaskStatus.State.FAILED); + } else { + taskStatus.setRunState(TaskStatus.State.KILLED); + } + } + } + + /** + * The task has actually finished running. 
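 * <p>Editor's note: the method below waits up to WAIT_FOR_DONE for the task
 * to report itself done, settles the final state, and, for failed attempts
 * with a debug script configured, runs the script and appends its output to
 * the diagnostics. Judging from the argument list built below, the script is
 * invoked roughly as
 * <pre>{@code
 *   <debug command...> <task stdout> <task stderr> <task syslog> <job.xml> [pipes program]
 * }</pre>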
+ */ + public void taskFinished() { + long start = System.currentTimeMillis(); + + // + // Wait until task reports as done. If it hasn't reported in, + // wait for a second and try again. + // + while (!done && (System.currentTimeMillis() - start < WAIT_FOR_DONE)) { + try { + Thread.sleep(1000); + } catch (InterruptedException ie) { + } + } + + // + // Change state to success or failure, depending on whether + // task was 'done' before terminating + // + boolean needCleanup = false; + synchronized (this) { + // Remove the task from MemoryManager, if the task SUCCEEDED or FAILED. + // KILLED tasks are removed in method kill(), because Kill + // would result in launching a cleanup attempt before + // TaskRunner returns; if remove happens here, it would remove + // wrong task from memory manager. + if (done || !wasKilled) { + removeFromMemoryManager(task.getTaskID()); + } + if (!done) { + if (!wasKilled) { + failures += 1; + setTaskFailState(true); + // call the script here for the failed tasks. + if (debugCommand != null) { + String taskStdout =""; + String taskStderr =""; + String taskSyslog =""; + String jobConf = task.getJobFile(); + try { + Map allFilesDetails = + TaskLog.getAllLogsFileDetails(task.getTaskID(), false); + // get task's stdout file + taskStdout = + TaskLog.getRealTaskLogFilePath( + allFilesDetails.get(LogName.STDOUT).location, + LogName.STDOUT); + // get task's stderr file + taskStderr = + TaskLog.getRealTaskLogFilePath( + allFilesDetails.get(LogName.STDERR).location, + LogName.STDERR); + // get task's syslog file + taskSyslog = + TaskLog.getRealTaskLogFilePath( + allFilesDetails.get(LogName.SYSLOG).location, + LogName.SYSLOG); + } catch(IOException e){ + LOG.warn("Exception finding task's stdout/err/syslog files"); + } + File workDir = null; + try { + workDir = new File(lDirAlloc.getLocalPathToRead( + TaskTracker.getLocalTaskDir( + task.getJobID().toString(), + task.getTaskID().toString(), + task.isTaskCleanupTask()) + + Path.SEPARATOR + MRConstants.WORKDIR, + localJobConf). toString()); + } catch (IOException e) { + LOG.warn("Working Directory of the task " + task.getTaskID() + + "doesnt exist. Caught exception " + + StringUtils.stringifyException(e)); + } + // Build the command + File stdout = TaskLog.getRealTaskLogFileLocation( + task.getTaskID(), TaskLog.LogName.DEBUGOUT); + // add pipes program as argument if it exists. + String program =""; + String executable = Submitter.getExecutable(localJobConf); + if ( executable != null) { + try { + program = new URI(executable).getFragment(); + } catch (URISyntaxException ur) { + LOG.warn("Problem in the URI fragment for pipes executable"); + } + } + String [] debug = debugCommand.split(" "); + Vector vargs = new Vector(); + for (String component : debug) { + vargs.add(component); + } + vargs.add(taskStdout); + vargs.add(taskStderr); + vargs.add(taskSyslog); + vargs.add(jobConf); + vargs.add(program); + try { + List wrappedCommand = TaskLog.captureDebugOut + (vargs, stdout); + // run the script. + try { + runScript(wrappedCommand, workDir); + } catch (IOException ioe) { + LOG.warn("runScript failed with: " + StringUtils. + stringifyException(ioe)); + } + } catch(IOException e) { + LOG.warn("Error in preparing wrapped debug command"); + } + + // add all lines of debug out to diagnostics + try { + int num = localJobConf.getInt("mapred.debug.out.lines", -1); + addDiagnostics(FileUtil.makeShellPath(stdout),num,"DEBUG OUT"); + } catch(IOException ioe) { + LOG.warn("Exception in add diagnostics!"); + } + + // Debug-command is run. 
Do the post-debug-script-exit debug-logs
+              // processing. Truncate the logs.
+              getTaskLogsMonitor().addProcessForLogTruncation(
+                  task.getTaskID(), Arrays.asList(task));
+            }
+          }
+          taskStatus.setProgress(0.0f);
+        }
+        this.taskStatus.setFinishTime(System.currentTimeMillis());
+        needCleanup = (taskStatus.getRunState() == TaskStatus.State.FAILED ||
+                       taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN ||
+                       taskStatus.getRunState() == TaskStatus.State.KILLED_UNCLEAN ||
+                       taskStatus.getRunState() == TaskStatus.State.KILLED);
+      }
+
+      //
+      // If the task has failed, or if the task was killAndCleanup()'ed,
+      // we should clean up right away. We only wait to cleanup
+      // if the task succeeded, and its results might be useful
+      // later on to downstream job processing.
+      //
+      if (needCleanup) {
+        removeTaskFromJob(task.getJobID(), this);
+      }
+      try {
+        cleanup(needCleanup);
+      } catch (IOException ie) {
+      }
+
+    }
+
+
+    /**
+     * Runs the script given in args
+     * @param args script name followed by its arguments
+     * @param dir current working directory.
+     * @throws IOException
+     */
+    public void runScript(List<String> args, File dir) throws IOException {
+      ShellCommandExecutor shexec =
+              new ShellCommandExecutor(args.toArray(new String[0]), dir);
+      shexec.execute();
+      int exitCode = shexec.getExitCode();
+      if (exitCode != 0) {
+        throw new IOException("Task debug script exit with nonzero status of " +
+                              exitCode + ".");
+      }
+    }
+
+    /**
+     * Add last 'num' lines of the given file to the diagnostics.
+     * if num =-1, all the lines of file are added to the diagnostics.
+     * @param file The file from which to collect diagnostics.
+     * @param num The number of lines to be sent to diagnostics.
+     * @param tag The tag is printed before the diagnostics are printed.
+     */
+    public void addDiagnostics(String file, int num, String tag) {
+      RandomAccessFile rafile = null;
+      try {
+        rafile = new RandomAccessFile(file,"r");
+        int no_lines =0;
+        String line = null;
+        StringBuffer tail = new StringBuffer();
+        tail.append("\n-------------------- "+tag+"---------------------\n");
+        String[] lines = null;
+        if (num >0) {
+          lines = new String[num];
+        }
+        while ((line = rafile.readLine()) != null) {
+          no_lines++;
+          if (num >0) {
+            if (no_lines <= num) {
+              lines[no_lines-1] = line;
+            }
+            else { // shift them up
+              for (int i=0; i<num-1; i++) {
+                lines[i] = lines[i+1];
+              }
+              lines[num-1] = line;
+            }
+          }
+          else if (num == -1) {
+            tail.append(line);
+            tail.append("\n");
+          }
+        }
+        int n = no_lines > num ?num:no_lines;
+        if (num >0) {
+          for (int i=0;i tasks;
+    volatile boolean localized;
+    final Object localizationLock;
+    boolean keepJobFiles;
+    FetchStatus f;
+    RunningJob(JobID jobid) {
+      this.jobid = jobid;
+      localized = false;
+      localizationLock = new Object();
+      tasks = new HashSet();
+      keepJobFiles = false;
+    }
+
+    JobID getJobID() {
+      return jobid;
+    }
+
+    void setFetchStatus(FetchStatus f) {
+      this.f = f;
+    }
+
+    FetchStatus getFetchStatus() {
+      return f;
+    }
+  }
+
+  /**
+   * Get the name for this task tracker.
+ * @return the string like "tracker_mymachine:50010" + */ + String getName() { + return taskTrackerName; + } + + private synchronized List cloneAndResetRunningTaskStatuses( + boolean sendCounters) { + List result = new ArrayList(runningTasks.size()); + for(TaskInProgress tip: runningTasks.values()) { + TaskStatus status = tip.getStatus(); + status.setIncludeCounters(sendCounters); + status.setOutputSize(tryToGetOutputSize(status.getTaskID(), fConf)); + // send counters for finished or failed tasks and commit pending tasks + if (status.getRunState() != TaskStatus.State.RUNNING) { + status.setIncludeCounters(true); + } + result.add((TaskStatus)status.clone()); + status.clearStatus(); + } + return result; + } + /** + * Get the list of tasks that will be reported back to the + * job tracker in the next heartbeat cycle. + * @return a copy of the list of TaskStatus objects + */ + synchronized List getRunningTaskStatuses() { + List result = new ArrayList(runningTasks.size()); + for(TaskInProgress tip: runningTasks.values()) { + result.add(tip.getStatus()); + } + return result; + } + + /** + * Get the list of stored tasks on this task tracker. + * @return + */ + synchronized List getNonRunningTasks() { + List result = new ArrayList(tasks.size()); + for(Map.Entry task: tasks.entrySet()) { + if (!runningTasks.containsKey(task.getKey())) { + result.add(task.getValue().getStatus()); + } + } + return result; + } + + + /** + * Get the list of tasks from running jobs on this task tracker. + * @return a copy of the list of TaskStatus objects + */ + synchronized List getTasksFromRunningJobs() { + List result = new ArrayList(tasks.size()); + for (Map.Entry item : runningJobs.entrySet()) { + RunningJob rjob = item.getValue(); + synchronized (rjob) { + for (TaskInProgress tip : rjob.tasks) { + result.add(tip.getStatus()); + } + } + } + return result; + } + + /** + * Get the default job conf for this tracker. + */ + JobConf getJobConf() { + return fConf; + } + + /** + * Check if the given local directories + * (and parent directories, if necessary) can be created. + * @param localDirs where the new TaskTracker should keep its local files. + * @throws DiskErrorException if all local directories are not writable + */ + private static void checkLocalDirs(String[] localDirs) + throws DiskErrorException { + boolean writable = false; + + if (localDirs != null) { + for (int i = 0; i < localDirs.length; i++) { + try { + DiskChecker.checkDir(new File(localDirs[i])); + writable = true; + } catch(DiskErrorException e) { + LOG.warn("Task Tracker local " + e.getMessage()); + } + } + } + + if (!writable) + throw new DiskErrorException( + "all local directories are not writable"); + } + + /** + * Is this task tracker idle? + * @return has this task tracker finished and cleaned up all of its tasks? 
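 * <p>Editor's note: offerService() uses this to recover after the tracker has
 * stopped taking work (e.g. when killOverflowingTasks() ran low on disk):
 * once {@code tasks} and {@code tasksToCleanup} are both empty,
 * {@code acceptNewTasks} is switched back to true.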
+ */ + public synchronized boolean isIdle() { + return tasks.isEmpty() && tasksToCleanup.isEmpty(); + } + + /** + * Start the TaskTracker, point toward the indicated JobTracker + */ + public static void main(String argv[]) throws Exception { + StringUtils.startupShutdownMessage(TaskTracker.class, argv, LOG); + if (argv.length != 0) { + System.out.println("usage: TaskTracker"); + System.exit(-1); + } + try { + JobConf conf=new JobConf(); + // enable the server to track time spent waiting on locks + ReflectionUtils.setContentionTracing + (conf.getBoolean("tasktracker.contention.tracking", false)); + new TaskTracker(conf).run(); + } catch (Throwable e) { + LOG.error("Can not start task tracker because "+ + StringUtils.stringifyException(e)); + System.exit(-1); + } + } + + /** + * This class is used in TaskTracker's Jetty to serve the map outputs + * to other nodes. + */ + public static class MapOutputServlet extends HttpServlet { + private static final int MAX_BYTES_TO_READ = 64 * 1024; + @Override + public void doGet(HttpServletRequest request, + HttpServletResponse response + ) throws ServletException, IOException { + String mapId = request.getParameter("map"); + String reduceId = request.getParameter("reduce"); + String jobId = request.getParameter("job"); + + if (jobId == null) { + throw new IOException("job parameter is required"); + } + + if (mapId == null || reduceId == null) { + throw new IOException("map and reduce parameters are required"); + } + ServletContext context = getServletContext(); + int reduce = Integer.parseInt(reduceId); + byte[] buffer = new byte[MAX_BYTES_TO_READ]; + // true iff IOException was caused by attempt to access input + boolean isInputException = true; + OutputStream outStream = null; + FSDataInputStream mapOutputIn = null; + + long totalRead = 0; + ShuffleServerMetrics shuffleMetrics = + (ShuffleServerMetrics) context.getAttribute("shuffleServerMetrics"); + TaskTracker tracker = + (TaskTracker) context.getAttribute("task.tracker"); + + long startTime = 0; + try { + shuffleMetrics.serverHandlerBusy(); + if(ClientTraceLog.isInfoEnabled()) + startTime = System.nanoTime(); + outStream = response.getOutputStream(); + JobConf conf = (JobConf) context.getAttribute("conf"); + LocalDirAllocator lDirAlloc = + (LocalDirAllocator)context.getAttribute("localDirAllocator"); + FileSystem rfs = ((LocalFileSystem) + context.getAttribute("local.file.system")).getRaw(); + + // Index file + Path indexFileName = lDirAlloc.getLocalPathToRead( + TaskTracker.getIntermediateOutputDir(jobId, mapId) + + "/file.out.index", conf); + + // Map-output file + Path mapOutputFileName = lDirAlloc.getLocalPathToRead( + TaskTracker.getIntermediateOutputDir(jobId, mapId) + + "/file.out", conf); + + /** + * Read the index file to get the information about where + * the map-output for the given reducer is available. 
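 * Editor's note: a reduce task fetches this data with an HTTP GET against the
 * servlet registered at /mapOutput, e.g. (host, port and IDs illustrative)
 *   http://tt-host:50060/mapOutput?job=<jobid>&map=<map attempt id>&reduce=<partition>
 * and the response carries the FROM_MAP_TASK, RAW_MAP_OUTPUT_LENGTH,
 * MAP_OUTPUT_LENGTH and FOR_REDUCE_TASK headers set just below.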
+ */ + IndexRecord info = + tracker.indexCache.getIndexInformation(mapId, reduce,indexFileName); + + //set the custom "from-map-task" http header to the map task from which + //the map output data is being transferred + response.setHeader(FROM_MAP_TASK, mapId); + + //set the custom "Raw-Map-Output-Length" http header to + //the raw (decompressed) length + response.setHeader(RAW_MAP_OUTPUT_LENGTH, + Long.toString(info.rawLength)); + + //set the custom "Map-Output-Length" http header to + //the actual number of bytes being transferred + response.setHeader(MAP_OUTPUT_LENGTH, + Long.toString(info.partLength)); + + //set the custom "for-reduce-task" http header to the reduce task number + //for which this map output is being transferred + response.setHeader(FOR_REDUCE_TASK, Integer.toString(reduce)); + + //use the same buffersize as used for reading the data from disk + response.setBufferSize(MAX_BYTES_TO_READ); + + /** + * Read the data from the sigle map-output file and + * send it to the reducer. + */ + //open the map-output file + mapOutputIn = rfs.open(mapOutputFileName); + + //seek to the correct offset for the reduce + mapOutputIn.seek(info.startOffset); + long rem = info.partLength; + int len = + mapOutputIn.read(buffer, 0, (int)Math.min(rem, MAX_BYTES_TO_READ)); + while (rem > 0 && len >= 0) { + rem -= len; + try { + shuffleMetrics.outputBytes(len); + outStream.write(buffer, 0, len); + outStream.flush(); + } catch (IOException ie) { + isInputException = false; + throw ie; + } + totalRead += len; + len = + mapOutputIn.read(buffer, 0, (int)Math.min(rem, MAX_BYTES_TO_READ)); + } + + LOG.info("Sent out " + totalRead + " bytes for reduce: " + reduce + + " from map: " + mapId + " given " + info.partLength + "/" + + info.rawLength); + } catch (IOException ie) { + Log log = (Log) context.getAttribute("log"); + String errorMsg = ("getMapOutput(" + mapId + "," + reduceId + + ") failed :\n"+ + StringUtils.stringifyException(ie)); + log.warn(errorMsg); + if (isInputException) { + tracker.mapOutputLost(TaskAttemptID.forName(mapId), errorMsg); + } + response.sendError(HttpServletResponse.SC_GONE, errorMsg); + shuffleMetrics.failedOutput(); + throw ie; + } finally { + if (null != mapOutputIn) { + mapOutputIn.close(); + } + final long endTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0; + shuffleMetrics.serverHandlerFree(); + if (ClientTraceLog.isInfoEnabled()) { + ClientTraceLog.info(String.format(MR_CLIENTTRACE_FORMAT, + request.getLocalAddr() + ":" + request.getLocalPort(), + request.getRemoteAddr() + ":" + request.getRemotePort(), + totalRead, "MAPRED_SHUFFLE", mapId, endTime-startTime)); + } + } + outStream.close(); + shuffleMetrics.successOutput(); + } + } + + // get the full paths of the directory in all the local disks. + Path[] getLocalFiles(JobConf conf, String subdir) throws IOException{ + String[] localDirs = conf.getLocalDirs(); + Path[] paths = new Path[localDirs.length]; + FileSystem localFs = FileSystem.getLocal(conf); + for (int i = 0; i < localDirs.length; i++) { + paths[i] = new Path(localDirs[i], subdir); + paths[i] = paths[i].makeQualified(localFs); + } + return paths; + } + + // get the paths in all the local disks. 
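+ // One Path per configured local directory, each qualified against the + // local file system (same as getLocalFiles above, without the subdirectory).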
+ Path[] getLocalDirs() throws IOException{ + String[] localDirs = fConf.getLocalDirs(); + Path[] paths = new Path[localDirs.length]; + FileSystem localFs = FileSystem.getLocal(fConf); + for (int i = 0; i < localDirs.length; i++) { + paths[i] = new Path(localDirs[i]); + paths[i] = paths[i].makeQualified(localFs); + } + return paths; + } + + FileSystem getLocalFileSystem(){ + return localFs; + } + + int getMaxCurrentMapTasks() { + return maxMapSlots; + } + + int getMaxCurrentReduceTasks() { + return maxReduceSlots; + } + + /** + * Is the TaskMemoryManager Enabled on this system? + * @return true if enabled, false otherwise. + */ + public boolean isTaskMemoryManagerEnabled() { + return taskMemoryManagerEnabled; + } + + public TaskMemoryManagerThread getTaskMemoryManager() { + return taskMemoryManager; + } + + /** + * Normalize the negative values in configuration + * + * @param val + * @return normalized val + */ + private long normalizeMemoryConfigValue(long val) { + if (val < 0) { + val = JobConf.DISABLED_MEMORY_LIMIT; + } + return val; + } + + /** + * Memory-related setup + */ + private void initializeMemoryManagement() { + + //handling @deprecated + if (fConf.get(MAPRED_TASKTRACKER_VMEM_RESERVED_PROPERTY) != null) { + LOG.warn( + JobConf.deprecatedString( + MAPRED_TASKTRACKER_VMEM_RESERVED_PROPERTY)); + } + + //handling @deprecated + if (fConf.get(MAPRED_TASKTRACKER_PMEM_RESERVED_PROPERTY) != null) { + LOG.warn( + JobConf.deprecatedString( + MAPRED_TASKTRACKER_PMEM_RESERVED_PROPERTY)); + } + + //handling @deprecated + if (fConf.get(JobConf.MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY) != null) { + LOG.warn( + JobConf.deprecatedString( + JobConf.MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY)); + } + + //handling @deprecated + if (fConf.get(JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY) != null) { + LOG.warn( + JobConf.deprecatedString( + JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY)); + } + + if (resourceCalculatorPlugin != null) { + totalVirtualMemoryOnTT = resourceCalculatorPlugin.getVirtualMemorySize(); + if (totalVirtualMemoryOnTT <= 0) { + LOG.warn("TaskTracker's totalVmem could not be calculated. " + + "Setting it to " + JobConf.DISABLED_MEMORY_LIMIT); + totalVirtualMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT; + } + totalPhysicalMemoryOnTT = resourceCalculatorPlugin.getPhysicalMemorySize(); + if (totalPhysicalMemoryOnTT <= 0) { + LOG.warn("TaskTracker's totalPmem could not be calculated. 
" + + "Setting it to " + JobConf.DISABLED_MEMORY_LIMIT); + totalPhysicalMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT; + } + } + + mapSlotMemorySizeOnTT = + fConf.getLong( + JobTracker.MAPRED_CLUSTER_MAP_MEMORY_MB_PROPERTY, + JobConf.DISABLED_MEMORY_LIMIT); + reduceSlotSizeMemoryOnTT = + fConf.getLong( + JobTracker.MAPRED_CLUSTER_REDUCE_MEMORY_MB_PROPERTY, + JobConf.DISABLED_MEMORY_LIMIT); + totalMemoryAllottedForTasks = + maxMapSlots * mapSlotMemorySizeOnTT + maxReduceSlots + * reduceSlotSizeMemoryOnTT; + if (totalMemoryAllottedForTasks < 0) { + //adding check for the old keys which might be used by the administrator + //while configuration of the memory monitoring on TT + long memoryAllotedForSlot = fConf.normalizeMemoryConfigValue( + fConf.getLong(JobConf.MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY, + JobConf.DISABLED_MEMORY_LIMIT)); + long limitVmPerTask = fConf.normalizeMemoryConfigValue( + fConf.getLong(JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY, + JobConf.DISABLED_MEMORY_LIMIT)); + if(memoryAllotedForSlot == JobConf.DISABLED_MEMORY_LIMIT) { + totalMemoryAllottedForTasks = JobConf.DISABLED_MEMORY_LIMIT; + } else { + if(memoryAllotedForSlot > limitVmPerTask) { + LOG.info("DefaultMaxVmPerTask is mis-configured. " + + "It shouldn't be greater than task limits"); + totalMemoryAllottedForTasks = JobConf.DISABLED_MEMORY_LIMIT; + } else { + totalMemoryAllottedForTasks = (maxMapSlots + + maxReduceSlots) * (memoryAllotedForSlot/(1024 * 1024)); + } + } + } + if (totalMemoryAllottedForTasks > totalPhysicalMemoryOnTT) { + LOG.info("totalMemoryAllottedForTasks > totalPhysicalMemoryOnTT." + + " Thrashing might happen."); + } else if (totalMemoryAllottedForTasks > totalVirtualMemoryOnTT) { + LOG.info("totalMemoryAllottedForTasks > totalVirtualMemoryOnTT." + + " Thrashing might happen."); + } + + // start the taskMemoryManager thread only if enabled + setTaskMemoryManagerEnabledFlag(); + if (isTaskMemoryManagerEnabled()) { + taskMemoryManager = new TaskMemoryManagerThread(this); + taskMemoryManager.setDaemon(true); + taskMemoryManager.start(); + } + } + + private void setTaskMemoryManagerEnabledFlag() { + if (!ProcfsBasedProcessTree.isAvailable()) { + LOG.info("ProcessTree implementation is missing on this system. " + + "TaskMemoryManager is disabled."); + taskMemoryManagerEnabled = false; + return; + } + + if (fConf.get(TaskMemoryManagerThread.TT_RESERVED_PHYSCIALMEMORY_MB) == null + && totalMemoryAllottedForTasks == JobConf.DISABLED_MEMORY_LIMIT) { + taskMemoryManagerEnabled = false; + LOG.warn("TaskTracker's totalMemoryAllottedForTasks is -1 and " + + "reserved physical memory is not configured. " + + "TaskMemoryManager is disabled."); + return; + } + + taskMemoryManagerEnabled = true; + } + + /** + * Clean-up the task that TaskMemoryMangerThread requests to do so. + * @param tid + * @param wasFailure mark the task as failed or killed. 'failed' if true, + * 'killed' otherwise + * @param diagnosticMsg + */ + synchronized void cleanUpOverMemoryTask(TaskAttemptID tid, boolean wasFailure, + String diagnosticMsg) { + TaskInProgress tip = runningTasks.get(tid); + if (tip != null) { + tip.reportDiagnosticInfo(diagnosticMsg); + try { + purgeTask(tip, wasFailure); // Marking it as failed/killed. + } catch (IOException ioe) { + LOG.warn("Couldn't purge the task of " + tid + ". 
Error : " + ioe); + } + } + } + + /** + * Wrapper method used by TaskTracker to check if {@link NodeHealthCheckerService} + * can be started + * @param conf configuration used to check if service can be started + * @return true if service can be started + */ + private boolean shouldStartHealthMonitor(Configuration conf) { + return NodeHealthCheckerService.shouldRun(conf); + } + + /** + * Wrapper method used to start {@link NodeHealthCheckerService} for + * Task Tracker + * @param conf Configuration used by the service. + */ + private void startHealthMonitor(Configuration conf) { + healthChecker = new NodeHealthCheckerService(conf); + healthChecker.start(); + } + + public List reducesInShuffle() { + + return mapEventsFetcher.reducesInShuffle(); + + } + + /** + * Obtain username from TaskId + * @param taskId + * @return username + */ + public String getUserName(TaskAttemptID taskId) { + return tasks.get(taskId).getJobConf().getUser(); + } + + /** + * Obtain the max number of task slots based on the configuration and CPU + */ + private int getMaxSlots(JobConf conf, int numCpuOnTT, TaskType type) { + int maxSlots; + String cpuToSlots; + if (type == TaskType.MAP) { + maxSlots = conf.getInt("mapred.tasktracker.map.tasks.maximum", 2); + cpuToSlots = conf.get("mapred.tasktracker.cpus.to.maptasks"); + } else { + maxSlots = conf.getInt("mapred.tasktracker.reduce.tasks.maximum", 2); + cpuToSlots = conf.get("mapred.tasktracker.cpus.to.reducetasks"); + } + if (cpuToSlots != null) { + try { + // Format of the configuration is + // numCpu1:maxSlot1, numCpu2:maxSlot2, numCpu3:maxSlot3 + for (String str : cpuToSlots.split(",")) { + String[] pair = str.split(":"); + int numCpu = Integer.parseInt(pair[0].trim()); + int max = Integer.parseInt(pair[1].trim()); + if (numCpu == numCpuOnTT) { + maxSlots = max; + break; + } + } + } catch (Exception e) { + LOG.warn("Error parsing number of CPU to map slots configuration", e); + } + } + return maxSlots; + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/TaskTrackerAction.java b/src/mapred/org/apache/hadoop/mapred/TaskTrackerAction.java new file mode 100644 index 0000000..d7d1d6a --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TaskTrackerAction.java @@ -0,0 +1,117 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableUtils; + +/** + * A generic directive from the {@link org.apache.hadoop.mapred.JobTracker} + * to the {@link org.apache.hadoop.mapred.TaskTracker} to take some 'action'. 
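+ * Concrete actions (launch task, kill task, kill job, commit task, reinitialize + * tracker) are created through the factory method below and, being Writable, + * are serialized to the TaskTracker, typically as part of a heartbeat response.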
+ * + */ +abstract class TaskTrackerAction implements Writable { + + /** + * Ennumeration of various 'actions' that the {@link JobTracker} + * directs the {@link TaskTracker} to perform periodically. + * + */ + public static enum ActionType { + /** Launch a new task. */ + LAUNCH_TASK, + + /** Kill a task. */ + KILL_TASK, + + /** Kill any tasks of this job and cleanup. */ + KILL_JOB, + + /** Reinitialize the tasktracker. */ + REINIT_TRACKER, + + /** Ask a task to save its output. */ + COMMIT_TASK + }; + + /** + * A factory-method to create objects of given {@link ActionType}. + * @param actionType the {@link ActionType} of object to create. + * @return an object of {@link ActionType}. + */ + public static TaskTrackerAction createAction(ActionType actionType) { + TaskTrackerAction action = null; + + switch (actionType) { + case LAUNCH_TASK: + { + action = new LaunchTaskAction(); + } + break; + case KILL_TASK: + { + action = new KillTaskAction(); + } + break; + case KILL_JOB: + { + action = new KillJobAction(); + } + break; + case REINIT_TRACKER: + { + action = new ReinitTrackerAction(); + } + break; + case COMMIT_TASK: + { + action = new CommitTaskAction(); + } + break; + } + + return action; + } + + private ActionType actionType; + + protected TaskTrackerAction(ActionType actionType) { + this.actionType = actionType; + } + + /** + * Return the {@link ActionType}. + * @return the {@link ActionType}. + */ + ActionType getActionId() { + return actionType; + } + + public void write(DataOutput out) throws IOException { + WritableUtils.writeEnum(out, actionType); + } + + public void readFields(DataInput in) throws IOException { + actionType = WritableUtils.readEnum(in, ActionType.class); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/TaskTrackerInstrumentation.java b/src/mapred/org/apache/hadoop/mapred/TaskTrackerInstrumentation.java new file mode 100644 index 0000000..ae584f9 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TaskTrackerInstrumentation.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.File; + +/** + * TaskTrackerInstrumentation defines a number of instrumentation points + * associated with TaskTrackers. By default, the instrumentation points do + * nothing, but subclasses can do arbitrary instrumentation and monitoring at + * these points. + * + * TaskTrackerInstrumentation interfaces are associated uniquely with a + * TaskTracker. We don't want an inner class here, because then subclasses + * wouldn't have direct access to the associated TaskTracker. 
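+ * TaskTrackerMetricsInst, defined later in this patch, is one such subclass; + * it overrides these hooks to publish task counts via the metrics framework.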
+ * + **/ +class TaskTrackerInstrumentation { + + protected final TaskTracker tt; + + public TaskTrackerInstrumentation(TaskTracker t) { + tt = t; + } + + /** + * Invoked when task attempt t succeeds. + * @param t + */ + public void completeTask(TaskAttemptID t) { } + + public void timedoutTask(TaskAttemptID t) { } + + public void taskFailedPing(TaskAttemptID t) { } + + /** + * Called just before task attempt t starts. + * @param stdout the file containing standard out of the new task + * @param stderr the file containing standard error of the new task + */ + public void reportTaskLaunch(TaskAttemptID t, File stdout, File stderr) { } + + /** + * Called when task t has just finished. + * @param t + */ + public void reportTaskEnd(TaskAttemptID t) {} + + /** + * Called when a task changes status. + * @param task the task whose status changed + * @param taskStatus the new status of the task + */ + public void statusUpdate(Task task, TaskStatus taskStatus) {} +} diff --git a/src/mapred/org/apache/hadoop/mapred/TaskTrackerManager.java b/src/mapred/org/apache/hadoop/mapred/TaskTrackerManager.java new file mode 100644 index 0000000..60aa382 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TaskTrackerManager.java @@ -0,0 +1,115 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.IOException; +import java.util.Collection; + +/** + * Manages information about the {@link TaskTracker}s running on a cluster. + * This interface exists primarily to test the {@link JobTracker}, and is not + * intended to be implemented by users. + */ +interface TaskTrackerManager { + + /** + * @return A collection of the {@link TaskTrackerStatus} for the tasktrackers + * being managed. + */ + public Collection taskTrackers(); + + /** + * @return The number of unique hosts running tasktrackers. + */ + public int getNumberOfUniqueHosts(); + + /** + * @return a summary of the cluster's status. + */ + public ClusterStatus getClusterStatus(); + + /** + * Registers a {@link JobInProgressListener} for updates from this + * {@link TaskTrackerManager}. + * @param listener the {@link JobInProgressListener} to add + */ + public void addJobInProgressListener(JobInProgressListener listener); + + /** + * Unregisters a {@link JobInProgressListener} from this + * {@link TaskTrackerManager}. + * @param listener the {@link JobInProgressListener} to remove + */ + public void removeJobInProgressListener(JobInProgressListener listener); + + /** + * Return the {@link QueueManager} which manages the queues in this + * {@link TaskTrackerManager}.
+ * + * @return the {@link QueueManager} + */ + public QueueManager getQueueManager(); + + /** + * Return the current heartbeat interval that's used by {@link TaskTracker}s. + * + * @return the heartbeat interval used by {@link TaskTracker}s + */ + public int getNextHeartbeatInterval(); + + /** + * Kill the job identified by jobid + * + * @param jobid + * @throws IOException + */ + public void killJob(JobID jobid) + throws IOException; + + /** + * Obtain the job object identified by jobid + * + * @param jobid + * @return jobInProgress object + */ + public JobInProgress getJob(JobID jobid); + + /** + * Initialize the Job + * + * @param job JobInProgress object + */ + public void initJob(JobInProgress job); + + /** + * Fail a job. + * + * @param job JobInProgress object + */ + public void failJob(JobInProgress job); + + /** + * Mark the task attempt identified by taskid to be killed + * + * @param taskid task to kill + * @param shouldFail whether to count the task as failed + * @return true if the task was found and successfully marked to kill + */ + public boolean killTask(TaskAttemptID taskid, boolean shouldFail) + throws IOException; +} diff --git a/src/mapred/org/apache/hadoop/mapred/TaskTrackerMetricsInst.java b/src/mapred/org/apache/hadoop/mapred/TaskTrackerMetricsInst.java new file mode 100644 index 0000000..d68fdce --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TaskTrackerMetricsInst.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred; + +import org.apache.hadoop.metrics.MetricsContext; +import org.apache.hadoop.metrics.MetricsRecord; +import org.apache.hadoop.metrics.MetricsUtil; +import org.apache.hadoop.metrics.Updater; +import org.apache.hadoop.metrics.jvm.JvmMetrics; + +class TaskTrackerMetricsInst extends TaskTrackerInstrumentation + implements Updater { + private final MetricsRecord metricsRecord; + private int numCompletedTasks = 0; + private int timedoutTasks = 0; + private int tasksFailedPing = 0; + + public TaskTrackerMetricsInst(TaskTracker t) { + super(t); + JobConf conf = tt.getJobConf(); + String sessionId = conf.getSessionId(); + // Initiate Java VM Metrics + JvmMetrics.init("TaskTracker", sessionId); + // Create a record for Task Tracker metrics + MetricsContext context = MetricsUtil.getContext("mapred"); + metricsRecord = MetricsUtil.createRecord(context, "tasktracker"); //guaranteed never null + metricsRecord.setTag("sessionId", sessionId); + context.registerUpdater(this); + } + + @Override + public synchronized void completeTask(TaskAttemptID t) { + ++numCompletedTasks; + } + + @Override + public synchronized void timedoutTask(TaskAttemptID t) { + ++timedoutTasks; + } + + @Override + public synchronized void taskFailedPing(TaskAttemptID t) { + ++tasksFailedPing; + } + + /** + * Since this object is a registered updater, this method will be called + * periodically, e.g. every 5 seconds. + */ + @Override + public void doUpdates(MetricsContext unused) { + synchronized (this) { + metricsRecord.setMetric("maps_running", tt.mapTotal); + metricsRecord.setMetric("reduces_running", tt.reduceTotal); + metricsRecord.setMetric("mapTaskSlots", (short)tt.getMaxCurrentMapTasks()); + metricsRecord.setMetric("reduceTaskSlots", + (short)tt.getMaxCurrentReduceTasks()); + metricsRecord.incrMetric("tasks_completed", numCompletedTasks); + metricsRecord.incrMetric("tasks_failed_timeout", timedoutTasks); + metricsRecord.incrMetric("tasks_failed_ping", tasksFailedPing); + + numCompletedTasks = 0; + timedoutTasks = 0; + tasksFailedPing = 0; + } + metricsRecord.update(); + } + + +} diff --git a/src/mapred/org/apache/hadoop/mapred/TaskTrackerStatus.java b/src/mapred/org/apache/hadoop/mapred/TaskTrackerStatus.java new file mode 100644 index 0000000..e6cd882 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TaskTrackerStatus.java @@ -0,0 +1,688 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.mapred; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.io.*; +import org.apache.hadoop.mapred.TaskStatus.State; + +import java.io.*; +import java.util.*; + +/************************************************** + * A TaskTrackerStatus is a MapReduce primitive. Keeps + * info on a TaskTracker. The JobTracker maintains a set + * of the most recent TaskTrackerStatus objects for each + * unique TaskTracker it knows about. + * + * This is NOT a public interface! + **************************************************/ +public class TaskTrackerStatus implements Writable { + public static final Log LOG = LogFactory.getLog(TaskTrackerStatus.class); + public static final int UNAVAILABLE = -1; + + static { // register a ctor + WritableFactories.setFactory + (TaskTrackerStatus.class, + new WritableFactory() { + public Writable newInstance() { return new TaskTrackerStatus(); } + }); + } + + String trackerName; + String host; + int httpPort; + int failures; + List taskReports; + + volatile long lastSeen; + private int maxMapTasks; + private int maxReduceTasks; + private TaskTrackerHealthStatus healthStatus; + + /** + * Class representing a collection of resources on this tasktracker. + */ + static class ResourceStatus implements Writable { + + private long totalVirtualMemory; + private long totalPhysicalMemory; + private long availableVirtualMemory; + private long availablePhysicalMemory; + private long mapSlotMemorySizeOnTT; + private long reduceSlotMemorySizeOnTT; + private long availableSpace; + private int numProcessors = UNAVAILABLE; + private long cumulativeCpuTime = UNAVAILABLE; // in millisecond + private long cpuFrequency = UNAVAILABLE; // in kHz + private float cpuUsage = UNAVAILABLE; // in % + + ResourceStatus() { + totalVirtualMemory = JobConf.DISABLED_MEMORY_LIMIT; + totalPhysicalMemory = JobConf.DISABLED_MEMORY_LIMIT; + availableVirtualMemory = JobConf.DISABLED_MEMORY_LIMIT; + availablePhysicalMemory = JobConf.DISABLED_MEMORY_LIMIT; + mapSlotMemorySizeOnTT = JobConf.DISABLED_MEMORY_LIMIT; + reduceSlotMemorySizeOnTT = JobConf.DISABLED_MEMORY_LIMIT; + availableSpace = Long.MAX_VALUE; + } + + /** + * Set the maximum amount of virtual memory on the tasktracker. + * + * @param vmem maximum amount of virtual memory on the tasktracker in bytes. + */ + void setTotalVirtualMemory(long totalMem) { + totalVirtualMemory = totalMem; + } + + /** + * Get the maximum amount of virtual memory on the tasktracker. + * + * If this is {@link JobConf#DISABLED_MEMORY_LIMIT}, it should be ignored + * and not used in any computation. + * + * @return the maximum amount of virtual memory on the tasktracker in bytes. + */ + long getTotalVirtualMemory() { + return totalVirtualMemory; + } + + /** + * Set the maximum amount of physical memory on the tasktracker. + * + * @param totalRAM maximum amount of physical memory on the tasktracker in + * bytes. + */ + void setTotalPhysicalMemory(long totalRAM) { + totalPhysicalMemory = totalRAM; + } + + /** + * Get the maximum amount of physical memory on the tasktracker. + * + * If this is {@link JobConf#DISABLED_MEMORY_LIMIT}, it should be ignored + * and not used in any computation. + * + * @return maximum amount of physical memory on the tasktracker in bytes. + */ + long getTotalPhysicalMemory() { + return totalPhysicalMemory; + } + + /** + * Set the amount of available virtual memory on the tasktracker. 
+ * + * @param vmem amount of available virtual memory on the tasktracker + * in bytes. + */ + void setAvailableVirtualMemory(long availableMem) { + availableVirtualMemory = availableMem; + } + + /** + * Get the amount of available virtual memory on the tasktracker. + * + * If this is {@link JobConf#DISABLED_MEMORY_LIMIT}, it should be ignored + * and not used in any computation. + * + * @return the amount of available virtual memory on the tasktracker + * in bytes. + */ + long getAvailabelVirtualMemory() { + return availableVirtualMemory; + } + + /** + * Set the amount of available physical memory on the tasktracker. + * + * @param availableRAM amount of available physical memory on the + * tasktracker in bytes. + */ + void setAvailablePhysicalMemory(long availableRAM) { + availablePhysicalMemory = availableRAM; + } + + /** + * Get the amount of available physical memory on the tasktracker. + * + * If this is {@link JobConf#DISABLED_MEMORY_LIMIT}, it should be ignored + * and not used in any computation. + * + * @return amount of available physical memory on the tasktracker in bytes. + */ + long getAvailablePhysicalMemory() { + return availablePhysicalMemory; + } + + /** + * Set the memory size of each map slot on this TT. This will be used by JT + * for accounting more slots for jobs that use more memory. + * + * @param mem + */ + void setMapSlotMemorySizeOnTT(long mem) { + mapSlotMemorySizeOnTT = mem; + } + + /** + * Get the memory size of each map slot on this TT. See + * {@link #setMapSlotMemorySizeOnTT(long)} + * + * @return + */ + long getMapSlotMemorySizeOnTT() { + return mapSlotMemorySizeOnTT; + } + + /** + * Set the memory size of each reduce slot on this TT. This will be used by + * JT for accounting more slots for jobs that use more memory. + * + * @param mem + */ + void setReduceSlotMemorySizeOnTT(long mem) { + reduceSlotMemorySizeOnTT = mem; + } + + /** + * Get the memory size of each reduce slot on this TT. See + * {@link #setReduceSlotMemorySizeOnTT(long)} + * + * @return + */ + long getReduceSlotMemorySizeOnTT() { + return reduceSlotMemorySizeOnTT; + } + + /** + * Set the available disk space on the TT + * @param availSpace + */ + void setAvailableSpace(long availSpace) { + availableSpace = availSpace; + } + + /** + * Will return LONG_MAX if space hasn't been measured yet. + * @return bytes of available local disk space on this tasktracker. + */ + long getAvailableSpace() { + return availableSpace; + } + + /** + * Set the CPU frequency of this TaskTracker + * If the input is not a valid number, it will be set to UNAVAILABLE + * + * @param cpuFrequency CPU frequency in kHz + */ + public void setCpuFrequency(long cpuFrequency) { + this.cpuFrequency = cpuFrequency > 0 ? + cpuFrequency : UNAVAILABLE; + } + + /** + * Get the CPU frequency of this TaskTracker + * Will return UNAVAILABLE if it cannot be obtained + * + * @return CPU frequency in kHz + */ + public long getCpuFrequency() { + return cpuFrequency; + } + + /** + * Set the number of processors on this TaskTracker + * If the input is not a valid number, it will be set to UNAVAILABLE + * + * @param numProcessors number of processors + */ + public void setNumProcessors(int numProcessors) { + this.numProcessors = numProcessors > 0 ? 
+ numProcessors : UNAVAILABLE; + } + + /** + * Get the number of processors on this TaskTracker + * Will return UNAVAILABLE if it cannot be obtained + * + * @return number of processors + */ + public int getNumProcessors() { + return numProcessors; + } + + /** + * Set the cumulative CPU time on this TaskTracker since it is up + * It can be set to UNAVAILABLE if it is currently unavailable. + * + * @param cumulativeCpuTime Used CPU time in millisecond + */ + public void setCumulativeCpuTime(long cumulativeCpuTime) { + this.cumulativeCpuTime = cumulativeCpuTime > 0 ? + cumulativeCpuTime : UNAVAILABLE; + } + + /** + * Get the cumulative CPU time on this TaskTracker since it is up + * Will return UNAVAILABLE if it cannot be obtained + * + * @return used CPU time in milliseconds + */ + public long getCumulativeCpuTime() { + return cumulativeCpuTime; + } + + /** + * Set the CPU usage on this TaskTracker + * + * @param cpuUsage CPU usage in % + */ + public void setCpuUsage(float cpuUsage) { + this.cpuUsage = cpuUsage; + } + + /** + * Get the CPU usage on this TaskTracker + * Will return UNAVAILABLE if it cannot be obtained + * + * @return CPU usage in % + */ + public float getCpuUsage() { + return cpuUsage; + } + + public void write(DataOutput out) throws IOException { + WritableUtils.writeVLong(out, totalVirtualMemory); + WritableUtils.writeVLong(out, totalPhysicalMemory); + WritableUtils.writeVLong(out, availableVirtualMemory); + WritableUtils.writeVLong(out, availablePhysicalMemory); + WritableUtils.writeVLong(out, mapSlotMemorySizeOnTT); + WritableUtils.writeVLong(out, reduceSlotMemorySizeOnTT); + WritableUtils.writeVLong(out, availableSpace); + WritableUtils.writeVLong(out, cumulativeCpuTime); + WritableUtils.writeVLong(out, cpuFrequency); + WritableUtils.writeVInt(out, numProcessors); + out.writeFloat(getCpuUsage()); + } + + public void readFields(DataInput in) throws IOException { + totalVirtualMemory = WritableUtils.readVLong(in); + totalPhysicalMemory = WritableUtils.readVLong(in); + availableVirtualMemory = WritableUtils.readVLong(in); + availablePhysicalMemory = WritableUtils.readVLong(in); + mapSlotMemorySizeOnTT = WritableUtils.readVLong(in); + reduceSlotMemorySizeOnTT = WritableUtils.readVLong(in); + availableSpace = WritableUtils.readVLong(in); + cumulativeCpuTime = WritableUtils.readVLong(in); + cpuFrequency = WritableUtils.readVLong(in); + numProcessors = WritableUtils.readVInt(in); + setCpuUsage(in.readFloat()); + } + } + + private ResourceStatus resStatus; + + /** + */ + public TaskTrackerStatus() { + taskReports = new ArrayList(); + resStatus = new ResourceStatus(); + this.healthStatus = new TaskTrackerHealthStatus(); + } + + TaskTrackerStatus(String trackerName, String host) { + this(); + this.trackerName = trackerName; + this.host = host; + } + + /** + */ + public TaskTrackerStatus(String trackerName, String host, + int httpPort, List taskReports, + int failures, int maxMapTasks, + int maxReduceTasks) { + this.trackerName = trackerName; + this.host = host; + this.httpPort = httpPort; + + this.taskReports = new ArrayList(taskReports); + this.failures = failures; + this.maxMapTasks = maxMapTasks; + this.maxReduceTasks = maxReduceTasks; + this.resStatus = new ResourceStatus(); + this.healthStatus = new TaskTrackerHealthStatus(); + } + + /** + */ + public String getTrackerName() { + return trackerName; + } + /** + */ + public String getHost() { + return host; + } + + /** + * Get the port that this task tracker is serving http requests on. 
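+ * Reduce tasks typically fetch map output from this port over HTTP; on the + * tracker side those requests are handled by the MapOutputServlet running in + * the TaskTracker's Jetty server.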
+ * @return the http port + */ + public int getHttpPort() { + return httpPort; + } + + /** + * Get the number of tasks that have failed on this tracker. + * @return The number of failed tasks + */ + public int getFailures() { + return failures; + } + + /** + * Get the current tasks at the TaskTracker. + * Tasks are tracked by a {@link TaskStatus} object. + * + * @return a list of {@link TaskStatus} representing + * the current tasks at the TaskTracker. + */ + public List getTaskReports() { + return taskReports; + } + + /** + * Is the given task considered as 'running' ? + * @param taskStatus + * @return + */ + private boolean isTaskRunning(TaskStatus taskStatus) { + TaskStatus.State state = taskStatus.getRunState(); + return (state == State.RUNNING || state == State.UNASSIGNED || + taskStatus.inTaskCleanupPhase()); + } + + /** + * Get the number of running map tasks. + * @return the number of running map tasks + */ + public int countMapTasks() { + int mapCount = 0; + for (Iterator it = taskReports.iterator(); it.hasNext();) { + TaskStatus ts = (TaskStatus) it.next(); + if (ts.getIsMap() && isTaskRunning(ts)) { + mapCount++; + } + } + return mapCount; + } + + /** + * Get the number of occupied map slots. + * @return the number of occupied map slots + */ + public int countOccupiedMapSlots() { + int mapSlotsCount = 0; + for (Iterator it = taskReports.iterator(); it.hasNext();) { + TaskStatus ts = (TaskStatus) it.next(); + if (ts.getIsMap() && isTaskRunning(ts)) { + mapSlotsCount += ts.getNumSlots(); + } + } + return mapSlotsCount; + } + + /** + * Get available map slots. + * @return available map slots + */ + public int getAvailableMapSlots() { + return getMaxMapSlots() - countOccupiedMapSlots(); + } + + /** + * Get the number of running reduce tasks. + * @return the number of running reduce tasks + */ + public int countReduceTasks() { + int reduceCount = 0; + for (Iterator it = taskReports.iterator(); it.hasNext();) { + TaskStatus ts = (TaskStatus) it.next(); + if ((!ts.getIsMap()) && isTaskRunning(ts)) { + reduceCount++; + } + } + return reduceCount; + } + + /** + * Get the number of occupied reduce slots. + * @return the number of occupied reduce slots + */ + public int countOccupiedReduceSlots() { + int reduceSlotsCount = 0; + for (Iterator it = taskReports.iterator(); it.hasNext();) { + TaskStatus ts = (TaskStatus) it.next(); + if ((!ts.getIsMap()) && isTaskRunning(ts)) { + reduceSlotsCount += ts.getNumSlots(); + } + } + return reduceSlotsCount; + } + + /** + * Get available reduce slots. + * @return available reduce slots + */ + public int getAvailableReduceSlots() { + return getMaxReduceSlots() - countOccupiedReduceSlots(); + } + + + /** + */ + public long getLastSeen() { + return lastSeen; + } + /** + */ + public void setLastSeen(long lastSeen) { + this.lastSeen = lastSeen; + } + + /** + * Get the maximum map slots for this node. + * @return the maximum map slots for this node + */ + public int getMaxMapSlots() { + return maxMapTasks; + } + + /** + * Get the maximum reduce slots for this node. + * @return the maximum reduce slots for this node + */ + public int getMaxReduceSlots() { + return maxReduceTasks; + } + + /** + * Return the {@link ResourceStatus} object configured with this + * status. + * + * @return the resource status + */ + ResourceStatus getResourceStatus() { + return resStatus; + } + + /** + * Returns health status of the task tracker. 
+ * @return health status of Task Tracker + */ + public TaskTrackerHealthStatus getHealthStatus() { + return healthStatus; + } + + /** + * Static class which encapsulates the Node health + * related fields. + * + */ + static class TaskTrackerHealthStatus implements Writable { + + private boolean isNodeHealthy; + + private String healthReport; + + private long lastReported; + + public TaskTrackerHealthStatus(boolean isNodeHealthy, String healthReport, + long lastReported) { + this.isNodeHealthy = isNodeHealthy; + this.healthReport = healthReport; + this.lastReported = lastReported; + } + + public TaskTrackerHealthStatus() { + this.isNodeHealthy = true; + this.healthReport = ""; + this.lastReported = System.currentTimeMillis(); + } + + /** + * Sets whether or not a task tracker is healthy, based on the + * output from the node health script. + * + * @param isNodeHealthy + */ + void setNodeHealthy(boolean isNodeHealthy) { + this.isNodeHealthy = isNodeHealthy; + } + + /** + * Returns whether the node is healthy or not, based on the result from + * the node health script. + * + * @return true if the node is healthy. + */ + boolean isNodeHealthy() { + return isNodeHealthy; + } + + /** + * Sets the health report based on the output from the health script. + * + * @param healthReport + * String listing cause of failure. + */ + void setHealthReport(String healthReport) { + this.healthReport = healthReport; + } + + /** + * Returns the health report of the node, if any. The health report is + * only populated when the node is not healthy. + * + * @return health report of the node if any + */ + String getHealthReport() { + return healthReport; + } + + /** + * Sets when the TT last got its health information + * from the node health monitoring service. + * + * @param lastReported last reported time by node + * health script + */ + public void setLastReported(long lastReported) { + this.lastReported = lastReported; + } + + /** + * Gets time of most recent node health update. + * + * @return time stamp of most recent health update.
+ */ + public long getLastReported() { + return lastReported; + } + + @Override + public void readFields(DataInput in) throws IOException { + isNodeHealthy = in.readBoolean(); + healthReport = Text.readString(in); + lastReported = in.readLong(); + } + + @Override + public void write(DataOutput out) throws IOException { + out.writeBoolean(isNodeHealthy); + Text.writeString(out, healthReport); + out.writeLong(lastReported); + } + + } + + /////////////////////////////////////////// + // Writable + /////////////////////////////////////////// + public void write(DataOutput out) throws IOException { + Text.writeString(out, trackerName); + Text.writeString(out, host); + out.writeInt(httpPort); + out.writeInt(failures); + out.writeInt(maxMapTasks); + out.writeInt(maxReduceTasks); + resStatus.write(out); + out.writeInt(taskReports.size()); + + for (TaskStatus taskStatus : taskReports) { + TaskStatus.writeTaskStatus(out, taskStatus); + } + getHealthStatus().write(out); + } + + public void readFields(DataInput in) throws IOException { + this.trackerName = Text.readString(in); + this.host = Text.readString(in); + this.httpPort = in.readInt(); + this.failures = in.readInt(); + this.maxMapTasks = in.readInt(); + this.maxReduceTasks = in.readInt(); + resStatus.readFields(in); + taskReports.clear(); + int numTasks = in.readInt(); + + for (int i = 0; i < numTasks; i++) { + taskReports.add(TaskStatus.readTaskStatus(in)); + } + getHealthStatus().readFields(in); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/TaskUmbilicalProtocol.java b/src/mapred/org/apache/hadoop/mapred/TaskUmbilicalProtocol.java new file mode 100644 index 0000000..e3c7da1 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TaskUmbilicalProtocol.java @@ -0,0 +1,157 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; + +import org.apache.hadoop.ipc.VersionedProtocol; +import org.apache.hadoop.mapred.JvmTask; + +/** Protocol that task child process uses to contact its parent process. The + * parent is a daemon which which polls the central master for a new map or + * reduce task and runs it as a child process. All communication between child + * and parent is via this protocol. 
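+ * A child typically calls getTask() once at startup, then statusUpdate() and + * ping() periodically, and finally done() or commitPending()/canCommit(), + * falling back to the error-reporting calls below if something goes wrong.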
*/ +interface TaskUmbilicalProtocol extends VersionedProtocol { + + /** + * Changed the version to 2, since we have a new method getMapOutputs + * Changed version to 3 to have progress() return a boolean + * Changed the version to 4, since we have replaced + * TaskUmbilicalProtocol.progress(String, float, String, + * org.apache.hadoop.mapred.TaskStatus.Phase, Counters) + * with statusUpdate(String, TaskStatus) + * + * Version 5 changed counters representation for HADOOP-2248 + * Version 6 changes the TaskStatus representation for HADOOP-2208 + * Version 7 changes the done api (via HADOOP-3140). It now expects whether + * or not the task's output needs to be promoted. + * Version 8 changes {job|tip|task}id's to use their corresponding + * objects rather than strings. + * Version 9 changes the counter representation for HADOOP-1915 + * Version 10 changed the TaskStatus format and added reportNextRecordRange + * for HADOOP-153 + * Version 11 Adds RPCs for task commit as part of HADOOP-3150 + * Version 12 getMapCompletionEvents() now also indicates if the events are + * stale or not. Hence the return type is a class that + * encapsulates the events and whether to reset events index. + * Version 13 changed the getTask method signature for HADOOP-249 + * Version 14 changed the getTask method signature for HADOOP-4232 + * Version 15 Adds FAILED_UNCLEAN and KILLED_UNCLEAN states for HADOOP-4759 + * Version 16 Added numRequiredSlots to TaskStatus for MAPREDUCE-516 + * Version 17 Change in signature of getTask() for HADOOP-5488 + * Version 18 Added fatalError for child to communicate fatal errors to TT + * */ + + public static final long versionID = 18L; + + /** + * Called when a child task process starts, to get its task. + * @param context the JvmContext of the JVM w.r.t the TaskTracker that + * launched it + * @return Task object + * @throws IOException + */ + JvmTask getTask(JvmContext context) throws IOException; + + /** + * Report child's progress to parent. + * + * @param taskId task-id of the child + * @param taskStatus status of the child + * @throws IOException + * @throws InterruptedException + * @return True if the task is known + */ + boolean statusUpdate(TaskAttemptID taskId, TaskStatus taskStatus) + throws IOException, InterruptedException; + + /** Report error messages back to parent. Calls should be sparing, since all + * such messages are held in the job tracker. + * @param taskid the id of the task involved + * @param trace the text to report + */ + void reportDiagnosticInfo(TaskAttemptID taskid, String trace) throws IOException; + + /** + * Report the record range which is going to process next by the Task. + * @param taskid the id of the task involved + * @param range the range of record sequence nos + * @throws IOException + */ + void reportNextRecordRange(TaskAttemptID taskid, SortedRanges.Range range) + throws IOException; + + /** Periodically called by child to check if parent is still alive. + * @return True if the task is known + */ + boolean ping(TaskAttemptID taskid) throws IOException; + + /** Report that the task is successfully completed. Failure is assumed if + * the task process exits without calling this. + * @param taskid task's id + */ + void done(TaskAttemptID taskid) throws IOException; + + /** + * Report that the task is complete, but its commit is pending. 
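+ * After this call the child is expected to poll canCommit() below and promote + * its output only once the commit is approved (the RPCs added in version 11 above).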
+ * + * @param taskId task's id + * @param taskStatus status of the child + * @throws IOException + */ + void commitPending(TaskAttemptID taskId, TaskStatus taskStatus) + throws IOException, InterruptedException; + + /** + * Polling to know whether the task can go ahead with its commit. + * @param taskid + * @return true if the task may commit its output + * @throws IOException + */ + boolean canCommit(TaskAttemptID taskid) throws IOException; + + /** Report that a reduce-task couldn't shuffle map-outputs.*/ + void shuffleError(TaskAttemptID taskId, String message) throws IOException; + + /** Report that the task encountered a local filesystem error.*/ + void fsError(TaskAttemptID taskId, String message) throws IOException; + + /** Report that the task encountered a fatal error.*/ + void fatalError(TaskAttemptID taskId, String message) throws IOException; + + /** Called by a reduce task to get the map output locations for finished maps. + * Returns an update centered around the map-task-completion-events. + * The update also piggybacks information on whether the copy of events at the + * task-tracker has changed or not. This will trigger some action at the + * child-process. + * + * @param jobId the id of the job whose map completion events are requested + * @param fromIndex the index starting from which the locations should be + * fetched + * @param maxLocs the max number of locations to fetch + * @param id The attempt id of the task that is trying to communicate + * @return A {@link MapTaskCompletionEventsUpdate} + */ + MapTaskCompletionEventsUpdate getMapCompletionEvents(JobID jobId, + int fromIndex, + int maxLocs, + TaskAttemptID id) + throws IOException; + +} diff --git a/src/mapred/org/apache/hadoop/mapred/Task_Counter.properties b/src/mapred/org/apache/hadoop/mapred/Task_Counter.properties new file mode 100644 index 0000000..3c7995e --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/Task_Counter.properties @@ -0,0 +1,19 @@ +# ResourceBundle properties file for Map-Reduce counters + +CounterGroupName= Map-Reduce Framework + +MAP_INPUT_RECORDS.name= Map input records +MAP_INPUT_BYTES.name= Map input bytes +MAP_OUTPUT_RECORDS.name= Map output records +MAP_OUTPUT_BYTES.name= Map output bytes +MAP_SKIPPED_RECORDS.name= Map skipped records +COMBINE_INPUT_RECORDS.name= Combine input records +COMBINE_OUTPUT_RECORDS.name= Combine output records +REDUCE_INPUT_GROUPS.name= Reduce input groups +REDUCE_SHUFFLE_BYTES.name= Reduce shuffle bytes +REDUCE_INPUT_RECORDS.name= Reduce input records +REDUCE_OUTPUT_RECORDS.name= Reduce output records +REDUCE_SKIPPED_RECORDS.name= Reduce skipped records +REDUCE_SKIPPED_GROUPS.name= Reduce skipped groups +SPILLED_RECORDS.name= Spilled Records + diff --git a/src/mapred/org/apache/hadoop/mapred/TextInputFormat.java b/src/mapred/org/apache/hadoop/mapred/TextInputFormat.java new file mode 100644 index 0000000..6ded9be --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TextInputFormat.java @@ -0,0 +1,99 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.*; + +import org.apache.hadoop.fs.*; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.compress.*; + +/** An {@link InputFormat} for plain text files. Files are broken into lines. + * Either linefeed or carriage-return are used to signal end of line. Keys are + * the position in the file, and values are the line of text.. + * @deprecated Use {@link org.apache.hadoop.mapreduce.lib.input.TextInputFormat} + * instead. + */ +@Deprecated +public class TextInputFormat extends FileInputFormat + implements JobConfigurable { + + private CompressionCodecFactory compressionCodecs = null; + + public void configure(JobConf conf) { + compressionCodecs = new CompressionCodecFactory(conf); + } + + protected boolean isSplitable(FileSystem fs, Path file) { + return compressionCodecs.getCodec(file) == null; + } + + static class EmptyRecordReader implements RecordReader { + @Override + public void close() throws IOException {} + @Override + public LongWritable createKey() { + return new LongWritable(); + } + @Override + public Text createValue() { + return new Text(); + } + @Override + public long getPos() throws IOException { + return 0; + } + @Override + public float getProgress() throws IOException { + return 0; + } + @Override + public boolean next(LongWritable key, Text value) throws IOException { + return false; + } + } + + public RecordReader getRecordReader( + InputSplit genericSplit, JobConf job, + Reporter reporter) + throws IOException { + + reporter.setStatus(genericSplit.toString()); + + // This change is required for CombineFileInputFormat to work with .gz files. + + // Check if we should throw away this split + long start = ((FileSplit)genericSplit).getStart(); + Path file = ((FileSplit)genericSplit).getPath(); + final CompressionCodec codec = compressionCodecs.getCodec(file); + if (codec != null && start != 0) { + // (codec != null) means the file is not splittable. + + // In that case, start should be 0, otherwise this is an extra split created + // by CombineFileInputFormat. We should ignore all such extra splits. + // + // Note that for the first split where start = 0, LineRecordReader will + // ignore the end pos and read the whole file. + return new EmptyRecordReader(); + } + + return new LineRecordReader(job, (FileSplit) genericSplit); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/TextOutputFormat.java b/src/mapred/org/apache/hadoop/mapred/TextOutputFormat.java new file mode 100644 index 0000000..70029b4 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TextOutputFormat.java @@ -0,0 +1,140 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.io.UnsupportedEncodingException; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.FSDataOutputStream; + +import org.apache.hadoop.io.NullWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.compress.CompressionCodec; +import org.apache.hadoop.io.compress.GzipCodec; +import org.apache.hadoop.util.*; + +/** An {@link OutputFormat} that writes plain text files. + * @deprecated Use + * {@link org.apache.hadoop.mapreduce.lib.output.TextOutputFormat} instead. + */ +@Deprecated +public class TextOutputFormat extends FileOutputFormat { + + protected static class LineRecordWriter + implements RecordWriter { + private static final String utf8 = "UTF-8"; + private static final byte[] newline; + static { + try { + newline = "\n".getBytes(utf8); + } catch (UnsupportedEncodingException uee) { + throw new IllegalArgumentException("can't find " + utf8 + " encoding"); + } + } + + protected DataOutputStream out; + private final byte[] keyValueSeparator; + + public LineRecordWriter(DataOutputStream out, String keyValueSeparator) { + this.out = out; + try { + this.keyValueSeparator = keyValueSeparator.getBytes(utf8); + } catch (UnsupportedEncodingException uee) { + throw new IllegalArgumentException("can't find " + utf8 + " encoding"); + } + } + + public LineRecordWriter(DataOutputStream out) { + this(out, "\t"); + } + + /** + * Write the object to the byte stream, handling Text as a special + * case. 
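+ * Text values are written directly from their backing byte array; any other + * type is converted with toString() and encoded as UTF-8.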
+ * @param o the object to print + * @throws IOException if the write throws, we pass it on + */ + private void writeObject(Object o) throws IOException { + if (o instanceof Text) { + Text to = (Text) o; + out.write(to.getBytes(), 0, to.getLength()); + } else { + out.write(o.toString().getBytes(utf8)); + } + } + + public synchronized void write(K key, V value) + throws IOException { + + boolean nullKey = key == null || key instanceof NullWritable; + boolean nullValue = value == null || value instanceof NullWritable; + if (nullKey && nullValue) { + return; + } + if (!nullKey) { + writeObject(key); + } + if (!(nullKey || nullValue)) { + out.write(keyValueSeparator); + } + if (!nullValue) { + writeObject(value); + } + out.write(newline); + } + + public synchronized void close(Reporter reporter) throws IOException { + out.close(); + } + } + + public RecordWriter getRecordWriter(FileSystem ignored, + JobConf job, + String name, + Progressable progress) + throws IOException { + boolean isCompressed = getCompressOutput(job); + String keyValueSeparator = job.get("mapred.textoutputformat.separator", + "\t"); + if (!isCompressed) { + Path file = FileOutputFormat.getTaskOutputPath(job, name); + FileSystem fs = file.getFileSystem(job); + FSDataOutputStream fileOut = fs.create(file, progress); + return new LineRecordWriter(fileOut, keyValueSeparator); + } else { + Class codecClass = + getOutputCompressorClass(job, GzipCodec.class); + // create the named codec + CompressionCodec codec = ReflectionUtils.newInstance(codecClass, job); + // build the filename including the extension + Path file = + FileOutputFormat.getTaskOutputPath(job, + name + codec.getDefaultExtension()); + FileSystem fs = file.getFileSystem(job); + FSDataOutputStream fileOut = fs.create(file, progress); + return new LineRecordWriter(new DataOutputStream + (codec.createOutputStream(fileOut)), + keyValueSeparator); + } + } +} + diff --git a/src/mapred/org/apache/hadoop/mapred/Utils.java b/src/mapred/org/apache/hadoop/mapred/Utils.java new file mode 100644 index 0000000..21d44a6 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/Utils.java @@ -0,0 +1,60 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; + +/** + * A utility class. It provides + * - file-util + * - A path filter utility to filter out output/part files in the output dir + */ +public class Utils { + public static class OutputFileUtils { + /** + * This class filters output(part) files from the given directory + * It does not accept files with filenames _logs and _SUCCESS. 
+ * This can be used to list paths of output directory as follows: + * Path[] fileList = FileUtil.stat2Paths(fs.listStatus(outDir, + * new OutputFilesFilter())); + */ + public static class OutputFilesFilter extends OutputLogFilter { + public boolean accept(Path path) { + return super.accept(path) + && !FileOutputCommitter.SUCCEEDED_FILE_NAME + .equals(path.getName()); + } + } + + /** + * This class filters log files from directory given + * It doesnt accept paths having _logs. + * This can be used to list paths of output directory as follows: + * Path[] fileList = FileUtil.stat2Paths(fs.listStatus(outDir, + * new OutputLogFilter())); + */ + public static class OutputLogFilter implements PathFilter { + public boolean accept(Path path) { + return !(path.toString().contains("_logs")); + } + } + } +} + diff --git a/src/mapred/org/apache/hadoop/mapred/jobcontrol/Job.java b/src/mapred/org/apache/hadoop/mapred/jobcontrol/Job.java new file mode 100644 index 0000000..9801d2c --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/jobcontrol/Job.java @@ -0,0 +1,387 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.jobcontrol; + + +import java.io.IOException; +import java.util.ArrayList; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.mapred.FileInputFormat; +import org.apache.hadoop.mapred.JobClient; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.JobID; +import org.apache.hadoop.mapred.RunningJob; +import org.apache.hadoop.util.StringUtils; + +/** This class encapsulates a MapReduce job and its dependency. It monitors + * the states of the depending jobs and updates the state of this job. + * A job starts in the WAITING state. If it does not have any depending jobs, or + * all of the depending jobs are in SUCCESS state, then the job state will become + * READY. If any depending jobs fail, the job will fail too. + * When in READY state, the job can be submitted to Hadoop for execution, with + * the state changing into RUNNING state. From RUNNING state, the job can get into + * SUCCESS or FAILED state, depending the status of the job execution. 
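+ *
+ * For example, a job that must wait for two other jobs can be set up
+ * roughly as follows (confA, confB and confC stand for fully configured
+ * JobConf instances):
+ * {@code
+ *   Job jobA = new Job(confA);
+ *   Job jobB = new Job(confB);
+ *   ArrayList<Job> deps = new ArrayList<Job>();
+ *   deps.add(jobA);
+ *   deps.add(jobB);
+ *   Job jobC = new Job(confC, deps); // jobC stays WAITING until jobA and jobB succeed
+ * }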
+ * + */ + +public class Job { + + // A job will be in one of the following states + final public static int SUCCESS = 0; + final public static int WAITING = 1; + final public static int RUNNING = 2; + final public static int READY = 3; + final public static int FAILED = 4; + final public static int DEPENDENT_FAILED = 5; + + + private JobConf theJobConf; + private int state; + private String jobID; // assigned and used by JobControl class + private JobID mapredJobID; // the job ID assigned by map/reduce + private String jobName; // external name, assigned/used by client app + private String message; // some info for human consumption, + // e.g. the reason why the job failed + private ArrayList dependingJobs; // the jobs the current job depends on + + private JobClient jc = null; // the map reduce job client + + /** + * Construct a job. + * @param jobConf a mapred job configuration representing a job to be executed. + * @param dependingJobs an array of jobs the current job depends on + */ + public Job(JobConf jobConf, ArrayList dependingJobs) throws IOException { + this.theJobConf = jobConf; + this.dependingJobs = dependingJobs; + this.state = Job.WAITING; + this.jobID = "unassigned"; + this.mapredJobID = null; //not yet assigned + this.jobName = "unassigned"; + this.message = "just initialized"; + this.jc = new JobClient(jobConf); + } + + /** + * Construct a job. + * + * @param jobConf mapred job configuration representing a job to be executed. + * @throws IOException + */ + public Job(JobConf jobConf) throws IOException { + this(jobConf, null); + } + + @Override + public String toString() { + StringBuffer sb = new StringBuffer(); + sb.append("job name:\t").append(this.jobName).append("\n"); + sb.append("job id:\t").append(this.jobID).append("\n"); + sb.append("job state:\t").append(this.state).append("\n"); + sb.append("job mapred id:\t").append(this.mapredJobID==null ? "unassigned" + : this.mapredJobID).append("\n"); + sb.append("job message:\t").append(this.message).append("\n"); + + if (this.dependingJobs == null || this.dependingJobs.size() == 0) { + sb.append("job has no depending job:\t").append("\n"); + } else { + sb.append("job has ").append(this.dependingJobs.size()).append(" dependeng jobs:\n"); + for (int i = 0; i < this.dependingJobs.size(); i++) { + sb.append("\t depending job ").append(i).append(":\t"); + sb.append((this.dependingJobs.get(i)).getJobName()).append("\n"); + } + } + return sb.toString(); + } + + /** + * @return the job name of this job + */ + public String getJobName() { + return this.jobName; + } + + /** + * Set the job name for this job. + * @param jobName the job name + */ + public void setJobName(String jobName) { + this.jobName = jobName; + } + + /** + * @return the job ID of this job assigned by JobControl + */ + public String getJobID() { + return this.jobID; + } + + /** + * Set the job ID for this job. + * @param id the job ID + */ + public void setJobID(String id) { + this.jobID = id; + } + + /** + * @return the mapred ID of this job + * @deprecated use {@link #getAssignedJobID()} instead + */ + @Deprecated + public String getMapredJobID() { + return this.mapredJobID.toString(); + } + + /** + * Set the mapred ID for this job. + * @param mapredJobID the mapred job ID for this job. + * @deprecated use {@link #setAssignedJobID(JobID)} instead + */ + @Deprecated + public void setMapredJobID(String mapredJobID) { + this.mapredJobID = JobID.forName(mapredJobID); + } + + /** + * @return the mapred ID of this job as assigned by the + * mapred framework. 
+ */ + public JobID getAssignedJobID() { + return this.mapredJobID; + } + + /** + * Set the mapred ID for this job as assigned by the + * mapred framework. + * @param mapredJobID the mapred job ID for this job. + */ + public void setAssignedJobID(JobID mapredJobID) { + this.mapredJobID = mapredJobID; + } + + /** + * @return the mapred job conf of this job + */ + public JobConf getJobConf() { + return this.theJobConf; + } + + + /** + * Set the mapred job conf for this job. + * @param jobConf the mapred job conf for this job. + */ + public void setJobConf(JobConf jobConf) { + this.theJobConf = jobConf; + } + + /** + * @return the state of this job + */ + public synchronized int getState() { + return this.state; + } + + /** + * Set the state for this job. + * @param state the new state for this job. + */ + protected synchronized void setState(int state) { + this.state = state; + } + + /** + * @return the message of this job + */ + public String getMessage() { + return this.message; + } + + /** + * Set the message for this job. + * @param message the message for this job. + */ + public void setMessage(String message) { + this.message = message; + } + + + /** + * @return the job client of this job + */ + public JobClient getJobClient(){ + return this.jc; + } + + /** + * @return the depending jobs of this job + */ + public ArrayList getDependingJobs() { + return this.dependingJobs; + } + + /** + * Add a job to this jobs' dependency list. Dependent jobs can only be added while a Job + * is waiting to run, not during or afterwards. + * + * @param dependingJob Job that this Job depends on. + * @return true if the Job was added. + */ + public synchronized boolean addDependingJob(Job dependingJob) { + if (this.state == Job.WAITING) { //only allowed to add jobs when waiting + if (this.dependingJobs == null) { + this.dependingJobs = new ArrayList(); + } + return this.dependingJobs.add(dependingJob); + } else { + return false; + } + } + + /** + * @return true if this job is in a complete state + */ + public boolean isCompleted() { + return this.state == Job.FAILED || + this.state == Job.DEPENDENT_FAILED || + this.state == Job.SUCCESS; + } + + /** + * @return true if this job is in READY state + */ + public boolean isReady() { + return this.state == Job.READY; + } + + /** + * Check the state of this running job. The state may + * remain the same, become SUCCESS or FAILED. + */ + private void checkRunningState() { + RunningJob running = null; + try { + running = jc.getJob(this.mapredJobID); + if (running.isComplete()) { + if (running.isSuccessful()) { + this.state = Job.SUCCESS; + } else { + this.state = Job.FAILED; + this.message = "Job failed!"; + try { + running.killJob(); + } catch (IOException e1) { + + } + try { + this.jc.close(); + } catch (IOException e2) { + + } + } + } + + } catch (IOException ioe) { + this.state = Job.FAILED; + this.message = StringUtils.stringifyException(ioe); + try { + if (running != null) + running.killJob(); + } catch (IOException e1) { + + } + try { + this.jc.close(); + } catch (IOException e1) { + + } + } + } + + /** + * Check and update the state of this job. The state changes + * depending on its current state and the states of the depending jobs. 
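+ * A WAITING job becomes READY when it has no depending jobs or when all of
+ * them have reached SUCCESS; it becomes DEPENDENT_FAILED as soon as any
+ * depending job ends in FAILED or DEPENDENT_FAILED.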
+ */ + synchronized int checkState() { + if (this.state == Job.RUNNING) { + checkRunningState(); + } + if (this.state != Job.WAITING) { + return this.state; + } + if (this.dependingJobs == null || this.dependingJobs.size() == 0) { + this.state = Job.READY; + return this.state; + } + Job pred = null; + int n = this.dependingJobs.size(); + for (int i = 0; i < n; i++) { + pred = this.dependingJobs.get(i); + int s = pred.checkState(); + if (s == Job.WAITING || s == Job.READY || s == Job.RUNNING) { + break; // a pred is still not completed, continue in WAITING + // state + } + if (s == Job.FAILED || s == Job.DEPENDENT_FAILED) { + this.state = Job.DEPENDENT_FAILED; + this.message = "depending job " + i + " with jobID " + + pred.getJobID() + " failed. " + pred.getMessage(); + break; + } + // pred must be in success state + if (i == n - 1) { + this.state = Job.READY; + } + } + + return this.state; + } + + /** + * Submit this job to mapred. The state becomes RUNNING if submission + * is successful, FAILED otherwise. + */ + protected synchronized void submit() { + try { + if (theJobConf.getBoolean("create.empty.dir.if.nonexist", false)) { + FileSystem fs = FileSystem.get(theJobConf); + Path inputPaths[] = FileInputFormat.getInputPaths(theJobConf); + for (int i = 0; i < inputPaths.length; i++) { + if (!fs.exists(inputPaths[i])) { + try { + fs.mkdirs(inputPaths[i]); + } catch (IOException e) { + + } + } + } + } + RunningJob running = jc.submitJob(theJobConf); + this.mapredJobID = running.getID(); + this.state = Job.RUNNING; + } catch (IOException ioe) { + this.state = Job.FAILED; + this.message = StringUtils.stringifyException(ioe); + } + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/jobcontrol/JobControl.java b/src/mapred/org/apache/hadoop/mapred/jobcontrol/JobControl.java new file mode 100644 index 0000000..144cdd9 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/jobcontrol/JobControl.java @@ -0,0 +1,298 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.jobcontrol; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Hashtable; +import java.util.Map; + +/** This class encapsulates a set of MapReduce jobs and its dependency. It tracks + * the states of the jobs by placing them into different tables according to their + * states. + * + * This class provides APIs for the client app to add a job to the group and to get + * the jobs in the group in different states. When a + * job is added, an ID unique to the group is assigned to the job. + * + * This class has a thread that submits jobs when they become ready, monitors the + * states of the running jobs, and updates the states of jobs based on the state changes + * of their depending jobs states. 
The class provides APIs for suspending/resuming + * the thread,and for stopping the thread. + * + */ +public class JobControl implements Runnable{ + + // The thread can be in one of the following state + private static final int RUNNING = 0; + private static final int SUSPENDED = 1; + private static final int STOPPED = 2; + private static final int STOPPING = 3; + private static final int READY = 4; + + private int runnerState; // the thread state + + private Map waitingJobs; + private Map readyJobs; + private Map runningJobs; + private Map successfulJobs; + private Map failedJobs; + + private long nextJobID; + private String groupName; + + /** + * Construct a job control for a group of jobs. + * @param groupName a name identifying this group + */ + public JobControl(String groupName) { + this.waitingJobs = new Hashtable(); + this.readyJobs = new Hashtable(); + this.runningJobs = new Hashtable(); + this.successfulJobs = new Hashtable(); + this.failedJobs = new Hashtable(); + this.nextJobID = -1; + this.groupName = groupName; + this.runnerState = JobControl.READY; + } + + private static ArrayList toArrayList(Map jobs) { + ArrayList retv = new ArrayList(); + synchronized (jobs) { + for (Job job : jobs.values()) { + retv.add(job); + } + } + return retv; + } + + /** + * @return the jobs in the waiting state + */ + public ArrayList getWaitingJobs() { + return JobControl.toArrayList(this.waitingJobs); + } + + /** + * @return the jobs in the running state + */ + public ArrayList getRunningJobs() { + return JobControl.toArrayList(this.runningJobs); + } + + /** + * @return the jobs in the ready state + */ + public ArrayList getReadyJobs() { + return JobControl.toArrayList(this.readyJobs); + } + + /** + * @return the jobs in the success state + */ + public ArrayList getSuccessfulJobs() { + return JobControl.toArrayList(this.successfulJobs); + } + + public ArrayList getFailedJobs() { + return JobControl.toArrayList(this.failedJobs); + } + + private String getNextJobID() { + nextJobID += 1; + return this.groupName + this.nextJobID; + } + + private static void addToQueue(Job aJob, Map queue) { + synchronized(queue) { + queue.put(aJob.getJobID(), aJob); + } + } + + private void addToQueue(Job aJob) { + Map queue = getQueue(aJob.getState()); + addToQueue(aJob, queue); + } + + private Map getQueue(int state) { + Map retv = null; + if (state == Job.WAITING) { + retv = this.waitingJobs; + } else if (state == Job.READY) { + retv = this.readyJobs; + } else if (state == Job.RUNNING) { + retv = this.runningJobs; + } else if (state == Job.SUCCESS) { + retv = this.successfulJobs; + } else if (state == Job.FAILED || state == Job.DEPENDENT_FAILED) { + retv = this.failedJobs; + } + return retv; + } + + /** + * Add a new job. + * @param aJob the new job + */ + synchronized public String addJob(Job aJob) { + String id = this.getNextJobID(); + aJob.setJobID(id); + aJob.setState(Job.WAITING); + this.addToQueue(aJob); + return id; + } + + /** + * Add a collection of jobs + * + * @param jobs + */ + public void addJobs(Collection jobs) { + for (Job job : jobs) { + addJob(job); + } + } + + /** + * @return the thread state + */ + public int getState() { + return this.runnerState; + } + + /** + * set the thread state to STOPPING so that the + * thread will stop when it wakes up. 
+ */ + public void stop() { + this.runnerState = JobControl.STOPPING; + } + + /** + * suspend the running thread + */ + public void suspend () { + if (this.runnerState == JobControl.RUNNING) { + this.runnerState = JobControl.SUSPENDED; + } + } + + /** + * resume the suspended thread + */ + public void resume () { + if (this.runnerState == JobControl.SUSPENDED) { + this.runnerState = JobControl.RUNNING; + } + } + + synchronized private void checkRunningJobs() { + + Map oldJobs = null; + oldJobs = this.runningJobs; + this.runningJobs = new Hashtable(); + + for (Job nextJob : oldJobs.values()) { + int state = nextJob.checkState(); + /* + if (state != Job.RUNNING) { + System.out.println("The state of the running job " + + nextJob.getJobName() + " has changed to: " + nextJob.getState()); + } + */ + this.addToQueue(nextJob); + } + } + + synchronized private void checkWaitingJobs() { + Map oldJobs = null; + oldJobs = this.waitingJobs; + this.waitingJobs = new Hashtable(); + + for (Job nextJob : oldJobs.values()) { + int state = nextJob.checkState(); + /* + if (state != Job.WAITING) { + System.out.println("The state of the waiting job " + + nextJob.getJobName() + " has changed to: " + nextJob.getState()); + } + */ + this.addToQueue(nextJob); + } + } + + synchronized private void startReadyJobs() { + Map oldJobs = null; + oldJobs = this.readyJobs; + this.readyJobs = new Hashtable(); + + for (Job nextJob : oldJobs.values()) { + //System.out.println("Job to submit to Hadoop: " + nextJob.getJobName()); + nextJob.submit(); + //System.out.println("Hadoop ID: " + nextJob.getMapredJobID()); + this.addToQueue(nextJob); + } + } + + synchronized public boolean allFinished() { + return this.waitingJobs.size() == 0 && + this.readyJobs.size() == 0 && + this.runningJobs.size() == 0; + } + + /** + * The main loop for the thread. + * The loop does the following: + * Check the states of the running jobs + * Update the states of waiting jobs + * Submit the jobs in ready state + */ + public void run() { + this.runnerState = JobControl.RUNNING; + while (true) { + while (this.runnerState == JobControl.SUSPENDED) { + try { + Thread.sleep(5000); + } + catch (Exception e) { + + } + } + checkRunningJobs(); + checkWaitingJobs(); + startReadyJobs(); + if (this.runnerState != JobControl.RUNNING && + this.runnerState != JobControl.SUSPENDED) { + break; + } + try { + Thread.sleep(5000); + } + catch (Exception e) { + + } + if (this.runnerState != JobControl.RUNNING && + this.runnerState != JobControl.SUSPENDED) { + break; + } + } + this.runnerState = JobControl.STOPPED; + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/jobcontrol/package.html b/src/mapred/org/apache/hadoop/mapred/jobcontrol/package.html new file mode 100644 index 0000000..7c3e53d --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/jobcontrol/package.html @@ -0,0 +1,25 @@ + + + + + + +

Utilities for managing dependent jobs.
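+
+ A driver typically builds its jobs, declares the dependencies between them,
+ and then runs a JobControl instance on a separate thread, along the lines of
+ the following sketch (confA and confB stand for pre-built JobConf objects):
+ <pre>
+   Job jobA = new Job(confA);
+   Job jobB = new Job(confB);
+   jobB.addDependingJob(jobA);
+
+   JobControl control = new JobControl("example");
+   control.addJob(jobA);
+   control.addJob(jobB);
+
+   new Thread(control).start();
+   while (!control.allFinished()) {
+     try {
+       Thread.sleep(5000);
+     } catch (InterruptedException ie) {
+     }
+   }
+   control.stop();
+   if (control.getFailedJobs().size() > 0) {
+     // inspect Job.getMessage() on the failed jobs
+   }
+ </pre>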

+ + + diff --git a/src/mapred/org/apache/hadoop/mapred/join/ArrayListBackedIterator.java b/src/mapred/org/apache/hadoop/mapred/join/ArrayListBackedIterator.java new file mode 100644 index 0000000..78e6957 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/join/ArrayListBackedIterator.java @@ -0,0 +1,89 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred.join; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Iterator; + +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableUtils; + +/** + * This class provides an implementation of ResetableIterator. The + * implementation uses an {@link java.util.ArrayList} to store elements + * added to it, replaying them as requested. + * Prefer {@link StreamBackedIterator}. + */ +public class ArrayListBackedIterator + implements ResetableIterator { + + private Iterator iter; + private ArrayList data; + private X hold = null; + + public ArrayListBackedIterator() { + this(new ArrayList()); + } + + public ArrayListBackedIterator(ArrayList data) { + this.data = data; + this.iter = this.data.iterator(); + } + + public boolean hasNext() { + return iter.hasNext(); + } + + public boolean next(X val) throws IOException { + if (iter.hasNext()) { + WritableUtils.cloneInto(val, iter.next()); + if (null == hold) { + hold = WritableUtils.clone(val, null); + } else { + WritableUtils.cloneInto(hold, val); + } + return true; + } + return false; + } + + public boolean replay(X val) throws IOException { + WritableUtils.cloneInto(val, hold); + return true; + } + + public void reset() { + iter = data.iterator(); + } + + public void add(X item) throws IOException { + data.add(WritableUtils.clone(item, null)); + } + + public void close() throws IOException { + iter = null; + data = null; + } + + public void clear() { + data.clear(); + reset(); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/join/ComposableInputFormat.java b/src/mapred/org/apache/hadoop/mapred/join/ComposableInputFormat.java new file mode 100644 index 0000000..81c18b4 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/join/ComposableInputFormat.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.join; + +import java.io.IOException; + +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.mapred.InputFormat; +import org.apache.hadoop.mapred.InputSplit; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.Reporter; + +/** + * Refinement of InputFormat requiring implementors to provide + * ComposableRecordReader instead of RecordReader. + */ +public interface ComposableInputFormat + extends InputFormat { + + ComposableRecordReader getRecordReader(InputSplit split, + JobConf job, Reporter reporter) throws IOException; +} diff --git a/src/mapred/org/apache/hadoop/mapred/join/ComposableRecordReader.java b/src/mapred/org/apache/hadoop/mapred/join/ComposableRecordReader.java new file mode 100644 index 0000000..ca8b3eb --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/join/ComposableRecordReader.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.join; + +import java.io.IOException; + +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.mapred.RecordReader; + +/** + * Additional operations required of a RecordReader to participate in a join. + */ +public interface ComposableRecordReader + extends RecordReader, Comparable> { + + /** + * Return the position in the collector this class occupies. + */ + int id(); + + /** + * Return the key this RecordReader would supply on a call to next(K,V) + */ + K key(); + + /** + * Clone the key at the head of this RecordReader into the object provided. + */ + void key(K key) throws IOException; + + /** + * Returns true if the stream is not empty, but provides no guarantee that + * a call to next(K,V) will succeed. + */ + boolean hasNext(); + + /** + * Skip key-value pairs with keys less than or equal to the key provided. + */ + void skip(K key) throws IOException; + + /** + * While key-value pairs from this RecordReader match the given key, register + * them with the JoinCollector provided. 
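+ * CompositeRecordReader, for example, adds its EMPTY iterator to the
+ * collector when the supplied key does not match its own current key.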
+ */ + void accept(CompositeRecordReader.JoinCollector jc, K key) throws IOException; +} diff --git a/src/mapred/org/apache/hadoop/mapred/join/CompositeInputFormat.java b/src/mapred/org/apache/hadoop/mapred/join/CompositeInputFormat.java new file mode 100644 index 0000000..5ee97a8 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/join/CompositeInputFormat.java @@ -0,0 +1,181 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.join; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.mapred.InputFormat; +import org.apache.hadoop.mapred.InputSplit; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.Reporter; + +/** + * An InputFormat capable of performing joins over a set of data sources sorted + * and partitioned the same way. + * @see #setFormat + * + * A user may define new join types by setting the property + * mapred.join.define.<ident> to a classname. In the expression + * mapred.join.expr, the identifier will be assumed to be a + * ComposableRecordReader. + * mapred.join.keycomparator can be a classname used to compare keys + * in the join. + * @see JoinRecordReader + * @see MultiFilterRecordReader + */ +public class CompositeInputFormat + implements ComposableInputFormat { + + // expression parse tree to which IF requests are proxied + private Parser.Node root; + + public CompositeInputFormat() { } + + + /** + * Interpret a given string as a composite expression. + * {@code + * func ::= ([,]*) + * func ::= tbl(,"") + * class ::= @see java.lang.Class#forName(java.lang.String) + * path ::= @see org.apache.hadoop.fs.Path#Path(java.lang.String) + * } + * Reads expression from the mapred.join.expr property and + * user-supplied join types from mapred.join.define.<ident> + * types. Paths supplied to tbl are given as input paths to the + * InputFormat class listed. + * @see #compose(java.lang.String, java.lang.Class, java.lang.String...) + */ + public void setFormat(JobConf job) throws IOException { + addDefaults(); + addUserIdentifiers(job); + root = Parser.parse(job.get("mapred.join.expr", null), job); + } + + /** + * Adds the default set of identifiers to the parser. 
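+ * With these defaults an expression such as
+ * {@code outer(tbl(org.apache.hadoop.mapred.SequenceFileInputFormat,"/a"),
+ *              tbl(org.apache.hadoop.mapred.SequenceFileInputFormat,"/b"))}
+ * in mapred.join.expr yields a full outer join over the two sources
+ * ("/a" and "/b" are placeholder paths).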
+ */ + protected void addDefaults() { + try { + Parser.CNode.addIdentifier("inner", InnerJoinRecordReader.class); + Parser.CNode.addIdentifier("outer", OuterJoinRecordReader.class); + Parser.CNode.addIdentifier("override", OverrideRecordReader.class); + Parser.WNode.addIdentifier("tbl", WrappedRecordReader.class); + } catch (NoSuchMethodException e) { + throw new RuntimeException("FATAL: Failed to init defaults", e); + } + } + + /** + * Inform the parser of user-defined types. + */ + private void addUserIdentifiers(JobConf job) throws IOException { + Pattern x = Pattern.compile("^mapred\\.join\\.define\\.(\\w+)$"); + for (Map.Entry kv : job) { + Matcher m = x.matcher(kv.getKey()); + if (m.matches()) { + try { + Parser.CNode.addIdentifier(m.group(1), + job.getClass(m.group(0), null, ComposableRecordReader.class)); + } catch (NoSuchMethodException e) { + throw (IOException)new IOException( + "Invalid define for " + m.group(1)).initCause(e); + } + } + } + } + + /** + * Build a CompositeInputSplit from the child InputFormats by assigning the + * ith split from each child to the ith composite split. + */ + public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException { + setFormat(job); + job.setLong("mapred.min.split.size", Long.MAX_VALUE); + return root.getSplits(job, numSplits); + } + + /** + * Construct a CompositeRecordReader for the children of this InputFormat + * as defined in the init expression. + * The outermost join need only be composable, not necessarily a composite. + * Mandating TupleWritable isn't strictly correct. + */ + @SuppressWarnings("unchecked") // child types unknown + public ComposableRecordReader getRecordReader( + InputSplit split, JobConf job, Reporter reporter) throws IOException { + setFormat(job); + return root.getRecordReader(split, job, reporter); + } + + /** + * Convenience method for constructing composite formats. + * Given InputFormat class (inf), path (p) return: + * {@code tbl(,

) } + */ + public static String compose(Class inf, String path) { + return compose(inf.getName().intern(), path, new StringBuffer()).toString(); + } + + /** + * Convenience method for constructing composite formats. + * Given operation (op), Object class (inf), set of paths (p) return: + * {@code (tbl(,),tbl(,),...,tbl(,)) } + */ + public static String compose(String op, Class inf, + String... path) { + final String infname = inf.getName(); + StringBuffer ret = new StringBuffer(op + '('); + for (String p : path) { + compose(infname, p, ret); + ret.append(','); + } + ret.setCharAt(ret.length() - 1, ')'); + return ret.toString(); + } + + /** + * Convenience method for constructing composite formats. + * Given operation (op), Object class (inf), set of paths (p) return: + * {@code (tbl(,),tbl(,),...,tbl(,)) } + */ + public static String compose(String op, Class inf, + Path... path) { + ArrayList tmp = new ArrayList(path.length); + for (Path p : path) { + tmp.add(p.toString()); + } + return compose(op, inf, tmp.toArray(new String[0])); + } + + private static StringBuffer compose(String inf, String path, + StringBuffer sb) { + sb.append("tbl(" + inf + ",\""); + sb.append(path); + sb.append("\")"); + return sb; + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/join/CompositeInputSplit.java b/src/mapred/org/apache/hadoop/mapred/join/CompositeInputSplit.java new file mode 100644 index 0000000..0c45579 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/join/CompositeInputSplit.java @@ -0,0 +1,149 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.join; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.HashSet; + +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.mapred.InputSplit; +import org.apache.hadoop.util.ReflectionUtils; + +/** + * This InputSplit contains a set of child InputSplits. Any InputSplit inserted + * into this collection must have a public default constructor. + */ +public class CompositeInputSplit implements InputSplit { + + private int fill = 0; + private long totsize = 0L; + private InputSplit[] splits; + + public CompositeInputSplit() { } + + public CompositeInputSplit(int capacity) { + splits = new InputSplit[capacity]; + } + + /** + * Add an InputSplit to this collection. + * @throws IOException If capacity was not specified during construction + * or if capacity has been reached. 
+ */ + public void add(InputSplit s) throws IOException { + if (null == splits) { + throw new IOException("Uninitialized InputSplit"); + } + if (fill == splits.length) { + throw new IOException("Too many splits"); + } + splits[fill++] = s; + totsize += s.getLength(); + } + + /** + * Get ith child InputSplit. + */ + public InputSplit get(int i) { + return splits[i]; + } + + /** + * Return the aggregate length of all child InputSplits currently added. + */ + public long getLength() throws IOException { + return totsize; + } + + /** + * Get the length of ith child InputSplit. + */ + public long getLength(int i) throws IOException { + return splits[i].getLength(); + } + + /** + * Collect a set of hosts from all child InputSplits. + */ + public String[] getLocations() throws IOException { + HashSet hosts = new HashSet(); + for (InputSplit s : splits) { + String[] hints = s.getLocations(); + if (hints != null && hints.length > 0) { + for (String host : hints) { + hosts.add(host); + } + } + } + return hosts.toArray(new String[hosts.size()]); + } + + /** + * getLocations from ith InputSplit. + */ + public String[] getLocation(int i) throws IOException { + return splits[i].getLocations(); + } + + /** + * Write splits in the following format. + * {@code + * ...... + * } + */ + public void write(DataOutput out) throws IOException { + WritableUtils.writeVInt(out, splits.length); + for (InputSplit s : splits) { + Text.writeString(out, s.getClass().getName()); + } + for (InputSplit s : splits) { + s.write(out); + } + } + + /** + * {@inheritDoc} + * @throws IOException If the child InputSplit cannot be read, typically + * for faliing access checks. + */ + @SuppressWarnings("unchecked") // Generic array assignment + public void readFields(DataInput in) throws IOException { + int card = WritableUtils.readVInt(in); + if (splits == null || splits.length != card) { + splits = new InputSplit[card]; + } + Class[] cls = new Class[card]; + try { + for (int i = 0; i < card; ++i) { + cls[i] = + Class.forName(Text.readString(in)).asSubclass(InputSplit.class); + } + for (int i = 0; i < card; ++i) { + splits[i] = ReflectionUtils.newInstance(cls[i], null); + splits[i].readFields(in); + } + } catch (ClassNotFoundException e) { + throw (IOException)new IOException("Failed split init").initCause(e); + } + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/join/CompositeRecordReader.java b/src/mapred/org/apache/hadoop/mapred/join/CompositeRecordReader.java new file mode 100644 index 0000000..8d6072a --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/join/CompositeRecordReader.java @@ -0,0 +1,459 @@ +/** * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred.join; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.PriorityQueue; + +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.io.WritableComparator; +import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.mapred.RecordReader; +import org.apache.hadoop.util.ReflectionUtils; + +/** + * A RecordReader that can effect joins of RecordReaders sharing a common key + * type and partitioning. + */ +public abstract class CompositeRecordReader< + K extends WritableComparable, // key type + V extends Writable, // accepts RecordReader as children + X extends Writable> // emits Writables of this type + implements Configurable { + + + private int id; + private Configuration conf; + private final ResetableIterator EMPTY = new ResetableIterator.EMPTY(); + + private WritableComparator cmp; + private Class keyclass; + private PriorityQueue> q; + + protected final JoinCollector jc; + protected final ComposableRecordReader[] kids; + + protected abstract boolean combine(Object[] srcs, TupleWritable value); + + /** + * Create a RecordReader with capacity children to position + * id in the parent reader. + * The id of a root CompositeRecordReader is -1 by convention, but relying + * on this is not recommended. + */ + @SuppressWarnings("unchecked") // Generic array assignment + public CompositeRecordReader(int id, int capacity, + Class cmpcl) + throws IOException { + assert capacity > 0 : "Invalid capacity"; + this.id = id; + if (null != cmpcl) { + cmp = ReflectionUtils.newInstance(cmpcl, null); + q = new PriorityQueue>(3, + new Comparator>() { + public int compare(ComposableRecordReader o1, + ComposableRecordReader o2) { + return cmp.compare(o1.key(), o2.key()); + } + }); + } + jc = new JoinCollector(capacity); + kids = new ComposableRecordReader[capacity]; + } + + /** + * Return the position in the collector this class occupies. + */ + public int id() { + return id; + } + + /** + * {@inheritDoc} + */ + public void setConf(Configuration conf) { + this.conf = conf; + } + + /** + * {@inheritDoc} + */ + public Configuration getConf() { + return conf; + } + + /** + * Return sorted list of RecordReaders for this composite. + */ + protected PriorityQueue> getRecordReaderQueue() { + return q; + } + + /** + * Return comparator defining the ordering for RecordReaders in this + * composite. + */ + protected WritableComparator getComparator() { + return cmp; + } + + /** + * Add a RecordReader to this collection. + * The id() of a RecordReader determines where in the Tuple its + * entry will appear. Adding RecordReaders with the same id has + * undefined behavior. + */ + public void add(ComposableRecordReader rr) throws IOException { + kids[rr.id()] = rr; + if (null == q) { + cmp = WritableComparator.get(rr.createKey().getClass()); + q = new PriorityQueue>(3, + new Comparator>() { + public int compare(ComposableRecordReader o1, + ComposableRecordReader o2) { + return cmp.compare(o1.key(), o2.key()); + } + }); + } + if (rr.hasNext()) { + q.add(rr); + } + } + + /** + * Collector for join values. + * This accumulates values for a given key from the child RecordReaders. If + * one or more child RR contain duplicate keys, this will emit the cross + * product of the associated values until exhausted. 
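+ * For example, if for a key k one child supplies the values {a, b} and
+ * another supplies {x}, the collector emits the tuples (a, x) and (b, x)
+ * for k.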
+ */ + class JoinCollector { + private K key; + private ResetableIterator[] iters; + private int pos = -1; + private boolean first = true; + + /** + * Construct a collector capable of handling the specified number of + * children. + */ + @SuppressWarnings("unchecked") // Generic array assignment + public JoinCollector(int card) { + iters = new ResetableIterator[card]; + for (int i = 0; i < iters.length; ++i) { + iters[i] = EMPTY; + } + } + + /** + * Register a given iterator at position id. + */ + public void add(int id, ResetableIterator i) + throws IOException { + iters[id] = i; + } + + /** + * Return the key associated with this collection. + */ + public K key() { + return key; + } + + /** + * Codify the contents of the collector to be iterated over. + * When this is called, all RecordReaders registered for this + * key should have added ResetableIterators. + */ + public void reset(K key) { + this.key = key; + first = true; + pos = iters.length - 1; + for (int i = 0; i < iters.length; ++i) { + iters[i].reset(); + } + } + + /** + * Clear all state information. + */ + public void clear() { + key = null; + pos = -1; + for (int i = 0; i < iters.length; ++i) { + iters[i].clear(); + iters[i] = EMPTY; + } + } + + /** + * Returns false if exhausted or if reset(K) has not been called. + */ + protected boolean hasNext() { + return !(pos < 0); + } + + /** + * Populate Tuple from iterators. + * It should be the case that, given iterators i_1...i_n over values from + * sources s_1...s_n sharing key k, repeated calls to next should yield + * I x I. + */ + @SuppressWarnings("unchecked") // No static typeinfo on Tuples + protected boolean next(TupleWritable val) throws IOException { + if (first) { + int i = -1; + for (pos = 0; pos < iters.length; ++pos) { + if (iters[pos].hasNext() && iters[pos].next((X)val.get(pos))) { + i = pos; + val.setWritten(i); + } + } + pos = i; + first = false; + if (pos < 0) { + clear(); + return false; + } + return true; + } + while (0 <= pos && !(iters[pos].hasNext() && + iters[pos].next((X)val.get(pos)))) { + --pos; + } + if (pos < 0) { + clear(); + return false; + } + val.setWritten(pos); + for (int i = 0; i < pos; ++i) { + if (iters[i].replay((X)val.get(i))) { + val.setWritten(i); + } + } + while (pos + 1 < iters.length) { + ++pos; + iters[pos].reset(); + if (iters[pos].hasNext() && iters[pos].next((X)val.get(pos))) { + val.setWritten(pos); + } + } + return true; + } + + /** + * Replay the last Tuple emitted. + */ + @SuppressWarnings("unchecked") // No static typeinfo on Tuples + public boolean replay(TupleWritable val) throws IOException { + // The last emitted tuple might have drawn on an empty source; + // it can't be cleared prematurely, b/c there may be more duplicate + // keys in iterator positions < pos + assert !first; + boolean ret = false; + for (int i = 0; i < iters.length; ++i) { + if (iters[i].replay((X)val.get(i))) { + val.setWritten(i); + ret = true; + } + } + return ret; + } + + /** + * Close all child iterators. + */ + public void close() throws IOException { + for (int i = 0; i < iters.length; ++i) { + iters[i].close(); + } + } + + /** + * Write the next value into key, value as accepted by the operation + * associated with this set of RecordReaders. 
+ */ + public boolean flush(TupleWritable value) throws IOException { + while (hasNext()) { + value.clearWritten(); + if (next(value) && combine(kids, value)) { + return true; + } + } + return false; + } + } + + /** + * Return the key for the current join or the value at the top of the + * RecordReader heap. + */ + public K key() { + if (jc.hasNext()) { + return jc.key(); + } + if (!q.isEmpty()) { + return q.peek().key(); + } + return null; + } + + /** + * Clone the key at the top of this RR into the given object. + */ + public void key(K key) throws IOException { + WritableUtils.cloneInto(key, key()); + } + + /** + * Return true if it is possible that this could emit more values. + */ + public boolean hasNext() { + return jc.hasNext() || !q.isEmpty(); + } + + /** + * Pass skip key to child RRs. + */ + public void skip(K key) throws IOException { + ArrayList> tmp = + new ArrayList>(); + while (!q.isEmpty() && cmp.compare(q.peek().key(), key) <= 0) { + tmp.add(q.poll()); + } + for (ComposableRecordReader rr : tmp) { + rr.skip(key); + if (rr.hasNext()) { + q.add(rr); + } + } + } + + /** + * Obtain an iterator over the child RRs apropos of the value type + * ultimately emitted from this join. + */ + protected abstract ResetableIterator getDelegate(); + + /** + * If key provided matches that of this Composite, give JoinCollector + * iterator over values it may emit. + */ + @SuppressWarnings("unchecked") // No values from static EMPTY class + public void accept(CompositeRecordReader.JoinCollector jc, K key) + throws IOException { + if (hasNext() && 0 == cmp.compare(key, key())) { + fillJoinCollector(createKey()); + jc.add(id, getDelegate()); + return; + } + jc.add(id, EMPTY); + } + + /** + * For all child RRs offering the key provided, obtain an iterator + * at that position in the JoinCollector. + */ + protected void fillJoinCollector(K iterkey) throws IOException { + if (!q.isEmpty()) { + q.peek().key(iterkey); + while (0 == cmp.compare(q.peek().key(), iterkey)) { + ComposableRecordReader t = q.poll(); + t.accept(jc, iterkey); + if (t.hasNext()) { + q.add(t); + } else if (q.isEmpty()) { + return; + } + } + } + } + + /** + * Implement Comparable contract (compare key of join or head of heap + * with that of another). + */ + public int compareTo(ComposableRecordReader other) { + return cmp.compare(key(), other.key()); + } + + /** + * Create a new key value common to all child RRs. + * @throws ClassCastException if key classes differ. + */ + @SuppressWarnings("unchecked") // Explicit check for key class agreement + public K createKey() { + if (null == keyclass) { + final Class cls = kids[0].createKey().getClass(); + for (RecordReader rr : kids) { + if (!cls.equals(rr.createKey().getClass())) { + throw new ClassCastException("Child key classes fail to agree"); + } + } + keyclass = cls.asSubclass(WritableComparable.class); + } + return (K) ReflectionUtils.newInstance(keyclass, getConf()); + } + + /** + * Create a value to be used internally for joins. + */ + protected TupleWritable createInternalValue() { + Writable[] vals = new Writable[kids.length]; + for (int i = 0; i < vals.length; ++i) { + vals[i] = kids[i].createValue(); + } + return new TupleWritable(vals); + } + + /** + * Unsupported (returns zero in all cases). + */ + public long getPos() throws IOException { + return 0; + } + + /** + * Close all child RRs. 
+ */ + public void close() throws IOException { + if (kids != null) { + for (RecordReader rr : kids) { + rr.close(); + } + } + if (jc != null) { + jc.close(); + } + } + + /** + * Report progress as the minimum of all child RR progress. + */ + public float getProgress() throws IOException { + float ret = 1.0f; + for (RecordReader rr : kids) { + ret = Math.min(ret, rr.getProgress()); + } + return ret; + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/join/InnerJoinRecordReader.java b/src/mapred/org/apache/hadoop/mapred/join/InnerJoinRecordReader.java new file mode 100644 index 0000000..218c342 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/join/InnerJoinRecordReader.java @@ -0,0 +1,50 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.join; + +import java.io.IOException; + +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.io.WritableComparator; +import org.apache.hadoop.mapred.JobConf; + +/** + * Full inner join. + */ +public class InnerJoinRecordReader + extends JoinRecordReader { + + InnerJoinRecordReader(int id, JobConf conf, int capacity, + Class cmpcl) throws IOException { + super(id, conf, capacity, cmpcl); + } + + /** + * Return true iff the tuple is full (all data sources contain this key). + */ + protected boolean combine(Object[] srcs, TupleWritable dst) { + assert srcs.length == dst.size(); + for (int i = 0; i < srcs.length; ++i) { + if (!dst.has(i)) { + return false; + } + } + return true; + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/join/JoinRecordReader.java b/src/mapred/org/apache/hadoop/mapred/join/JoinRecordReader.java new file mode 100644 index 0000000..7151713 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/join/JoinRecordReader.java @@ -0,0 +1,114 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred.join; + +import java.io.IOException; +import java.util.PriorityQueue; + +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.io.WritableComparator; +import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.mapred.JobConf; + +/** + * Base class for Composite joins returning Tuples of arbitrary Writables. + */ +public abstract class JoinRecordReader + extends CompositeRecordReader + implements ComposableRecordReader { + + public JoinRecordReader(int id, JobConf conf, int capacity, + Class cmpcl) throws IOException { + super(id, capacity, cmpcl); + setConf(conf); + } + + /** + * Emit the next set of key, value pairs as defined by the child + * RecordReaders and operation associated with this composite RR. + */ + public boolean next(K key, TupleWritable value) throws IOException { + if (jc.flush(value)) { + WritableUtils.cloneInto(key, jc.key()); + return true; + } + jc.clear(); + K iterkey = createKey(); + final PriorityQueue> q = getRecordReaderQueue(); + while (!q.isEmpty()) { + fillJoinCollector(iterkey); + jc.reset(iterkey); + if (jc.flush(value)) { + WritableUtils.cloneInto(key, jc.key()); + return true; + } + jc.clear(); + } + return false; + } + + /** {@inheritDoc} */ + public TupleWritable createValue() { + return createInternalValue(); + } + + /** + * Return an iterator wrapping the JoinCollector. + */ + protected ResetableIterator getDelegate() { + return new JoinDelegationIterator(); + } + + /** + * Since the JoinCollector is effecting our operation, we need only + * provide an iterator proxy wrapping its operation. + */ + protected class JoinDelegationIterator + implements ResetableIterator { + + public boolean hasNext() { + return jc.hasNext(); + } + + public boolean next(TupleWritable val) throws IOException { + return jc.flush(val); + } + + public boolean replay(TupleWritable val) throws IOException { + return jc.replay(val); + } + + public void reset() { + jc.reset(jc.key()); + } + + public void add(TupleWritable item) throws IOException { + throw new UnsupportedOperationException(); + } + + public void close() throws IOException { + jc.close(); + } + + public void clear() { + jc.clear(); + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/join/MultiFilterRecordReader.java b/src/mapred/org/apache/hadoop/mapred/join/MultiFilterRecordReader.java new file mode 100644 index 0000000..8f291c0 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/join/MultiFilterRecordReader.java @@ -0,0 +1,154 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred.join; + +import java.io.IOException; +import java.util.PriorityQueue; + +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.io.WritableComparator; +import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.RecordReader; + +/** + * Base class for Composite join returning values derived from multiple + * sources, but generally not tuples. + */ +public abstract class MultiFilterRecordReader + extends CompositeRecordReader + implements ComposableRecordReader { + + private Class valueclass; + private TupleWritable ivalue; + + public MultiFilterRecordReader(int id, JobConf conf, int capacity, + Class cmpcl) throws IOException { + super(id, capacity, cmpcl); + setConf(conf); + } + + /** + * For each tuple emitted, return a value (typically one of the values + * in the tuple). + * Modifying the Writables in the tuple is permitted and unlikely to affect + * join behavior in most cases, but it is not recommended. It's safer to + * clone first. + */ + protected abstract V emit(TupleWritable dst) throws IOException; + + /** + * Default implementation offers {@link #emit} every Tuple from the + * collector (the outer join of child RRs). + */ + protected boolean combine(Object[] srcs, TupleWritable dst) { + return true; + } + + /** {@inheritDoc} */ + public boolean next(K key, V value) throws IOException { + if (jc.flush(ivalue)) { + WritableUtils.cloneInto(key, jc.key()); + WritableUtils.cloneInto(value, emit(ivalue)); + return true; + } + jc.clear(); + K iterkey = createKey(); + final PriorityQueue> q = getRecordReaderQueue(); + while (!q.isEmpty()) { + fillJoinCollector(iterkey); + jc.reset(iterkey); + if (jc.flush(ivalue)) { + WritableUtils.cloneInto(key, jc.key()); + WritableUtils.cloneInto(value, emit(ivalue)); + return true; + } + jc.clear(); + } + return false; + } + + /** {@inheritDoc} */ + @SuppressWarnings("unchecked") // Explicit check for value class agreement + public V createValue() { + if (null == valueclass) { + final Class cls = kids[0].createValue().getClass(); + for (RecordReader rr : kids) { + if (!cls.equals(rr.createValue().getClass())) { + throw new ClassCastException("Child value classes fail to agree"); + } + } + valueclass = cls.asSubclass(Writable.class); + ivalue = createInternalValue(); + } + return (V) ReflectionUtils.newInstance(valueclass, null); + } + + /** + * Return an iterator returning a single value from the tuple. + * @see MultiFilterDelegationIterator + */ + protected ResetableIterator getDelegate() { + return new MultiFilterDelegationIterator(); + } + + /** + * Proxy the JoinCollector, but include callback to emit. 
+ */ + protected class MultiFilterDelegationIterator + implements ResetableIterator { + + public boolean hasNext() { + return jc.hasNext(); + } + + public boolean next(V val) throws IOException { + boolean ret; + if (ret = jc.flush(ivalue)) { + WritableUtils.cloneInto(val, emit(ivalue)); + } + return ret; + } + + public boolean replay(V val) throws IOException { + WritableUtils.cloneInto(val, emit(ivalue)); + return true; + } + + public void reset() { + jc.reset(jc.key()); + } + + public void add(V item) throws IOException { + throw new UnsupportedOperationException(); + } + + public void close() throws IOException { + jc.close(); + } + + public void clear() { + jc.clear(); + } + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/join/OuterJoinRecordReader.java b/src/mapred/org/apache/hadoop/mapred/join/OuterJoinRecordReader.java new file mode 100644 index 0000000..666c9b1 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/join/OuterJoinRecordReader.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.join; + +import java.io.IOException; + +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.io.WritableComparator; +import org.apache.hadoop.mapred.JobConf; + +/** + * Full outer join. + */ +public class OuterJoinRecordReader + extends JoinRecordReader { + + OuterJoinRecordReader(int id, JobConf conf, int capacity, + Class cmpcl) throws IOException { + super(id, conf, capacity, cmpcl); + } + + /** + * Emit everything from the collector. + */ + protected boolean combine(Object[] srcs, TupleWritable dst) { + assert srcs.length == dst.size(); + return true; + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/join/OverrideRecordReader.java b/src/mapred/org/apache/hadoop/mapred/join/OverrideRecordReader.java new file mode 100644 index 0000000..6354914 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/join/OverrideRecordReader.java @@ -0,0 +1,93 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred.join; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.PriorityQueue; + +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.io.WritableComparator; +import org.apache.hadoop.mapred.JobConf; + +/** + * Prefer the "rightmost" data source for this key. + * For example, override(S1,S2,S3) will prefer values + * from S3 over S2, and values from S2 over S1 for all keys + * emitted from all sources. + */ +public class OverrideRecordReader + extends MultiFilterRecordReader { + + OverrideRecordReader(int id, JobConf conf, int capacity, + Class cmpcl) throws IOException { + super(id, conf, capacity, cmpcl); + } + + /** + * Emit the value with the highest position in the tuple. + */ + @SuppressWarnings("unchecked") // No static typeinfo on Tuples + protected V emit(TupleWritable dst) { + return (V) dst.iterator().next(); + } + + /** + * Instead of filling the JoinCollector with iterators from all + * data sources, fill only the rightmost for this key. + * This not only saves space by discarding the other sources, but + * it also emits the number of key-value pairs in the preferred + * RecordReader instead of repeating that stream n times, where + * n is the cardinality of the cross product of the discarded + * streams for the given key. + */ + protected void fillJoinCollector(K iterkey) throws IOException { + final PriorityQueue> q = getRecordReaderQueue(); + if (!q.isEmpty()) { + int highpos = -1; + ArrayList> list = + new ArrayList>(kids.length); + q.peek().key(iterkey); + final WritableComparator cmp = getComparator(); + while (0 == cmp.compare(q.peek().key(), iterkey)) { + ComposableRecordReader t = q.poll(); + if (-1 == highpos || list.get(highpos).id() < t.id()) { + highpos = list.size(); + } + list.add(t); + if (q.isEmpty()) + break; + } + ComposableRecordReader t = list.remove(highpos); + t.accept(jc, iterkey); + for (ComposableRecordReader rr : list) { + rr.skip(iterkey); + } + list.add(t); + for (ComposableRecordReader rr : list) { + if (rr.hasNext()) { + q.add(rr); + } + } + } + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/join/Parser.java b/src/mapred/org/apache/hadoop/mapred/join/Parser.java new file mode 100644 index 0000000..6ca5e40 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/join/Parser.java @@ -0,0 +1,487 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred.join; + +import java.io.CharArrayReader; +import java.io.IOException; +import java.io.StreamTokenizer; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.ListIterator; +import java.util.Map; +import java.util.Stack; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.WritableComparator; +import org.apache.hadoop.mapred.FileInputFormat; +import org.apache.hadoop.mapred.InputFormat; +import org.apache.hadoop.mapred.InputSplit; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.RecordReader; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.util.ReflectionUtils; + +/** + * Very simple shift-reduce parser for join expressions. + * + * This should be sufficient for the user extension permitted now, but ought to + * be replaced with a parser generator if more complex grammars are supported. + * In particular, this "shift-reduce" parser has no states. Each set + * of formals requires a different internal node type, which is responsible for + * interpreting the list of tokens it receives. This is sufficient for the + * current grammar, but it has several annoying properties that might inhibit + * extension. In particular, parenthesis are always function calls; an + * algebraic or filter grammar would not only require a node type, but must + * also work around the internals of this parser. + * + * For most other cases, adding classes to the hierarchy- particularly by + * extending JoinRecordReader and MultiFilterRecordReader- is fairly + * straightforward. One need only override the relevant method(s) (usually only + * {@link CompositeRecordReader#combine}) and include a property to map its + * value to an identifier in the parser. + */ +public class Parser { + public enum TType { CIF, IDENT, COMMA, LPAREN, RPAREN, QUOT, NUM, } + + /** + * Tagged-union type for tokens from the join expression. + * @see Parser.TType + */ + public static class Token { + + private TType type; + + Token(TType type) { + this.type = type; + } + + public TType getType() { return type; } + public Node getNode() throws IOException { + throw new IOException("Expected nodetype"); + } + public double getNum() throws IOException { + throw new IOException("Expected numtype"); + } + public String getStr() throws IOException { + throw new IOException("Expected strtype"); + } + } + + public static class NumToken extends Token { + private double num; + public NumToken(double num) { + super(TType.NUM); + this.num = num; + } + public double getNum() { return num; } + } + + public static class NodeToken extends Token { + private Node node; + NodeToken(Node node) { + super(TType.CIF); + this.node = node; + } + public Node getNode() { + return node; + } + } + + public static class StrToken extends Token { + private String str; + public StrToken(TType type, String str) { + super(type); + this.str = str; + } + public String getStr() { + return str; + } + } + + /** + * Simple lexer wrapping a StreamTokenizer. + * This encapsulates the creation of tagged-union Tokens and initializes the + * SteamTokenizer. 
+ */ + private static class Lexer { + + private StreamTokenizer tok; + + Lexer(String s) { + tok = new StreamTokenizer(new CharArrayReader(s.toCharArray())); + tok.quoteChar('"'); + tok.parseNumbers(); + tok.ordinaryChar(','); + tok.ordinaryChar('('); + tok.ordinaryChar(')'); + tok.wordChars('$','$'); + tok.wordChars('_','_'); + } + + Token next() throws IOException { + int type = tok.nextToken(); + switch (type) { + case StreamTokenizer.TT_EOF: + case StreamTokenizer.TT_EOL: + return null; + case StreamTokenizer.TT_NUMBER: + return new NumToken(tok.nval); + case StreamTokenizer.TT_WORD: + return new StrToken(TType.IDENT, tok.sval); + case '"': + return new StrToken(TType.QUOT, tok.sval); + default: + switch (type) { + case ',': + return new Token(TType.COMMA); + case '(': + return new Token(TType.LPAREN); + case ')': + return new Token(TType.RPAREN); + default: + throw new IOException("Unexpected: " + type); + } + } + } + } + + public abstract static class Node implements ComposableInputFormat { + /** + * Return the node type registered for the particular identifier. + * By default, this is a CNode for any composite node and a WNode + * for "wrapped" nodes. User nodes will likely be composite + * nodes. + * @see #addIdentifier(java.lang.String, java.lang.Class[], java.lang.Class, java.lang.Class) + * @see CompositeInputFormat#setFormat(org.apache.hadoop.mapred.JobConf) + */ + static Node forIdent(String ident) throws IOException { + try { + if (!nodeCstrMap.containsKey(ident)) { + throw new IOException("No nodetype for " + ident); + } + return nodeCstrMap.get(ident).newInstance(ident); + } catch (IllegalAccessException e) { + throw (IOException)new IOException().initCause(e); + } catch (InstantiationException e) { + throw (IOException)new IOException().initCause(e); + } catch (InvocationTargetException e) { + throw (IOException)new IOException().initCause(e); + } + } + + private static final Class[] ncstrSig = { String.class }; + private static final + Map> nodeCstrMap = + new HashMap>(); + protected static final + Map> rrCstrMap = + new HashMap>(); + + /** + * For a given identifier, add a mapping to the nodetype for the parse + * tree and to the ComposableRecordReader to be created, including the + * formals required to invoke the constructor. + * The nodetype and constructor signature should be filled in from the + * child node. + */ + protected static void addIdentifier(String ident, Class[] mcstrSig, + Class nodetype, + Class cl) + throws NoSuchMethodException { + Constructor ncstr = + nodetype.getDeclaredConstructor(ncstrSig); + ncstr.setAccessible(true); + nodeCstrMap.put(ident, ncstr); + Constructor mcstr = + cl.getDeclaredConstructor(mcstrSig); + mcstr.setAccessible(true); + rrCstrMap.put(ident, mcstr); + } + + // inst + protected int id = -1; + protected String ident; + protected Class cmpcl; + + protected Node(String ident) { + this.ident = ident; + } + + protected void setID(int id) { + this.id = id; + } + + protected void setKeyComparator(Class cmpcl) { + this.cmpcl = cmpcl; + } + abstract void parse(List args, JobConf job) throws IOException; + } + + /** + * Nodetype in the parse tree for "wrapped" InputFormats. 
+ */ + static class WNode extends Node { + private static final Class[] cstrSig = + { Integer.TYPE, RecordReader.class, Class.class }; + + static void addIdentifier(String ident, + Class cl) + throws NoSuchMethodException { + Node.addIdentifier(ident, cstrSig, WNode.class, cl); + } + + private String indir; + private InputFormat inf; + + public WNode(String ident) { + super(ident); + } + + /** + * Let the first actual define the InputFormat and the second define + * the mapred.input.dir property. + */ + public void parse(List ll, JobConf job) throws IOException { + StringBuilder sb = new StringBuilder(); + Iterator i = ll.iterator(); + while (i.hasNext()) { + Token t = i.next(); + if (TType.COMMA.equals(t.getType())) { + try { + inf = (InputFormat)ReflectionUtils.newInstance( + job.getClassByName(sb.toString()), + job); + } catch (ClassNotFoundException e) { + throw (IOException)new IOException().initCause(e); + } catch (IllegalArgumentException e) { + throw (IOException)new IOException().initCause(e); + } + break; + } + sb.append(t.getStr()); + } + if (!i.hasNext()) { + throw new IOException("Parse error"); + } + Token t = i.next(); + if (!TType.QUOT.equals(t.getType())) { + throw new IOException("Expected quoted string"); + } + indir = t.getStr(); + // no check for ll.isEmpty() to permit extension + } + + private JobConf getConf(JobConf job) { + JobConf conf = new JobConf(job); + FileInputFormat.setInputPaths(conf, indir); + return conf; + } + + public InputSplit[] getSplits(JobConf job, int numSplits) + throws IOException { + return inf.getSplits(getConf(job), numSplits); + } + + public ComposableRecordReader getRecordReader( + InputSplit split, JobConf job, Reporter reporter) throws IOException { + try { + if (!rrCstrMap.containsKey(ident)) { + throw new IOException("No RecordReader for " + ident); + } + return rrCstrMap.get(ident).newInstance(id, + inf.getRecordReader(split, getConf(job), reporter), cmpcl); + } catch (IllegalAccessException e) { + throw (IOException)new IOException().initCause(e); + } catch (InstantiationException e) { + throw (IOException)new IOException().initCause(e); + } catch (InvocationTargetException e) { + throw (IOException)new IOException().initCause(e); + } + } + + public String toString() { + return ident + "(" + inf.getClass().getName() + ",\"" + indir + "\")"; + } + } + + /** + * Internal nodetype for "composite" InputFormats. + */ + static class CNode extends Node { + + private static final Class[] cstrSig = + { Integer.TYPE, JobConf.class, Integer.TYPE, Class.class }; + + static void addIdentifier(String ident, + Class cl) + throws NoSuchMethodException { + Node.addIdentifier(ident, cstrSig, CNode.class, cl); + } + + // inst + private ArrayList kids = new ArrayList(); + + public CNode(String ident) { + super(ident); + } + + public void setKeyComparator(Class cmpcl) { + super.setKeyComparator(cmpcl); + for (Node n : kids) { + n.setKeyComparator(cmpcl); + } + } + + /** + * Combine InputSplits from child InputFormats into a + * {@link CompositeInputSplit}. 
+ */ + public InputSplit[] getSplits(JobConf job, int numSplits) + throws IOException { + InputSplit[][] splits = new InputSplit[kids.size()][]; + for (int i = 0; i < kids.size(); ++i) { + final InputSplit[] tmp = kids.get(i).getSplits(job, numSplits); + if (null == tmp) { + throw new IOException("Error gathering splits from child RReader"); + } + if (i > 0 && splits[i-1].length != tmp.length) { + throw new IOException("Inconsistent split cardinality from child " + + i + " (" + splits[i-1].length + "/" + tmp.length + ")"); + } + splits[i] = tmp; + } + final int size = splits[0].length; + CompositeInputSplit[] ret = new CompositeInputSplit[size]; + for (int i = 0; i < size; ++i) { + ret[i] = new CompositeInputSplit(splits.length); + for (int j = 0; j < splits.length; ++j) { + ret[i].add(splits[j][i]); + } + } + return ret; + } + + @SuppressWarnings("unchecked") // child types unknowable + public ComposableRecordReader getRecordReader( + InputSplit split, JobConf job, Reporter reporter) throws IOException { + if (!(split instanceof CompositeInputSplit)) { + throw new IOException("Invalid split type:" + + split.getClass().getName()); + } + final CompositeInputSplit spl = (CompositeInputSplit)split; + final int capacity = kids.size(); + CompositeRecordReader ret = null; + try { + if (!rrCstrMap.containsKey(ident)) { + throw new IOException("No RecordReader for " + ident); + } + ret = (CompositeRecordReader) + rrCstrMap.get(ident).newInstance(id, job, capacity, cmpcl); + } catch (IllegalAccessException e) { + throw (IOException)new IOException().initCause(e); + } catch (InstantiationException e) { + throw (IOException)new IOException().initCause(e); + } catch (InvocationTargetException e) { + throw (IOException)new IOException().initCause(e); + } + for (int i = 0; i < capacity; ++i) { + ret.add(kids.get(i).getRecordReader(spl.get(i), job, reporter)); + } + return (ComposableRecordReader)ret; + } + + /** + * Parse a list of comma-separated nodes. + */ + public void parse(List args, JobConf job) throws IOException { + ListIterator i = args.listIterator(); + while (i.hasNext()) { + Token t = i.next(); + t.getNode().setID(i.previousIndex() >> 1); + kids.add(t.getNode()); + if (i.hasNext() && !TType.COMMA.equals(i.next().getType())) { + throw new IOException("Expected ','"); + } + } + } + + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(ident + "("); + for (Node n : kids) { + sb.append(n.toString() + ","); + } + sb.setCharAt(sb.length() - 1, ')'); + return sb.toString(); + } + } + + private static Token reduce(Stack st, JobConf job) throws IOException { + LinkedList args = new LinkedList(); + while (!st.isEmpty() && !TType.LPAREN.equals(st.peek().getType())) { + args.addFirst(st.pop()); + } + if (st.isEmpty()) { + throw new IOException("Unmatched ')'"); + } + st.pop(); + if (st.isEmpty() || !TType.IDENT.equals(st.peek().getType())) { + throw new IOException("Identifier expected"); + } + Node n = Node.forIdent(st.pop().getStr()); + n.parse(args, job); + return new NodeToken(n); + } + + /** + * Given an expression and an optional comparator, build a tree of + * InputFormats using the comparator to sort keys. 
+ */ + static Node parse(String expr, JobConf job) throws IOException { + if (null == expr) { + throw new IOException("Expression is null"); + } + Class cmpcl = + job.getClass("mapred.join.keycomparator", null, WritableComparator.class); + Lexer lex = new Lexer(expr); + Stack st = new Stack(); + Token tok; + while ((tok = lex.next()) != null) { + if (TType.RPAREN.equals(tok.getType())) { + st.push(reduce(st, job)); + } else { + st.push(tok); + } + } + if (st.size() == 1 && TType.CIF.equals(st.peek().getType())) { + Node ret = st.pop().getNode(); + if (cmpcl != null) { + ret.setKeyComparator(cmpcl); + } + return ret; + } + throw new IOException("Missing ')'"); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/join/ResetableIterator.java b/src/mapred/org/apache/hadoop/mapred/join/ResetableIterator.java new file mode 100644 index 0000000..538f006 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/join/ResetableIterator.java @@ -0,0 +1,93 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred.join; + +import java.io.IOException; + +import org.apache.hadoop.io.Writable; + +/** + * This defines an interface to a stateful Iterator that can replay elements + * added to it directly. + * Note that this does not extend {@link java.util.Iterator}. + */ +public interface ResetableIterator { + + public static class EMPTY + implements ResetableIterator { + public boolean hasNext() { return false; } + public void reset() { } + public void close() throws IOException { } + public void clear() { } + public boolean next(U val) throws IOException { + return false; + } + public boolean replay(U val) throws IOException { + return false; + } + public void add(U item) throws IOException { + throw new UnsupportedOperationException(); + } + } + + /** + * True if a call to next may return a value. This is permitted false + * positives, but not false negatives. + */ + public boolean hasNext(); + + /** + * Assign next value to actual. + * It is required that elements added to a ResetableIterator be returned in + * the same order after a call to {@link #reset} (FIFO). + * + * Note that a call to this may fail for nested joins (i.e. more elements + * available, but none satisfying the constraints of the join) + */ + public boolean next(T val) throws IOException; + + /** + * Assign last value returned to actual. + */ + public boolean replay(T val) throws IOException; + + /** + * Set iterator to return to the start of its range. Must be called after + * calling {@link #add} to avoid a ConcurrentModificationException. + */ + public void reset(); + + /** + * Add an element to the collection of elements to iterate over. + */ + public void add(T item) throws IOException; + + /** + * Close datasources and release resources. 
Calling methods on the iterator + * after calling close has undefined behavior. + */ + // XXX is this necessary? + public void close() throws IOException; + + /** + * Close datasources, but do not release internal resources. Calling this + * method should permit the object to be reused with a different datasource. + */ + public void clear(); + +} diff --git a/src/mapred/org/apache/hadoop/mapred/join/StreamBackedIterator.java b/src/mapred/org/apache/hadoop/mapred/join/StreamBackedIterator.java new file mode 100644 index 0000000..6fb4408 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/join/StreamBackedIterator.java @@ -0,0 +1,99 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred.join; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; + +import org.apache.hadoop.io.Writable; + +/** + * This class provides an implementation of ResetableIterator. This + * implementation uses a byte array to store elements added to it. 
+ */ +public class StreamBackedIterator + implements ResetableIterator { + + private static class ReplayableByteInputStream extends ByteArrayInputStream { + public ReplayableByteInputStream(byte[] arr) { + super(arr); + } + public void resetStream() { + mark = 0; + reset(); + } + } + + private ByteArrayOutputStream outbuf = new ByteArrayOutputStream(); + private DataOutputStream outfbuf = new DataOutputStream(outbuf); + private ReplayableByteInputStream inbuf; + private DataInputStream infbuf; + + public StreamBackedIterator() { } + + public boolean hasNext() { + return infbuf != null && inbuf.available() > 0; + } + + public boolean next(X val) throws IOException { + if (hasNext()) { + inbuf.mark(0); + val.readFields(infbuf); + return true; + } + return false; + } + + public boolean replay(X val) throws IOException { + inbuf.reset(); + if (0 == inbuf.available()) + return false; + val.readFields(infbuf); + return true; + } + + public void reset() { + if (null != outfbuf) { + inbuf = new ReplayableByteInputStream(outbuf.toByteArray()); + infbuf = new DataInputStream(inbuf); + outfbuf = null; + } + inbuf.resetStream(); + } + + public void add(X item) throws IOException { + item.write(outfbuf); + } + + public void close() throws IOException { + if (null != infbuf) + infbuf.close(); + if (null != outfbuf) + outfbuf.close(); + } + + public void clear() { + if (null != inbuf) + inbuf.resetStream(); + outbuf.reset(); + outfbuf = new DataOutputStream(outbuf); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/join/TupleWritable.java b/src/mapred/org/apache/hadoop/mapred/join/TupleWritable.java new file mode 100644 index 0000000..95c4451 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/join/TupleWritable.java @@ -0,0 +1,227 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.join; + +import java.io.DataOutput; +import java.io.DataInput; +import java.io.IOException; +import java.util.Iterator; +import java.util.NoSuchElementException; + +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableUtils; + +/** + * Writable type storing multiple {@link org.apache.hadoop.io.Writable}s. + * + * This is *not* a general-purpose tuple type. In almost all cases, users are + * encouraged to implement their own serializable types, which can perform + * better validation and provide more efficient encodings than this class is + * capable. TupleWritable relies on the join framework for type safety and + * assumes its instances will rarely be persisted, assumptions not only + * incompatible with, but contrary to the general case. 
+ * + * @see org.apache.hadoop.io.Writable + */ +public class TupleWritable implements Writable, Iterable { + + private long written; + private Writable[] values; + + /** + * Create an empty tuple with no allocated storage for writables. + */ + public TupleWritable() { } + + /** + * Initialize tuple with storage; unknown whether any of them contain + * "written" values. + */ + public TupleWritable(Writable[] vals) { + written = 0L; + values = vals; + } + + /** + * Return true if tuple has an element at the position provided. + */ + public boolean has(int i) { + return 0 != ((1L << i) & written); + } + + /** + * Get ith Writable from Tuple. + */ + public Writable get(int i) { + return values[i]; + } + + /** + * The number of children in this Tuple. + */ + public int size() { + return values.length; + } + + /** + * {@inheritDoc} + */ + public boolean equals(Object other) { + if (other instanceof TupleWritable) { + TupleWritable that = (TupleWritable)other; + if (this.size() != that.size() || this.written != that.written) { + return false; + } + for (int i = 0; i < values.length; ++i) { + if (!has(i)) continue; + if (!values[i].equals(that.get(i))) { + return false; + } + } + return true; + } + return false; + } + + public int hashCode() { + assert false : "hashCode not designed"; + return (int)written; + } + + /** + * Return an iterator over the elements in this tuple. + * Note that this doesn't flatten the tuple; one may receive tuples + * from this iterator. + */ + public Iterator iterator() { + final TupleWritable t = this; + return new Iterator() { + long i = written; + long last = 0L; + public boolean hasNext() { + return 0L != i; + } + public Writable next() { + last = Long.lowestOneBit(i); + if (0 == last) + throw new NoSuchElementException(); + i ^= last; + // numberOfTrailingZeros rtn 64 if lsb set + return t.get(Long.numberOfTrailingZeros(last) % 64); + } + public void remove() { + t.written ^= last; + if (t.has(Long.numberOfTrailingZeros(last))) { + throw new IllegalStateException("Attempt to remove non-existent val"); + } + } + }; + } + + /** + * Convert Tuple to String as in the following. + * [,,...,] + */ + public String toString() { + StringBuffer buf = new StringBuffer("["); + for (int i = 0; i < values.length; ++i) { + buf.append(has(i) ? values[i].toString() : ""); + buf.append(","); + } + if (values.length != 0) + buf.setCharAt(buf.length() - 1, ']'); + else + buf.append(']'); + return buf.toString(); + } + + // Writable + + /** Writes each Writable to out. + * TupleWritable format: + * {@code + * ...... 
+ * } + */ + public void write(DataOutput out) throws IOException { + WritableUtils.writeVInt(out, values.length); + WritableUtils.writeVLong(out, written); + for (int i = 0; i < values.length; ++i) { + Text.writeString(out, values[i].getClass().getName()); + } + for (int i = 0; i < values.length; ++i) { + if (has(i)) { + values[i].write(out); + } + } + } + + /** + * {@inheritDoc} + */ + @SuppressWarnings("unchecked") // No static typeinfo on Tuples + public void readFields(DataInput in) throws IOException { + int card = WritableUtils.readVInt(in); + values = new Writable[card]; + written = WritableUtils.readVLong(in); + Class[] cls = new Class[card]; + try { + for (int i = 0; i < card; ++i) { + cls[i] = Class.forName(Text.readString(in)).asSubclass(Writable.class); + } + for (int i = 0; i < card; ++i) { + values[i] = cls[i].newInstance(); + if (has(i)) { + values[i].readFields(in); + } + } + } catch (ClassNotFoundException e) { + throw (IOException)new IOException("Failed tuple init").initCause(e); + } catch (IllegalAccessException e) { + throw (IOException)new IOException("Failed tuple init").initCause(e); + } catch (InstantiationException e) { + throw (IOException)new IOException("Failed tuple init").initCause(e); + } + } + + /** + * Record that the tuple contains an element at the position provided. + */ + void setWritten(int i) { + written |= 1L << i; + } + + /** + * Record that the tuple does not contain an element at the position + * provided. + */ + void clearWritten(int i) { + written &= -1 ^ (1L << i); + } + + /** + * Clear any record of which writables have been written to, without + * releasing storage. + */ + void clearWritten() { + written = 0L; + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/join/WrappedRecordReader.java b/src/mapred/org/apache/hadoop/mapred/join/WrappedRecordReader.java new file mode 100644 index 0000000..9ff6504 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/join/WrappedRecordReader.java @@ -0,0 +1,206 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.join; + +import java.io.IOException; + +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.io.WritableComparator; +import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.mapred.RecordReader; + +/** + * Proxy class for a RecordReader participating in the join framework. + * This class keeps track of the "head" key-value pair for the + * provided RecordReader and keeps a store of values matching a key when + * this source is participating in a join. 
+ */ +public class WrappedRecordReader + implements ComposableRecordReader { + + private boolean empty = false; + private RecordReader rr; + private int id; // index at which values will be inserted in collector + + private K khead; // key at the top of this RR + private U vhead; // value assoc with khead + private WritableComparator cmp; + + private ResetableIterator vjoin; + + /** + * For a given RecordReader rr, occupy position id in collector. + */ + WrappedRecordReader(int id, RecordReader rr, + Class cmpcl) throws IOException { + this.id = id; + this.rr = rr; + khead = rr.createKey(); + vhead = rr.createValue(); + try { + cmp = (null == cmpcl) + ? WritableComparator.get(khead.getClass()) + : cmpcl.newInstance(); + } catch (InstantiationException e) { + throw (IOException)new IOException().initCause(e); + } catch (IllegalAccessException e) { + throw (IOException)new IOException().initCause(e); + } + vjoin = new StreamBackedIterator(); + next(); + } + + /** {@inheritDoc} */ + public int id() { + return id; + } + + /** + * Return the key at the head of this RR. + */ + public K key() { + return khead; + } + + /** + * Clone the key at the head of this RR into the object supplied. + */ + public void key(K qkey) throws IOException { + WritableUtils.cloneInto(qkey, khead); + } + + /** + * Return true if the RR- including the k,v pair stored in this object- + * is exhausted. + */ + public boolean hasNext() { + return !empty; + } + + /** + * Skip key-value pairs with keys less than or equal to the key provided. + */ + public void skip(K key) throws IOException { + if (hasNext()) { + while (cmp.compare(khead, key) <= 0 && next()); + } + } + + /** + * Read the next k,v pair into the head of this object; return true iff + * the RR and this are exhausted. + */ + protected boolean next() throws IOException { + empty = !rr.next(khead, vhead); + return hasNext(); + } + + /** + * Add an iterator to the collector at the position occupied by this + * RecordReader over the values in this stream paired with the key + * provided (ie register a stream of values from this source matching K + * with a collector). + */ + // JoinCollector comes from parent, which has + @SuppressWarnings("unchecked") // no static type for the slot this sits in + public void accept(CompositeRecordReader.JoinCollector i, K key) + throws IOException { + vjoin.clear(); + if (0 == cmp.compare(key, khead)) { + do { + vjoin.add(vhead); + } while (next() && 0 == cmp.compare(key, khead)); + } + i.add(id, vjoin); + } + + /** + * Write key-value pair at the head of this stream to the objects provided; + * get next key-value pair from proxied RR. + */ + public boolean next(K key, U value) throws IOException { + if (hasNext()) { + WritableUtils.cloneInto(key, khead); + WritableUtils.cloneInto(value, vhead); + next(); + return true; + } + return false; + } + + /** + * Request new key from proxied RR. + */ + public K createKey() { + return rr.createKey(); + } + + /** + * Request new value from proxied RR. + */ + public U createValue() { + return rr.createValue(); + } + + /** + * Request progress from proxied RR. + */ + public float getProgress() throws IOException { + return rr.getProgress(); + } + + /** + * Request position from proxied RR. + */ + public long getPos() throws IOException { + return rr.getPos(); + } + + /** + * Forward close request to proxied RR. + */ + public void close() throws IOException { + rr.close(); + } + + /** + * Implement Comparable contract (compare key at head of proxied RR + * with that of another). 
+ */ + public int compareTo(ComposableRecordReader other) { + return cmp.compare(key(), other.key()); + } + + /** + * Return true iff compareTo(other) retn true. + */ + @SuppressWarnings("unchecked") // Explicit type check prior to cast + public boolean equals(Object other) { + return other instanceof ComposableRecordReader + && 0 == compareTo((ComposableRecordReader)other); + } + + public int hashCode() { + assert false : "hashCode not designed"; + return 42; + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/join/package.html b/src/mapred/org/apache/hadoop/mapred/join/package.html new file mode 100644 index 0000000..77622a9 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/join/package.html @@ -0,0 +1,105 @@ + + + + + + +

Given a set of sorted datasets keyed with the same class and yielding equal +partitions, it is possible to effect a join of those datasets prior to the map. +This could save costs in re-partitioning, sorting, shuffling, and writing out +data required in the general case.

+ +

Interface

+ +

The attached code offers the following interface to users of these +classes.

+ + + + + + + + + +
property                     required   value
mapred.join.expr             yes        Join expression to effect over input data
mapred.join.keycomparator    no         WritableComparator class to use for comparing keys
mapred.join.define.<ident>   no         Class mapped to identifier in join expression
+ +

The join expression understands the following grammar:

+ +
func ::= <ident>([<func>,]*<func>)
+func ::= tbl(<class>,"<path>");
+
+
+ +

Operations included in this patch are partitioned into one of two types: +join operations emitting tuples and "multi-filter" operations emitting a +single value from (but not necessarily included in) a set of input values. +For a given key, each operation will consider the cross product of all +values for all sources at that node.

+ +
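For the tuple-emitting case, the map task receives one TupleWritable per joined key. The sketch below is illustrative only and not part of this patch: the class name and the Text key and value types are assumptions. It forwards every value present in the tuple using only the TupleWritable accessors defined in this package.

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.join.TupleWritable;

public class TupleForwardingMapper extends MapReduceBase
    implements Mapper<Text, TupleWritable, Text, Text> {

  public void map(Text key, TupleWritable value,
      OutputCollector<Text, Text> output, Reporter reporter)
      throws IOException {
    // One position per source in the join expression; has(i) reports whether
    // source i contributed a value for this key (it may not, under an outer join).
    for (int i = 0; i < value.size(); ++i) {
      if (value.has(i)) {
        output.collect(key, new Text(value.get(i).toString()));
      }
    }
  }
}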

Identifiers supported by default:

+ + + + + + + +
identifier   type          description
inner        Join          Full inner join
outer        Join          Full outer join
override     MultiFilter   For a given key, prefer values from the rightmost source
+ +

A user of this class must set the InputFormat for the job to +CompositeInputFormat and define a join expression accepted by the +preceding grammar. For example, both of the following are acceptable:

+ +
inner(tbl(org.apache.hadoop.mapred.SequenceFileInputFormat.class,
+          "hdfs://host:8020/foo/bar"),
+      tbl(org.apache.hadoop.mapred.SequenceFileInputFormat.class,
+          "hdfs://host:8020/foo/baz"))
+
+outer(override(tbl(org.apache.hadoop.mapred.SequenceFileInputFormat.class,
+                   "hdfs://host:8020/foo/bar"),
+               tbl(org.apache.hadoop.mapred.SequenceFileInputFormat.class,
+                   "hdfs://host:8020/foo/baz")),
+      tbl(org.apache.hadoop.mapred.SequenceFileInputFormat.class,
+          "hdfs://host:8020/foo/rab"))
+
+ +

CompositeInputFormat includes a handful of convenience methods to +aid construction of these verbose statements.

+ +
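A minimal driver sketch, assuming SequenceFile sources, the mapper sketched earlier, and the compose(op, inputFormatClass, paths...) convenience method; the driver class name, output path, and output types are placeholders. compose builds a join expression of the same shape as the first hand-written example above.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.mapred.join.CompositeInputFormat;

public class InnerJoinDriver {
  public static void main(String[] args) throws IOException {
    JobConf job = new JobConf(InnerJoinDriver.class);
    job.setJobName("map-side inner join");
    // The join happens before the map, so the composite expression replaces
    // the usual input path configuration.
    job.setInputFormat(CompositeInputFormat.class);
    job.set("mapred.join.expr",
        CompositeInputFormat.compose("inner", SequenceFileInputFormat.class,
            "hdfs://host:8020/foo/bar", "hdfs://host:8020/foo/baz"));
    job.setMapperClass(TupleForwardingMapper.class);  // sketched above
    job.setNumReduceTasks(0);                         // map-only join
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);
    FileOutputFormat.setOutputPath(job, new Path("hdfs://host:8020/foo/out"));
    JobClient.runJob(job);
  }
}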

As in the second example, joins may be nested. Users may provide a +comparator class in the mapred.join.keycomparator property to specify +the ordering of their keys, or accept the default comparator as returned by +WritableComparator.get(keyclass).

+ +
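For instance, the driver above could register its own ordering before submitting the job. This is a fragment-level sketch; DescendingTextComparator is a hypothetical user class extending org.apache.hadoop.io.WritableComparator.

// Hypothetical comparator class; any WritableComparator subclass will do.
job.setClass("mapred.join.keycomparator",
    DescendingTextComparator.class, WritableComparator.class);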

Users can specify their own join operations, typically by overriding
+JoinRecordReader or MultiFilterRecordReader and mapping that
+class to an identifier in the join expression using the
+mapred.join.define.<ident> property, where <ident> is
+the identifier appearing in the join expression. Users may elect to emit or
+modify values passing through their join operation. Consulting the existing
+operations for guidance is recommended. Adding arguments is considerably more
+complex (and only partially supported), as one must also add a Node
+type to the parse tree. One is probably better off extending
+RecordReader in most cases. A sketch of a user-defined operation built this way follows.
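As an illustration only (none of the names below appear in this patch), a hypothetical "leftmost wins" operation can be built by extending MultiFilterRecordReader and mapped to the identifier preferleft:

import java.io.IOException;

import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.join.MultiFilterRecordReader;
import org.apache.hadoop.mapred.join.TupleWritable;

public class PreferLeftRecordReader<K extends WritableComparable, V extends Writable>
    extends MultiFilterRecordReader<K, V> {

  // The (int, JobConf, int, Class) signature is the one the expression parser
  // uses when instantiating a composite RecordReader.
  public PreferLeftRecordReader(int id, JobConf conf, int capacity,
      Class<? extends WritableComparator> cmpcl) throws IOException {
    super(id, conf, capacity, cmpcl);
  }

  // The tuple iterator yields positions in ascending order, so next() returns
  // the value from the lowest-numbered source present for this key.
  @SuppressWarnings("unchecked")
  protected V emit(TupleWritable dst) throws IOException {
    return (V) dst.iterator().next();
  }
}

Wiring it into a job is then a matter of mapping the identifier to the class and using that identifier in the expression, for example:

job.setClass("mapred.join.define.preferleft",
    PreferLeftRecordReader.class, ComposableRecordReader.class);
job.set("mapred.join.expr",
    CompositeInputFormat.compose("preferleft", SequenceFileInputFormat.class,
        "hdfs://host:8020/foo/bar", "hdfs://host:8020/foo/baz"));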

+ +JIRA + + + + diff --git a/src/mapred/org/apache/hadoop/mapred/lib/Chain.java b/src/mapred/org/apache/hadoop/mapred/lib/Chain.java new file mode 100644 index 0000000..847ec24 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/Chain.java @@ -0,0 +1,543 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred.lib; + +import org.apache.hadoop.io.DataOutputBuffer; +import org.apache.hadoop.io.Stringifier; +import org.apache.hadoop.io.DefaultStringifier; +import org.apache.hadoop.io.serializer.Deserializer; +import org.apache.hadoop.io.serializer.Serialization; +import org.apache.hadoop.io.serializer.SerializationFactory; +import org.apache.hadoop.io.serializer.Serializer; +import org.apache.hadoop.mapred.*; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.GenericsUtil; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + + +/** + * The Chain class provides all the common functionality for the + * {@link ChainMapper} and the {@link ChainReducer} classes. + */ +class Chain { + private static final String CHAIN_MAPPER = "chain.mapper"; + private static final String CHAIN_REDUCER = "chain.reducer"; + + private static final String CHAIN_MAPPER_SIZE = ".size"; + private static final String CHAIN_MAPPER_CLASS = ".mapper.class."; + private static final String CHAIN_MAPPER_CONFIG = ".mapper.config."; + private static final String CHAIN_REDUCER_CLASS = ".reducer.class"; + private static final String CHAIN_REDUCER_CONFIG = ".reducer.config"; + + private static final String MAPPER_BY_VALUE = "chain.mapper.byValue"; + private static final String REDUCER_BY_VALUE = "chain.reducer.byValue"; + + private static final String MAPPER_INPUT_KEY_CLASS = + "chain.mapper.input.key.class"; + private static final String MAPPER_INPUT_VALUE_CLASS = + "chain.mapper.input.value.class"; + private static final String MAPPER_OUTPUT_KEY_CLASS = + "chain.mapper.output.key.class"; + private static final String MAPPER_OUTPUT_VALUE_CLASS = + "chain.mapper.output.value.class"; + private static final String REDUCER_INPUT_KEY_CLASS = + "chain.reducer.input.key.class"; + private static final String REDUCER_INPUT_VALUE_CLASS = + "chain.reducer.input.value.class"; + private static final String REDUCER_OUTPUT_KEY_CLASS = + "chain.reducer.output.key.class"; + private static final String REDUCER_OUTPUT_VALUE_CLASS = + "chain.reducer.output.value.class"; + + private boolean isMap; + + private JobConf chainJobConf; + + private List mappers = new ArrayList(); + private Reducer reducer; + + // to cache the key/value output class serializations for each chain element + // to avoid everytime lookup. 
+ private List mappersKeySerialization = + new ArrayList(); + private List mappersValueSerialization = + new ArrayList(); + private Serialization reducerKeySerialization; + private Serialization reducerValueSerialization; + + /** + * Creates a Chain instance configured for a Mapper or a Reducer. + * + * @param isMap TRUE indicates the chain is for a Mapper, FALSE that is for a + * Reducer. + */ + Chain(boolean isMap) { + this.isMap = isMap; + } + + /** + * Returns the prefix to use for the configuration of the chain depending + * if it is for a Mapper or a Reducer. + * + * @param isMap TRUE for Mapper, FALSE for Reducer. + * @return the prefix to use. + */ + private static String getPrefix(boolean isMap) { + return (isMap) ? CHAIN_MAPPER : CHAIN_REDUCER; + } + + /** + * Creates a {@link JobConf} for one of the Maps or Reduce in the chain. + *

+ * It creates a new JobConf using the chain job's JobConf as base and adds to + * it the configuration properties for the chain element. The keys of the + * chain element jobConf have precedence over the given JobConf. + * + * @param jobConf the chain job's JobConf. + * @param confKey the key for chain element configuration serialized in the + * chain job's JobConf. + * @return a new JobConf aggregating the chain job's JobConf with the chain + * element configuration properties. + */ + private static JobConf getChainElementConf(JobConf jobConf, String confKey) { + JobConf conf; + try { + Stringifier stringifier = + new DefaultStringifier(jobConf, JobConf.class); + conf = stringifier.fromString(jobConf.get(confKey, null)); + } catch (IOException ioex) { + throw new RuntimeException(ioex); + } + // we have to do this because the Writable desearialization clears all + // values set in the conf making not possible do do a new JobConf(jobConf) + // in the creation of the conf above + jobConf = new JobConf(jobConf); + + for(Map.Entry entry : conf) { + jobConf.set(entry.getKey(), entry.getValue()); + } + return jobConf; + } + + /** + * Adds a Mapper class to the chain job's JobConf. + *

+ * The configuration properties of the chain job have precedence over the + * configuration properties of the Mapper. + * + * @param isMap indicates if the Chain is for a Mapper or for a + * Reducer. + * @param jobConf chain job's JobConf to add the Mapper class. + * @param klass the Mapper class to add. + * @param inputKeyClass mapper input key class. + * @param inputValueClass mapper input value class. + * @param outputKeyClass mapper output key class. + * @param outputValueClass mapper output value class. + * @param byValue indicates if key/values should be passed by value + * to the next Mapper in the chain, if any. + * @param mapperConf a JobConf with the configuration for the Mapper + * class. It is recommended to use a JobConf without default values using the + * JobConf(boolean loadDefaults) constructor with FALSE. + */ + public static void addMapper(boolean isMap, JobConf jobConf, + Class> klass, + Class inputKeyClass, + Class inputValueClass, + Class outputKeyClass, + Class outputValueClass, + boolean byValue, JobConf mapperConf) { + String prefix = getPrefix(isMap); + + // if a reducer chain check the Reducer has been already set + if (!isMap) { + if (jobConf.getClass(prefix + CHAIN_REDUCER_CLASS, + Reducer.class) == null) { + throw new IllegalStateException( + "A Mapper can be added to the chain only after the Reducer has " + + "been set"); + } + } + int index = jobConf.getInt(prefix + CHAIN_MAPPER_SIZE, 0); + jobConf.setClass(prefix + CHAIN_MAPPER_CLASS + index, klass, Mapper.class); + + // if it is a reducer chain and the first Mapper is being added check the + // key and value input classes of the mapper match those of the reducer + // output. + if (!isMap && index == 0) { + JobConf reducerConf = + getChainElementConf(jobConf, prefix + CHAIN_REDUCER_CONFIG); + if (! inputKeyClass.isAssignableFrom( + reducerConf.getClass(REDUCER_OUTPUT_KEY_CLASS, null))) { + throw new IllegalArgumentException("The Reducer output key class does" + + " not match the Mapper input key class"); + } + if (! inputValueClass.isAssignableFrom( + reducerConf.getClass(REDUCER_OUTPUT_VALUE_CLASS, null))) { + throw new IllegalArgumentException("The Reducer output value class" + + " does not match the Mapper input value class"); + } + } else if (index > 0) { + // check the that the new Mapper in the chain key and value input classes + // match those of the previous Mapper output. + JobConf previousMapperConf = + getChainElementConf(jobConf, prefix + CHAIN_MAPPER_CONFIG + + (index - 1)); + if (! inputKeyClass.isAssignableFrom( + previousMapperConf.getClass(MAPPER_OUTPUT_KEY_CLASS, null))) { + throw new IllegalArgumentException("The Mapper output key class does" + + " not match the previous Mapper input key class"); + } + if (! inputValueClass.isAssignableFrom( + previousMapperConf.getClass(MAPPER_OUTPUT_VALUE_CLASS, null))) { + throw new IllegalArgumentException("The Mapper output value class" + + " does not match the previous Mapper input value class"); + } + } + + // if the Mapper does not have a private JobConf create an empty one + if (mapperConf == null) { + // using a JobConf without defaults to make it lightweight. + // still the chain JobConf may have all defaults and this conf is + // overlapped to the chain JobConf one. 
+ mapperConf = new JobConf(true); + } + + // store in the private mapper conf the input/output classes of the mapper + // and if it works by value or by reference + mapperConf.setBoolean(MAPPER_BY_VALUE, byValue); + mapperConf.setClass(MAPPER_INPUT_KEY_CLASS, inputKeyClass, Object.class); + mapperConf.setClass(MAPPER_INPUT_VALUE_CLASS, inputValueClass, + Object.class); + mapperConf.setClass(MAPPER_OUTPUT_KEY_CLASS, outputKeyClass, Object.class); + mapperConf.setClass(MAPPER_OUTPUT_VALUE_CLASS, outputValueClass, + Object.class); + + // serialize the private mapper jobconf in the chain jobconf. + Stringifier stringifier = + new DefaultStringifier(jobConf, JobConf.class); + try { + jobConf.set(prefix + CHAIN_MAPPER_CONFIG + index, + stringifier.toString(new JobConf(mapperConf))); + } + catch (IOException ioEx) { + throw new RuntimeException(ioEx); + } + + // increment the chain counter + jobConf.setInt(prefix + CHAIN_MAPPER_SIZE, index + 1); + } + + /** + * Sets the Reducer class to the chain job's JobConf. + *

+ * The configuration properties of the chain job have precedence over the + * configuration properties of the Reducer. + * + * @param jobConf chain job's JobConf to add the Reducer class. + * @param klass the Reducer class to add. + * @param inputKeyClass reducer input key class. + * @param inputValueClass reducer input value class. + * @param outputKeyClass reducer output key class. + * @param outputValueClass reducer output value class. + * @param byValue indicates if key/values should be passed by value + * to the next Mapper in the chain, if any. + * @param reducerConf a JobConf with the configuration for the Reducer + * class. It is recommended to use a JobConf without default values using the + * JobConf(boolean loadDefaults) constructor with FALSE. + */ + public static void setReducer(JobConf jobConf, + Class> klass, + Class inputKeyClass, + Class inputValueClass, + Class outputKeyClass, + Class outputValueClass, + boolean byValue, JobConf reducerConf) { + String prefix = getPrefix(false); + + if (jobConf.getClass(prefix + CHAIN_REDUCER_CLASS, null) != null) { + throw new IllegalStateException("Reducer has been already set"); + } + + jobConf.setClass(prefix + CHAIN_REDUCER_CLASS, klass, Reducer.class); + + // if the Reducer does not have a private JobConf create an empty one + if (reducerConf == null) { + // using a JobConf without defaults to make it lightweight. + // still the chain JobConf may have all defaults and this conf is + // overlapped to the chain JobConf one. + reducerConf = new JobConf(false); + } + + // store in the private reducer conf the input/output classes of the reducer + // and if it works by value or by reference + reducerConf.setBoolean(MAPPER_BY_VALUE, byValue); + reducerConf.setClass(REDUCER_INPUT_KEY_CLASS, inputKeyClass, Object.class); + reducerConf.setClass(REDUCER_INPUT_VALUE_CLASS, inputValueClass, + Object.class); + reducerConf.setClass(REDUCER_OUTPUT_KEY_CLASS, outputKeyClass, + Object.class); + reducerConf.setClass(REDUCER_OUTPUT_VALUE_CLASS, outputValueClass, + Object.class); + + // serialize the private mapper jobconf in the chain jobconf. + Stringifier stringifier = + new DefaultStringifier(jobConf, JobConf.class); + try { + jobConf.set(prefix + CHAIN_REDUCER_CONFIG, + stringifier.toString(new JobConf(reducerConf))); + } + catch (IOException ioEx) { + throw new RuntimeException(ioEx); + } + } + + /** + * Configures all the chain elements for the task. + * + * @param jobConf chain job's JobConf. 
+ */ + public void configure(JobConf jobConf) { + String prefix = getPrefix(isMap); + chainJobConf = jobConf; + SerializationFactory serializationFactory = + new SerializationFactory(chainJobConf); + int index = jobConf.getInt(prefix + CHAIN_MAPPER_SIZE, 0); + for (int i = 0; i < index; i++) { + Class klass = + jobConf.getClass(prefix + CHAIN_MAPPER_CLASS + i, null, Mapper.class); + JobConf mConf = + getChainElementConf(jobConf, prefix + CHAIN_MAPPER_CONFIG + i); + Mapper mapper = ReflectionUtils.newInstance(klass, mConf); + mappers.add(mapper); + + if (mConf.getBoolean(MAPPER_BY_VALUE, true)) { + mappersKeySerialization.add(serializationFactory.getSerialization( + mConf.getClass(MAPPER_OUTPUT_KEY_CLASS, null))); + mappersValueSerialization.add(serializationFactory.getSerialization( + mConf.getClass(MAPPER_OUTPUT_VALUE_CLASS, null))); + } else { + mappersKeySerialization.add(null); + mappersValueSerialization.add(null); + } + } + Class klass = + jobConf.getClass(prefix + CHAIN_REDUCER_CLASS, null, Reducer.class); + if (klass != null) { + JobConf rConf = + getChainElementConf(jobConf, prefix + CHAIN_REDUCER_CONFIG); + reducer = ReflectionUtils.newInstance(klass, rConf); + if (rConf.getBoolean(REDUCER_BY_VALUE, true)) { + reducerKeySerialization = serializationFactory + .getSerialization(rConf.getClass(REDUCER_OUTPUT_KEY_CLASS, null)); + reducerValueSerialization = serializationFactory + .getSerialization(rConf.getClass(REDUCER_OUTPUT_VALUE_CLASS, null)); + } else { + reducerKeySerialization = null; + reducerValueSerialization = null; + } + } + } + + /** + * Returns the chain job conf. + * + * @return the chain job conf. + */ + protected JobConf getChainJobConf() { + return chainJobConf; + } + + /** + * Returns the first Mapper instance in the chain. + * + * @return the first Mapper instance in the chain or NULL if none. + */ + public Mapper getFirstMap() { + return (mappers.size() > 0) ? mappers.get(0) : null; + } + + /** + * Returns the Reducer instance in the chain. + * + * @return the Reducer instance in the chain or NULL if none. + */ + public Reducer getReducer() { + return reducer; + } + + /** + * Returns the OutputCollector to be used by a Mapper instance in the chain. + * + * @param mapperIndex index of the Mapper instance to get the OutputCollector. + * @param output the original OutputCollector of the task. + * @param reporter the reporter of the task. + * @return the OutputCollector to be used in the chain. + */ + @SuppressWarnings({"unchecked"}) + public OutputCollector getMapperCollector(int mapperIndex, + OutputCollector output, + Reporter reporter) { + Serialization keySerialization = mappersKeySerialization.get(mapperIndex); + Serialization valueSerialization = + mappersValueSerialization.get(mapperIndex); + return new ChainOutputCollector(mapperIndex, keySerialization, + valueSerialization, output, reporter); + } + + /** + * Returns the OutputCollector to be used by a Mapper instance in the chain. + * + * @param output the original OutputCollector of the task. + * @param reporter the reporter of the task. + * @return the OutputCollector to be used in the chain. + */ + @SuppressWarnings({"unchecked"}) + public OutputCollector getReducerCollector(OutputCollector output, + Reporter reporter) { + return new ChainOutputCollector(reducerKeySerialization, + reducerValueSerialization, output, + reporter); + } + + /** + * Closes all the chain elements. + * + * @throws IOException thrown if any of the chain elements threw an + * IOException exception. 
+ */ + public void close() throws IOException { + for (Mapper map : mappers) { + map.close(); + } + if (reducer != null) { + reducer.close(); + } + } + + // using a ThreadLocal to reuse the ByteArrayOutputStream used for ser/deser + // it has to be a thread local because if not it would break if used from a + // MultiThreadedMapRunner. + private ThreadLocal threadLocalDataOutputBuffer = + new ThreadLocal() { + protected DataOutputBuffer initialValue() { + return new DataOutputBuffer(1024); + } + }; + + /** + * OutputCollector implementation used by the chain tasks. + *

+ * If it is not the end of the chain, a {@link #collect} invocation invokes + * the next Mapper in the chain. If it is the end of the chain the task + * OutputCollector is called. + */ + private class ChainOutputCollector implements OutputCollector { + private int nextMapperIndex; + private Serialization keySerialization; + private Serialization valueSerialization; + private OutputCollector output; + private Reporter reporter; + + /* + * Constructor for Mappers + */ + public ChainOutputCollector(int index, Serialization keySerialization, + Serialization valueSerialization, + OutputCollector output, Reporter reporter) { + this.nextMapperIndex = index + 1; + this.keySerialization = keySerialization; + this.valueSerialization = valueSerialization; + this.output = output; + this.reporter = reporter; + } + + /* + * Constructor for Reducer + */ + public ChainOutputCollector(Serialization keySerialization, + Serialization valueSerialization, + OutputCollector output, Reporter reporter) { + this.nextMapperIndex = 0; + this.keySerialization = keySerialization; + this.valueSerialization = valueSerialization; + this.output = output; + this.reporter = reporter; + } + + @SuppressWarnings({"unchecked"}) + public void collect(K key, V value) throws IOException { + if (nextMapperIndex < mappers.size()) { + // there is a next mapper in chain + + // only need to ser/deser if there is next mapper in the chain + if (keySerialization != null) { + key = makeCopyForPassByValue(keySerialization, key); + value = makeCopyForPassByValue(valueSerialization, value); + } + + // gets ser/deser and mapper of next in chain + Serialization nextKeySerialization = + mappersKeySerialization.get(nextMapperIndex); + Serialization nextValueSerialization = + mappersValueSerialization.get(nextMapperIndex); + Mapper nextMapper = mappers.get(nextMapperIndex); + + // invokes next mapper in chain + nextMapper.map(key, value, + new ChainOutputCollector(nextMapperIndex, + nextKeySerialization, + nextValueSerialization, + output, reporter), + reporter); + } else { + // end of chain, user real output collector + output.collect(key, value); + } + } + + private E makeCopyForPassByValue(Serialization serialization, + E obj) throws IOException { + Serializer ser = + serialization.getSerializer(GenericsUtil.getClass(obj)); + Deserializer deser = + serialization.getDeserializer(GenericsUtil.getClass(obj)); + + DataOutputBuffer dof = threadLocalDataOutputBuffer.get(); + + dof.reset(); + ser.open(dof); + ser.serialize(obj); + ser.close(); + obj = ReflectionUtils.newInstance(GenericsUtil.getClass(obj), + getChainJobConf()); + ByteArrayInputStream bais = + new ByteArrayInputStream(dof.getData(), 0, dof.getLength()); + deser.open(bais); + deser.deserialize(obj); + deser.close(); + return obj; + } + + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/ChainMapper.java b/src/mapred/org/apache/hadoop/mapred/lib/ChainMapper.java new file mode 100644 index 0000000..b2bc246 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/ChainMapper.java @@ -0,0 +1,178 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred.lib; + +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.Mapper; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.Reporter; + +import java.io.IOException; + +/** + * The ChainMapper class allows the use of multiple Mapper classes within a single + * Map task. + *

+ * The Mapper classes are invoked in a chained (or piped) fashion: the output of + * the first becomes the input of the second, and so on until the last Mapper; + * the output of the last Mapper will be written to the task's output. + *

+ * The key functionality of this feature is that the Mappers in the chain do not + * need to be aware that they are executed in a chain. This enables having + * reusable specialized Mappers that can be combined to perform composite + * operations within a single task. + *

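For illustration, a chain element can be an ordinary old-API Mapper with no chain-specific code. The sketch below is only a guess at what a class like the AMap referenced in the usage pattern further down might look like; the class name and the tab-separated record layout are assumptions, not part of this patch.

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

// A chain element is just an ordinary old-API Mapper; it contains no
// chain-specific code. "AMap" and the record layout are illustrative only.
public class AMap extends MapReduceBase
    implements Mapper<LongWritable, Text, Text, Text> {

  public void map(LongWritable key, Text value,
                  OutputCollector<Text, Text> output, Reporter reporter)
      throws IOException {
    // split each input line into a key (first field) and a value (the rest)
    String[] fields = value.toString().split("\t", 2);
    output.collect(new Text(fields[0]),
                   new Text(fields.length > 1 ? fields[1] : ""));
  }
}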
+ * Special care has to be taken when creating chains that the key/values output + * by a Mapper are valid for the following Mapper in the chain. It is assumed + * all Mappers and the Reducer in the chain use matching output and input key and + * value classes, as no conversion is done by the chaining code. + *

+ * Using the ChainMapper and the ChainReducer classes it is possible to compose + * Map/Reduce jobs that look like [MAP+ / REDUCE MAP*]. An + * immediate benefit of this pattern is a dramatic reduction in disk IO. + *

+ * IMPORTANT: There is no need to specify the output key/value classes for the + * ChainMapper; this is done by the addMapper call for the last mapper in the chain. + *

+ * ChainMapper usage pattern: + *

+ *

+ * ...
+ * conf.setJobName("chain");
+ * conf.setInputFormat(TextInputFormat.class);
+ * conf.setOutputFormat(TextOutputFormat.class);
+ * 

+ * JobConf mapAConf = new JobConf(false); + * ... + * ChainMapper.addMapper(conf, AMap.class, LongWritable.class, Text.class, + * Text.class, Text.class, true, mapAConf); + *

+ * JobConf mapBConf = new JobConf(false); + * ... + * ChainMapper.addMapper(conf, BMap.class, Text.class, Text.class, + * LongWritable.class, Text.class, false, mapBConf); + *

+ * JobConf reduceConf = new JobConf(false); + * ... + * ChainReducer.setReducer(conf, XReduce.class, LongWritable.class, Text.class, + * Text.class, Text.class, true, reduceConf); + *

+ * ChainReducer.addMapper(conf, CMap.class, Text.class, Text.class, + * LongWritable.class, Text.class, false, null); + *

+ * ChainReducer.addMapper(conf, DMap.class, LongWritable.class, Text.class, + * LongWritable.class, LongWritable.class, true, null); + *

+ * FileInputFormat.setInputPaths(conf, inDir); + * FileOutputFormat.setOutputPath(conf, outDir); + * ... + *

+ * JobClient jc = new JobClient(conf); + * RunningJob job = jc.submitJob(conf); + * ... + *

+ */ +public class ChainMapper implements Mapper { + + /** + * Adds a Mapper class to the chain job's JobConf. + *

+ * It has to be specified how key and values are passed from one element of + * the chain to the next: by value or by reference. If a Mapper relies on the + * assumption that the key and values are not modified by the collector, + * 'by value' must be used. If the Mapper does not rely on this assumption, + * 'by reference' can be used as an optimization to avoid the cost of + * serialization and deserialization. + *

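As a sketch of when 'by value' is required, consider an element that reuses its output objects across collect() calls. The class name and record format below are illustrative only; such an element should be added with byValue set to true so the chain hands copies to the next element.

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

// This hypothetical element reuses the same Text and LongWritable instances
// for every collect() call, so it assumes the collector does not hold on to
// or modify them. It should therefore be added to the chain with
// byValue = true, accepting the extra serialization cost.
public class TokenCountMap extends MapReduceBase
    implements Mapper<LongWritable, Text, Text, LongWritable> {

  private final Text word = new Text();
  private final LongWritable one = new LongWritable(1);

  public void map(LongWritable key, Text value,
                  OutputCollector<Text, LongWritable> output, Reporter reporter)
      throws IOException {
    for (String token : value.toString().split("\\s+")) {
      word.set(token);              // same object reused on every iteration
      output.collect(word, one);
    }
  }
}

It could be registered, for example, with ChainMapper.addMapper(conf, TokenCountMap.class, LongWritable.class, Text.class, Text.class, LongWritable.class, true, null).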
+ * For the added Mapper, the configuration given for it, + * mapperConf, has precedence over the job's JobConf. This + * precedence is in effect when the task is running. + *

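A minimal driver sketch of this precedence, assuming a hypothetical element class FilterMap and a made-up property name; only the private JobConf handed to addMapper carries the element-specific value.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.mapred.lib.ChainMapper;

public class PrivateConfExample {
  public static void main(String[] args) throws Exception {
    JobConf conf = new JobConf(PrivateConfExample.class);
    conf.setJobName("private-conf-example");
    conf.setInputFormat(TextInputFormat.class);
    conf.setOutputFormat(TextOutputFormat.class);
    conf.setNumReduceTasks(0);                 // map-only job for this sketch

    // FilterMap is a hypothetical Mapper<LongWritable, Text, LongWritable, Text>
    // whose configure(JobConf) reads the made-up property "filtermap.prefix".
    // While the task runs, FilterMap sees the value set here, even if the
    // job-level conf defines the same property with another value.
    JobConf filterConf = new JobConf(false);   // no defaults keeps it small
    filterConf.set("filtermap.prefix", "ERROR");
    ChainMapper.addMapper(conf, FilterMap.class, LongWritable.class, Text.class,
                          LongWritable.class, Text.class, true, filterConf);

    FileInputFormat.setInputPaths(conf, new Path(args[0]));
    FileOutputFormat.setOutputPath(conf, new Path(args[1]));
    JobClient.runJob(conf);
  }
}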
+ * IMPORTANT: There is no need to specify the output key/value classes for the + * ChainMapper; this is done by the addMapper call for the last mapper in the chain. + *

+ * + * @param job job's JobConf to add the Mapper class. + * @param klass the Mapper class to add. + * @param inputKeyClass mapper input key class. + * @param inputValueClass mapper input value class. + * @param outputKeyClass mapper output key class. + * @param outputValueClass mapper output value class. + * @param byValue indicates if key/values should be passed by value + * to the next Mapper in the chain, if any. + * @param mapperConf a JobConf with the configuration for the Mapper + * class. It is recommended to use a JobConf without default values using the + * JobConf(boolean loadDefaults) constructor with FALSE. + */ + public static void addMapper(JobConf job, + Class> klass, + Class inputKeyClass, + Class inputValueClass, + Class outputKeyClass, + Class outputValueClass, + boolean byValue, JobConf mapperConf) { + job.setMapperClass(ChainMapper.class); + job.setMapOutputKeyClass(outputKeyClass); + job.setMapOutputValueClass(outputValueClass); + Chain.addMapper(true, job, klass, inputKeyClass, inputValueClass, + outputKeyClass, outputValueClass, byValue, mapperConf); + } + + private Chain chain; + + /** + * Constructor. + */ + public ChainMapper() { + chain = new Chain(true); + } + + /** + * Configures the ChainMapper and all the Mappers in the chain. + *

+ * If this method is overridden super.configure(...) should be + * invoked at the beginning of the overriding method. + */ + public void configure(JobConf job) { + chain.configure(job); + } + + /** + * Chains the map(...) methods of the Mappers in the chain. + */ + @SuppressWarnings({"unchecked"}) + public void map(Object key, Object value, OutputCollector output, + Reporter reporter) throws IOException { + Mapper mapper = chain.getFirstMap(); + if (mapper != null) { + mapper.map(key, value, chain.getMapperCollector(0, output, reporter), + reporter); + } + } + + /** + * Closes the ChainMapper and all the Mappers in the chain. + *
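A sketch of the override contract stated here for configure() and below for close(), using a hypothetical subclass; the class name and the timing logic are assumptions for illustration only.

import java.io.IOException;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.lib.ChainMapper;

// Hypothetical subclass illustrating the documented contract: invoke
// super.configure(...) at the beginning so the chained Mappers get configured,
// and invoke super.close() at the end so they all get closed.
public class AuditingChainMapper extends ChainMapper {
  private long startMillis;

  public void configure(JobConf job) {
    super.configure(job);                    // configure the chain first
    startMillis = System.currentTimeMillis();
  }

  public void close() throws IOException {
    System.out.println("chain mapper ran for "
        + (System.currentTimeMillis() - startMillis) + " ms");
    super.close();                           // close the chained Mappers last
  }
}

Since ChainMapper.addMapper sets the job's mapper class to ChainMapper itself, a job that wants this subclass would also call job.setMapperClass(AuditingChainMapper.class) after the addMapper calls.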

+ * If this method is overriden super.close() should be + * invoked at the end of the overwriter method. + */ + public void close() throws IOException { + chain.close(); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/ChainReducer.java b/src/mapred/org/apache/hadoop/mapred/lib/ChainReducer.java new file mode 100644 index 0000000..f48ed46 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/ChainReducer.java @@ -0,0 +1,222 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred.lib; + +import org.apache.hadoop.mapred.*; + +import java.io.IOException; +import java.util.Iterator; + +/** + * The ChainReducer class allows to chain multiple Mapper classes after a + * Reducer within the Reducer task. + *

+ * For each record output by the Reducer, the Mapper classes are invoked in a + * chained (or piped) fashion: the output of the first becomes the input of the + * second, and so on until the last Mapper; the output of the last Mapper will + * be written to the task's output. + *

+ * The key functionality of this feature is that the Mappers in the chain do not + * need to be aware that they are executed after the Reducer or in a chain. + * This enables having reusable specialized Mappers that can be combined to + * perform composite operations within a single task. + *

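For illustration, a post-reduce element is again just an ordinary Mapper. The sketch below (class name, types, and the threshold are assumptions) filters the aggregated records the Reducer emits, without knowing it runs inside the reduce task.

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

// Hypothetical post-reduce element: it sees the (key, count) records emitted
// by the Reducer and drops the infrequent ones. It contains no code that is
// aware of running inside the reduce task after a Reducer.
public class DropRareCountsMap extends MapReduceBase
    implements Mapper<Text, LongWritable, Text, LongWritable> {

  public void map(Text key, LongWritable count,
                  OutputCollector<Text, LongWritable> output, Reporter reporter)
      throws IOException {
    if (count.get() >= 10) {          // keep only keys seen at least 10 times
      output.collect(key, count);
    }
  }
}

It could be attached after the setReducer call with something like ChainReducer.addMapper(conf, DropRareCountsMap.class, Text.class, LongWritable.class, Text.class, LongWritable.class, false, null).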
+ * Special care has to be taken when creating chains that the key/values output + * by a Mapper are valid for the following Mapper in the chain. It is assumed + * all Mappers and the Reducer in the chain use matching output and input key and + * value classes, as no conversion is done by the chaining code. + *

+ * Using the ChainMapper and the ChainReducer classes it is possible to compose + * Map/Reduce jobs that look like [MAP+ / REDUCE MAP*]. An + * immediate benefit of this pattern is a dramatic reduction in disk IO. + *

+ * IMPORTANT: There is no need to specify the output key/value classes for the + * ChainReducer; this is done by the setReducer or the addMapper call for the last + * element in the chain. + *

+ * ChainReducer usage pattern: + *

+ *

+ * ...
+ * conf.setJobName("chain");
+ * conf.setInputFormat(TextInputFormat.class);
+ * conf.setOutputFormat(TextOutputFormat.class);
+ * 

+ * JobConf mapAConf = new JobConf(false); + * ... + * ChainMapper.addMapper(conf, AMap.class, LongWritable.class, Text.class, + * Text.class, Text.class, true, mapAConf); + *

+ * JobConf mapBConf = new JobConf(false); + * ... + * ChainMapper.addMapper(conf, BMap.class, Text.class, Text.class, + * LongWritable.class, Text.class, false, mapBConf); + *

+ * JobConf reduceConf = new JobConf(false); + * ... + * ChainReducer.setReducer(conf, XReduce.class, LongWritable.class, Text.class, + * Text.class, Text.class, true, reduceConf); + *

+ * ChainReducer.addMapper(conf, CMap.class, Text.class, Text.class, + * LongWritable.class, Text.class, false, null); + *

+ * ChainReducer.addMapper(conf, DMap.class, LongWritable.class, Text.class, + * LongWritable.class, LongWritable.class, true, null); + *

+ * FileInputFormat.setInputPaths(conf, inDir); + * FileOutputFormat.setOutputPath(conf, outDir); + * ... + *

+ * JobClient jc = new JobClient(conf); + * RunningJob job = jc.submitJob(conf); + * ... + *

+ */ +public class ChainReducer implements Reducer { + + /** + * Sets the Reducer class to the chain job's JobConf. + *

+ * It has to be specified how key and values are passed from one element of + * the chain to the next: by value or by reference. If a Reducer relies on the + * assumption that the key and values are not modified by the collector, + * 'by value' must be used. If the Reducer does not rely on this assumption, + * 'by reference' can be used as an optimization to avoid the cost of + * serialization and deserialization. + *

+ * For the added Reducer, the configuration given for it, + * reducerConf, has precedence over the job's JobConf. This + * precedence is in effect when the task is running. + *

+ * IMPORTANT: There is no need to specify the output key/value classes for the + * ChainReducer, this is done by the setReducer or the addMapper for the last + * element in the chain. + * + * @param job job's JobConf to add the Reducer class. + * @param klass the Reducer class to add. + * @param inputKeyClass reducer input key class. + * @param inputValueClass reducer input value class. + * @param outputKeyClass reducer output key class. + * @param outputValueClass reducer output value class. + * @param byValue indicates if key/values should be passed by value + * to the next Mapper in the chain, if any. + * @param reducerConf a JobConf with the configuration for the Reducer + * class. It is recommended to use a JobConf without default values using the + * JobConf(boolean loadDefaults) constructor with FALSE. + */ + public static void setReducer(JobConf job, + Class> klass, + Class inputKeyClass, + Class inputValueClass, + Class outputKeyClass, + Class outputValueClass, + boolean byValue, JobConf reducerConf) { + job.setReducerClass(ChainReducer.class); + job.setOutputKeyClass(outputKeyClass); + job.setOutputValueClass(outputValueClass); + Chain.setReducer(job, klass, inputKeyClass, inputValueClass, outputKeyClass, + outputValueClass, byValue, reducerConf); + } + + /** + * Adds a Mapper class to the chain job's JobConf. + *

+ * It has to be specified how key and values are passed from one element of + * the chain to the next: by value or by reference. If a Mapper relies on the + * assumption that the key and values are not modified by the collector, + * 'by value' must be used. If the Mapper does not rely on this assumption, + * 'by reference' can be used as an optimization to avoid the cost of + * serialization and deserialization. + *

+ * For the added Mapper, the configuration given for it, + * mapperConf, has precedence over the job's JobConf. This + * precedence is in effect when the task is running. + *

+ * IMPORTANT: There is no need to specify the output key/value classes for the + * ChainMapper, this is done by the addMapper for the last mapper in the chain + * . + * + * @param job chain job's JobConf to add the Mapper class. + * @param klass the Mapper class to add. + * @param inputKeyClass mapper input key class. + * @param inputValueClass mapper input value class. + * @param outputKeyClass mapper output key class. + * @param outputValueClass mapper output value class. + * @param byValue indicates if key/values should be passed by value + * to the next Mapper in the chain, if any. + * @param mapperConf a JobConf with the configuration for the Mapper + * class. It is recommended to use a JobConf without default values using the + * JobConf(boolean loadDefaults) constructor with FALSE. + */ + public static void addMapper(JobConf job, + Class> klass, + Class inputKeyClass, + Class inputValueClass, + Class outputKeyClass, + Class outputValueClass, + boolean byValue, JobConf mapperConf) { + job.setOutputKeyClass(outputKeyClass); + job.setOutputValueClass(outputValueClass); + Chain.addMapper(false, job, klass, inputKeyClass, inputValueClass, + outputKeyClass, outputValueClass, byValue, mapperConf); + } + + private Chain chain; + + /** + * Constructor. + */ + public ChainReducer() { + chain = new Chain(false); + } + + /** + * Configures the ChainReducer, the Reducer and all the Mappers in the chain. + *

+ * If this method is overridden super.configure(...) should be + * invoked at the beginning of the overriding method. + */ + public void configure(JobConf job) { + chain.configure(job); + } + + /** + * Chains the reduce(...) method of the Reducer with the + * map(...) methods of the Mappers in the chain. + */ + @SuppressWarnings({"unchecked"}) + public void reduce(Object key, Iterator values, OutputCollector output, + Reporter reporter) throws IOException { + Reducer reducer = chain.getReducer(); + if (reducer != null) { + reducer.reduce(key, values, chain.getReducerCollector(output, reporter), + reporter); + } + } + + /** + * Closes the ChainReducer, the Reducer and all the Mappers in the chain. + *

+ * If this method is overriden super.close() should be + * invoked at the end of the overwriter method. + */ + public void close() throws IOException { + chain.close(); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/CombineFileInputFormat.java b/src/mapred/org/apache/hadoop/mapred/lib/CombineFileInputFormat.java new file mode 100644 index 0000000..fe1dc46 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/CombineFileInputFormat.java @@ -0,0 +1,693 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.LinkedList; +import java.util.HashSet; +import java.util.List; +import java.util.HashMap; +import java.util.Set; +import java.util.Iterator; +import java.util.Map; + +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.BlockLocation; +import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.io.compress.CompressionCodecFactory; +import org.apache.hadoop.net.NodeBase; +import org.apache.hadoop.net.NetworkTopology; + +import org.apache.hadoop.mapred.InputSplit; +import org.apache.hadoop.mapred.FileInputFormat; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.mapred.RecordReader; + +/** + * An abstract {@link org.apache.hadoop.mapred.InputFormat} that returns {@link CombineFileSplit}'s + * in {@link org.apache.hadoop.mapred.InputFormat#getSplits(JobConf, int)} method. + * Splits are constructed from the files under the input paths. + * A split cannot have files from different pools. + * Each split returned may contain blocks from different files. + * If a maxSplitSize is specified, then blocks on the same node are + * combined to form a single split. Blocks that are left over are + * then combined with other blocks in the same rack. + * If maxSplitSize is not specified, then blocks from the same rack + * are combined in a single split; no attempt is made to create + * node-local splits. + * If the maxSplitSize is equal to the block size, then this class + * is similar to the default spliting behaviour in Hadoop: each + * block is a locally processed split. + * Subclasses implement {@link org.apache.hadoop.mapred.InputFormat#getRecordReader(InputSplit, JobConf, Reporter)} + * to construct RecordReader's for CombineFileSplit's. 
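A minimal concrete subclass, as a sketch only: the class names, the 128 MB cap, and the choice of LineRecordReader for each chunk are assumptions, not part of this patch. It shows the usual pattern of delegating to CombineFileRecordReader with a per-chunk reader that exposes the (CombineFileSplit, Configuration, Reporter, Integer) constructor.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.LineRecordReader;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.lib.CombineFileInputFormat;
import org.apache.hadoop.mapred.lib.CombineFileRecordReader;
import org.apache.hadoop.mapred.lib.CombineFileSplit;

// Hypothetical subclass that packs many small text files into large splits
// and reads every chunk line by line.
public class CombineTextInputFormat
    extends CombineFileInputFormat<LongWritable, Text> {

  public CombineTextInputFormat() {
    setMaxSplitSize(128 * 1024 * 1024);   // cap each combined split at ~128 MB
  }

  @SuppressWarnings("unchecked")
  public RecordReader<LongWritable, Text> getRecordReader(
      InputSplit split, JobConf job, Reporter reporter) throws IOException {
    return new CombineFileRecordReader<LongWritable, Text>(job,
        (CombineFileSplit) split, reporter,
        (Class<RecordReader<LongWritable, Text>>) (Class<?>) ChunkLineReader.class);
  }

  // Per-chunk reader with the constructor that CombineFileRecordReader
  // invokes reflectively; it delegates to a plain LineRecordReader.
  public static class ChunkLineReader implements RecordReader<LongWritable, Text> {
    private final LineRecordReader delegate;

    public ChunkLineReader(CombineFileSplit split, Configuration conf,
                           Reporter reporter, Integer idx) throws IOException {
      FileSplit fileSplit = new FileSplit(split.getPath(idx),
          split.getOffset(idx), split.getLength(idx), (String[]) null);
      delegate = new LineRecordReader(conf, fileSplit);
    }
    public boolean next(LongWritable key, Text value) throws IOException {
      return delegate.next(key, value);
    }
    public LongWritable createKey() { return delegate.createKey(); }
    public Text createValue() { return delegate.createValue(); }
    public long getPos() throws IOException { return delegate.getPos(); }
    public float getProgress() throws IOException { return delegate.getProgress(); }
    public void close() throws IOException { delegate.close(); }
  }
}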
+ * @see CombineFileSplit + */ +public abstract class CombineFileInputFormat + extends FileInputFormat { + + // ability to limit the size of a single split + private long maxSplitSize = 0; + private long minSplitSizeNode = 0; + private long minSplitSizeRack = 0; + + // A pool of input paths filters. A split cannot have blocks from files + // across multiple pools. + private ArrayList pools = new ArrayList(); + + // mapping from a rack name to the set of Nodes in the rack + private HashMap> rackToNodes = + new HashMap>(); + + // Whether to pass only the path component of the URI to the pool filters + private boolean poolFilterPathOnly = true; + + /** + * Specify the maximum size (in bytes) of each split. Each split is + * approximately equal to the specified size. + */ + protected void setMaxSplitSize(long maxSplitSize) { + this.maxSplitSize = maxSplitSize; + } + + /** + * Specify the minimum size (in bytes) of each split per node. + * This applies to data that is left over after combining data on a single + * node into splits that are of maximum size specified by maxSplitSize. + * This leftover data will be combined into its own split if its size + * exceeds minSplitSizeNode. + */ + protected void setMinSplitSizeNode(long minSplitSizeNode) { + this.minSplitSizeNode = minSplitSizeNode; + } + + /** + * Specify the minimum size (in bytes) of each split per rack. + * This applies to data that is left over after combining data on a single + * rack into splits that are of maximum size specified by maxSplitSize. + * This leftover data will be combined into its own split if its size + * exceeds minSplitSizeRack. + */ + protected void setMinSplitSizeRack(long minSplitSizeRack) { + this.minSplitSizeRack = minSplitSizeRack; + } + + /** + * Create a new pool and add the filters to it. + * A split cannot have files from different pools. + */ + protected void createPool(JobConf conf, List filters) { + pools.add(new MultiPathFilter(filters)); + } + + /** + * Create a new pool and add the filters to it. + * A pathname can satisfy any one of the specified filters. + * A split cannot have files from different pools. + */ + protected void createPool(JobConf conf, PathFilter... filters) { + MultiPathFilter multi = new MultiPathFilter(); + for (PathFilter f: filters) { + multi.add(f); + } + pools.add(multi); + } + + private CompressionCodecFactory compressionCodecs = + new CompressionCodecFactory(new JobConf()); + + @Override + protected boolean isSplitable(FileSystem ignored, Path file) { + return compressionCodecs.getCodec(file) == null; + } + + /** + * default constructor + */ + public CombineFileInputFormat() { + } + + /** + * + * @param pathOnly If true, pass only the path component of input paths (i.e. 
+ * strip out the scheme and authority) to the pool filters + */ + protected void setPoolFilterPathOnly(boolean pathOnly) { + poolFilterPathOnly = pathOnly; + } + + protected boolean getPoolFilterPathOnly() { + return poolFilterPathOnly; + } + + @Override + public InputSplit[] getSplits(JobConf job, int numSplits) + throws IOException { + + long minSizeNode = 0; + long minSizeRack = 0; + long maxSize = 0; + + // the values specified by setxxxSplitSize() takes precedence over the + // values that might have been specified in the config + if (minSplitSizeNode != 0) { + minSizeNode = minSplitSizeNode; + } else { + minSizeNode = job.getLong("mapred.min.split.size.per.node", 0); + } + if (minSplitSizeRack != 0) { + minSizeRack = minSplitSizeRack; + } else { + minSizeRack = job.getLong("mapred.min.split.size.per.rack", 0); + } + if (maxSplitSize != 0) { + maxSize = maxSplitSize; + } else { + maxSize = job.getLong("mapred.max.split.size", 0); + } + if (minSizeNode != 0 && maxSize != 0 && minSizeNode > maxSize) { + throw new IOException("Minimum split size pernode " + minSizeNode + + " cannot be larger than maximum split size " + + maxSize); + } + if (minSizeRack != 0 && maxSize != 0 && minSizeRack > maxSize) { + throw new IOException("Minimum split size per rack" + minSizeRack + + " cannot be larger than maximum split size " + + maxSize); + } + if (minSizeRack != 0 && minSizeNode > minSizeRack) { + throw new IOException("Minimum split size per node" + minSizeNode + + " cannot be smaller than minimum split size per rack " + + minSizeRack); + } + + // all the files in input set + LocatedFileStatus[] stats = listLocatedStatus(job); + List splits = new ArrayList(); + if (stats.length == 0) { + return splits.toArray(new CombineFileSplit[splits.size()]); + } + + // Put them into a list for easier removal during iteration + List newstats = new LinkedList(); + Collections.addAll(newstats, stats); + stats = null; + + // In one single iteration, process all the paths in a single pool. + // Processing one pool at a time ensures that a split contains paths + // from a single pool only. + for (MultiPathFilter onepool : pools) { + ArrayList myStats = new ArrayList(); + + // pick one input path. If it matches all the filters in a pool, + // add it to the output set + for (Iterator iter = newstats.iterator(); + iter.hasNext();) { + LocatedFileStatus stat = iter.next(); + if (onepool.accept(stat.getPath(), poolFilterPathOnly)) { + myStats.add(stat); // add it to my output set + iter.remove(); + } + } + // create splits for all files in this pool. + getMoreSplits(job, myStats, + maxSize, minSizeNode, minSizeRack, splits); + } + + // create splits for all files that are not in any pool. 
+ getMoreSplits(job, newstats, + maxSize, minSizeNode, minSizeRack, splits); + + // free up rackToNodes map + rackToNodes.clear(); + return splits.toArray(new CombineFileSplit[splits.size()]); + } + + /** + * Return all the splits in the specified set of paths + */ + private void getMoreSplits(JobConf job, List stats, + long maxSize, long minSizeNode, long minSizeRack, + List splits) + throws IOException { + + // all blocks for all the files in input set + OneFileInfo[] files; + + // mapping from a rack name to the list of blocks it has + HashMap> rackToBlocks = + new HashMap>(); + + // mapping from a block to the nodes on which it has replicas + HashMap blockToNodes = + new HashMap(); + + // mapping from a node to the list of blocks that it contains + HashMap> nodeToBlocks = + new HashMap>(); + + if (stats.isEmpty()) { + return; + } + files = new OneFileInfo[stats.size()]; + + // populate all the blocks for all files + long totLength = 0; + for (int i = 0; i < files.length; i++) { + LocatedFileStatus oneStatus = stats.get(i); + files[i] = new OneFileInfo(oneStatus, job, + isSplitable(FileSystem.get(job), oneStatus.getPath()), + rackToBlocks, blockToNodes, nodeToBlocks, rackToNodes, maxSize); + totLength += files[i].getLength(); + } + + ArrayList validBlocks = new ArrayList(); + Set nodes = new HashSet(); + long curSplitSize = 0; + + // process all nodes and create splits that are local + // to a node. + for (Iterator>> iter = nodeToBlocks.entrySet().iterator(); + iter.hasNext();) { + + Map.Entry> one = iter.next(); + nodes.add(one.getKey()); + List blocksInNode = one.getValue(); + + // for each block, copy it into validBlocks. Delete it from + // blockToNodes so that the same block does not appear in + // two different splits. + for (OneBlockInfo oneblock : blocksInNode) { + if (blockToNodes.containsKey(oneblock)) { + validBlocks.add(oneblock); + blockToNodes.remove(oneblock); + curSplitSize += oneblock.length; + + // if the accumulated split size exceeds the maximum, then + // create this split. + if (maxSize != 0 && curSplitSize >= maxSize) { + // create an input split and add it to the splits array + addCreatedSplit(job, splits, nodes, validBlocks); + curSplitSize = 0; + validBlocks.clear(); + } + } + } + // if there were any blocks left over and their combined size is + // larger than minSplitNode, then combine them into one split. + // Otherwise add them back to the unprocessed pool. It is likely + // that they will be combined with other blocks from the same rack later on. + if (minSizeNode != 0 && curSplitSize >= minSizeNode) { + // create an input split and add it to the splits array + addCreatedSplit(job, splits, nodes, validBlocks); + } else { + for (OneBlockInfo oneblock : validBlocks) { + blockToNodes.put(oneblock, oneblock.hosts); + } + } + validBlocks.clear(); + nodes.clear(); + curSplitSize = 0; + } + + // if blocks in a rack are below the specified minimum size, then keep them + // in 'overflow'. After the processing of all racks is complete, these overflow + // blocks will be combined into splits. + ArrayList overflowBlocks = new ArrayList(); + Set racks = new HashSet(); + + // Process all racks over and over again until there is no more work to do. + while (blockToNodes.size() > 0) { + + // Create one split for this rack before moving over to the next rack. + // Come back to this rack after creating a single split for each of the + // remaining racks. + // Process one rack location at a time, Combine all possible blocks that + // reside on this rack as one split. 
(constrained by minimum and maximum + // split size). + + // iterate over all racks + for (Iterator>> iter = + rackToBlocks.entrySet().iterator(); iter.hasNext();) { + + Map.Entry> one = iter.next(); + racks.add(one.getKey()); + List blocks = one.getValue(); + + // for each block, copy it into validBlocks. Delete it from + // blockToNodes so that the same block does not appear in + // two different splits. + boolean createdSplit = false; + for (OneBlockInfo oneblock : blocks) { + if (blockToNodes.containsKey(oneblock)) { + validBlocks.add(oneblock); + blockToNodes.remove(oneblock); + curSplitSize += oneblock.length; + + // if the accumulated split size exceeds the maximum, then + // create this split. + if (maxSize != 0 && curSplitSize >= maxSize) { + // create an input split and add it to the splits array + addCreatedSplit(job, splits, getHosts(racks), validBlocks); + createdSplit = true; + break; + } + } + } + + // if we created a split, then just go to the next rack + if (createdSplit) { + curSplitSize = 0; + validBlocks.clear(); + racks.clear(); + continue; + } + + if (!validBlocks.isEmpty()) { + if (minSizeRack != 0 && curSplitSize >= minSizeRack) { + // if there is a mimimum size specified, then create a single split + // otherwise, store these blocks into overflow data structure + addCreatedSplit(job, splits, getHosts(racks), validBlocks); + } else { + // There were a few blocks in this rack that remained to be processed. + // Keep them in 'overflow' block list. These will be combined later. + overflowBlocks.addAll(validBlocks); + } + } + curSplitSize = 0; + validBlocks.clear(); + racks.clear(); + } + } + + assert blockToNodes.isEmpty(); + assert curSplitSize == 0; + assert validBlocks.isEmpty(); + assert racks.isEmpty(); + + // Process all overflow blocks + for (OneBlockInfo oneblock : overflowBlocks) { + validBlocks.add(oneblock); + curSplitSize += oneblock.length; + + // This might cause an exiting rack location to be re-added, + // but it should be OK because racks is a Set. + for (int i = 0; i < oneblock.racks.length; i++) { + racks.add(oneblock.racks[i]); + } + + // if the accumulated split size exceeds the maximum, then + // create this split. + if (maxSize != 0 && curSplitSize >= maxSize) { + // create an input split and add it to the splits array + addCreatedSplit(job, splits, getHosts(racks), validBlocks); + curSplitSize = 0; + validBlocks.clear(); + racks.clear(); + } + } + + // Process any remaining blocks, if any. + if (!validBlocks.isEmpty()) { + addCreatedSplit(job, splits, getHosts(racks), validBlocks); + } + } + + /** + * Create a single split from the list of blocks specified in validBlocks + * Add this new split into splitList. + */ + private void addCreatedSplit(JobConf job, + List splitList, + Collection locations, + ArrayList validBlocks) { + // create an input split + Path[] fl = new Path[validBlocks.size()]; + long[] offset = new long[validBlocks.size()]; + long[] length = new long[validBlocks.size()]; + for (int i = 0; i < validBlocks.size(); i++) { + fl[i] = validBlocks.get(i).onepath; + offset[i] = validBlocks.get(i).offset; + length[i] = validBlocks.get(i).length; + } + + // add this split to the list that is returned + CombineFileSplit thissplit = new CombineFileSplit(job, fl, offset, + length, locations.toArray(new String[locations.size()])); + splitList.add(thissplit); + } + + /** + * This is not implemented yet. 
+ */ + public abstract RecordReader getRecordReader(InputSplit split, + JobConf job, Reporter reporter) + throws IOException; + + /** + * information about one file from the File System + */ + private static class OneFileInfo { + private long fileSize; // size of the file + private OneBlockInfo[] blocks; // all blocks in this file + + OneFileInfo(LocatedFileStatus stat, JobConf job, + boolean isSplitable, + HashMap> rackToBlocks, + HashMap blockToNodes, + HashMap> nodeToBlocks, + HashMap> rackToNodes, + long maxSize) + throws IOException { + this.fileSize = 0; + + // get block locations from file system + BlockLocation[] locations = stat.getBlockLocations(); + + // create a list of all block and their locations + if (locations == null) { + blocks = new OneBlockInfo[0]; + } else { + if (!isSplitable) { + // if the file is not splitable, just create the one block with + // full file length + blocks = new OneBlockInfo[1]; + fileSize = stat.getLen(); + blocks[0] = new OneBlockInfo(stat.getPath(), 0, fileSize, locations[0] + .getHosts(), locations[0].getTopologyPaths()); + } else { + ArrayList blocksList = new ArrayList(locations.length); + for (int i = 0; i < locations.length; i++) { + + fileSize += locations[i].getLength(); + + // each split can be a maximum of maxSize + long left = locations[i].getLength(); + long myOffset = locations[i].getOffset(); + long myLength = 0; + while (left > 0) { + if (maxSize == 0) { + myLength = left; + } else { + if (left > maxSize && left < 2 * maxSize) { + // if remainder is between max and 2*max - then + // instead of creating splits of size max, left-max we + // create splits of size left/2 and left/2. + myLength = left / 2; + } else { + myLength = Math.min(maxSize, left); + } + } + OneBlockInfo oneblock = new OneBlockInfo(stat.getPath(), + myOffset, + myLength, + locations[i].getHosts(), + locations[i].getTopologyPaths()); + left -= myLength; + myOffset += myLength; + + blocksList.add(oneblock); + } + } + blocks = blocksList.toArray(new OneBlockInfo[blocksList.size()]); + } + + for (OneBlockInfo oneblock : blocks) { + // fail job submission in case of missing block + if (oneblock.hosts.length == 0) { + throw new IOException("The file " + oneblock.onepath + + " is missing block at offset " + oneblock.offset); + } + + // add this block to the block --> node locations map + blockToNodes.put(oneblock, oneblock.hosts); + + // add this block to the rack --> block map + for (int j = 0; j < oneblock.racks.length; j++) { + String rack = oneblock.racks[j]; + List blklist = rackToBlocks.get(rack); + if (blklist == null) { + blklist = new ArrayList(); + rackToBlocks.put(rack, blklist); + } + blklist.add(oneblock); + // Add this host to rackToNodes map + addHostToRack(rackToNodes, oneblock.racks[j], oneblock.hosts[j]); + } + + // add this block to the node --> block map + for (int j = 0; j < oneblock.hosts.length; j++) { + String node = oneblock.hosts[j]; + List blklist = nodeToBlocks.get(node); + if (blklist == null) { + blklist = new ArrayList(); + nodeToBlocks.put(node, blklist); + } + blklist.add(oneblock); + } + } + } + } + + long getLength() { + return fileSize; + } + + OneBlockInfo[] getBlocks() { + return blocks; + } + } + + /** + * information about one block from the File System + */ + private static class OneBlockInfo { + Path onepath; // name of this file + long offset; // offset in file + long length; // length of this block + String[] hosts; // nodes on whch this block resides + String[] racks; // network topology of hosts + + OneBlockInfo(Path path, 
long offset, long len, + String[] hosts, String[] topologyPaths) { + this.onepath = path; + this.offset = offset; + this.hosts = hosts; + this.length = len; + assert (hosts.length == topologyPaths.length || + topologyPaths.length == 0); + + // if the file ystem does not have any rack information, then + // use dummy rack location. + if (topologyPaths.length == 0) { + topologyPaths = new String[hosts.length]; + for (int i = 0; i < topologyPaths.length; i++) { + topologyPaths[i] = (new NodeBase(hosts[i], NetworkTopology.DEFAULT_RACK)). + toString(); + } + } + + // The topology paths have the host name included as the last + // component. Strip it. + this.racks = new String[topologyPaths.length]; + for (int i = 0; i < topologyPaths.length; i++) { + this.racks[i] = (new NodeBase(topologyPaths[i])).getNetworkLocation(); + } + } + } + + private static void addHostToRack(HashMap> rackToNodes, + String rack, String host) { + Set hosts = rackToNodes.get(rack); + if (hosts == null) { + hosts = new HashSet(); + rackToNodes.put(rack, hosts); + } + hosts.add(host); + } + + private Set getHosts(Set racks) { + Set hosts = new HashSet(); + for (String rack : racks) { + hosts.addAll(rackToNodes.get(rack)); + } + return hosts; + } + + /** + * Accept a path only if any one of filters given in the + * constructor do. + */ + private static class MultiPathFilter implements PathFilter { + private List filters; + + public MultiPathFilter() { + this.filters = new ArrayList(); + } + + public MultiPathFilter(List filters) { + this.filters = filters; + } + + public void add(PathFilter one) { + filters.add(one); + } + + public boolean accept(Path path) { + for (PathFilter filter : filters) { + if (filter.accept(path)) { + return true; + } + } + return false; + } + + /** + * + * @param path + * @param pathOnly whether to strip out the scheme/authority before passing + * to the constituent filters + * @return whether the path matches all of the filters + */ + public boolean accept(Path path, boolean pathOnly) { + Path pathToCheck = path; + if (pathOnly) { + pathToCheck = new Path(path.toUri().getPath()); + } + return accept(pathToCheck); + } + + public String toString() { + StringBuffer buf = new StringBuffer(); + buf.append("["); + for (PathFilter f: filters) { + buf.append(f); + buf.append(","); + } + buf.append("]"); + return buf.toString(); + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/CombineFileRecordReader.java b/src/mapred/org/apache/hadoop/mapred/lib/CombineFileRecordReader.java new file mode 100644 index 0000000..ec8e076 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/CombineFileRecordReader.java @@ -0,0 +1,162 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred.lib; + +import java.io.*; +import java.util.*; +import java.lang.reflect.*; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; + +import org.apache.hadoop.mapred.*; +import org.apache.hadoop.conf.Configuration; + +/** + * A generic RecordReader that can hand out different recordReaders + * for each chunk in a {@link CombineFileSplit}. + * A CombineFileSplit can combine data chunks from multiple files. + * This class allows using different RecordReaders for processing + * these data chunks from different files. + * @see CombineFileSplit + */ + +public class CombineFileRecordReader implements RecordReader { + + static final Class [] constructorSignature = new Class [] + {CombineFileSplit.class, + Configuration.class, + Reporter.class, + Integer.class}; + + protected CombineFileSplit split; + protected JobConf jc; + protected Reporter reporter; + protected Class> rrClass; + protected Constructor> rrConstructor; + protected FileSystem fs; + + protected int idx; + protected long progress; + protected RecordReader curReader; + + public boolean next(K key, V value) throws IOException { + + while ((curReader == null) || !curReader.next(key, value)) { + if (!initNextRecordReader()) { + return false; + } + } + return true; + } + + public K createKey() { + return curReader.createKey(); + } + + public V createValue() { + return curReader.createValue(); + } + + /** + * return the amount of data processed + */ + public long getPos() throws IOException { + return progress; + } + + public void close() throws IOException { + if (curReader != null) { + curReader.close(); + curReader = null; + } + } + + /** + * return progress based on the amount of data processed so far. + */ + public float getProgress() throws IOException { + long subprogress = 0; // bytes processed in current split + if (null != curReader) { + // idx is always one past the current subsplit's true index. + subprogress = (long)(curReader.getProgress() * split.getLength(idx - 1)); + } + return Math.min(1.0f, (progress + subprogress)/(float)(split.getLength())); + } + + /** + * A generic RecordReader that can hand out different recordReaders + * for each chunk in the CombineFileSplit. + */ + public CombineFileRecordReader(JobConf job, CombineFileSplit split, + Reporter reporter, + Class> rrClass) + throws IOException { + this.split = split; + this.jc = job; + this.rrClass = rrClass; + this.reporter = reporter; + this.idx = 0; + this.curReader = null; + this.progress = 0; + + try { + rrConstructor = rrClass.getDeclaredConstructor(constructorSignature); + rrConstructor.setAccessible(true); + } catch (Exception e) { + throw new RuntimeException(rrClass.getName() + + " does not have valid constructor", e); + } + initNextRecordReader(); + } + + /** + * Get the record reader for the next chunk in this CombineFileSplit. + */ + protected boolean initNextRecordReader() throws IOException { + + if (curReader != null) { + curReader.close(); + curReader = null; + if (idx > 0) { + progress += split.getLength(idx-1); // done processing so far + } + } + + // if all chunks have been processed, nothing more to do. + if (idx == split.getNumPaths()) { + return false; + } + + // get a record reader for the idx-th chunk + try { + curReader = rrConstructor.newInstance(new Object [] + {split, jc, reporter, Integer.valueOf(idx)}); + + // setup some helper config variables. 
+ jc.set("map.input.file", split.getPath(idx).toString()); + jc.setLong("map.input.start", split.getOffset(idx)); + jc.setLong("map.input.length", split.getLength(idx)); + } catch (Exception e) { + throw new RuntimeException (e); + } + idx++; + return true; + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/CombineFileSplit.java b/src/mapred/org/apache/hadoop/mapred/lib/CombineFileSplit.java new file mode 100644 index 0000000..20a9420 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/CombineFileSplit.java @@ -0,0 +1,205 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.HashSet; +import java.util.Set; + +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.BlockLocation; +import org.apache.hadoop.io.Text; + +import org.apache.hadoop.mapred.InputSplit; +import org.apache.hadoop.mapred.FileInputFormat; +import org.apache.hadoop.mapred.JobConf; + +/** + * A sub-collection of input files. Unlike {@link org.apache.hadoop.mapred.FileSplit}, + * CombineFileSplit * class does not represent a split of a file, but a split of input files + * into smaller sets. A split may contain blocks from different file but all + * the blocks in the same split are probably local to some rack
+ * CombineFileSplit can be used to implement {@link org.apache.hadoop.mapred.RecordReader}'s, + * with reading one record per file. + * @see org.apache.hadoop.mapred.FileSplit + * @see CombineFileInputFormat + */ +public class CombineFileSplit implements InputSplit { + + private Path[] paths; + private long[] startoffset; + private long[] lengths; + private String[] locations; + private long totLength; + private JobConf job; + + /** + * default constructor + */ + public CombineFileSplit() {} + public CombineFileSplit(JobConf job, Path[] files, long[] start, + long[] lengths, String[] locations) { + initSplit(job, files, start, lengths, locations); + } + + public CombineFileSplit(JobConf job, Path[] files, long[] lengths) { + long[] startoffset = new long[files.length]; + for (int i = 0; i < startoffset.length; i++) { + startoffset[i] = 0; + } + String[] locations = new String[files.length]; + for (int i = 0; i < locations.length; i++) { + locations[i] = ""; + } + initSplit(job, files, startoffset, lengths, locations); + } + + private void initSplit(JobConf job, Path[] files, long[] start, + long[] lengths, String[] locations) { + this.job = job; + this.startoffset = start; + this.lengths = lengths; + this.paths = files; + this.totLength = 0; + this.locations = locations; + for(long length : lengths) { + totLength += length; + } + } + + /** + * Copy constructor + */ + public CombineFileSplit(CombineFileSplit old) throws IOException { + this(old.getJob(), old.getPaths(), old.getStartOffsets(), + old.getLengths(), old.getLocations()); + } + + public JobConf getJob() { + return job; + } + + public long getLength() { + return totLength; + } + + /** Returns an array containing the startoffsets of the files in the split*/ + public long[] getStartOffsets() { + return startoffset; + } + + /** Returns an array containing the lengths of the files in the split*/ + public long[] getLengths() { + return lengths; + } + + /** Returns the start offset of the ith Path */ + public long getOffset(int i) { + return startoffset[i]; + } + + /** Returns the length of the ith Path */ + public long getLength(int i) { + return lengths[i]; + } + + /** Returns the number of Paths in the split */ + public int getNumPaths() { + return paths.length; + } + + /** Returns the ith Path */ + public Path getPath(int i) { + return paths[i]; + } + + /** Returns all the Paths in the split */ + public Path[] getPaths() { + return paths; + } + + /** Returns all the Paths where this input-split resides */ + public String[] getLocations() throws IOException { + return locations; + } + + public void readFields(DataInput in) throws IOException { + totLength = in.readLong(); + int arrLength = in.readInt(); + lengths = new long[arrLength]; + for(int i=0; i implements InputFormat { + + public InputSplit[] getSplits(JobConf conf, int numSplits) throws IOException { + + JobConf confCopy = new JobConf(conf); + List splits = new ArrayList(); + Map formatMap = MultipleInputs.getInputFormatMap(conf); + Map> mapperMap = MultipleInputs + .getMapperTypeMap(conf); + Map, List> formatPaths + = new HashMap, List>(); + + // First, build a map of InputFormats to Paths + for (Entry entry : formatMap.entrySet()) { + if (!formatPaths.containsKey(entry.getValue().getClass())) { + formatPaths.put(entry.getValue().getClass(), new LinkedList()); + } + + formatPaths.get(entry.getValue().getClass()).add(entry.getKey()); + } + + for (Entry, List> formatEntry : + formatPaths.entrySet()) { + Class formatClass = formatEntry.getKey(); + InputFormat format = 
(InputFormat) ReflectionUtils.newInstance( + formatClass, conf); + List paths = formatEntry.getValue(); + + Map, List> mapperPaths + = new HashMap, List>(); + + // Now, for each set of paths that have a common InputFormat, build + // a map of Mappers to the paths they're used for + for (Path path : paths) { + Class mapperClass = mapperMap.get(path); + if (!mapperPaths.containsKey(mapperClass)) { + mapperPaths.put(mapperClass, new LinkedList()); + } + + mapperPaths.get(mapperClass).add(path); + } + + // Now each set of paths that has a common InputFormat and Mapper can + // be added to the same job, and split together. + for (Entry, List> mapEntry : mapperPaths + .entrySet()) { + paths = mapEntry.getValue(); + Class mapperClass = mapEntry.getKey(); + + if (mapperClass == null) { + mapperClass = conf.getMapperClass(); + } + + FileInputFormat.setInputPaths(confCopy, paths.toArray(new Path[paths + .size()])); + + // Get splits for each input path and tag with InputFormat + // and Mapper types by wrapping in a TaggedInputSplit. + InputSplit[] pathSplits = format.getSplits(confCopy, numSplits); + for (InputSplit pathSplit : pathSplits) { + splits.add(new TaggedInputSplit(pathSplit, conf, format.getClass(), + mapperClass)); + } + } + } + + return splits.toArray(new InputSplit[splits.size()]); + } + + @SuppressWarnings("unchecked") + public RecordReader getRecordReader(InputSplit split, JobConf conf, + Reporter reporter) throws IOException { + + // Find the InputFormat and then the RecordReader from the + // TaggedInputSplit. + + TaggedInputSplit taggedInputSplit = (TaggedInputSplit) split; + InputFormat inputFormat = (InputFormat) ReflectionUtils + .newInstance(taggedInputSplit.getInputFormatClass(), conf); + return inputFormat.getRecordReader(taggedInputSplit.getInputSplit(), conf, + reporter); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/DelegatingMapper.java b/src/mapred/org/apache/hadoop/mapred/lib/DelegatingMapper.java new file mode 100644 index 0000000..875d136 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/DelegatingMapper.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib; + +import java.io.IOException; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.Mapper; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.util.ReflectionUtils; + +/** + * An {@link Mapper} that delegates behaviour of paths to multiple other + * mappers. 
+ * + * @see MultipleInputs#addInputPath(JobConf, Path, Class, Class) + */ +public class DelegatingMapper implements Mapper { + + private JobConf conf; + + private Mapper mapper; + + @SuppressWarnings("unchecked") + public void map(K1 key, V1 value, OutputCollector outputCollector, + Reporter reporter) throws IOException { + + if (mapper == null) { + // Find the Mapper from the TaggedInputSplit. + TaggedInputSplit inputSplit = (TaggedInputSplit) reporter.getInputSplit(); + mapper = (Mapper) ReflectionUtils.newInstance(inputSplit + .getMapperClass(), conf); + } + mapper.map(key, value, outputCollector, reporter); + } + + public void configure(JobConf conf) { + this.conf = conf; + } + + public void close() throws IOException { + if (mapper != null) { + mapper.close(); + } + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/FieldSelectionMapReduce.java b/src/mapred/org/apache/hadoop/mapred/lib/FieldSelectionMapReduce.java new file mode 100644 index 0000000..b643f2c --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/FieldSelectionMapReduce.java @@ -0,0 +1,337 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Iterator; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.Mapper; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.Reducer; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.mapred.TextInputFormat; + +/** + * This class implements a mapper/reducer class that can be used to perform + * field selections in a manner similar to unix cut. The input data is treated + * as fields separated by a user specified separator (the default value is + * "\t"). The user can specify a list of fields that form the map output keys, + * and a list of fields that form the map output values. If the inputformat is + * TextInputFormat, the mapper will ignore the key to the map function. and the + * fields are from the value only. Otherwise, the fields are the union of those + * from the key and those from the value. + * + * The field separator is under attribute "mapred.data.field.separator" + * + * The map output field list spec is under attribute "map.output.key.value.fields.spec". + * The value is expected to be like "keyFieldsSpec:valueFieldsSpec" + * key/valueFieldsSpec are comma (,) separated field spec: fieldSpec,fieldSpec,fieldSpec ... + * Each field spec can be a simple number (e.g. 
5) specifying a specific field, or a range + * (like 2-5) to specify a range of fields, or an open range (like 3-) specifying all + * the fields starting from field 3. The open range field spec applies value fields only. + * They have no effect on the key fields. + * + * Here is an example: "4,3,0,1:6,5,1-3,7-". It specifies to use fields 4,3,0 and 1 for keys, + * and use fields 6,5,1,2,3,7 and above for values. + * + * The reduce output field list spec is under attribute "reduce.output.key.value.fields.spec". + * + * The reducer extracts output key/value pairs in a similar manner, except that + * the key is never ignored. + * + */ +public class FieldSelectionMapReduce + implements Mapper, Reducer { + + private String mapOutputKeyValueSpec; + + private boolean ignoreInputKey; + + private String fieldSeparator = "\t"; + + private int[] mapOutputKeyFieldList = null; + + private int[] mapOutputValueFieldList = null; + + private int allMapValueFieldsFrom = -1; + + private String reduceOutputKeyValueSpec; + + private int[] reduceOutputKeyFieldList = null; + + private int[] reduceOutputValueFieldList = null; + + private int allReduceValueFieldsFrom = -1; + + private static Text emptyText = new Text(""); + + public static final Log LOG = LogFactory.getLog("FieldSelectionMapReduce"); + + private String specToString() { + StringBuffer sb = new StringBuffer(); + sb.append("fieldSeparator: ").append(fieldSeparator).append("\n"); + + sb.append("mapOutputKeyValueSpec: ").append(mapOutputKeyValueSpec).append( + "\n"); + sb.append("reduceOutputKeyValueSpec: ").append(reduceOutputKeyValueSpec) + .append("\n"); + + sb.append("allMapValueFieldsFrom: ").append(allMapValueFieldsFrom).append( + "\n"); + + sb.append("allReduceValueFieldsFrom: ").append(allReduceValueFieldsFrom) + .append("\n"); + + int i = 0; + + sb.append("mapOutputKeyFieldList.length: ").append( + mapOutputKeyFieldList.length).append("\n"); + for (i = 0; i < mapOutputKeyFieldList.length; i++) { + sb.append("\t").append(mapOutputKeyFieldList[i]).append("\n"); + } + sb.append("mapOutputValueFieldList.length: ").append( + mapOutputValueFieldList.length).append("\n"); + for (i = 0; i < mapOutputValueFieldList.length; i++) { + sb.append("\t").append(mapOutputValueFieldList[i]).append("\n"); + } + + sb.append("reduceOutputKeyFieldList.length: ").append( + reduceOutputKeyFieldList.length).append("\n"); + for (i = 0; i < reduceOutputKeyFieldList.length; i++) { + sb.append("\t").append(reduceOutputKeyFieldList[i]).append("\n"); + } + sb.append("reduceOutputValueFieldList.length: ").append( + reduceOutputValueFieldList.length).append("\n"); + for (i = 0; i < reduceOutputValueFieldList.length; i++) { + sb.append("\t").append(reduceOutputValueFieldList[i]).append("\n"); + } + return sb.toString(); + } + + /** + * The identify function. Input key/value pair is written directly to output. 
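+ * More precisely, the input is split on the configured field separator and
+ * the field specs described above select which fields form the emitted key
+ * and value. A minimal setup might look like this (the job class and spec
+ * values are illustrative):
+ * <pre>
+ * JobConf conf = new JobConf(MyJob.class);
+ * conf.setInputFormat(TextInputFormat.class);
+ * conf.setMapperClass(FieldSelectionMapReduce.class);
+ * conf.setReducerClass(FieldSelectionMapReduce.class);
+ * conf.setOutputKeyClass(Text.class);
+ * conf.setOutputValueClass(Text.class);
+ * conf.set("map.output.key.value.fields.spec", "0,2:3-");
+ * conf.set("reduce.output.key.value.fields.spec", "0:1-");
+ * </pre>
+ * With that spec and the default separator, an input value a\tb\tc\td\te
+ * (the key is ignored for TextInputFormat) produces the map output key
+ * a\tc and the map output value d\te.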
+ */ + public void map(K key, V val, + OutputCollector output, Reporter reporter) throws IOException { + String valStr = val.toString(); + String[] inputValFields = valStr.split(this.fieldSeparator); + String[] inputKeyFields = null; + String[] fields = null; + if (this.ignoreInputKey) { + fields = inputValFields; + } else { + inputKeyFields = key.toString().split(this.fieldSeparator); + fields = new String[inputKeyFields.length + inputValFields.length]; + int i = 0; + for (i = 0; i < inputKeyFields.length; i++) { + fields[i] = inputKeyFields[i]; + } + for (i = 0; i < inputValFields.length; i++) { + fields[inputKeyFields.length + i] = inputValFields[i]; + } + } + String newKey = selectFields(fields, mapOutputKeyFieldList, -1, + fieldSeparator); + String newVal = selectFields(fields, mapOutputValueFieldList, + allMapValueFieldsFrom, fieldSeparator); + + if (newKey == null) { + newKey = newVal; + newVal = null; + } + Text newTextKey = emptyText; + if (newKey != null) { + newTextKey = new Text(newKey); + } + Text newTextVal = emptyText; + if (newTextVal != null) { + newTextVal = new Text(newVal); + } + output.collect(newTextKey, newTextVal); + } + + /** + * Extract the actual field numbers from the given field specs. + * If a field spec is in the form of "n-" (like 3-), then n will be the + * return value. Otherwise, -1 will be returned. + * @param fieldListSpec an array of field specs + * @param fieldList an array of field numbers extracted from the specs. + * @return number n if some field spec is in the form of "n-", -1 otherwise. + */ + private int extractFields(String[] fieldListSpec, + ArrayList fieldList) { + int allFieldsFrom = -1; + int i = 0; + int j = 0; + int pos = -1; + String fieldSpec = null; + for (i = 0; i < fieldListSpec.length; i++) { + fieldSpec = fieldListSpec[i]; + if (fieldSpec.length() == 0) { + continue; + } + pos = fieldSpec.indexOf('-'); + if (pos < 0) { + Integer fn = new Integer(fieldSpec); + fieldList.add(fn); + } else { + String start = fieldSpec.substring(0, pos); + String end = fieldSpec.substring(pos + 1); + if (start.length() == 0) { + start = "0"; + } + if (end.length() == 0) { + allFieldsFrom = Integer.parseInt(start); + continue; + } + int startPos = Integer.parseInt(start); + int endPos = Integer.parseInt(end); + for (j = startPos; j <= endPos; j++) { + fieldList.add(j); + } + } + } + return allFieldsFrom; + } + + private void parseOutputKeyValueSpec() { + String[] mapKeyValSpecs = mapOutputKeyValueSpec.split(":", -1); + String[] mapKeySpec = mapKeyValSpecs[0].split(","); + String[] mapValSpec = new String[0]; + if (mapKeyValSpecs.length > 1) { + mapValSpec = mapKeyValSpecs[1].split(","); + } + + int i = 0; + ArrayList fieldList = new ArrayList(); + extractFields(mapKeySpec, fieldList); + this.mapOutputKeyFieldList = new int[fieldList.size()]; + for (i = 0; i < fieldList.size(); i++) { + this.mapOutputKeyFieldList[i] = fieldList.get(i).intValue(); + } + + fieldList = new ArrayList(); + allMapValueFieldsFrom = extractFields(mapValSpec, fieldList); + this.mapOutputValueFieldList = new int[fieldList.size()]; + for (i = 0; i < fieldList.size(); i++) { + this.mapOutputValueFieldList[i] = fieldList.get(i).intValue(); + } + + String[] reduceKeyValSpecs = reduceOutputKeyValueSpec.split(":", -1); + String[] reduceKeySpec = reduceKeyValSpecs[0].split(","); + String[] reduceValSpec = new String[0]; + if (reduceKeyValSpecs.length > 1) { + reduceValSpec = reduceKeyValSpecs[1].split(","); + } + + fieldList = new ArrayList(); + extractFields(reduceKeySpec, fieldList); 
+ this.reduceOutputKeyFieldList = new int[fieldList.size()]; + for (i = 0; i < fieldList.size(); i++) { + this.reduceOutputKeyFieldList[i] = fieldList.get(i).intValue(); + } + + fieldList = new ArrayList(); + allReduceValueFieldsFrom = extractFields(reduceValSpec, fieldList); + this.reduceOutputValueFieldList = new int[fieldList.size()]; + for (i = 0; i < fieldList.size(); i++) { + this.reduceOutputValueFieldList[i] = fieldList.get(i).intValue(); + } + } + + public void configure(JobConf job) { + this.fieldSeparator = job.get("mapred.data.field.separator", "\t"); + this.mapOutputKeyValueSpec = job.get("map.output.key.value.fields.spec", + "0-:"); + this.ignoreInputKey = TextInputFormat.class.getCanonicalName().equals( + job.getInputFormat().getClass().getCanonicalName()); + this.reduceOutputKeyValueSpec = job.get( + "reduce.output.key.value.fields.spec", "0-:"); + parseOutputKeyValueSpec(); + LOG.info(specToString()); + } + + public void close() throws IOException { + // TODO Auto-generated method stub + + } + + private static String selectFields(String[] fields, int[] fieldList, + int allFieldsFrom, String separator) { + String retv = null; + int i = 0; + StringBuffer sb = null; + if (fieldList != null && fieldList.length > 0) { + if (sb == null) { + sb = new StringBuffer(); + } + for (i = 0; i < fieldList.length; i++) { + if (fieldList[i] < fields.length) { + sb.append(fields[fieldList[i]]); + } + sb.append(separator); + } + } + if (allFieldsFrom >= 0) { + if (sb == null) { + sb = new StringBuffer(); + } + for (i = allFieldsFrom; i < fields.length; i++) { + sb.append(fields[i]).append(separator); + } + } + if (sb != null) { + retv = sb.toString(); + if (retv.length() > 0) { + retv = retv.substring(0, retv.length() - 1); + } + } + return retv; + } + + public void reduce(Text key, Iterator values, + OutputCollector output, Reporter reporter) + throws IOException { + + String keyStr = key.toString() + this.fieldSeparator; + while (values.hasNext()) { + String valStr = values.next().toString(); + valStr = keyStr + valStr; + String[] fields = valStr.split(this.fieldSeparator); + String newKey = selectFields(fields, reduceOutputKeyFieldList, -1, + fieldSeparator); + String newVal = selectFields(fields, reduceOutputValueFieldList, + allReduceValueFieldsFrom, fieldSeparator); + Text newTextKey = null; + if (newKey != null) { + newTextKey = new Text(newKey); + } + Text newTextVal = null; + if (newVal != null) { + newTextVal = new Text(newVal); + } + output.collect(newTextKey, newTextVal); + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/HashPartitioner.java b/src/mapred/org/apache/hadoop/mapred/lib/HashPartitioner.java new file mode 100644 index 0000000..7366d83 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/HashPartitioner.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib; + +import org.apache.hadoop.mapred.Partitioner; +import org.apache.hadoop.mapred.JobConf; + +/** Partition keys by their {@link Object#hashCode()}. + * @deprecated Use + * {@link org.apache.hadoop.mapreduce.lib.partition.HashPartitioner} instead. + */ +@Deprecated +public class HashPartitioner implements Partitioner { + + public void configure(JobConf job) {} + + /** Use {@link Object#hashCode()} to partition. */ + public int getPartition(K2 key, V2 value, + int numReduceTasks) { + return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks; + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/IdentityMapper.java b/src/mapred/org/apache/hadoop/mapred/lib/IdentityMapper.java new file mode 100644 index 0000000..34931fa --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/IdentityMapper.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib; + +import java.io.IOException; + +import org.apache.hadoop.mapred.Mapper; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.mapred.MapReduceBase; + +/** Implements the identity function, mapping inputs directly to outputs. + * @deprecated Use {@link org.apache.hadoop.mapreduce.Mapper} instead. + */ +@Deprecated +public class IdentityMapper + extends MapReduceBase implements Mapper { + + /** The identify function. Input key/value pair is written directly to + * output.*/ + public void map(K key, V val, + OutputCollector output, Reporter reporter) + throws IOException { + output.collect(key, val); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/IdentityReducer.java b/src/mapred/org/apache/hadoop/mapred/lib/IdentityReducer.java new file mode 100644 index 0000000..b86e1fa --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/IdentityReducer.java @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib; + +import java.io.IOException; + +import java.util.Iterator; + +import org.apache.hadoop.mapred.Reducer; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.mapred.MapReduceBase; + +/** Performs no reduction, writing all input values directly to the output. + * @deprecated Use {@link org.apache.hadoop.mapreduce.Reducer} instead. + */ +@Deprecated +public class IdentityReducer + extends MapReduceBase implements Reducer { + + /** Writes all keys and values directly to output. */ + public void reduce(K key, Iterator values, + OutputCollector output, Reporter reporter) + throws IOException { + while (values.hasNext()) { + output.collect(key, values.next()); + } + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/InputSampler.java b/src/mapred/org/apache/hadoop/mapred/lib/InputSampler.java new file mode 100644 index 0000000..9507a31 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/InputSampler.java @@ -0,0 +1,418 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Random; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.NullWritable; +import org.apache.hadoop.io.RawComparator; +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.mapred.FileInputFormat; +import org.apache.hadoop.mapred.InputFormat; +import org.apache.hadoop.mapred.InputSplit; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.RecordReader; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; + +/** + * Utility for collecting samples and writing a partition file for + * {@link org.apache.hadoop.mapred.lib.TotalOrderPartitioner}. 
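+ *
+ * A typical use is to sample the job's input, write the partition file, and
+ * then submit the job with TotalOrderPartitioner. A rough sketch, assuming
+ * the map output keys have the same type as the keys produced by the job's
+ * InputFormat (as in an identity-map sort); the job class, paths and
+ * sampler parameters are illustrative:
+ * <pre>
+ * JobConf job = new JobConf(MySortJob.class);
+ * FileInputFormat.setInputPaths(job, new Path("/data/input"));
+ * job.setNumReduceTasks(10);
+ * job.setPartitionerClass(TotalOrderPartitioner.class);
+ * TotalOrderPartitioner.setPartitionFile(job, new Path("/tmp/partitions"));
+ * InputSampler.Sampler sampler = new InputSampler.RandomSampler(0.1, 10000, 10);
+ * InputSampler.writePartitionFile(job, sampler);
+ * JobClient.runJob(job);
+ * </pre>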
+ */ +public class InputSampler implements Tool { + + private static final Log LOG = LogFactory.getLog(InputSampler.class); + + static int printUsage() { + System.out.println("sampler -r \n" + + " [-inFormat ]\n" + + " [-keyClass ]\n" + + " [-splitRandom | " + + "// Sample from random splits at random (general)\n" + + " -splitSample | " + + " // Sample from first records in splits (random data)\n"+ + " -splitInterval ]" + + " // Sample from splits at intervals (sorted data)"); + System.out.println("Default sampler: -splitRandom 0.1 10000 10"); + ToolRunner.printGenericCommandUsage(System.out); + return -1; + } + + private JobConf conf; + + public InputSampler(JobConf conf) { + this.conf = conf; + } + + public Configuration getConf() { + return conf; + } + + public void setConf(Configuration conf) { + if (!(conf instanceof JobConf)) { + this.conf = new JobConf(conf); + } else { + this.conf = (JobConf) conf; + } + } + + /** + * Interface to sample using an {@link org.apache.hadoop.mapred.InputFormat}. + */ + public interface Sampler { + /** + * For a given job, collect and return a subset of the keys from the + * input data. + */ + K[] getSample(InputFormat inf, JobConf job) throws IOException; + } + + /** + * Samples the first n records from s splits. + * Inexpensive way to sample random data. + */ + public static class SplitSampler implements Sampler { + + private final int numSamples; + private final int maxSplitsSampled; + + /** + * Create a SplitSampler sampling all splits. + * Takes the first numSamples / numSplits records from each split. + * @param numSamples Total number of samples to obtain from all selected + * splits. + */ + public SplitSampler(int numSamples) { + this(numSamples, Integer.MAX_VALUE); + } + + /** + * Create a new SplitSampler. + * @param numSamples Total number of samples to obtain from all selected + * splits. + * @param maxSplitsSampled The maximum number of splits to examine. + */ + public SplitSampler(int numSamples, int maxSplitsSampled) { + this.numSamples = numSamples; + this.maxSplitsSampled = maxSplitsSampled; + } + + /** + * From each split sampled, take the first numSamples / numSplits records. + */ + @SuppressWarnings("unchecked") // ArrayList::toArray doesn't preserve type + public K[] getSample(InputFormat inf, JobConf job) throws IOException { + InputSplit[] splits = inf.getSplits(job, job.getNumMapTasks()); + ArrayList samples = new ArrayList(numSamples); + int splitsToSample = Math.min(maxSplitsSampled, splits.length); + int splitStep = splits.length / splitsToSample; + int samplesPerSplit = numSamples / splitsToSample; + long records = 0; + for (int i = 0; i < splitsToSample; ++i) { + RecordReader reader = inf.getRecordReader(splits[i * splitStep], + job, Reporter.NULL); + K key = reader.createKey(); + V value = reader.createValue(); + while (reader.next(key, value)) { + samples.add(key); + key = reader.createKey(); + ++records; + if ((i+1) * samplesPerSplit <= records) { + break; + } + } + reader.close(); + } + return (K[])samples.toArray(); + } + } + + /** + * Sample from random points in the input. + * General-purpose sampler. Takes numSamples / maxSplitsSampled inputs from + * each split. + */ + public static class RandomSampler implements Sampler { + private double freq; + private final int numSamples; + private final int maxSplitsSampled; + + /** + * Create a new RandomSampler sampling all splits. + * This will read every split at the client, which is very expensive. + * @param freq Probability with which a key will be chosen. 
+ * @param numSamples Total number of samples to obtain from all selected + * splits. + */ + public RandomSampler(double freq, int numSamples) { + this(freq, numSamples, Integer.MAX_VALUE); + } + + /** + * Create a new RandomSampler. + * @param freq Probability with which a key will be chosen. + * @param numSamples Total number of samples to obtain from all selected + * splits. + * @param maxSplitsSampled The maximum number of splits to examine. + */ + public RandomSampler(double freq, int numSamples, int maxSplitsSampled) { + this.freq = freq; + this.numSamples = numSamples; + this.maxSplitsSampled = maxSplitsSampled; + } + + /** + * Randomize the split order, then take the specified number of keys from + * each split sampled, where each key is selected with the specified + * probability and possibly replaced by a subsequently selected key when + * the quota of keys from that split is satisfied. + */ + @SuppressWarnings("unchecked") // ArrayList::toArray doesn't preserve type + public K[] getSample(InputFormat inf, JobConf job) throws IOException { + InputSplit[] splits = inf.getSplits(job, job.getNumMapTasks()); + ArrayList samples = new ArrayList(numSamples); + int splitsToSample = Math.min(maxSplitsSampled, splits.length); + + Random r = new Random(); + long seed = r.nextLong(); + r.setSeed(seed); + LOG.debug("seed: " + seed); + // shuffle splits + for (int i = 0; i < splits.length; ++i) { + InputSplit tmp = splits[i]; + int j = r.nextInt(splits.length); + splits[i] = splits[j]; + splits[j] = tmp; + } + // our target rate is in terms of the maximum number of sample splits, + // but we accept the possibility of sampling additional splits to hit + // the target sample keyset + for (int i = 0; i < splitsToSample || + (i < splits.length && samples.size() < numSamples); ++i) { + RecordReader reader = inf.getRecordReader(splits[i], job, + Reporter.NULL); + K key = reader.createKey(); + V value = reader.createValue(); + while (reader.next(key, value)) { + if (r.nextDouble() <= freq) { + if (samples.size() < numSamples) { + samples.add(key); + } else { + // When exceeding the maximum number of samples, replace a + // random element with this one, then adjust the frequency + // to reflect the possibility of existing elements being + // pushed out + int ind = r.nextInt(numSamples); + if (ind != numSamples) { + samples.set(ind, key); + } + freq *= (numSamples - 1) / (double) numSamples; + } + key = reader.createKey(); + } + } + reader.close(); + } + return (K[])samples.toArray(); + } + } + + /** + * Sample from s splits at regular intervals. + * Useful for sorted data. + */ + public static class IntervalSampler implements Sampler { + private final double freq; + private final int maxSplitsSampled; + + /** + * Create a new IntervalSampler sampling all splits. + * @param freq The frequency with which records will be emitted. + */ + public IntervalSampler(double freq) { + this(freq, Integer.MAX_VALUE); + } + + /** + * Create a new IntervalSampler. + * @param freq The frequency with which records will be emitted. + * @param maxSplitsSampled The maximum number of splits to examine. + * @see #getSample + */ + public IntervalSampler(double freq, int maxSplitsSampled) { + this.freq = freq; + this.maxSplitsSampled = maxSplitsSampled; + } + + /** + * For each split sampled, emit when the ratio of the number of records + * retained to the total record count is less than the specified + * frequency. 
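+ * For example, a frequency of 0.01 retains roughly one record in every
+ * hundred from each split that is examined.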
+ */ + @SuppressWarnings("unchecked") // ArrayList::toArray doesn't preserve type + public K[] getSample(InputFormat inf, JobConf job) throws IOException { + InputSplit[] splits = inf.getSplits(job, job.getNumMapTasks()); + ArrayList samples = new ArrayList(); + int splitsToSample = Math.min(maxSplitsSampled, splits.length); + int splitStep = splits.length / splitsToSample; + long records = 0; + long kept = 0; + for (int i = 0; i < splitsToSample; ++i) { + RecordReader reader = inf.getRecordReader(splits[i * splitStep], + job, Reporter.NULL); + K key = reader.createKey(); + V value = reader.createValue(); + while (reader.next(key, value)) { + ++records; + if ((double) kept / records < freq) { + ++kept; + samples.add(key); + key = reader.createKey(); + } + } + reader.close(); + } + return (K[])samples.toArray(); + } + } + + /** + * Write a partition file for the given job, using the Sampler provided. + * Queries the sampler for a sample keyset, sorts by the output key + * comparator, selects the keys for each rank, and writes to the destination + * returned from {@link + org.apache.hadoop.mapred.lib.TotalOrderPartitioner#getPartitionFile}. + */ + @SuppressWarnings("unchecked") // getInputFormat, getOutputKeyComparator + public static void writePartitionFile(JobConf job, + Sampler sampler) throws IOException { + final InputFormat inf = (InputFormat) job.getInputFormat(); + int numPartitions = job.getNumReduceTasks(); + K[] samples = sampler.getSample(inf, job); + LOG.info("Using " + samples.length + " samples"); + RawComparator comparator = + (RawComparator) job.getOutputKeyComparator(); + Arrays.sort(samples, comparator); + Path dst = new Path(TotalOrderPartitioner.getPartitionFile(job)); + FileSystem fs = dst.getFileSystem(job); + if (fs.exists(dst)) { + fs.delete(dst, false); + } + SequenceFile.Writer writer = SequenceFile.createWriter(fs, job, dst, + job.getMapOutputKeyClass(), NullWritable.class); + NullWritable nullValue = NullWritable.get(); + float stepSize = samples.length / (float) numPartitions; + int last = -1; + for(int i = 1; i < numPartitions; ++i) { + int k = Math.round(stepSize * i); + while (last >= k && comparator.compare(samples[last], samples[k]) == 0) { + ++k; + } + writer.append(samples[k], nullValue); + last = k; + } + writer.close(); + } + + /** + * Driver for InputSampler from the command line. + * Configures a JobConf instance and calls {@link #writePartitionFile}. 
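+ * For example, to take roughly 10% of the records (up to 10000 keys from at
+ * most 10 splits) and write a partition file for 10 reducers, one might run
+ * something like the following (paths are illustrative):
+ * <pre>
+ * bin/hadoop org.apache.hadoop.mapred.lib.InputSampler -r 10 \
+ *     -splitRandom 0.1 10000 10 /data/input /tmp/partitions
+ * </pre>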
+ */ + public int run(String[] args) throws Exception { + JobConf job = (JobConf) getConf(); + ArrayList otherArgs = new ArrayList(); + Sampler sampler = null; + for(int i=0; i < args.length; ++i) { + try { + if ("-r".equals(args[i])) { + job.setNumReduceTasks(Integer.parseInt(args[++i])); + } else if ("-inFormat".equals(args[i])) { + job.setInputFormat( + Class.forName(args[++i]).asSubclass(InputFormat.class)); + } else if ("-keyClass".equals(args[i])) { + job.setMapOutputKeyClass( + Class.forName(args[++i]).asSubclass(WritableComparable.class)); + } else if ("-splitSample".equals(args[i])) { + int numSamples = Integer.parseInt(args[++i]); + int maxSplits = Integer.parseInt(args[++i]); + if (0 >= maxSplits) maxSplits = Integer.MAX_VALUE; + sampler = new SplitSampler(numSamples, maxSplits); + } else if ("-splitRandom".equals(args[i])) { + double pcnt = Double.parseDouble(args[++i]); + int numSamples = Integer.parseInt(args[++i]); + int maxSplits = Integer.parseInt(args[++i]); + if (0 >= maxSplits) maxSplits = Integer.MAX_VALUE; + sampler = new RandomSampler(pcnt, numSamples, maxSplits); + } else if ("-splitInterval".equals(args[i])) { + double pcnt = Double.parseDouble(args[++i]); + int maxSplits = Integer.parseInt(args[++i]); + if (0 >= maxSplits) maxSplits = Integer.MAX_VALUE; + sampler = new IntervalSampler(pcnt, maxSplits); + } else { + otherArgs.add(args[i]); + } + } catch (NumberFormatException except) { + System.out.println("ERROR: Integer expected instead of " + args[i]); + return printUsage(); + } catch (ArrayIndexOutOfBoundsException except) { + System.out.println("ERROR: Required parameter missing from " + + args[i-1]); + return printUsage(); + } + } + if (job.getNumReduceTasks() <= 1) { + System.err.println("Sampler requires more than one reducer"); + return printUsage(); + } + if (otherArgs.size() < 2) { + System.out.println("ERROR: Wrong number of parameters: "); + return printUsage(); + } + if (null == sampler) { + sampler = new RandomSampler(0.1, 10000, 10); + } + + Path outf = new Path(otherArgs.remove(otherArgs.size() - 1)); + TotalOrderPartitioner.setPartitionFile(job, outf); + for (String s : otherArgs) { + FileInputFormat.addInputPath(job, new Path(s)); + } + InputSampler.writePartitionFile(job, sampler); + + return 0; + } + + public static void main(String[] args) throws Exception { + JobConf job = new JobConf(InputSampler.class); + InputSampler sampler = new InputSampler(job); + int res = ToolRunner.run(sampler, args); + System.exit(res); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/InverseMapper.java b/src/mapred/org/apache/hadoop/mapred/lib/InverseMapper.java new file mode 100644 index 0000000..de0aeaf --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/InverseMapper.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib; + +import java.io.IOException; + +import org.apache.hadoop.mapred.MapReduceBase; +import org.apache.hadoop.mapred.Mapper; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.Reporter; + +/** A {@link Mapper} that swaps keys and values. + * @deprecated Use {@link org.apache.hadoop.mapreduce.lib.map.InverseMapper} + * instead. + */ +@Deprecated +public class InverseMapper + extends MapReduceBase implements Mapper { + + /** The inverse function. Input keys and values are swapped.*/ + public void map(K key, V value, + OutputCollector output, Reporter reporter) + throws IOException { + output.collect(value, key); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/KeyFieldBasedComparator.java b/src/mapred/org/apache/hadoop/mapred/lib/KeyFieldBasedComparator.java new file mode 100644 index 0000000..0b62d57 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/KeyFieldBasedComparator.java @@ -0,0 +1,328 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib; + +import java.util.List; + +import org.apache.hadoop.io.WritableComparator; +import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.JobConfigurable; +import org.apache.hadoop.mapred.lib.KeyFieldHelper.KeyDescription; +import org.apache.hadoop.io.Text; + +/** + * This comparator implementation provides a subset of the features provided + * by the Unix/GNU Sort. In particular, the supported features are: + * -n, (Sort numerically) + * -r, (Reverse the result of comparison) + * -k pos1[,pos2], where pos is of the form f[.c][opts], where f is the number + * of the field to use, and c is the number of the first character from the + * beginning of the field. Fields and character posns are numbered starting + * with 1; a character position of zero in pos2 indicates the field's last + * character. If '.c' is omitted from pos1, it defaults to 1 (the beginning + * of the field); if omitted from pos2, it defaults to 0 (the end of the + * field). opts are ordering options (any of 'nr' as described above). + * We assume that the fields in the key are separated by + * map.output.key.field.separator. 
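+ *
+ * For example, to sort the reduce input by the second field of the key,
+ * numerically and in descending order, a job could be configured as follows
+ * (the job class and option string are illustrative):
+ * <pre>
+ * JobConf conf = new JobConf(MyJob.class);
+ * conf.set("map.output.key.field.separator", "\t");
+ * conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
+ * conf.setKeyFieldComparatorOptions("-k2,2nr");
+ * </pre>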
+ */ + +public class KeyFieldBasedComparator extends WritableComparator +implements JobConfigurable { + private KeyFieldHelper keyFieldHelper = new KeyFieldHelper(); + private static final byte NEGATIVE = (byte)'-'; + private static final byte ZERO = (byte)'0'; + private static final byte DECIMAL = (byte)'.'; + + public void configure(JobConf job) { + String option = job.getKeyFieldComparatorOption(); + String keyFieldSeparator = job.get("map.output.key.field.separator","\t"); + keyFieldHelper.setKeyFieldSeparator(keyFieldSeparator); + keyFieldHelper.parseOption(option); + } + + public KeyFieldBasedComparator() { + super(Text.class); + } + + + public int compare(byte[] b1, int s1, int l1, + byte[] b2, int s2, int l2) { + int n1 = WritableUtils.decodeVIntSize(b1[s1]); + int n2 = WritableUtils.decodeVIntSize(b2[s2]); + List allKeySpecs = keyFieldHelper.keySpecs(); + if (allKeySpecs.size() == 0) { + return compareBytes(b1, s1+n1, l1-n1, b2, s2+n2, l2-n2); + } + int []lengthIndicesFirst = keyFieldHelper.getWordLengths(b1, s1+n1, s1+l1); + int []lengthIndicesSecond = keyFieldHelper.getWordLengths(b2, s2+n2, s2+l2); + for (KeyDescription keySpec : allKeySpecs) { + int startCharFirst = keyFieldHelper.getStartOffset(b1, s1+n1, s1+l1, lengthIndicesFirst, + keySpec); + int endCharFirst = keyFieldHelper.getEndOffset(b1, s1+n1, s1+l1, lengthIndicesFirst, + keySpec); + int startCharSecond = keyFieldHelper.getStartOffset(b2, s2+n2, s2+l2, lengthIndicesSecond, + keySpec); + int endCharSecond = keyFieldHelper.getEndOffset(b2, s2+n2, s2+l2, lengthIndicesSecond, + keySpec); + int result; + if ((result = compareByteSequence(b1, startCharFirst, endCharFirst, b2, + startCharSecond, endCharSecond, keySpec)) != 0) { + return result; + } + } + return 0; + } + + private int compareByteSequence(byte[] first, int start1, int end1, + byte[] second, int start2, int end2, KeyDescription key) { + if (start1 == -1) { + if (key.reverse) { + return 1; + } + return -1; + } + if (start2 == -1) { + if (key.reverse) { + return -1; + } + return 1; + } + int compareResult = 0; + if (!key.numeric) { + compareResult = compareBytes(first, start1, end1-start1+1, second, start2, end2-start2+1); + } + if (key.numeric) { + compareResult = numericalCompare (first, start1, end1, second, start2, end2); + } + if (key.reverse) { + return -compareResult; + } + return compareResult; + } + + private int numericalCompare (byte[] a, int start1, int end1, + byte[] b, int start2, int end2) { + int i = start1; + int j = start2; + int mul = 1; + byte first_a = a[i]; + byte first_b = b[j]; + if (first_a == NEGATIVE) { + if (first_b != NEGATIVE) { + //check for cases like -0.0 and 0.0 (they should be declared equal) + return oneNegativeCompare(a,start1+1,end1,b,start2,end2); + } + i++; + } + if (first_b == NEGATIVE) { + if (first_a != NEGATIVE) { + //check for cases like 0.0 and -0.0 (they should be declared equal) + return -oneNegativeCompare(b,start2+1,end2,a,start1,end1); + } + j++; + } + if (first_b == NEGATIVE && first_a == NEGATIVE) { + mul = -1; + } + + //skip over ZEROs + while (i <= end1) { + if (a[i] != ZERO) { + break; + } + i++; + } + while (j <= end2) { + if (b[j] != ZERO) { + break; + } + j++; + } + + //skip over equal characters and stopping at the first nondigit char + //The nondigit character could be '.' + while (i <= end1 && j <= end2) { + if (!isdigit(a[i]) || a[i] != b[j]) { + break; + } + i++; j++; + } + if (i <= end1) { + first_a = a[i]; + } + if (j <= end2) { + first_b = b[j]; + } + //store the result of the difference. 
This could be final result if the + //number of digits in the mantissa is the same in both the numbers + int firstResult = first_a - first_b; + + //check whether we hit a decimal in the earlier scan + if ((first_a == DECIMAL && (!isdigit(first_b) || j > end2)) || + (first_b == DECIMAL && (!isdigit(first_a) || i > end1))) { + return ((mul < 0) ? -decimalCompare(a,i,end1,b,j,end2) : + decimalCompare(a,i,end1,b,j,end2)); + } + //check the number of digits in the mantissa of the numbers + int numRemainDigits_a = 0; + int numRemainDigits_b = 0; + while (i <= end1) { + //if we encounter a non-digit treat the corresponding number as being + //smaller + if (isdigit(a[i++])) { + numRemainDigits_a++; + } else break; + } + while (j <= end2) { + //if we encounter a non-digit treat the corresponding number as being + //smaller + if (isdigit(b[j++])) { + numRemainDigits_b++; + } else break; + } + int ret = numRemainDigits_a - numRemainDigits_b; + if (ret == 0) { + return ((mul < 0) ? -firstResult : firstResult); + } else { + return ((mul < 0) ? -ret : ret); + } + } + private boolean isdigit(byte b) { + if ('0' <= b && b <= '9') { + return true; + } + return false; + } + private int decimalCompare(byte[] a, int i, int end1, + byte[] b, int j, int end2) { + if (i > end1) { + //if a[] has nothing remaining + return -decimalCompare1(b, ++j, end2); + } + if (j > end2) { + //if b[] has nothing remaining + return decimalCompare1(a, ++i, end1); + } + if (a[i] == DECIMAL && b[j] == DECIMAL) { + while (i <= end1 && j <= end2) { + if (a[i] != b[j]) { + if (isdigit(a[i]) && isdigit(b[j])) { + return a[i] - b[j]; + } + if (isdigit(a[i])) { + return 1; + } + if (isdigit(b[j])) { + return -1; + } + return 0; + } + i++; j++; + } + if (i > end1 && j > end2) { + return 0; + } + + if (i > end1) { + //check whether there is a non-ZERO digit after potentially + //a number of ZEROs (e.g., a=.4444, b=.444400004) + return -decimalCompare1(b, j, end2); + } + if (j > end2) { + //check whether there is a non-ZERO digit after potentially + //a number of ZEROs (e.g., b=.4444, a=.444400004) + return decimalCompare1(a, i, end1); + } + } + else if (a[i] == DECIMAL) { + return decimalCompare1(a, ++i, end1); + } + else if (b[j] == DECIMAL) { + return -decimalCompare1(b, ++j, end2); + } + return 0; + } + + private int decimalCompare1(byte[] a, int i, int end) { + while (i <= end) { + if (a[i] == ZERO) { + i++; + continue; + } + if (isdigit(a[i])) { + return 1; + } else { + return 0; + } + } + return 0; + } + + private int oneNegativeCompare(byte[] a, int start1, int end1, + byte[] b, int start2, int end2) { + //here a[] is negative and b[] is positive + //We have to ascertain whether the number contains any digits. + //If it does, then it is a smaller number for sure. If not, + //then we need to scan b[] to find out whether b[] has a digit + //If b[] does contain a digit, then b[] is certainly + //greater. If not, that is, both a[] and b[] don't contain + //digits then they should be considered equal. 
+ if (!isZero(a, start1, end1)) { + return -1; + } + //reached here - this means that a[] is a ZERO + if (!isZero(b, start2, end2)) { + return -1; + } + //reached here - both numbers are basically ZEROs and hence + //they should compare equal + return 0; + } + + private boolean isZero(byte a[], int start, int end) { + //check for zeros in the significand part as well as the decimal part + //note that we treat the non-digit characters as ZERO + int i = start; + //we check the significand for being a ZERO + while (i <= end) { + if (a[i] != ZERO) { + if (a[i] != DECIMAL && isdigit(a[i])) { + return false; + } + break; + } + i++; + } + + if (i != (end+1) && a[i++] == DECIMAL) { + //we check the decimal part for being a ZERO + while (i <= end) { + if (a[i] != ZERO) { + if (isdigit(a[i])) { + return false; + } + break; + } + i++; + } + } + return true; + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/KeyFieldBasedPartitioner.java b/src/mapred/org/apache/hadoop/mapred/lib/KeyFieldBasedPartitioner.java new file mode 100644 index 0000000..7a08a6a --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/KeyFieldBasedPartitioner.java @@ -0,0 +1,112 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib; + +import java.io.UnsupportedEncodingException; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.Partitioner; +import org.apache.hadoop.mapred.lib.KeyFieldHelper.KeyDescription; + + /** + * Defines a way to partition keys based on certain key fields (also see + * {@link KeyFieldBasedComparator}. + * The key specification supported is of the form -k pos1[,pos2], where, + * pos is of the form f[.c][opts], where f is the number + * of the key field to use, and c is the number of the first character from + * the beginning of the field. Fields and character posns are numbered + * starting with 1; a character position of zero in pos2 indicates the + * field's last character. If '.c' is omitted from pos1, it defaults to 1 + * (the beginning of the field); if omitted from pos2, it defaults to 0 + * (the end of the field). 
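+ *
+ * For example, to partition on the first two fields of the key so that all
+ * keys sharing those fields go to the same reducer (the job class and
+ * option string are illustrative):
+ * <pre>
+ * JobConf conf = new JobConf(MyJob.class);
+ * conf.set("map.output.key.field.separator", "\t");
+ * conf.setPartitionerClass(KeyFieldBasedPartitioner.class);
+ * conf.setKeyFieldPartitionerOptions("-k1,2");
+ * </pre>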
+ * + */ +public class KeyFieldBasedPartitioner implements Partitioner { + + private static final Log LOG = LogFactory.getLog(KeyFieldBasedPartitioner.class.getName()); + private int numOfPartitionFields; + + private KeyFieldHelper keyFieldHelper = new KeyFieldHelper(); + + public void configure(JobConf job) { + String keyFieldSeparator = job.get("map.output.key.field.separator", "\t"); + keyFieldHelper.setKeyFieldSeparator(keyFieldSeparator); + if (job.get("num.key.fields.for.partition") != null) { + LOG.warn("Using deprecated num.key.fields.for.partition. " + + "Use mapred.text.key.partitioner.options instead"); + this.numOfPartitionFields = job.getInt("num.key.fields.for.partition",0); + keyFieldHelper.setKeyFieldSpec(1,numOfPartitionFields); + } else { + String option = job.getKeyFieldPartitionerOption(); + keyFieldHelper.parseOption(option); + } + } + + public int getPartition(K2 key, V2 value, + int numReduceTasks) { + byte[] keyBytes; + + List allKeySpecs = keyFieldHelper.keySpecs(); + if (allKeySpecs.size() == 0) { + return getPartition(key.toString().hashCode(), numReduceTasks); + } + + try { + keyBytes = key.toString().getBytes("UTF-8"); + } catch (UnsupportedEncodingException e) { + throw new RuntimeException("The current system does not " + + "support UTF-8 encoding!", e); + } + // return 0 if the key is empty + if (keyBytes.length == 0) { + return 0; + } + + int []lengthIndicesFirst = keyFieldHelper.getWordLengths(keyBytes, 0, + keyBytes.length); + int currentHash = 0; + for (KeyDescription keySpec : allKeySpecs) { + int startChar = keyFieldHelper.getStartOffset(keyBytes, 0, keyBytes.length, + lengthIndicesFirst, keySpec); + // no key found! continue + if (startChar < 0) { + continue; + } + int endChar = keyFieldHelper.getEndOffset(keyBytes, 0, keyBytes.length, + lengthIndicesFirst, keySpec); + currentHash = hashCode(keyBytes, startChar, endChar, + currentHash); + } + return getPartition(currentHash, numReduceTasks); + } + + protected int hashCode(byte[] b, int start, int end, int currentHash) { + for (int i = start; i <= end; i++) { + currentHash = 31*currentHash + b[i]; + } + return currentHash; + } + + protected int getPartition(int hash, int numReduceTasks) { + return (hash & Integer.MAX_VALUE) % numReduceTasks; + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/KeyFieldHelper.java b/src/mapred/org/apache/hadoop/mapred/lib/KeyFieldHelper.java new file mode 100644 index 0000000..a091b12 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/KeyFieldHelper.java @@ -0,0 +1,296 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred.lib; + +import java.io.UnsupportedEncodingException; +import java.util.List; +import java.util.ArrayList; +import java.util.StringTokenizer; + +import org.apache.hadoop.util.UTF8ByteArrayUtils; + +/** + * This is used in {@link KeyFieldBasedComparator} & + * {@link KeyFieldBasedPartitioner}. Defines all the methods + * for parsing key specifications. The key specification is of the form: + * -k pos1[,pos2], where pos is of the form f[.c][opts], where f is the number + * of the field to use, and c is the number of the first character from the + * beginning of the field. Fields and character posns are numbered starting + * with 1; a character position of zero in pos2 indicates the field's last + * character. If '.c' is omitted from pos1, it defaults to 1 (the beginning + * of the field); if omitted from pos2, it defaults to 0 (the end of the + * field). opts are ordering options (supported options are 'nr'). + */ + +class KeyFieldHelper { + + protected static class KeyDescription { + int beginFieldIdx = 1; + int beginChar = 1; + int endFieldIdx = 0; + int endChar = 0; + boolean numeric; + boolean reverse; + @Override + public String toString() { + return "-k" + + beginFieldIdx + "." + beginChar + "," + + endFieldIdx + "." + endChar + + (numeric ? "n" : "") + (reverse ? "r" : ""); + } + } + + private List allKeySpecs = new ArrayList(); + private byte[] keyFieldSeparator; + private boolean keySpecSeen = false; + + public void setKeyFieldSeparator(String keyFieldSeparator) { + try { + this.keyFieldSeparator = + keyFieldSeparator.getBytes("UTF-8"); + } catch (UnsupportedEncodingException e) { + throw new RuntimeException("The current system does not " + + "support UTF-8 encoding!", e); + } + } + + /** Required for backcompatibility with num.key.fields.for.partition in + * {@link KeyFieldBasedPartitioner} */ + public void setKeyFieldSpec(int start, int end) { + if (end >= start) { + KeyDescription k = new KeyDescription(); + k.beginFieldIdx = start; + k.endFieldIdx = end; + keySpecSeen = true; + allKeySpecs.add(k); + } + } + + public List keySpecs() { + return allKeySpecs; + } + + public int[] getWordLengths(byte []b, int start, int end) { + //Given a string like "hello how are you", it returns an array + //like [4 5, 3, 3, 3], where the first element is the number of + //fields + if (!keySpecSeen) { + //if there were no key specs, then the whole key is one word + return new int[] {1}; + } + int[] lengths = new int[10]; + int currLenLengths = lengths.length; + int idx = 1; + int pos; + while ((pos = UTF8ByteArrayUtils.findBytes(b, start, end, + keyFieldSeparator)) != -1) { + if (++idx == currLenLengths) { + int[] temp = lengths; + lengths = new int[(currLenLengths = currLenLengths*2)]; + System.arraycopy(temp, 0, lengths, 0, temp.length); + } + lengths[idx - 1] = pos - start; + start = pos + 1; + } + + if (start != end) { + lengths[idx] = end - start; + } + lengths[0] = idx; //number of words is the first element + return lengths; + } + public int getStartOffset(byte[]b, int start, int end, + int []lengthIndices, KeyDescription k) { + //if -k2.5,2 is the keyspec, the startChar is lengthIndices[1] + 5 + //note that the [0]'th element is the number of fields in the key + if (lengthIndices[0] >= k.beginFieldIdx) { + int position = 0; + for (int i = 1; i < k.beginFieldIdx; i++) { + position += lengthIndices[i] + keyFieldSeparator.length; + } + if (position + k.beginChar <= (end - start)) { + return start + position + k.beginChar - 1; + } + } + return -1; + } + 
public int getEndOffset(byte[]b, int start, int end, + int []lengthIndices, KeyDescription k) { + //if -k2,2.8 is the keyspec, the endChar is lengthIndices[1] + 8 + //note that the [0]'th element is the number of fields in the key + if (k.endFieldIdx == 0) { + //there is no end field specified for this keyspec. So the remaining + //part of the key is considered in its entirety. + return end - 1; + } + if (lengthIndices[0] >= k.endFieldIdx) { + int position = 0; + int i; + for (i = 1; i < k.endFieldIdx; i++) { + position += lengthIndices[i] + keyFieldSeparator.length; + } + if (k.endChar == 0) { + position += lengthIndices[i]; + } + if (position + k.endChar <= (end - start)) { + return start + position + k.endChar - 1; + } + return end - 1; + } + return end - 1; + } + public void parseOption(String option) { + if (option == null || option.equals("")) { + //we will have only default comparison + return; + } + StringTokenizer args = new StringTokenizer(option); + KeyDescription global = new KeyDescription(); + while (args.hasMoreTokens()) { + String arg = args.nextToken(); + if (arg.equals("-n")) { + global.numeric = true; + } + if (arg.equals("-r")) { + global.reverse = true; + } + if (arg.equals("-nr")) { + global.numeric = true; + global.reverse = true; + } + if (arg.startsWith("-k")) { + KeyDescription k = parseKey(arg, args); + if (k != null) { + allKeySpecs.add(k); + keySpecSeen = true; + } + } + } + for (KeyDescription key : allKeySpecs) { + if (!(key.reverse | key.numeric)) { + key.reverse = global.reverse; + key.numeric = global.numeric; + } + } + if (allKeySpecs.size() == 0) { + allKeySpecs.add(global); + } + } + + private KeyDescription parseKey(String arg, StringTokenizer args) { + //we allow for -k and -k + String keyArgs = null; + if (arg.length() == 2) { + if (args.hasMoreTokens()) { + keyArgs = args.nextToken(); + } + } else { + keyArgs = arg.substring(2); + } + if (keyArgs == null || keyArgs.length() == 0) { + return null; + } + StringTokenizer st = new StringTokenizer(keyArgs,"nr.,",true); + + KeyDescription key = new KeyDescription(); + + String token; + //the key is of the form 1[.3][nr][,1.5][nr] + if (st.hasMoreTokens()) { + token = st.nextToken(); + //the first token must be a number + key.beginFieldIdx = Integer.parseInt(token); + } + if (st.hasMoreTokens()) { + token = st.nextToken(); + if (token.equals(".")) { + token = st.nextToken(); + key.beginChar = Integer.parseInt(token); + if (st.hasMoreTokens()) { + token = st.nextToken(); + } else { + return key; + } + } + do { + if (token.equals("n")) { + key.numeric = true; + } + else if (token.equals("r")) { + key.reverse = true; + } + else break; + if (st.hasMoreTokens()) { + token = st.nextToken(); + } else { + return key; + } + } while (true); + if (token.equals(",")) { + token = st.nextToken(); + //the first token must be a number + key.endFieldIdx = Integer.parseInt(token); + if (st.hasMoreTokens()) { + token = st.nextToken(); + if (token.equals(".")) { + token = st.nextToken(); + key.endChar = Integer.parseInt(token); + if (st.hasMoreTokens()) { + token = st.nextToken(); + } else { + return key; + } + } + do { + if (token.equals("n")) { + key.numeric = true; + } + else if (token.equals("r")) { + key.reverse = true; + } + else { + throw new IllegalArgumentException("Invalid -k argument. 
" + + "Must be of the form -k pos1,[pos2], where pos is of the form " + + "f[.c]nr"); + } + if (st.hasMoreTokens()) { + token = st.nextToken(); + } else { + break; + } + } while (true); + } + return key; + } + throw new IllegalArgumentException("Invalid -k argument. " + + "Must be of the form -k pos1,[pos2], where pos is of the form " + + "f[.c]nr"); + } + return key; + } + private void printKey(KeyDescription key) { + System.out.println("key.beginFieldIdx: " + key.beginFieldIdx); + System.out.println("key.beginChar: " + key.beginChar); + System.out.println("key.endFieldIdx: " + key.endFieldIdx); + System.out.println("key.endChar: " + key.endChar); + System.out.println("key.numeric: " + key.numeric); + System.out.println("key.reverse: " + key.reverse); + System.out.println("parseKey over"); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/LongSumReducer.java b/src/mapred/org/apache/hadoop/mapred/lib/LongSumReducer.java new file mode 100644 index 0000000..cc105dd --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/LongSumReducer.java @@ -0,0 +1,54 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib; + +import java.io.IOException; +import java.util.Iterator; + +import org.apache.hadoop.mapred.Reducer; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.mapred.MapReduceBase; + +import org.apache.hadoop.io.LongWritable; + +/** A {@link Reducer} that sums long values. + * @deprecated Use {@link org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer} + * instead. + */ +@Deprecated +public class LongSumReducer extends MapReduceBase + implements Reducer { + + public void reduce(K key, Iterator values, + OutputCollector output, + Reporter reporter) + throws IOException { + + // sum all values for this key + long sum = 0; + while (values.hasNext()) { + sum += values.next().get(); + } + + // output sum + output.collect(key, new LongWritable(sum)); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/MultipleInputs.java b/src/mapred/org/apache/hadoop/mapred/lib/MultipleInputs.java new file mode 100644 index 0000000..d34b332 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/MultipleInputs.java @@ -0,0 +1,131 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred.lib; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.mapred.InputFormat; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.Mapper; +import org.apache.hadoop.util.ReflectionUtils; + +/** + * This class supports MapReduce jobs that have multiple input paths with + * a different {@link InputFormat} and {@link Mapper} for each path + */ +public class MultipleInputs { + /** + * Add a {@link Path} with a custom {@link InputFormat} to the list of + * inputs for the map-reduce job. + * + * @param conf The configuration of the job + * @param path {@link Path} to be added to the list of inputs for the job + * @param inputFormatClass {@link InputFormat} class to use for this path + */ + public static void addInputPath(JobConf conf, Path path, + Class inputFormatClass) { + + String inputFormatMapping = path.toString() + ";" + + inputFormatClass.getName(); + String inputFormats = conf.get("mapred.input.dir.formats"); + conf.set("mapred.input.dir.formats", + inputFormats == null ? inputFormatMapping : inputFormats + "," + + inputFormatMapping); + + conf.setInputFormat(DelegatingInputFormat.class); + } + + /** + * Add a {@link Path} with a custom {@link InputFormat} and + * {@link Mapper} to the list of inputs for the map-reduce job. + * + * @param conf The configuration of the job + * @param path {@link Path} to be added to the list of inputs for the job + * @param inputFormatClass {@link InputFormat} class to use for this path + * @param mapperClass {@link Mapper} class to use for this path + */ + public static void addInputPath(JobConf conf, Path path, + Class inputFormatClass, + Class mapperClass) { + + addInputPath(conf, path, inputFormatClass); + + String mapperMapping = path.toString() + ";" + mapperClass.getName(); + String mappers = conf.get("mapred.input.dir.mappers"); + conf.set("mapred.input.dir.mappers", mappers == null ? mapperMapping + : mappers + "," + mapperMapping); + + conf.setMapperClass(DelegatingMapper.class); + } + + /** + * Retrieves a map of {@link Path}s to the {@link InputFormat} class + * that should be used for them. + * + * @param conf The confuration of the job + * @see #addInputPath(JobConf, Path, Class) + * @return A map of paths to inputformats for the job + */ + static Map getInputFormatMap(JobConf conf) { + Map m = new HashMap(); + String[] pathMappings = conf.get("mapred.input.dir.formats").split(","); + for (String pathMapping : pathMappings) { + String[] split = pathMapping.split(";"); + InputFormat inputFormat; + try { + inputFormat = (InputFormat) ReflectionUtils.newInstance(conf + .getClassByName(split[1]), conf); + } catch (ClassNotFoundException e) { + throw new RuntimeException(e); + } + m.put(new Path(split[0]), inputFormat); + } + return m; + } + + /** + * Retrieves a map of {@link Path}s to the {@link Mapper} class that + * should be used for them. 
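+ * The mapping is read from the "mapred.input.dir.mappers" property, a
+ * comma-separated list of path;mapper-class pairs, for example
+ * "/data/a;org.example.AMapper,/data/b;org.example.BMapper" (values are
+ * illustrative).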
+ * + * @param conf The confuration of the job + * @see #addInputPath(JobConf, Path, Class, Class) + * @return A map of paths to mappers for the job + */ + @SuppressWarnings("unchecked") + static Map> getMapperTypeMap(JobConf conf) { + if (conf.get("mapred.input.dir.mappers") == null) { + return Collections.emptyMap(); + } + Map> m = new HashMap>(); + String[] pathMappings = conf.get("mapred.input.dir.mappers").split(","); + for (String pathMapping : pathMappings) { + String[] split = pathMapping.split(";"); + Class mapClass; + try { + mapClass = (Class) conf.getClassByName(split[1]); + } catch (ClassNotFoundException e) { + throw new RuntimeException(e); + } + m.put(new Path(split[0]), mapClass); + } + return m; + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/MultipleOutputFormat.java b/src/mapred/org/apache/hadoop/mapred/lib/MultipleOutputFormat.java new file mode 100644 index 0000000..13025de --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/MultipleOutputFormat.java @@ -0,0 +1,227 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib; + +import java.io.IOException; +import java.util.Iterator; +import java.util.TreeMap; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.FileOutputFormat; +import org.apache.hadoop.mapred.RecordWriter; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.util.Progressable; + +/** + * This abstract class extends the FileOutputFormat, allowing to write the + * output data to different output files. There are three basic use cases for + * this class. + * + * Case one: This class is used for a map reduce job with at least one reducer. + * The reducer wants to write data to different files depending on the actual + * keys. It is assumed that a key (or value) encodes the actual key (value) + * and the desired location for the actual key (value). + * + * Case two: This class is used for a map only job. The job wants to use an + * output file name that is either a part of the input file name of the input + * data, or some derivation of it. + * + * Case three: This class is used for a map only job. The job wants to use an + * output file name that depends on both the keys and the input file name, + * + */ +public abstract class MultipleOutputFormat +extends FileOutputFormat { + + /** + * Create a composite record writer that can write key/value data to different + * output files + * + * @param fs + * the file system to use + * @param job + * the job conf for the job + * @param name + * the leaf file name for the output file (such as part-00000") + * @param arg3 + * a progressable for reporting progress. 
+ * @return a composite record writer + * @throws IOException + */ + public RecordWriter getRecordWriter(FileSystem fs, JobConf job, + String name, Progressable arg3) throws IOException { + + final FileSystem myFS = fs; + final String myName = generateLeafFileName(name); + final JobConf myJob = job; + final Progressable myProgressable = arg3; + + return new RecordWriter() { + + // a cache storing the record writers for different output files. + TreeMap> recordWriters = new TreeMap>(); + + public void write(K key, V value) throws IOException { + + // get the file name based on the key + String keyBasedPath = generateFileNameForKeyValue(key, value, myName); + + // get the file name based on the input file name + String finalPath = getInputFileBasedOutputFileName(myJob, keyBasedPath); + + // get the actual key + K actualKey = generateActualKey(key, value); + V actualValue = generateActualValue(key, value); + + RecordWriter rw = this.recordWriters.get(finalPath); + if (rw == null) { + // if we don't have the record writer yet for the final path, create + // one + // and add it to the cache + rw = getBaseRecordWriter(myFS, myJob, finalPath, myProgressable); + this.recordWriters.put(finalPath, rw); + } + rw.write(actualKey, actualValue); + }; + + public void close(Reporter reporter) throws IOException { + Iterator keys = this.recordWriters.keySet().iterator(); + while (keys.hasNext()) { + RecordWriter rw = this.recordWriters.get(keys.next()); + rw.close(reporter); + } + this.recordWriters.clear(); + }; + }; + } + + /** + * Generate the leaf name for the output file name. The default behavior does + * not change the leaf file name (such as part-00000) + * + * @param name + * the leaf file name for the output file + * @return the given leaf file name + */ + protected String generateLeafFileName(String name) { + return name; + } + + /** + * Generate the file output file name based on the given key and the leaf file + * name. The default behavior is that the file name does not depend on the + * key. + * + * @param key + * the key of the output data + * @param name + * the leaf file name + * @return generated file name + */ + protected String generateFileNameForKeyValue(K key, V value, String name) { + return name; + } + + /** + * Generate the actual key from the given key/value. The default behavior is that + * the actual key is equal to the given key + * + * @param key + * the key of the output data + * @param value + * the value of the output data + * @return the actual key derived from the given key/value + */ + protected K generateActualKey(K key, V value) { + return key; + } + + /** + * Generate the actual value from the given key and value. The default behavior is that + * the actual value is equal to the given value + * + * @param key + * the key of the output data + * @param value + * the value of the output data + * @return the actual value derived from the given key/value + */ + protected V generateActualValue(K key, V value) { + return value; + } + + + /** + * Generate the outfile name based on a given anme and the input file name. If + * the map input file does not exists (i.e. this is not for a map only job), + * the given name is returned unchanged. If the config value for + * "num.of.trailing.legs.to.use" is not set, or set 0 or negative, the given + * name is returned unchanged. Otherwise, return a file name consisting of the + * N trailing legs of the input file name where N is the config value for + * "num.of.trailing.legs.to.use". 
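To make the trailing-legs derivation above concrete, here is a small standalone sketch; the sample path is hypothetical, and note that the implementation below reads the leg count from the mapred.outputformat.numOfTrailingLegs property:

    import org.apache.hadoop.fs.Path;

    public class TrailingLegsSketch {
      // mirrors the derivation performed by getInputFileBasedOutputFileName below
      static String trailingLegs(String inputFile, int numOfTrailingLegsToUse) {
        Path infile = new Path(inputFile);
        Path parent = infile.getParent();
        String midName = infile.getName();
        Path outPath = new Path(midName);
        for (int i = 1; i < numOfTrailingLegsToUse; i++) {
          if (parent == null) break;
          midName = parent.getName();
          if (midName.length() == 0) break;
          parent = parent.getParent();
          outPath = new Path(midName, outPath);
        }
        return outPath.toString();
      }

      public static void main(String[] args) {
        // prints "11/30/part-00000": the three trailing legs of the input path
        System.out.println(trailingLegs("/data/2010/11/30/part-00000", 3));
      }
    }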
+ * + * @param job + * the job config + * @param name + * the output file name + * @return the outfile name based on a given anme and the input file name. + */ + protected String getInputFileBasedOutputFileName(JobConf job, String name) { + String infilepath = job.get("map.input.file"); + if (infilepath == null) { + // if the map input file does not exists, then return the given name + return name; + } + int numOfTrailingLegsToUse = job.getInt("mapred.outputformat.numOfTrailingLegs", 0); + if (numOfTrailingLegsToUse <= 0) { + return name; + } + Path infile = new Path(infilepath); + Path parent = infile.getParent(); + String midName = infile.getName(); + Path outPath = new Path(midName); + for (int i = 1; i < numOfTrailingLegsToUse; i++) { + if (parent == null) break; + midName = parent.getName(); + if (midName.length() == 0) break; + parent = parent.getParent(); + outPath = new Path(midName, outPath); + } + return outPath.toString(); + } + + /** + * + * @param fs + * the file system to use + * @param job + * a job conf object + * @param name + * the name of the file over which a record writer object will be + * constructed + * @param arg3 + * a progressable object + * @return A RecordWriter object over the given file + * @throws IOException + */ + abstract protected RecordWriter getBaseRecordWriter(FileSystem fs, + JobConf job, String name, Progressable arg3) throws IOException; +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/MultipleOutputs.java b/src/mapred/org/apache/hadoop/mapred/lib/MultipleOutputs.java new file mode 100644 index 0000000..aaaa458 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/MultipleOutputs.java @@ -0,0 +1,563 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred.lib; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.mapred.*; +import org.apache.hadoop.util.Progressable; + +import java.io.IOException; +import java.util.*; + +/** + * The MultipleOutputs class simplifies writting to additional outputs other + * than the job default output via the OutputCollector passed to + * the map() and reduce() methods of the + * Mapper and Reducer implementations. + *

+ * Each additional output, or named output, may be configured with its own + * OutputFormat, with its own key class and with its own value + * class. + *

+ * A named output can be a single file or a multi file. The latter is referred to as + * a multi named output. + *

+ * A multi named output is an unbounded set of files all sharing the same + * OutputFormat, key class and value class configuration. + *

+ * When named outputs are used within a Mapper implementation, + * key/values written to a named output are not part of the reduce phase; only + * key/values written to the job OutputCollector are part of the + * reduce phase. + *

+ * MultipleOutputs supports counters; by default they are disabled. The counters + * group is the {@link MultipleOutputs} class name. + *

+ * The names of the counters are the same as the named outputs. For multi + * named outputs, the name of the counter is the concatenation of the named + * output, an underscore '_' and the multiname. + *
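For example, assuming the JobConf and the named outputs from the configuration example that follows:

    MultipleOutputs.setCountersEnabled(conf, true);
    // the job then reports counters named "text", "seq_A", "seq_B", ... in the
    // counter group org.apache.hadoop.mapred.lib.MultipleOutputs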

+ * Job configuration usage pattern is: + *

+ *
+ * JobConf conf = new JobConf();
+ *
+ * conf.setInputPath(inDir);
+ * FileOutputFormat.setOutputPath(conf, outDir);
+ *
+ * conf.setMapperClass(MOMap.class);
+ * conf.setReducerClass(MOReduce.class);
+ * ...
+ *
+ * // Defines additional single text based output 'text' for the job
+ * MultipleOutputs.addNamedOutput(conf, "text", TextOutputFormat.class,
+ * LongWritable.class, Text.class);
+ *
+ * // Defines additional multi sequencefile based output 'sequence' for the
+ * // job
+ * MultipleOutputs.addMultiNamedOutput(conf, "seq",
+ *   SequenceFileOutputFormat.class,
+ *   LongWritable.class, Text.class);
+ * ...
+ *
+ * JobClient jc = new JobClient();
+ * RunningJob job = jc.submitJob(conf);
+ *
+ * ...
+ * 
+ *

+ * Usage pattern within a Reducer implementation is: + *

+ *
+ * public class MOReduce implements
+ *   Reducer<WritableComparable, Writable> {
+ * private MultipleOutputs mos;
+ *
+ * public void configure(JobConf conf) {
+ * ...
+ * mos = new MultipleOutputs(conf);
+ * }
+ *
+ * public void reduce(WritableComparable key, Iterator<Writable> values,
+ * OutputCollector output, Reporter reporter)
+ * throws IOException {
+ * ...
+ * mos.getCollector("text", reporter).collect(key, new Text("Hello"));
+ * mos.getCollector("seq", "A", reporter).collect(key, new Text("Bye"));
+ * mos.getCollector("seq", "B", reporter).collect(key, new Text("Chau"));
+ * ...
+ * }
+ *
+ * public void close() throws IOException {
+ * mos.close();
+ * ...
+ * }
+ *
+ * }
+ * 
+ */ +public class MultipleOutputs { + + private static final String NAMED_OUTPUTS = "mo.namedOutputs"; + + private static final String MO_PREFIX = "mo.namedOutput."; + + private static final String FORMAT = ".format"; + private static final String KEY = ".key"; + private static final String VALUE = ".value"; + private static final String MULTI = ".multi"; + + private static final String COUNTERS_ENABLED = "mo.counters"; + + /** + * Counters group used by the counters of MultipleOutputs. + */ + private static final String COUNTERS_GROUP = MultipleOutputs.class.getName(); + + /** + * Checks if a named output is alreadyDefined or not. + * + * @param conf job conf + * @param namedOutput named output names + * @param alreadyDefined whether the existence/non-existence of + * the named output is to be checked + * @throws IllegalArgumentException if the output name is alreadyDefined or + * not depending on the value of the + * 'alreadyDefined' parameter + */ + private static void checkNamedOutput(JobConf conf, String namedOutput, + boolean alreadyDefined) { + List definedChannels = getNamedOutputsList(conf); + if (alreadyDefined && definedChannels.contains(namedOutput)) { + throw new IllegalArgumentException("Named output '" + namedOutput + + "' already alreadyDefined"); + } else if (!alreadyDefined && !definedChannels.contains(namedOutput)) { + throw new IllegalArgumentException("Named output '" + namedOutput + + "' not defined"); + } + } + + /** + * Checks if a named output name is valid token. + * + * @param namedOutput named output Name + * @throws IllegalArgumentException if the output name is not valid. + */ + private static void checkTokenName(String namedOutput) { + if (namedOutput == null || namedOutput.length() == 0) { + throw new IllegalArgumentException( + "Name cannot be NULL or emtpy"); + } + for (char ch : namedOutput.toCharArray()) { + if ((ch >= 'A') && (ch <= 'Z')) { + continue; + } + if ((ch >= 'a') && (ch <= 'z')) { + continue; + } + if ((ch >= '0') && (ch <= '9')) { + continue; + } + throw new IllegalArgumentException( + "Name cannot be have a '" + ch + "' char"); + } + } + + /** + * Checks if a named output name is valid. + * + * @param namedOutput named output Name + * @throws IllegalArgumentException if the output name is not valid. + */ + private static void checkNamedOutputName(String namedOutput) { + checkTokenName(namedOutput); + // name cannot be the name used for the default output + if (namedOutput.equals("part")) { + throw new IllegalArgumentException( + "Named output name cannot be 'part'"); + } + } + + /** + * Returns list of channel names. + * + * @param conf job conf + * @return List of channel Names + */ + public static List getNamedOutputsList(JobConf conf) { + List names = new ArrayList(); + StringTokenizer st = new StringTokenizer(conf.get(NAMED_OUTPUTS, ""), " "); + while (st.hasMoreTokens()) { + names.add(st.nextToken()); + } + return names; + } + + + /** + * Returns if a named output is multiple. + * + * @param conf job conf + * @param namedOutput named output + * @return true if the name output is multi, false + * if it is single. If the name output is not defined it returns + * false + */ + public static boolean isMultiNamedOutput(JobConf conf, String namedOutput) { + checkNamedOutput(conf, namedOutput, false); + return conf.getBoolean(MO_PREFIX + namedOutput + MULTI, false); + } + + /** + * Returns the named output OutputFormat. 
+ * + * @param conf job conf + * @param namedOutput named output + * @return namedOutput OutputFormat + */ + public static Class getNamedOutputFormatClass( + JobConf conf, String namedOutput) { + checkNamedOutput(conf, namedOutput, false); + return conf.getClass(MO_PREFIX + namedOutput + FORMAT, null, + OutputFormat.class); + } + + /** + * Returns the key class for a named output. + * + * @param conf job conf + * @param namedOutput named output + * @return class for the named output key + */ + public static Class getNamedOutputKeyClass(JobConf conf, + String namedOutput) { + checkNamedOutput(conf, namedOutput, false); + return conf.getClass(MO_PREFIX + namedOutput + KEY, null, + WritableComparable.class); + } + + /** + * Returns the value class for a named output. + * + * @param conf job conf + * @param namedOutput named output + * @return class of named output value + */ + public static Class getNamedOutputValueClass(JobConf conf, + String namedOutput) { + checkNamedOutput(conf, namedOutput, false); + return conf.getClass(MO_PREFIX + namedOutput + VALUE, null, + Writable.class); + } + + /** + * Adds a named output for the job. + *

+ * + * @param conf job conf to add the named output + * @param namedOutput named output name, it has to be a word, letters + * and numbers only, cannot be the word 'part' as + * that is reserved for the + * default output. + * @param outputFormatClass OutputFormat class. + * @param keyClass key class + * @param valueClass value class + */ + public static void addNamedOutput(JobConf conf, String namedOutput, + Class outputFormatClass, + Class keyClass, Class valueClass) { + addNamedOutput(conf, namedOutput, false, outputFormatClass, keyClass, + valueClass); + } + + /** + * Adds a multi named output for the job. + *

+ * + * @param conf job conf to add the named output + * @param namedOutput named output name, it has to be a word, letters + * and numbers only, cannot be the word 'part' as + * that is reserved for the + * default output. + * @param outputFormatClass OutputFormat class. + * @param keyClass key class + * @param valueClass value class + */ + public static void addMultiNamedOutput(JobConf conf, String namedOutput, + Class outputFormatClass, + Class keyClass, Class valueClass) { + addNamedOutput(conf, namedOutput, true, outputFormatClass, keyClass, + valueClass); + } + + /** + * Adds a named output for the job. + *

+ * + * @param conf job conf to add the named output + * @param namedOutput named output name, it has to be a word, letters + * and numbers only, cannot be the word 'part' as + * that is reserved for the + * default output. + * @param multi indicates if the named output is multi + * @param outputFormatClass OutputFormat class. + * @param keyClass key class + * @param valueClass value class + */ + private static void addNamedOutput(JobConf conf, String namedOutput, + boolean multi, + Class outputFormatClass, + Class keyClass, Class valueClass) { + checkNamedOutputName(namedOutput); + checkNamedOutput(conf, namedOutput, true); + conf.set(NAMED_OUTPUTS, conf.get(NAMED_OUTPUTS, "") + " " + namedOutput); + conf.setClass(MO_PREFIX + namedOutput + FORMAT, outputFormatClass, + OutputFormat.class); + conf.setClass(MO_PREFIX + namedOutput + KEY, keyClass, Object.class); + conf.setClass(MO_PREFIX + namedOutput + VALUE, valueClass, Object.class); + conf.setBoolean(MO_PREFIX + namedOutput + MULTI, multi); + } + + /** + * Enables or disables counters for the named outputs. + *

+ * By default these counters are disabled. + *

+ * MultipleOutputs supports counters; by default they are disabled. + * The counters group is the {@link MultipleOutputs} class name. + *

+ * The names of the counters are the same as the named outputs. For multi + * named outputs, the name of the counter is the concatenation of the named + * output, an underscore '_' and the multiname. + * + * @param conf job conf in which to enable or disable the counters. + * @param enabled indicates if the counters will be enabled or not. + */ + public static void setCountersEnabled(JobConf conf, boolean enabled) { + conf.setBoolean(COUNTERS_ENABLED, enabled); + } + + /** + * Returns if the counters for the named outputs are enabled or not. + *

+ * By default these counters are disabled. + *

+ * MultipleOutputs supports counters; by default they are disabled. + * The counters group is the {@link MultipleOutputs} class name. + *

+ * The names of the counters are the same as the named outputs. For multi + * named outputs the name of the counter is the concatenation of the named + * output, and underscore '_' and the multiname. + * + * + * @param conf job conf to enableadd the named output. + * @return TRUE if the counters are enabled, FALSE if they are disabled. + */ + public static boolean getCountersEnabled(JobConf conf) { + return conf.getBoolean(COUNTERS_ENABLED, false); + } + + // instance code, to be used from Mapper/Reducer code + + private JobConf conf; + private OutputFormat outputFormat; + private Set namedOutputs; + private Map recordWriters; + private boolean countersEnabled; + + /** + * Creates and initializes multiple named outputs support, it should be + * instantiated in the Mapper/Reducer configure method. + * + * @param job the job configuration object + */ + public MultipleOutputs(JobConf job) { + this.conf = job; + outputFormat = new InternalFileOutputFormat(); + namedOutputs = Collections.unmodifiableSet( + new HashSet(MultipleOutputs.getNamedOutputsList(job))); + recordWriters = new HashMap(); + countersEnabled = getCountersEnabled(job); + } + + /** + * Returns iterator with the defined name outputs. + * + * @return iterator with the defined named outputs + */ + public Iterator getNamedOutputs() { + return namedOutputs.iterator(); + } + + + // by being synchronized MultipleOutputTask can be use with a + // MultithreaderMapRunner. + private synchronized RecordWriter getRecordWriter(String namedOutput, + String baseFileName, + final Reporter reporter) + throws IOException { + RecordWriter writer = recordWriters.get(baseFileName); + if (writer == null) { + if (countersEnabled && reporter == null) { + throw new IllegalArgumentException( + "Counters are enabled, Reporter cannot be NULL"); + } + JobConf jobConf = new JobConf(conf); + jobConf.set(InternalFileOutputFormat.CONFIG_NAMED_OUTPUT, namedOutput); + FileSystem fs = FileSystem.get(conf); + writer = + outputFormat.getRecordWriter(fs, jobConf, baseFileName, reporter); + + if (countersEnabled) { + if (reporter == null) { + throw new IllegalArgumentException( + "Counters are enabled, Reporter cannot be NULL"); + } + writer = new RecordWriterWithCounter(writer, baseFileName, reporter); + } + + recordWriters.put(baseFileName, writer); + } + return writer; + } + + private static class RecordWriterWithCounter implements RecordWriter { + private RecordWriter writer; + private String counterName; + private Reporter reporter; + + public RecordWriterWithCounter(RecordWriter writer, String counterName, + Reporter reporter) { + this.writer = writer; + this.counterName = counterName; + this.reporter = reporter; + } + + @SuppressWarnings({"unchecked"}) + public void write(Object key, Object value) throws IOException { + reporter.incrCounter(COUNTERS_GROUP, counterName, 1); + writer.write(key, value); + } + + public void close(Reporter reporter) throws IOException { + writer.close(reporter); + } + } + + /** + * Gets the output collector for a named output. + *

+ * + * @param namedOutput the named output name + * @param reporter the reporter + * @return the output collector for the given named output + * @throws IOException thrown if output collector could not be created + */ + @SuppressWarnings({"unchecked"}) + public OutputCollector getCollector(String namedOutput, Reporter reporter) + throws IOException { + return getCollector(namedOutput, null, reporter); + } + + /** + * Gets the output collector for a multi named output. + *

+ * + * @param namedOutput the named output name + * @param multiName the multi name part + * @param reporter the reporter + * @return the output collector for the given named output + * @throws IOException thrown if output collector could not be created + */ + @SuppressWarnings({"unchecked"}) + public OutputCollector getCollector(String namedOutput, String multiName, + Reporter reporter) + throws IOException { + + checkNamedOutputName(namedOutput); + if (!namedOutputs.contains(namedOutput)) { + throw new IllegalArgumentException("Undefined named output '" + + namedOutput + "'"); + } + boolean multi = isMultiNamedOutput(conf, namedOutput); + + if (!multi && multiName != null) { + throw new IllegalArgumentException("Name output '" + namedOutput + + "' has not been defined as multi"); + } + if (multi) { + checkTokenName(multiName); + } + + String baseFileName = (multi) ? namedOutput + "_" + multiName : namedOutput; + + final RecordWriter writer = + getRecordWriter(namedOutput, baseFileName, reporter); + + return new OutputCollector() { + + @SuppressWarnings({"unchecked"}) + public void collect(Object key, Object value) throws IOException { + writer.write(key, value); + } + + }; + } + + /** + * Closes all the opened named outputs. + *

+ * If overriden subclasses must invoke super.close() at the + * end of their close() + * + * @throws java.io.IOException thrown if any of the MultipleOutput files + * could not be closed properly. + */ + public void close() throws IOException { + for (RecordWriter writer : recordWriters.values()) { + writer.close(null); + } + } + + private static class InternalFileOutputFormat extends + FileOutputFormat { + + public static final String CONFIG_NAMED_OUTPUT = "mo.config.namedOutput"; + + @SuppressWarnings({"unchecked"}) + public RecordWriter getRecordWriter( + FileSystem fs, JobConf job, String baseFileName, Progressable progress) + throws IOException { + + String nameOutput = job.get(CONFIG_NAMED_OUTPUT, null); + String fileName = getUniqueName(job, baseFileName); + + // The following trick leverages the instantiation of a record writer via + // the job conf thus supporting arbitrary output formats. + JobConf outputConf = new JobConf(job); + outputConf.setOutputFormat(getNamedOutputFormatClass(job, nameOutput)); + outputConf.setOutputKeyClass(getNamedOutputKeyClass(job, nameOutput)); + outputConf.setOutputValueClass(getNamedOutputValueClass(job, nameOutput)); + OutputFormat outputFormat = outputConf.getOutputFormat(); + return outputFormat.getRecordWriter(fs, outputConf, fileName, progress); + } + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/MultipleSequenceFileOutputFormat.java b/src/mapred/org/apache/hadoop/mapred/lib/MultipleSequenceFileOutputFormat.java new file mode 100644 index 0000000..4e79775 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/MultipleSequenceFileOutputFormat.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib; + +import java.io.IOException; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.RecordWriter; +import org.apache.hadoop.mapred.SequenceFileOutputFormat; +import org.apache.hadoop.util.Progressable; + +/** + * This class extends the MultipleOutputFormat, allowing to write the output data + * to different output files in sequence file output format. 
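A common way to use these concrete formats is to subclass one and override generateFileNameForKeyValue so that each record lands in a file derived from its key. A brief sketch, assuming Text keys and values and a hypothetical subclass name:

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat;

    public class KeyPartitionedSequenceFileOutputFormat
        extends MultipleSequenceFileOutputFormat<Text, Text> {
      @Override
      protected String generateFileNameForKeyValue(Text key, Text value,
                                                   String name) {
        // e.g. key "us" and leaf name "part-00000" yield "us/part-00000"
        // under the job output directory
        return key.toString() + "/" + name;
      }
    }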
+ */ +public class MultipleSequenceFileOutputFormat +extends MultipleOutputFormat { + + private SequenceFileOutputFormat theSequenceFileOutputFormat = null; + + @Override + protected RecordWriter getBaseRecordWriter(FileSystem fs, + JobConf job, + String name, + Progressable arg3) + throws IOException { + if (theSequenceFileOutputFormat == null) { + theSequenceFileOutputFormat = new SequenceFileOutputFormat(); + } + return theSequenceFileOutputFormat.getRecordWriter(fs, job, name, arg3); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/MultipleTextOutputFormat.java b/src/mapred/org/apache/hadoop/mapred/lib/MultipleTextOutputFormat.java new file mode 100644 index 0000000..7e8ca68 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/MultipleTextOutputFormat.java @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib; + +import java.io.IOException; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.RecordWriter; +import org.apache.hadoop.mapred.TextOutputFormat; +import org.apache.hadoop.util.Progressable; + +/** + * This class extends the MultipleOutputFormat, allowing to write the output + * data to different output files in Text output format. + */ +public class MultipleTextOutputFormat + extends MultipleOutputFormat { + + private TextOutputFormat theTextOutputFormat = null; + + @Override + protected RecordWriter getBaseRecordWriter(FileSystem fs, JobConf job, + String name, Progressable arg3) throws IOException { + if (theTextOutputFormat == null) { + theTextOutputFormat = new TextOutputFormat(); + } + return theTextOutputFormat.getRecordWriter(fs, job, name, arg3); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/MultithreadedMapRunner.java b/src/mapred/org/apache/hadoop/mapred/lib/MultithreadedMapRunner.java new file mode 100644 index 0000000..9e86c76 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/MultithreadedMapRunner.java @@ -0,0 +1,256 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib; + +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.mapred.MapRunnable; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.Mapper; +import org.apache.hadoop.mapred.RecordReader; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.mapred.SkipBadRecords; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import java.io.IOException; +import java.util.concurrent.*; + +/** + * Multithreaded implementation for @link org.apache.hadoop.mapred.MapRunnable. + *

+ * It can be used instead of the default implementation, + * {@link org.apache.hadoop.mapred.MapRunner}, when the Map operation is not CPU + * bound, in order to improve throughput. + *

+ * Map implementations using this MapRunnable must be thread-safe. + *

+ * The Map-Reduce job has to be configured to use this MapRunnable class (using + * the JobConf.setMapRunnerClass method) and + * the number of threads the thread-pool can use with the + * mapred.map.multithreadedrunner.threads property; its default + * value is 10 threads. + *
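A small configuration fragment for the setup described above; TokenCountMapper (defined later in this patch) is used here only because it is stateless and therefore safe to run from multiple threads:

    JobConf conf = new JobConf();
    conf.setMapperClass(TokenCountMapper.class);
    conf.setMapRunnerClass(MultithreadedMapRunner.class);
    conf.setInt("mapred.map.multithreadedrunner.threads", 32);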

+ */ +public class MultithreadedMapRunner + implements MapRunnable { + + private static final Log LOG = + LogFactory.getLog(MultithreadedMapRunner.class.getName()); + + private JobConf job; + private Mapper mapper; + private ExecutorService executorService; + private volatile IOException ioException; + private volatile RuntimeException runtimeException; + private boolean incrProcCount; + + @SuppressWarnings("unchecked") + public void configure(JobConf jobConf) { + int numberOfThreads = + jobConf.getInt("mapred.map.multithreadedrunner.threads", 10); + if (LOG.isDebugEnabled()) { + LOG.debug("Configuring jobConf " + jobConf.getJobName() + + " to use " + numberOfThreads + " threads"); + } + + this.job = jobConf; + //increment processed counter only if skipping feature is enabled + this.incrProcCount = SkipBadRecords.getMapperMaxSkipRecords(job)>0 && + SkipBadRecords.getAutoIncrMapperProcCount(job); + this.mapper = ReflectionUtils.newInstance(jobConf.getMapperClass(), + jobConf); + + // Creating a threadpool of the configured size to execute the Mapper + // map method in parallel. + executorService = new ThreadPoolExecutor(numberOfThreads, numberOfThreads, + 0L, TimeUnit.MILLISECONDS, + new BlockingArrayQueue + (numberOfThreads)); + } + + /** + * A blocking array queue that replaces offer and add, which throws on a full + * queue, to a put, which waits on a full queue. + */ + private static class BlockingArrayQueue extends ArrayBlockingQueue { + public BlockingArrayQueue(int capacity) { + super(capacity); + } + public boolean offer(Runnable r) { + return add(r); + } + public boolean add(Runnable r) { + try { + put(r); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + } + return true; + } + } + + private void checkForExceptionsFromProcessingThreads() + throws IOException, RuntimeException { + // Checking if a Mapper.map within a Runnable has generated an + // IOException. If so we rethrow it to force an abort of the Map + // operation thus keeping the semantics of the default + // implementation. + if (ioException != null) { + throw ioException; + } + + // Checking if a Mapper.map within a Runnable has generated a + // RuntimeException. If so we rethrow it to force an abort of the Map + // operation thus keeping the semantics of the default + // implementation. + if (runtimeException != null) { + throw runtimeException; + } + } + + public void run(RecordReader input, OutputCollector output, + Reporter reporter) + throws IOException { + try { + // allocate key & value instances these objects will not be reused + // because execution of Mapper.map is not serialized. + K1 key = input.createKey(); + V1 value = input.createValue(); + + while (input.next(key, value)) { + + executorService.execute(new MapperInvokeRunable(key, value, output, + reporter)); + + checkForExceptionsFromProcessingThreads(); + + // Allocate new key & value instances as mapper is running in parallel + key = input.createKey(); + value = input.createValue(); + } + + if (LOG.isDebugEnabled()) { + LOG.debug("Finished dispatching all Mappper.map calls, job " + + job.getJobName()); + } + + // Graceful shutdown of the Threadpool, it will let all scheduled + // Runnables to end. + executorService.shutdown(); + + try { + + // Now waiting for all Runnables to end. 
+ while (!executorService.awaitTermination(100, TimeUnit.MILLISECONDS)) { + if (LOG.isDebugEnabled()) { + LOG.debug("Awaiting all running Mapper.map calls to finish, job " + + job.getJobName()); + } + + // NOTE: while Mapper.map dispatching has concluded there are still + // map calls in progress and exceptions may still be thrown. + checkForExceptionsFromProcessingThreads(); + + } + + // NOTE: it could be that a map call has had an exception after the + // call for awaitTermination() returning true. An edge case, but it + // could happen. + checkForExceptionsFromProcessingThreads(); + + } catch (IOException ioEx) { + // Forcing a shutdown of all threads of the threadpool and rethrowing + // the IOException + executorService.shutdownNow(); + throw ioEx; + } catch (InterruptedException iEx) { + throw new RuntimeException(iEx); + } + + } finally { + mapper.close(); + } + } + + + /** + * Runnable to execute a single Mapper.map call from a forked thread. + */ + private class MapperInvokeRunable implements Runnable { + private K1 key; + private V1 value; + private OutputCollector output; + private Reporter reporter; + + /** + * Collecting all required parameters to execute a Mapper.map call. + *

+ * + * @param key the input key for this map call + * @param value the input value for this map call + * @param output the collector the mapper writes its output to + * @param reporter the reporter used for progress and counters + */ + public MapperInvokeRunable(K1 key, V1 value, + OutputCollector output, + Reporter reporter) { + this.key = key; + this.value = value; + this.output = output; + this.reporter = reporter; + } + + /** + * Executes a Mapper.map call with the given Mapper and parameters. + *

+ * This method is called from the thread-pool thread. + * + */ + public void run() { + try { + // map pair to output + MultithreadedMapRunner.this.mapper.map(key, value, output, reporter); + if(incrProcCount) { + reporter.incrCounter(SkipBadRecords.COUNTER_GROUP, + SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS, 1); + } + } catch (IOException ex) { + // If there is an IOException during the call it is set in an instance + // variable of the MultithreadedMapRunner from where it will be + // rethrown. + synchronized (MultithreadedMapRunner.this) { + if (MultithreadedMapRunner.this.ioException == null) { + MultithreadedMapRunner.this.ioException = ex; + } + } + } catch (RuntimeException ex) { + // If there is a RuntimeException during the call it is set in an + // instance variable of the MultithreadedMapRunner from where it will be + // rethrown. + synchronized (MultithreadedMapRunner.this) { + if (MultithreadedMapRunner.this.runtimeException == null) { + MultithreadedMapRunner.this.runtimeException = ex; + } + } + } + } + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/NLineInputFormat.java b/src/mapred/org/apache/hadoop/mapred/lib/NLineInputFormat.java new file mode 100644 index 0000000..eaf9579 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/NLineInputFormat.java @@ -0,0 +1,122 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib; + +import java.io.IOException; +import java.util.ArrayList; + +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.FileInputFormat; +import org.apache.hadoop.mapred.FileSplit; +import org.apache.hadoop.mapred.InputSplit; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.JobConfigurable; +import org.apache.hadoop.mapred.LineRecordReader; +import org.apache.hadoop.mapred.RecordReader; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.util.LineReader; + +/** + * NLineInputFormat which splits N lines of input as one split. + * + * In many "pleasantly" parallel applications, each process/mapper + * processes the same input file (s), but with computations are + * controlled by different parameters.(Referred to as "parameter sweeps"). + * One way to achieve this, is to specify a set of parameters + * (one set per line) as input in a control file + * (which is the input path to the map-reduce application, + * where as the input dataset is specified + * via a config variable in JobConf.). 
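A configuration fragment for such a parameter sweep, assuming a hypothetical control file; the linespermap property is the one read by the configure method further below:

    JobConf conf = new JobConf();
    conf.setInputFormat(NLineInputFormat.class);
    // one parameter line per map task (the default); raise it to feed N lines per split
    conf.setInt("mapred.line.input.format.linespermap", 1);
    FileInputFormat.setInputPaths(conf, new Path("/sweeps/params.txt"));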
+ * + * The NLineInputFormat can be used in such applications, that splits + * the input file such that by default, one line is fed as + * a value to one map task, and key is the offset. + * i.e. (k,v) is (LongWritable, Text). + * The location hints will span the whole mapred cluster. + */ + +public class NLineInputFormat extends FileInputFormat + implements JobConfigurable { + private int N = 1; + + public RecordReader getRecordReader( + InputSplit genericSplit, + JobConf job, + Reporter reporter) + throws IOException { + reporter.setStatus(genericSplit.toString()); + return new LineRecordReader(job, (FileSplit) genericSplit); + } + + /** + * Logically splits the set of input files for the job, splits N lines + * of the input as one split. + * + * @see org.apache.hadoop.mapred.FileInputFormat#getSplits(JobConf, int) + */ + public InputSplit[] getSplits(JobConf job, int numSplits) + throws IOException { + ArrayList splits = new ArrayList(); + for (FileStatus status : listLocatedStatus(job)) { + Path fileName = status.getPath(); + if (status.isDir()) { + throw new IOException("Not a file: " + fileName); + } + FileSystem fs = fileName.getFileSystem(job); + LineReader lr = null; + try { + FSDataInputStream in = fs.open(fileName); + lr = new LineReader(in, job); + Text line = new Text(); + int numLines = 0; + long begin = 0; + long length = 0; + int num = -1; + while ((num = lr.readLine(line)) > 0) { + numLines++; + length += num; + if (numLines == N) { + splits.add(new FileSplit(fileName, begin, length, new String[]{})); + begin += length; + length = 0; + numLines = 0; + } + } + if (numLines != 0) { + splits.add(new FileSplit(fileName, begin, length, new String[]{})); + } + + } finally { + if (lr != null) { + lr.close(); + } + } + } + return splits.toArray(new FileSplit[splits.size()]); + } + + public void configure(JobConf conf) { + N = conf.getInt("mapred.line.input.format.linespermap", 1); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/NullOutputFormat.java b/src/mapred/org/apache/hadoop/mapred/lib/NullOutputFormat.java new file mode 100644 index 0000000..b9e1fbe --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/NullOutputFormat.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.OutputFormat; +import org.apache.hadoop.mapred.RecordWriter; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.util.Progressable; + +/** + * Consume all outputs and put them in /dev/null. + * @deprecated Use + * {@link org.apache.hadoop.mapreduce.lib.output.NullOutputFormat} instead. 
+ */ +@Deprecated +public class NullOutputFormat implements OutputFormat { + + public RecordWriter getRecordWriter(FileSystem ignored, JobConf job, + String name, Progressable progress) { + return new RecordWriter(){ + public void write(K key, V value) { } + public void close(Reporter reporter) { } + }; + } + + public void checkOutputSpecs(FileSystem ignored, JobConf job) { } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/RegexMapper.java b/src/mapred/org/apache/hadoop/mapred/lib/RegexMapper.java new file mode 100644 index 0000000..11c357d --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/RegexMapper.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib; + +import java.io.IOException; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.MapReduceBase; +import org.apache.hadoop.mapred.Mapper; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.Reporter; + + +/** A {@link Mapper} that extracts text matching a regular expression. */ +public class RegexMapper extends MapReduceBase + implements Mapper { + + private Pattern pattern; + private int group; + + public void configure(JobConf job) { + pattern = Pattern.compile(job.get("mapred.mapper.regex")); + group = job.getInt("mapred.mapper.regex.group", 0); + } + + public void map(K key, Text value, + OutputCollector output, + Reporter reporter) + throws IOException { + String text = value.toString(); + Matcher matcher = pattern.matcher(text); + while (matcher.find()) { + output.collect(new Text(matcher.group(group)), new LongWritable(1)); + } + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/TaggedInputSplit.java b/src/mapred/org/apache/hadoop/mapred/lib/TaggedInputSplit.java new file mode 100644 index 0000000..7be9ff9 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/TaggedInputSplit.java @@ -0,0 +1,140 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.InputFormat; +import org.apache.hadoop.mapred.InputSplit; +import org.apache.hadoop.mapred.Mapper; +import org.apache.hadoop.util.ReflectionUtils; + +/** + * An {@link InputSplit} that tags another InputSplit with extra data for use by + * {@link DelegatingInputFormat}s and {@link DelegatingMapper}s. + */ +class TaggedInputSplit implements Configurable, InputSplit { + + private Class inputSplitClass; + + private InputSplit inputSplit; + + private Class inputFormatClass; + + private Class mapperClass; + + private Configuration conf; + + public TaggedInputSplit() { + // Default constructor. + } + + /** + * Creates a new TaggedInputSplit. + * + * @param inputSplit The InputSplit to be tagged + * @param conf The configuration to use + * @param inputFormatClass The InputFormat class to use for this job + * @param mapperClass The Mapper class to use for this job + */ + public TaggedInputSplit(InputSplit inputSplit, Configuration conf, + Class inputFormatClass, + Class mapperClass) { + this.inputSplitClass = inputSplit.getClass(); + this.inputSplit = inputSplit; + this.conf = conf; + this.inputFormatClass = inputFormatClass; + this.mapperClass = mapperClass; + } + + /** + * Retrieves the original InputSplit. + * + * @return The InputSplit that was tagged + */ + public InputSplit getInputSplit() { + return inputSplit; + } + + /** + * Retrieves the InputFormat class to use for this split. + * + * @return The InputFormat class to use + */ + public Class getInputFormatClass() { + return inputFormatClass; + } + + /** + * Retrieves the Mapper class to use for this split. 
+ * + * @return The Mapper class to use + */ + public Class getMapperClass() { + return mapperClass; + } + + public long getLength() throws IOException { + return inputSplit.getLength(); + } + + public String[] getLocations() throws IOException { + return inputSplit.getLocations(); + } + + @SuppressWarnings("unchecked") + public void readFields(DataInput in) throws IOException { + inputSplitClass = (Class) readClass(in); + inputSplit = (InputSplit) ReflectionUtils + .newInstance(inputSplitClass, conf); + inputSplit.readFields(in); + inputFormatClass = (Class) readClass(in); + mapperClass = (Class) readClass(in); + } + + private Class readClass(DataInput in) throws IOException { + String className = Text.readString(in); + try { + return conf.getClassByName(className); + } catch (ClassNotFoundException e) { + throw new RuntimeException("readObject can't find class", e); + } + } + + public void write(DataOutput out) throws IOException { + Text.writeString(out, inputSplitClass.getName()); + inputSplit.write(out); + Text.writeString(out, inputFormatClass.getName()); + Text.writeString(out, mapperClass.getName()); + } + + public Configuration getConf() { + return conf; + } + + public void setConf(Configuration conf) { + this.conf = conf; + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/TokenCountMapper.java b/src/mapred/org/apache/hadoop/mapred/lib/TokenCountMapper.java new file mode 100644 index 0000000..f5a25ce --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/TokenCountMapper.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib; + +import java.io.IOException; +import java.util.StringTokenizer; + +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.MapReduceBase; +import org.apache.hadoop.mapred.Mapper; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.Reporter; + + +/** A {@link Mapper} that maps text values into pairs. Uses + * {@link StringTokenizer} to break text into tokens. + * @deprecated Use + * {@link org.apache.hadoop.mapreduce.lib.map.TokenCounterMapper} instead. 
+ */ +@Deprecated +public class TokenCountMapper extends MapReduceBase + implements Mapper { + + public void map(K key, Text value, + OutputCollector output, + Reporter reporter) + throws IOException { + // get input text + String text = value.toString(); // value is line of text + + // tokenize the value + StringTokenizer st = new StringTokenizer(text); + while (st.hasMoreTokens()) { + // output pairs + output.collect(new Text(st.nextToken()), new LongWritable(1)); + } + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/TotalOrderPartitioner.java b/src/mapred/org/apache/hadoop/mapred/lib/TotalOrderPartitioner.java new file mode 100644 index 0000000..7d69f92 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/TotalOrderPartitioner.java @@ -0,0 +1,264 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib; + +import java.io.IOException; +import java.lang.reflect.Array; +import java.util.ArrayList; +import java.util.Arrays; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.BinaryComparable; +import org.apache.hadoop.io.NullWritable; +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.RawComparator; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.Partitioner; +import org.apache.hadoop.util.ReflectionUtils; + +/** + * Partitioner effecting a total order by reading split points from + * an externally generated source. + */ +public class TotalOrderPartitioner + implements Partitioner { + + private Node partitions; + public static final String DEFAULT_PATH = "_partition.lst"; + + public TotalOrderPartitioner() { } + + /** + * Read in the partition file and build indexing data structures. + * If the keytype is {@link org.apache.hadoop.io.BinaryComparable} and + * total.order.partitioner.natural.order is not false, a trie + * of the first total.order.partitioner.max.trie.depth(2) + 1 bytes + * will be built. Otherwise, keys will be located using a binary search of + * the partition keyset using the {@link org.apache.hadoop.io.RawComparator} + * defined for this job. The input file must be sorted with the same + * comparator and contain {@link + org.apache.hadoop.mapred.JobConf#getNumReduceTasks} - 1 keys. + */ + @SuppressWarnings("unchecked") // keytype from conf not static + public void configure(JobConf job) { + try { + String parts = getPartitionFile(job); + final Path partFile = new Path(parts); + final FileSystem fs = (DEFAULT_PATH.equals(parts)) + ? 
FileSystem.getLocal(job) // assume in DistributedCache + : partFile.getFileSystem(job); + + Class keyClass = (Class)job.getMapOutputKeyClass(); + K[] splitPoints = readPartitions(fs, partFile, keyClass, job); + if (splitPoints.length != job.getNumReduceTasks() - 1) { + throw new IOException("Wrong number of partitions in keyset"); + } + RawComparator comparator = + (RawComparator) job.getOutputKeyComparator(); + for (int i = 0; i < splitPoints.length - 1; ++i) { + if (comparator.compare(splitPoints[i], splitPoints[i+1]) >= 0) { + throw new IOException("Split points are out of order"); + } + } + boolean natOrder = + job.getBoolean("total.order.partitioner.natural.order", true); + if (natOrder && BinaryComparable.class.isAssignableFrom(keyClass)) { + partitions = buildTrie((BinaryComparable[])splitPoints, 0, + splitPoints.length, new byte[0], + job.getInt("total.order.partitioner.max.trie.depth", 2)); + } else { + partitions = new BinarySearchNode(splitPoints, comparator); + } + } catch (IOException e) { + throw new IllegalArgumentException("Can't read partitions file", e); + } + } + + // by construction, we know if our keytype + @SuppressWarnings("unchecked") // is memcmp-able and uses the trie + public int getPartition(K key, V value, int numPartitions) { + return partitions.findPartition(key); + } + + /** + * Set the path to the SequenceFile storing the sorted partition keyset. + * It must be the case that for R reduces, there are R-1 + * keys in the SequenceFile. + */ + public static void setPartitionFile(JobConf job, Path p) { + job.set("total.order.partitioner.path", p.toString()); + } + + /** + * Get the path to the SequenceFile storing the sorted partition keyset. + * @see #setPartitionFile(JobConf,Path) + */ + public static String getPartitionFile(JobConf job) { + return job.get("total.order.partitioner.path", DEFAULT_PATH); + } + + /** + * Interface to the partitioner to locate a key in the partition keyset. + */ + interface Node { + /** + * Locate partition in keyset K, st [Ki..Ki+1) defines a partition, + * with implicit K0 = -inf, Kn = +inf, and |K| = #partitions - 1. + */ + int findPartition(T key); + } + + /** + * Base class for trie nodes. If the keytype is memcomp-able, this builds + * tries of the first total.order.partitioner.max.trie.depth + * bytes. + */ + static abstract class TrieNode implements Node { + private final int level; + TrieNode(int level) { + this.level = level; + } + int getLevel() { + return level; + } + } + + /** + * For types that are not {@link org.apache.hadoop.io.BinaryComparable} or + * where disabled by total.order.partitioner.natural.order, + * search the partition keyset with a binary search. + */ + class BinarySearchNode implements Node { + private final K[] splitPoints; + private final RawComparator comparator; + BinarySearchNode(K[] splitPoints, RawComparator comparator) { + this.splitPoints = splitPoints; + this.comparator = comparator; + } + public int findPartition(K key) { + final int pos = Arrays.binarySearch(splitPoints, key, comparator) + 1; + return (pos < 0) ? -pos : pos; + } + } + + /** + * An inner trie node that contains 256 children based on the next + * character. 
+ */ + class InnerTrieNode extends TrieNode { + private TrieNode[] child = new TrieNode[256]; + + InnerTrieNode(int level) { + super(level); + } + public int findPartition(BinaryComparable key) { + int level = getLevel(); + if (key.getLength() <= level) { + return child[0].findPartition(key); + } + return child[0xFF & key.getBytes()[level]].findPartition(key); + } + } + + /** + * A leaf trie node that scans for the key between lower..upper. + */ + class LeafTrieNode extends TrieNode { + final int lower; + final int upper; + final BinaryComparable[] splitPoints; + LeafTrieNode(int level, BinaryComparable[] splitPoints, int lower, int upper) { + super(level); + this.lower = lower; + this.upper = upper; + this.splitPoints = splitPoints; + } + public int findPartition(BinaryComparable key) { + final int pos = Arrays.binarySearch(splitPoints, lower, upper, key) + 1; + return (pos < 0) ? -pos : pos; + } + } + + + /** + * Read the cut points from the given IFile. + * @param fs The file system + * @param p The path to read + * @param keyClass The map output key class + * @param job The job config + * @throws IOException + */ + // matching key types enforced by passing in + @SuppressWarnings("unchecked") // map output key class + private K[] readPartitions(FileSystem fs, Path p, Class keyClass, + JobConf job) throws IOException { + SequenceFile.Reader reader = new SequenceFile.Reader(fs, p, job); + ArrayList parts = new ArrayList(); + K key = (K) ReflectionUtils.newInstance(keyClass, job); + NullWritable value = NullWritable.get(); + while (reader.next(key, value)) { + parts.add(key); + key = (K) ReflectionUtils.newInstance(keyClass, job); + } + reader.close(); + return parts.toArray((K[])Array.newInstance(keyClass, parts.size())); + } + + /** + * Given a sorted set of cut points, build a trie that will find the correct + * partition quickly. 
+ * @param splits the list of cut points + * @param lower the lower bound of partitions 0..numPartitions-1 + * @param upper the upper bound of partitions 0..numPartitions-1 + * @param prefix the prefix that we have already checked against + * @param maxDepth the maximum depth we will build a trie for + * @return the trie node that will divide the splits correctly + */ + private TrieNode buildTrie(BinaryComparable[] splits, int lower, + int upper, byte[] prefix, int maxDepth) { + final int depth = prefix.length; + if (depth >= maxDepth || lower == upper) { + return new LeafTrieNode(depth, splits, lower, upper); + } + InnerTrieNode result = new InnerTrieNode(depth); + byte[] trial = Arrays.copyOf(prefix, prefix.length + 1); + // append an extra byte on to the prefix + int currentBound = lower; + for(int ch = 0; ch < 255; ++ch) { + trial[depth] = (byte) (ch + 1); + lower = currentBound; + while (currentBound < upper) { + if (splits[currentBound].compareTo(trial, 0, trial.length) >= 0) { + break; + } + currentBound += 1; + } + trial[depth] = (byte) ch; + result.child[0xFF & ch] = buildTrie(splits, lower, currentBound, trial, + maxDepth); + } + // pick up the rest + trial[depth] = 127; + result.child[255] = buildTrie(splits, currentBound, upper, trial, + maxDepth); + return result; + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/aggregate/DoubleValueSum.java b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/DoubleValueSum.java new file mode 100644 index 0000000..1289024 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/DoubleValueSum.java @@ -0,0 +1,95 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib.aggregate; + +import java.util.ArrayList; + + +/** + * This class implements a value aggregator that sums up a sequence of double + * values. + * + */ +public class DoubleValueSum implements ValueAggregator { + + double sum = 0; + + /** + * The default constructor + * + */ + public DoubleValueSum() { + reset(); + } + + /** + * add a value to the aggregator + * + * @param val + * an object whose string representation represents a double value. + * + */ + public void addNextValue(Object val) { + this.sum += Double.parseDouble(val.toString()); + } + + /** + * add a value to the aggregator + * + * @param val + * a double value. + * + */ + public void addNextValue(double val) { + this.sum += val; + } + + /** + * @return the string representation of the aggregated value + */ + public String getReport() { + return "" + sum; + } + + /** + * @return the aggregated value + */ + public double getSum() { + return this.sum; + } + + /** + * reset the aggregator + */ + public void reset() { + sum = 0; + } + + /** + * @return return an array of one element. 
The element is a string + * representation of the aggregated value. The return value is + * expected to be used by the a combiner. + */ + public ArrayList getCombinerOutput() { + ArrayList retv = new ArrayList(1); + retv.add("" + sum); + return retv; + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/aggregate/LongValueMax.java b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/LongValueMax.java new file mode 100644 index 0000000..152436f --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/LongValueMax.java @@ -0,0 +1,98 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib.aggregate; + +import java.util.ArrayList; + +/** + * This class implements a value aggregator that maintain the maximum of + * a sequence of long values. + * + */ +public class LongValueMax implements ValueAggregator { + + long maxVal = Long.MIN_VALUE; + + /** + * the default constructor + * + */ + public LongValueMax() { + reset(); + } + + /** + * add a value to the aggregator + * + * @param val + * an object whose string representation represents a long value. + * + */ + public void addNextValue(Object val) { + long newVal = Long.parseLong(val.toString()); + if (this.maxVal < newVal) { + this.maxVal = newVal; + } + } + + /** + * add a value to the aggregator + * + * @param newVal + * a long value. + * + */ + public void addNextValue(long newVal) { + if (this.maxVal < newVal) { + this.maxVal = newVal; + }; + } + + /** + * @return the aggregated value + */ + public long getVal() { + return this.maxVal; + } + + /** + * @return the string representation of the aggregated value + */ + public String getReport() { + return ""+maxVal; + } + + /** + * reset the aggregator + */ + public void reset() { + maxVal = Long.MIN_VALUE; + } + + /** + * @return return an array of one element. The element is a string + * representation of the aggregated value. The return value is + * expected to be used by the a combiner. + */ + public ArrayList getCombinerOutput() { + ArrayList retv = new ArrayList(1);; + retv.add(""+maxVal); + return retv; + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/aggregate/LongValueMin.java b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/LongValueMin.java new file mode 100644 index 0000000..4f35fb4 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/LongValueMin.java @@ -0,0 +1,98 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib.aggregate; + +import java.util.ArrayList; + +/** + * This class implements a value aggregator that maintain the minimum of + * a sequence of long values. + * + */ +public class LongValueMin implements ValueAggregator { + + long minVal = Long.MAX_VALUE; + + /** + * the default constructor + * + */ + public LongValueMin() { + reset(); + } + + /** + * add a value to the aggregator + * + * @param val + * an object whose string representation represents a long value. + * + */ + public void addNextValue(Object val) { + long newVal = Long.parseLong(val.toString()); + if (this.minVal > newVal) { + this.minVal = newVal; + } + } + + /** + * add a value to the aggregator + * + * @param newVal + * a long value. + * + */ + public void addNextValue(long newVal) { + if (this.minVal > newVal) { + this.minVal = newVal; + }; + } + + /** + * @return the aggregated value + */ + public long getVal() { + return this.minVal; + } + + /** + * @return the string representation of the aggregated value + */ + public String getReport() { + return ""+minVal; + } + + /** + * reset the aggregator + */ + public void reset() { + minVal = Long.MAX_VALUE; + } + + /** + * @return return an array of one element. The element is a string + * representation of the aggregated value. The return value is + * expected to be used by the a combiner. + */ + public ArrayList getCombinerOutput() { + ArrayList retv = new ArrayList(1); + retv.add(""+minVal); + return retv; + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/aggregate/LongValueSum.java b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/LongValueSum.java new file mode 100644 index 0000000..4c549cc --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/LongValueSum.java @@ -0,0 +1,95 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib.aggregate; + +import java.util.ArrayList; + +/** + * This class implements a value aggregator that sums up + * a sequence of long values. 
+ * + */ +public class LongValueSum implements ValueAggregator { + + long sum = 0; + + /** + * the default constructor + * + */ + public LongValueSum() { + reset(); + } + + /** + * add a value to the aggregator + * + * @param val + * an object whose string representation represents a long value. + * + */ + public void addNextValue(Object val) { + this.sum += Long.parseLong(val.toString()); + } + + /** + * add a value to the aggregator + * + * @param val + * a long value. + * + */ + public void addNextValue(long val) { + this.sum += val; + } + + /** + * @return the aggregated value + */ + public long getSum() { + return this.sum; + } + + /** + * @return the string representation of the aggregated value + */ + public String getReport() { + return ""+sum; + } + + /** + * reset the aggregator + */ + public void reset() { + sum = 0; + } + + /** + * @return return an array of one element. The element is a string + * representation of the aggregated value. The return value is + * expected to be used by the a combiner. + */ + public ArrayList getCombinerOutput() { + ArrayList retv = new ArrayList(1); + retv.add(""+sum); + return retv; + } +} + + diff --git a/src/mapred/org/apache/hadoop/mapred/lib/aggregate/StringValueMax.java b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/StringValueMax.java new file mode 100644 index 0000000..d67ffb1 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/StringValueMax.java @@ -0,0 +1,86 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib.aggregate; + +import java.util.ArrayList; + +/** + * This class implements a value aggregator that maintain the biggest of + * a sequence of strings. + * + */ +public class StringValueMax implements ValueAggregator { + + String maxVal = null; + + /** + * the default constructor + * + */ + public StringValueMax() { + reset(); + } + + /** + * add a value to the aggregator + * + * @param val + * a string. + * + */ + public void addNextValue(Object val) { + String newVal = val.toString(); + if (this.maxVal == null || this.maxVal.compareTo(newVal) < 0) { + this.maxVal = newVal; + } + } + + + /** + * @return the aggregated value + */ + public String getVal() { + return this.maxVal; + } + + /** + * @return the string representation of the aggregated value + */ + public String getReport() { + return maxVal; + } + + /** + * reset the aggregator + */ + public void reset() { + maxVal = null; + } + + /** + * @return return an array of one element. The element is a string + * representation of the aggregated value. The return value is + * expected to be used by the a combiner. 
+ */ + public ArrayList getCombinerOutput() { + ArrayList retv = new ArrayList(1); + retv.add(maxVal); + return retv; + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/aggregate/StringValueMin.java b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/StringValueMin.java new file mode 100644 index 0000000..2749ae7 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/StringValueMin.java @@ -0,0 +1,86 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib.aggregate; + +import java.util.ArrayList; + +/** + * This class implements a value aggregator that maintain the smallest of + * a sequence of strings. + * + */ +public class StringValueMin implements ValueAggregator { + + String minVal = null; + + /** + * the default constructor + * + */ + public StringValueMin() { + reset(); + } + + /** + * add a value to the aggregator + * + * @param val + * a string. + * + */ + public void addNextValue(Object val) { + String newVal = val.toString(); + if (this.minVal == null || this.minVal.compareTo(newVal) > 0) { + this.minVal = newVal; + } + } + + + /** + * @return the aggregated value + */ + public String getVal() { + return this.minVal; + } + + /** + * @return the string representation of the aggregated value + */ + public String getReport() { + return minVal; + } + + /** + * reset the aggregator + */ + public void reset() { + minVal = null; + } + + /** + * @return return an array of one element. The element is a string + * representation of the aggregated value. The return value is + * expected to be used by the a combiner. + */ + public ArrayList getCombinerOutput() { + ArrayList retv = new ArrayList(1); + retv.add(minVal); + return retv; + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/aggregate/UniqValueCount.java b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/UniqValueCount.java new file mode 100644 index 0000000..f505085 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/UniqValueCount.java @@ -0,0 +1,125 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib.aggregate; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.Set; +import java.util.TreeMap; + +/** + * This class implements a value aggregator that dedupes a sequence of objects. + * + */ +public class UniqValueCount implements ValueAggregator { + + private TreeMap uniqItems = null; + + private long numItems = 0; + + private long maxNumItems = Long.MAX_VALUE; + + /** + * the default constructor + * + */ + public UniqValueCount() { + this(Long.MAX_VALUE); + } + + /** + * constructor + * @param maxNum the limit in the number of unique values to keep. + * + */ + public UniqValueCount(long maxNum) { + uniqItems = new TreeMap(); + this.numItems = 0; + maxNumItems = Long.MAX_VALUE; + if (maxNum > 0 ) { + this.maxNumItems = maxNum; + } + } + + /** + * Set the limit on the number of unique values + * @param n the desired limit on the number of unique values + * @return the new limit on the number of unique values + */ + public long setMaxItems(long n) { + if (n >= numItems) { + this.maxNumItems = n; + } else if (this.maxNumItems >= this.numItems) { + this.maxNumItems = this.numItems; + } + return this.maxNumItems; + } + + /** + * add a value to the aggregator + * + * @param val + * an object. + * + */ + public void addNextValue(Object val) { + if (this.numItems <= this.maxNumItems) { + uniqItems.put(val.toString(), "1"); + this.numItems = this.uniqItems.size(); + } + } + + /** + * @return return the number of unique objects aggregated + */ + public String getReport() { + return "" + uniqItems.size(); + } + + /** + * + * @return the set of the unique objects + */ + public Set getUniqueItems() { + return uniqItems.keySet(); + } + + /** + * reset the aggregator + */ + public void reset() { + uniqItems = new TreeMap(); + } + + /** + * @return return an array of the unique objects. The return value is + * expected to be used by the a combiner. + */ + public ArrayList getCombinerOutput() { + Object key = null; + Iterator iter = uniqItems.keySet().iterator(); + ArrayList retv = new ArrayList(); + + while (iter.hasNext()) { + key = iter.next(); + retv.add(key); + } + return retv; + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/aggregate/UserDefinedValueAggregatorDescriptor.java b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/UserDefinedValueAggregatorDescriptor.java new file mode 100644 index 0000000..a55e85f --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/UserDefinedValueAggregatorDescriptor.java @@ -0,0 +1,115 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred.lib.aggregate; + +import java.lang.reflect.Constructor; +import java.util.ArrayList; +import java.util.Map.Entry; + +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.JobConf; + +/** + * This class implements a wrapper for a user defined value aggregator descriptor. + * It servs two functions: One is to create an object of ValueAggregatorDescriptor from the + * name of a user defined class that may be dynamically loaded. The other is to + * deligate inviokations of generateKeyValPairs function to the created object. + * + */ +public class UserDefinedValueAggregatorDescriptor implements + ValueAggregatorDescriptor { + private String className; + + private ValueAggregatorDescriptor theAggregatorDescriptor = null; + + private static final Class[] argArray = new Class[] {}; + + /** + * Create an instance of the given class + * @param className the name of the class + * @return a dynamically created instance of the given class + */ + public static Object createInstance(String className) { + Object retv = null; + try { + ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); + Class theFilterClass = Class.forName(className, true, classLoader); + Constructor meth = theFilterClass.getDeclaredConstructor(argArray); + meth.setAccessible(true); + retv = meth.newInstance(); + } catch (Exception e) { + throw new RuntimeException(e); + } + return retv; + } + + private void createAggregator(JobConf job) { + if (theAggregatorDescriptor == null) { + theAggregatorDescriptor = (ValueAggregatorDescriptor) createInstance(this.className); + theAggregatorDescriptor.configure(job); + } + } + + /** + * + * @param className the class name of the user defined descriptor class + * @param job a configure object used for decriptor configuration + */ + public UserDefinedValueAggregatorDescriptor(String className, JobConf job) { + this.className = className; + this.createAggregator(job); + } + + /** + * Generate a list of aggregation-id/value pairs for the given key/value pairs + * by delegating the invocation to the real object. + * + * @param key + * input key + * @param val + * input value + * @return a list of aggregation id/value pairs. An aggregation id encodes an + * aggregation type which is used to guide the way to aggregate the + * value in the reduce/combiner phrase of an Aggregate based job. + */ + public ArrayList> generateKeyValPairs(Object key, + Object val) { + ArrayList> retv = new ArrayList>(); + if (this.theAggregatorDescriptor != null) { + retv = this.theAggregatorDescriptor.generateKeyValPairs(key, val); + } + return retv; + } + + /** + * @return the string representation of this object. + */ + public String toString() { + return "UserDefinedValueAggregatorDescriptor with class name:" + "\t" + + this.className; + } + + /** + * Do nothing. + */ + public void configure(JobConf job) { + + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregator.java b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregator.java new file mode 100644 index 0000000..d859216 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregator.java @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib.aggregate; + +import java.util.ArrayList; + +/** + * This interface defines the minimal protocol for value aggregators. + * + */ +public interface ValueAggregator { + + /** + * add a value to the aggregator + * + * @param val the value to be added + */ + public void addNextValue(Object val); + + /** + * reset the aggregator + * + */ + public void reset(); + + /** + * @return the string representation of the agregator + */ + public String getReport(); + + /** + * + * @return an array of values as the outputs of the combiner. + */ + public ArrayList getCombinerOutput(); + +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorBaseDescriptor.java b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorBaseDescriptor.java new file mode 100644 index 0000000..7bc6329 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorBaseDescriptor.java @@ -0,0 +1,160 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib.aggregate; + +import java.util.ArrayList; +import java.util.Map.Entry; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.JobConf; + +/** + * This class implements the common functionalities of + * the subclasses of ValueAggregatorDescriptor class. 
+ */ +public class ValueAggregatorBaseDescriptor implements ValueAggregatorDescriptor { + + static public final String UNIQ_VALUE_COUNT = "UniqValueCount"; + + static public final String LONG_VALUE_SUM = "LongValueSum"; + + static public final String DOUBLE_VALUE_SUM = "DoubleValueSum"; + + static public final String VALUE_HISTOGRAM = "ValueHistogram"; + + static public final String LONG_VALUE_MAX = "LongValueMax"; + + static public final String LONG_VALUE_MIN = "LongValueMin"; + + static public final String STRING_VALUE_MAX = "StringValueMax"; + + static public final String STRING_VALUE_MIN = "StringValueMin"; + + private static long maxNumItems = Long.MAX_VALUE; + + public String inputFile = null; + + private static class MyEntry implements Entry { + Text key; + + Text val; + + public Text getKey() { + return key; + } + + public Text getValue() { + return val; + } + + public Text setValue(Text val) { + this.val = val; + return val; + } + + public MyEntry(Text key, Text val) { + this.key = key; + this.val = val; + } + } + + /** + * + * @param type the aggregation type + * @param id the aggregation id + * @param val the val associated with the id to be aggregated + * @return an Entry whose key is the aggregation id prefixed with + * the aggregation type. + */ + public static Entry generateEntry(String type, String id, Text val) { + Text key = new Text(type + TYPE_SEPARATOR + id); + return new MyEntry(key, val); + } + + /** + * + * @param type the aggregation type + * @return a value aggregator of the given type. + */ + static public ValueAggregator generateValueAggregator(String type) { + ValueAggregator retv = null; + if (type.compareToIgnoreCase(LONG_VALUE_SUM) == 0) { + retv = new LongValueSum(); + } if (type.compareToIgnoreCase(LONG_VALUE_MAX) == 0) { + retv = new LongValueMax(); + } else if (type.compareToIgnoreCase(LONG_VALUE_MIN) == 0) { + retv = new LongValueMin(); + } else if (type.compareToIgnoreCase(STRING_VALUE_MAX) == 0) { + retv = new StringValueMax(); + } else if (type.compareToIgnoreCase(STRING_VALUE_MIN) == 0) { + retv = new StringValueMin(); + } else if (type.compareToIgnoreCase(DOUBLE_VALUE_SUM) == 0) { + retv = new DoubleValueSum(); + } else if (type.compareToIgnoreCase(UNIQ_VALUE_COUNT) == 0) { + retv = new UniqValueCount(maxNumItems); + } else if (type.compareToIgnoreCase(VALUE_HISTOGRAM) == 0) { + retv = new ValueHistogram(); + } + return retv; + } + + /** + * Generate 1 or 2 aggregation-id/value pairs for the given key/value pair. + * The first id will be of type LONG_VALUE_SUM, with "record_count" as + * its aggregation id. If the input is a file split, + * the second id of the same type will be generated too, with the file name + * as its aggregation id. This achieves the behavior of counting the total number + * of records in the input data, and the number of records in each input file. + * + * @param key + * input key + * @param val + * input value + * @return a list of aggregation id/value pairs. An aggregation id encodes an + * aggregation type which is used to guide the way to aggregate the + * value in the reduce/combiner phrase of an Aggregate based job. 
+ */ + public ArrayList> generateKeyValPairs(Object key, + Object val) { + ArrayList> retv = new ArrayList>(); + String countType = LONG_VALUE_SUM; + String id = "record_count"; + Entry e = generateEntry(countType, id, ONE); + if (e != null) { + retv.add(e); + } + if (this.inputFile != null) { + e = generateEntry(countType, this.inputFile, ONE); + if (e != null) { + retv.add(e); + } + } + return retv; + } + + /** + * get the input file name. + * + * @param job a job configuration object + */ + public void configure(JobConf job) { + this.inputFile = job.get("map.input.file"); + maxNumItems = job.getLong("aggregate.max.num.unique.values", + Long.MAX_VALUE); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorCombiner.java b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorCombiner.java new file mode 100644 index 0000000..708cd7f --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorCombiner.java @@ -0,0 +1,89 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib.aggregate; + +import java.io.IOException; +import java.util.Iterator; + +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.Reporter; + +/** + * This class implements the generic combiner of Aggregate. + */ +public class ValueAggregatorCombiner + extends ValueAggregatorJobBase { + + /** + * Combiner does not need to configure. + */ + public void configure(JobConf job) { + + } + + /** Combines values for a given key. + * @param key the key is expected to be a Text object, whose prefix indicates + * the type of aggregation to aggregate the values. + * @param values the values to combine + * @param output to collect combined values + */ + public void reduce(Text key, Iterator values, + OutputCollector output, Reporter reporter) throws IOException { + String keyStr = key.toString(); + int pos = keyStr.indexOf(ValueAggregatorDescriptor.TYPE_SEPARATOR); + String type = keyStr.substring(0, pos); + ValueAggregator aggregator = ValueAggregatorBaseDescriptor + .generateValueAggregator(type); + while (values.hasNext()) { + aggregator.addNextValue(values.next()); + } + Iterator outputs = aggregator.getCombinerOutput().iterator(); + + while (outputs.hasNext()) { + Object v = outputs.next(); + if (v instanceof Text) { + output.collect(key, (Text)v); + } else { + output.collect(key, new Text(v.toString())); + } + } + } + + /** + * Do nothing. + * + */ + public void close() throws IOException { + + } + + /** + * Do nothing. Should not be called. 
+ * + */ + public void map(K1 arg0, V1 arg1, OutputCollector arg2, + Reporter arg3) throws IOException { + throw new IOException ("should not be called\n"); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorDescriptor.java b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorDescriptor.java new file mode 100644 index 0000000..f7e46aa --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorDescriptor.java @@ -0,0 +1,68 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib.aggregate; + +import java.util.ArrayList; +import java.util.Map.Entry; + +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.JobConf; + +/** + * This interface defines the contract a value aggregator descriptor must + * support. Such a descriptor can be configured with a JobConf object. Its main + * function is to generate a list of aggregation-id/value pairs. An aggregation + * id encodes an aggregation type which is used to guide the way to aggregate + * the value in the reduce/combiner phrase of an Aggregate based job.The mapper in + * an Aggregate based map/reduce job may create one or more of + * ValueAggregatorDescriptor objects at configuration time. For each input + * key/value pair, the mapper will use those objects to create aggregation + * id/value pairs. + * + */ +public interface ValueAggregatorDescriptor { + + public static final String TYPE_SEPARATOR = ":"; + + public static final Text ONE = new Text("1"); + + /** + * Generate a list of aggregation-id/value pairs for the given key/value pair. + * This function is usually called by the mapper of an Aggregate based job. + * + * @param key + * input key + * @param val + * input value + * @return a list of aggregation id/value pairs. An aggregation id encodes an + * aggregation type which is used to guide the way to aggregate the + * value in the reduce/combiner phrase of an Aggregate based job. + */ + public ArrayList> generateKeyValPairs(Object key, + Object val); + + /** + * Configure the object + * + * @param job + * a JobConf object that may contain the information that can be used + * to configure the object. + */ + public void configure(JobConf job); +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorJob.java b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorJob.java new file mode 100644 index 0000000..c500163 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorJob.java @@ -0,0 +1,210 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib.aggregate; + +import java.io.IOException; +import java.util.ArrayList; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.FileInputFormat; +import org.apache.hadoop.mapred.FileOutputFormat; +import org.apache.hadoop.mapred.InputFormat; +import org.apache.hadoop.mapred.JobClient; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.SequenceFileInputFormat; +import org.apache.hadoop.mapred.TextInputFormat; +import org.apache.hadoop.mapred.TextOutputFormat; +import org.apache.hadoop.mapred.jobcontrol.Job; +import org.apache.hadoop.mapred.jobcontrol.JobControl; +import org.apache.hadoop.util.GenericOptionsParser; + +/** + * This is the main class for creating a map/reduce job using Aggregate + * framework. The Aggregate is a specialization of map/reduce framework, + * specilizing for performing various simple aggregations. + * + * Generally speaking, in order to implement an application using Map/Reduce + * model, the developer is to implement Map and Reduce functions (and possibly + * combine function). However, a lot of applications related to counting and + * statistics computing have very similar characteristics. Aggregate abstracts + * out the general patterns of these functions and implementing those patterns. + * In particular, the package provides generic mapper/redducer/combiner classes, + * and a set of built-in value aggregators, and a generic utility class that + * helps user create map/reduce jobs using the generic class. The built-in + * aggregators include: + * + * sum over numeric values count the number of distinct values compute the + * histogram of values compute the minimum, maximum, media,average, standard + * deviation of numeric values + * + * The developer using Aggregate will need only to provide a plugin class + * conforming to the following interface: + * + * public interface ValueAggregatorDescriptor { public ArrayList + * generateKeyValPairs(Object key, Object value); public void + * configure(JobConfjob); } + * + * The package also provides a base class, ValueAggregatorBaseDescriptor, + * implementing the above interface. The user can extend the base class and + * implement generateKeyValPairs accordingly. + * + * The primary work of generateKeyValPairs is to emit one or more key/value + * pairs based on the input key/value pair. The key in an output key/value pair + * encode two pieces of information: aggregation type and aggregation id. The + * value will be aggregated onto the aggregation id according the aggregation + * type. + * + * This class offers a function to generate a map/reduce job using Aggregate + * framework. 
The function takes the following parameters: input directory spec + * input format (text or sequence file) output directory a file specifying the + * user plugin class + * + */ +public class ValueAggregatorJob { + + public static JobControl createValueAggregatorJobs(String args[] + , Class[] descriptors) throws IOException { + + JobControl theControl = new JobControl("ValueAggregatorJobs"); + ArrayList dependingJobs = new ArrayList(); + JobConf aJobConf = createValueAggregatorJob(args); + if(descriptors != null) + setAggregatorDescriptors(aJobConf, descriptors); + Job aJob = new Job(aJobConf, dependingJobs); + theControl.addJob(aJob); + return theControl; + } + + public static JobControl createValueAggregatorJobs(String args[]) throws IOException { + return createValueAggregatorJobs(args, null); + } + + /** + * Create an Aggregate based map/reduce job. + * + * @param args the arguments used for job creation. Generic hadoop + * arguments are accepted. + * @return a JobConf object ready for submission. + * + * @throws IOException + * @see GenericOptionsParser + */ + public static JobConf createValueAggregatorJob(String args[]) + throws IOException { + + Configuration conf = new Configuration(); + + GenericOptionsParser genericParser + = new GenericOptionsParser(conf, args); + args = genericParser.getRemainingArgs(); + + if (args.length < 2) { + System.out.println("usage: inputDirs outDir " + + "[numOfReducer [textinputformat|seq [specfile [jobName]]]]"); + GenericOptionsParser.printGenericCommandUsage(System.out); + System.exit(1); + } + String inputDir = args[0]; + String outputDir = args[1]; + int numOfReducers = 1; + if (args.length > 2) { + numOfReducers = Integer.parseInt(args[2]); + } + + Class theInputFormat = + TextInputFormat.class; + if (args.length > 3 && + args[3].compareToIgnoreCase("textinputformat") == 0) { + theInputFormat = TextInputFormat.class; + } else { + theInputFormat = SequenceFileInputFormat.class; + } + + Path specFile = null; + + if (args.length > 4) { + specFile = new Path(args[4]); + } + + String jobName = ""; + + if (args.length > 5) { + jobName = args[5]; + } + + JobConf theJob = new JobConf(conf); + if (specFile != null) { + theJob.addResource(specFile); + } + String userJarFile = theJob.get("user.jar.file"); + if (userJarFile == null) { + theJob.setJarByClass(ValueAggregator.class); + } else { + theJob.setJar(userJarFile); + } + theJob.setJobName("ValueAggregatorJob: " + jobName); + + FileInputFormat.addInputPaths(theJob, inputDir); + + theJob.setInputFormat(theInputFormat); + + theJob.setMapperClass(ValueAggregatorMapper.class); + FileOutputFormat.setOutputPath(theJob, new Path(outputDir)); + theJob.setOutputFormat(TextOutputFormat.class); + theJob.setMapOutputKeyClass(Text.class); + theJob.setMapOutputValueClass(Text.class); + theJob.setOutputKeyClass(Text.class); + theJob.setOutputValueClass(Text.class); + theJob.setReducerClass(ValueAggregatorReducer.class); + theJob.setCombinerClass(ValueAggregatorCombiner.class); + theJob.setNumMapTasks(1); + theJob.setNumReduceTasks(numOfReducers); + return theJob; + } + + public static JobConf createValueAggregatorJob(String args[] + , Class[] descriptors) + throws IOException { + JobConf job = createValueAggregatorJob(args); + setAggregatorDescriptors(job, descriptors); + return job; + } + + public static void setAggregatorDescriptors(JobConf job + , Class[] descriptors) { + job.setInt("aggregator.descriptor.num", descriptors.length); + //specify the aggregator descriptors + for(int i=0; i< descriptors.length; i++) { 
+ job.set("aggregator.descriptor." + i, "UserDefined," + descriptors[i].getName()); + } + } + + /** + * create and run an Aggregate based map/reduce job. + * + * @param args the arguments used for job creation + * @throws IOException + */ + public static void main(String args[]) throws IOException { + JobConf job = ValueAggregatorJob.createValueAggregatorJob(args); + JobClient.runJob(job); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorJobBase.java b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorJobBase.java new file mode 100644 index 0000000..d9a5076 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorJobBase.java @@ -0,0 +1,88 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib.aggregate; + +import java.io.IOException; +import java.util.ArrayList; + +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.Mapper; +import org.apache.hadoop.mapred.Reducer; + +/** + * This abstract class implements some common functionalities of the + * the generic mapper, reducer and combiner classes of Aggregate. + */ +public abstract class ValueAggregatorJobBase + implements Mapper, Reducer { + + protected ArrayList aggregatorDescriptorList = null; + + public void configure(JobConf job) { + this.initializeMySpec(job); + this.logSpec(); + } + + private static ValueAggregatorDescriptor getValueAggregatorDescriptor( + String spec, JobConf job) { + if (spec == null) + return null; + String[] segments = spec.split(",", -1); + String type = segments[0]; + if (type.compareToIgnoreCase("UserDefined") == 0) { + String className = segments[1]; + return new UserDefinedValueAggregatorDescriptor(className, job); + } + return null; + } + + private static ArrayList getAggregatorDescriptors(JobConf job) { + String advn = "aggregator.descriptor"; + int num = job.getInt(advn + ".num", 0); + ArrayList retv = new ArrayList(num); + for (int i = 0; i < num; i++) { + String spec = job.get(advn + "." 
+ i); + ValueAggregatorDescriptor ad = getValueAggregatorDescriptor(spec, job); + if (ad != null) { + retv.add(ad); + } + } + return retv; + } + + private void initializeMySpec(JobConf job) { + this.aggregatorDescriptorList = getAggregatorDescriptors(job); + if (this.aggregatorDescriptorList.size() == 0) { + this.aggregatorDescriptorList + .add(new UserDefinedValueAggregatorDescriptor( + ValueAggregatorBaseDescriptor.class.getCanonicalName(), job)); + } + } + + protected void logSpec() { + + } + + public void close() throws IOException { + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorMapper.java b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorMapper.java new file mode 100644 index 0000000..aacdee0 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorMapper.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib.aggregate; + +import java.io.IOException; +import java.util.Iterator; +import java.util.Map.Entry; + +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.Reporter; + +/** + * This class implements the generic mapper of Aggregate. + */ +public class ValueAggregatorMapper + extends ValueAggregatorJobBase { + + /** + * the map function. It iterates through the value aggregator descriptor + * list to generate aggregation id/value pairs and emit them. + */ + public void map(K1 key, V1 value, + OutputCollector output, Reporter reporter) throws IOException { + + Iterator iter = this.aggregatorDescriptorList.iterator(); + while (iter.hasNext()) { + ValueAggregatorDescriptor ad = (ValueAggregatorDescriptor) iter.next(); + Iterator> ens = + ad.generateKeyValPairs(key, value).iterator(); + while (ens.hasNext()) { + Entry en = ens.next(); + output.collect(en.getKey(), en.getValue()); + } + } + } + + /** + * Do nothing. Should not be called. + */ + public void reduce(Text arg0, Iterator arg1, + OutputCollector arg2, + Reporter arg3) throws IOException { + throw new IOException("should not be called\n"); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorReducer.java b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorReducer.java new file mode 100644 index 0000000..5b44cfa --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorReducer.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib.aggregate; + +import java.io.IOException; +import java.util.Iterator; + +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.Reporter; + +/** + * This class implements the generic reducer of Aggregate. + * + * + */ +public class ValueAggregatorReducer + extends ValueAggregatorJobBase { + + /** + * @param key + * the key is expected to be a Text object, whose prefix indicates + * the type of aggregation to aggregate the values. In effect, data + * driven computing is achieved. It is assumed that each aggregator's + * getReport method emits appropriate output for the aggregator. This + * may be further customiized. + * @value the values to be aggregated + */ + public void reduce(Text key, Iterator values, + OutputCollector output, Reporter reporter) throws IOException { + String keyStr = key.toString(); + int pos = keyStr.indexOf(ValueAggregatorDescriptor.TYPE_SEPARATOR); + String type = keyStr.substring(0, pos); + keyStr = keyStr.substring(pos + + ValueAggregatorDescriptor.TYPE_SEPARATOR.length()); + + ValueAggregator aggregator = ValueAggregatorBaseDescriptor + .generateValueAggregator(type); + while (values.hasNext()) { + aggregator.addNextValue(values.next()); + } + + String val = aggregator.getReport(); + key = new Text(keyStr); + output.collect(key, new Text(val)); + } + + /** + * Do nothing. Should not be called + */ + public void map(K1 arg0, V1 arg1, OutputCollector arg2, + Reporter arg3) throws IOException { + throw new IOException ("should not be called\n"); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueHistogram.java b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueHistogram.java new file mode 100644 index 0000000..38a9f2a --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/ValueHistogram.java @@ -0,0 +1,179 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred.lib.aggregate; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.TreeMap; +import java.util.Map.Entry; +import java.util.Arrays; + + +/** + * This class implements a value aggregator that computes the + * histogram of a sequence of strings. + * + */ +public class ValueHistogram implements ValueAggregator { + + TreeMap items = null; + + public ValueHistogram() { + items = new TreeMap(); + } + + /** + * add the given val to the aggregator. + * + * @param val the value to be added. It is expected to be a string + * in the form of xxxx\tnum, meaning xxxx has num occurrences. + */ + public void addNextValue(Object val) { + String valCountStr = val.toString(); + int pos = valCountStr.lastIndexOf("\t"); + String valStr = valCountStr; + String countStr = "1"; + if (pos >= 0) { + valStr = valCountStr.substring(0, pos); + countStr = valCountStr.substring(pos + 1); + } + + Long count = (Long) this.items.get(valStr); + long inc = Long.parseLong(countStr); + + if (count == null) { + count = inc; + } else { + count = count.longValue() + inc; + } + items.put(valStr, count); + } + + /** + * @return the string representation of this aggregator. + * It includes the following basic statistics of the histogram: + * the number of unique values + * the minimum value + * the media value + * the maximum value + * the average value + * the standard deviation + */ + public String getReport() { + long[] counts = new long[items.size()]; + + StringBuffer sb = new StringBuffer(); + Iterator iter = items.values().iterator(); + int i = 0; + while (iter.hasNext()) { + Long count = (Long) iter.next(); + counts[i] = count.longValue(); + i += 1; + } + Arrays.sort(counts); + sb.append(counts.length); + i = 0; + long acc = 0; + while (i < counts.length) { + long nextVal = counts[i]; + int j = i + 1; + while (j < counts.length && counts[j] == nextVal) { + j++; + } + acc += nextVal * (j - i); + //sbVal.append("\t").append(nextVal).append("\t").append(j - i) + //.append("\n"); + i = j; + } + double average = 0.0; + double sd = 0.0; + if (counts.length > 0) { + sb.append("\t").append(counts[0]); + sb.append("\t").append(counts[counts.length / 2]); + sb.append("\t").append(counts[counts.length - 1]); + + average = acc * 1.0 / counts.length; + sb.append("\t").append(average); + + i = 0; + while (i < counts.length) { + double nextDiff = counts[i] - average; + sd += nextDiff * nextDiff; + i += 1; + } + sd = Math.sqrt(sd / counts.length); + + sb.append("\t").append(sd); + + } + //sb.append("\n").append(sbVal.toString()); + return sb.toString(); + } + + /** + * + * @return a string representation of the list of value/frequence pairs of + * the histogram + */ + public String getReportDetails() { + StringBuffer sb = new StringBuffer(); + Iterator iter = items.entrySet().iterator(); + while (iter.hasNext()) { + Entry en = (Entry) iter.next(); + Object val = en.getKey(); + Long count = (Long) en.getValue(); + sb.append("\t").append(val.toString()).append("\t").append( + count.longValue()).append("\n"); + } + return sb.toString(); + } + + /** + * @return a list value/frequence pairs. + * The return value is expected to be used by the reducer. 
+ */ + public ArrayList getCombinerOutput() { + ArrayList retv = new ArrayList(); + Iterator iter = items.entrySet().iterator(); + + while (iter.hasNext()) { + Entry en = (Entry) iter.next(); + Object val = en.getKey(); + Long count = (Long) en.getValue(); + retv.add(val.toString() + "\t" + count.longValue()); + } + return retv; + } + + /** + * + * @return a TreeMap representation of the histogram + */ + public TreeMap getReportItems() { + return items; + } + + /** + * reset the aggregator + */ + public void reset() { + items = new TreeMap(); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/aggregate/package.html b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/package.html new file mode 100644 index 0000000..b0dc530 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/aggregate/package.html @@ -0,0 +1,186 @@ + + + + + + +Classes for performing various counting and aggregations. +

+

Aggregate framework

+

+Generally speaking, to implement an application using the Map/Reduce
+model, the developer needs to implement Map and Reduce functions (and possibly
+a Combine function). However, for many applications related to counting and
+computing statistics, these functions have very similar
+characteristics. This package implements
+those patterns. In particular, it provides a generic mapper class,
+a reducer class, a combiner class, and a set of built-in value aggregators.
+It also provides a generic utility class, ValueAggregatorJob, that offers a static function for
+creating map/reduce jobs:

+
+public static JobConf createValueAggregatorJob(String args[]) throws IOException;
+
+
+To call this function, the user passes in arguments specifying the input directories, the output directory,
+the number of reducers, the input data format (textinputformat or sequencefileinputformat), and a file naming the user plugin class(es) to be loaded by the mapper.
+A user plugin class is responsible for specifying which
+aggregators to use and which values go to which aggregators.
+A plugin class must implement the following interface:
+
+
+ public interface ValueAggregatorDescriptor { 
+     public ArrayList<Entry> generateKeyValPairs(Object key, Object value); 
+     public void configure(JobConf job); 
+} 
+
+
+Function generateKeyValPairs will generate aggregation key/value pairs for the +input key/value pair. Each aggregation key encodes two pieces of information: the aggregation type and aggregation ID. +The value is the value to be aggregated onto the aggregation ID according to the aggregation type. Here +is a simple example user plugin class for counting the words in the input texts: +
+
+public class WordCountAggregatorDescriptor extends ValueAggregatorBaseDescriptor { 
+    public ArrayList<Entry> generateKeyValPairs(Object key, Object val) {
+        String words [] = val.toString().split(" |\t");
+        ArrayList<Entry> retv = new ArrayList<Entry>();
+        for (int i = 0; i < words.length; i++) {
+            retv.add(generateEntry(LONG_VALUE_SUM, words[i], ONE));
+        }
+        return retv;
+    }
+    public void configure(JobConf job) {}
+} 
+
+
+In the above code, LONG_VALUE_SUM is a string denoting the aggregation type LongValueSum, which sums over long values.
+ONE denotes the string "1". The function generateEntry(LONG_VALUE_SUM, words[i], ONE) interprets the first argument as an aggregation type, the second as an aggregation ID, and the third argument as the value to be aggregated. The output key will look like "LongValueSum:xxxx", where xxxx is the string value of words[i]. The value will be "1". The mapper calls generateKeyValPairs(Object key, Object val) for each input key/value pair to generate the desired aggregation id/value pairs.
+The downstream combiner/reducer will interpret these pairs as adding one to the aggregator xxxx.
+

+Class ValueAggregatorBaseDescriptor is a base class that user plugin classes can extend. Here is the XML fragment specifying the user plugin class: +

+
+<property>
+    <name>aggregator.descriptor.num</name>
+    <value>1</value>
+</property>
+<property>
+   <name>aggregator.descriptor.0</name>
+   <value>UserDefined,org.apache.hadoop.mapred.lib.aggregate.examples.WordCountAggregatorDescriptor</value>
+</property> 
+
+
+Class ValueAggregatorBaseDescriptor itself provides a default implementation for generateKeyValPairs: +
+
+public ArrayList<Entry> generateKeyValPairs(Object key, Object val) {
+   ArrayList<Entry> retv = new ArrayList<Entry>();     
+   String countType = LONG_VALUE_SUM;
+   String id = "record_count";
+   retv.add(generateEntry(countType, id, ONE));
+   return retv;
+}
+
+
+Thus, if no user plugin class is specified, the default behavior of the map/reduce job is to count the number of records (lines) in the input files.
+

+During runtime, the mapper will invoke the generateKeyValPairs function for each input key/value pair, and emit the generated +key/value pairs: +

+
+public void map(WritableComparable key, Writable value,
+            OutputCollector output, Reporter reporter) throws IOException {
+   Iterator iter = this.aggregatorDescriptorList.iterator();
+   while (iter.hasNext()) {
+       ValueAggregatorDescriptor ad = (ValueAggregatorDescriptor) iter.next();
+       Iterator<Entry> ens = ad.generateKeyValPairs(key, value).iterator();
+       while (ens.hasNext()) {
+           Entry en = ens.next();
+           output.collect((WritableComparable)en.getKey(), (Writable)en.getValue());
+       }
+   }
+}
+
+
+The reducer will create an aggregator object for each key/value list pair, and perform the appropriate aggregation. +At the end, it will emit the aggregator's results: +
+
+public void reduce(WritableComparable key, Iterator values,
+            OutputCollector output, Reporter reporter) throws IOException {
+   String keyStr = key.toString();
+   int pos = keyStr.indexOf(ValueAggregatorDescriptor.TYPE_SEPARATOR);
+   String type = keyStr.substring(0,pos);
+   keyStr = keyStr.substring(pos+ValueAggregatorDescriptor.TYPE_SEPARATOR.length());       
+   ValueAggregator aggregator = 
+       ValueAggregatorBaseDescriptor.generateValueAggregator(type);
+   while (values.hasNext()) {
+       aggregator.addNextValue(values.next());
+   }         
+   String val = aggregator.getReport();
+   key = new Text(keyStr);
+   output.collect(key, new Text(val)); 
+}
+
+
+To be able to use a combiner, all of the aggregators used must be associative
+and commutative. The following aggregation types are supported (a combined
+example follows the list):
    +
  • LongValueSum: sum over long values +
  • DoubleValueSum: sum over float/double values +
  • uniqValueCount: count the number of distinct values +
  • ValueHistogram: compute the histogram of the values, along with the minimum, maximum, median, average, and standard deviation +
+
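+For example, a single descriptor can emit entries for more than one aggregation
+type from the same record. The sketch below is illustrative only; it assumes the
+VALUE_HISTOGRAM constant of ValueAggregatorBaseDescriptor, that the value
+argument of generateEntry is a Text, and the "value\tcount" input format
+expected by ValueHistogram:
+
+// a method of a class extending ValueAggregatorBaseDescriptor,
+// assuming imports of java.util.ArrayList, java.util.Map.Entry and
+// org.apache.hadoop.io.Text
+public ArrayList<Entry> generateKeyValPairs(Object key, Object val) {
+   ArrayList<Entry> retv = new ArrayList<Entry>();
+   String line = val.toString();
+   // count every record seen by the mapper
+   retv.add(generateEntry(LONG_VALUE_SUM, "record_count", ONE));
+   // build a histogram of line lengths; each value carries a count of 1
+   retv.add(generateEntry(VALUE_HISTOGRAM, "line_length",
+       new Text(line.length() + "\t1")));
+   return retv;
+}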

+

Create and run an application

+

+To create an application, the user needs to do the following things: +

+1. Implement a user plugin: +

+
+import java.util.ArrayList;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor;
+
+public class WordCountAggregatorDescriptor extends ValueAggregatorBaseDescriptor {
+   public ArrayList<Entry> generateKeyValPairs(Object key, Object val) {
+      String words[] = val.toString().split(" |\t");
+      ArrayList<Entry> retv = new ArrayList<Entry>();
+      for (int i = 0; i < words.length; i++) {
+         retv.add(generateEntry(LONG_VALUE_SUM, words[i], ONE));
+      }
+      return retv;
+   }
+   public void configure(JobConf job) {
+   }
+}
+
+
+ +2. Create an xml file specifying the user plugin. +

+3. Compile your java class and create a jar file, say wc.jar. + +

+Finally, run the job: +

+
+        hadoop jar wc.jar org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob indirs outdir numofreducers textinputformat|sequencefileinputformat spec_file
+
+
+
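+The job can also be constructed and submitted programmatically with
+createValueAggregatorJob. The following driver is an illustrative sketch only;
+the class name and the use of JobClient.runJob are assumptions, not part of
+this package:
+
+import java.io.IOException;
+
+import org.apache.hadoop.mapred.JobClient;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob;
+
+public class AggregateDriver {
+  public static void main(String[] args) throws IOException {
+    // args: inputDirs outputDir numOfReducers
+    //       textinputformat|sequencefileinputformat specFile
+    JobConf job = ValueAggregatorJob.createValueAggregatorJob(args);
+    JobClient.runJob(job);
+  }
+}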

+ + + + diff --git a/src/mapred/org/apache/hadoop/mapred/lib/db/DBConfiguration.java b/src/mapred/org/apache/hadoop/mapred/lib/db/DBConfiguration.java new file mode 100644 index 0000000..b92f273 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/db/DBConfiguration.java @@ -0,0 +1,216 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib.db; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; + +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.lib.db.DBInputFormat.NullDBWritable; + +/** + * A container for configuration property names for jobs with DB input/output. + *
+ * The job can be configured using the static methods in this class, + * {@link DBInputFormat}, and {@link DBOutputFormat}. + *

+ * Alternatively, the properties can be set in the configuration with proper + * values. + * + * @see DBConfiguration#configureDB(JobConf, String, String, String, String) + * @see DBInputFormat#setInput(JobConf, Class, String, String) + * @see DBInputFormat#setInput(JobConf, Class, String, String, String, String...) + * @see DBOutputFormat#setOutput(JobConf, String, String...) + */ +public class DBConfiguration { + + /** The JDBC Driver class name */ + public static final String DRIVER_CLASS_PROPERTY = "mapred.jdbc.driver.class"; + + /** JDBC Database access URL */ + public static final String URL_PROPERTY = "mapred.jdbc.url"; + + /** User name to access the database */ + public static final String USERNAME_PROPERTY = "mapred.jdbc.username"; + + /** Password to access the database */ + public static final String PASSWORD_PROPERTY = "mapred.jdbc.password"; + + /** Input table name */ + public static final String INPUT_TABLE_NAME_PROPERTY = "mapred.jdbc.input.table.name"; + + /** Field names in the Input table */ + public static final String INPUT_FIELD_NAMES_PROPERTY = "mapred.jdbc.input.field.names"; + + /** WHERE clause in the input SELECT statement */ + public static final String INPUT_CONDITIONS_PROPERTY = "mapred.jdbc.input.conditions"; + + /** ORDER BY clause in the input SELECT statement */ + public static final String INPUT_ORDER_BY_PROPERTY = "mapred.jdbc.input.orderby"; + + /** Whole input query, exluding LIMIT...OFFSET */ + public static final String INPUT_QUERY = "mapred.jdbc.input.query"; + + /** Input query to get the count of records */ + public static final String INPUT_COUNT_QUERY = "mapred.jdbc.input.count.query"; + + /** Class name implementing DBWritable which will hold input tuples */ + public static final String INPUT_CLASS_PROPERTY = "mapred.jdbc.input.class"; + + /** Output table name */ + public static final String OUTPUT_TABLE_NAME_PROPERTY = "mapred.jdbc.output.table.name"; + + /** Field names in the Output table */ + public static final String OUTPUT_FIELD_NAMES_PROPERTY = "mapred.jdbc.output.field.names"; + + /** + * Sets the DB access related fields in the JobConf. + * @param job the job + * @param driverClass JDBC Driver class name + * @param dbUrl JDBC DB access URL. + * @param userName DB access username + * @param passwd DB access passwd + */ + public static void configureDB(JobConf job, String driverClass, String dbUrl + , String userName, String passwd) { + + job.set(DRIVER_CLASS_PROPERTY, driverClass); + job.set(URL_PROPERTY, dbUrl); + if(userName != null) + job.set(USERNAME_PROPERTY, userName); + if(passwd != null) + job.set(PASSWORD_PROPERTY, passwd); + } + + /** + * Sets the DB access related fields in the JobConf. + * @param job the job + * @param driverClass JDBC Driver class name + * @param dbUrl JDBC DB access URL. 
+ */ + public static void configureDB(JobConf job, String driverClass, String dbUrl) { + configureDB(job, driverClass, dbUrl, null, null); + } + + private JobConf job; + + DBConfiguration(JobConf job) { + this.job = job; + } + + /** Returns a connection object o the DB + * @throws ClassNotFoundException + * @throws SQLException */ + Connection getConnection() throws ClassNotFoundException, SQLException{ + + Class.forName(job.get(DBConfiguration.DRIVER_CLASS_PROPERTY)); + + if(job.get(DBConfiguration.USERNAME_PROPERTY) == null) { + return DriverManager.getConnection(job.get(DBConfiguration.URL_PROPERTY)); + } else { + return DriverManager.getConnection( + job.get(DBConfiguration.URL_PROPERTY), + job.get(DBConfiguration.USERNAME_PROPERTY), + job.get(DBConfiguration.PASSWORD_PROPERTY)); + } + } + + String getInputTableName() { + return job.get(DBConfiguration.INPUT_TABLE_NAME_PROPERTY); + } + + void setInputTableName(String tableName) { + job.set(DBConfiguration.INPUT_TABLE_NAME_PROPERTY, tableName); + } + + String[] getInputFieldNames() { + return job.getStrings(DBConfiguration.INPUT_FIELD_NAMES_PROPERTY); + } + + void setInputFieldNames(String... fieldNames) { + job.setStrings(DBConfiguration.INPUT_FIELD_NAMES_PROPERTY, fieldNames); + } + + String getInputConditions() { + return job.get(DBConfiguration.INPUT_CONDITIONS_PROPERTY); + } + + void setInputConditions(String conditions) { + if (conditions != null && conditions.length() > 0) + job.set(DBConfiguration.INPUT_CONDITIONS_PROPERTY, conditions); + } + + String getInputOrderBy() { + return job.get(DBConfiguration.INPUT_ORDER_BY_PROPERTY); + } + + void setInputOrderBy(String orderby) { + if(orderby != null && orderby.length() >0) { + job.set(DBConfiguration.INPUT_ORDER_BY_PROPERTY, orderby); + } + } + + String getInputQuery() { + return job.get(DBConfiguration.INPUT_QUERY); + } + + void setInputQuery(String query) { + if(query != null && query.length() >0) { + job.set(DBConfiguration.INPUT_QUERY, query); + } + } + + String getInputCountQuery() { + return job.get(DBConfiguration.INPUT_COUNT_QUERY); + } + + void setInputCountQuery(String query) { + if(query != null && query.length() >0) { + job.set(DBConfiguration.INPUT_COUNT_QUERY, query); + } + } + + + Class getInputClass() { + return job.getClass(DBConfiguration.INPUT_CLASS_PROPERTY, NullDBWritable.class); + } + + void setInputClass(Class inputClass) { + job.setClass(DBConfiguration.INPUT_CLASS_PROPERTY, inputClass, DBWritable.class); + } + + String getOutputTableName() { + return job.get(DBConfiguration.OUTPUT_TABLE_NAME_PROPERTY); + } + + void setOutputTableName(String tableName) { + job.set(DBConfiguration.OUTPUT_TABLE_NAME_PROPERTY, tableName); + } + + String[] getOutputFieldNames() { + return job.getStrings(DBConfiguration.OUTPUT_FIELD_NAMES_PROPERTY); + } + + void setOutputFieldNames(String... fieldNames) { + job.setStrings(DBConfiguration.OUTPUT_FIELD_NAMES_PROPERTY, fieldNames); + } + +} + diff --git a/src/mapred/org/apache/hadoop/mapred/lib/db/DBInputFormat.java b/src/mapred/org/apache/hadoop/mapred/lib/db/DBInputFormat.java new file mode 100644 index 0000000..8480f8b --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/db/DBInputFormat.java @@ -0,0 +1,394 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.lib.db; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.mapred.InputFormat; +import org.apache.hadoop.mapred.InputSplit; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.JobConfigurable; +import org.apache.hadoop.mapred.RecordReader; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.util.ReflectionUtils; + +/** + * A InputFormat that reads input data from an SQL table. + *

+ * DBInputFormat emits LongWritables containing the record number as + * key and DBWritables as value. + * + * The SQL query, and input class can be using one of the two + * setInput methods. + */ +public class DBInputFormat + implements InputFormat, JobConfigurable { + /** + * A RecordReader that reads records from a SQL table. + * Emits LongWritables containing the record number as + * key and DBWritables as value. + */ + protected class DBRecordReader implements + RecordReader { + private ResultSet results; + + private Statement statement; + + private Class inputClass; + + private JobConf job; + + private DBInputSplit split; + + private long pos = 0; + + /** + * @param split The InputSplit to read data for + * @throws SQLException + */ + protected DBRecordReader(DBInputSplit split, Class inputClass, JobConf job) throws SQLException { + this.inputClass = inputClass; + this.split = split; + this.job = job; + + statement = connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY); + + //statement.setFetchSize(Integer.MIN_VALUE); + results = statement.executeQuery(getSelectQuery()); + } + + /** Returns the query for selecting the records, + * subclasses can override this for custom behaviour.*/ + protected String getSelectQuery() { + StringBuilder query = new StringBuilder(); + + if(dbConf.getInputQuery() == null) { + query.append("SELECT "); + + for (int i = 0; i < fieldNames.length; i++) { + query.append(fieldNames[i]); + if(i != fieldNames.length -1) { + query.append(", "); + } + } + + query.append(" FROM ").append(tableName); + query.append(" AS ").append(tableName); //in hsqldb this is necessary + if (conditions != null && conditions.length() > 0) + query.append(" WHERE (").append(conditions).append(")"); + String orderBy = dbConf.getInputOrderBy(); + if(orderBy != null && orderBy.length() > 0) { + query.append(" ORDER BY ").append(orderBy); + } + } + else { + query.append(dbConf.getInputQuery()); + } + + try { + query.append(" LIMIT ").append(split.getLength()); + query.append(" OFFSET ").append(split.getStart()); + } + catch (IOException ex) { + //ignore, will not throw + } + return query.toString(); + } + + /** {@inheritDoc} */ + public void close() throws IOException { + try { + connection.commit(); + results.close(); + statement.close(); + } catch (SQLException e) { + throw new IOException(e.getMessage()); + } + } + + /** {@inheritDoc} */ + public LongWritable createKey() { + return new LongWritable(); + } + + /** {@inheritDoc} */ + public T createValue() { + return ReflectionUtils.newInstance(inputClass, job); + } + + /** {@inheritDoc} */ + public long getPos() throws IOException { + return pos; + } + + /** {@inheritDoc} */ + public float getProgress() throws IOException { + return pos / (float)split.getLength(); + } + + /** {@inheritDoc} */ + public boolean next(LongWritable key, T value) throws IOException { + try { + if (!results.next()) + return false; + + // Set the key field value as the output key value + key.set(pos + split.getStart()); + + value.readFields(results); + + pos ++; + } catch (SQLException e) { + throw new IOException(e.getMessage()); + } + return true; + } + } + + /** + * A Class that does nothing, implementing DBWritable + */ + public static class NullDBWritable implements DBWritable, Writable { + @Override + public void readFields(DataInput in) throws IOException { } + @Override + public void readFields(ResultSet arg0) throws SQLException { } + @Override + public void write(DataOutput out) throws IOException { } + @Override + 
public void write(PreparedStatement arg0) throws SQLException { } + } + /** + * A InputSplit that spans a set of rows + */ + protected static class DBInputSplit implements InputSplit { + + private long end = 0; + private long start = 0; + + /** + * Default Constructor + */ + public DBInputSplit() { + } + + /** + * Convenience Constructor + * @param start the index of the first row to select + * @param end the index of the last row to select + */ + public DBInputSplit(long start, long end) { + this.start = start; + this.end = end; + } + + /** {@inheritDoc} */ + public String[] getLocations() throws IOException { + // TODO Add a layer to enable SQL "sharding" and support locality + return new String[] {}; + } + + /** + * @return The index of the first row to select + */ + public long getStart() { + return start; + } + + /** + * @return The index of the last row to select + */ + public long getEnd() { + return end; + } + + /** + * @return The total row count in this split + */ + public long getLength() throws IOException { + return end - start; + } + + /** {@inheritDoc} */ + public void readFields(DataInput input) throws IOException { + start = input.readLong(); + end = input.readLong(); + } + + /** {@inheritDoc} */ + public void write(DataOutput output) throws IOException { + output.writeLong(start); + output.writeLong(end); + } + } + + private String conditions; + + private Connection connection; + + private String tableName; + + private String[] fieldNames; + + private DBConfiguration dbConf; + + /** {@inheritDoc} */ + public void configure(JobConf job) { + + dbConf = new DBConfiguration(job); + + try { + this.connection = dbConf.getConnection(); + this.connection.setAutoCommit(false); + connection.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); + } + catch (Exception ex) { + throw new RuntimeException(ex); + } + + tableName = dbConf.getInputTableName(); + fieldNames = dbConf.getInputFieldNames(); + conditions = dbConf.getInputConditions(); + } + + /** {@inheritDoc} */ + @SuppressWarnings("unchecked") + public RecordReader getRecordReader(InputSplit split, + JobConf job, Reporter reporter) throws IOException { + + Class inputClass = dbConf.getInputClass(); + try { + return new DBRecordReader((DBInputSplit) split, inputClass, job); + } + catch (SQLException ex) { + throw new IOException(ex.getMessage()); + } + } + + /** {@inheritDoc} */ + public InputSplit[] getSplits(JobConf job, int chunks) throws IOException { + + try { + Statement statement = connection.createStatement(); + + ResultSet results = statement.executeQuery(getCountQuery()); + results.next(); + + long count = results.getLong(1); + long chunkSize = (count / chunks); + + results.close(); + statement.close(); + + InputSplit[] splits = new InputSplit[chunks]; + + // Split the rows into n-number of chunks and adjust the last chunk + // accordingly + for (int i = 0; i < chunks; i++) { + DBInputSplit split; + + if ((i + 1) == chunks) + split = new DBInputSplit(i * chunkSize, count); + else + split = new DBInputSplit(i * chunkSize, (i * chunkSize) + + chunkSize); + + splits[i] = split; + } + + return splits; + } catch (SQLException e) { + throw new IOException(e.getMessage()); + } + } + + /** Returns the query for getting the total number of rows, + * subclasses can override this for custom behaviour.*/ + protected String getCountQuery() { + + if(dbConf.getInputCountQuery() != null) { + return dbConf.getInputCountQuery(); + } + + StringBuilder query = new StringBuilder(); + query.append("SELECT COUNT(*) FROM " + 
tableName); + + if (conditions != null && conditions.length() > 0) + query.append(" WHERE " + conditions); + return query.toString(); + } + + /** + * Initializes the map-part of the job with the appropriate input settings. + * + * @param job The job + * @param inputClass the class object implementing DBWritable, which is the + * Java object holding tuple fields. + * @param tableName The table to read data from + * @param conditions The condition which to select data with, eg. '(updated > + * 20070101 AND length > 0)' + * @param orderBy the fieldNames in the orderBy clause. + * @param fieldNames The field names in the table + * @see #setInput(JobConf, Class, String, String) + */ + public static void setInput(JobConf job, Class inputClass, + String tableName,String conditions, String orderBy, String... fieldNames) { + job.setInputFormat(DBInputFormat.class); + + DBConfiguration dbConf = new DBConfiguration(job); + dbConf.setInputClass(inputClass); + dbConf.setInputTableName(tableName); + dbConf.setInputFieldNames(fieldNames); + dbConf.setInputConditions(conditions); + dbConf.setInputOrderBy(orderBy); + } + + /** + * Initializes the map-part of the job with the appropriate input settings. + * + * @param job The job + * @param inputClass the class object implementing DBWritable, which is the + * Java object holding tuple fields. + * @param inputQuery the input query to select fields. Example : + * "SELECT f1, f2, f3 FROM Mytable ORDER BY f1" + * @param inputCountQuery the input query that returns the number of records in + * the table. + * Example : "SELECT COUNT(f1) FROM Mytable" + * @see #setInput(JobConf, Class, String, String, String, String...) + */ + public static void setInput(JobConf job, Class inputClass, + String inputQuery, String inputCountQuery) { + job.setInputFormat(DBInputFormat.class); + + DBConfiguration dbConf = new DBConfiguration(job); + dbConf.setInputClass(inputClass); + dbConf.setInputQuery(inputQuery); + dbConf.setInputCountQuery(inputCountQuery); + + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/db/DBOutputFormat.java b/src/mapred/org/apache/hadoop/mapred/lib/db/DBOutputFormat.java new file mode 100644 index 0000000..acfda9f --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/db/DBOutputFormat.java @@ -0,0 +1,186 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred.lib.db; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.SQLException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.OutputFormat; +import org.apache.hadoop.mapred.RecordWriter; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.util.Progressable; +import org.apache.hadoop.util.StringUtils; + +/** + * A OutputFormat that sends the reduce output to a SQL table. + *

+ * {@link DBOutputFormat} accepts <key,value> pairs, where + * key has a type extending DBWritable. Returned {@link RecordWriter} + * writes only the key to the database with a batch SQL query. + * + */ +public class DBOutputFormat +implements OutputFormat { + + private static final Log LOG = LogFactory.getLog(DBOutputFormat.class); + + /** + * A RecordWriter that writes the reduce output to a SQL table + */ + protected class DBRecordWriter + implements RecordWriter { + + private Connection connection; + private PreparedStatement statement; + + protected DBRecordWriter(Connection connection + , PreparedStatement statement) throws SQLException { + this.connection = connection; + this.statement = statement; + this.connection.setAutoCommit(false); + } + + /** {@inheritDoc} */ + public void close(Reporter reporter) throws IOException { + try { + statement.executeBatch(); + connection.commit(); + } catch (SQLException e) { + try { + connection.rollback(); + } + catch (SQLException ex) { + LOG.warn(StringUtils.stringifyException(ex)); + } + throw new IOException(e.getMessage()); + } finally { + try { + statement.close(); + connection.close(); + } + catch (SQLException ex) { + throw new IOException(ex.getMessage()); + } + } + } + + /** {@inheritDoc} */ + public void write(K key, V value) throws IOException { + try { + key.write(statement); + statement.addBatch(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + } + + /** + * Constructs the query used as the prepared statement to insert data. + * + * @param table + * the table to insert into + * @param fieldNames + * the fields to insert into. If field names are unknown, supply an + * array of nulls. + */ + protected String constructQuery(String table, String[] fieldNames) { + if(fieldNames == null) { + throw new IllegalArgumentException("Field names may not be null"); + } + + StringBuilder query = new StringBuilder(); + query.append("INSERT INTO ").append(table); + + if (fieldNames.length > 0 && fieldNames[0] != null) { + query.append(" ("); + for (int i = 0; i < fieldNames.length; i++) { + query.append(fieldNames[i]); + if (i != fieldNames.length - 1) { + query.append(","); + } + } + query.append(")"); + } + query.append(" VALUES ("); + + for (int i = 0; i < fieldNames.length; i++) { + query.append("?"); + if(i != fieldNames.length - 1) { + query.append(","); + } + } + query.append(");"); + + return query.toString(); + } + + /** {@inheritDoc} */ + public void checkOutputSpecs(FileSystem filesystem, JobConf job) + throws IOException { + } + + + /** {@inheritDoc} */ + public RecordWriter getRecordWriter(FileSystem filesystem, + JobConf job, String name, Progressable progress) throws IOException { + + DBConfiguration dbConf = new DBConfiguration(job); + String tableName = dbConf.getOutputTableName(); + String[] fieldNames = dbConf.getOutputFieldNames(); + + try { + Connection connection = dbConf.getConnection(); + PreparedStatement statement = null; + + statement = connection.prepareStatement(constructQuery(tableName, fieldNames)); + return new DBRecordWriter(connection, statement); + } + catch (Exception ex) { + throw new IOException(ex.getMessage()); + } + } + + /** + * Initializes the reduce-part of the job with the appropriate output settings + * + * @param job + * The job + * @param tableName + * The table to insert data into + * @param fieldNames + * The field names in the table. If unknown, supply the appropriate + * number of nulls. + */ + public static void setOutput(JobConf job, String tableName, String... 
fieldNames) { + job.setOutputFormat(DBOutputFormat.class); + job.setReduceSpeculativeExecution(false); + + DBConfiguration dbConf = new DBConfiguration(job); + + dbConf.setOutputTableName(tableName); + dbConf.setOutputFieldNames(fieldNames); + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/db/DBWritable.java b/src/mapred/org/apache/hadoop/mapred/lib/db/DBWritable.java new file mode 100644 index 0000000..e70d4bf --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/db/DBWritable.java @@ -0,0 +1,75 @@ +package org.apache.hadoop.mapred.lib.db; + +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; + +import org.apache.hadoop.io.Writable; + +/** + * Objects that are read from/written to a database should implement + * DBWritable. DBWritable, is similar to {@link Writable} + * except that the {@link #write(PreparedStatement)} method takes a + * {@link PreparedStatement}, and {@link #readFields(ResultSet)} + * takes a {@link ResultSet}. + *

+ * Implementations are responsible for writing the fields of the object + * to PreparedStatement, and reading the fields of the object from the + * ResultSet. + * + *

Example:

+ * If we have the following table in the database: + *
+ * CREATE TABLE MyTable (
+ *   counter        INTEGER NOT NULL,
+ *   timestamp      BIGINT  NOT NULL
+ * );
+ * 
+ * then we can read/write the tuples from/to the table with: + *

+ * public class MyWritable implements Writable, DBWritable {
+ *   // Some data     
+ *   private int counter;
+ *   private long timestamp;
+ *       
+ *   //Writable#write() implementation
+ *   public void write(DataOutput out) throws IOException {
+ *     out.writeInt(counter);
+ *     out.writeLong(timestamp);
+ *   }
+ *       
+ *   //Writable#readFields() implementation
+ *   public void readFields(DataInput in) throws IOException {
+ *     counter = in.readInt();
+ *     timestamp = in.readLong();
+ *   }
+ *       
+ *   public void write(PreparedStatement statement) throws SQLException {
+ *     statement.setInt(1, counter);
+ *     statement.setLong(2, timestamp);
+ *   }
+ *       
+ *   public void readFields(ResultSet resultSet) throws SQLException {
+ *     counter = resultSet.getInt(1);
+ *     timestamp = resultSet.getLong(2);
+ *   } 
+ * }
+ * 

+ */ +public interface DBWritable { + + /** + * Sets the fields of the object in the {@link PreparedStatement}. + * @param statement the statement that the fields are put into. + * @throws SQLException + */ + public void write(PreparedStatement statement) throws SQLException; + + /** + * Reads the fields of the object from the {@link ResultSet}. + * @param resultSet the {@link ResultSet} to get the fields from. + * @throws SQLException + */ + public void readFields(ResultSet resultSet) throws SQLException ; + +} diff --git a/src/mapred/org/apache/hadoop/mapred/lib/db/package.html b/src/mapred/org/apache/hadoop/mapred/lib/db/package.html new file mode 100644 index 0000000..0692258 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/db/package.html @@ -0,0 +1,44 @@ + + + + +

org.apache.hadoop.mapred.lib.db Package

+

+This package contains a library to read records from a database as an +input to a mapreduce job, and write the output records to the database. +

+

+The database to access can be configured using the static methods in the
+DBConfiguration class. Jobs reading input from a database should use
+DBInputFormat#setInput() to set the configuration, and jobs writing
+their output to the database should use DBOutputFormat#setOutput().
+

+
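+For illustration, a complete job configuration might look like the following
+minimal sketch, written inside a job driver. The MyRecord class (a DBWritable
+implementation), the MyJob driver class, the JDBC driver, URL, credentials, and
+the table/column names are all hypothetical:
+
+JobConf job = new JobConf(MyJob.class);
+
+// driver class, connection URL, user, password
+DBConfiguration.configureDB(job, "com.mysql.jdbc.Driver",
+    "jdbc:mysql://localhost/mydb", "user", "password");
+
+// read from table "Pageview": columns "url" and "pageview", ordered by "url"
+DBInputFormat.setInput(job, MyRecord.class, "Pageview",
+    null /* conditions */, "url" /* orderBy */, "url", "pageview");
+
+// write the reduce output to table "PageviewSummary" with the given columns
+DBOutputFormat.setOutput(job, "PageviewSummary", "url", "count");
+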

+Tuples from/to the database are converted to/from Java objects using
+DBWritable methods. Typically, for each table in the database, a class
+implementing DBWritable is defined, which holds the fields of the tuple. The
+fields of a record are read from the database using
+DBWritable#readFields(ResultSet), and written to the database using
+DBWritable#write(PreparedStatement statement).
+

+

+An example program using both DBInputFormat and DBOutputFormat can be found +at src/examples/org/apache/hadoop/examples/DBCountPageview.java. +

+ + \ No newline at end of file diff --git a/src/mapred/org/apache/hadoop/mapred/lib/package.html b/src/mapred/org/apache/hadoop/mapred/lib/package.html new file mode 100644 index 0000000..1948ec1 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/lib/package.html @@ -0,0 +1,25 @@ + + + + + + +

Library of generally useful mappers, reducers, and partitioners.

+ + + diff --git a/src/mapred/org/apache/hadoop/mapred/package.html b/src/mapred/org/apache/hadoop/mapred/package.html new file mode 100644 index 0000000..003c723 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/package.html @@ -0,0 +1,230 @@ + + + + + + +

A software framework for easily writing applications which process vast
+amounts of data (multi-terabyte data-sets) in parallel on large clusters
+(thousands of nodes) built of commodity hardware in a reliable, fault-tolerant
+manner.

+ +

A Map-Reduce job usually splits the input data-set into independent
+chunks which are processed by the map tasks in a completely parallel manner,
+followed by reduce tasks which aggregate their output. Typically both
+the input and the output of the job are stored in a
+{@link org.apache.hadoop.fs.FileSystem}. The framework takes care of monitoring
+tasks and re-executing failed ones. Since, usually, the compute nodes and the
+storage nodes are the same, i.e. Hadoop's Map-Reduce framework and Distributed
+FileSystem run on the same set of nodes, tasks are effectively scheduled
+on the nodes where data is already present, resulting in very high aggregate
+bandwidth across the cluster.

+ +

The Map-Reduce framework operates exclusively on <key, value> +pairs i.e. the input to the job is viewed as a set of <key, value> +pairs and the output as another, possibly different, set of +<key, value> pairs. The keys and values have to +be serializable as {@link org.apache.hadoop.io.Writable}s and additionally the +keys have to be {@link org.apache.hadoop.io.WritableComparable}s in +order to facilitate grouping by the framework.

+ +
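+As a hedged illustration (not part of the framework itself), a minimal key type
+usable by the framework could be implemented as follows:
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.WritableComparable;
+
+// A hypothetical WritableComparable key wrapping a single word.
+public class WordKey implements WritableComparable<WordKey> {
+  private Text word = new Text();
+
+  public void set(String w) { word.set(w); }
+
+  public void write(DataOutput out) throws IOException {
+    word.write(out);                    // serialize the wrapped Text
+  }
+
+  public void readFields(DataInput in) throws IOException {
+    word.readFields(in);                // deserialize the wrapped Text
+  }
+
+  public int compareTo(WordKey other) {
+    return word.compareTo(other.word);  // ordering used for sorting/grouping
+  }
+
+  // a consistent hashCode() should also be provided if the default
+  // hash-based partitioner is used
+}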

Data flow:

+
+                                (input)
+                                <k1, v1>
+       
+                                   |
+                                   V
+       
+                                  map
+       
+                                   |
+                                   V
+
+                                <k2, v2>
+       
+                                   |
+                                   V
+       
+                                combine
+       
+                                   |
+                                   V
+       
+                                <k2, v2>
+       
+                                   |
+                                   V
+       
+                                 reduce
+       
+                                   |
+                                   V
+       
+                                <k3, v3>
+                                (output)
+
+ +

Applications typically implement +{@link org.apache.hadoop.mapred.Mapper#map(Object, Object, OutputCollector, Reporter)} +and +{@link org.apache.hadoop.mapred.Reducer#reduce(Object, Iterator, OutputCollector, Reporter)} +methods. The application-writer also specifies various facets of the job such +as input and output locations, the Partitioner, InputFormat +& OutputFormat implementations to be used etc. as +a {@link org.apache.hadoop.mapred.JobConf}. The client program, +{@link org.apache.hadoop.mapred.JobClient}, then submits the job to the framework +and optionally monitors it.

+ +

The framework spawns one map task per
+{@link org.apache.hadoop.mapred.InputSplit} generated by the
+{@link org.apache.hadoop.mapred.InputFormat} of the job and calls
+{@link org.apache.hadoop.mapred.Mapper#map(Object, Object, OutputCollector, Reporter)}
+with each <key, value> pair read by the
+{@link org.apache.hadoop.mapred.RecordReader} from the InputSplit for
+the task. The intermediate outputs of the maps are then grouped by keys
+and optionally aggregated by the combiner. The key space of the intermediate
+outputs is partitioned by the {@link org.apache.hadoop.mapred.Partitioner}, where
+the number of partitions is exactly the number of reduce tasks for the job.

+ +
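+For example (an illustrative sketch only, not part of these docs), a custom
+Partitioner that routes records by the first letter of the key might look like:
+
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.Partitioner;
+
+public class FirstLetterPartitioner implements Partitioner<Text, LongWritable> {
+
+  public void configure(JobConf job) { }
+
+  public int getPartition(Text key, LongWritable value, int numPartitions) {
+    // pick a partition based on the first character of the key;
+    // the returned value must lie in [0, numPartitions)
+    String s = key.toString();
+    int firstChar = (s.length() == 0) ? 0 : s.charAt(0);
+    return (firstChar & Integer.MAX_VALUE) % numPartitions;
+  }
+}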

The reduce tasks fetch the sorted intermediate outputs of the maps, via HTTP,
+merge the <key, value> pairs and call
+{@link org.apache.hadoop.mapred.Reducer#reduce(Object, Iterator, OutputCollector, Reporter)}
+for each <key, list of values> pair. The output of the reduce tasks is
+stored on the FileSystem by the
+{@link org.apache.hadoop.mapred.RecordWriter} provided by the
+{@link org.apache.hadoop.mapred.OutputFormat} of the job.

+ +

Map-Reduce application to perform a distributed grep:

+

+public class Grep extends Configured implements Tool {
+
+  // map: Search for the pattern specified by 'grep.mapper.regex' &
+  //      'grep.mapper.regex.group'
+
+  class GrepMapper<K, Text> 
+  extends MapReduceBase  implements Mapper<K, Text, Text, LongWritable> {
+
+    private Pattern pattern;
+    private int group;
+
+    public void configure(JobConf job) {
+      pattern = Pattern.compile(job.get("grep.mapper.regex"));
+      group = job.getInt("grep.mapper.regex.group", 0);
+    }
+
+    public void map(K key, Text value,
+                    OutputCollector<Text, LongWritable> output,
+                    Reporter reporter)
+    throws IOException {
+      String text = value.toString();
+      Matcher matcher = pattern.matcher(text);
+      while (matcher.find()) {
+        output.collect(new Text(matcher.group(group)), new LongWritable(1));
+      }
+    }
+  }
+
+  // reduce: Count the number of occurrences of the pattern
+
+  class GrepReducer<K> extends MapReduceBase
+  implements Reducer<K, LongWritable, K, LongWritable> {
+
+    public void reduce(K key, Iterator<LongWritable> values,
+                       OutputCollector<K, LongWritable> output,
+                       Reporter reporter)
+    throws IOException {
+
+      // sum all values for this key
+      long sum = 0;
+      while (values.hasNext()) {
+        sum += values.next().get();
+      }
+
+      // output sum
+      output.collect(key, new LongWritable(sum));
+    }
+  }
+  
+  public int run(String[] args) throws Exception {
+    if (args.length < 3) {
+      System.out.println("Grep <inDir> <outDir> <regex> [<group>]");
+      ToolRunner.printGenericCommandUsage(System.out);
+      return -1;
+    }
+
+    JobConf grepJob = new JobConf(getConf(), Grep.class);
+    
+    grepJob.setJobName("grep");
+
+    FileInputFormat.setInputPaths(grepJob, new Path(args[0]));
+    FileOutputFormat.setOutputPath(grepJob, new Path(args[1]));
+
+    grepJob.setMapperClass(GrepMapper.class);
+    grepJob.setCombinerClass(GrepReducer.class);
+    grepJob.setReducerClass(GrepReducer.class);
+
+    grepJob.set("mapred.mapper.regex", args[2]);
+    if (args.length == 4)
+      grepJob.set("mapred.mapper.regex.group", args[3]);
+
+    grepJob.setOutputFormat(SequenceFileOutputFormat.class);
+    grepJob.setOutputKeyClass(Text.class);
+    grepJob.setOutputValueClass(LongWritable.class);
+
+    JobClient.runJob(grepJob);
+
+    return 0;
+  }
+
+  public static void main(String[] args) throws Exception {
+    int res = ToolRunner.run(new Configuration(), new Grep(), args);
+    System.exit(res);
+  }
+
+}
+
+ +

Notice how the data-flow of the above grep job is very similar to doing the +same via the unix pipeline:

+ +
+cat input/*   |   grep   |   sort    |   uniq -c   >   out
+
+ +
+      input   |    map   |  shuffle  |   reduce    >   out
+
+ +

Hadoop Map-Reduce applications need not be written in
+Java™ only.
+Hadoop Streaming is a utility
+which allows users to create and run jobs with any executables (e.g. shell
+utilities) as the mapper and/or the reducer.
+Hadoop Pipes is a
+SWIG-compatible C++ API to implement
+Map-Reduce applications (non-JNI™ based).

+ +

See Google's original +Map/Reduce paper for background information.

+ +

Java and JNI are trademarks or registered trademarks of +Sun Microsystems, Inc. in the United States and other countries.

+ + + diff --git a/src/mapred/org/apache/hadoop/mapred/pipes/Application.java b/src/mapred/org/apache/hadoop/mapred/pipes/Application.java new file mode 100644 index 0000000..02aaecf --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/pipes/Application.java @@ -0,0 +1,188 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.pipes; + +import java.io.File; +import java.io.IOException; +import java.net.ServerSocket; +import java.net.Socket; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.filecache.DistributedCache; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.io.FloatWritable; +import org.apache.hadoop.io.NullWritable; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.RecordReader; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.mapred.TaskAttemptID; +import org.apache.hadoop.mapred.TaskLog; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.StringUtils; + +/** + * This class is responsible for launching and communicating with the child + * process. + */ +class Application { + private static final Log LOG = LogFactory.getLog(Application.class.getName()); + private ServerSocket serverSocket; + private Process process; + private Socket clientSocket; + private OutputHandler handler; + private DownwardProtocol downlink; + static final boolean WINDOWS + = System.getProperty("os.name").startsWith("Windows"); + + /** + * Start the child process to handle the task for us. 
+ * @param conf the task's configuration + * @param recordReader the fake record reader to update progress with + * @param output the collector to send output to + * @param reporter the reporter for the task + * @param outputKeyClass the class of the output keys + * @param outputValueClass the class of the output values + * @throws IOException + * @throws InterruptedException + */ + Application(JobConf conf, + RecordReader recordReader, + OutputCollector output, Reporter reporter, + Class outputKeyClass, + Class outputValueClass + ) throws IOException, InterruptedException { + serverSocket = new ServerSocket(0); + Map env = new HashMap(); + // add TMPDIR environment variable with the value of java.io.tmpdir + env.put("TMPDIR", System.getProperty("java.io.tmpdir")); + env.put("hadoop.pipes.command.port", + Integer.toString(serverSocket.getLocalPort())); + List cmd = new ArrayList(); + String interpretor = conf.get("hadoop.pipes.executable.interpretor"); + if (interpretor != null) { + cmd.add(interpretor); + } + + String executable = DistributedCache.getLocalCacheFiles(conf)[0].toString(); + FileUtil.chmod(executable, "a+x"); + cmd.add(executable); + // wrap the command in a stdout/stderr capture + TaskAttemptID taskid = TaskAttemptID.forName(conf.get("mapred.task.id")); + File stdout = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDOUT); + File stderr = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDERR); + long logLength = TaskLog.getTaskLogLength(conf); + cmd = TaskLog.captureOutAndError(null, cmd, stdout, stderr, logLength, + false); + + process = runClient(cmd, env); + clientSocket = serverSocket.accept(); + handler = new OutputHandler(output, reporter, recordReader); + K2 outputKey = (K2) + ReflectionUtils.newInstance(outputKeyClass, conf); + V2 outputValue = (V2) + ReflectionUtils.newInstance(outputValueClass, conf); + downlink = new BinaryProtocol(clientSocket, handler, + outputKey, outputValue, conf); + downlink.start(); + downlink.setJobConf(conf); + } + + /** + * Get the downward protocol object that can send commands down to the + * application. + * @return the downlink proxy + */ + DownwardProtocol getDownlink() { + return downlink; + } + + /** + * Wait for the application to finish + * @return did the application finish correctly? + * @throws Throwable + */ + boolean waitForFinish() throws Throwable { + downlink.flush(); + return handler.waitForFinish(); + } + + /** + * Abort the application and wait for it to finish. + * @param t the exception that signalled the problem + * @throws IOException A wrapper around the exception that was passed in + */ + void abort(Throwable t) throws IOException { + LOG.info("Aborting because of " + StringUtils.stringifyException(t)); + try { + downlink.abort(); + downlink.flush(); + } catch (IOException e) { + // IGNORE cleanup problems + } + try { + handler.waitForFinish(); + } catch (Throwable ignored) { + process.destroy(); + } + IOException wrapper = new IOException("pipe child exception"); + wrapper.initCause(t); + throw wrapper; + } + + /** + * Clean up the child procress and socket. + * @throws IOException + */ + void cleanup() throws IOException { + serverSocket.close(); + try { + downlink.close(); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + } + } + + /** + * Run a given command in a subprocess, including threads to copy its stdout + * and stderr to our stdout and stderr. 
+ * @param command the command and its arguments + * @param env the environment to run the process in + * @return a handle on the process + * @throws IOException + */ + static Process runClient(List command, + Map env) throws IOException { + ProcessBuilder builder = new ProcessBuilder(command); + if (env != null) { + builder.environment().putAll(env); + } + Process result = builder.start(); + return result; + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/pipes/BinaryProtocol.java b/src/mapred/org/apache/hadoop/mapred/pipes/BinaryProtocol.java new file mode 100644 index 0000000..534e453 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/pipes/BinaryProtocol.java @@ -0,0 +1,348 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.pipes; + +import java.io.*; +import java.net.Socket; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.io.BytesWritable; +import org.apache.hadoop.io.DataOutputBuffer; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.mapred.InputSplit; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.util.StringUtils; + +/** + * This protocol is a binary implementation of the Pipes protocol. + */ +class BinaryProtocol + implements DownwardProtocol { + + public static final int CURRENT_PROTOCOL_VERSION = 0; + /** + * The buffer size for the command socket + */ + private static final int BUFFER_SIZE = 128*1024; + + private DataOutputStream stream; + private DataOutputBuffer buffer = new DataOutputBuffer(); + private static final Log LOG = + LogFactory.getLog(BinaryProtocol.class.getName()); + private UplinkReaderThread uplink; + + /** + * The integer codes to represent the different messages. These must match + * the C++ codes or massive confusion will result. 
+ */ + private static enum MessageType { START(0), + SET_JOB_CONF(1), + SET_INPUT_TYPES(2), + RUN_MAP(3), + MAP_ITEM(4), + RUN_REDUCE(5), + REDUCE_KEY(6), + REDUCE_VALUE(7), + CLOSE(8), + ABORT(9), + OUTPUT(50), + PARTITIONED_OUTPUT(51), + STATUS(52), + PROGRESS(53), + DONE(54), + REGISTER_COUNTER(55), + INCREMENT_COUNTER(56); + final int code; + MessageType(int code) { + this.code = code; + } + } + + private static class UplinkReaderThread + extends Thread { + + private DataInputStream inStream; + private UpwardProtocol handler; + private K2 key; + private V2 value; + + public UplinkReaderThread(InputStream stream, + UpwardProtocol handler, + K2 key, V2 value) throws IOException{ + inStream = new DataInputStream(new BufferedInputStream(stream, + BUFFER_SIZE)); + this.handler = handler; + this.key = key; + this.value = value; + } + + public void closeConnection() throws IOException { + inStream.close(); + } + + public void run() { + while (true) { + try { + if (Thread.currentThread().isInterrupted()) { + throw new InterruptedException(); + } + int cmd = WritableUtils.readVInt(inStream); + LOG.debug("Handling uplink command " + cmd); + if (cmd == MessageType.OUTPUT.code) { + readObject(key); + readObject(value); + handler.output(key, value); + } else if (cmd == MessageType.PARTITIONED_OUTPUT.code) { + int part = WritableUtils.readVInt(inStream); + readObject(key); + readObject(value); + handler.partitionedOutput(part, key, value); + } else if (cmd == MessageType.STATUS.code) { + handler.status(Text.readString(inStream)); + } else if (cmd == MessageType.PROGRESS.code) { + handler.progress(inStream.readFloat()); + } else if (cmd == MessageType.REGISTER_COUNTER.code) { + int id = WritableUtils.readVInt(inStream); + String group = Text.readString(inStream); + String name = Text.readString(inStream); + handler.registerCounter(id, group, name); + } else if (cmd == MessageType.INCREMENT_COUNTER.code) { + int id = WritableUtils.readVInt(inStream); + long amount = WritableUtils.readVLong(inStream); + handler.incrementCounter(id, amount); + } else if (cmd == MessageType.DONE.code) { + LOG.debug("Pipe child done"); + handler.done(); + return; + } else { + throw new IOException("Bad command code: " + cmd); + } + } catch (InterruptedException e) { + return; + } catch (Throwable e) { + LOG.error(StringUtils.stringifyException(e)); + handler.failed(e); + return; + } + } + } + + private void readObject(Writable obj) throws IOException { + int numBytes = WritableUtils.readVInt(inStream); + byte[] buffer; + // For BytesWritable and Text, use the specified length to set the length + // this causes the "obvious" translations to work. So that if you emit + // a string "abc" from C++, it shows up as "abc". + if (obj instanceof BytesWritable) { + buffer = new byte[numBytes]; + inStream.readFully(buffer); + ((BytesWritable) obj).set(buffer, 0, numBytes); + } else if (obj instanceof Text) { + buffer = new byte[numBytes]; + inStream.readFully(buffer); + ((Text) obj).set(buffer); + } else { + obj.readFields(inStream); + } + } + } + + /** + * An output stream that will save a copy of the data into a file. 
+ */ + private static class TeeOutputStream extends FilterOutputStream { + private OutputStream file; + TeeOutputStream(String filename, OutputStream base) throws IOException { + super(base); + file = new FileOutputStream(filename); + } + public void write(byte b[], int off, int len) throws IOException { + file.write(b,off,len); + out.write(b,off,len); + } + + public void write(int b) throws IOException { + file.write(b); + out.write(b); + } + + public void flush() throws IOException { + file.flush(); + out.flush(); + } + + public void close() throws IOException { + flush(); + file.close(); + out.close(); + } + } + + /** + * Create a proxy object that will speak the binary protocol on a socket. + * Upward messages are passed on the specified handler and downward + * downward messages are public methods on this object. + * @param sock The socket to communicate on. + * @param handler The handler for the received messages. + * @param key The object to read keys into. + * @param value The object to read values into. + * @param config The job's configuration + * @throws IOException + */ + public BinaryProtocol(Socket sock, + UpwardProtocol handler, + K2 key, + V2 value, + JobConf config) throws IOException { + OutputStream raw = sock.getOutputStream(); + // If we are debugging, save a copy of the downlink commands to a file + if (Submitter.getKeepCommandFile(config)) { + raw = new TeeOutputStream("downlink.data", raw); + } + stream = new DataOutputStream(new BufferedOutputStream(raw, + BUFFER_SIZE)) ; + uplink = new UplinkReaderThread(sock.getInputStream(), + handler, key, value); + uplink.setName("pipe-uplink-handler"); + uplink.start(); + } + + /** + * Close the connection and shutdown the handler thread. + * @throws IOException + * @throws InterruptedException + */ + public void close() throws IOException, InterruptedException { + LOG.debug("closing connection"); + stream.close(); + uplink.closeConnection(); + uplink.interrupt(); + uplink.join(); + } + + public void start() throws IOException { + LOG.debug("starting downlink"); + WritableUtils.writeVInt(stream, MessageType.START.code); + WritableUtils.writeVInt(stream, CURRENT_PROTOCOL_VERSION); + } + + public void setJobConf(JobConf job) throws IOException { + WritableUtils.writeVInt(stream, MessageType.SET_JOB_CONF.code); + List list = new ArrayList(); + for(Map.Entry itm: job) { + list.add(itm.getKey()); + list.add(itm.getValue()); + } + WritableUtils.writeVInt(stream, list.size()); + for(String entry: list){ + Text.writeString(stream, entry); + } + } + + public void setInputTypes(String keyType, + String valueType) throws IOException { + WritableUtils.writeVInt(stream, MessageType.SET_INPUT_TYPES.code); + Text.writeString(stream, keyType); + Text.writeString(stream, valueType); + } + + public void runMap(InputSplit split, int numReduces, + boolean pipedInput) throws IOException { + WritableUtils.writeVInt(stream, MessageType.RUN_MAP.code); + writeObject(split); + WritableUtils.writeVInt(stream, numReduces); + WritableUtils.writeVInt(stream, pipedInput ? 1 : 0); + } + + public void mapItem(WritableComparable key, + Writable value) throws IOException { + WritableUtils.writeVInt(stream, MessageType.MAP_ITEM.code); + writeObject(key); + writeObject(value); + } + + public void runReduce(int reduce, boolean pipedOutput) throws IOException { + WritableUtils.writeVInt(stream, MessageType.RUN_REDUCE.code); + WritableUtils.writeVInt(stream, reduce); + WritableUtils.writeVInt(stream, pipedOutput ? 
1 : 0); + } + + public void reduceKey(WritableComparable key) throws IOException { + WritableUtils.writeVInt(stream, MessageType.REDUCE_KEY.code); + writeObject(key); + } + + public void reduceValue(Writable value) throws IOException { + WritableUtils.writeVInt(stream, MessageType.REDUCE_VALUE.code); + writeObject(value); + } + + public void endOfInput() throws IOException { + WritableUtils.writeVInt(stream, MessageType.CLOSE.code); + LOG.debug("Sent close command"); + } + + public void abort() throws IOException { + WritableUtils.writeVInt(stream, MessageType.ABORT.code); + LOG.debug("Sent abort command"); + } + + public void flush() throws IOException { + stream.flush(); + } + + /** + * Write the given object to the stream. If it is a Text or BytesWritable, + * write it directly. Otherwise, write it to a buffer and then write the + * length and data to the stream. + * @param obj the object to write + * @throws IOException + */ + private void writeObject(Writable obj) throws IOException { + // For Text and BytesWritable, encode them directly, so that they end up + // in C++ as the natural translations. + if (obj instanceof Text) { + Text t = (Text) obj; + int len = t.getLength(); + WritableUtils.writeVInt(stream, len); + stream.write(t.getBytes(), 0, len); + } else if (obj instanceof BytesWritable) { + BytesWritable b = (BytesWritable) obj; + int len = b.getLength(); + WritableUtils.writeVInt(stream, len); + stream.write(b.getBytes(), 0, len); + } else { + buffer.reset(); + obj.write(buffer); + int length = buffer.getLength(); + WritableUtils.writeVInt(stream, length); + stream.write(buffer.getData(), 0, length); + } + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/pipes/DownwardProtocol.java b/src/mapred/org/apache/hadoop/mapred/pipes/DownwardProtocol.java new file mode 100644 index 0000000..2e88b97 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/pipes/DownwardProtocol.java @@ -0,0 +1,117 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.pipes; + +import java.io.IOException; + +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.mapred.InputSplit; +import org.apache.hadoop.mapred.JobConf; + +/** + * The abstract description of the downward (from Java to C++) Pipes protocol. + * All of these calls are asynchronous and return before the message has been + * processed. + */ +interface DownwardProtocol { + /** + * Start communication + * @throws IOException + */ + void start() throws IOException; + + /** + * Set the JobConf for the task. + * @param conf + * @throws IOException + */ + void setJobConf(JobConf conf) throws IOException; + + /** + * Set the input types for Maps. 
+ * @param keyType the name of the key's type + * @param valueType the name of the value's type + * @throws IOException + */ + void setInputTypes(String keyType, String valueType) throws IOException; + + /** + * Run a map task in the child. + * @param split The input split for this map. + * @param numReduces The number of reduces for this job. + * @param pipedInput Is the input coming from Java? + * @throws IOException + */ + void runMap(InputSplit split, int numReduces, + boolean pipedInput) throws IOException; + + /** + * For maps with pipedInput, the key/value pairs are sent via this messaage. + * @param key The record's key + * @param value The record's value + * @throws IOException + */ + void mapItem(K key, V value) throws IOException; + + /** + * Run a reduce task in the child + * @param reduce the index of the reduce (0 .. numReduces - 1) + * @param pipedOutput is the output being sent to Java? + * @throws IOException + */ + void runReduce(int reduce, boolean pipedOutput) throws IOException; + + /** + * The reduce should be given a new key + * @param key the new key + * @throws IOException + */ + void reduceKey(K key) throws IOException; + + /** + * The reduce should be given a new value + * @param value the new value + * @throws IOException + */ + void reduceValue(V value) throws IOException; + + /** + * The task has no more input coming, but it should finish processing it's + * input. + * @throws IOException + */ + void endOfInput() throws IOException; + + /** + * The task should stop as soon as possible, because something has gone wrong. + * @throws IOException + */ + void abort() throws IOException; + + /** + * Flush the data through any buffers. + */ + void flush() throws IOException; + + /** + * Close the connection. + */ + void close() throws IOException, InterruptedException; +} diff --git a/src/mapred/org/apache/hadoop/mapred/pipes/OutputHandler.java b/src/mapred/org/apache/hadoop/mapred/pipes/OutputHandler.java new file mode 100644 index 0000000..2364f76 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/pipes/OutputHandler.java @@ -0,0 +1,159 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.pipes; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.io.FloatWritable; +import org.apache.hadoop.io.NullWritable; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.mapred.Counters; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.RecordReader; +import org.apache.hadoop.mapred.Reporter; + +/** + * Handles the upward (C++ to Java) messages from the application. 
+ */ +class OutputHandler + implements UpwardProtocol { + + private Reporter reporter; + private OutputCollector collector; + private float progressValue = 0.0f; + private boolean done = false; + private Throwable exception = null; + RecordReader recordReader = null; + private Map registeredCounters = + new HashMap(); + + /** + * Create a handler that will handle any records output from the application. + * @param collector the "real" collector that takes the output + * @param reporter the reporter for reporting progress + */ + public OutputHandler(OutputCollector collector, Reporter reporter, + RecordReader recordReader) { + this.reporter = reporter; + this.collector = collector; + this.recordReader = recordReader; + } + + /** + * The task output a normal record. + */ + public void output(K key, V value) throws IOException { + collector.collect(key, value); + } + + /** + * The task output a record with a partition number attached. + */ + public void partitionedOutput(int reduce, K key, + V value) throws IOException { + PipesPartitioner.setNextPartition(reduce); + collector.collect(key, value); + } + + /** + * Update the status message for the task. + */ + public void status(String msg) { + reporter.setStatus(msg); + } + + private FloatWritable progressKey = new FloatWritable(0.0f); + private NullWritable nullValue = NullWritable.get(); + /** + * Update the amount done and call progress on the reporter. + */ + public void progress(float progress) throws IOException { + progressValue = progress; + reporter.progress(); + + if (recordReader != null) { + progressKey.set(progress); + recordReader.next(progressKey, nullValue); + } + } + + /** + * The task finished successfully. + */ + public void done() throws IOException { + synchronized (this) { + done = true; + notify(); + } + } + + /** + * Get the current amount done. + * @return a float between 0.0 and 1.0 + */ + public float getProgress() { + return progressValue; + } + + /** + * The task failed with an exception. + */ + public void failed(Throwable e) { + synchronized (this) { + exception = e; + notify(); + } + } + + /** + * Wait for the task to finish or abort. + * @return did the task finish correctly? + * @throws Throwable + */ + public synchronized boolean waitForFinish() throws Throwable { + while (!done && exception == null) { + wait(); + } + if (exception != null) { + throw exception; + } + return done; + } + + public void registerCounter(int id, String group, String name) throws IOException { + Counters.Counter counter = reporter.getCounter(group, name); + registeredCounters.put(id, counter); + } + + public void incrementCounter(int id, long amount) throws IOException { + if (id < registeredCounters.size()) { + Counters.Counter counter = registeredCounters.get(id); + counter.increment(amount); + } else { + throw new IOException("Invalid counter with id: " + id); + } + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/pipes/PipesMapRunner.java b/src/mapred/org/apache/hadoop/mapred/pipes/PipesMapRunner.java new file mode 100644 index 0000000..3567953 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/pipes/PipesMapRunner.java @@ -0,0 +1,107 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.pipes; + +import java.io.IOException; + +import org.apache.hadoop.io.FloatWritable; +import org.apache.hadoop.io.NullWritable; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.MapRunner; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.RecordReader; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.mapred.SkipBadRecords; + +/** + * An adaptor to run a C++ mapper. + */ +class PipesMapRunner + extends MapRunner { + private JobConf job; + + /** + * Get the new configuration. + * @param job the job's configuration + */ + public void configure(JobConf job) { + this.job = job; + //disable the auto increment of the counter. For pipes, no of processed + //records could be different(equal or less) than the no of records input. + SkipBadRecords.setAutoIncrMapperProcCount(job, false); + } + + /** + * Run the map task. + * @param input the set of inputs + * @param output the object to collect the outputs of the map + * @param reporter the object to update with status + */ + @SuppressWarnings("unchecked") + public void run(RecordReader input, OutputCollector output, + Reporter reporter) throws IOException { + Application application = null; + try { + RecordReader fakeInput = + (!Submitter.getIsJavaRecordReader(job) && + !Submitter.getIsJavaMapper(job)) ? + (RecordReader) input : null; + application = new Application(job, fakeInput, output, + reporter, + (Class) job.getOutputKeyClass(), + (Class) job.getOutputValueClass()); + } catch (InterruptedException ie) { + throw new RuntimeException("interrupted", ie); + } + DownwardProtocol downlink = application.getDownlink(); + boolean isJavaInput = Submitter.getIsJavaRecordReader(job); + downlink.runMap(reporter.getInputSplit(), + job.getNumReduceTasks(), isJavaInput); + boolean skipping = job.getBoolean("mapred.skip.on", false); + try { + if (isJavaInput) { + // allocate key & value instances that are re-used for all entries + K1 key = input.createKey(); + V1 value = input.createValue(); + downlink.setInputTypes(key.getClass().getName(), + value.getClass().getName()); + + while (input.next(key, value)) { + // map pair to output + downlink.mapItem(key, value); + if(skipping) { + //flush the streams on every record input if running in skip mode + //so that we don't buffer other records surrounding a bad record. 
+ downlink.flush(); + } + } + downlink.endOfInput(); + } + application.waitForFinish(); + } catch (Throwable t) { + application.abort(t); + } finally { + application.cleanup(); + } + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/pipes/PipesNonJavaInputFormat.java b/src/mapred/org/apache/hadoop/mapred/pipes/PipesNonJavaInputFormat.java new file mode 100644 index 0000000..78d72b4 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/pipes/PipesNonJavaInputFormat.java @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred.pipes; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.FloatWritable; +import org.apache.hadoop.io.NullWritable; +import org.apache.hadoop.mapred.InputFormat; +import org.apache.hadoop.mapred.InputSplit; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.RecordReader; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.mapred.TextInputFormat; +import org.apache.hadoop.util.ReflectionUtils; + +/** + * Dummy input format used when non-Java a {@link RecordReader} is used by + * the Pipes' application. + * + * The only useful thing this does is set up the Map-Reduce job to get the + * {@link PipesDummyRecordReader}, everything else left for the 'actual' + * InputFormat specified by the user which is given by + * mapred.pipes.user.inputformat. + */ +class PipesNonJavaInputFormat +implements InputFormat { + + public RecordReader getRecordReader( + InputSplit genericSplit, JobConf job, Reporter reporter) + throws IOException { + return new PipesDummyRecordReader(job, genericSplit); + } + + public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException { + // Delegate the generation of input splits to the 'original' InputFormat + return ReflectionUtils.newInstance( + job.getClass("mapred.pipes.user.inputformat", + TextInputFormat.class, + InputFormat.class), job).getSplits(job, numSplits); + } + + /** + * A dummy {@link org.apache.hadoop.mapred.RecordReader} to help track the + * progress of Hadoop Pipes' applications when they are using a non-Java + * RecordReader. + * + * The PipesDummyRecordReader is informed of the 'progress' of + * the task by the {@link OutputHandler#progress(float)} which calls the + * {@link #next(FloatWritable, NullWritable)} with the progress as the + * key. 
+ */ + static class PipesDummyRecordReader implements RecordReader { + float progress = 0.0f; + + public PipesDummyRecordReader(Configuration job, InputSplit split) + throws IOException{ + } + + + public FloatWritable createKey() { + return null; + } + + public NullWritable createValue() { + return null; + } + + public synchronized void close() throws IOException {} + + public synchronized long getPos() throws IOException { + return 0; + } + + public float getProgress() { + return progress; + } + + public synchronized boolean next(FloatWritable key, NullWritable value) + throws IOException { + progress = key.get(); + return true; + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/pipes/PipesPartitioner.java b/src/mapred/org/apache/hadoop/mapred/pipes/PipesPartitioner.java new file mode 100644 index 0000000..ef9da0b --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/pipes/PipesPartitioner.java @@ -0,0 +1,69 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.pipes; + +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.Partitioner; +import org.apache.hadoop.util.ReflectionUtils; + +/** + * This partitioner is one that can either be set manually per a record or it + * can fall back onto a Java partitioner that was set by the user. + */ +class PipesPartitioner + implements Partitioner { + + private static ThreadLocal cache = new ThreadLocal(); + private Partitioner part = null; + + @SuppressWarnings("unchecked") + public void configure(JobConf conf) { + part = + ReflectionUtils.newInstance(Submitter.getJavaPartitioner(conf), conf); + } + + /** + * Set the next key to have the given partition. + * @param newValue the next partition value + */ + static void setNextPartition(int newValue) { + cache.set(newValue); + } + + /** + * If a partition result was set manually, return it. Otherwise, we call + * the Java partitioner. + * @param key the key to partition + * @param value the value to partition + * @param numPartitions the number of reduces + */ + public int getPartition(K key, V value, + int numPartitions) { + Integer result = cache.get(); + if (result == null) { + return part.getPartition(key, value, numPartitions); + } else { + return result; + } + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/pipes/PipesReducer.java b/src/mapred/org/apache/hadoop/mapred/pipes/PipesReducer.java new file mode 100644 index 0000000..91c4a92 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/pipes/PipesReducer.java @@ -0,0 +1,125 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.pipes; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.Reducer; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.mapred.SkipBadRecords; + +import java.io.IOException; +import java.util.Iterator; + +/** + * This class is used to talk to a C++ reduce task. + */ +class PipesReducer + implements Reducer { + private static final Log LOG= LogFactory.getLog(PipesReducer.class.getName()); + private JobConf job; + private Application application = null; + private DownwardProtocol downlink = null; + private boolean isOk = true; + private boolean skipping = false; + + public void configure(JobConf job) { + this.job = job; + //disable the auto increment of the counter. For pipes, no of processed + //records could be different(equal or less) than the no of records input. + SkipBadRecords.setAutoIncrReducerProcCount(job, false); + skipping = job.getBoolean("mapred.skip.on", false); + } + + /** + * Process all of the keys and values. Start up the application if we haven't + * started it yet. + */ + public void reduce(K2 key, Iterator values, + OutputCollector output, Reporter reporter + ) throws IOException { + isOk = false; + startApplication(output, reporter); + downlink.reduceKey(key); + while (values.hasNext()) { + downlink.reduceValue(values.next()); + } + if(skipping) { + //flush the streams on every record input if running in skip mode + //so that we don't buffer other records surrounding a bad record. + downlink.flush(); + } + isOk = true; + } + + @SuppressWarnings("unchecked") + private void startApplication(OutputCollector output, Reporter reporter) throws IOException { + if (application == null) { + try { + LOG.info("starting application"); + application = + new Application( + job, null, output, reporter, + (Class) job.getOutputKeyClass(), + (Class) job.getOutputValueClass()); + downlink = application.getDownlink(); + } catch (InterruptedException ie) { + throw new RuntimeException("interrupted", ie); + } + int reduce=0; + downlink.runReduce(reduce, Submitter.getIsJavaRecordWriter(job)); + } + } + + /** + * Handle the end of the input by closing down the application. 
+ */ + public void close() throws IOException { + // if we haven't started the application, we have nothing to do + if (isOk) { + OutputCollector nullCollector = new OutputCollector() { + public void collect(K3 key, + V3 value) throws IOException { + // NULL + } + }; + startApplication(nullCollector, Reporter.NULL); + } + try { + if (isOk) { + application.getDownlink().endOfInput(); + } else { + // send the abort to the application and let it clean up + application.getDownlink().abort(); + } + LOG.info("waiting for finish"); + application.waitForFinish(); + LOG.info("got done"); + } catch (Throwable t) { + application.abort(t); + } finally { + application.cleanup(); + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/pipes/Submitter.java b/src/mapred/org/apache/hadoop/mapred/pipes/Submitter.java new file mode 100644 index 0000000..046f083 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/pipes/Submitter.java @@ -0,0 +1,498 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.pipes; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.net.URLClassLoader; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.Iterator; +import java.util.StringTokenizer; + +import org.apache.commons.cli.BasicParser; +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.Option; +import org.apache.commons.cli.OptionBuilder; +import org.apache.commons.cli.OptionGroup; +import org.apache.commons.cli.CommandLineParser; +import org.apache.commons.cli.Options; +import org.apache.commons.cli.ParseException; +import org.apache.commons.cli.Parser; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.filecache.DistributedCache; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.FileInputFormat; +import org.apache.hadoop.mapred.FileOutputFormat; +import org.apache.hadoop.mapred.InputFormat; +import org.apache.hadoop.mapred.JobClient; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.Mapper; +import org.apache.hadoop.mapred.OutputFormat; +import org.apache.hadoop.mapred.Partitioner; +import org.apache.hadoop.mapred.Reducer; +import org.apache.hadoop.mapred.RunningJob; +import org.apache.hadoop.mapred.lib.HashPartitioner; +import org.apache.hadoop.mapred.lib.NullOutputFormat; +import org.apache.hadoop.util.GenericOptionsParser; +import org.apache.hadoop.util.Tool; + +/** + * The main entry point and job submitter. 
It may either be used as a command + * line-based or API-based method to launch Pipes jobs. + */ +public class Submitter extends Configured implements Tool { + + protected static final Log LOG = LogFactory.getLog(Submitter.class); + + public Submitter() { + this(new Configuration()); + } + + public Submitter(Configuration conf) { + setConf(conf); + } + + /** + * Get the URI of the application's executable. + * @param conf + * @return the URI where the application's executable is located + */ + public static String getExecutable(JobConf conf) { + return conf.get("hadoop.pipes.executable"); + } + + /** + * Set the URI for the application's executable. Normally this is a hdfs: + * location. + * @param conf + * @param executable The URI of the application's executable. + */ + public static void setExecutable(JobConf conf, String executable) { + conf.set("hadoop.pipes.executable", executable); + } + + /** + * Set whether the job is using a Java RecordReader. + * @param conf the configuration to modify + * @param value the new value + */ + public static void setIsJavaRecordReader(JobConf conf, boolean value) { + conf.setBoolean("hadoop.pipes.java.recordreader", value); + } + + /** + * Check whether the job is using a Java RecordReader + * @param conf the configuration to check + * @return is it a Java RecordReader? + */ + public static boolean getIsJavaRecordReader(JobConf conf) { + return conf.getBoolean("hadoop.pipes.java.recordreader", false); + } + + /** + * Set whether the Mapper is written in Java. + * @param conf the configuration to modify + * @param value the new value + */ + public static void setIsJavaMapper(JobConf conf, boolean value) { + conf.setBoolean("hadoop.pipes.java.mapper", value); + } + + /** + * Check whether the job is using a Java Mapper. + * @param conf the configuration to check + * @return is it a Java Mapper? + */ + public static boolean getIsJavaMapper(JobConf conf) { + return conf.getBoolean("hadoop.pipes.java.mapper", false); + } + + /** + * Set whether the Reducer is written in Java. + * @param conf the configuration to modify + * @param value the new value + */ + public static void setIsJavaReducer(JobConf conf, boolean value) { + conf.setBoolean("hadoop.pipes.java.reducer", value); + } + + /** + * Check whether the job is using a Java Reducer. + * @param conf the configuration to check + * @return is it a Java Reducer? + */ + public static boolean getIsJavaReducer(JobConf conf) { + return conf.getBoolean("hadoop.pipes.java.reducer", false); + } + + /** + * Set whether the job will use a Java RecordWriter. + * @param conf the configuration to modify + * @param value the new value to set + */ + public static void setIsJavaRecordWriter(JobConf conf, boolean value) { + conf.setBoolean("hadoop.pipes.java.recordwriter", value); + } + + /** + * Will the reduce use a Java RecordWriter? + * @param conf the configuration to check + * @return true, if the output of the job will be written by Java + */ + public static boolean getIsJavaRecordWriter(JobConf conf) { + return conf.getBoolean("hadoop.pipes.java.recordwriter", false); + } + + /** + * Set the configuration, if it doesn't already have a value for the given + * key. + * @param conf the configuration to modify + * @param key the key to set + * @param value the new "default" value to set + */ + private static void setIfUnset(JobConf conf, String key, String value) { + if (conf.get(key) == null) { + conf.set(key, value); + } + } + + /** + * Save away the user's original partitioner before we override it. 
+ * @param conf the configuration to modify + * @param cls the user's partitioner class + */ + static void setJavaPartitioner(JobConf conf, Class cls) { + conf.set("hadoop.pipes.partitioner", cls.getName()); + } + + /** + * Get the user's original partitioner. + * @param conf the configuration to look in + * @return the class that the user submitted + */ + static Class getJavaPartitioner(JobConf conf) { + return conf.getClass("hadoop.pipes.partitioner", + HashPartitioner.class, + Partitioner.class); + } + + /** + * Does the user want to keep the command file for debugging? If this is + * true, pipes will write a copy of the command data to a file in the + * task directory named "downlink.data", which may be used to run the C++ + * program under the debugger. You probably also want to set + * JobConf.setKeepFailedTaskFiles(true) to keep the entire directory from + * being deleted. + * To run using the data file, set the environment variable + * "hadoop.pipes.command.file" to point to the file. + * @param conf the configuration to check + * @return will the framework save the command file? + */ + public static boolean getKeepCommandFile(JobConf conf) { + return conf.getBoolean("hadoop.pipes.command-file.keep", false); + } + + /** + * Set whether to keep the command file for debugging + * @param conf the configuration to modify + * @param keep the new value + */ + public static void setKeepCommandFile(JobConf conf, boolean keep) { + conf.setBoolean("hadoop.pipes.command-file.keep", keep); + } + + /** + * Submit a job to the map/reduce cluster. All of the necessary modifications + * to the job to run under pipes are made to the configuration. + * @param conf the job to submit to the cluster (MODIFIED) + * @throws IOException + * @deprecated Use {@link Submitter#runJob(JobConf)} + */ + @Deprecated + public static RunningJob submitJob(JobConf conf) throws IOException { + return runJob(conf); + } + + /** + * Submit a job to the map/reduce cluster. All of the necessary modifications + * to the job to run under pipes are made to the configuration. + * @param conf the job to submit to the cluster (MODIFIED) + * @throws IOException + */ + public static RunningJob runJob(JobConf conf) throws IOException { + setupPipesJob(conf); + return JobClient.runJob(conf); + } + + /** + * Submit a job to the Map-Reduce framework. + * This returns a handle to the {@link RunningJob} which can be used to track + * the running-job. + * + * @param conf the job configuration. + * @return a handle to the {@link RunningJob} which can be used to track the + * running-job. + * @throws IOException + */ + public static RunningJob jobSubmit(JobConf conf) throws IOException { + setupPipesJob(conf); + return new JobClient(conf).submitJob(conf); + } + + private static void setupPipesJob(JobConf conf) throws IOException { + // default map output types to Text + if (!getIsJavaMapper(conf)) { + conf.setMapRunnerClass(PipesMapRunner.class); + // Save the user's partitioner and hook in our's. 
+ setJavaPartitioner(conf, conf.getPartitionerClass()); + conf.setPartitionerClass(PipesPartitioner.class); + } + if (!getIsJavaReducer(conf)) { + conf.setReducerClass(PipesReducer.class); + if (!getIsJavaRecordWriter(conf)) { + conf.setOutputFormat(NullOutputFormat.class); + } + } + String textClassname = Text.class.getName(); + setIfUnset(conf, "mapred.mapoutput.key.class", textClassname); + setIfUnset(conf, "mapred.mapoutput.value.class", textClassname); + setIfUnset(conf, "mapred.output.key.class", textClassname); + setIfUnset(conf, "mapred.output.value.class", textClassname); + + // Use PipesNonJavaInputFormat if necessary to handle progress reporting + // from C++ RecordReaders ... + if (!getIsJavaRecordReader(conf) && !getIsJavaMapper(conf)) { + conf.setClass("mapred.pipes.user.inputformat", + conf.getInputFormat().getClass(), InputFormat.class); + conf.setInputFormat(PipesNonJavaInputFormat.class); + } + + String exec = getExecutable(conf); + if (exec == null) { + throw new IllegalArgumentException("No application program defined."); + } + // add default debug script only when executable is expressed as + // # + if (exec.contains("#")) { + DistributedCache.createSymlink(conf); + // set default gdb commands for map and reduce task + String defScript = "$HADOOP_HOME/src/c++/pipes/debug/pipes-default-script"; + setIfUnset(conf,"mapred.map.task.debug.script",defScript); + setIfUnset(conf,"mapred.reduce.task.debug.script",defScript); + } + URI[] fileCache = DistributedCache.getCacheFiles(conf); + if (fileCache == null) { + fileCache = new URI[1]; + } else { + URI[] tmp = new URI[fileCache.length+1]; + System.arraycopy(fileCache, 0, tmp, 1, fileCache.length); + fileCache = tmp; + } + try { + fileCache[0] = new URI(exec); + } catch (URISyntaxException e) { + IOException ie = new IOException("Problem parsing execable URI " + exec); + ie.initCause(e); + throw ie; + } + DistributedCache.setCacheFiles(fileCache, conf); + } + + /** + * A command line parser for the CLI-based Pipes job submitter. + */ + static class CommandLineParser { + private Options options = new Options(); + + void addOption(String longName, boolean required, String description, + String paramName) { + Option option = OptionBuilder.withArgName(paramName).hasArgs(1).withDescription(description).isRequired(required).create(longName); + options.addOption(option); + } + + void addArgument(String name, boolean required, String description) { + Option option = OptionBuilder.withArgName(name).hasArgs(1).withDescription(description).isRequired(required).create(); + options.addOption(option); + + } + + Parser createParser() { + Parser result = new BasicParser(); + return result; + } + + void printUsage() { + // The CLI package should do this for us, but I can't figure out how + // to make it print something reasonable. 
+ System.out.println("bin/hadoop pipes"); + System.out.println(" [-input ] // Input directory"); + System.out.println(" [-output ] // Output directory"); + System.out.println(" [-jar // jar filename"); + System.out.println(" [-inputformat ] // InputFormat class"); + System.out.println(" [-map ] // Java Map class"); + System.out.println(" [-partitioner ] // Java Partitioner"); + System.out.println(" [-reduce ] // Java Reduce class"); + System.out.println(" [-writer ] // Java RecordWriter"); + System.out.println(" [-program ] // executable URI"); + System.out.println(" [-reduces ] // number of reduces"); + System.out.println(); + GenericOptionsParser.printGenericCommandUsage(System.out); + } + } + + private static + Class getClass(CommandLine cl, String key, + JobConf conf, + Class cls + ) throws ClassNotFoundException { + return conf.getClassByName((String) cl.getOptionValue(key)).asSubclass(cls); + } + + @Override + public int run(String[] args) throws Exception { + CommandLineParser cli = new CommandLineParser(); + if (args.length == 0) { + cli.printUsage(); + return 1; + } + cli.addOption("input", false, "input path to the maps", "path"); + cli.addOption("output", false, "output path from the reduces", "path"); + + cli.addOption("jar", false, "job jar file", "path"); + cli.addOption("inputformat", false, "java classname of InputFormat", + "class"); + //cli.addArgument("javareader", false, "is the RecordReader in Java"); + cli.addOption("map", false, "java classname of Mapper", "class"); + cli.addOption("partitioner", false, "java classname of Partitioner", + "class"); + cli.addOption("reduce", false, "java classname of Reducer", "class"); + cli.addOption("writer", false, "java classname of OutputFormat", "class"); + cli.addOption("program", false, "URI to application executable", "class"); + cli.addOption("reduces", false, "number of reduces", "num"); + cli.addOption("jobconf", false, + "\"n1=v1,n2=v2,..\" (Deprecated) Optional. 
Add or override a JobConf property.", + "key=val"); + Parser parser = cli.createParser(); + try { + + GenericOptionsParser genericParser = new GenericOptionsParser(getConf(), args); + CommandLine results = + parser.parse(cli.options, genericParser.getRemainingArgs()); + + JobConf job = new JobConf(getConf()); + + if (results.hasOption("input")) { + FileInputFormat.setInputPaths(job, + (String) results.getOptionValue("input")); + } + if (results.hasOption("output")) { + FileOutputFormat.setOutputPath(job, + new Path((String) results.getOptionValue("output"))); + } + if (results.hasOption("jar")) { + job.setJar((String) results.getOptionValue("jar")); + } + if (results.hasOption("inputformat")) { + setIsJavaRecordReader(job, true); + job.setInputFormat(getClass(results, "inputformat", job, + InputFormat.class)); + } + if (results.hasOption("javareader")) { + setIsJavaRecordReader(job, true); + } + if (results.hasOption("map")) { + setIsJavaMapper(job, true); + job.setMapperClass(getClass(results, "map", job, Mapper.class)); + } + if (results.hasOption("partitioner")) { + job.setPartitionerClass(getClass(results, "partitioner", job, + Partitioner.class)); + } + if (results.hasOption("reduce")) { + setIsJavaReducer(job, true); + job.setReducerClass(getClass(results, "reduce", job, Reducer.class)); + } + if (results.hasOption("reduces")) { + job.setNumReduceTasks(Integer.parseInt((String) + results.getOptionValue("reduces"))); + } + if (results.hasOption("writer")) { + setIsJavaRecordWriter(job, true); + job.setOutputFormat(getClass(results, "writer", job, + OutputFormat.class)); + } + if (results.hasOption("program")) { + setExecutable(job, (String) results.getOptionValue("program")); + } + if (results.hasOption("jobconf")) { + LOG.warn("-jobconf option is deprecated, please use -D instead."); + String options = (String)results.getOptionValue("jobconf"); + StringTokenizer tokenizer = new StringTokenizer(options, ","); + while (tokenizer.hasMoreTokens()) { + String keyVal = tokenizer.nextToken().trim(); + String[] keyValSplit = keyVal.split("=", 2); + job.set(keyValSplit[0], keyValSplit[1]); + } + } + // if they gave us a jar file, include it into the class path + String jarFile = job.getJar(); + if (jarFile != null) { + final URL[] urls = new URL[]{ FileSystem.getLocal(job). + pathToFile(new Path(jarFile)).toURL()}; + //FindBugs complains that creating a URLClassLoader should be + //in a doPrivileged() block. + ClassLoader loader = + AccessController.doPrivileged( + new PrivilegedAction() { + public ClassLoader run() { + return new URLClassLoader(urls); + } + } + ); + job.setClassLoader(loader); + } + + runJob(job); + return 0; + } catch (ParseException pe) { + LOG.info("Error : " + pe); + cli.printUsage(); + return 1; + } + + } + + /** + * Submit a pipes job based on the command line arguments. + * @param args + */ + public static void main(String[] args) throws Exception { + int exitCode = new Submitter().run(args); + System.exit(exitCode); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapred/pipes/UpwardProtocol.java b/src/mapred/org/apache/hadoop/mapred/pipes/UpwardProtocol.java new file mode 100644 index 0000000..eb38aae --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/pipes/UpwardProtocol.java @@ -0,0 +1,91 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred.pipes; + +import java.io.IOException; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; + +/** + * The interface for the messages that can come up from the child. All of these + * calls are asynchronous and return before the message has been processed. + */ +interface UpwardProtocol { + /** + * Output a record from the child. + * @param key the record's key + * @param value the record's value + * @throws IOException + */ + void output(K key, V value) throws IOException; + + /** + * Map functions where the application has defined a partition function + * output records along with their partition. + * @param reduce the reduce to send this record to + * @param key the record's key + * @param value the record's value + * @throws IOException + */ + void partitionedOutput(int reduce, K key, + V value) throws IOException; + + /** + * Update the task's status message + * @param msg the string to display to the user + * @throws IOException + */ + void status(String msg) throws IOException; + + /** + * Report making progress (and the current progress) + * @param progress the current progress (0.0 to 1.0) + * @throws IOException + */ + void progress(float progress) throws IOException; + + /** + * Report that the application has finished processing all inputs + * successfully. + * @throws IOException + */ + void done() throws IOException; + + /** + * Report that the application or more likely communication failed. + * @param e + */ + void failed(Throwable e); + + /** + * Register a counter with the given id and group/name. + * @param group counter group + * @param name counter name + * @throws IOException + */ + void registerCounter(int id, String group, String name) throws IOException; + + /** + * Increment the value of a registered counter. + * @param id counter id of the registered counter + * @param amount increment for the counter value + * @throws IOException + */ + void incrementCounter(int id, long amount) throws IOException; +} diff --git a/src/mapred/org/apache/hadoop/mapred/pipes/package.html b/src/mapred/org/apache/hadoop/mapred/pipes/package.html new file mode 100644 index 0000000..26ca7b2 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/pipes/package.html @@ -0,0 +1,127 @@ + + + + + + +Hadoop Pipes allows C++ code to use Hadoop DFS and map/reduce. The +primary approach is to split the C++ code into a separate process that +does the application specific code. In many ways, the approach will be +similar to Hadoop streaming, but using Writable serialization to +convert the types into bytes that are sent to the process via a +socket. + +
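As a concrete illustration of that serialization step, the short Java sketch below (not part of the patch; the class name is made up) round-trips a Text value through the same kind of reusable byte buffer that BinaryProtocol's writeObject uses for generic Writables before framing them on the socket.

import java.io.IOException;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;

public class WritableBytesSketch {
  public static void main(String[] args) throws IOException {
    // Serialize a Writable into a reusable byte buffer, roughly the way
    // BinaryProtocol buffers a generic object before writing it to the child.
    DataOutputBuffer out = new DataOutputBuffer();
    new Text("abc").write(out);

    // out.getData()/out.getLength() hold the bytes that would travel to the
    // C++ child after a vint length prefix.
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());

    // The receiving side reconstructs the value from the same bytes.
    Text roundTripped = new Text();
    roundTripped.readFields(in);
    System.out.println(roundTripped);   // prints: abc
  }
}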

+ +The class org.apache.hadoop.mapred.pipes.Submitter has a public static +method to submit a job as a JobConf and a main method that takes an +application and optional configuration file, input directories, and +output directory. The cli for the main looks like: + +

+bin/hadoop pipes \
+  [-input inputDir] \
+  [-output outputDir] \
+  [-jar applicationJarFile] \
+  [-inputformat class] \
+  [-map class] \
+  [-partitioner class] \
+  [-reduce class] \
+  [-writer class] \
+  [-program program url] \ 
+  [-conf configuration file] \
+  [-D property=value] \
+  [-fs local|namenode:port] \
+  [-jt local|jobtracker:port] \
+  [-files comma separated list of files] \ 
+  [-libjars comma separated list of jars] \
+  [-archives comma separated list of archives] 
+
+ + +
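The same job can also be configured and launched from Java through the Submitter API introduced by this patch. The sketch below is illustrative only: the class name, job name, paths, and executable URI are placeholders.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.pipes.Submitter;

public class PipesSubmitSketch {
  public static void main(String[] args) throws Exception {
    JobConf conf = new JobConf();
    conf.setJobName("pipes-example");                                    // hypothetical name

    FileInputFormat.setInputPaths(conf, new Path("/user/demo/input"));   // placeholder path
    FileOutputFormat.setOutputPath(conf, new Path("/user/demo/output")); // placeholder path

    // URI of the C++ executable; Submitter distributes it through the DistributedCache.
    Submitter.setExecutable(conf, "hdfs:///apps/pipes/example-binary");  // placeholder URI

    // Read input and write output on the Java side; map and reduce run in the C++ child.
    Submitter.setIsJavaRecordReader(conf, true);
    Submitter.setIsJavaRecordWriter(conf, true);

    RunningJob job = Submitter.runJob(conf);   // blocks until the job completes
    System.out.println("succeeded: " + job.isSuccessful());
  }
}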

+ +The application programs link against a thin C++ wrapper library that +handles the communication with the rest of the Hadoop system. The C++ +interface is "swigable" so that interfaces can be generated for Python +and other scripting languages. All of the C++ functions and classes +are in the HadoopPipes namespace. The job may consist of any +combination of Java and C++ RecordReaders, Mappers, Partitioners, +Combiners, Reducers, and RecordWriters. + +

+ +Hadoop Pipes has generic Java classes for handling the mapper and +reducer (PipesMapRunner and PipesReducer). They fork off the +application program and communicate with it over a socket. The +communication is handled by the C++ wrapper library on one side and by +PipesMapRunner and PipesReducer on the other. + +
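For a sense of what travels over that socket, the sketch below frames two downlink messages the way BinaryProtocol in this patch does: a vint command code from MessageType followed by the command's payload. The in-memory stream and the protocol version literal are stand-ins for illustration; they are not the patch's own constants.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableUtils;

public class DownlinkFramingSketch {
  public static void main(String[] args) throws IOException {
    // Stand-in for the buffered socket stream held by BinaryProtocol.
    DataOutputStream stream = new DataOutputStream(new ByteArrayOutputStream());

    // START: command code 0, followed by the protocol version
    // (the real constant lives in BinaryProtocol; 0 is only a placeholder here).
    WritableUtils.writeVInt(stream, 0);   // MessageType.START.code
    WritableUtils.writeVInt(stream, 0);   // CURRENT_PROTOCOL_VERSION (assumed value)

    // SET_INPUT_TYPES: command code 2, then the key and value class names.
    WritableUtils.writeVInt(stream, 2);   // MessageType.SET_INPUT_TYPES.code
    Text.writeString(stream, "org.apache.hadoop.io.Text");
    Text.writeString(stream, "org.apache.hadoop.io.Text");

    stream.flush();
  }
}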

+ +The application program passes a factory object, which can create +the various objects needed by the framework, to the runTask +function. The framework creates the Mapper or Reducer as +appropriate and calls its map or reduce method to invoke the +application's code. The JobConf is available to the application. + +

+ +The Mapper and Reducer objects get all of their inputs, outputs, and +context via context objects. The advantage of using the context +objects is that their interface can be extended with additional +methods without breaking clients. Although this interface is different +from the current Java interface, the plan is to migrate the Java +interface in this direction. + +

+ +Although the Java implementation is typed, the C++ interface for keys +and values is just a byte buffer. Since STL strings provide precisely +the right functionality and are standard, they will be used. The +decision not to use stronger types was made to simplify the interface. + +
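A small Java sketch (with an in-memory stream standing in for the socket) shows why a Text emitted from Java arrives in C++ as the plain bytes of a std::string: mirroring writeObject in BinaryProtocol above, Text and BytesWritable are framed as a vint length followed by the raw bytes, with no additional wrapping.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableUtils;

public class RawKeyEncodingSketch {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream stream = new DataOutputStream(bytes);

    // Same framing as BinaryProtocol.writeObject for a Text value:
    // a vint length, then the UTF-8 bytes themselves.
    Text key = new Text("abc");
    int len = key.getLength();
    WritableUtils.writeVInt(stream, len);
    stream.write(key.getBytes(), 0, len);
    stream.flush();

    // The other side reads the length and copies the bytes, so a value
    // emitted as "abc" shows up there as exactly those three bytes.
    System.out.println(bytes.size() + " bytes on the wire");
  }
}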

+ +The application can also define combiner functions. The combiner will +be run locally by the framework in the application process to avoid +the round trip to the Java process and back. Because the compare +function is not available in C++, the combiner will use memcmp to +sort its inputs. This is not as general as the Java +equivalent, which uses the user's comparator, but should cover the +majority of the use cases. As the map function outputs key/value +pairs, they will be buffered. When the buffer is full, it will be +sorted and passed to the combiner. The output of the combiner will be +sent to the Java process. + +

+ +The application can also set a partition function to control which +reduce each key is sent to. If a partition function is not +defined, the Java one will be used. The partition function will be +called by the C++ framework before the key/value pair is sent back to +Java. + +
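When the job relies on that Java fallback, the partitioner is an ordinary old-API Partitioner set on the JobConf before submission; for a job with a C++ mapper, Submitter's setup records it under hadoop.pipes.partitioner and installs PipesPartitioner in its place. The class below is a hypothetical example, not part of the patch.

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Partitioner;

// Hypothetical Java partitioner used as the fallback for a Pipes job.
public class FirstCharPartitioner implements Partitioner<Text, Text> {
  public void configure(JobConf conf) {
    // no configuration needed
  }

  public int getPartition(Text key, Text value, int numPartitions) {
    if (key.getLength() == 0) {
      return 0;
    }
    // Route keys by their first byte; PipesPartitioner delegates here whenever
    // the C++ side has not set an explicit partition for the record.
    return (key.getBytes()[0] & Integer.MAX_VALUE) % numPartitions;
  }
}

It would be registered with conf.setPartitionerClass(FirstCharPartitioner.class) before calling Submitter.runJob(conf).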

+ +The application programs can also register counters with a group and a name +and also increment the counters and get the counter values. Word-count +example illustrating pipes usage with counters is available at +wordcount-simple.cc + + diff --git a/src/mapred/org/apache/hadoop/mapred/tools/MRAdmin.java b/src/mapred/org/apache/hadoop/mapred/tools/MRAdmin.java new file mode 100644 index 0000000..0284660 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/tools/MRAdmin.java @@ -0,0 +1,262 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred.tools; + +import java.io.IOException; + +import javax.security.auth.login.LoginException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.mapred.JobTracker; +import org.apache.hadoop.mapred.AdminOperationsProtocol; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.UnixUserGroupInformation; +import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; + +/** + * Administrative access to Hadoop Map-Reduce. + * + * Currently it only provides the ability to connect to the {@link JobTracker} + * and 1) refresh the service-level authorization policy, 2) refresh queue acl + * properties. 
+ */ +public class MRAdmin extends Configured implements Tool { + + public MRAdmin() { + super(); + } + + public MRAdmin(Configuration conf) { + super(conf); + } + + private static void printHelp(String cmd) { + String summary = "hadoop mradmin is the command to execute Map-Reduce administrative commands.\n" + + "The full syntax is: \n\n" + + "hadoop mradmin [-refreshServiceAcl] [-refreshQueueAcls] [-help [cmd]] " + + "[-refreshNodes]\n"; + + String refreshServiceAcl = "-refreshServiceAcl: Reload the service-level authorization policy file\n" + + "\t\tJobtracker will reload the authorization policy file.\n"; + + String refreshQueueAcls = + "-refreshQueueAcls: Reload the queue acls\n" + + "\t\tJobTracker will reload the mapred-queue-acls.xml file.\n"; + + String refreshNodes = + "-refreshNodes: Refresh the hosts information at the jobtracker.\n"; + + String help = "-help [cmd]: \tDisplays help for the given command or all commands if none\n" + + "\t\tis specified.\n"; + + if ("refreshServiceAcl".equals(cmd)) { + System.out.println(refreshServiceAcl); + } else if ("refreshQueueAcls".equals(cmd)) { + System.out.println(refreshQueueAcls); + } else if ("refreshNodes".equals(cmd)) { + System.out.println(refreshNodes); + } else if ("help".equals(cmd)) { + System.out.println(help); + } else { + System.out.println(summary); + System.out.println(refreshServiceAcl); + System.out.println(refreshQueueAcls); + System.out.println(refreshNodes); + System.out.println(help); + System.out.println(); + ToolRunner.printGenericCommandUsage(System.out); + } + +} + + /** + * Displays format of commands. + * @param cmd The command that is being executed. + */ + private static void printUsage(String cmd) { + if ("-refreshServiceAcl".equals(cmd)) { + System.err.println("Usage: java MRAdmin" + " [-refreshServiceAcl]"); + } else if ("-refreshQueueAcls".equals(cmd)) { + System.err.println("Usage: java MRAdmin" + " [-refreshQueueAcls]"); + } else if ("-refreshNodes".equals(cmd)) { + System.err.println("Usage: java MRAdmin" + " [-refreshNodes]"); + } else { + System.err.println("Usage: java MRAdmin"); + System.err.println(" [-refreshServiceAcl]"); + System.err.println(" [-refreshQueueAcls]"); + System.err.println(" [-refreshNodes]"); + System.err.println(" [-help [cmd]]"); + System.err.println(); + ToolRunner.printGenericCommandUsage(System.err); + } + } + + private static UnixUserGroupInformation getUGI(Configuration conf) + throws IOException { + UnixUserGroupInformation ugi = null; + try { + ugi = UnixUserGroupInformation.login(conf, true); + } catch (LoginException e) { + throw (IOException)(new IOException( + "Failed to get the current user's information.").initCause(e)); + } + return ugi; + } + + private int refreshAuthorizationPolicy() throws IOException { + // Get the current configuration + Configuration conf = getConf(); + + // Create the client + RefreshAuthorizationPolicyProtocol refreshProtocol = + (RefreshAuthorizationPolicyProtocol) + RPC.getProxy(RefreshAuthorizationPolicyProtocol.class, + RefreshAuthorizationPolicyProtocol.versionID, + JobTracker.getAddress(conf), getUGI(conf), conf, + NetUtils.getSocketFactory(conf, + RefreshAuthorizationPolicyProtocol.class)); + + // Refresh the authorization policy in-effect + refreshProtocol.refreshServiceAcl(); + + return 0; + } + + private int refreshQueueAcls() throws IOException { + // Get the current configuration + Configuration conf = getConf(); + + // Create the client + AdminOperationsProtocol adminOperationsProtocol = + (AdminOperationsProtocol) + 
RPC.getProxy(AdminOperationsProtocol.class, + AdminOperationsProtocol.versionID, + JobTracker.getAddress(conf), getUGI(conf), conf, + NetUtils.getSocketFactory(conf, + AdminOperationsProtocol.class)); + + // Refresh the queue properties + adminOperationsProtocol.refreshQueueAcls(); + + return 0; + } + + /** + * Command to ask the jobtracker to reread the hosts and excluded hosts + * file. + * Usage: java MRAdmin -refreshNodes + * @exception IOException + */ + private int refreshNodes() throws IOException { + // Get the current configuration + Configuration conf = getConf(); + + // Create the client + AdminOperationsProtocol adminOperationsProtocol = + (AdminOperationsProtocol) + RPC.getProxy(AdminOperationsProtocol.class, + AdminOperationsProtocol.versionID, + JobTracker.getAddress(conf), getUGI(conf), conf, + NetUtils.getSocketFactory(conf, + AdminOperationsProtocol.class)); + + // Refresh the queue properties + adminOperationsProtocol.refreshNodes(); + + return 0; + } + + @Override + public int run(String[] args) throws Exception { + if (args.length < 1) { + printUsage(""); + return -1; + } + + int exitCode = -1; + int i = 0; + String cmd = args[i++]; + + // + // verify that we have enough command line parameters + // + if ("-refreshServiceAcl".equals(cmd) || "-refreshQueueAcls".equals(cmd) + || "-refreshNodes".equals(cmd)) { + if (args.length != 1) { + printUsage(cmd); + return exitCode; + } + } + + exitCode = 0; + try { + if ("-refreshServiceAcl".equals(cmd)) { + exitCode = refreshAuthorizationPolicy(); + } else if ("-refreshQueueAcls".equals(cmd)) { + exitCode = refreshQueueAcls(); + } else if ("-refreshNodes".equals(cmd)) { + exitCode = refreshNodes(); + } else if ("-help".equals(cmd)) { + if (i < args.length) { + printUsage(args[i]); + } else { + printHelp(""); + } + } else { + exitCode = -1; + System.err.println(cmd.substring(1) + ": Unknown command"); + printUsage(""); + } + + } catch (IllegalArgumentException arge) { + exitCode = -1; + System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage()); + printUsage(cmd); + } catch (RemoteException e) { + // + // This is a error returned by hadoop server. Print + // out the first line of the error mesage, ignore the stack trace. + exitCode = -1; + try { + String[] content; + content = e.getLocalizedMessage().split("\n"); + System.err.println(cmd.substring(1) + ": " + + content[0]); + } catch (Exception ex) { + System.err.println(cmd.substring(1) + ": " + + ex.getLocalizedMessage()); + } + } catch (Exception e) { + exitCode = -1; + System.err.println(cmd.substring(1) + ": " + + e.getLocalizedMessage()); + } + return exitCode; + } + + public static void main(String[] args) throws Exception { + int result = ToolRunner.run(new MRAdmin(), args); + System.exit(result); + } +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/ClusterMetrics.java b/src/mapred/org/apache/hadoop/mapreduce/ClusterMetrics.java new file mode 100644 index 0000000..e6ca410 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/ClusterMetrics.java @@ -0,0 +1,230 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapreduce; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import org.apache.hadoop.io.Writable; + +/** + * Status information on the current state of the Map-Reduce cluster. + * + *

+ * <p><code>ClusterMetrics</code> provides clients with information such as:
+ * <ol>
+ *   <li>Size of the cluster.</li>
+ *   <li>Number of blacklisted and decommissioned trackers.</li>
+ *   <li>Slot capacity of the cluster.</li>
+ *   <li>The number of currently occupied/reserved map & reduce slots.</li>
+ *   <li>The number of currently running map & reduce tasks.</li>
+ *   <li>The number of job submissions.</li>
+ * </ol></p>
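+ *
+ * <p>A minimal round-trip sketch (illustrative only, not part of this patch;
+ * {@link org.apache.hadoop.io.DataOutputBuffer} and
+ * {@link org.apache.hadoop.io.DataInputBuffer} are existing Hadoop I/O buffer
+ * classes) showing the Writable wire format this class defines:
+ * <pre>
+ *   ClusterMetrics metrics = new ClusterMetrics(1, 1, 2, 2, 0, 0, 10, 10, 5, 4, 0, 0);
+ *   DataOutputBuffer out = new DataOutputBuffer();
+ *   metrics.write(out);
+ *   DataInputBuffer in = new DataInputBuffer();
+ *   in.reset(out.getData(), out.getLength());
+ *   ClusterMetrics copy = new ClusterMetrics();
+ *   copy.readFields(in);                 // copy.getTaskTrackerCount() == 4
+ * </pre>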
+ * + */ +public class ClusterMetrics implements Writable { + private int runningMaps; + private int runningReduces; + private int occupiedMapSlots; + private int occupiedReduceSlots; + private int reservedMapSlots; + private int reservedReduceSlots; + private int totalMapSlots; + private int totalReduceSlots; + private int totalJobSubmissions; + private int numTrackers; + private int numBlacklistedTrackers; + private int numDecommissionedTrackers; + + public ClusterMetrics() { + } + + public ClusterMetrics(int runningMaps, int runningReduces, + int occupiedMapSlots, int occupiedReduceSlots, + int reservedMapSlots, int reservedReduceSlots, + int mapSlots, int reduceSlots, + int totalJobSubmissions, + int numTrackers, int numBlacklistedTrackers, + int numDecommissionedNodes) { + this.runningMaps = runningMaps; + this.runningReduces = runningReduces; + this.occupiedMapSlots = occupiedMapSlots; + this.occupiedReduceSlots = occupiedReduceSlots; + this.reservedMapSlots = reservedMapSlots; + this.reservedReduceSlots = reservedReduceSlots; + this.totalMapSlots = mapSlots; + this.totalReduceSlots = reduceSlots; + this.totalJobSubmissions = totalJobSubmissions; + this.numTrackers = numTrackers; + this.numBlacklistedTrackers = numBlacklistedTrackers; + this.numDecommissionedTrackers = numDecommissionedNodes; + } + + /** + * Get the number of running map tasks in the cluster. + * + * @return running maps + */ + public int getRunningMaps() { + return runningMaps; + } + + /** + * Get the number of running reduce tasks in the cluster. + * + * @return running reduces + */ + public int getRunningReduces() { + return runningReduces; + } + + /** + * Get number of occupied map slots in the cluster. + * + * @return occupied map slot count + */ + public int getOccupiedMapSlots() { + return occupiedMapSlots; + } + + /** + * Get the number of occupied reduce slots in the cluster. + * + * @return occupied reduce slot count + */ + public int getOccupiedReduceSlots() { + return occupiedReduceSlots; + } + + /** + * Get number of reserved map slots in the cluster. + * + * @return reserved map slot count + */ + public int getReservedMapSlots() { + return reservedMapSlots; + } + + /** + * Get the number of reserved reduce slots in the cluster. + * + * @return reserved reduce slot count + */ + public int getReservedReduceSlots() { + return reservedReduceSlots; + } + + /** + * Get the total number of map slots in the cluster. + * + * @return map slot capacity + */ + public int getMapSlotCapacity() { + return totalMapSlots; + } + + /** + * Get the total number of reduce slots in the cluster. + * + * @return reduce slot capacity + */ + public int getReduceSlotCapacity() { + return totalReduceSlots; + } + + /** + * Get the total number of job submissions in the cluster. + * + * @return total number of job submissions + */ + public int getTotalJobSubmissions() { + return totalJobSubmissions; + } + + /** + * Get the number of active trackers in the cluster. + * + * @return active tracker count. + */ + public int getTaskTrackerCount() { + return numTrackers; + } + + /** + * Get the number of blacklisted trackers in the cluster. + * + * @return blacklisted tracker count + */ + public int getBlackListedTaskTrackerCount() { + return numBlacklistedTrackers; + } + + /** + * Get the number of decommissioned trackers in the cluster. 
+ * + * @return decommissioned tracker count + */ + public int getDecommissionedTaskTrackerCount() { + return numDecommissionedTrackers; + } + + @Override + public void readFields(DataInput in) throws IOException { + runningMaps = in.readInt(); + runningReduces = in.readInt(); + occupiedMapSlots = in.readInt(); + occupiedReduceSlots = in.readInt(); + reservedMapSlots = in.readInt(); + reservedReduceSlots = in.readInt(); + totalMapSlots = in.readInt(); + totalReduceSlots = in.readInt(); + totalJobSubmissions = in.readInt(); + numTrackers = in.readInt(); + numBlacklistedTrackers = in.readInt(); + numDecommissionedTrackers = in.readInt(); + } + + @Override + public void write(DataOutput out) throws IOException { + out.writeInt(runningMaps); + out.writeInt(runningReduces); + out.writeInt(occupiedMapSlots); + out.writeInt(occupiedReduceSlots); + out.writeInt(reservedMapSlots); + out.writeInt(reservedReduceSlots); + out.writeInt(totalMapSlots); + out.writeInt(totalReduceSlots); + out.writeInt(totalJobSubmissions); + out.writeInt(numTrackers); + out.writeInt(numBlacklistedTrackers); + out.writeInt(numDecommissionedTrackers); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/Counter.java b/src/mapred/org/apache/hadoop/mapreduce/Counter.java new file mode 100644 index 0000000..6fb4e50 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/Counter.java @@ -0,0 +1,139 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce; + +import java.io.IOException; +import java.io.DataInput; +import java.io.DataOutput; + +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableUtils; + +/** + * A named counter that tracks the progress of a map/reduce job. + * + *

+ * <p><code>Counters</code> represent global counters, defined either by the
+ * Map-Reduce framework or applications. Each <code>Counter</code> is named by
+ * an {@link Enum} and has a long for the value.</p>
+ *
Counters are bunched into Groups, each comprising of + * counters from a particular Enum class. + */ +public class Counter implements Writable { + + private String name; + private String displayName; + private long value = 0; + + protected Counter() { + } + + protected Counter(String name, String displayName) { + this.name = name; + this.displayName = displayName; + } + + @Deprecated + protected synchronized void setDisplayName(String displayName) { + this.displayName = displayName; + } + + /** + * Read the binary representation of the counter + */ + @Override + public synchronized void readFields(DataInput in) throws IOException { + name = Text.readString(in).intern(); + if (in.readBoolean()) { + displayName = Text.readString(in).intern(); + } else { + displayName = name; + } + value = WritableUtils.readVLong(in); + } + + /** + * Write the binary representation of the counter + */ + @Override + public synchronized void write(DataOutput out) throws IOException { + Text.writeString(out, name); + boolean distinctDisplayName = ! name.equals(displayName); + out.writeBoolean(distinctDisplayName); + if (distinctDisplayName) { + Text.writeString(out, displayName); + } + WritableUtils.writeVLong(out, value); + } + + public synchronized String getName() { + return name; + } + + /** + * Get the name of the counter. + * @return the user facing name of the counter + */ + public synchronized String getDisplayName() { + return displayName; + } + + /** + * What is the current value of this counter? + * @return the current value + */ + public synchronized long getValue() { + return value; + } + + /** + * Set this counter by the given value + * @param value the value to set + */ + public synchronized void setValue(long value) { + this.value = value; + } + + /** + * Increment this counter by the given value + * @param incr the value to increase this counter by + */ + public synchronized void increment(long incr) { + value += incr; + } + + @Override + public synchronized boolean equals(Object genericRight) { + if (genericRight instanceof Counter) { + synchronized (genericRight) { + Counter right = (Counter) genericRight; + return name.equals(right.name) && + displayName.equals(right.displayName) && + value == right.value; + } + } + return false; + } + + @Override + public synchronized int hashCode() { + return name.hashCode() + displayName.hashCode(); + } +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/CounterGroup.java b/src/mapred/org/apache/hadoop/mapreduce/CounterGroup.java new file mode 100644 index 0000000..f48fd29 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/CounterGroup.java @@ -0,0 +1,184 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapreduce; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.Iterator; +import java.util.MissingResourceException; +import java.util.ResourceBundle; +import java.util.TreeMap; + +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableUtils; + +/** + * A group of {@link Counter}s that logically belong together. Typically, + * it is an {@link Enum} subclass and the counters are the values. + */ +public class CounterGroup implements Writable, Iterable { + private String name; + private String displayName; + private TreeMap counters = new TreeMap(); + // Optional ResourceBundle for localization of group and counter names. + private ResourceBundle bundle = null; + + /** + * Returns the specified resource bundle, or throws an exception. + * @throws MissingResourceException if the bundle isn't found + */ + private static ResourceBundle getResourceBundle(String enumClassName) { + String bundleName = enumClassName.replace('$','_'); + return ResourceBundle.getBundle(bundleName); + } + + protected CounterGroup(String name) { + this.name = name; + try { + bundle = getResourceBundle(name); + } + catch (MissingResourceException neverMind) { + } + displayName = localize("CounterGroupName", name); + } + + protected CounterGroup(String name, String displayName) { + this.name = name; + this.displayName = displayName; + } + + /** + * Get the internal name of the group + * @return the internal name + */ + public synchronized String getName() { + return name; + } + + /** + * Get the display name of the group. + * @return the human readable name + */ + public synchronized String getDisplayName() { + return displayName; + } + + synchronized void addCounter(Counter counter) { + counters.put(counter.getName(), counter); + } + + /** + * Internal to find a counter in a group. + * @param counterName the name of the counter + * @param displayName the display name of the counter + * @return the counter that was found or added + */ + protected Counter findCounter(String counterName, String displayName) { + Counter result = counters.get(counterName); + if (result == null) { + result = new Counter(counterName, displayName); + counters.put(counterName, result); + } + return result; + } + + public synchronized Counter findCounter(String counterName) { + Counter result = counters.get(counterName); + if (result == null) { + String displayName = localize(counterName, counterName); + result = new Counter(counterName, displayName); + counters.put(counterName, result); + } + return result; + } + + public synchronized Iterator iterator() { + return counters.values().iterator(); + } + + public synchronized void write(DataOutput out) throws IOException { + Text.writeString(out, displayName); + WritableUtils.writeVInt(out, counters.size()); + for(Counter counter: counters.values()) { + counter.write(out); + } + } + + public synchronized void readFields(DataInput in) throws IOException { + displayName = Text.readString(in); + counters.clear(); + int size = WritableUtils.readVInt(in); + for(int i=0; i < size; i++) { + Counter counter = new Counter(); + counter.readFields(in); + counters.put(counter.getName(), counter); + } + } + + /** + * Looks up key in the ResourceBundle and returns the corresponding value. + * If the bundle or the key doesn't exist, returns the default value. 
+ */ + private String localize(String key, String defaultValue) { + String result = defaultValue; + if (bundle != null) { + try { + result = bundle.getString(key); + } + catch (MissingResourceException mre) { + } + } + return result; + } + + /** + * Returns the number of counters in this group. + */ + public synchronized int size() { + return counters.size(); + } + + public synchronized boolean equals(Object genericRight) { + if (genericRight instanceof CounterGroup) { + Iterator right = ((CounterGroup) genericRight).counters. + values().iterator(); + Iterator left = counters.values().iterator(); + while (left.hasNext()) { + if (!right.hasNext() || !left.next().equals(right.next())) { + return false; + } + } + return !right.hasNext(); + } + return false; + } + + public synchronized int hashCode() { + return counters.hashCode(); + } + + public synchronized void incrAllCounters(CounterGroup rightGroup) { + for(Counter right: rightGroup.counters.values()) { + Counter left = findCounter(right.getName(), right.getDisplayName()); + left.increment(right.getValue()); + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/Counters.java b/src/mapred/org/apache/hadoop/mapreduce/Counters.java new file mode 100644 index 0000000..317309d --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/Counters.java @@ -0,0 +1,185 @@ +package org.apache.hadoop.mapreduce; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.Collection; +import java.util.IdentityHashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.TreeMap; + +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; + +public class Counters implements Writable,Iterable { + /** + * A cache from enum values to the associated counter. Dramatically speeds up + * typical usage. + */ + private Map, Counter> cache = new IdentityHashMap, Counter>(); + + private TreeMap groups = + new TreeMap(); + + public Counters() { + } + + Counters(org.apache.hadoop.mapred.Counters counters) { + for(org.apache.hadoop.mapred.Counters.Group group: counters) { + String name = group.getName(); + CounterGroup newGroup = new CounterGroup(name, group.getDisplayName()); + groups.put(name, newGroup); + for(Counter counter: group) { + newGroup.addCounter(counter); + } + } + } + + public Counter findCounter(String groupName, String counterName) { + CounterGroup grp = getGroup(groupName); + return grp.findCounter(counterName); + } + + /** + * Find the counter for the given enum. The same enum will always return the + * same counter. + * @param key the counter key + * @return the matching counter object + */ + public synchronized Counter findCounter(Enum key) { + Counter counter = cache.get(key); + if (counter == null) { + counter = findCounter(key.getDeclaringClass().getName(), key.toString()); + cache.put(key, counter); + } + return counter; + } + + /** + * Returns the names of all counter classes. + * @return Set of counter names. + */ + public synchronized Collection getGroupNames() { + return groups.keySet(); + } + + @Override + public Iterator iterator() { + return groups.values().iterator(); + } + + /** + * Returns the named counter group, or an empty group if there is none + * with the specified name. 
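+ * For example (an illustrative sketch, not part of this patch, using only
+ * methods declared by the counter classes in this package):
+ * <pre>
+ *   Counters counters = new Counters();
+ *   counters.findCounter("records", "TOTAL").increment(1);
+ *   counters.getGroup("records").findCounter("TOTAL").getValue();   // returns 1
+ * </pre>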
+ */ + public synchronized CounterGroup getGroup(String groupName) { + CounterGroup grp = groups.get(groupName); + if (grp == null) { + grp = new CounterGroup(groupName); + groups.put(groupName, grp); + } + return grp; + } + + /** + * Returns the total number of counters, by summing the number of counters + * in each group. + */ + public synchronized int countCounters() { + int result = 0; + for (CounterGroup group : this) { + result += group.size(); + } + return result; + } + + /** + * Write the set of groups. + * The external format is: + * #groups (groupName group)* + * + * i.e. the number of groups followed by 0 or more groups, where each + * group is of the form: + * + * groupDisplayName #counters (false | true counter)* + * + * where each counter is of the form: + * + * name (false | true displayName) value + */ + @Override + public synchronized void write(DataOutput out) throws IOException { + out.writeInt(groups.size()); + for (org.apache.hadoop.mapreduce.CounterGroup group: groups.values()) { + Text.writeString(out, group.getName()); + group.write(out); + } + } + + /** + * Read a set of groups. + */ + @Override + public synchronized void readFields(DataInput in) throws IOException { + int numClasses = in.readInt(); + groups.clear(); + while (numClasses-- > 0) { + String groupName = Text.readString(in); + CounterGroup group = new CounterGroup(groupName); + group.readFields(in); + groups.put(groupName, group); + } + } + + /** + * Return textual representation of the counter values. + */ + public synchronized String toString() { + StringBuilder sb = new StringBuilder("Counters: " + countCounters()); + for (CounterGroup group: this) { + sb.append("\n\t" + group.getDisplayName()); + for (Counter counter: group) { + sb.append("\n\t\t" + counter.getDisplayName() + "=" + + counter.getValue()); + } + } + return sb.toString(); + } + + /** + * Increments multiple counters by their amounts in another Counters + * instance. + * @param other the other Counters instance + */ + public synchronized void incrAllCounters(Counters other) { + for(Map.Entry rightEntry: other.groups.entrySet()) { + CounterGroup left = groups.get(rightEntry.getKey()); + CounterGroup right = rightEntry.getValue(); + if (left == null) { + left = new CounterGroup(right.getName(), right.getDisplayName()); + groups.put(rightEntry.getKey(), left); + } + left.incrAllCounters(right); + } + } + + public boolean equals(Object genericRight) { + if (genericRight instanceof Counters) { + Iterator right = ((Counters) genericRight).groups. + values().iterator(); + Iterator left = groups.values().iterator(); + while (left.hasNext()) { + if (!right.hasNext() || !left.next().equals(right.next())) { + return false; + } + } + return !right.hasNext(); + } + return false; + } + + public int hashCode() { + return groups.hashCode(); + } +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/ID.java b/src/mapred/org/apache/hadoop/mapreduce/ID.java new file mode 100644 index 0000000..49ca80c --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/ID.java @@ -0,0 +1,90 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import org.apache.hadoop.io.WritableComparable; + +/** + * A general identifier, which internally stores the id + * as an integer. This is the super class of {@link JobID}, + * {@link TaskID} and {@link TaskAttemptID}. + * + * @see JobID + * @see TaskID + * @see TaskAttemptID + */ +public abstract class ID implements WritableComparable { + protected static final char SEPARATOR = '_'; + protected int id; + + /** constructs an ID object from the given int */ + public ID(int id) { + this.id = id; + } + + protected ID() { + } + + /** returns the int which represents the identifier */ + public int getId() { + return id; + } + + @Override + public String toString() { + return String.valueOf(id); + } + + @Override + public int hashCode() { + return Integer.valueOf(id).hashCode(); + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if(o == null) + return false; + if (o.getClass() == this.getClass()) { + ID that = (ID) o; + return this.id == that.id; + } + else + return false; + } + + /** Compare IDs by associated numbers */ + public int compareTo(ID that) { + return this.id - that.id; + } + + public void readFields(DataInput in) throws IOException { + this.id = in.readInt(); + } + + public void write(DataOutput out) throws IOException { + out.writeInt(id); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/InputFormat.java b/src/mapred/org/apache/hadoop/mapreduce/InputFormat.java new file mode 100644 index 0000000..ae8d253 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/InputFormat.java @@ -0,0 +1,103 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce; + +import java.io.IOException; +import java.util.List; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; + +/** + * InputFormat describes the input-specification for a + * Map-Reduce job. + * + *

+ * <p>The Map-Reduce framework relies on the <code>InputFormat</code> of the
+ * job to:
+ * <ol>
+ *   <li>Validate the input-specification of the job.</li>
+ *   <li>Split-up the input file(s) into logical {@link InputSplit}s, each of
+ *   which is then assigned to an individual {@link Mapper}.</li>
+ *   <li>Provide the {@link RecordReader} implementation to be used to glean
+ *   input records from the logical <code>InputSplit</code> for processing by
+ *   the {@link Mapper}.</li>
+ * </ol>
+ *
+ * <p>The default behavior of file-based {@link InputFormat}s, typically
+ * sub-classes of {@link FileInputFormat}, is to split the
+ * input into logical {@link InputSplit}s based on the total size, in
+ * bytes, of the input files. However, the {@link FileSystem} blocksize of
+ * the input files is treated as an upper bound for input splits. A lower bound
+ * on the split size can be set via <code>mapred.min.split.size</code>.
+ *
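+ * <p>A minimal file-based implementation (an illustrative sketch, not part of
+ * this patch; it simply mirrors what
+ * {@link org.apache.hadoop.mapreduce.lib.input.TextInputFormat} does with
+ * {@link org.apache.hadoop.mapreduce.lib.input.LineRecordReader}, and inherits
+ * <code>getSplits()</code> from {@link FileInputFormat}):
+ * <pre>
+ *   public class LineInputFormat extends FileInputFormat<LongWritable, Text> {
+ *     public RecordReader<LongWritable, Text> createRecordReader(
+ *         InputSplit split, TaskAttemptContext context) {
+ *       return new LineRecordReader();   // one line of the file per record
+ *     }
+ *   }
+ * </pre>
+ *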
Clearly, logical splits based on input-size is insufficient for many + * applications since record boundaries are to respected. In such cases, the + * application has to also implement a {@link RecordReader} on whom lies the + * responsibility to respect record-boundaries and present a record-oriented + * view of the logical InputSplit to the individual task. + * + * @see InputSplit + * @see RecordReader + * @see FileInputFormat + */ +public abstract class InputFormat { + + /** + * Logically split the set of input files for the job. + * + *

+ * <p>Each {@link InputSplit} is then assigned to an individual {@link Mapper}
+ * for processing.
+ *
Note: The split is a logical split of the inputs and the + * input files are not physically split into chunks. For e.g. a split could + * be <input-file-path, start, offset> tuple. The InputFormat + * also creates the {@link RecordReader} to read the {@link InputSplit}. + * + * @param context job configuration. + * @return an array of {@link InputSplit}s for the job. + */ + public abstract + List getSplits(JobContext context + ) throws IOException, InterruptedException; + + /** + * Create a record reader for a given split. The framework will call + * {@link RecordReader#initialize(InputSplit, TaskAttemptContext)} before + * the split is used. + * @param split the split to be read + * @param context the information about the task + * @return a new record reader + * @throws IOException + * @throws InterruptedException + */ + public abstract + RecordReader createRecordReader(InputSplit split, + TaskAttemptContext context + ) throws IOException, + InterruptedException; + +} + diff --git a/src/mapred/org/apache/hadoop/mapreduce/InputSplit.java b/src/mapred/org/apache/hadoop/mapreduce/InputSplit.java new file mode 100644 index 0000000..f7747c7 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/InputSplit.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce; + +import java.io.IOException; + +import org.apache.hadoop.mapreduce.InputFormat; +import org.apache.hadoop.mapreduce.Mapper; +import org.apache.hadoop.mapreduce.RecordReader; + +/** + * InputSplit represents the data to be processed by an + * individual {@link Mapper}. + * + *
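+ * A concrete example is {@link org.apache.hadoop.mapreduce.lib.input.FileSplit},
+ * which records a file path plus a byte range and its hosts (the snippet below
+ * is illustrative only and not part of this patch):
+ * <pre>
+ *   FileSplit split = new FileSplit(new Path("/logs/part-00000"), 0,
+ *                                   64 * 1024 * 1024,
+ *                                   new String[] { "host1", "host2" });
+ *   split.getLength();      // 67108864
+ *   split.getLocations();   // { "host1", "host2" }
+ * </pre>
+ *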

Typically, it presents a byte-oriented view on the input and is the + * responsibility of {@link RecordReader} of the job to process this and present + * a record-oriented view. + * + * @see InputFormat + * @see RecordReader + */ +public abstract class InputSplit { + /** + * Get the size of the split, so that the input splits can be sorted by size. + * @return the number of bytes in the split + * @throws IOException + * @throws InterruptedException + */ + public abstract long getLength() throws IOException, InterruptedException; + + /** + * Get the list of nodes by name where the data for the split would be local. + * The locations do not need to be serialized. + * @return a new array of the node nodes. + * @throws IOException + * @throws InterruptedException + */ + public abstract + String[] getLocations() throws IOException, InterruptedException; +} \ No newline at end of file diff --git a/src/mapred/org/apache/hadoop/mapreduce/Job.java b/src/mapred/org/apache/hadoop/mapreduce/Job.java new file mode 100644 index 0000000..28e7cf6 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/Job.java @@ -0,0 +1,479 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.RawComparator; +import org.apache.hadoop.mapreduce.TaskAttemptID; +import org.apache.hadoop.mapred.JobClient; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.RunningJob; +import org.apache.hadoop.mapred.TaskCompletionEvent; + +/** + * The job submitter's view of the Job. It allows the user to configure the + * job, submit it, control its execution, and query the state. The set methods + * only work until the job is submitted, afterwards they will throw an + * IllegalStateException. + */ +public class Job extends JobContext { + public static enum JobState {DEFINE, RUNNING}; + private JobState state = JobState.DEFINE; + private JobClient jobClient; + private RunningJob info; + + public Job() throws IOException { + this(new Configuration()); + } + + public Job(Configuration conf) throws IOException { + super(conf, null); + jobClient = new JobClient((JobConf) getConfiguration()); + } + + public Job(Configuration conf, String jobName) throws IOException { + this(conf); + setJobName(jobName); + } + + private void ensureState(JobState state) throws IllegalStateException { + if (state != this.state) { + throw new IllegalStateException("Job in state "+ this.state + + " instead of " + state); + } + } + + /** + * Get the job identifier. + * + * @return the job identifier. 
+ */ + public JobID getID() { + ensureState(JobState.RUNNING); + return info.getID(); + } + + /** + * Set the number of reduce tasks for the job. + * @param tasks the number of reduce tasks + * @throws IllegalStateException if the job is submitted + */ + public void setNumReduceTasks(int tasks) throws IllegalStateException { + ensureState(JobState.DEFINE); + conf.setNumReduceTasks(tasks); + } + + /** + * Set the current working directory for the default file system. + * + * @param dir the new current working directory. + * @throws IllegalStateException if the job is submitted + */ + public void setWorkingDirectory(Path dir) throws IOException { + ensureState(JobState.DEFINE); + conf.setWorkingDirectory(dir); + } + + /** + * Set the {@link InputFormat} for the job. + * @param cls the InputFormat to use + * @throws IllegalStateException if the job is submitted + */ + public void setInputFormatClass(Class cls + ) throws IllegalStateException { + ensureState(JobState.DEFINE); + conf.setClass(INPUT_FORMAT_CLASS_ATTR, cls, InputFormat.class); + } + + /** + * Set the {@link OutputFormat} for the job. + * @param cls the OutputFormat to use + * @throws IllegalStateException if the job is submitted + */ + public void setOutputFormatClass(Class cls + ) throws IllegalStateException { + ensureState(JobState.DEFINE); + conf.setClass(OUTPUT_FORMAT_CLASS_ATTR, cls, OutputFormat.class); + } + + /** + * Set the {@link Mapper} for the job. + * @param cls the Mapper to use + * @throws IllegalStateException if the job is submitted + */ + public void setMapperClass(Class cls + ) throws IllegalStateException { + ensureState(JobState.DEFINE); + conf.setClass(MAP_CLASS_ATTR, cls, Mapper.class); + } + + /** + * Set the Jar by finding where a given class came from. + * @param cls the example class + */ + public void setJarByClass(Class cls) { + conf.setJarByClass(cls); + } + + /** + * Get the pathname of the job's jar. + * @return the pathname + */ + public String getJar() { + return conf.getJar(); + } + + /** + * Set the combiner class for the job. + * @param cls the combiner to use + * @throws IllegalStateException if the job is submitted + */ + public void setCombinerClass(Class cls + ) throws IllegalStateException { + ensureState(JobState.DEFINE); + conf.setClass(COMBINE_CLASS_ATTR, cls, Reducer.class); + } + + /** + * Set the {@link Reducer} for the job. + * @param cls the Reducer to use + * @throws IllegalStateException if the job is submitted + */ + public void setReducerClass(Class cls + ) throws IllegalStateException { + ensureState(JobState.DEFINE); + conf.setClass(REDUCE_CLASS_ATTR, cls, Reducer.class); + } + + /** + * Set the {@link Partitioner} for the job. + * @param cls the Partitioner to use + * @throws IllegalStateException if the job is submitted + */ + public void setPartitionerClass(Class cls + ) throws IllegalStateException { + ensureState(JobState.DEFINE); + conf.setClass(PARTITIONER_CLASS_ATTR, cls, Partitioner.class); + } + + /** + * Set the key class for the map output data. This allows the user to + * specify the map output key class to be different than the final output + * value class. + * + * @param theClass the map output key class. + * @throws IllegalStateException if the job is submitted + */ + public void setMapOutputKeyClass(Class theClass + ) throws IllegalStateException { + ensureState(JobState.DEFINE); + conf.setMapOutputKeyClass(theClass); + } + + /** + * Set the value class for the map output data. 
This allows the user to + * specify the map output value class to be different than the final output + * value class. + * + * @param theClass the map output value class. + * @throws IllegalStateException if the job is submitted + */ + public void setMapOutputValueClass(Class theClass + ) throws IllegalStateException { + ensureState(JobState.DEFINE); + conf.setMapOutputValueClass(theClass); + } + + /** + * Set the key class for the job output data. + * + * @param theClass the key class for the job output data. + * @throws IllegalStateException if the job is submitted + */ + public void setOutputKeyClass(Class theClass + ) throws IllegalStateException { + ensureState(JobState.DEFINE); + conf.setOutputKeyClass(theClass); + } + + /** + * Set the value class for job outputs. + * + * @param theClass the value class for job outputs. + * @throws IllegalStateException if the job is submitted + */ + public void setOutputValueClass(Class theClass + ) throws IllegalStateException { + ensureState(JobState.DEFINE); + conf.setOutputValueClass(theClass); + } + + /** + * Define the comparator that controls how the keys are sorted before they + * are passed to the {@link Reducer}. + * @param cls the raw comparator + * @throws IllegalStateException if the job is submitted + */ + public void setSortComparatorClass(Class cls + ) throws IllegalStateException { + ensureState(JobState.DEFINE); + conf.setOutputKeyComparatorClass(cls); + } + + /** + * Define the comparator that controls which keys are grouped together + * for a single call to + * {@link Reducer#reduce(Object, Iterable, + * org.apache.hadoop.mapreduce.Reducer.Context)} + * @param cls the raw comparator to use + * @throws IllegalStateException if the job is submitted + */ + public void setGroupingComparatorClass(Class cls + ) throws IllegalStateException { + ensureState(JobState.DEFINE); + conf.setOutputValueGroupingComparator(cls); + } + + /** + * Set the user-specified job name. + * + * @param name the job's new name. + * @throws IllegalStateException if the job is submitted + */ + public void setJobName(String name) throws IllegalStateException { + ensureState(JobState.DEFINE); + conf.setJobName(name); + } + + /** + * Get the URL where some job progress information will be displayed. + * + * @return the URL where some job progress information will be displayed. + */ + public String getTrackingURL() { + ensureState(JobState.RUNNING); + return info.getTrackingURL(); + } + + /** + * Get the progress of the job's setup, as a float between 0.0 + * and 1.0. When the job setup is completed, the function returns 1.0. + * + * @return the progress of the job's setup. + * @throws IOException + */ + public float setupProgress() throws IOException { + ensureState(JobState.RUNNING); + return info.setupProgress(); + } + + /** + * Get the progress of the job's map-tasks, as a float between 0.0 + * and 1.0. When all map tasks have completed, the function returns 1.0. + * + * @return the progress of the job's map-tasks. + * @throws IOException + */ + public float mapProgress() throws IOException { + ensureState(JobState.RUNNING); + return info.mapProgress(); + } + + /** + * Get the progress of the job's reduce-tasks, as a float between 0.0 + * and 1.0. When all reduce tasks have completed, the function returns 1.0. + * + * @return the progress of the job's reduce-tasks. 
+ * @throws IOException + */ + public float reduceProgress() throws IOException { + ensureState(JobState.RUNNING); + return info.reduceProgress(); + } + + /** + * Check if the job is finished or not. + * This is a non-blocking call. + * + * @return true if the job is complete, else false. + * @throws IOException + */ + public boolean isComplete() throws IOException { + ensureState(JobState.RUNNING); + return info.isComplete(); + } + + /** + * Check if the job completed successfully. + * + * @return true if the job succeeded, else false. + * @throws IOException + */ + public boolean isSuccessful() throws IOException { + ensureState(JobState.RUNNING); + return info.isSuccessful(); + } + + /** + * Kill the running job. Blocks until all job tasks have been + * killed as well. If the job is no longer running, it simply returns. + * + * @throws IOException + */ + public void killJob() throws IOException { + ensureState(JobState.RUNNING); + info.killJob(); + } + + /** + * Get events indicating completion (success/failure) of component tasks. + * + * @param startFrom index to start fetching events from + * @return an array of {@link TaskCompletionEvent}s + * @throws IOException + */ + public TaskCompletionEvent[] getTaskCompletionEvents(int startFrom + ) throws IOException { + ensureState(JobState.RUNNING); + return info.getTaskCompletionEvents(startFrom); + } + + /** + * Kill indicated task attempt. + * + * @param taskId the id of the task to be terminated. + * @throws IOException + */ + public void killTask(TaskAttemptID taskId) throws IOException { + ensureState(JobState.RUNNING); + info.killTask(org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId), + false); + } + + /** + * Fail indicated task attempt. + * + * @param taskId the id of the task to be terminated. + * @throws IOException + */ + public void failTask(TaskAttemptID taskId) throws IOException { + ensureState(JobState.RUNNING); + info.killTask(org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId), + true); + } + + /** + * Gets the counters for this job. + * + * @return the counters for this job. + * @throws IOException + */ + public Counters getCounters() throws IOException { + ensureState(JobState.RUNNING); + return new Counters(info.getCounters()); + } + + private void ensureNotSet(String attr, String msg) throws IOException { + if (conf.get(attr) != null) { + throw new IOException(attr + " is incompatible with " + msg + " mode."); + } + } + + /** + * Default to the new APIs unless they are explicitly set or the old mapper or + * reduce attributes are used. 
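+ * For example (an illustrative sketch, not part of this patch; MyMapper and
+ * MyReducer stand for any classes written against this new package), a job
+ * configured purely through this class ends up on the new API:
+ * <pre>
+ *   Job job = new Job(new Configuration(), "my job");
+ *   job.setMapperClass(MyMapper.class);     // MyMapper: any org.apache.hadoop.mapreduce.Mapper subclass
+ *   job.setReducerClass(MyReducer.class);   // MyReducer: any org.apache.hadoop.mapreduce.Reducer subclass
+ *   job.submit();   // setUseNewAPI() leaves mapred.mapper.new-api == true
+ * </pre>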
+ * @throws IOException if the configuration is inconsistant + */ + private void setUseNewAPI() throws IOException { + int numReduces = conf.getNumReduceTasks(); + String oldMapperClass = "mapred.mapper.class"; + String oldReduceClass = "mapred.reducer.class"; + conf.setBooleanIfUnset("mapred.mapper.new-api", + conf.get(oldMapperClass) == null); + if (conf.getUseNewMapper()) { + String mode = "new map API"; + ensureNotSet("mapred.input.format.class", mode); + ensureNotSet(oldMapperClass, mode); + if (numReduces != 0) { + ensureNotSet("mapred.partitioner.class", mode); + } else { + ensureNotSet("mapred.output.format.class", mode); + } + } else { + String mode = "map compatability"; + ensureNotSet(JobContext.INPUT_FORMAT_CLASS_ATTR, mode); + ensureNotSet(JobContext.MAP_CLASS_ATTR, mode); + if (numReduces != 0) { + ensureNotSet(JobContext.PARTITIONER_CLASS_ATTR, mode); + } else { + ensureNotSet(JobContext.OUTPUT_FORMAT_CLASS_ATTR, mode); + } + } + if (numReduces != 0) { + conf.setBooleanIfUnset("mapred.reducer.new-api", + conf.get(oldReduceClass) == null); + if (conf.getUseNewReducer()) { + String mode = "new reduce API"; + ensureNotSet("mapred.output.format.class", mode); + ensureNotSet(oldReduceClass, mode); + } else { + String mode = "reduce compatability"; + ensureNotSet(JobContext.OUTPUT_FORMAT_CLASS_ATTR, mode); + ensureNotSet(JobContext.REDUCE_CLASS_ATTR, mode); + } + } + } + + /** + * Submit the job to the cluster and return immediately. + * @throws IOException + */ + public void submit() throws IOException, InterruptedException, + ClassNotFoundException { + ensureState(JobState.DEFINE); + setUseNewAPI(); + info = jobClient.submitJobInternal(conf); + state = JobState.RUNNING; + } + + /** + * Submit the job to the cluster and wait for it to finish. + * @param verbose print the progress to the user + * @return true if the job succeeded + * @throws IOException thrown if the communication with the + * JobTracker is lost + */ + public boolean waitForCompletion(boolean verbose + ) throws IOException, InterruptedException, + ClassNotFoundException { + if (state == JobState.DEFINE) { + submit(); + } + if (verbose) { + jobClient.monitorAndPrintJob(conf, info); + } else { + info.waitForCompletion(); + } + return isSuccessful(); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/JobContext.java b/src/mapred/org/apache/hadoop/mapreduce/JobContext.java new file mode 100644 index 0000000..0800c05 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/JobContext.java @@ -0,0 +1,236 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapreduce; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.RawComparator; +import org.apache.hadoop.mapreduce.Mapper; +import org.apache.hadoop.mapreduce.lib.input.TextInputFormat; +import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat; +import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner; + +/** + * A read-only view of the job that is provided to the tasks while they + * are running. + */ +public class JobContext { + // Put all of the attribute names in here so that Job and JobContext are + // consistent. + protected static final String INPUT_FORMAT_CLASS_ATTR = + "mapreduce.inputformat.class"; + protected static final String MAP_CLASS_ATTR = "mapreduce.map.class"; + protected static final String COMBINE_CLASS_ATTR = "mapreduce.combine.class"; + protected static final String REDUCE_CLASS_ATTR = "mapreduce.reduce.class"; + protected static final String OUTPUT_FORMAT_CLASS_ATTR = + "mapreduce.outputformat.class"; + protected static final String PARTITIONER_CLASS_ATTR = + "mapreduce.partitioner.class"; + + protected final org.apache.hadoop.mapred.JobConf conf; + private final JobID jobId; + + public JobContext(Configuration conf, JobID jobId) { + this.conf = new org.apache.hadoop.mapred.JobConf(conf); + this.jobId = jobId; + } + + /** + * Return the configuration for the job. + * @return the shared configuration object + */ + public Configuration getConfiguration() { + return conf; + } + + /** + * Get the unique ID for the job. + * @return the object with the job id + */ + public JobID getJobID() { + return jobId; + } + + /** + * Get configured the number of reduce tasks for this job. Defaults to + * 1. + * @return the number of reduce tasks for this job. + */ + public int getNumReduceTasks() { + return conf.getNumReduceTasks(); + } + + /** + * Get the current working directory for the default file system. + * + * @return the directory name. + */ + public Path getWorkingDirectory() throws IOException { + return conf.getWorkingDirectory(); + } + + /** + * Get the key class for the job output data. + * @return the key class for the job output data. + */ + public Class getOutputKeyClass() { + return conf.getOutputKeyClass(); + } + + /** + * Get the value class for job outputs. + * @return the value class for job outputs. + */ + public Class getOutputValueClass() { + return conf.getOutputValueClass(); + } + + /** + * Get the key class for the map output data. If it is not set, use the + * (final) output key class. This allows the map output key class to be + * different than the final output key class. + * @return the map output key class. + */ + public Class getMapOutputKeyClass() { + return conf.getMapOutputKeyClass(); + } + + /** + * Get the value class for the map output data. If it is not set, use the + * (final) output value class This allows the map output value class to be + * different than the final output value class. + * + * @return the map output value class. + */ + public Class getMapOutputValueClass() { + return conf.getMapOutputValueClass(); + } + + /** + * Get the user-specified job name. This is only used to identify the + * job to the user. + * + * @return the job's name, defaulting to "". + */ + public String getJobName() { + return conf.getJobName(); + } + + /** + * Get the {@link InputFormat} class for the job. + * + * @return the {@link InputFormat} class for the job. 
+ */ + @SuppressWarnings("unchecked") + public Class> getInputFormatClass() + throws ClassNotFoundException { + return (Class>) + conf.getClass(INPUT_FORMAT_CLASS_ATTR, TextInputFormat.class); + } + + /** + * Get the {@link Mapper} class for the job. + * + * @return the {@link Mapper} class for the job. + */ + @SuppressWarnings("unchecked") + public Class> getMapperClass() + throws ClassNotFoundException { + return (Class>) + conf.getClass(MAP_CLASS_ATTR, Mapper.class); + } + + /** + * Get the combiner class for the job. + * + * @return the combiner class for the job. + */ + @SuppressWarnings("unchecked") + public Class> getCombinerClass() + throws ClassNotFoundException { + return (Class>) + conf.getClass(COMBINE_CLASS_ATTR, null); + } + + /** + * Get the {@link Reducer} class for the job. + * + * @return the {@link Reducer} class for the job. + */ + @SuppressWarnings("unchecked") + public Class> getReducerClass() + throws ClassNotFoundException { + return (Class>) + conf.getClass(REDUCE_CLASS_ATTR, Reducer.class); + } + + /** + * Get the {@link OutputFormat} class for the job. + * + * @return the {@link OutputFormat} class for the job. + */ + @SuppressWarnings("unchecked") + public Class> getOutputFormatClass() + throws ClassNotFoundException { + return (Class>) + conf.getClass(OUTPUT_FORMAT_CLASS_ATTR, TextOutputFormat.class); + } + + /** + * Get the {@link Partitioner} class for the job. + * + * @return the {@link Partitioner} class for the job. + */ + @SuppressWarnings("unchecked") + public Class> getPartitionerClass() + throws ClassNotFoundException { + return (Class>) + conf.getClass(PARTITIONER_CLASS_ATTR, HashPartitioner.class); + } + + /** + * Get the {@link RawComparator} comparator used to compare keys. + * + * @return the {@link RawComparator} comparator used to compare keys. + */ + public RawComparator getSortComparator() { + return conf.getOutputKeyComparator(); + } + + /** + * Get the pathname of the job's jar. + * @return the pathname + */ + public String getJar() { + return conf.getJar(); + } + + /** + * Get the user defined {@link RawComparator} comparator for + * grouping keys of inputs to the reduce. + * + * @return comparator set by the user for grouping values. + * @see Job#setGroupingComparatorClass(Class) for details. + */ + public RawComparator getGroupingComparator() { + return conf.getOutputValueGroupingComparator(); + } +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/JobID.java b/src/mapred/org/apache/hadoop/mapreduce/JobID.java new file mode 100644 index 0000000..63df3b1 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/JobID.java @@ -0,0 +1,153 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapreduce; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.text.NumberFormat; + +import org.apache.hadoop.io.Text; + +/** + * JobID represents the immutable and unique identifier for + * the job. JobID consists of two parts. First part + * represents the jobtracker identifier, so that jobID to jobtracker map + * is defined. For cluster setup this string is the jobtracker + * start time, for local setting, it is "local". + * Second part of the JobID is the job number.
+ * An example JobID is:
+ * <code>job_200707121733_0003</code>, which represents the third job
+ * running at the jobtracker started at <code>200707121733</code>.
+ *
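+ * Parsing that identifier back (an illustrative sketch, not part of this
+ * patch; it only uses the {@link #forName(String)} factory declared below):
+ * <pre>
+ *   JobID id = JobID.forName("job_200707121733_0003");
+ *   id.getJtIdentifier();   // "200707121733"
+ *   id.getId();             // 3
+ * </pre>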
+ * Applications should never construct or parse JobID strings, but rather + * use appropriate constructors or {@link #forName(String)} method. + * + * @see TaskID + * @see TaskAttemptID + * @see org.apache.hadoop.mapred.JobTracker#getNewJobId() + * @see org.apache.hadoop.mapred.JobTracker#getStartTime() + */ +public class JobID extends org.apache.hadoop.mapred.ID + implements Comparable { + protected static final String JOB = "job"; + private final Text jtIdentifier; + + protected static final NumberFormat idFormat = NumberFormat.getInstance(); + static { + idFormat.setGroupingUsed(false); + idFormat.setMinimumIntegerDigits(4); + } + + /** + * Constructs a JobID object + * @param jtIdentifier jobTracker identifier + * @param id job number + */ + public JobID(String jtIdentifier, int id) { + super(id); + this.jtIdentifier = new Text(jtIdentifier); + } + + public JobID() { + jtIdentifier = new Text(); + } + + public String getJtIdentifier() { + return jtIdentifier.toString(); + } + + @Override + public boolean equals(Object o) { + if (!super.equals(o)) + return false; + + JobID that = (JobID)o; + return this.jtIdentifier.equals(that.jtIdentifier); + } + + /**Compare JobIds by first jtIdentifiers, then by job numbers*/ + @Override + public int compareTo(ID o) { + JobID that = (JobID)o; + int jtComp = this.jtIdentifier.compareTo(that.jtIdentifier); + if(jtComp == 0) { + return this.id - that.id; + } + else return jtComp; + } + + /** + * Add the stuff after the "job" prefix to the given builder. This is useful, + * because the sub-ids use this substring at the start of their string. + * @param builder the builder to append to + * @return the builder that was passed in + */ + public StringBuilder appendTo(StringBuilder builder) { + builder.append(SEPARATOR); + builder.append(jtIdentifier); + builder.append(SEPARATOR); + builder.append(idFormat.format(id)); + return builder; + } + + @Override + public int hashCode() { + return jtIdentifier.hashCode() + id; + } + + @Override + public String toString() { + return appendTo(new StringBuilder(JOB)).toString(); + } + + @Override + public void readFields(DataInput in) throws IOException { + super.readFields(in); + this.jtIdentifier.readFields(in); + } + + @Override + public void write(DataOutput out) throws IOException { + super.write(out); + jtIdentifier.write(out); + } + + /** Construct a JobId object from given string + * @return constructed JobId object or null if the given String is null + * @throws IllegalArgumentException if the given string is malformed + */ + public static JobID forName(String str) throws IllegalArgumentException { + if(str == null) + return null; + try { + String[] parts = str.split("_"); + if(parts.length == 3) { + if(parts[0].equals(JOB)) { + return new org.apache.hadoop.mapred.JobID(parts[1], + Integer.parseInt(parts[2])); + } + } + }catch (Exception ex) {//fall below + } + throw new IllegalArgumentException("JobId string : " + str + + " is not properly formed"); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/JobStatus.java b/src/mapred/org/apache/hadoop/mapreduce/JobStatus.java new file mode 100644 index 0000000..774af59 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/JobStatus.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce; + +/** + * Describes the current status of a job. + */ +public class JobStatus { + /** + * Current state of the job + */ + public static enum State { + RUNNING(1), + SUCCEEDED(2), + FAILED(3), + PREP(4), + KILLED(5); + + int value; + + State(int value) { + this.value = value; + } + + public int getValue() { + return value; + } + } +} \ No newline at end of file diff --git a/src/mapred/org/apache/hadoop/mapreduce/MapContext.java b/src/mapred/org/apache/hadoop/mapreduce/MapContext.java new file mode 100644 index 0000000..2f3990f --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/MapContext.java @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; + +/** + * The context that is given to the {@link Mapper}. + * @param the key input type to the Mapper + * @param the value input type to the Mapper + * @param the key output type from the Mapper + * @param the value output type from the Mapper + */ +public class MapContext + extends TaskInputOutputContext { + private RecordReader reader; + private InputSplit split; + + public MapContext(Configuration conf, TaskAttemptID taskid, + RecordReader reader, + RecordWriter writer, + OutputCommitter committer, + StatusReporter reporter, + InputSplit split) { + super(conf, taskid, writer, committer, reporter); + this.reader = reader; + this.split = split; + } + + /** + * Get the input split for this map. 
+ */ + public InputSplit getInputSplit() { + return split; + } + + @Override + public KEYIN getCurrentKey() throws IOException, InterruptedException { + return reader.getCurrentKey(); + } + + @Override + public VALUEIN getCurrentValue() throws IOException, InterruptedException { + return reader.getCurrentValue(); + } + + @Override + public boolean nextKeyValue() throws IOException, InterruptedException { + return reader.nextKeyValue(); + } + +} + \ No newline at end of file diff --git a/src/mapred/org/apache/hadoop/mapreduce/Mapper.java b/src/mapred/org/apache/hadoop/mapreduce/Mapper.java new file mode 100644 index 0000000..a49d29c --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/Mapper.java @@ -0,0 +1,148 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.RawComparator; +import org.apache.hadoop.io.compress.CompressionCodec; + +/** + * Maps input key/value pairs to a set of intermediate key/value pairs. + * + *

+ * <p>Maps are the individual tasks which transform input records into
+ * intermediate records. The transformed intermediate records need not be of
+ * the same type as the input records. A given input pair may map to zero or
+ * many output pairs.</p>
+ *
+ * <p>The Hadoop Map-Reduce framework spawns one map task for each
+ * {@link InputSplit} generated by the {@link InputFormat} for the job.
+ * <code>Mapper</code> implementations can access the {@link Configuration} for
+ * the job via {@link JobContext#getConfiguration()}.</p>
+ *
+ * <p>The framework first calls
+ * {@link #setup(org.apache.hadoop.mapreduce.Mapper.Context)}, followed by
+ * {@link #map(Object, Object, Context)}
+ * for each key/value pair in the <code>InputSplit</code>. Finally
+ * {@link #cleanup(Context)} is called.</p>
+ *
+ * <p>All intermediate values associated with a given output key are
+ * subsequently grouped by the framework, and passed to a {@link Reducer} to
+ * determine the final output. Users can control the sorting and grouping by
+ * specifying two key {@link RawComparator} classes.</p>
+ *
+ * <p>The <code>Mapper</code> outputs are partitioned per
+ * <code>Reducer</code>. Users can control which keys (and hence records) go to
+ * which <code>Reducer</code> by implementing a custom {@link Partitioner}.</p>
+ *
+ * <p>Users can optionally specify a <code>combiner</code>, via
+ * {@link Job#setCombinerClass(Class)}, to perform local aggregation of the
+ * intermediate outputs, which helps to cut down the amount of data transferred
+ * from the <code>Mapper</code> to the <code>Reducer</code>.</p>
+ *
+ * <p>Applications can specify if and how the intermediate outputs are to be
+ * compressed and which {@link CompressionCodec}s are to be used via the
+ * <code>Configuration</code>.</p>
+ *
+ * <p>If the job has zero reduces then the output of the <code>Mapper</code>
+ * is directly written to the {@link OutputFormat} without sorting by
+ * keys.</p>
+ *
+ * <p>Example:</p>
+ * <pre>
+ * public class TokenCounterMapper
+ *     extends Mapper<Object, Text, Text, IntWritable> {
+ *
+ *   private final static IntWritable one = new IntWritable(1);
+ *   private Text word = new Text();
+ *
+ *   public void map(Object key, Text value, Context context)
+ *       throws IOException, InterruptedException {
+ *     StringTokenizer itr = new StringTokenizer(value.toString());
+ *     while (itr.hasMoreTokens()) {
+ *       word.set(itr.nextToken());
+ *       context.write(word, one);
+ *     }
+ *   }
+ * }
+ * </pre>
+ *
+ * <p>Applications may override the {@link #run(Context)} method to exert
+ * greater control on map processing, e.g. for multi-threaded
+ * <code>Mapper</code>s.</p>
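+ *
+ * <p>A rough sketch of such an override (it mirrors the default
+ * <code>run</code> implementation defined further down in this class, adding
+ * only a record counter; the status message is illustrative):</p>
+ * <pre>
+ * public void run(Context context) throws IOException, InterruptedException {
+ *   setup(context);
+ *   long records = 0;
+ *   while (context.nextKeyValue()) {
+ *     records++;
+ *     map(context.getCurrentKey(), context.getCurrentValue(), context);
+ *   }
+ *   context.setStatus("processed " + records + " records");  // illustrative
+ *   cleanup(context);
+ * }
+ * </pre>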

+ * + * @see InputFormat + * @see JobContext + * @see Partitioner + * @see Reducer + */ +public class Mapper { + + public class Context + extends MapContext { + public Context(Configuration conf, TaskAttemptID taskid, + RecordReader reader, + RecordWriter writer, + OutputCommitter committer, + StatusReporter reporter, + InputSplit split) throws IOException, InterruptedException { + super(conf, taskid, reader, writer, committer, reporter, split); + } + } + + /** + * Called once at the beginning of the task. + */ + protected void setup(Context context + ) throws IOException, InterruptedException { + // NOTHING + } + + /** + * Called once for each key/value pair in the input split. Most applications + * should override this, but the default is the identity function. + */ + @SuppressWarnings("unchecked") + protected void map(KEYIN key, VALUEIN value, + Context context) throws IOException, InterruptedException { + context.write((KEYOUT) key, (VALUEOUT) value); + } + + /** + * Called once at the end of the task. + */ + protected void cleanup(Context context + ) throws IOException, InterruptedException { + // NOTHING + } + + /** + * Expert users can override this method for more complete control over the + * execution of the Mapper. + * @param context + * @throws IOException + */ + public void run(Context context) throws IOException, InterruptedException { + setup(context); + while (context.nextKeyValue()) { + map(context.getCurrentKey(), context.getCurrentValue(), context); + } + cleanup(context); + } +} \ No newline at end of file diff --git a/src/mapred/org/apache/hadoop/mapreduce/OutputCommitter.java b/src/mapred/org/apache/hadoop/mapreduce/OutputCommitter.java new file mode 100644 index 0000000..7e0ff4d --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/OutputCommitter.java @@ -0,0 +1,140 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce; + +import java.io.IOException; + +/** + * OutputCommitter describes the commit of task output for a + * Map-Reduce job. + * + *

+ * <p>The Map-Reduce framework relies on the <code>OutputCommitter</code> of
+ * the job to:</p>
+ * <ol>
+ *   <li>
+ *   Setup the job during initialization. For example, create the temporary
+ *   output directory for the job during the initialization of the job.
+ *   </li>
+ *   <li>
+ *   Cleanup the job after the job completion. For example, remove the
+ *   temporary output directory after the job completion.
+ *   </li>
+ *   <li>
+ *   Setup the task temporary output.
+ *   </li>
+ *   <li>
+ *   Check whether a task needs a commit. This is to avoid the commit
+ *   procedure if a task does not need commit.
+ *   </li>
+ *   <li>
+ *   Commit of the task output.
+ *   </li>
+ *   <li>
+ *   Discard the task commit.
+ *   </li>
+ * </ol>
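+ *
+ * <p>A simplified sketch of the order in which these calls are made for a
+ * successful task (illustrative only, not the framework's actual driver
+ * code; <code>jobContext</code> and <code>taskContext</code> are assumed to
+ * be supplied by the caller):</p>
+ * <pre>
+ * committer.setupJob(jobContext);
+ * committer.setupTask(taskContext);
+ * // ... the task runs and writes its temporary output ...
+ * if (committer.needsTaskCommit(taskContext)) {
+ *   committer.commitTask(taskContext);
+ * } else {
+ *   committer.abortTask(taskContext);
+ * }
+ * committer.commitJob(jobContext);
+ * // on failure: committer.abortJob(jobContext, JobStatus.State.FAILED);
+ * </pre>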
+ * + * @see org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter + * @see JobContext + * @see TaskAttemptContext + * + */ +public abstract class OutputCommitter { + /** + * For the framework to setup the job output during initialization + * + * @param jobContext Context of the job whose output is being written. + * @throws IOException if temporary output could not be created + */ + public abstract void setupJob(JobContext jobContext) throws IOException; + + /** + * For cleaning up the job's output after job completion. Note that this + * is invoked for jobs with final run state as + * {@link JobStatus.State#SUCCEEDED} + * + * @param jobContext Context of the job whose output is being written. + * @throws IOException + */ + public void commitJob(JobContext jobContext) throws IOException { + cleanupJob(jobContext); + } + + /** + * For cleaning up the job's output after job completion + * @deprecated use {@link #commitJob(JobContext)} or + * {@link #abortJob(JobContext, JobStatus.State)} instead + */ + @Deprecated + public void cleanupJob(JobContext context) throws IOException { } + + /** + * For aborting an unsuccessful job's output. Note that this is invoked for + * jobs with final run state as {@link JobStatus.State#FAILED} or + * {@link JobStatus.State#KILLED}. + + * @param jobContext Context of the job whose output is being written. + * @param state final run state of the job, should be either + * {@link JobStatus.State#KILLED} or {@link JobStatus.State#FAILED} + * @throws IOException + */ + public void abortJob(JobContext jobContext, JobStatus.State state) + throws IOException { + cleanupJob(jobContext); + } + + /** + * Sets up output for the task. + * + * @param taskContext Context of the task whose output is being written. + * @throws IOException + */ + public abstract void setupTask(TaskAttemptContext taskContext) + throws IOException; + + /** + * Check whether task needs a commit + * + * @param taskContext + * @return true/false + * @throws IOException + */ + public abstract boolean needsTaskCommit(TaskAttemptContext taskContext) + throws IOException; + + /** + * To promote the task's temporary output to final output location + * + * The task's output is moved to the job's output directory. + * + * @param taskContext Context of the task whose output is being written. + * @throws IOException if commit is not + */ + public abstract void commitTask(TaskAttemptContext taskContext) + throws IOException; + + /** + * Discard the task output + * + * @param taskContext + * @throws IOException + */ + public abstract void abortTask(TaskAttemptContext taskContext) + throws IOException; +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/OutputFormat.java b/src/mapred/org/apache/hadoop/mapreduce/OutputFormat.java new file mode 100644 index 0000000..66a7253 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/OutputFormat.java @@ -0,0 +1,84 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce; + +import java.io.IOException; + +import org.apache.hadoop.fs.FileSystem; + +/** + * OutputFormat describes the output-specification for a + * Map-Reduce job. + * + *

+ * <p>The Map-Reduce framework relies on the <code>OutputFormat</code> of the
+ * job to:</p>
+ * <ol>
+ *   <li>
+ *   Validate the output-specification of the job. For example, check that the
+ *   output directory doesn't already exist.
+ *   </li>
+ *   <li>
+ *   Provide the {@link RecordWriter} implementation to be used to write out
+ *   the output files of the job. Output files are stored in a
+ *   {@link FileSystem}.
+ *   </li>
+ * </ol>
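+ *
+ * <p>A driver typically selects the output format and destination with a few
+ * calls on the {@link Job}; a brief sketch, assuming the library's
+ * <code>TextOutputFormat</code> and <code>FileOutputFormat</code> and a
+ * placeholder output path:</p>
+ * <pre>
+ * job.setOutputFormatClass(TextOutputFormat.class);
+ * job.setOutputKeyClass(Text.class);
+ * job.setOutputValueClass(IntWritable.class);
+ * FileOutputFormat.setOutputPath(job, new Path("/user/hadoop/output"));  // placeholder path
+ * </pre>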
+ * + * @see RecordWriter + */ +public abstract class OutputFormat { + + /** + * Get the {@link RecordWriter} for the given task. + * + * @param context the information about the current task. + * @return a {@link RecordWriter} to write the output for the job. + * @throws IOException + */ + public abstract RecordWriter + getRecordWriter(TaskAttemptContext context + ) throws IOException, InterruptedException; + + /** + * Check for validity of the output-specification for the job. + * + *

+ * <p>This is to validate the output specification for the job when it is
+ * submitted. Typically checks that it does not already exist, throwing
+ * an exception when it already exists, so that output is not overwritten.</p>

+ * + * @param context information about the job + * @throws IOException when output should not be attempted + */ + public abstract void checkOutputSpecs(JobContext context + ) throws IOException, + InterruptedException; + + /** + * Get the output committer for this output format. This is responsible + * for ensuring the output is committed correctly. + * @param context the task context + * @return an output committer + * @throws IOException + * @throws InterruptedException + */ + public abstract + OutputCommitter getOutputCommitter(TaskAttemptContext context + ) throws IOException, InterruptedException; +} + diff --git a/src/mapred/org/apache/hadoop/mapreduce/Partitioner.java b/src/mapred/org/apache/hadoop/mapreduce/Partitioner.java new file mode 100644 index 0000000..a8ded0f --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/Partitioner.java @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce; + +/** + * Partitions the key space. + * + *

Partitioner controls the partitioning of the keys of the + * intermediate map-outputs. The key (or a subset of the key) is used to derive + * the partition, typically by a hash function. The total number of partitions + * is the same as the number of reduce tasks for the job. Hence this controls + * which of the m reduce tasks the intermediate key (and hence the + * record) is sent for reduction.
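+ *
+ * <p>A minimal hash-based implementation might look like the sketch below
+ * (the <code>ModuloPartitioner</code> name is only illustrative; the
+ * library's <code>HashPartitioner</code> takes the same approach):</p>
+ * <pre>
+ * public class ModuloPartitioner<K, V> extends Partitioner<K, V> {
+ *   public int getPartition(K key, V value, int numPartitions) {
+ *     // mask the sign bit so the result falls in [0, numPartitions)
+ *     return (key.hashCode() & Integer.MAX_VALUE) % numPartitions;
+ *   }
+ * }
+ * </pre>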

+ *
+ * @see Reducer
+ */
+public abstract class Partitioner<KEY, VALUE> {
+
+  /**
+   * Get the partition number for a given key (hence record) given the total
+   * number of partitions i.e. number of reduce-tasks for the job.
+   *

+   * <p>Typically a hash function on all or a subset of the key.</p>

+ * + * @param key the key to be partioned. + * @param value the entry value. + * @param numPartitions the total number of partitions. + * @return the partition number for the key. + */ + public abstract int getPartition(KEY key, VALUE value, int numPartitions); + +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/RecordReader.java b/src/mapred/org/apache/hadoop/mapreduce/RecordReader.java new file mode 100644 index 0000000..079d58b --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/RecordReader.java @@ -0,0 +1,82 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce; + +import java.io.Closeable; +import java.io.IOException; + +/** + * The record reader breaks the data into key/value pairs for input to the + * {@link Mapper}. + * @param + * @param + */ +public abstract class RecordReader implements Closeable { + + /** + * Called once at initialization. + * @param split the split that defines the range of records to read + * @param context the information about the task + * @throws IOException + * @throws InterruptedException + */ + public abstract void initialize(InputSplit split, + TaskAttemptContext context + ) throws IOException, InterruptedException; + + /** + * Read the next key, value pair. + * @return true if a key/value pair was read + * @throws IOException + * @throws InterruptedException + */ + public abstract + boolean nextKeyValue() throws IOException, InterruptedException; + + /** + * Get the current key + * @return the current key or null if there is no current key + * @throws IOException + * @throws InterruptedException + */ + public abstract + KEYIN getCurrentKey() throws IOException, InterruptedException; + + /** + * Get the current value. + * @return the object that was read + * @throws IOException + * @throws InterruptedException + */ + public abstract + VALUEIN getCurrentValue() throws IOException, InterruptedException; + + /** + * The current progress of the record reader through its data. + * @return a number between 0.0 and 1.0 that is the fraction of the data read + * @throws IOException + * @throws InterruptedException + */ + public abstract float getProgress() throws IOException, InterruptedException; + + /** + * Close the record reader. + */ + public abstract void close() throws IOException; +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/RecordWriter.java b/src/mapred/org/apache/hadoop/mapreduce/RecordWriter.java new file mode 100644 index 0000000..971c962 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/RecordWriter.java @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce; + +import java.io.IOException; + +import org.apache.hadoop.fs.FileSystem; + +/** + * RecordWriter writes the output <key, value> pairs + * to an output file. + + *

RecordWriter implementations write the job outputs to the + * {@link FileSystem}. + * + * @see OutputFormat + */ +public abstract class RecordWriter { + /** + * Writes a key/value pair. + * + * @param key the key to write. + * @param value the value to write. + * @throws IOException + */ + public abstract void write(K key, V value + ) throws IOException, InterruptedException; + + /** + * Close this RecordWriter to future operations. + * + * @param context the context of the task + * @throws IOException + */ + public abstract void close(TaskAttemptContext context + ) throws IOException, InterruptedException; +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/ReduceContext.java b/src/mapred/org/apache/hadoop/mapreduce/ReduceContext.java new file mode 100644 index 0000000..8f08f0a --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/ReduceContext.java @@ -0,0 +1,198 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce; + +import java.io.IOException; +import java.util.Iterator; +import java.util.NoSuchElementException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.BytesWritable; +import org.apache.hadoop.io.DataInputBuffer; +import org.apache.hadoop.io.RawComparator; +import org.apache.hadoop.io.serializer.Deserializer; +import org.apache.hadoop.io.serializer.SerializationFactory; +import org.apache.hadoop.mapred.RawKeyValueIterator; +import org.apache.hadoop.util.Progressable; + +/** + * The context passed to the {@link Reducer}. 
+ * @param the class of the input keys + * @param the class of the input values + * @param the class of the output keys + * @param the class of the output values + */ +public class ReduceContext + extends TaskInputOutputContext { + private RawKeyValueIterator input; + private Counter inputKeyCounter; + private Counter inputValueCounter; + private RawComparator comparator; + private KEYIN key; // current key + private VALUEIN value; // current value + private boolean firstValue = false; // first value in key + private boolean nextKeyIsSame = false; // more w/ this key + private boolean hasMore; // more in file + protected Progressable reporter; + private Deserializer keyDeserializer; + private Deserializer valueDeserializer; + private DataInputBuffer buffer = new DataInputBuffer(); + private BytesWritable currentRawKey = new BytesWritable(); + private ValueIterable iterable = new ValueIterable(); + + public ReduceContext(Configuration conf, TaskAttemptID taskid, + RawKeyValueIterator input, + Counter inputKeyCounter, + Counter inputValueCounter, + RecordWriter output, + OutputCommitter committer, + StatusReporter reporter, + RawComparator comparator, + Class keyClass, + Class valueClass + ) throws InterruptedException, IOException{ + super(conf, taskid, output, committer, reporter); + this.input = input; + this.inputKeyCounter = inputKeyCounter; + this.inputValueCounter = inputValueCounter; + this.comparator = comparator; + SerializationFactory serializationFactory = new SerializationFactory(conf); + this.keyDeserializer = serializationFactory.getDeserializer(keyClass); + this.keyDeserializer.open(buffer); + this.valueDeserializer = serializationFactory.getDeserializer(valueClass); + this.valueDeserializer.open(buffer); + hasMore = input.next(); + } + + /** Start processing next unique key. */ + public boolean nextKey() throws IOException,InterruptedException { + while (hasMore && nextKeyIsSame) { + nextKeyValue(); + } + if (hasMore) { + if (inputKeyCounter != null) { + inputKeyCounter.increment(1); + } + return nextKeyValue(); + } else { + return false; + } + } + + /** + * Advance to the next key/value pair. 
+ */ + @Override + public boolean nextKeyValue() throws IOException, InterruptedException { + if (!hasMore) { + key = null; + value = null; + return false; + } + firstValue = !nextKeyIsSame; + DataInputBuffer next = input.getKey(); + currentRawKey.set(next.getData(), next.getPosition(), + next.getLength() - next.getPosition()); + buffer.reset(currentRawKey.getBytes(), 0, currentRawKey.getLength()); + key = keyDeserializer.deserialize(key); + next = input.getValue(); + buffer.reset(next.getData(), next.getPosition(), next.getLength()); + value = valueDeserializer.deserialize(value); + hasMore = input.next(); + if (hasMore) { + next = input.getKey(); + nextKeyIsSame = comparator.compare(currentRawKey.getBytes(), 0, + currentRawKey.getLength(), + next.getData(), + next.getPosition(), + next.getLength() - next.getPosition() + ) == 0; + } else { + nextKeyIsSame = false; + } + inputValueCounter.increment(1); + return true; + } + + public KEYIN getCurrentKey() { + return key; + } + + @Override + public VALUEIN getCurrentValue() { + return value; + } + + protected class ValueIterator implements Iterator { + + @Override + public boolean hasNext() { + return firstValue || nextKeyIsSame; + } + + @Override + public VALUEIN next() { + // if this is the first record, we don't need to advance + if (firstValue) { + firstValue = false; + return value; + } + // if this isn't the first record and the next key is different, they + // can't advance it here. + if (!nextKeyIsSame) { + throw new NoSuchElementException("iterate past last value"); + } + // otherwise, go to the next key/value pair + try { + nextKeyValue(); + return value; + } catch (IOException ie) { + throw new RuntimeException("next value iterator failed", ie); + } catch (InterruptedException ie) { + // this is bad, but we can't modify the exception list of java.util + throw new RuntimeException("next value iterator interrupted", ie); + } + } + + @Override + public void remove() { + throw new UnsupportedOperationException("remove not implemented"); + } + + } + + protected class ValueIterable implements Iterable { + private ValueIterator iterator = new ValueIterator(); + @Override + public Iterator iterator() { + return iterator; + } + } + + /** + * Iterate through the values for the current key, reusing the same value + * object, which is stored in the context. + * @return the series of values associated with the current key. All of the + * objects returned directly and indirectly from this method are reused. + */ + public + Iterable getValues() throws IOException, InterruptedException { + return iterable; + } +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/Reducer.java b/src/mapred/org/apache/hadoop/mapreduce/Reducer.java new file mode 100644 index 0000000..583135a --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/Reducer.java @@ -0,0 +1,180 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.RawComparator; +import org.apache.hadoop.mapred.RawKeyValueIterator; + +/** + * Reduces a set of intermediate values which share a key to a smaller set of + * values. + * + *

+ * <p><code>Reducer</code> implementations
+ * can access the {@link Configuration} for the job via the
+ * {@link JobContext#getConfiguration()} method.</p>
+ *
+ * <p><code>Reducer</code> has 3 primary phases:</p>
+ * <ol>
+ *   <li>
+ *   <b>Shuffle</b>
+ *
+ *   <p>The <code>Reducer</code> copies the sorted output from each
+ *   {@link Mapper} using HTTP across the network.</p>
+ *   </li>
+ *
+ *   <li>
+ *   <b>Sort</b>
+ *
+ *   <p>The framework merge-sorts <code>Reducer</code> inputs by
+ *   <code>key</code>s
+ *   (since different <code>Mapper</code>s may have output the same key).</p>
+ *
+ *   <p>The shuffle and sort phases occur simultaneously, i.e. while outputs
+ *   are being fetched they are merged.</p>
+ *
+ *   <b>SecondarySort</b>
+ *
+ *   <p>To achieve a secondary sort on the values returned by the value
+ *   iterator, the application should extend the key with the secondary
+ *   key and define a grouping comparator. The keys will be sorted using the
+ *   entire key, but will be grouped using the grouping comparator to decide
+ *   which keys and values are sent in the same call to reduce. The grouping
+ *   comparator is specified via
+ *   {@link Job#setGroupingComparatorClass(Class)}. The sort order is
+ *   controlled by
+ *   {@link Job#setSortComparatorClass(Class)}.</p>
+ *
+ *   For example, say that you want to find duplicate web pages and tag them
+ *   all with the url of the "best" known example. You would set up the job
+ *   like:
+ *   <ul>
+ *     <li>Map Input Key: url</li>
+ *     <li>Map Input Value: document</li>
+ *     <li>Map Output Key: document checksum, url pagerank</li>
+ *     <li>Map Output Value: url</li>
+ *     <li>Partitioner: by checksum</li>
+ *     <li>OutputKeyComparator: by checksum and then decreasing pagerank</li>
+ *     <li>OutputValueGroupingComparator: by checksum</li>
+ *   </ul>
+ *   </li>
+ *
+ *   <li>
+ *   <b>Reduce</b>
+ *
+ *   <p>In this phase the
+ *   {@link #reduce(Object, Iterable, Context)}
+ *   method is called for each <key, (collection of values)> in
+ *   the sorted inputs.</p>
+ *
+ *   <p>The output of the reduce task is typically written to a
+ *   {@link RecordWriter} via
+ *   {@link Context#write(Object, Object)}.</p>
+ *   </li>
+ * </ol>
+ *
+ * <p>The output of the <code>Reducer</code> is not re-sorted.</p>
+ *
+ * <p>Example:</p>
+ * <pre>
+ * public class IntSumReducer<Key> extends Reducer<Key, IntWritable, Key, IntWritable> {
+ *   private IntWritable result = new IntWritable();
+ *
+ *   public void reduce(Key key, Iterable<IntWritable> values,
+ *                      Context context) throws IOException, InterruptedException {
+ *     int sum = 0;
+ *     for (IntWritable val : values) {
+ *       sum += val.get();
+ *     }
+ *     result.set(sum);
+ *     context.write(key, result);
+ *   }
+ * }
+ * </pre>
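+ *
+ * <p>Putting the pieces together, a word-count style driver could wire the
+ * mapper and reducer up roughly as sketched below (<code>WordCount</code>
+ * and the argument paths are placeholders; <code>TokenCounterMapper</code>
+ * and <code>IntSumReducer</code> refer to the examples in these javadocs):</p>
+ * <pre>
+ * Configuration conf = new Configuration();
+ * Job job = new Job(conf, "word count");
+ * job.setJarByClass(WordCount.class);              // placeholder driver class
+ * job.setMapperClass(TokenCounterMapper.class);
+ * job.setCombinerClass(IntSumReducer.class);
+ * job.setReducerClass(IntSumReducer.class);
+ * job.setOutputKeyClass(Text.class);
+ * job.setOutputValueClass(IntWritable.class);
+ * FileInputFormat.addInputPath(job, new Path(args[0]));
+ * FileOutputFormat.setOutputPath(job, new Path(args[1]));
+ * System.exit(job.waitForCompletion(true) ? 0 : 1);
+ * </pre>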

+ * + * @see Mapper + * @see Partitioner + */ +public class Reducer { + + public class Context + extends ReduceContext { + public Context(Configuration conf, TaskAttemptID taskid, + RawKeyValueIterator input, + Counter inputKeyCounter, + Counter inputValueCounter, + RecordWriter output, + OutputCommitter committer, + StatusReporter reporter, + RawComparator comparator, + Class keyClass, + Class valueClass + ) throws IOException, InterruptedException { + super(conf, taskid, input, inputKeyCounter, inputValueCounter, + output, committer, reporter, + comparator, keyClass, valueClass); + } + } + + /** + * Called once at the start of the task. + */ + protected void setup(Context context + ) throws IOException, InterruptedException { + // NOTHING + } + + /** + * This method is called once for each key. Most applications will define + * their reduce class by overriding this method. The default implementation + * is an identity function. + */ + @SuppressWarnings("unchecked") + protected void reduce(KEYIN key, Iterable values, Context context + ) throws IOException, InterruptedException { + for(VALUEIN value: values) { + context.write((KEYOUT) key, (VALUEOUT) value); + } + } + + /** + * Called once at the end of the task. + */ + protected void cleanup(Context context + ) throws IOException, InterruptedException { + // NOTHING + } + + /** + * Advanced application writers can use the + * {@link #run(org.apache.hadoop.mapreduce.Reducer.Context)} method to + * control how the reduce task works. + */ + public void run(Context context) throws IOException, InterruptedException { + setup(context); + while (context.nextKey()) { + reduce(context.getCurrentKey(), context.getValues(), context); + } + cleanup(context); + } +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/StatusReporter.java b/src/mapred/org/apache/hadoop/mapreduce/StatusReporter.java new file mode 100644 index 0000000..0bc4525 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/StatusReporter.java @@ -0,0 +1,25 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapreduce; + +public abstract class StatusReporter { + public abstract Counter getCounter(Enum name); + public abstract Counter getCounter(String group, String name); + public abstract void progress(); + public abstract void setStatus(String status); +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/TaskAttemptContext.java b/src/mapred/org/apache/hadoop/mapreduce/TaskAttemptContext.java new file mode 100644 index 0000000..db6a2f5 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/TaskAttemptContext.java @@ -0,0 +1,66 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.Progressable; + +/** + * The context for task attempts. + */ +public class TaskAttemptContext extends JobContext implements Progressable { + private final TaskAttemptID taskId; + private String status = ""; + + public TaskAttemptContext(Configuration conf, + TaskAttemptID taskId) { + super(conf, taskId.getJobID()); + this.taskId = taskId; + } + + /** + * Get the unique name for this task attempt. + */ + public TaskAttemptID getTaskAttemptID() { + return taskId; + } + + /** + * Set the current status of the task to the given string. + */ + public void setStatus(String msg) throws IOException { + status = msg; + } + + /** + * Get the last set status message. + * @return the current status message + */ + public String getStatus() { + return status; + } + + /** + * Report progress. The subtypes actually do work in this method. + */ + public void progress() { + } +} \ No newline at end of file diff --git a/src/mapred/org/apache/hadoop/mapreduce/TaskAttemptID.java b/src/mapred/org/apache/hadoop/mapreduce/TaskAttemptID.java new file mode 100644 index 0000000..6d87289 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/TaskAttemptID.java @@ -0,0 +1,174 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * TaskAttemptID represents the immutable and unique identifier for + * a task attempt. Each task attempt is one particular instance of a Map or + * Reduce Task identified by its TaskID. + * + * TaskAttemptID consists of 2 parts. First part is the + * {@link TaskID}, that this TaskAttemptID belongs to. + * Second part is the task attempt number.
+ * An example TaskAttemptID is:
+ * <code>attempt_200707121733_0003_m_000005_0</code>, which represents the
+ * zeroth task attempt for the fifth map task in the third job
+ * running at the jobtracker started at <code>200707121733</code>.
+ *
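+ * <p>A brief usage sketch of going from such a string back to an id via
+ * {@link #forName(String)} (the attempt string is the example above):</p>
+ * <pre>
+ * TaskAttemptID attempt =
+ *     TaskAttemptID.forName("attempt_200707121733_0003_m_000005_0");
+ * JobID job = attempt.getJobID();   // job_200707121733_0003
+ * boolean isMap = attempt.isMap();  // true, since the type part is "m"
+ * </pre>
+ *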

+ * Applications should never construct or parse TaskAttemptID strings + * , but rather use appropriate constructors or {@link #forName(String)} + * method. + * + * @see JobID + * @see TaskID + */ +public class TaskAttemptID extends org.apache.hadoop.mapred.ID { + protected static final String ATTEMPT = "attempt"; + private TaskID taskId; + + /** + * Constructs a TaskAttemptID object from given {@link TaskID}. + * @param taskId TaskID that this task belongs to + * @param id the task attempt number + */ + public TaskAttemptID(TaskID taskId, int id) { + super(id); + if(taskId == null) { + throw new IllegalArgumentException("taskId cannot be null"); + } + this.taskId = taskId; + } + + /** + * Constructs a TaskId object from given parts. + * @param jtIdentifier jobTracker identifier + * @param jobId job number + * @param isMap whether the tip is a map + * @param taskId taskId number + * @param id the task attempt number + */ + public TaskAttemptID(String jtIdentifier, int jobId, boolean isMap, + int taskId, int id) { + this(new TaskID(jtIdentifier, jobId, isMap, taskId), id); + } + + public TaskAttemptID() { + taskId = new TaskID(); + } + + /** Returns the {@link JobID} object that this task attempt belongs to */ + public JobID getJobID() { + return taskId.getJobID(); + } + + /** Returns the {@link TaskID} object that this task attempt belongs to */ + public TaskID getTaskID() { + return taskId; + } + + /**Returns whether this TaskAttemptID is a map ID */ + public boolean isMap() { + return taskId.isMap(); + } + + @Override + public boolean equals(Object o) { + if (!super.equals(o)) + return false; + + TaskAttemptID that = (TaskAttemptID)o; + return this.taskId.equals(that.taskId); + } + + /** + * Add the unique string to the StringBuilder + * @param builder the builder to append ot + * @return the builder that was passed in. + */ + protected StringBuilder appendTo(StringBuilder builder) { + return taskId.appendTo(builder).append(SEPARATOR).append(id); + } + + @Override + public void readFields(DataInput in) throws IOException { + super.readFields(in); + taskId.readFields(in); + } + + @Override + public void write(DataOutput out) throws IOException { + super.write(out); + taskId.write(out); + } + + @Override + public int hashCode() { + return taskId.hashCode() * 5 + id; + } + + /**Compare TaskIds by first tipIds, then by task numbers. 
*/ + @Override + public int compareTo(ID o) { + TaskAttemptID that = (TaskAttemptID)o; + int tipComp = this.taskId.compareTo(that.taskId); + if(tipComp == 0) { + return this.id - that.id; + } + else return tipComp; + } + @Override + public String toString() { + return appendTo(new StringBuilder(ATTEMPT)).toString(); + } + + /** Construct a TaskAttemptID object from given string + * @return constructed TaskAttemptID object or null if the given String is null + * @throws IllegalArgumentException if the given string is malformed + */ + public static TaskAttemptID forName(String str + ) throws IllegalArgumentException { + if(str == null) + return null; + try { + String[] parts = str.split(Character.toString(SEPARATOR)); + if(parts.length == 6) { + if(parts[0].equals(ATTEMPT)) { + boolean isMap = false; + if(parts[3].equals("m")) isMap = true; + else if(parts[3].equals("r")) isMap = false; + else throw new Exception(); + return new org.apache.hadoop.mapred.TaskAttemptID + (parts[1], + Integer.parseInt(parts[2]), + isMap, Integer.parseInt(parts[4]), + Integer.parseInt(parts[5])); + } + } + } catch (Exception ex) { + //fall below + } + throw new IllegalArgumentException("TaskAttemptId string : " + str + + " is not properly formed"); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/TaskID.java b/src/mapred/org/apache/hadoop/mapreduce/TaskID.java new file mode 100644 index 0000000..60377ff --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/TaskID.java @@ -0,0 +1,187 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.text.NumberFormat; + +/** + * TaskID represents the immutable and unique identifier for + * a Map or Reduce Task. Each TaskID encompasses multiple attempts made to + * execute the Map or Reduce Task, each of which are uniquely indentified by + * their TaskAttemptID. + * + * TaskID consists of 3 parts. First part is the {@link JobID}, that this + * TaskInProgress belongs to. Second part of the TaskID is either 'm' or 'r' + * representing whether the task is a map task or a reduce task. + * And the third part is the task number.
+ * An example TaskID is:
+ * <code>task_200707121733_0003_m_000005</code>, which represents the
+ * fifth map task in the third job running at the jobtracker
+ * started at <code>200707121733</code>.
+ *
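+ * <p>For example, a usage sketch of {@link #forName(String)} with the string
+ * above:</p>
+ * <pre>
+ * TaskID tip = TaskID.forName("task_200707121733_0003_m_000005");
+ * JobID job = tip.getJobID();   // job_200707121733_0003
+ * </pre>
+ *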

+ * Applications should never construct or parse TaskID strings + * , but rather use appropriate constructors or {@link #forName(String)} + * method. + * + * @see JobID + * @see TaskAttemptID + */ +public class TaskID extends org.apache.hadoop.mapred.ID { + protected static final String TASK = "task"; + protected static final NumberFormat idFormat = NumberFormat.getInstance(); + static { + idFormat.setGroupingUsed(false); + idFormat.setMinimumIntegerDigits(6); + } + + private JobID jobId; + private boolean isMap; + + /** + * Constructs a TaskID object from given {@link JobID}. + * @param jobId JobID that this tip belongs to + * @param isMap whether the tip is a map + * @param id the tip number + */ + public TaskID(JobID jobId, boolean isMap, int id) { + super(id); + if(jobId == null) { + throw new IllegalArgumentException("jobId cannot be null"); + } + this.jobId = jobId; + this.isMap = isMap; + } + + /** + * Constructs a TaskInProgressId object from given parts. + * @param jtIdentifier jobTracker identifier + * @param jobId job number + * @param isMap whether the tip is a map + * @param id the tip number + */ + public TaskID(String jtIdentifier, int jobId, boolean isMap, int id) { + this(new JobID(jtIdentifier, jobId), isMap, id); + } + + public TaskID() { + jobId = new JobID(); + } + + /** Returns the {@link JobID} object that this tip belongs to */ + public JobID getJobID() { + return jobId; + } + + /**Returns whether this TaskID is a map ID */ + public boolean isMap() { + return isMap; + } + + @Override + public boolean equals(Object o) { + if (!super.equals(o)) + return false; + + TaskID that = (TaskID)o; + return this.isMap == that.isMap && this.jobId.equals(that.jobId); + } + + /**Compare TaskInProgressIds by first jobIds, then by tip numbers. Reduces are + * defined as greater then maps.*/ + @Override + public int compareTo(ID o) { + TaskID that = (TaskID)o; + int jobComp = this.jobId.compareTo(that.jobId); + if(jobComp == 0) { + if(this.isMap == that.isMap) { + return this.id - that.id; + } + else return this.isMap ? -1 : 1; + } + else return jobComp; + } + @Override + public String toString() { + return appendTo(new StringBuilder(TASK)).toString(); + } + + /** + * Add the unique string to the given builder. + * @param builder the builder to append to + * @return the builder that was passed in + */ + protected StringBuilder appendTo(StringBuilder builder) { + return jobId.appendTo(builder). + append(SEPARATOR). + append(isMap ? 'm' : 'r'). + append(SEPARATOR). 
+ append(idFormat.format(id)); + } + + @Override + public int hashCode() { + return jobId.hashCode() * 524287 + id; + } + + @Override + public void readFields(DataInput in) throws IOException { + super.readFields(in); + jobId.readFields(in); + isMap = in.readBoolean(); + } + + @Override + public void write(DataOutput out) throws IOException { + super.write(out); + jobId.write(out); + out.writeBoolean(isMap); + } + + /** Construct a TaskID object from given string + * @return constructed TaskID object or null if the given String is null + * @throws IllegalArgumentException if the given string is malformed + */ + public static TaskID forName(String str) + throws IllegalArgumentException { + if(str == null) + return null; + try { + String[] parts = str.split("_"); + if(parts.length == 5) { + if(parts[0].equals(TASK)) { + boolean isMap = false; + if(parts[3].equals("m")) isMap = true; + else if(parts[3].equals("r")) isMap = false; + else throw new Exception(); + return new org.apache.hadoop.mapred.TaskID(parts[1], + Integer.parseInt(parts[2]), + isMap, + Integer.parseInt(parts[4])); + } + } + }catch (Exception ex) {//fall below + } + throw new IllegalArgumentException("TaskId string : " + str + + " is not properly formed"); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/TaskInputOutputContext.java b/src/mapred/org/apache/hadoop/mapreduce/TaskInputOutputContext.java new file mode 100644 index 0000000..1d2bb06 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/TaskInputOutputContext.java @@ -0,0 +1,104 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.Progressable; + +/** + * A context object that allows input and output from the task. It is only + * supplied to the {@link Mapper} or {@link Reducer}. + * @param the input key type for the task + * @param the input value type for the task + * @param the output key type for the task + * @param the output value type for the task + */ +public abstract class TaskInputOutputContext + extends TaskAttemptContext implements Progressable { + private RecordWriter output; + private StatusReporter reporter; + private OutputCommitter committer; + + public TaskInputOutputContext(Configuration conf, TaskAttemptID taskid, + RecordWriter output, + OutputCommitter committer, + StatusReporter reporter) { + super(conf, taskid); + this.output = output; + this.reporter = reporter; + this.committer = committer; + } + + /** + * Advance to the next key, value pair, returning null if at end. 
+ * @return the key object that was read into, or null if no more + */ + public abstract + boolean nextKeyValue() throws IOException, InterruptedException; + + /** + * Get the current key. + * @return the current key object or null if there isn't one + * @throws IOException + * @throws InterruptedException + */ + public abstract + KEYIN getCurrentKey() throws IOException, InterruptedException; + + /** + * Get the current value. + * @return the value object that was read into + * @throws IOException + * @throws InterruptedException + */ + public abstract VALUEIN getCurrentValue() throws IOException, + InterruptedException; + + /** + * Generate an output key/value pair. + */ + public void write(KEYOUT key, VALUEOUT value + ) throws IOException, InterruptedException { + output.write(key, value); + } + + public Counter getCounter(Enum counterName) { + return reporter.getCounter(counterName); + } + + public Counter getCounter(String groupName, String counterName) { + return reporter.getCounter(groupName, counterName); + } + + @Override + public void progress() { + reporter.progress(); + } + + @Override + public void setStatus(String status) { + reporter.setStatus(status); + } + + public OutputCommitter getOutputCommitter() { + return committer; + } +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/TaskType.java b/src/mapred/org/apache/hadoop/mapreduce/TaskType.java new file mode 100644 index 0000000..3973a67 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/TaskType.java @@ -0,0 +1,26 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce; + +/** + * Enum for map, reduce, job-setup, job-cleanup, task-cleanup task types. + */ +public enum TaskType { + MAP, REDUCE, JOB_SETUP, JOB_CLEANUP, TASK_CLEANUP +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java b/src/mapred/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java new file mode 100644 index 0000000..acc73b3 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java @@ -0,0 +1,477 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce.lib.input; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.BlockLocation; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapreduce.InputFormat; +import org.apache.hadoop.mapreduce.InputSplit; +import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.mapreduce.JobContext; +import org.apache.hadoop.mapreduce.Mapper; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.StringUtils; + +/** + * A base class for file-based {@link InputFormat}s. + * + *

FileInputFormat is the base class for all file-based + * InputFormats. This provides a generic implementation of + * {@link #getSplits(JobContext)}. + * Subclasses of FileInputFormat can also override the + * {@link #isSplitable(JobContext, Path)} method to ensure input-files are + * not split-up and are processed as a whole by {@link Mapper}s. + */ +public abstract class FileInputFormat extends InputFormat { + + private static final Log LOG = LogFactory.getLog(FileInputFormat.class); + + private static final double SPLIT_SLOP = 1.1; // 10% slop + + private static final PathFilter hiddenFileFilter = new PathFilter(){ + public boolean accept(Path p){ + String name = p.getName(); + return !name.startsWith("_") && !name.startsWith("."); + } + }; + + /** + * Proxy PathFilter that accepts a path only if all filters given in the + * constructor do. Used by the listPaths() to apply the built-in + * hiddenFileFilter together with a user provided one (if any). + */ + private static class MultiPathFilter implements PathFilter { + private List filters; + + public MultiPathFilter(List filters) { + this.filters = filters; + } + + public boolean accept(Path path) { + for (PathFilter filter : filters) { + if (!filter.accept(path)) { + return false; + } + } + return true; + } + } + + /** + * Get the lower bound on split size imposed by the format. + * @return the number of bytes of the minimal split for this format + */ + protected long getFormatMinSplitSize() { + return 1; + } + + /** + * Is the given filename splitable? Usually, true, but if the file is + * stream compressed, it will not be. + * + * FileInputFormat implementations can override this and return + * false to ensure that individual input files are never split-up + * so that {@link Mapper}s process entire files. + * + * @param context the job context + * @param filename the file name to check + * @return is this file splitable? + */ + protected boolean isSplitable(JobContext context, Path filename) { + return true; + } + + /** + * Set a PathFilter to be applied to the input paths for the map-reduce job. + * @param job the job to modify + * @param filter the PathFilter class use for filtering the input paths. + */ + public static void setInputPathFilter(Job job, + Class filter) { + job.getConfiguration().setClass("mapred.input.pathFilter.class", filter, + PathFilter.class); + } + + /** + * Set the minimum input split size + * @param job the job to modify + * @param size the minimum size + */ + public static void setMinInputSplitSize(Job job, + long size) { + job.getConfiguration().setLong("mapred.min.split.size", size); + } + + /** + * Get the minimum split size + * @param job the job + * @return the minimum number of bytes that can be in a split + */ + public static long getMinSplitSize(JobContext job) { + return job.getConfiguration().getLong("mapred.min.split.size", 1L); + } + + /** + * Set the maximum split size + * @param job the job to modify + * @param size the maximum split size + */ + public static void setMaxInputSplitSize(Job job, + long size) { + job.getConfiguration().setLong("mapred.max.split.size", size); + } + + /** + * Get the maximum split size. + * @param context the job to look at. + * @return the maximum number of bytes a split can include + */ + public static long getMaxSplitSize(JobContext context) { + return context.getConfiguration().getLong("mapred.max.split.size", + Long.MAX_VALUE); + } + + /** + * Get a PathFilter instance of the filter set for the input paths. 
+ * + * @return the PathFilter instance set for the job, NULL if none has been set. + */ + public static PathFilter getInputPathFilter(JobContext context) { + Configuration conf = context.getConfiguration(); + Class filterClass = conf.getClass("mapred.input.pathFilter.class", null, + PathFilter.class); + return (filterClass != null) ? + (PathFilter) ReflectionUtils.newInstance(filterClass, conf) : null; + } + + /** List input directories. + * Mark this method to be final to make sure this method does not + * get overridden by any subclass. + * If a subclass historically overrides this method, now it needs to override + * {@link #listLocatedStatus(JobContext)} instead. + * + * @param job the job to list input paths for + * @return array of FileStatus objects + * @throws IOException if zero items. + */ + final static protected List listStatus(JobContext job + ) throws IOException { + List result = new ArrayList(); + Path[] dirs = getInputPaths(job); + if (dirs.length == 0) { + throw new IOException("No input paths specified in job"); + } + + List errors = new ArrayList(); + + // creates a MultiPathFilter with the hiddenFileFilter and the + // user provided one (if any). + List filters = new ArrayList(); + filters.add(hiddenFileFilter); + PathFilter jobFilter = getInputPathFilter(job); + if (jobFilter != null) { + filters.add(jobFilter); + } + PathFilter inputFilter = new MultiPathFilter(filters); + + for (int i=0; i < dirs.length; ++i) { + Path p = dirs[i]; + FileSystem fs = p.getFileSystem(job.getConfiguration()); + FileStatus[] matches = fs.globStatus(p, inputFilter); + if (matches == null) { + errors.add(new IOException("Input path does not exist: " + p)); + } else if (matches.length == 0) { + errors.add(new IOException("Input Pattern " + p + " matches 0 files")); + } else { + for (FileStatus globStat: matches) { + if (globStat.isDir()) { + for(FileStatus stat: fs.listStatus(globStat.getPath(), + inputFilter)) { + result.add(stat); + } + } else { + result.add(globStat); + } + } + } + } + + if (!errors.isEmpty()) { + throw new InvalidInputException(errors); + } + LOG.info("Total input paths to process : " + result.size()); + return result; + } + + /** List input directories. + * Subclasses may override to, e.g., select only files matching a regular + * expression. + * + * @param job the job to list input paths for + * @return array of LocatedFileStatus objects + * @throws IOException if zero items. + */ + protected List listLocatedStatus(JobContext job + ) throws IOException { + Path[] dirs = getInputPaths(job); + if (dirs.length == 0) { + throw new IOException("No input paths specified in job"); + } + + List errors = new ArrayList(); + + // creates a MultiPathFilter with the hiddenFileFilter and the + // user provided one (if any). 
+ List filters = new ArrayList(); + filters.add(hiddenFileFilter); + PathFilter jobFilter = getInputPathFilter(job); + if (jobFilter != null) { + filters.add(jobFilter); + } + PathFilter inputFilter = new MultiPathFilter(filters); + List result = new ArrayList(); + for (int i=0; i < dirs.length; ++i) { + Path p = dirs[i]; + FileSystem fs = p.getFileSystem(job.getConfiguration()); + FileStatus[] matches = fs.globStatus(p, inputFilter); + if (matches == null) { + errors.add(new IOException("Input path does not exist: " + p)); + } else if (matches.length == 0) { + errors.add(new IOException("Input Pattern " + p + " matches 0 files")); + } else { + for (FileStatus globStat: matches) { + for(RemoteIterator itor = + fs.listLocatedStatus(globStat.getPath(), + inputFilter); itor.hasNext();) { + result.add(itor.next()); + } + } + } + } + + if (!errors.isEmpty()) { + throw new InvalidInputException(errors); + } + LOG.info("Total input paths to process : " + result.size()); + return result; + } + + + /** + * Generate the list of files and make them into FileSplits. + */ + public List getSplits(JobContext job + ) throws IOException { + long minSize = Math.max(getFormatMinSplitSize(), getMinSplitSize(job)); + long maxSize = getMaxSplitSize(job); + + // generate splits + List splits = new ArrayList(); + for (LocatedFileStatus file: listLocatedStatus(job)) { + Path path = file.getPath(); + long length = file.getLen(); + BlockLocation[] blkLocations = file.getBlockLocations(); + + if ((length != 0) && isSplitable(job, path)) { + long blockSize = file.getBlockSize(); + long splitSize = computeSplitSize(blockSize, minSize, maxSize); + + long bytesRemaining = length; + while (((double) bytesRemaining)/splitSize > SPLIT_SLOP) { + int blkIndex = getBlockIndex(blkLocations, length-bytesRemaining); + splits.add(new FileSplit(path, length-bytesRemaining, splitSize, + blkLocations[blkIndex].getHosts())); + bytesRemaining -= splitSize; + } + + if (bytesRemaining != 0) { + splits.add(new FileSplit(path, length-bytesRemaining, bytesRemaining, + blkLocations[blkLocations.length-1].getHosts())); + } + } else if (length != 0) { + splits.add(new FileSplit(path, 0, length, blkLocations[0].getHosts())); + } else { + //Create empty hosts array for zero length files + splits.add(new FileSplit(path, 0, length, new String[0])); + } + } + LOG.debug("Total # of splits: " + splits.size()); + return splits; + } + + protected long computeSplitSize(long blockSize, long minSize, + long maxSize) { + return Math.max(minSize, Math.min(maxSize, blockSize)); + } + + protected int getBlockIndex(BlockLocation[] blkLocations, + long offset) { + for (int i = 0 ; i < blkLocations.length; i++) { + // is the offset inside this block? + if ((blkLocations[i].getOffset() <= offset) && + (offset < blkLocations[i].getOffset() + blkLocations[i].getLength())){ + return i; + } + } + BlockLocation last = blkLocations[blkLocations.length -1]; + long fileLength = last.getOffset() + last.getLength() -1; + throw new IllegalArgumentException("Offset " + offset + + " is outside of file (0.." + + fileLength + ")"); + } + + /** + * Sets the given comma separated paths as the list of inputs + * for the map-reduce job. + * + * @param job the job + * @param commaSeparatedPaths Comma separated paths to be set as + * the list of inputs for the map-reduce job. 
+ */ + public static void setInputPaths(Job job, + String commaSeparatedPaths + ) throws IOException { + setInputPaths(job, StringUtils.stringToPath( + getPathStrings(commaSeparatedPaths))); + } + + /** + * Add the given comma separated paths to the list of inputs for + * the map-reduce job. + * + * @param job The job to modify + * @param commaSeparatedPaths Comma separated paths to be added to + * the list of inputs for the map-reduce job. + */ + public static void addInputPaths(Job job, + String commaSeparatedPaths + ) throws IOException { + for (String str : getPathStrings(commaSeparatedPaths)) { + addInputPath(job, new Path(str)); + } + } + + /** + * Set the array of {@link Path}s as the list of inputs + * for the map-reduce job. + * + * @param job The job to modify + * @param inputPaths the {@link Path}s of the input directories/files + * for the map-reduce job. + */ + public static void setInputPaths(Job job, + Path... inputPaths) throws IOException { + Configuration conf = job.getConfiguration(); + FileSystem fs = FileSystem.get(conf); + Path path = inputPaths[0].makeQualified(fs); + StringBuffer str = new StringBuffer(StringUtils.escapeString(path.toString())); + for(int i = 1; i < inputPaths.length;i++) { + str.append(StringUtils.COMMA_STR); + path = inputPaths[i].makeQualified(fs); + str.append(StringUtils.escapeString(path.toString())); + } + conf.set("mapred.input.dir", str.toString()); + } + + /** + * Add a {@link Path} to the list of inputs for the map-reduce job. + * + * @param job The {@link Job} to modify + * @param path {@link Path} to be added to the list of inputs for + * the map-reduce job. + */ + public static void addInputPath(Job job, + Path path) throws IOException { + Configuration conf = job.getConfiguration(); + FileSystem fs = FileSystem.get(conf); + path = path.makeQualified(fs); + String dirStr = StringUtils.escapeString(path.toString()); + String dirs = conf.get("mapred.input.dir"); + conf.set("mapred.input.dir", dirs == null ? dirStr : dirs + "," + dirStr); + } + + // This method escapes commas in the glob pattern of the given paths. + private static String[] getPathStrings(String commaSeparatedPaths) { + int length = commaSeparatedPaths.length(); + int curlyOpen = 0; + int pathStart = 0; + boolean globPattern = false; + List pathStrings = new ArrayList(); + + for (int i=0; i problems; + + /** + * Create the exception with the given list. + * @param probs the list of problems to report. this list is not copied. + */ + public InvalidInputException(List probs) { + problems = probs; + } + + /** + * Get the complete list of the problems reported. + * @return the list of problems, which must not be modified + */ + public List getProblems() { + return problems; + } + + /** + * Get a summary message of the problems found. + * @return the concatenated messages from all of the problems. 
+ */ + public String getMessage() { + StringBuffer result = new StringBuffer(); + Iterator itr = problems.iterator(); + while(itr.hasNext()) { + result.append(itr.next().getMessage()); + if (itr.hasNext()) { + result.append("\n"); + } + } + return result.toString(); + } +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/lib/input/LineRecordReader.java b/src/mapred/org/apache/hadoop/mapreduce/lib/input/LineRecordReader.java new file mode 100644 index 0000000..a54b0d7 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/lib/input/LineRecordReader.java @@ -0,0 +1,147 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce.lib.input; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.compress.CompressionCodec; +import org.apache.hadoop.io.compress.CompressionCodecFactory; +import org.apache.hadoop.mapreduce.InputSplit; +import org.apache.hadoop.mapreduce.RecordReader; +import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.hadoop.util.LineReader; +import org.apache.commons.logging.LogFactory; +import org.apache.commons.logging.Log; + +/** + * Treats keys as offset in file and value as line. 
+ */ +public class LineRecordReader extends RecordReader { + private static final Log LOG = LogFactory.getLog(LineRecordReader.class); + + private CompressionCodecFactory compressionCodecs = null; + private long start; + private long pos; + private long end; + private LineReader in; + private int maxLineLength; + private LongWritable key = null; + private Text value = null; + + public void initialize(InputSplit genericSplit, + TaskAttemptContext context) throws IOException { + FileSplit split = (FileSplit) genericSplit; + Configuration job = context.getConfiguration(); + this.maxLineLength = job.getInt("mapred.linerecordreader.maxlength", + Integer.MAX_VALUE); + start = split.getStart(); + end = start + split.getLength(); + final Path file = split.getPath(); + compressionCodecs = new CompressionCodecFactory(job); + final CompressionCodec codec = compressionCodecs.getCodec(file); + + // open the file and seek to the start of the split + FileSystem fs = file.getFileSystem(job); + FSDataInputStream fileIn = fs.open(split.getPath()); + boolean skipFirstLine = false; + if (codec != null) { + in = new LineReader(codec.createInputStream(fileIn), job); + end = Long.MAX_VALUE; + } else { + if (start != 0) { + skipFirstLine = true; + --start; + fileIn.seek(start); + } + in = new LineReader(fileIn, job); + } + if (skipFirstLine) { // skip first line and re-establish "start". + start += in.readLine(new Text(), 0, + (int)Math.min((long)Integer.MAX_VALUE, end - start)); + } + this.pos = start; + } + + public boolean nextKeyValue() throws IOException { + if (key == null) { + key = new LongWritable(); + } + key.set(pos); + if (value == null) { + value = new Text(); + } + int newSize = 0; + while (pos < end) { + newSize = in.readLine(value, maxLineLength, + Math.max((int)Math.min(Integer.MAX_VALUE, end-pos), + maxLineLength)); + if (newSize == 0) { + break; + } + pos += newSize; + if (newSize < maxLineLength) { + break; + } + + // line too long. try again + LOG.info("Skipped line of size " + newSize + " at pos " + + (pos - newSize)); + } + if (newSize == 0) { + key = null; + value = null; + return false; + } else { + return true; + } + } + + @Override + public LongWritable getCurrentKey() { + return key; + } + + @Override + public Text getCurrentValue() { + return value; + } + + /** + * Get the progress within the split + */ + public float getProgress() { + if (start == end) { + return 0.0f; + } else { + return Math.min(1.0f, (pos - start) / (float)(end - start)); + } + } + + public synchronized void close() throws IOException { + if (in != null) { + in.close(); + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/lib/input/SequenceFileInputFormat.java b/src/mapred/org/apache/hadoop/mapreduce/lib/input/SequenceFileInputFormat.java new file mode 100644 index 0000000..b14e041 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/lib/input/SequenceFileInputFormat.java @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce.lib.input; + +import java.io.IOException; +import java.util.List; + +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Path; + +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.MapFile; +import org.apache.hadoop.mapreduce.InputFormat; +import org.apache.hadoop.mapreduce.InputSplit; +import org.apache.hadoop.mapreduce.JobContext; +import org.apache.hadoop.mapreduce.RecordReader; +import org.apache.hadoop.mapreduce.TaskAttemptContext; + +/** An {@link InputFormat} for {@link SequenceFile}s. */ +public class SequenceFileInputFormat extends FileInputFormat { + + @Override + public RecordReader createRecordReader(InputSplit split, + TaskAttemptContext context + ) throws IOException { + return new SequenceFileRecordReader(); + } + + @Override + protected long getFormatMinSplitSize() { + return SequenceFile.SYNC_INTERVAL; + } + + @Override + protected List listLocatedStatus(JobContext job + )throws IOException { + + List files = super.listLocatedStatus(job); + int len = files.size(); + for(int i=0; i < len; ++i) { + FileStatus file = files.get(i); + if (file.isDir()) { // it's a MapFile + Path p = file.getPath(); + FileSystem fs = p.getFileSystem(job.getConfiguration()); + // use the data file + files.set(i, fs.listLocatedStatus( + new Path(p, MapFile.DATA_FILE_NAME)).next()); + } + } + return files; + } +} + diff --git a/src/mapred/org/apache/hadoop/mapreduce/lib/input/SequenceFileRecordReader.java b/src/mapred/org/apache/hadoop/mapreduce/lib/input/SequenceFileRecordReader.java new file mode 100644 index 0000000..f15657e --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/lib/input/SequenceFileRecordReader.java @@ -0,0 +1,104 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce.lib.input; + +import java.io.IOException; + + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.*; +import org.apache.hadoop.mapreduce.InputSplit; +import org.apache.hadoop.mapreduce.RecordReader; +import org.apache.hadoop.mapreduce.TaskAttemptContext; + +/** An {@link RecordReader} for {@link SequenceFile}s. 
*/ +public class SequenceFileRecordReader extends RecordReader { + private SequenceFile.Reader in; + private long start; + private long end; + private boolean more = true; + private K key = null; + private V value = null; + protected Configuration conf; + + @Override + public void initialize(InputSplit split, + TaskAttemptContext context + ) throws IOException, InterruptedException { + FileSplit fileSplit = (FileSplit) split; + conf = context.getConfiguration(); + Path path = fileSplit.getPath(); + FileSystem fs = path.getFileSystem(conf); + this.in = new SequenceFile.Reader(fs, path, conf); + this.end = fileSplit.getStart() + fileSplit.getLength(); + + if (fileSplit.getStart() > in.getPosition()) { + in.sync(fileSplit.getStart()); // sync to start + } + + this.start = in.getPosition(); + more = start < end; + } + + @Override + @SuppressWarnings("unchecked") + public boolean nextKeyValue() throws IOException, InterruptedException { + if (!more) { + return false; + } + long pos = in.getPosition(); + key = (K) in.next(key); + if (key == null || (pos >= end && in.syncSeen())) { + more = false; + key = null; + value = null; + } else { + value = (V) in.getCurrentValue(value); + } + return more; + } + + @Override + public K getCurrentKey() { + return key; + } + + @Override + public V getCurrentValue() { + return value; + } + + /** + * Return the progress within the input split + * @return 0.0 to 1.0 of the input byte range + */ + public float getProgress() throws IOException { + if (end == start) { + return 0.0f; + } else { + return Math.min(1.0f, (in.getPosition() - start) / (float)(end - start)); + } + } + + public synchronized void close() throws IOException { in.close(); } + +} + diff --git a/src/mapred/org/apache/hadoop/mapreduce/lib/input/TextInputFormat.java b/src/mapred/org/apache/hadoop/mapreduce/lib/input/TextInputFormat.java new file mode 100644 index 0000000..bbf2ca8 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/lib/input/TextInputFormat.java @@ -0,0 +1,51 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce.lib.input; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.compress.CompressionCodec; +import org.apache.hadoop.io.compress.CompressionCodecFactory; +import org.apache.hadoop.mapreduce.InputFormat; +import org.apache.hadoop.mapreduce.InputSplit; +import org.apache.hadoop.mapreduce.JobContext; +import org.apache.hadoop.mapreduce.RecordReader; +import org.apache.hadoop.mapreduce.TaskAttemptContext; + +/** An {@link InputFormat} for plain text files. Files are broken into lines. + * Either linefeed or carriage-return are used to signal end of line. 
Keys are + * the position in the file, and values are the line of text.. */ +public class TextInputFormat extends FileInputFormat { + + @Override + public RecordReader + createRecordReader(InputSplit split, + TaskAttemptContext context) { + return new LineRecordReader(); + } + + @Override + protected boolean isSplitable(JobContext context, Path file) { + CompressionCodec codec = + new CompressionCodecFactory(context.getConfiguration()).getCodec(file); + return codec == null; + } + +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/lib/map/InverseMapper.java b/src/mapred/org/apache/hadoop/mapreduce/lib/map/InverseMapper.java new file mode 100644 index 0000000..00b0db5 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/lib/map/InverseMapper.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce.lib.map; + +import java.io.IOException; + +import org.apache.hadoop.mapreduce.Mapper; + +/** A {@link Mapper} that swaps keys and values. */ +public class InverseMapper extends Mapper { + + /** The inverse function. Input keys and values are swapped.*/ + @Override + public void map(K key, V value, Context context + ) throws IOException, InterruptedException { + context.write(value, key); + } + +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java b/src/mapred/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java new file mode 100644 index 0000000..95530f9 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java @@ -0,0 +1,271 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapreduce.lib.map; + +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapreduce.Counter; +import org.apache.hadoop.mapreduce.InputSplit; +import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.mapreduce.JobContext; +import org.apache.hadoop.mapreduce.Mapper; +import org.apache.hadoop.mapreduce.RecordReader; +import org.apache.hadoop.mapreduce.RecordWriter; +import org.apache.hadoop.mapreduce.StatusReporter; +import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +/** + * Multithreaded implementation for @link org.apache.hadoop.mapreduce.Mapper. + *

+ * It can be used instead of the default implementation,
+ * {@link org.apache.hadoop.mapred.MapRunner}, when the Map operation is not
+ * CPU bound, in order to improve throughput.
+ *
+ * Mapper implementations using this MultithreadedMapper must be thread-safe.
+ *
+ * The Map-Reduce job has to be configured with the mapper to use via
+ * {@link #setMapperClass(Job, Class)} and the number of threads the
+ * thread-pool can use via {@link #setNumberOfThreads(Job, int)}. The default
+ * value is 10 threads.
+ *
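+ * A minimal wiring sketch (conf is an existing Configuration; WordCountMapper
+ * is a placeholder for any user-supplied, thread-safe mapper and is not part
+ * of this patch):
+ * <pre>
+ *   Job job = new Job(conf);
+ *   job.setMapperClass(MultithreadedMapper.class);
+ *   MultithreadedMapper.setMapperClass(job, WordCountMapper.class);
+ *   MultithreadedMapper.setNumberOfThreads(job, 8);
+ * </pre>
+ *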

+ */ +public class MultithreadedMapper + extends Mapper { + + private static final Log LOG = LogFactory.getLog(MultithreadedMapper.class); + private Class> mapClass; + private Context outer; + private List runners; + + /** + * The number of threads in the thread pool that will run the map function. + * @param job the job + * @return the number of threads + */ + public static int getNumberOfThreads(JobContext job) { + return job.getConfiguration(). + getInt("mapred.map.multithreadedrunner.threads", 10); + } + + /** + * Set the number of threads in the pool for running maps. + * @param job the job to modify + * @param threads the new number of threads + */ + public static void setNumberOfThreads(Job job, int threads) { + job.getConfiguration().setInt("mapred.map.multithreadedrunner.threads", + threads); + } + + /** + * Get the application's mapper class. + * @param the map's input key type + * @param the map's input value type + * @param the map's output key type + * @param the map's output value type + * @param job the job + * @return the mapper class to run + */ + @SuppressWarnings("unchecked") + public static + Class> getMapperClass(JobContext job) { + return (Class>) + job.getConfiguration().getClass("mapred.map.multithreadedrunner.class", + Mapper.class); + } + + /** + * Set the application's mapper class. + * @param the map input key type + * @param the map input value type + * @param the map output key type + * @param the map output value type + * @param job the job to modify + * @param cls the class to use as the mapper + */ + public static + void setMapperClass(Job job, + Class> cls) { + if (MultithreadedMapper.class.isAssignableFrom(cls)) { + throw new IllegalArgumentException("Can't have recursive " + + "MultithreadedMapper instances."); + } + job.getConfiguration().setClass("mapred.map.multithreadedrunner.class", + cls, Mapper.class); + } + + /** + * Run the application's maps using a thread pool. 
+ */ + @Override + public void run(Context context) throws IOException, InterruptedException { + outer = context; + int numberOfThreads = getNumberOfThreads(context); + mapClass = getMapperClass(context); + if (LOG.isDebugEnabled()) { + LOG.debug("Configuring multithread runner to use " + numberOfThreads + + " threads"); + } + + runners = new ArrayList(numberOfThreads); + for(int i=0; i < numberOfThreads; ++i) { + MapRunner thread = new MapRunner(context); + thread.start(); + runners.add(i, thread); + } + for(int i=0; i < numberOfThreads; ++i) { + MapRunner thread = runners.get(i); + thread.join(); + Throwable th = thread.throwable; + if (th != null) { + if (th instanceof IOException) { + throw (IOException) th; + } else if (th instanceof InterruptedException) { + throw (InterruptedException) th; + } else { + throw new RuntimeException(th); + } + } + } + } + + private class SubMapRecordReader extends RecordReader { + private K1 key; + private V1 value; + private Configuration conf; + + @Override + public void close() throws IOException { + } + + @Override + public float getProgress() throws IOException, InterruptedException { + return 0; + } + + @Override + public void initialize(InputSplit split, + TaskAttemptContext context + ) throws IOException, InterruptedException { + conf = context.getConfiguration(); + } + + + @Override + public boolean nextKeyValue() throws IOException, InterruptedException { + synchronized (outer) { + if (!outer.nextKeyValue()) { + return false; + } + key = ReflectionUtils.copy(outer.getConfiguration(), + outer.getCurrentKey(), key); + value = ReflectionUtils.copy(conf, outer.getCurrentValue(), value); + return true; + } + } + + public K1 getCurrentKey() { + return key; + } + + @Override + public V1 getCurrentValue() { + return value; + } + } + + private class SubMapRecordWriter extends RecordWriter { + + @Override + public void close(TaskAttemptContext context) throws IOException, + InterruptedException { + } + + @Override + public void write(K2 key, V2 value) throws IOException, + InterruptedException { + synchronized (outer) { + outer.write(key, value); + } + } + } + + private class SubMapStatusReporter extends StatusReporter { + + @Override + public Counter getCounter(Enum name) { + return outer.getCounter(name); + } + + @Override + public Counter getCounter(String group, String name) { + return outer.getCounter(group, name); + } + + @Override + public void progress() { + outer.progress(); + } + + @Override + public void setStatus(String status) { + outer.setStatus(status); + } + + } + + private class MapRunner extends Thread { + private Mapper mapper; + private Context subcontext; + private Throwable throwable; + + MapRunner(Context context) throws IOException, InterruptedException { + mapper = ReflectionUtils.newInstance(mapClass, + context.getConfiguration()); + subcontext = new Context(outer.getConfiguration(), + outer.getTaskAttemptID(), + new SubMapRecordReader(), + new SubMapRecordWriter(), + context.getOutputCommitter(), + new SubMapStatusReporter(), + outer.getInputSplit()); + } + + public Throwable getThrowable() { + return throwable; + } + + @Override + public void run() { + try { + mapper.run(subcontext); + } catch (Throwable ie) { + throwable = ie; + } + } + } + +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/lib/map/TokenCounterMapper.java b/src/mapred/org/apache/hadoop/mapreduce/lib/map/TokenCounterMapper.java new file mode 100644 index 0000000..3ac68ad --- /dev/null +++ 
b/src/mapred/org/apache/hadoop/mapreduce/lib/map/TokenCounterMapper.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce.lib.map; + +import java.io.IOException; +import java.util.StringTokenizer; + +import org.apache.hadoop.io.IntWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapreduce.Mapper; + +/** + * Tokenize the input values and emit each word with a count of 1. + */ +public class TokenCounterMapper extends Mapper{ + + private final static IntWritable one = new IntWritable(1); + private Text word = new Text(); + + @Override + public void map(Object key, Text value, Context context + ) throws IOException, InterruptedException { + StringTokenizer itr = new StringTokenizer(value.toString()); + while (itr.hasMoreTokens()) { + word.set(itr.nextToken()); + context.write(word, one); + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java b/src/mapred/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java new file mode 100644 index 0000000..7507c27 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java @@ -0,0 +1,277 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce.lib.output; + +import java.io.IOException; +import java.net.URI; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.mapreduce.JobContext; +import org.apache.hadoop.mapreduce.JobStatus; +import org.apache.hadoop.mapreduce.OutputCommitter; +import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.hadoop.mapreduce.TaskAttemptID; +import org.apache.hadoop.util.StringUtils; + +/** An {@link OutputCommitter} that commits files specified + * in job output directory i.e. 
${mapred.output.dir}. + **/ +public class FileOutputCommitter extends OutputCommitter { + + private static final Log LOG = LogFactory.getLog(FileOutputCommitter.class); + + /** + * Temporary directory name + */ + protected static final String TEMP_DIR_NAME = "_temporary"; + public static final String SUCCEEDED_FILE_NAME = "_SUCCESS"; + static final String SUCCESSFUL_JOB_OUTPUT_DIR_MARKER = + "mapreduce.fileoutputcommitter.marksuccessfuljobs"; + private FileSystem outputFileSystem = null; + private Path outputPath = null; + private Path workPath = null; + + /** + * Create a file output committer + * @param outputPath the job's output path + * @param context the task's context + * @throws IOException + */ + public FileOutputCommitter(Path outputPath, + TaskAttemptContext context) throws IOException { + if (outputPath != null) { + this.outputPath = outputPath; + outputFileSystem = outputPath.getFileSystem(context.getConfiguration()); + workPath = new Path(outputPath, + (FileOutputCommitter.TEMP_DIR_NAME + Path.SEPARATOR + + "_" + context.getTaskAttemptID().toString() + )).makeQualified(outputFileSystem); + } + } + + /** + * Create the temporary directory that is the root of all of the task + * work directories. + * @param context the job's context + */ + public void setupJob(JobContext context) throws IOException { + if (outputPath != null) { + Path tmpDir = new Path(outputPath, FileOutputCommitter.TEMP_DIR_NAME); + FileSystem fileSys = tmpDir.getFileSystem(context.getConfiguration()); + if (!fileSys.mkdirs(tmpDir)) { + LOG.error("Mkdirs failed to create " + tmpDir.toString()); + } + } + } + + private static boolean shouldMarkOutputDir(Configuration conf) { + return conf.getBoolean(SUCCESSFUL_JOB_OUTPUT_DIR_MARKER, + true); + } + + // Mark the output dir of the job for which the context is passed. + private void markOutputDirSuccessful(JobContext context) + throws IOException { + if (outputPath != null) { + FileSystem fileSys = outputPath.getFileSystem(context.getConfiguration()); + if (fileSys.exists(outputPath)) { + // create a file in the folder to mark it + Path filePath = new Path(outputPath, SUCCEEDED_FILE_NAME); + fileSys.create(filePath).close(); + } + } + } + + /** + * Delete the temporary directory, including all of the work directories. + * This is called for all jobs whose final run state is SUCCEEDED + * @param context the job's context. + */ + public void commitJob(JobContext context) throws IOException { + // delete the _temporary folder + cleanupJob(context); + // check if the o/p dir should be marked + if (shouldMarkOutputDir(context.getConfiguration())) { + // create a _success file in the o/p folder + markOutputDirSuccessful(context); + } + } + + @Override + @Deprecated + public void cleanupJob(JobContext context) throws IOException { + if (outputPath != null) { + Path tmpDir = new Path(outputPath, FileOutputCommitter.TEMP_DIR_NAME); + FileSystem fileSys = tmpDir.getFileSystem(context.getConfiguration()); + if (fileSys.exists(tmpDir)) { + fileSys.delete(tmpDir, true); + } + } else { + LOG.warn("Output path is null in cleanup"); + } + } + + /** + * Delete the temporary directory, including all of the work directories. + * @param context the job's context + * @param state final run state of the job, should be FAILED or KILLED + */ + @Override + public void abortJob(JobContext context, JobStatus.State state) + throws IOException { + cleanupJob(context); + } + + /** + * No task setup required. 
+ */ + @Override + public void setupTask(TaskAttemptContext context) throws IOException { + // FileOutputCommitter's setupTask doesn't do anything. Because the + // temporary task directory is created on demand when the + // task is writing. + } + + /** + * Move the files from the work directory to the job output directory + * @param context the task context + */ + public void commitTask(TaskAttemptContext context) + throws IOException { + TaskAttemptID attemptId = context.getTaskAttemptID(); + if (workPath != null) { + context.progress(); + if (outputFileSystem.exists(workPath)) { + // Move the task outputs to their final place + moveTaskOutputs(context, outputFileSystem, outputPath, workPath); + // Delete the temporary task-specific output directory + if (!outputFileSystem.delete(workPath, true)) { + LOG.warn("Failed to delete the temporary output" + + " directory of task: " + attemptId + " - " + workPath); + } + LOG.info("Saved output of task '" + attemptId + "' to " + + outputPath); + } + } + } + + /** + * Move all of the files from the work directory to the final output + * @param context the task context + * @param fs the output file system + * @param jobOutputDir the final output direcotry + * @param taskOutput the work path + * @throws IOException + */ + private void moveTaskOutputs(TaskAttemptContext context, + FileSystem fs, + Path jobOutputDir, + Path taskOutput) + throws IOException { + TaskAttemptID attemptId = context.getTaskAttemptID(); + context.progress(); + if (fs.isFile(taskOutput)) { + Path finalOutputPath = getFinalPath(jobOutputDir, taskOutput, + workPath); + if (!fs.rename(taskOutput, finalOutputPath)) { + if (!fs.delete(finalOutputPath, true)) { + throw new IOException("Failed to delete earlier output of task: " + + attemptId); + } + if (!fs.rename(taskOutput, finalOutputPath)) { + throw new IOException("Failed to save output of task: " + + attemptId); + } + } + LOG.debug("Moved " + taskOutput + " to " + finalOutputPath); + } else if(fs.getFileStatus(taskOutput).isDir()) { + FileStatus[] paths = fs.listStatus(taskOutput); + Path finalOutputPath = getFinalPath(jobOutputDir, taskOutput, workPath); + fs.mkdirs(finalOutputPath); + if (paths != null) { + for (FileStatus path : paths) { + moveTaskOutputs(context, fs, jobOutputDir, path.getPath()); + } + } + } + } + + /** + * Delete the work directory + */ + @Override + public void abortTask(TaskAttemptContext context) { + try { + if (workPath != null) { + context.progress(); + outputFileSystem.delete(workPath, true); + } + } catch (IOException ie) { + LOG.warn("Error discarding output" + StringUtils.stringifyException(ie)); + } + } + + /** + * Find the final name of a given output file, given the job output directory + * and the work directory. 
+ * @param jobOutputDir the job's output directory + * @param taskOutput the specific task output file + * @param taskOutputPath the job's work directory + * @return the final path for the specific output file + * @throws IOException + */ + private Path getFinalPath(Path jobOutputDir, Path taskOutput, + Path taskOutputPath) throws IOException { + URI taskOutputUri = taskOutput.toUri(); + URI relativePath = taskOutputPath.toUri().relativize(taskOutputUri); + if (taskOutputUri == relativePath) { + throw new IOException("Can not get the relative path: base = " + + taskOutputPath + " child = " + taskOutput); + } + if (relativePath.getPath().length() > 0) { + return new Path(jobOutputDir, relativePath.getPath()); + } else { + return jobOutputDir; + } + } + + /** + * Did this task write any files in the work directory? + * @param context the task's context + */ + @Override + public boolean needsTaskCommit(TaskAttemptContext context + ) throws IOException { + return workPath != null && outputFileSystem.exists(workPath); + } + + /** + * Get the directory that the task should write results into + * @return the work directory + * @throws IOException + */ + public Path getWorkPath() throws IOException { + return workPath; + } +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java b/src/mapred/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java new file mode 100644 index 0000000..941822b --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java @@ -0,0 +1,269 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapreduce.lib.output; + +import java.io.IOException; +import java.text.NumberFormat; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.compress.CompressionCodec; +import org.apache.hadoop.mapred.FileAlreadyExistsException; +import org.apache.hadoop.mapred.InvalidJobConfException; +import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.mapreduce.JobContext; +import org.apache.hadoop.mapreduce.OutputCommitter; +import org.apache.hadoop.mapreduce.OutputFormat; +import org.apache.hadoop.mapreduce.RecordWriter; +import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.hadoop.mapreduce.TaskID; +import org.apache.hadoop.mapreduce.TaskInputOutputContext; + +/** A base class for {@link OutputFormat}s that read from {@link FileSystem}s.*/ +public abstract class FileOutputFormat extends OutputFormat { + + /** Construct output file names so that, when an output directory listing is + * sorted lexicographically, positions correspond to output partitions.*/ + private static final NumberFormat NUMBER_FORMAT = NumberFormat.getInstance(); + static { + NUMBER_FORMAT.setMinimumIntegerDigits(5); + NUMBER_FORMAT.setGroupingUsed(false); + } + private FileOutputCommitter committer = null; + + /** + * Set whether the output of the job is compressed. + * @param job the job to modify + * @param compress should the output of the job be compressed? + */ + public static void setCompressOutput(Job job, boolean compress) { + job.getConfiguration().setBoolean("mapred.output.compress", compress); + } + + /** + * Is the job output compressed? + * @param job the Job to look in + * @return true if the job output should be compressed, + * false otherwise + */ + public static boolean getCompressOutput(JobContext job) { + return job.getConfiguration().getBoolean("mapred.output.compress", false); + } + + /** + * Set the {@link CompressionCodec} to be used to compress job outputs. + * @param job the job to modify + * @param codecClass the {@link CompressionCodec} to be used to + * compress the job outputs + */ + public static void + setOutputCompressorClass(Job job, + Class codecClass) { + setCompressOutput(job, true); + job.getConfiguration().setClass("mapred.output.compression.codec", + codecClass, + CompressionCodec.class); + } + + /** + * Get the {@link CompressionCodec} for compressing the job outputs. 
+ * @param job the {@link Job} to look in + * @param defaultValue the {@link CompressionCodec} to return if not set + * @return the {@link CompressionCodec} to be used to compress the + * job outputs + * @throws IllegalArgumentException if the class was specified, but not found + */ + public static Class + getOutputCompressorClass(JobContext job, + Class defaultValue) { + Class codecClass = defaultValue; + Configuration conf = job.getConfiguration(); + String name = conf.get("mapred.output.compression.codec"); + if (name != null) { + try { + codecClass = + conf.getClassByName(name).asSubclass(CompressionCodec.class); + } catch (ClassNotFoundException e) { + throw new IllegalArgumentException("Compression codec " + name + + " was not found.", e); + } + } + return codecClass; + } + + public abstract RecordWriter + getRecordWriter(TaskAttemptContext job + ) throws IOException, InterruptedException; + + public void checkOutputSpecs(JobContext job + ) throws FileAlreadyExistsException, IOException{ + // Ensure that the output directory is set and not already there + Path outDir = getOutputPath(job); + if (outDir == null) { + throw new InvalidJobConfException("Output directory not set."); + } + if (outDir.getFileSystem(job.getConfiguration()).exists(outDir)) { + throw new FileAlreadyExistsException("Output directory " + outDir + + " already exists"); + } + } + + /** + * Set the {@link Path} of the output directory for the map-reduce job. + * + * @param job The job to modify + * @param outputDir the {@link Path} of the output directory for + * the map-reduce job. + */ + public static void setOutputPath(Job job, Path outputDir) { + job.getConfiguration().set("mapred.output.dir", outputDir.toString()); + } + + /** + * Get the {@link Path} to the output directory for the map-reduce job. + * + * @return the {@link Path} to the output directory for the map-reduce job. + * @see FileOutputFormat#getWorkOutputPath(TaskInputOutputContext) + */ + public static Path getOutputPath(JobContext job) { + String name = job.getConfiguration().get("mapred.output.dir"); + return name == null ? null: new Path(name); + } + + /** + * Get the {@link Path} to the task's temporary output directory + * for the map-reduce job + * + *

+ * Tasks' Side-Effect Files
+ *
+ * Some applications need to create/write-to side-files, which differ from
+ * the actual job-outputs.
+ *
+ * In such cases there could be issues with 2 instances of the same TIP
+ * (running simultaneously e.g. speculative tasks) trying to open/write-to the
+ * same file (path) on HDFS. Hence the application-writer will have to pick
+ * unique names per task-attempt (e.g. using the attemptid, say
+ * attempt_200709221812_0001_m_000000_0), not just per TIP.
+ *
+ * To get around this the Map-Reduce framework helps the application-writer
+ * out by maintaining a special
+ * ${mapred.output.dir}/_temporary/_${taskid}
+ * sub-directory for each task-attempt on HDFS where the output of the
+ * task-attempt goes. On successful completion of the task-attempt the files
+ * in the ${mapred.output.dir}/_temporary/_${taskid} (only)
+ * are promoted to ${mapred.output.dir}. Of course, the
+ * framework discards the sub-directory of unsuccessful task-attempts. This
+ * is completely transparent to the application.
+ *
+ * The application-writer can take advantage of this by creating any
+ * side-files required in a work directory during execution
+ * of his task i.e. via
+ * {@link #getWorkOutputPath(TaskInputOutputContext)}, and
+ * the framework will move them out similarly - thus she doesn't have to pick
+ * unique paths per task-attempt.
+ *
+ * The entire discussion holds true for maps of jobs with
+ * reducer=NONE (i.e. 0 reduces) since output of the map, in that case,
+ * goes directly to HDFS.
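+ *
+ * A minimal sketch of writing such a side-file from inside a task (the file
+ * name side-data.txt is arbitrary and only for illustration):
+ * <pre>
+ *   Path workDir = FileOutputFormat.getWorkOutputPath(context);
+ *   FileSystem fs = workDir.getFileSystem(context.getConfiguration());
+ *   FSDataOutputStream out = fs.create(new Path(workDir, "side-data.txt"));
+ *   out.writeUTF("side data");
+ *   out.close();
+ * </pre>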

+ * + * @return the {@link Path} to the task's temporary output directory + * for the map-reduce job. + */ + public static Path getWorkOutputPath(TaskInputOutputContext context + ) throws IOException, + InterruptedException { + FileOutputCommitter committer = (FileOutputCommitter) + context.getOutputCommitter(); + return committer.getWorkPath(); + } + + /** + * Helper function to generate a {@link Path} for a file that is unique for + * the task within the job output directory. + * + *

+ * The path can be used to create custom files from within the map and
+ * reduce tasks. The path name will be unique for each task. The path parent
+ * will be the job output directory.
+ *
+ * This method uses the {@link #getUniqueFile} method to make the file name
+ * unique for the task.
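+ *
+ * For example (a sketch; the base name and extension are arbitrary):
+ * <pre>
+ *   Path debugFile =
+ *     FileOutputFormat.getPathForWorkFile(context, "debug", ".txt");
+ * </pre>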

+ * + * @param context the context for the task. + * @param name the name for the file. + * @param extension the extension for the file + * @return a unique path accross all tasks of the job. + */ + public + static Path getPathForWorkFile(TaskInputOutputContext context, + String name, + String extension + ) throws IOException, InterruptedException { + return new Path(getWorkOutputPath(context), + getUniqueFile(context, name, extension)); + } + + /** + * Generate a unique filename, based on the task id, name, and extension + * @param context the task that is calling this + * @param name the base filename + * @param extension the filename extension + * @return a string like $name-[mr]-$id$extension + */ + public synchronized static String getUniqueFile(TaskAttemptContext context, + String name, + String extension) { + TaskID taskId = context.getTaskAttemptID().getTaskID(); + int partition = taskId.getId(); + StringBuilder result = new StringBuilder(); + result.append(name); + result.append('-'); + result.append(taskId.isMap() ? 'm' : 'r'); + result.append('-'); + result.append(NUMBER_FORMAT.format(partition)); + result.append(extension); + return result.toString(); + } + + /** + * Get the default path and filename for the output format. + * @param context the task context + * @param extension an extension to add to the filename + * @return a full path $output/_temporary/$taskid/part-[mr]-$id + * @throws IOException + */ + public Path getDefaultWorkFile(TaskAttemptContext context, + String extension) throws IOException{ + FileOutputCommitter committer = + (FileOutputCommitter) getOutputCommitter(context); + return new Path(committer.getWorkPath(), getUniqueFile(context, "part", + extension)); + } + + public synchronized + OutputCommitter getOutputCommitter(TaskAttemptContext context + ) throws IOException { + if (committer == null) { + Path output = getOutputPath(context); + committer = new FileOutputCommitter(output, context); + } + return committer; + } +} + diff --git a/src/mapred/org/apache/hadoop/mapreduce/lib/output/NullOutputFormat.java b/src/mapred/org/apache/hadoop/mapreduce/lib/output/NullOutputFormat.java new file mode 100644 index 0000000..b61c3a3 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/lib/output/NullOutputFormat.java @@ -0,0 +1,58 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce.lib.output; + +import org.apache.hadoop.mapreduce.JobContext; +import org.apache.hadoop.mapreduce.OutputCommitter; +import org.apache.hadoop.mapreduce.OutputFormat; +import org.apache.hadoop.mapreduce.RecordWriter; +import org.apache.hadoop.mapreduce.TaskAttemptContext; + +/** + * Consume all outputs and put them in /dev/null. 
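+ *
+ * Typically used when a job produces all of its effects inside the tasks
+ * themselves and no output files are wanted; a minimal sketch, assuming an
+ * already configured Job instance named job:
+ * <pre>
+ *   job.setOutputFormatClass(NullOutputFormat.class);
+ * </pre>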
+ */ +public class NullOutputFormat extends OutputFormat { + + @Override + public RecordWriter + getRecordWriter(TaskAttemptContext context) { + return new RecordWriter(){ + public void write(K key, V value) { } + public void close(TaskAttemptContext context) { } + }; + } + + @Override + public void checkOutputSpecs(JobContext context) { } + + @Override + public OutputCommitter getOutputCommitter(TaskAttemptContext context) { + return new OutputCommitter() { + public void abortTask(TaskAttemptContext taskContext) { } + public void cleanupJob(JobContext jobContext) { } + public void commitJob(JobContext jobContext) { } + public void commitTask(TaskAttemptContext taskContext) { } + public boolean needsTaskCommit(TaskAttemptContext taskContext) { + return false; + } + public void setupJob(JobContext jobContext) { } + public void setupTask(TaskAttemptContext taskContext) { } + }; + } +} diff --git a/src/mapred/org/apache/hadoop/mapreduce/lib/output/SequenceFileOutputFormat.java b/src/mapred/org/apache/hadoop/mapreduce/lib/output/SequenceFileOutputFormat.java new file mode 100644 index 0000000..607d704 --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapreduce/lib/output/SequenceFileOutputFormat.java @@ -0,0 +1,109 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce.lib.output; + +import java.io.IOException; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; + +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.SequenceFile.CompressionType; +import org.apache.hadoop.io.compress.CompressionCodec; +import org.apache.hadoop.io.compress.DefaultCodec; +import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.mapreduce.JobContext; +import org.apache.hadoop.mapreduce.OutputFormat; +import org.apache.hadoop.mapreduce.RecordWriter; +import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.conf.Configuration; + +/** An {@link OutputFormat} that writes {@link SequenceFile}s. 
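+ *
+ * A sketch of a typical configuration, assuming an already configured Job
+ * instance named job (block compression and the output path shown here are
+ * illustrative choices, not requirements):
+ * <pre>
+ *   job.setOutputFormatClass(SequenceFileOutputFormat.class);
+ *   SequenceFileOutputFormat.setOutputCompressionType(job,
+ *       SequenceFile.CompressionType.BLOCK);
+ *   FileOutputFormat.setOutputPath(job, new Path("seqfile-out"));
+ * </pre>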
+public class SequenceFileOutputFormat<K, V> extends FileOutputFormat<K, V> {
+
+  public RecordWriter<K, V>
+         getRecordWriter(TaskAttemptContext context
+                         ) throws IOException, InterruptedException {
+    Configuration conf = context.getConfiguration();
+
+    CompressionCodec codec = null;
+    CompressionType compressionType = CompressionType.NONE;
+    if (getCompressOutput(context)) {
+      // find the kind of compression to do
+      compressionType = getOutputCompressionType(context);
+
+      // find the right codec
+      Class<? extends CompressionCodec> codecClass =
+        getOutputCompressorClass(context, DefaultCodec.class);
+      codec = (CompressionCodec)
+        ReflectionUtils.newInstance(codecClass, conf);
+    }
+    // get the path of the temporary output file
+    Path file = getDefaultWorkFile(context, "");
+    FileSystem fs = file.getFileSystem(conf);
+    final SequenceFile.Writer out =
+      SequenceFile.createWriter(fs, conf, file,
+                                context.getOutputKeyClass(),
+                                context.getOutputValueClass(),
+                                compressionType,
+                                codec,
+                                context);
+
+    return new RecordWriter<K, V>() {
+
+      public void write(K key, V value)
+        throws IOException {
+
+        out.append(key, value);
+      }
+
+      public void close(TaskAttemptContext context) throws IOException {
+        out.close();
+      }
+    };
+  }
+
+  /**
+   * Get the {@link CompressionType} for the output {@link SequenceFile}.
+   * @param job the {@link Job}
+   * @return the {@link CompressionType} for the output {@link SequenceFile},
+   *         defaulting to {@link CompressionType#RECORD}
+   */
+  public static CompressionType getOutputCompressionType(JobContext job) {
+    String val = job.getConfiguration().get("mapred.output.compression.type",
+                                            CompressionType.RECORD.toString());
+    return CompressionType.valueOf(val);
+  }
+
+  /**
+   * Set the {@link CompressionType} for the output {@link SequenceFile}.
+   * @param job the {@link Job} to modify
+   * @param style the {@link CompressionType} for the output
+   *              {@link SequenceFile}
+   */
+  public static void setOutputCompressionType(Job job,
+                                              CompressionType style) {
+    setCompressOutput(job, true);
+    job.getConfiguration().set("mapred.output.compression.type",
+                               style.toString());
+  }
+
+}
+
diff --git a/src/mapred/org/apache/hadoop/mapreduce/lib/output/TextOutputFormat.java b/src/mapred/org/apache/hadoop/mapreduce/lib/output/TextOutputFormat.java
new file mode 100644
index 0000000..9f234df
--- /dev/null
+++ b/src/mapred/org/apache/hadoop/mapreduce/lib/output/TextOutputFormat.java
@@ -0,0 +1,138 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.lib.output;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FSDataOutputStream;
+
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.compress.CompressionCodec;
+import org.apache.hadoop.io.compress.GzipCodec;
+import org.apache.hadoop.mapreduce.OutputFormat;
+import org.apache.hadoop.mapreduce.RecordWriter;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.util.*;
+
+/** An {@link OutputFormat} that writes plain text files. */
+public class TextOutputFormat<K, V> extends FileOutputFormat<K, V> {
+  protected static class LineRecordWriter<K, V>
+    extends RecordWriter<K, V> {
+    private static final String utf8 = "UTF-8";
+    private static final byte[] newline;
+    static {
+      try {
+        newline = "\n".getBytes(utf8);
+      } catch (UnsupportedEncodingException uee) {
+        throw new IllegalArgumentException("can't find " + utf8 + " encoding");
+      }
+    }
+
+    protected DataOutputStream out;
+    private final byte[] keyValueSeparator;
+
+    public LineRecordWriter(DataOutputStream out, String keyValueSeparator) {
+      this.out = out;
+      try {
+        this.keyValueSeparator = keyValueSeparator.getBytes(utf8);
+      } catch (UnsupportedEncodingException uee) {
+        throw new IllegalArgumentException("can't find " + utf8 + " encoding");
+      }
+    }
+
+    public LineRecordWriter(DataOutputStream out) {
+      this(out, "\t");
+    }
+
+    /**
+     * Write the object to the byte stream, handling Text as a special
+     * case.
+     * @param o the object to print
+     * @throws IOException if the write throws, we pass it on
+     */
+    private void writeObject(Object o) throws IOException {
+      if (o instanceof Text) {
+        Text to = (Text) o;
+        out.write(to.getBytes(), 0, to.getLength());
+      } else {
+        out.write(o.toString().getBytes(utf8));
+      }
+    }
+
+    public synchronized void write(K key, V value)
+      throws IOException {
+
+      boolean nullKey = key == null || key instanceof NullWritable;
+      boolean nullValue = value == null || value instanceof NullWritable;
+      if (nullKey && nullValue) {
+        return;
+      }
+      if (!nullKey) {
+        writeObject(key);
+      }
+      if (!(nullKey || nullValue)) {
+        out.write(keyValueSeparator);
+      }
+      if (!nullValue) {
+        writeObject(value);
+      }
+      out.write(newline);
+    }
+
+    public synchronized
+    void close(TaskAttemptContext context) throws IOException {
+      out.close();
+    }
+  }
+
+  public RecordWriter<K, V>
+         getRecordWriter(TaskAttemptContext job
+                         ) throws IOException, InterruptedException {
+    Configuration conf = job.getConfiguration();
+    boolean isCompressed = getCompressOutput(job);
+    String keyValueSeparator = conf.get("mapred.textoutputformat.separator",
+                                        "\t");
+    CompressionCodec codec = null;
+    String extension = "";
+    if (isCompressed) {
+      Class<? extends CompressionCodec> codecClass =
+        getOutputCompressorClass(job, GzipCodec.class);
+      codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, conf);
+      extension = codec.getDefaultExtension();
+    }
+    Path file = getDefaultWorkFile(job, extension);
+    FileSystem fs = file.getFileSystem(conf);
+    if (!isCompressed) {
+      FSDataOutputStream fileOut = fs.create(file, false);
+      return new LineRecordWriter<K, V>(fileOut, keyValueSeparator);
+    } else {
+      FSDataOutputStream fileOut = fs.create(file, false);
+      return new LineRecordWriter<K, V>(new DataOutputStream
+                                        (codec.createOutputStream(fileOut)),
+                                        keyValueSeparator);
+    }
+  }
+}
+
diff --git a/src/mapred/org/apache/hadoop/mapreduce/lib/partition/HashPartitioner.java b/src/mapred/org/apache/hadoop/mapreduce/lib/partition/HashPartitioner.java
new file mode 100644
index 0000000..2ecd27a
--- /dev/null
+++ b/src/mapred/org/apache/hadoop/mapreduce/lib/partition/HashPartitioner.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.lib.partition;
+
+import org.apache.hadoop.mapreduce.Partitioner;
+
+/** Partition keys by their {@link Object#hashCode()}. */
+public class HashPartitioner<K, V> extends Partitioner<K, V> {
+
+  /** Use {@link Object#hashCode()} to partition. */
+  public int getPartition(K key, V value,
+                          int numReduceTasks) {
+    return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
+  }
+
+}
diff --git a/src/mapred/org/apache/hadoop/mapreduce/lib/reduce/IntSumReducer.java b/src/mapred/org/apache/hadoop/mapreduce/lib/reduce/IntSumReducer.java
new file mode 100644
index 0000000..d0bc0f2
--- /dev/null
+++ b/src/mapred/org/apache/hadoop/mapreduce/lib/reduce/IntSumReducer.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.lib.reduce;
+
+import java.io.IOException;
+
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.mapreduce.Reducer;
+
+public class IntSumReducer<Key> extends Reducer<Key, IntWritable,
+                                                Key, IntWritable> {
+  private IntWritable result = new IntWritable();
+
+  public void reduce(Key key, Iterable<IntWritable> values,
+                     Context context) throws IOException, InterruptedException {
+    int sum = 0;
+    for (IntWritable val : values) {
+      sum += val.get();
+    }
+    result.set(sum);
+    context.write(key, result);
+  }
+
+}
\ No newline at end of file
diff --git a/src/mapred/org/apache/hadoop/mapreduce/lib/reduce/LongSumReducer.java b/src/mapred/org/apache/hadoop/mapreduce/lib/reduce/LongSumReducer.java
new file mode 100644
index 0000000..19a45c6
--- /dev/null
+++ b/src/mapred/org/apache/hadoop/mapreduce/lib/reduce/LongSumReducer.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.lib.reduce;
+
+import java.io.IOException;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.mapreduce.Reducer;
+
+public class LongSumReducer<KEY> extends Reducer<KEY, LongWritable,
+                                                 KEY, LongWritable> {
+
+  private LongWritable result = new LongWritable();
+
+  public void reduce(KEY key, Iterable<LongWritable> values,
+                     Context context) throws IOException, InterruptedException {
+    long sum = 0;
+    for (LongWritable val : values) {
+      sum += val.get();
+    }
+    result.set(sum);
+    context.write(key, result);
+  }
+
+}
\ No newline at end of file
diff --git a/src/mapred/org/apache/hadoop/mapreduce/server/jobtracker/JobTrackerJspHelper.java b/src/mapred/org/apache/hadoop/mapreduce/server/jobtracker/JobTrackerJspHelper.java
new file mode 100644
index 0000000..173f578
--- /dev/null
+++ b/src/mapred/org/apache/hadoop/mapreduce/server/jobtracker/JobTrackerJspHelper.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.server.jobtracker;
+
+import java.io.IOException;
+import java.util.List;
+import java.text.DecimalFormat;
+
+import javax.servlet.jsp.JspWriter;
+import javax.servlet.http.*;
+
+import org.apache.hadoop.mapred.ClusterStatus;
+import org.apache.hadoop.mapred.JobInProgress;
+import org.apache.hadoop.mapred.JobProfile;
+import org.apache.hadoop.mapred.JobStatus;
+import org.apache.hadoop.mapred.JobTracker;
+import org.apache.hadoop.mapred.TaskTrackerStatus;
+import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.util.StringUtils;
+
+/**
+ * Methods to help format output for JobTracker XML JSPX
+ */
+public class JobTrackerJspHelper {
+
+  public JobTrackerJspHelper() {
+    percentFormat = new DecimalFormat("##0.00");
+  }
+
+  private DecimalFormat percentFormat;
+
+  /**
+   * Returns an XML-formatted table of the jobs in the list.
+   * This is called repeatedly for different lists of jobs (e.g., running, completed, failed).
+   */
+  public void generateJobTable(JspWriter out, String label, List<JobInProgress> jobs)
+      throws IOException {
+    if (jobs.size() > 0) {
+      for (JobInProgress job : jobs) {
+        JobProfile profile = job.getProfile();
+        JobStatus status = job.getStatus();
+        JobID jobid = profile.getJobID();
+
+        int desiredMaps = job.desiredMaps();
+        int desiredReduces = job.desiredReduces();
+        int completedMaps = job.finishedMaps();
+        int completedReduces = job.finishedReduces();
+        String name = profile.getJobName();
+
+        out.print("<" + label + "_job jobid=\"" + jobid + "\">\n");
+        out.print("  <jobid>" + jobid + "</jobid>\n");
+        out.print("  <user>" + profile.getUser() + "</user>\n");
+        out.print("  <name>" + ("".equals(name) ? " " : name) + "</name>\n");
+        out.print("  <map_complete>" + StringUtils.formatPercent(status.mapProgress(), 2) + "</map_complete>\n");
+        out.print("  <map_total>" + desiredMaps + "</map_total>\n");
+        out.print("  <maps_completed>" + completedMaps + "</maps_completed>\n");
+        out.print("  <reduce_complete>" + StringUtils.formatPercent(status.reduceProgress(), 2) + "</reduce_complete>\n");
+        out.print("  <reduce_total>" + desiredReduces + "</reduce_total>\n");
+        out.print("  <reduces_completed>" + completedReduces + "</reduces_completed>\n");
+        out.print("</" + label + "_job>\n");
+      }
+    }
+  }
+
+  /**
+   * Generates an XML-formatted block that summarizes the state of the JobTracker.
+   */
+  public void generateSummaryTable(JspWriter out,
+                                   JobTracker tracker) throws IOException {
+    ClusterStatus status = tracker.getClusterStatus();
+    int maxMapTasks = status.getMaxMapTasks();
+    int maxReduceTasks = status.getMaxReduceTasks();
+    int numTaskTrackers = status.getTaskTrackers();
+    String tasksPerNodeStr;
+    if (numTaskTrackers > 0) {
+      double tasksPerNodePct = (double) (maxMapTasks + maxReduceTasks) / (double) numTaskTrackers;
+      tasksPerNodeStr = percentFormat.format(tasksPerNodePct);
+    } else {
+      tasksPerNodeStr = "-";
+    }
+    out.print("<maps>" + status.getMapTasks() + "</maps>\n" +
+              "<reduces>" + status.getReduceTasks() + "</reduces>\n" +
+              "<total_submissions>" + tracker.getTotalSubmissions() + "</total_submissions>\n" +
+              "<nodes>" + status.getTaskTrackers() + "</nodes>\n" +
+              "<map_task_capacity>" + status.getMaxMapTasks() + "</map_task_capacity>\n" +
+              "<reduce_task_capacity>" + status.getMaxReduceTasks() + "</reduce_task_capacity>\n" +
+              "<avg_tasks_per_node>" + tasksPerNodeStr + "</avg_tasks_per_node>\n");
+  }
+}
diff --git a/src/mapred/org/apache/hadoop/mapreduce/server/jobtracker/TaskTracker.java b/src/mapred/org/apache/hadoop/mapreduce/server/jobtracker/TaskTracker.java
new file mode 100644
index 0000000..a259af6
--- /dev/null
+++ b/src/mapred/org/apache/hadoop/mapreduce/server/jobtracker/TaskTracker.java
@@ -0,0 +1,201 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapreduce.server.jobtracker; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.mapred.JobInProgress; +import org.apache.hadoop.mapred.JobTracker; +import org.apache.hadoop.mapred.TaskTrackerStatus; +import org.apache.hadoop.mapreduce.JobID; +import org.apache.hadoop.mapreduce.TaskType; + +/** + * The representation of a single TaskTracker as seen by + * the {@link JobTracker}. + */ +public class TaskTracker { + static final Log LOG = LogFactory.getLog(TaskTracker.class); + + final private String trackerName; + private TaskTrackerStatus status; + + private JobInProgress jobForFallowMapSlot; + private JobInProgress jobForFallowReduceSlot; + + /** + * Create a new {@link TaskTracker}. + * @param trackerName Unique identifier for the TaskTracker + */ + public TaskTracker(String trackerName) { + this.trackerName = trackerName; + } + + /** + * Get the unique identifier for the {@link TaskTracker} + * @return the unique identifier for the TaskTracker + */ + public String getTrackerName() { + return trackerName; + } + + /** + * Get the current {@link TaskTrackerStatus} of the TaskTracker. + * @return the current TaskTrackerStatus of the + * TaskTracker + */ + public TaskTrackerStatus getStatus() { + return status; + } + + /** + * Set the current {@link TaskTrackerStatus} of the TaskTracker. + * @param status the current TaskTrackerStatus of the + * TaskTracker + */ + public void setStatus(TaskTrackerStatus status) { + this.status = status; + } + + /** + * Get the number of currently available slots on this tasktracker for the + * given type of the task. + * @param taskType the {@link TaskType} to check for number of available slots + * @return the number of currently available slots for the given + * taskType + */ + public int getAvailableSlots(TaskType taskType) { + int availableSlots = 0; + if (taskType == TaskType.MAP) { + if (LOG.isDebugEnabled()) { + LOG.debug(trackerName + " getAvailSlots:" + + " max(m)=" + status.getMaxMapSlots() + + " occupied(m)=" + status.countOccupiedMapSlots()); + } + availableSlots = status.getAvailableMapSlots(); + } else { + if (LOG.isDebugEnabled()) { + LOG.debug(trackerName + " getAvailSlots:" + + " max(r)=" + status.getMaxReduceSlots() + + " occupied(r)=" + status.countOccupiedReduceSlots()); + } + availableSlots = status.getAvailableReduceSlots(); + } + return availableSlots; + } + + /** + * Get the {@link JobInProgress} for which the fallow slot(s) are held. + * @param taskType {@link TaskType} of the task + * @return the task for which the fallow slot(s) are held, + * null if there are no fallow slots + */ + public JobInProgress getJobForFallowSlot(TaskType taskType) { + return + (taskType == TaskType.MAP) ? jobForFallowMapSlot : jobForFallowReduceSlot; + } + + /** + * Reserve specified number of slots for a given job. 
+ * @param taskType {@link TaskType} of the task + * @param job the job for which slots on this TaskTracker + * are to be reserved + * @param numSlots number of slots to be reserved + */ + public void reserveSlots(TaskType taskType, JobInProgress job, int numSlots) { + JobID jobId = job.getJobID(); + if (taskType == TaskType.MAP) { + if (jobForFallowMapSlot != null && + !jobForFallowMapSlot.getJobID().equals(jobId)) { + throw new RuntimeException(trackerName + " already has " + + "slots reserved for " + + jobForFallowMapSlot + "; being" + + " asked to reserve " + numSlots + " for " + + jobId); + } + + jobForFallowMapSlot = job; + } else if (taskType == TaskType.REDUCE){ + if (jobForFallowReduceSlot != null && + !jobForFallowReduceSlot.getJobID().equals(jobId)) { + throw new RuntimeException(trackerName + " already has " + + "slots reserved for " + + jobForFallowReduceSlot + "; being" + + " asked to reserve " + numSlots + " for " + + jobId); + } + + jobForFallowReduceSlot = job; + } + + job.reserveTaskTracker(this, taskType, numSlots); + LOG.info(trackerName + ": Reserved " + numSlots + " " + taskType + + " slots for " + jobId); + } + + /** + * Free map slots on this TaskTracker which were reserved for + * taskType. + * @param taskType {@link TaskType} of the task + * @param job job whose slots are being un-reserved + */ + public void unreserveSlots(TaskType taskType, JobInProgress job) { + JobID jobId = job.getJobID(); + if (taskType == TaskType.MAP) { + if (jobForFallowMapSlot == null || + !jobForFallowMapSlot.getJobID().equals(jobId)) { + throw new RuntimeException(trackerName + " already has " + + "slots reserved for " + + jobForFallowMapSlot + "; being" + + " asked to un-reserve for " + jobId); + } + + jobForFallowMapSlot = null; + } else { + if (jobForFallowReduceSlot == null || + !jobForFallowReduceSlot.getJobID().equals(jobId)) { + throw new RuntimeException(trackerName + " already has " + + "slots reserved for " + + jobForFallowReduceSlot + "; being" + + " asked to un-reserve for " + jobId); + } + + jobForFallowReduceSlot = null; + } + + job.unreserveTaskTracker(this, taskType); + LOG.info(trackerName + ": Unreserved " + taskType + " slots for " + jobId); + } + + /** + * Cleanup when the {@link TaskTracker} is declared as 'lost/blacklisted' + * by the JobTracker. + * + * The method assumes that the lock on the {@link JobTracker} is obtained + * by the caller. + */ + public void cancelAllReservations() { + // Inform jobs which have reserved slots on this tasktracker + if (jobForFallowMapSlot != null) { + unreserveSlots(TaskType.MAP, jobForFallowMapSlot); + } + if (jobForFallowReduceSlot != null) { + unreserveSlots(TaskType.REDUCE, jobForFallowReduceSlot); + } + } +} diff --git a/src/native/.autom4te.cfg b/src/native/.autom4te.cfg new file mode 100644 index 0000000..a69c197 --- /dev/null +++ b/src/native/.autom4te.cfg @@ -0,0 +1,42 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +# autom4te configuration for hadoop-native library +# + +begin-language: "Autoheader-preselections" +args: --no-cache +end-language: "Autoheader-preselections" + +begin-language: "Automake-preselections" +args: --no-cache +end-language: "Automake-preselections" + +begin-language: "Autoreconf-preselections" +args: --no-cache +end-language: "Autoreconf-preselections" + +begin-language: "Autoconf-without-aclocal-m4" +args: --no-cache +end-language: "Autoconf-without-aclocal-m4" + +begin-language: "Autoconf" +args: --no-cache +end-language: "Autoconf" + diff --git a/src/native/AUTHORS b/src/native/AUTHORS new file mode 100644 index 0000000..173cbd1 --- /dev/null +++ b/src/native/AUTHORS @@ -0,0 +1,3 @@ +Arun C Murthy + * Initial version + diff --git a/src/native/COPYING b/src/native/COPYING new file mode 100644 index 0000000..59f6378 --- /dev/null +++ b/src/native/COPYING @@ -0,0 +1,54 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + + 1. You must give any other recipients of the Work or Derivative Works a copy of this License; and + + 2. You must cause any modified files to carry prominent notices stating that You changed the files; and + + 3. You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and + + 4. 
If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. + + You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. 
While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + diff --git a/src/native/ChangeLog b/src/native/ChangeLog new file mode 100644 index 0000000..86ab369 --- /dev/null +++ b/src/native/ChangeLog @@ -0,0 +1,3 @@ +2006-10-05 Arun C Murthy + * Initial version + diff --git a/src/native/INSTALL b/src/native/INSTALL new file mode 100644 index 0000000..23e5f25 --- /dev/null +++ b/src/native/INSTALL @@ -0,0 +1,236 @@ +Installation Instructions +************************* + +Copyright (C) 1994, 1995, 1996, 1999, 2000, 2001, 2002, 2004, 2005 Free +Software Foundation, Inc. + +This file is free documentation; the Free Software Foundation gives +unlimited permission to copy, distribute and modify it. + +Basic Installation +================== + +These are generic installation instructions. + + The `configure' shell script attempts to guess correct values for +various system-dependent variables used during compilation. It uses +those values to create a `Makefile' in each directory of the package. +It may also create one or more `.h' files containing system-dependent +definitions. Finally, it creates a shell script `config.status' that +you can run in the future to recreate the current configuration, and a +file `config.log' containing compiler output (useful mainly for +debugging `configure'). + + It can also use an optional file (typically called `config.cache' +and enabled with `--cache-file=config.cache' or simply `-C') that saves +the results of its tests to speed up reconfiguring. (Caching is +disabled by default to prevent problems with accidental use of stale +cache files.) + + If you need to do unusual things to compile the package, please try +to figure out how `configure' could check whether to do them, and mail +diffs or instructions to the address given in the `README' so they can +be considered for the next release. If you are using the cache, and at +some point `config.cache' contains results you don't want to keep, you +may remove or edit it. + + The file `configure.ac' (or `configure.in') is used to create +`configure' by a program called `autoconf'. You only need +`configure.ac' if you want to change it or regenerate `configure' using +a newer version of `autoconf'. + +The simplest way to compile this package is: + + 1. `cd' to the directory containing the package's source code and type + `./configure' to configure the package for your system. If you're + using `csh' on an old version of System V, you might need to type + `sh ./configure' instead to prevent `csh' from trying to execute + `configure' itself. + + Running `configure' takes awhile. While running, it prints some + messages telling which features it is checking for. + + 2. Type `make' to compile the package. + + 3. Optionally, type `make check' to run any self-tests that come with + the package. + + 4. Type `make install' to install the programs and any data files and + documentation. + + 5. 
You can remove the program binaries and object files from the + source code directory by typing `make clean'. To also remove the + files that `configure' created (so you can compile the package for + a different kind of computer), type `make distclean'. There is + also a `make maintainer-clean' target, but that is intended mainly + for the package's developers. If you use it, you may have to get + all sorts of other programs in order to regenerate files that came + with the distribution. + +Compilers and Options +===================== + +Some systems require unusual options for compilation or linking that the +`configure' script does not know about. Run `./configure --help' for +details on some of the pertinent environment variables. + + You can give `configure' initial values for configuration parameters +by setting variables in the command line or in the environment. Here +is an example: + + ./configure CC=c89 CFLAGS=-O2 LIBS=-lposix + + *Note Defining Variables::, for more details. + +Compiling For Multiple Architectures +==================================== + +You can compile the package for more than one kind of computer at the +same time, by placing the object files for each architecture in their +own directory. To do this, you must use a version of `make' that +supports the `VPATH' variable, such as GNU `make'. `cd' to the +directory where you want the object files and executables to go and run +the `configure' script. `configure' automatically checks for the +source code in the directory that `configure' is in and in `..'. + + If you have to use a `make' that does not support the `VPATH' +variable, you have to compile the package for one architecture at a +time in the source code directory. After you have installed the +package for one architecture, use `make distclean' before reconfiguring +for another architecture. + +Installation Names +================== + +By default, `make install' installs the package's commands under +`/usr/local/bin', include files under `/usr/local/include', etc. You +can specify an installation prefix other than `/usr/local' by giving +`configure' the option `--prefix=PREFIX'. + + You can specify separate installation prefixes for +architecture-specific files and architecture-independent files. If you +pass the option `--exec-prefix=PREFIX' to `configure', the package uses +PREFIX as the prefix for installing programs and libraries. +Documentation and other data files still use the regular prefix. + + In addition, if you use an unusual directory layout you can give +options like `--bindir=DIR' to specify different values for particular +kinds of files. Run `configure --help' for a list of the directories +you can set and what kinds of files go in them. + + If the package supports it, you can cause programs to be installed +with an extra prefix or suffix on their names by giving `configure' the +option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'. + +Optional Features +================= + +Some packages pay attention to `--enable-FEATURE' options to +`configure', where FEATURE indicates an optional part of the package. +They may also pay attention to `--with-PACKAGE' options, where PACKAGE +is something like `gnu-as' or `x' (for the X Window System). The +`README' should mention any `--enable-' and `--with-' options that the +package recognizes. 
+ + For packages that use the X Window System, `configure' can usually +find the X include and library files automatically, but if it doesn't, +you can use the `configure' options `--x-includes=DIR' and +`--x-libraries=DIR' to specify their locations. + +Specifying the System Type +========================== + +There may be some features `configure' cannot figure out automatically, +but needs to determine by the type of machine the package will run on. +Usually, assuming the package is built to be run on the _same_ +architectures, `configure' can figure that out, but if it prints a +message saying it cannot guess the machine type, give it the +`--build=TYPE' option. TYPE can either be a short name for the system +type, such as `sun4', or a canonical name which has the form: + + CPU-COMPANY-SYSTEM + +where SYSTEM can have one of these forms: + + OS KERNEL-OS + + See the file `config.sub' for the possible values of each field. If +`config.sub' isn't included in this package, then this package doesn't +need to know the machine type. + + If you are _building_ compiler tools for cross-compiling, you should +use the option `--target=TYPE' to select the type of system they will +produce code for. + + If you want to _use_ a cross compiler, that generates code for a +platform different from the build platform, you should specify the +"host" platform (i.e., that on which the generated programs will +eventually be run) with `--host=TYPE'. + +Sharing Defaults +================ + +If you want to set default values for `configure' scripts to share, you +can create a site shell script called `config.site' that gives default +values for variables like `CC', `cache_file', and `prefix'. +`configure' looks for `PREFIX/share/config.site' if it exists, then +`PREFIX/etc/config.site' if it exists. Or, you can set the +`CONFIG_SITE' environment variable to the location of the site script. +A warning: not all `configure' scripts look for a site script. + +Defining Variables +================== + +Variables not defined in a site shell script can be set in the +environment passed to `configure'. However, some packages may run +configure again during the build, and the customized values of these +variables may be lost. In order to avoid this problem, you should set +them in the `configure' command line, using `VAR=value'. For example: + + ./configure CC=/usr/local2/bin/gcc + +causes the specified `gcc' to be used as the C compiler (unless it is +overridden in the site shell script). Here is a another example: + + /bin/bash ./configure CONFIG_SHELL=/bin/bash + +Here the `CONFIG_SHELL=/bin/bash' operand causes subsequent +configuration-related scripts to be executed by `/bin/bash'. + +`configure' Invocation +====================== + +`configure' recognizes the following options to control how it operates. + +`--help' +`-h' + Print a summary of the options to `configure', and exit. + +`--version' +`-V' + Print the version of Autoconf used to generate the `configure' + script, and exit. + +`--cache-file=FILE' + Enable the cache: use and save the results of the tests in FILE, + traditionally `config.cache'. FILE defaults to `/dev/null' to + disable caching. + +`--config-cache' +`-C' + Alias for `--cache-file=config.cache'. + +`--quiet' +`--silent' +`-q' + Do not print messages saying which checks are being made. To + suppress all normal output, redirect it to `/dev/null' (any error + messages will still be shown). + +`--srcdir=DIR' + Look for the package's source code in directory DIR. 
Usually + `configure' can determine that directory automatically. + +`configure' also accepts some other, not widely useful, options. Run +`configure --help' for more details. + diff --git a/src/native/Makefile.am b/src/native/Makefile.am new file mode 100644 index 0000000..bc265b1 --- /dev/null +++ b/src/native/Makefile.am @@ -0,0 +1,46 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +# Top-level makefile template for native hadoop code +# + +# +# Notes: +# 1. This makefile is designed to do the actual builds in $(HADOOP_HOME)/build/native/${os.name}-${os-arch}. +# 2. This makefile depends on the following environment variables to function correctly: +# * HADOOP_NATIVE_SRCDIR +# * JAVA_HOME +# * JVM_DATA_MODEL +# * OS_NAME +# * OS_ARCH +# All these are setup by build.xml. +# + +# Export $(PLATFORM) to prevent proliferation of sub-shells +export PLATFORM = $(shell echo $$OS_NAME | tr [A-Z] [a-z]) + +# List the sub-directories here +SUBDIRS = src/org/apache/hadoop/io/compress/zlib src/org/apache/hadoop/io/compress/lzma lib + +# The following export is needed to build libhadoop.so in the 'lib' directory +export SUBDIRS + +# +#vim: sw=4: ts=4: noet +# diff --git a/src/native/Makefile.in b/src/native/Makefile.in new file mode 100644 index 0000000..3e95c57 --- /dev/null +++ b/src/native/Makefile.in @@ -0,0 +1,659 @@ +# Makefile.in generated by automake 1.9.6 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005 Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# +# Top-level makefile template for native hadoop code +# + +# +# Notes: +# 1. This makefile is designed to do the actual builds in $(HADOOP_HOME)/build/native/${os.name}-${os-arch}. +# 2. This makefile depends on the following environment variables to function correctly: +# * HADOOP_NATIVE_SRCDIR +# * JAVA_HOME +# * JVM_DATA_MODEL +# * OS_NAME +# * OS_ARCH +# All these are setup by build.xml. +# +srcdir = @srcdir@ +top_srcdir = @top_srcdir@ +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +top_builddir = . +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +INSTALL = @INSTALL@ +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +DIST_COMMON = README $(am__configure_deps) $(srcdir)/Makefile.am \ + $(srcdir)/Makefile.in $(srcdir)/config.h.in \ + $(top_srcdir)/configure AUTHORS COPYING ChangeLog INSTALL NEWS \ + config/config.guess config/config.sub config/depcomp \ + config/install-sh config/ltmain.sh config/missing +subdir = . +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/acinclude.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ + configure.lineno configure.status.lineno +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = config.h +CONFIG_CLEAN_FILES = +SOURCES = +DIST_SOURCES = +RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ + html-recursive info-recursive install-data-recursive \ + install-exec-recursive install-info-recursive \ + install-recursive installcheck-recursive installdirs-recursive \ + pdf-recursive ps-recursive uninstall-info-recursive \ + uninstall-recursive +ETAGS = etags +CTAGS = ctags +DIST_SUBDIRS = $(SUBDIRS) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +distdir = $(PACKAGE)-$(VERSION) +top_distdir = $(distdir) +am__remove_distdir = \ + { test ! -d $(distdir) \ + || { find $(distdir) -type d ! -perm -200 -exec chmod u+w {} ';' \ + && rm -fr $(distdir); }; } +DIST_ARCHIVES = $(distdir).tar.gz +GZIP_ENV = --best +distuninstallcheck_listfiles = find . -type f -print +distcleancheck_listfiles = find . 
-type f -print +ACLOCAL = @ACLOCAL@ +AMDEP_FALSE = @AMDEP_FALSE@ +AMDEP_TRUE = @AMDEP_TRUE@ +AMTAR = @AMTAR@ +AR = @AR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO = @ECHO@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +F77 = @F77@ +FFLAGS = @FFLAGS@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +JNI_CPPFLAGS = @JNI_CPPFLAGS@ +JNI_LDFLAGS = @JNI_LDFLAGS@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAKEINFO = @MAKEINFO@ +OBJEXT = @OBJEXT@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +VERSION = @VERSION@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_F77 = @ac_ct_F77@ +ac_ct_RANLIB = @ac_ct_RANLIB@ +ac_ct_STRIP = @ac_ct_STRIP@ +am__fastdepCC_FALSE = @am__fastdepCC_FALSE@ +am__fastdepCC_TRUE = @am__fastdepCC_TRUE@ +am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@ +am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +datadir = @datadir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ + +# List the sub-directories here +SUBDIRS = src/org/apache/hadoop/io/compress/zlib src/org/apache/hadoop/io/compress/lzma lib +all: config.h + $(MAKE) $(AM_MAKEFLAGS) all-recursive + +.SUFFIXES: +am--refresh: + @: +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + echo ' cd $(srcdir) && $(AUTOMAKE) --gnu '; \ + cd $(srcdir) && $(AUTOMAKE) --gnu \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --gnu Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + echo ' $(SHELL) ./config.status'; \ + $(SHELL) ./config.status;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + $(SHELL) ./config.status --recheck + +$(top_srcdir)/configure: $(am__configure_deps) + cd $(srcdir) && $(AUTOCONF) +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + cd $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) + +config.h: stamp-h1 + @if test ! -f $@; then \ + rm -f stamp-h1; \ + $(MAKE) stamp-h1; \ + else :; fi + +stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status + @rm -f stamp-h1 + cd $(top_builddir) && $(SHELL) ./config.status config.h +$(srcdir)/config.h.in: $(am__configure_deps) + cd $(top_srcdir) && $(AUTOHEADER) + rm -f stamp-h1 + touch $@ + +distclean-hdr: + -rm -f config.h stamp-h1 + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +distclean-libtool: + -rm -f libtool +uninstall-info-am: + +# This directory's subdirectories are mostly independent; you can cd +# into them and run `make' without going through this Makefile. +# To change the values of `make' variables: instead of editing Makefiles, +# (1) if the variable is set in `config.status', edit `config.status' +# (which will cause the Makefiles to be regenerated when you run `make'); +# (2) otherwise, pass the desired values on the `make' command line. +$(RECURSIVE_TARGETS): + @failcom='exit 1'; \ + for f in x $$MAKEFLAGS; do \ + case $$f in \ + *=* | --[!k]*);; \ + *k*) failcom='fail=yes';; \ + esac; \ + done; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +mostlyclean-recursive clean-recursive distclean-recursive \ +maintainer-clean-recursive: + @failcom='exit 1'; \ + for f in x $$MAKEFLAGS; do \ + case $$f in \ + *=* | --[!k]*);; \ + *k*) failcom='fail=yes';; \ + esac; \ + done; \ + dot_seen=no; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + rev=''; for subdir in $$list; do \ + if test "$$subdir" = "."; then :; else \ + rev="$$subdir $$rev"; \ + fi; \ + done; \ + rev="$$rev ."; \ + target=`echo $@ | sed s/-recursive//`; \ + for subdir in $$rev; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done && test -z "$$fail" +tags-recursive: + list='$(SUBDIRS)'; for subdir in $$list; do \ + test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ + done +ctags-recursive: + list='$(SUBDIRS)'; for subdir in $$list; do \ + test "$$subdir" = . 
|| (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ + done + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: tags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! -f $$subdir/TAGS || \ + tags="$$tags $$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$tags $$unique; \ + fi +ctags: CTAGS +CTAGS: ctags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + test -z "$(CTAGS_ARGS)$$tags$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$tags $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && cd $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) $$here + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + $(am__remove_distdir) + mkdir $(distdir) + $(mkdir_p) $(distdir)/config + @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \ + list='$(DISTFILES)'; for file in $$list; do \ + case $$file in \ + $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \ + $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \ + esac; \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test "$$dir" != "$$file" && test "$$dir" != "."; then \ + dir="/$$dir"; \ + $(mkdir_p) "$(distdir)$$dir"; \ + else \ + dir=''; \ + fi; \ + if test -d $$d/$$file; then \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ + fi; \ + cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ + else \ + test -f $(distdir)/$$file \ + || cp -p $$d/$$file $(distdir)/$$file \ + || exit 1; \ + fi; \ + done + list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test -d "$(distdir)/$$subdir" \ + || $(mkdir_p) "$(distdir)/$$subdir" \ + || exit 1; \ + distdir=`$(am__cd) $(distdir) && pwd`; \ + top_distdir=`$(am__cd) $(top_distdir) && pwd`; \ + (cd $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$top_distdir" \ + distdir="$$distdir/$$subdir" \ + distdir) \ + || exit 1; \ + fi; \ + done + -find $(distdir) -type d ! 
-perm -777 -exec chmod a+rwx {} \; -o \ + ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \ + ! -type d ! -perm -400 -exec chmod a+r {} \; -o \ + ! -type d ! -perm -444 -exec $(SHELL) $(install_sh) -c -m a+r {} {} \; \ + || chmod -R a+r $(distdir) +dist-gzip: distdir + tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz + $(am__remove_distdir) + +dist-bzip2: distdir + tardir=$(distdir) && $(am__tar) | bzip2 -9 -c >$(distdir).tar.bz2 + $(am__remove_distdir) + +dist-tarZ: distdir + tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z + $(am__remove_distdir) + +dist-shar: distdir + shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz + $(am__remove_distdir) + +dist-zip: distdir + -rm -f $(distdir).zip + zip -rq $(distdir).zip $(distdir) + $(am__remove_distdir) + +dist dist-all: distdir + tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz + $(am__remove_distdir) + +# This target untars the dist file and tries a VPATH configuration. Then +# it guarantees that the distribution is self-contained by making another +# tarfile. +distcheck: dist + case '$(DIST_ARCHIVES)' in \ + *.tar.gz*) \ + GZIP=$(GZIP_ENV) gunzip -c $(distdir).tar.gz | $(am__untar) ;;\ + *.tar.bz2*) \ + bunzip2 -c $(distdir).tar.bz2 | $(am__untar) ;;\ + *.tar.Z*) \ + uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ + *.shar.gz*) \ + GZIP=$(GZIP_ENV) gunzip -c $(distdir).shar.gz | unshar ;;\ + *.zip*) \ + unzip $(distdir).zip ;;\ + esac + chmod -R a-w $(distdir); chmod a+w $(distdir) + mkdir $(distdir)/_build + mkdir $(distdir)/_inst + chmod a-w $(distdir) + dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \ + && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \ + && cd $(distdir)/_build \ + && ../configure --srcdir=.. --prefix="$$dc_install_base" \ + $(DISTCHECK_CONFIGURE_FLAGS) \ + && $(MAKE) $(AM_MAKEFLAGS) \ + && $(MAKE) $(AM_MAKEFLAGS) dvi \ + && $(MAKE) $(AM_MAKEFLAGS) check \ + && $(MAKE) $(AM_MAKEFLAGS) install \ + && $(MAKE) $(AM_MAKEFLAGS) installcheck \ + && $(MAKE) $(AM_MAKEFLAGS) uninstall \ + && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \ + distuninstallcheck \ + && chmod -R a-w "$$dc_install_base" \ + && ({ \ + (cd ../.. && umask 077 && mkdir "$$dc_destdir") \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \ + distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \ + } || { rm -rf "$$dc_destdir"; exit 1; }) \ + && rm -rf "$$dc_destdir" \ + && $(MAKE) $(AM_MAKEFLAGS) dist \ + && rm -rf $(DIST_ARCHIVES) \ + && $(MAKE) $(AM_MAKEFLAGS) distcleancheck + $(am__remove_distdir) + @(echo "$(distdir) archives ready for distribution: "; \ + list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ + sed -e '1{h;s/./=/g;p;x;}' -e '$${p;x;}' +distuninstallcheck: + @cd $(distuninstallcheck_dir) \ + && test `$(distuninstallcheck_listfiles) | wc -l` -le 1 \ + || { echo "ERROR: files left after uninstall:" ; \ + if test -n "$(DESTDIR)"; then \ + echo " (check DESTDIR support)"; \ + fi ; \ + $(distuninstallcheck_listfiles) ; \ + exit 1; } >&2 +distcleancheck: distclean + @if test '$(srcdir)' = . 
; then \ + echo "ERROR: distcleancheck can only run from a VPATH build" ; \ + exit 1 ; \ + fi + @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \ + || { echo "ERROR: files left in build directory after distclean:" ; \ + $(distcleancheck_listfiles) ; \ + exit 1; } >&2 +check-am: all-am +check: check-recursive +all-am: Makefile config.h +installdirs: installdirs-recursive +installdirs-am: +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-recursive +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." +clean: clean-recursive + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-recursive + -rm -f $(am__CONFIG_DISTCLEAN_FILES) + -rm -f Makefile +distclean-am: clean-am distclean-generic distclean-hdr \ + distclean-libtool distclean-tags + +dvi: dvi-recursive + +dvi-am: + +html: html-recursive + +info: info-recursive + +info-am: + +install-data-am: + +install-exec-am: + +install-info: install-info-recursive + +install-man: + +installcheck-am: + +maintainer-clean: maintainer-clean-recursive + -rm -f $(am__CONFIG_DISTCLEAN_FILES) + -rm -rf $(top_srcdir)/autom4te.cache + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-recursive + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-recursive + +pdf-am: + +ps: ps-recursive + +ps-am: + +uninstall-am: uninstall-info-am + +uninstall-info: uninstall-info-recursive + +.PHONY: $(RECURSIVE_TARGETS) CTAGS GTAGS all all-am am--refresh check \ + check-am clean clean-generic clean-libtool clean-recursive \ + ctags ctags-recursive dist dist-all dist-bzip2 dist-gzip \ + dist-shar dist-tarZ dist-zip distcheck distclean \ + distclean-generic distclean-hdr distclean-libtool \ + distclean-recursive distclean-tags distcleancheck distdir \ + distuninstallcheck dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-exec \ + install-exec-am install-info install-info-am install-man \ + install-strip installcheck installcheck-am installdirs \ + installdirs-am maintainer-clean maintainer-clean-generic \ + maintainer-clean-recursive mostlyclean mostlyclean-generic \ + mostlyclean-libtool mostlyclean-recursive pdf pdf-am ps ps-am \ + tags tags-recursive uninstall uninstall-am uninstall-info-am + + +# Export $(PLATFORM) to prevent proliferation of sub-shells +export PLATFORM = $(shell echo $$OS_NAME | tr [A-Z] [a-z]) + +# The following export is needed to build libhadoop.so in the 'lib' directory +export SUBDIRS + +# +#vim: sw=4: ts=4: noet +# +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. 
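The Makefile.in fragment above is the standard Automake boilerplate for the libhadoop native build: recursive `make' over $(SUBDIRS), TAGS/ctags generation, the dist/distcheck packaging targets, the install/clean family, and the PLATFORM and SUBDIRS exports used to place libhadoop.so. As a rough sketch only (the build directory and prefix below are hypothetical, and in the full tree these steps are typically driven from the top-level build.xml rather than run by hand), the generated rules are exercised like this:

    # Illustrative VPATH build of the native library using the rules above.
    mkdir -p build && cd build
    ../configure --prefix="$HOME/hadoop-native"  # (re)creates Makefile via config.status
    make            # walks $(SUBDIRS) through the $(RECURSIVE_TARGETS) rule
    make install    # standard Automake install target
    make distcheck  # packs a tarball, rebuilds it in _build, installs into _inst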
+.NOEXPORT: diff --git a/src/native/NEWS b/src/native/NEWS new file mode 100644 index 0000000..f519a49 --- /dev/null +++ b/src/native/NEWS @@ -0,0 +1,5 @@ +2006-10-05 Arun C Murthy + * Initial version of libhadoop released + +2007-01-03 Arun C Murthy + * Added support for lzo compression library diff --git a/src/native/README b/src/native/README new file mode 100644 index 0000000..8c5af78 --- /dev/null +++ b/src/native/README @@ -0,0 +1,10 @@ +Package: libhadoop +Authors: Arun C Murthy + +MOTIVATION + +The libhadoop package contains the native code for hadoop (http://hadoop.apache.org/core). + +IMPROVEMENTS + +Any suggestions for improvements or patches should be sent to core-dev@hadoop.apache.org. Please go through http://wiki.apache.org/hadoop/HowToContribute for more information on how to contribute. diff --git a/src/native/acinclude.m4 b/src/native/acinclude.m4 new file mode 100644 index 0000000..d63469e --- /dev/null +++ b/src/native/acinclude.m4 @@ -0,0 +1,26 @@ +# AC_COMPUTE_NEEDED_DSO(LIBRARY, PREPROC_SYMBOL) +# -------------------------------------------------- +# Compute the 'actual' dynamic-library used +# for LIBRARY and set it to PREPROC_SYMBOL +AC_DEFUN([AC_COMPUTE_NEEDED_DSO], +[ +AC_CACHE_CHECK([Checking for the 'actual' dynamic-library for '-l$1'], ac_cv_libname_$1, + [ + echo 'int main(int argc, char **argv){return 0;}' > conftest.c + if test -z "`${CC} ${LDFLAGS} -o conftest conftest.c -l$1 2>&1`"; then + dnl Try objdump and ldd in that order to get the dynamic library + if test ! -z "`which objdump | grep -v 'no objdump'`"; then + ac_cv_libname_$1="`objdump -p conftest | grep NEEDED | grep $1 | sed 's/\W*NEEDED\W*\(.*\)\W*$/\"\1\"/'`" + elif test ! -z "`which ldd | grep -v 'no ldd'`"; then + ac_cv_libname_$1="`ldd conftest | grep $1 | sed 's/^[[[^A-Za-z0-9]]]*\([[[A-Za-z0-9\.]]]*\)[[[^A-Za-z0-9]]]*=>.*$/\"\1\"/'`" + else + AC_MSG_ERROR(Can't find either 'objdump' or 'ldd' to compute the dynamic library for '-l$1') + fi + else + ac_cv_libname_$1=libnotfound.so + fi + rm -f conftest* + ] +) +AC_DEFINE_UNQUOTED($2, ${ac_cv_libname_$1}, [The 'actual' dynamic-library for '-l$1']) +])# AC_COMPUTE_NEEDED_DSO diff --git a/src/native/aclocal.m4 b/src/native/aclocal.m4 new file mode 100644 index 0000000..e7a1ac8 --- /dev/null +++ b/src/native/aclocal.m4 @@ -0,0 +1,7250 @@ +# generated automatically by aclocal 1.9.6 -*- Autoconf -*- + +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, +# 2005 Free Software Foundation, Inc. +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +# libtool.m4 - Configure libtool for the host system. -*-Autoconf-*- + +# serial 48 AC_PROG_LIBTOOL + + +# AC_PROVIDE_IFELSE(MACRO-NAME, IF-PROVIDED, IF-NOT-PROVIDED) +# ----------------------------------------------------------- +# If this macro is not defined by Autoconf, define it here.
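acinclude.m4's AC_COMPUTE_NEEDED_DSO, shown above, links a throw-away program against -lLIBRARY and then extracts the NEEDED entry (the soname) with objdump, falling back to ldd; the result is AC_DEFINEd so the native code can later dlopen the right file at runtime. A by-hand equivalent, using zlib purely as an example library and `cc' as a stand-in compiler:

    # Mimic AC_COMPUTE_NEEDED_DSO for '-lz' outside of configure.
    echo 'int main(int argc, char **argv){return 0;}' > conftest.c
    cc -o conftest conftest.c -lz
    objdump -p conftest | awk '/NEEDED/ && /libz/ { print $2 }'   # e.g. libz.so.1
    rm -f conftest conftest.c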
+m4_ifdef([AC_PROVIDE_IFELSE], + [], + [m4_define([AC_PROVIDE_IFELSE], + [m4_ifdef([AC_PROVIDE_$1], + [$2], [$3])])]) + + +# AC_PROG_LIBTOOL +# --------------- +AC_DEFUN([AC_PROG_LIBTOOL], +[AC_REQUIRE([_AC_PROG_LIBTOOL])dnl +dnl If AC_PROG_CXX has already been expanded, run AC_LIBTOOL_CXX +dnl immediately, otherwise, hook it in at the end of AC_PROG_CXX. + AC_PROVIDE_IFELSE([AC_PROG_CXX], + [AC_LIBTOOL_CXX], + [define([AC_PROG_CXX], defn([AC_PROG_CXX])[AC_LIBTOOL_CXX + ])]) +dnl And a similar setup for Fortran 77 support + AC_PROVIDE_IFELSE([AC_PROG_F77], + [AC_LIBTOOL_F77], + [define([AC_PROG_F77], defn([AC_PROG_F77])[AC_LIBTOOL_F77 +])]) + +dnl Quote A][M_PROG_GCJ so that aclocal doesn't bring it in needlessly. +dnl If either AC_PROG_GCJ or A][M_PROG_GCJ have already been expanded, run +dnl AC_LIBTOOL_GCJ immediately, otherwise, hook it in at the end of both. + AC_PROVIDE_IFELSE([AC_PROG_GCJ], + [AC_LIBTOOL_GCJ], + [AC_PROVIDE_IFELSE([A][M_PROG_GCJ], + [AC_LIBTOOL_GCJ], + [AC_PROVIDE_IFELSE([LT_AC_PROG_GCJ], + [AC_LIBTOOL_GCJ], + [ifdef([AC_PROG_GCJ], + [define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[AC_LIBTOOL_GCJ])]) + ifdef([A][M_PROG_GCJ], + [define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[AC_LIBTOOL_GCJ])]) + ifdef([LT_AC_PROG_GCJ], + [define([LT_AC_PROG_GCJ], + defn([LT_AC_PROG_GCJ])[AC_LIBTOOL_GCJ])])])]) +])])# AC_PROG_LIBTOOL + + +# _AC_PROG_LIBTOOL +# ---------------- +AC_DEFUN([_AC_PROG_LIBTOOL], +[AC_REQUIRE([AC_LIBTOOL_SETUP])dnl +AC_BEFORE([$0],[AC_LIBTOOL_CXX])dnl +AC_BEFORE([$0],[AC_LIBTOOL_F77])dnl +AC_BEFORE([$0],[AC_LIBTOOL_GCJ])dnl + +# This can be used to rebuild libtool when needed +LIBTOOL_DEPS="$ac_aux_dir/ltmain.sh" + +# Always use our own libtool. +LIBTOOL='$(SHELL) $(top_builddir)/libtool' +AC_SUBST(LIBTOOL)dnl + +# Prevent multiple expansion +define([AC_PROG_LIBTOOL], []) +])# _AC_PROG_LIBTOOL + + +# AC_LIBTOOL_SETUP +# ---------------- +AC_DEFUN([AC_LIBTOOL_SETUP], +[AC_PREREQ(2.50)dnl +AC_REQUIRE([AC_ENABLE_SHARED])dnl +AC_REQUIRE([AC_ENABLE_STATIC])dnl +AC_REQUIRE([AC_ENABLE_FAST_INSTALL])dnl +AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_CANONICAL_BUILD])dnl +AC_REQUIRE([AC_PROG_CC])dnl +AC_REQUIRE([AC_PROG_LD])dnl +AC_REQUIRE([AC_PROG_LD_RELOAD_FLAG])dnl +AC_REQUIRE([AC_PROG_NM])dnl + +AC_REQUIRE([AC_PROG_LN_S])dnl +AC_REQUIRE([AC_DEPLIBS_CHECK_METHOD])dnl +# Autoconf 2.13's AC_OBJEXT and AC_EXEEXT macros only works for C compilers! +AC_REQUIRE([AC_OBJEXT])dnl +AC_REQUIRE([AC_EXEEXT])dnl +dnl + +AC_LIBTOOL_SYS_MAX_CMD_LEN +AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE +AC_LIBTOOL_OBJDIR + +AC_REQUIRE([_LT_AC_SYS_COMPILER])dnl +_LT_AC_PROG_ECHO_BACKSLASH + +case $host_os in +aix3*) + # AIX sometimes has problems with the GCC collect2 program. For some + # reason, if we set the COLLECT_NAMES environment variable, the problems + # vanish in a puff of smoke. + if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES + fi + ;; +esac + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +Xsed='sed -e 1s/^X//' +[sed_quote_subst='s/\([\\"\\`$\\\\]\)/\\\1/g'] + +# Same as above, but do not quote variable references. +[double_quote_subst='s/\([\\"\\`\\\\]\)/\\\1/g'] + +# Sed substitution to delay expansion of an escaped shell variable in a +# double_quote_subst'ed string. 
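The sed_quote_subst/double_quote_subst expressions defined above exist so AC_LIBTOOL_SETUP can re-quote arbitrary command strings when it writes out the libtool script. A quick way to see what the first substitution does (the input string here is arbitrary):

    # Escape the characters that stay special inside double quotes: $ ` " \
    printf '%s\n' 'name="$USER"' | sed 's/\([\\"\\`$\\\\]\)/\\\1/g'
    # prints: name=\"\$USER\"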
+delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' + +# Sed substitution to avoid accidental globbing in evaled expressions +no_glob_subst='s/\*/\\\*/g' + +# Constants: +rm="rm -f" + +# Global variables: +default_ofile=libtool +can_build_shared=yes + +# All known linkers require a `.a' archive for static linking (except MSVC, +# which needs '.lib'). +libext=a +ltmain="$ac_aux_dir/ltmain.sh" +ofile="$default_ofile" +with_gnu_ld="$lt_cv_prog_gnu_ld" + +AC_CHECK_TOOL(AR, ar, false) +AC_CHECK_TOOL(RANLIB, ranlib, :) +AC_CHECK_TOOL(STRIP, strip, :) + +old_CC="$CC" +old_CFLAGS="$CFLAGS" + +# Set sane defaults for various variables +test -z "$AR" && AR=ar +test -z "$AR_FLAGS" && AR_FLAGS=cru +test -z "$AS" && AS=as +test -z "$CC" && CC=cc +test -z "$LTCC" && LTCC=$CC +test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS +test -z "$DLLTOOL" && DLLTOOL=dlltool +test -z "$LD" && LD=ld +test -z "$LN_S" && LN_S="ln -s" +test -z "$MAGIC_CMD" && MAGIC_CMD=file +test -z "$NM" && NM=nm +test -z "$SED" && SED=sed +test -z "$OBJDUMP" && OBJDUMP=objdump +test -z "$RANLIB" && RANLIB=: +test -z "$STRIP" && STRIP=: +test -z "$ac_objext" && ac_objext=o + +# Determine commands to create old-style static archives. +old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs$old_deplibs' +old_postinstall_cmds='chmod 644 $oldlib' +old_postuninstall_cmds= + +if test -n "$RANLIB"; then + case $host_os in + openbsd*) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$oldlib" + ;; + *) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$oldlib" + ;; + esac + old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib" +fi + +_LT_CC_BASENAME([$compiler]) + +# Only perform the check for file, if the check method requires it +case $deplibs_check_method in +file_magic*) + if test "$file_magic_cmd" = '$MAGIC_CMD'; then + AC_PATH_MAGIC + fi + ;; +esac + +AC_PROVIDE_IFELSE([AC_LIBTOOL_DLOPEN], enable_dlopen=yes, enable_dlopen=no) +AC_PROVIDE_IFELSE([AC_LIBTOOL_WIN32_DLL], +enable_win32_dll=yes, enable_win32_dll=no) + +AC_ARG_ENABLE([libtool-lock], + [AC_HELP_STRING([--disable-libtool-lock], + [avoid locking (might break parallel builds)])]) +test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes + +AC_ARG_WITH([pic], + [AC_HELP_STRING([--with-pic], + [try to use only PIC/non-PIC objects @<:@default=use both@:>@])], + [pic_mode="$withval"], + [pic_mode=default]) +test -z "$pic_mode" && pic_mode=default + +# Use C for the default configuration in the libtool script +tagname= +AC_LIBTOOL_LANG_C_CONFIG +_LT_AC_TAGCONFIG +])# AC_LIBTOOL_SETUP + + +# _LT_AC_SYS_COMPILER +# ------------------- +AC_DEFUN([_LT_AC_SYS_COMPILER], +[AC_REQUIRE([AC_PROG_CC])dnl + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC +])# _LT_AC_SYS_COMPILER + + +# _LT_CC_BASENAME(CC) +# ------------------- +# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. +AC_DEFUN([_LT_CC_BASENAME], +[for cc_temp in $1""; do + case $cc_temp in + compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;; + distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` +]) + + +# _LT_COMPILER_BOILERPLATE +# ------------------------ +# Check for compiler boilerplate output or warnings with +# the simple compiler test code. 
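Among the defaults set above, old_archive_cmds and old_postinstall_cmds describe how libtool produces and installs static archives ($AR $AR_FLAGS ... followed by $RANLIB). Expanded by hand, with illustrative file names, they come down to:

    # What the old_archive_cmds / old_postinstall_cmds defaults expand to.
    ar cru libexample.a foo.o bar.o   # AR=ar, AR_FLAGS=cru, $oldlib $oldobjs
    ranlib libexample.a               # RANLIB may be ':' (a no-op) where unneeded
    chmod 644 libexample.a            # old_postinstall_cmds at install time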
+AC_DEFUN([_LT_COMPILER_BOILERPLATE], +[ac_outfile=conftest.$ac_objext +printf "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$rm conftest* +])# _LT_COMPILER_BOILERPLATE + + +# _LT_LINKER_BOILERPLATE +# ---------------------- +# Check for linker boilerplate output or warnings with +# the simple link test code. +AC_DEFUN([_LT_LINKER_BOILERPLATE], +[ac_outfile=conftest.$ac_objext +printf "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$rm conftest* +])# _LT_LINKER_BOILERPLATE + + +# _LT_AC_SYS_LIBPATH_AIX +# ---------------------- +# Links a minimal program and checks the executable +# for the system default hardcoded library path. In most cases, +# this is /usr/lib:/lib, but when the MPI compilers are used +# the location of the communication and MPI libs are included too. +# If we don't find anything, use the default library path according +# to the aix ld manual. +AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX], +[AC_LINK_IFELSE(AC_LANG_PROGRAM,[ +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. +if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi],[]) +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi +])# _LT_AC_SYS_LIBPATH_AIX + + +# _LT_AC_SHELL_INIT(ARG) +# ---------------------- +AC_DEFUN([_LT_AC_SHELL_INIT], +[ifdef([AC_DIVERSION_NOTICE], + [AC_DIVERT_PUSH(AC_DIVERSION_NOTICE)], + [AC_DIVERT_PUSH(NOTICE)]) +$1 +AC_DIVERT_POP +])# _LT_AC_SHELL_INIT + + +# _LT_AC_PROG_ECHO_BACKSLASH +# -------------------------- +# Add some code to the start of the generated configure script which +# will find an echo command which doesn't interpret backslashes. +AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH], +[_LT_AC_SHELL_INIT([ +# Check that we are running under the correct shell. +SHELL=${CONFIG_SHELL-/bin/sh} + +case X$ECHO in +X*--fallback-echo) + # Remove one level of quotation (which was required for Make). + ECHO=`echo "$ECHO" | sed 's,\\\\\[$]\\[$]0,'[$]0','` + ;; +esac + +echo=${ECHO-echo} +if test "X[$]1" = X--no-reexec; then + # Discard the --no-reexec flag, and continue. + shift +elif test "X[$]1" = X--fallback-echo; then + # Avoid inline document here, it may be left over + : +elif test "X`($echo '\t') 2>/dev/null`" = 'X\t' ; then + # Yippee, $echo works! + : +else + # Restart under the correct shell. + exec $SHELL "[$]0" --no-reexec ${1+"[$]@"} +fi + +if test "X[$]1" = X--fallback-echo; then + # used as fallback echo + shift + cat </dev/null 2>&1 && unset CDPATH + +if test -z "$ECHO"; then +if test "X${echo_test_string+set}" != Xset; then +# find a string as large as possible, as long as the shell can cope with it + for cmd in 'sed 50q "[$]0"' 'sed 20q "[$]0"' 'sed 10q "[$]0"' 'sed 2q "[$]0"' 'echo test'; do + # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ... 
+ if (echo_test_string=`eval $cmd`) 2>/dev/null && + echo_test_string=`eval $cmd` && + (test "X$echo_test_string" = "X$echo_test_string") 2>/dev/null + then + break + fi + done +fi + +if test "X`($echo '\t') 2>/dev/null`" = 'X\t' && + echo_testing_string=`($echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + : +else + # The Solaris, AIX, and Digital Unix default echo programs unquote + # backslashes. This makes it impossible to quote backslashes using + # echo "$something" | sed 's/\\/\\\\/g' + # + # So, first we look for a working echo in the user's PATH. + + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for dir in $PATH /usr/ucb; do + IFS="$lt_save_ifs" + if (test -f $dir/echo || test -f $dir/echo$ac_exeext) && + test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' && + echo_testing_string=`($dir/echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + echo="$dir/echo" + break + fi + done + IFS="$lt_save_ifs" + + if test "X$echo" = Xecho; then + # We didn't find a better echo, so look for alternatives. + if test "X`(print -r '\t') 2>/dev/null`" = 'X\t' && + echo_testing_string=`(print -r "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + # This shell has a builtin print -r that does the trick. + echo='print -r' + elif (test -f /bin/ksh || test -f /bin/ksh$ac_exeext) && + test "X$CONFIG_SHELL" != X/bin/ksh; then + # If we have ksh, try running configure again with it. + ORIGINAL_CONFIG_SHELL=${CONFIG_SHELL-/bin/sh} + export ORIGINAL_CONFIG_SHELL + CONFIG_SHELL=/bin/ksh + export CONFIG_SHELL + exec $CONFIG_SHELL "[$]0" --no-reexec ${1+"[$]@"} + else + # Try using printf. + echo='printf %s\n' + if test "X`($echo '\t') 2>/dev/null`" = 'X\t' && + echo_testing_string=`($echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + # Cool, printf works + : + elif echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` && + test "X$echo_testing_string" = 'X\t' && + echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + CONFIG_SHELL=$ORIGINAL_CONFIG_SHELL + export CONFIG_SHELL + SHELL="$CONFIG_SHELL" + export SHELL + echo="$CONFIG_SHELL [$]0 --fallback-echo" + elif echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` && + test "X$echo_testing_string" = 'X\t' && + echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + echo="$CONFIG_SHELL [$]0 --fallback-echo" + else + # maybe with a smaller string... + prev=: + + for cmd in 'echo test' 'sed 2q "[$]0"' 'sed 10q "[$]0"' 'sed 20q "[$]0"' 'sed 50q "[$]0"'; do + if (test "X$echo_test_string" = "X`eval $cmd`") 2>/dev/null + then + break + fi + prev="$cmd" + done + + if test "$prev" != 'sed 50q "[$]0"'; then + echo_test_string=`eval $prev` + export echo_test_string + exec ${ORIGINAL_CONFIG_SHELL-${CONFIG_SHELL-/bin/sh}} "[$]0" ${1+"[$]@"} + else + # Oops. We lost completely, so just stick with echo. + echo=echo + fi + fi + fi + fi +fi +fi + +# Copy echo and quote the copy suitably for passing to libtool from +# the Makefile, instead of quoting the original, which is used later. 
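The long _LT_AC_PROG_ECHO_BACKSLASH search above is all in service of one property: libtool needs an echo that does not interpret backslashes, and it will fall back to `print -r', printf, or re-executing itself under ksh to get one. The probe itself is small:

    # A usable echo must print '\t' as the two characters backslash-t,
    # not expand them to a tab.
    if test "X`(echo '\t') 2>/dev/null`" = 'X\t'; then
      echo "echo preserves backslashes"
    else
      printf '%s\n' "fall back to 'print -r', printf, or another shell"
    fi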
+ECHO=$echo +if test "X$ECHO" = "X$CONFIG_SHELL [$]0 --fallback-echo"; then + ECHO="$CONFIG_SHELL \\\$\[$]0 --fallback-echo" +fi + +AC_SUBST(ECHO) +])])# _LT_AC_PROG_ECHO_BACKSLASH + + +# _LT_AC_LOCK +# ----------- +AC_DEFUN([_LT_AC_LOCK], +[AC_ARG_ENABLE([libtool-lock], + [AC_HELP_STRING([--disable-libtool-lock], + [avoid locking (might break parallel builds)])]) +test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes + +# Some flags need to be propagated to the compiler or linker for good +# libtool support. +case $host in +ia64-*-hpux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.$ac_objext` in + *ELF-32*) + HPUX_IA64_MODE="32" + ;; + *ELF-64*) + HPUX_IA64_MODE="64" + ;; + esac + fi + rm -rf conftest* + ;; +*-*-irix6*) + # Find out which ABI we are using. + echo '[#]line __oline__ "configure"' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + if test "$lt_cv_prog_gnu_ld" = yes; then + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -melf32bsmip" + ;; + *N32*) + LD="${LD-ld} -melf32bmipn32" + ;; + *64-bit*) + LD="${LD-ld} -melf64bmip" + ;; + esac + else + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -32" + ;; + *N32*) + LD="${LD-ld} -n32" + ;; + *64-bit*) + LD="${LD-ld} -64" + ;; + esac + fi + fi + rm -rf conftest* + ;; + +x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*|s390*-*linux*|sparc*-*linux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.o` in + *32-bit*) + case $host in + x86_64-*linux*) + LD="${LD-ld} -m elf_i386" + ;; + ppc64-*linux*|powerpc64-*linux*) + LD="${LD-ld} -m elf32ppclinux" + ;; + s390x-*linux*) + LD="${LD-ld} -m elf_s390" + ;; + sparc64-*linux*) + LD="${LD-ld} -m elf32_sparc" + ;; + esac + ;; + *64-bit*) + case $host in + x86_64-*linux*) + LD="${LD-ld} -m elf_x86_64" + ;; + ppc*-*linux*|powerpc*-*linux*) + LD="${LD-ld} -m elf64ppc" + ;; + s390*-*linux*) + LD="${LD-ld} -m elf64_s390" + ;; + sparc*-*linux*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; + +*-*-sco3.2v5*) + # On SCO OpenServer 5, we need -belf to get full-featured binaries. + SAVE_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -belf" + AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf, + [AC_LANG_PUSH(C) + AC_TRY_LINK([],[],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no]) + AC_LANG_POP]) + if test x"$lt_cv_cc_needs_belf" != x"yes"; then + # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf + CFLAGS="$SAVE_CFLAGS" + fi + ;; +sparc*-*solaris*) + # Find out which ABI we are using. 
+ echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.o` in + *64-bit*) + case $lt_cv_prog_gnu_ld in + yes*) LD="${LD-ld} -m elf64_sparc" ;; + *) LD="${LD-ld} -64" ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; + +AC_PROVIDE_IFELSE([AC_LIBTOOL_WIN32_DLL], +[*-*-cygwin* | *-*-mingw* | *-*-pw32*) + AC_CHECK_TOOL(DLLTOOL, dlltool, false) + AC_CHECK_TOOL(AS, as, false) + AC_CHECK_TOOL(OBJDUMP, objdump, false) + ;; + ]) +esac + +need_locks="$enable_libtool_lock" + +])# _LT_AC_LOCK + + +# AC_LIBTOOL_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, +# [OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE]) +# ---------------------------------------------------------------- +# Check whether the given compiler option works +AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], +[AC_REQUIRE([LT_AC_PROG_SED]) +AC_CACHE_CHECK([$1], [$2], + [$2=no + ifelse([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4]) + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$3" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:__oline__: $lt_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&AS_MESSAGE_LOG_FD + echo "$as_me:__oline__: \$? = $ac_status" >&AS_MESSAGE_LOG_FD + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + $2=yes + fi + fi + $rm conftest* +]) + +if test x"[$]$2" = xyes; then + ifelse([$5], , :, [$5]) +else + ifelse([$6], , :, [$6]) +fi +])# AC_LIBTOOL_COMPILER_OPTION + + +# AC_LIBTOOL_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, +# [ACTION-SUCCESS], [ACTION-FAILURE]) +# ------------------------------------------------------------ +# Check whether the given compiler option works +AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], +[AC_CACHE_CHECK([$1], [$2], + [$2=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $3" + printf "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. 
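AC_LIBTOOL_COMPILER_OPTION and AC_LIBTOOL_LINKER_OPTION, quoted above, both follow the same pattern: try the candidate flag on a trivial input and call it unsupported if the exit status is bad or anything beyond the usual boilerplate shows up on stderr. A simplified stand-alone sketch of that pattern (the flag and `cc' are only examples, and the real macros also subtract the compiler's normal boilerplate output before deciding):

    echo 'int main(void){return 0;}' > conftest.c
    if cc -fPIC -c conftest.c 2>conftest.err && test ! -s conftest.err; then
      echo "flag accepted"
    else
      echo "flag rejected or produced warnings"
    fi
    rm -f conftest.c conftest.o conftest.err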
+ cat conftest.err 1>&AS_MESSAGE_LOG_FD + $echo "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + $2=yes + fi + else + $2=yes + fi + fi + $rm conftest* + LDFLAGS="$save_LDFLAGS" +]) + +if test x"[$]$2" = xyes; then + ifelse([$4], , :, [$4]) +else + ifelse([$5], , :, [$5]) +fi +])# AC_LIBTOOL_LINKER_OPTION + + +# AC_LIBTOOL_SYS_MAX_CMD_LEN +# -------------------------- +AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], +[# find the maximum length of command line arguments +AC_MSG_CHECKING([the maximum length of command line arguments]) +AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl + i=0 + teststring="ABCD" + + case $build_os in + msdosdjgpp*) + # On DJGPP, this test can blow up pretty badly due to problems in libc + # (any single argument exceeding 2000 bytes causes a buffer overrun + # during glob expansion). Even if it were fixed, the result of this + # check would be larger than it should be. + lt_cv_sys_max_cmd_len=12288; # 12K is about right + ;; + + gnu*) + # Under GNU Hurd, this test is not required because there is + # no limit to the length of command line arguments. + # Libtool will interpret -1 as no limit whatsoever + lt_cv_sys_max_cmd_len=-1; + ;; + + cygwin* | mingw*) + # On Win9x/ME, this test blows up -- it succeeds, but takes + # about 5 minutes as the teststring grows exponentially. + # Worse, since 9x/ME are not pre-emptively multitasking, + # you end up with a "frozen" computer, even though with patience + # the test eventually succeeds (with a max line length of 256k). + # Instead, let's just punt: use the minimum linelength reported by + # all of the supported platforms: 8192 (on NT/2K/XP). + lt_cv_sys_max_cmd_len=8192; + ;; + + amigaos*) + # On AmigaOS with pdksh, this test takes hours, literally. + # So we just punt and use a minimum line length of 8192. + lt_cv_sys_max_cmd_len=8192; + ;; + + netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) + # This has been around since 386BSD, at least. Likely further. + if test -x /sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` + elif test -x /usr/sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` + else + lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs + fi + # And add a safety zone + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + ;; + + interix*) + # We know the value 262144 and hardcode it with a safety zone (like BSD) + lt_cv_sys_max_cmd_len=196608 + ;; + + osf*) + # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure + # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not + # nice to cause kernel panics so lets avoid the loop below. + # First set a reasonable default. + lt_cv_sys_max_cmd_len=16384 + # + if test -x /sbin/sysconfig; then + case `/sbin/sysconfig -q proc exec_disable_arg_limit` in + *1*) lt_cv_sys_max_cmd_len=-1 ;; + esac + fi + ;; + sco3.2v5*) + lt_cv_sys_max_cmd_len=102400 + ;; + sysv5* | sco5v6* | sysv4.2uw2*) + kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` + if test -n "$kargmax"; then + lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[[ ]]//'` + else + lt_cv_sys_max_cmd_len=32768 + fi + ;; + *) + # If test is not a shell built-in, we'll probably end up computing a + # maximum length that is only half of the actual maximum length, but + # we can't tell. 
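AC_LIBTOOL_SYS_MAX_CMD_LEN, above, mostly hardcodes per-OS command-line limits and only measures by repeatedly doubling a test string on platforms it does not recognize. The real limit it approximates can usually be read directly (which command exists depends on the platform):

    getconf ARG_MAX              # POSIX systems, including Linux
    /sbin/sysctl -n kern.argmax  # the BSD/Darwin probe used in the branch above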
+ SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} + while (test "X"`$SHELL [$]0 --fallback-echo "X$teststring" 2>/dev/null` \ + = "XX$teststring") >/dev/null 2>&1 && + new_result=`expr "X$teststring" : ".*" 2>&1` && + lt_cv_sys_max_cmd_len=$new_result && + test $i != 17 # 1/2 MB should be enough + do + i=`expr $i + 1` + teststring=$teststring$teststring + done + teststring= + # Add a significant safety factor because C++ compilers can tack on massive + # amounts of additional arguments before passing them to the linker. + # It appears as though 1/2 is a usable value. + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` + ;; + esac +]) +if test -n $lt_cv_sys_max_cmd_len ; then + AC_MSG_RESULT($lt_cv_sys_max_cmd_len) +else + AC_MSG_RESULT(none) +fi +])# AC_LIBTOOL_SYS_MAX_CMD_LEN + + +# _LT_AC_CHECK_DLFCN +# ------------------ +AC_DEFUN([_LT_AC_CHECK_DLFCN], +[AC_CHECK_HEADERS(dlfcn.h)dnl +])# _LT_AC_CHECK_DLFCN + + +# _LT_AC_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE, +# ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING) +# --------------------------------------------------------------------- +AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF], +[AC_REQUIRE([_LT_AC_CHECK_DLFCN])dnl +if test "$cross_compiling" = yes; then : + [$4] +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext < +#endif + +#include + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. */ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +#ifdef __cplusplus +extern "C" void exit (int); +#endif + +void fnord() { int i=42;} +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + /* dlclose (self); */ + } + else + puts (dlerror ()); + + exit (status); +}] +EOF + if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null + lt_status=$? 
+ case x$lt_status in + x$lt_dlno_uscore) $1 ;; + x$lt_dlneed_uscore) $2 ;; + x$lt_dlunknown|x*) $3 ;; + esac + else : + # compilation failed + $3 + fi +fi +rm -fr conftest* +])# _LT_AC_TRY_DLOPEN_SELF + + +# AC_LIBTOOL_DLOPEN_SELF +# ---------------------- +AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], +[AC_REQUIRE([_LT_AC_CHECK_DLFCN])dnl +if test "x$enable_dlopen" != xyes; then + enable_dlopen=unknown + enable_dlopen_self=unknown + enable_dlopen_self_static=unknown +else + lt_cv_dlopen=no + lt_cv_dlopen_libs= + + case $host_os in + beos*) + lt_cv_dlopen="load_add_on" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ;; + + mingw* | pw32*) + lt_cv_dlopen="LoadLibrary" + lt_cv_dlopen_libs= + ;; + + cygwin*) + lt_cv_dlopen="dlopen" + lt_cv_dlopen_libs= + ;; + + darwin*) + # if libdl is installed we need to link against it + AC_CHECK_LIB([dl], [dlopen], + [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"],[ + lt_cv_dlopen="dyld" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ]) + ;; + + *) + AC_CHECK_FUNC([shl_load], + [lt_cv_dlopen="shl_load"], + [AC_CHECK_LIB([dld], [shl_load], + [lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-dld"], + [AC_CHECK_FUNC([dlopen], + [lt_cv_dlopen="dlopen"], + [AC_CHECK_LIB([dl], [dlopen], + [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"], + [AC_CHECK_LIB([svld], [dlopen], + [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"], + [AC_CHECK_LIB([dld], [dld_link], + [lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-dld"]) + ]) + ]) + ]) + ]) + ]) + ;; + esac + + if test "x$lt_cv_dlopen" != xno; then + enable_dlopen=yes + else + enable_dlopen=no + fi + + case $lt_cv_dlopen in + dlopen) + save_CPPFLAGS="$CPPFLAGS" + test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" + + save_LDFLAGS="$LDFLAGS" + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" + + save_LIBS="$LIBS" + LIBS="$lt_cv_dlopen_libs $LIBS" + + AC_CACHE_CHECK([whether a program can dlopen itself], + lt_cv_dlopen_self, [dnl + _LT_AC_TRY_DLOPEN_SELF( + lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes, + lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross) + ]) + + if test "x$lt_cv_dlopen_self" = xyes; then + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" + AC_CACHE_CHECK([whether a statically linked program can dlopen itself], + lt_cv_dlopen_self_static, [dnl + _LT_AC_TRY_DLOPEN_SELF( + lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes, + lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross) + ]) + fi + + CPPFLAGS="$save_CPPFLAGS" + LDFLAGS="$save_LDFLAGS" + LIBS="$save_LIBS" + ;; + esac + + case $lt_cv_dlopen_self in + yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; + *) enable_dlopen_self=unknown ;; + esac + + case $lt_cv_dlopen_self_static in + yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; + *) enable_dlopen_self_static=unknown ;; + esac +fi +])# AC_LIBTOOL_DLOPEN_SELF + + +# AC_LIBTOOL_PROG_CC_C_O([TAGNAME]) +# --------------------------------- +# Check to see if options -c and -o are simultaneously supported by compiler +AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O], +[AC_REQUIRE([_LT_AC_SYS_COMPILER])dnl +AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext], + [_LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1)], + [_LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no + $rm -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word 
containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:__oline__: $lt_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&AS_MESSAGE_LOG_FD + echo "$as_me:__oline__: \$? = $ac_status" >&AS_MESSAGE_LOG_FD + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + _LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes + fi + fi + chmod u+w . 2>&AS_MESSAGE_LOG_FD + $rm conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $rm out/ii_files/* && rmdir out/ii_files + $rm out/* && rmdir out + cd .. + rmdir conftest + $rm conftest* +]) +])# AC_LIBTOOL_PROG_CC_C_O + + +# AC_LIBTOOL_SYS_HARD_LINK_LOCKS([TAGNAME]) +# ----------------------------------------- +# Check to see if we can do hard links to lock some files if needed +AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], +[AC_REQUIRE([_LT_AC_LOCK])dnl + +hard_links="nottested" +if test "$_LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1)" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + AC_MSG_CHECKING([if we can lock with hard links]) + hard_links=yes + $rm conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + AC_MSG_RESULT([$hard_links]) + if test "$hard_links" = no; then + AC_MSG_WARN([`$CC' does not support `-c -o', so `make -j' may be unsafe]) + need_locks=warn + fi +else + need_locks=no +fi +])# AC_LIBTOOL_SYS_HARD_LINK_LOCKS + + +# AC_LIBTOOL_OBJDIR +# ----------------- +AC_DEFUN([AC_LIBTOOL_OBJDIR], +[AC_CACHE_CHECK([for objdir], [lt_cv_objdir], +[rm -f .libs 2>/dev/null +mkdir .libs 2>/dev/null +if test -d .libs; then + lt_cv_objdir=.libs +else + # MS-DOS does not allow filenames that begin with a dot. + lt_cv_objdir=_libs +fi +rmdir .libs 2>/dev/null]) +objdir=$lt_cv_objdir +])# AC_LIBTOOL_OBJDIR + + +# AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH([TAGNAME]) +# ---------------------------------------------- +# Check hardcoding attributes. +AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], +[AC_MSG_CHECKING([how to hardcode library paths into programs]) +_LT_AC_TAGVAR(hardcode_action, $1)= +if test -n "$_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)" || \ + test -n "$_LT_AC_TAGVAR(runpath_var, $1)" || \ + test "X$_LT_AC_TAGVAR(hardcode_automatic, $1)" = "Xyes" ; then + + # We can hardcode non-existant directories. 
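AC_LIBTOOL_SYS_HARD_LINK_LOCKS, shown a little earlier, only matters when the compiler cannot handle `-c' and `-o' together: in that case libtool serializes compiles by taking hard-link locks, so the macro first checks that hard links behave sanely. A simplified version of that probe:

    rm -f conftest.a conftest.b
    touch conftest.a
    if ln conftest.a conftest.b 2>/dev/null; then
      echo "hard links work; 'make -j' can be locked safely"
    fi
    rm -f conftest.a conftest.b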
+ if test "$_LT_AC_TAGVAR(hardcode_direct, $1)" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, $1)" != no && + test "$_LT_AC_TAGVAR(hardcode_minus_L, $1)" != no; then + # Linking always hardcodes the temporary library directory. + _LT_AC_TAGVAR(hardcode_action, $1)=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + _LT_AC_TAGVAR(hardcode_action, $1)=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + _LT_AC_TAGVAR(hardcode_action, $1)=unsupported +fi +AC_MSG_RESULT([$_LT_AC_TAGVAR(hardcode_action, $1)]) + +if test "$_LT_AC_TAGVAR(hardcode_action, $1)" = relink; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi +])# AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH + + +# AC_LIBTOOL_SYS_LIB_STRIP +# ------------------------ +AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP], +[striplib= +old_striplib= +AC_MSG_CHECKING([whether stripping libraries is possible]) +if test -n "$STRIP" && $STRIP -V 2>&1 | grep "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + AC_MSG_RESULT([yes]) +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP" ; then + striplib="$STRIP -x" + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) +fi + ;; + *) + AC_MSG_RESULT([no]) + ;; + esac +fi +])# AC_LIBTOOL_SYS_LIB_STRIP + + +# AC_LIBTOOL_SYS_DYNAMIC_LINKER +# ----------------------------- +# PORTME Fill in your ld.so characteristics +AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER], +[AC_MSG_CHECKING([dynamic linker characteristics]) +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';' >/dev/null ; then + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. 
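AC_LIBTOOL_SYS_DYNAMIC_LINKER starts, above, by asking the compiler where it searches for libraries via `-print-search-dirs'. The probe is easy to reproduce by hand (gcc is assumed here; the output format differs between compilers and versions):

    gcc -print-search-dirs | sed -n 's/^libraries: *=*//p' | tr ':' '\n'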
+ soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix4* | aix5*) + version_type=linux + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[[01]] | aix4.[[01]].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | grep yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. + finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[[45]]*) + version_type=linux + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$host_os in + yes,cygwin* | yes,mingw* | yes,pw32*) + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. 
$dir/'\''\${base_file}'\''i;echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $rm \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" + ;; + mingw*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | [grep ';[c-zC-Z]:/' >/dev/null]; then + # It is most probably a Windows format PATH printed by + # mingw gcc, but we are running on Cygwin. Gcc prints its search + # path with ; separators, and with drive letters. We can handle the + # drive letters (cygwin fileutils understands them), so leave them, + # especially as we might pass files found there to a mingw objdump, + # which wouldn't understand a cygwinified path. Ahh. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + ;; + esac + ;; + + *) + library_names_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext} $libname.lib' + ;; + esac + dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${versuffix}$shared_ext ${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' + # Apple's gcc prints 'gcc -print-search-dirs' doesn't operate the same. 
+ if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"` + else + sys_lib_search_path_spec='/lib /usr/lib /usr/local/lib' + fi + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd1*) + dynamic_linker=no + ;; + +kfreebsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. + if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[[123]]*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[[01]]* | freebsdelf3.[[01]]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[[2-9]]* | freebsdelf3.[[2-9]]* | \ + freebsd4.[[0-5]] | freebsdelf4.[[0-5]] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + freebsd*) # from 4.6 on + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +gnu*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555. + postinstall_cmds='chmod 555 $lib' + ;; + +interix3*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be Linux ELF. 
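Throughout this case statement, library_names_spec and soname_spec describe the on-disk naming scheme the dynamic linker expects; before the Linux ELF branch that follows, it may help to see what those specs correspond to on a typical ELF system (libz is only a convenient example, and the library path varies by distribution):

    ls -l /usr/lib/libz.so*                      # e.g. libz.so -> libz.so.1 -> libz.so.1.2.11
    objdump -p /usr/lib/libz.so.1 | grep SONAME  # the soname recorded in the library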
+linux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # find out which ABI we are using + libsuff= + case "$host_cpu" in + x86_64*|s390x*|powerpc64*) + echo '[#]line __oline__ "configure"' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.$ac_objext` in + *64-bit*) + libsuff=64 + sys_lib_search_path_spec="/lib${libsuff} /usr/lib${libsuff} /usr/local/lib${libsuff}" + ;; + esac + fi + rm -rf conftest* + ;; + esac + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib${libsuff} /usr/lib${libsuff} $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. + dynamic_linker='GNU/Linux ld.so' + ;; + +knetbsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +nto-qnx*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +openbsd*) + version_type=sunos + sys_lib_dlsearch_path_spec="/usr/lib" + need_lib_prefix=no + # Some older versions of OpenBSD (3.3 at least) *do* 
need versioned libs. + case $host_os in + openbsd3.3 | openbsd3.3.*) need_version=yes ;; + *) need_version=no ;; + esac + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[[89]] | openbsd2.[[89]].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +solaris*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + export_dynamic_flag_spec='${wl}-Blargedynsym' + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=freebsd-elf + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + if test "$with_gnu_ld" = yes; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + shlibpath_overrides_runpath=no + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + shlibpath_overrides_runpath=yes 
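[Editorial note, not part of the patch] The library_names_spec and soname_spec strings used throughout these per-OS cases are templates that libtool expands for each library it builds. A small illustrative expansion for a typical ELF platform, using hypothetical values (libhdfs built with -version-info 0:0:0):

    # Hypothetical expansion of the ELF-style specs above.
    libname=libhdfs; release=; shared_ext=.so
    major=.0; versuffix=.0.0.0
    library_names="${libname}${release}${shared_ext}${versuffix} ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}"
    soname="${libname}${release}${shared_ext}${major}"
    echo "$library_names"   # libhdfs.so.0.0.0 libhdfs.so.0 libhdfs.so
    echo "$soname"          # libhdfs.so.0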
+ case $host_os in + sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +uts4*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +AC_MSG_RESULT([$dynamic_linker]) +test "$dynamic_linker" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi +])# AC_LIBTOOL_SYS_DYNAMIC_LINKER + + +# _LT_AC_TAGCONFIG +# ---------------- +AC_DEFUN([_LT_AC_TAGCONFIG], +[AC_ARG_WITH([tags], + [AC_HELP_STRING([--with-tags@<:@=TAGS@:>@], + [include additional configurations @<:@automatic@:>@])], + [tagnames="$withval"]) + +if test -f "$ltmain" && test -n "$tagnames"; then + if test ! -f "${ofile}"; then + AC_MSG_WARN([output file `$ofile' does not exist]) + fi + + if test -z "$LTCC"; then + eval "`$SHELL ${ofile} --config | grep '^LTCC='`" + if test -z "$LTCC"; then + AC_MSG_WARN([output file `$ofile' does not look like a libtool script]) + else + AC_MSG_WARN([using `LTCC=$LTCC', extracted from `$ofile']) + fi + fi + if test -z "$LTCFLAGS"; then + eval "`$SHELL ${ofile} --config | grep '^LTCFLAGS='`" + fi + + # Extract list of available tagged configurations in $ofile. + # Note that this assumes the entire list is on one line. + available_tags=`grep "^available_tags=" "${ofile}" | $SED -e 's/available_tags=\(.*$\)/\1/' -e 's/\"//g'` + + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for tagname in $tagnames; do + IFS="$lt_save_ifs" + # Check whether tagname contains only valid characters + case `$echo "X$tagname" | $Xsed -e 's:[[-_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890,/]]::g'` in + "") ;; + *) AC_MSG_ERROR([invalid tag name: $tagname]) + ;; + esac + + if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$" < "${ofile}" > /dev/null + then + AC_MSG_ERROR([tag name \"$tagname\" already exists]) + fi + + # Update the list of available tags. + if test -n "$tagname"; then + echo appending configuration tag \"$tagname\" to $ofile + + case $tagname in + CXX) + if test -n "$CXX" && ( test "X$CXX" != "Xno" && + ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || + (test "X$CXX" != "Xg++"))) ; then + AC_LIBTOOL_LANG_CXX_CONFIG + else + tagname="" + fi + ;; + + F77) + if test -n "$F77" && test "X$F77" != "Xno"; then + AC_LIBTOOL_LANG_F77_CONFIG + else + tagname="" + fi + ;; + + GCJ) + if test -n "$GCJ" && test "X$GCJ" != "Xno"; then + AC_LIBTOOL_LANG_GCJ_CONFIG + else + tagname="" + fi + ;; + + RC) + AC_LIBTOOL_LANG_RC_CONFIG + ;; + + *) + AC_MSG_ERROR([Unsupported tag name: $tagname]) + ;; + esac + + # Append the new tag name to the list of available tags. + if test -n "$tagname" ; then + available_tags="$available_tags $tagname" + fi + fi + done + IFS="$lt_save_ifs" + + # Now substitute the updated list of available tags. 
+ if eval "sed -e 's/^available_tags=.*\$/available_tags=\"$available_tags\"/' \"$ofile\" > \"${ofile}T\""; then + mv "${ofile}T" "$ofile" + chmod +x "$ofile" + else + rm -f "${ofile}T" + AC_MSG_ERROR([unable to update list of available tagged configurations.]) + fi +fi +])# _LT_AC_TAGCONFIG + + +# AC_LIBTOOL_DLOPEN +# ----------------- +# enable checks for dlopen support +AC_DEFUN([AC_LIBTOOL_DLOPEN], + [AC_BEFORE([$0],[AC_LIBTOOL_SETUP]) +])# AC_LIBTOOL_DLOPEN + + +# AC_LIBTOOL_WIN32_DLL +# -------------------- +# declare package support for building win32 DLLs +AC_DEFUN([AC_LIBTOOL_WIN32_DLL], +[AC_BEFORE([$0], [AC_LIBTOOL_SETUP]) +])# AC_LIBTOOL_WIN32_DLL + + +# AC_ENABLE_SHARED([DEFAULT]) +# --------------------------- +# implement the --enable-shared flag +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. +AC_DEFUN([AC_ENABLE_SHARED], +[define([AC_ENABLE_SHARED_DEFAULT], ifelse($1, no, no, yes))dnl +AC_ARG_ENABLE([shared], + [AC_HELP_STRING([--enable-shared@<:@=PKGS@:>@], + [build shared libraries @<:@default=]AC_ENABLE_SHARED_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_shared=yes ;; + no) enable_shared=no ;; + *) + enable_shared=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_shared=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [enable_shared=]AC_ENABLE_SHARED_DEFAULT) +])# AC_ENABLE_SHARED + + +# AC_DISABLE_SHARED +# ----------------- +# set the default shared flag to --disable-shared +AC_DEFUN([AC_DISABLE_SHARED], +[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl +AC_ENABLE_SHARED(no) +])# AC_DISABLE_SHARED + + +# AC_ENABLE_STATIC([DEFAULT]) +# --------------------------- +# implement the --enable-static flag +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. +AC_DEFUN([AC_ENABLE_STATIC], +[define([AC_ENABLE_STATIC_DEFAULT], ifelse($1, no, no, yes))dnl +AC_ARG_ENABLE([static], + [AC_HELP_STRING([--enable-static@<:@=PKGS@:>@], + [build static libraries @<:@default=]AC_ENABLE_STATIC_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_static=yes ;; + no) enable_static=no ;; + *) + enable_static=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_static=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [enable_static=]AC_ENABLE_STATIC_DEFAULT) +])# AC_ENABLE_STATIC + + +# AC_DISABLE_STATIC +# ----------------- +# set the default static flag to --disable-static +AC_DEFUN([AC_DISABLE_STATIC], +[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl +AC_ENABLE_STATIC(no) +])# AC_DISABLE_STATIC + + +# AC_ENABLE_FAST_INSTALL([DEFAULT]) +# --------------------------------- +# implement the --enable-fast-install flag +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. +AC_DEFUN([AC_ENABLE_FAST_INSTALL], +[define([AC_ENABLE_FAST_INSTALL_DEFAULT], ifelse($1, no, no, yes))dnl +AC_ARG_ENABLE([fast-install], + [AC_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@], + [optimize for fast installation @<:@default=]AC_ENABLE_FAST_INSTALL_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_fast_install=yes ;; + no) enable_fast_install=no ;; + *) + enable_fast_install=no + # Look at the argument we got. We use all the common list separators. 
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_fast_install=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [enable_fast_install=]AC_ENABLE_FAST_INSTALL_DEFAULT) +])# AC_ENABLE_FAST_INSTALL + + +# AC_DISABLE_FAST_INSTALL +# ----------------------- +# set the default to --disable-fast-install +AC_DEFUN([AC_DISABLE_FAST_INSTALL], +[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl +AC_ENABLE_FAST_INSTALL(no) +])# AC_DISABLE_FAST_INSTALL + + +# AC_LIBTOOL_PICMODE([MODE]) +# -------------------------- +# implement the --with-pic flag +# MODE is either `yes' or `no'. If omitted, it defaults to `both'. +AC_DEFUN([AC_LIBTOOL_PICMODE], +[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl +pic_mode=ifelse($#,1,$1,default) +])# AC_LIBTOOL_PICMODE + + +# AC_PROG_EGREP +# ------------- +# This is predefined starting with Autoconf 2.54, so this conditional +# definition can be removed once we require Autoconf 2.54 or later. +m4_ifndef([AC_PROG_EGREP], [AC_DEFUN([AC_PROG_EGREP], +[AC_CACHE_CHECK([for egrep], [ac_cv_prog_egrep], + [if echo a | (grep -E '(a|b)') >/dev/null 2>&1 + then ac_cv_prog_egrep='grep -E' + else ac_cv_prog_egrep='egrep' + fi]) + EGREP=$ac_cv_prog_egrep + AC_SUBST([EGREP]) +])]) + + +# AC_PATH_TOOL_PREFIX +# ------------------- +# find a file program which can recognise shared library +AC_DEFUN([AC_PATH_TOOL_PREFIX], +[AC_REQUIRE([AC_PROG_EGREP])dnl +AC_MSG_CHECKING([for $1]) +AC_CACHE_VAL(lt_cv_path_MAGIC_CMD, +[case $MAGIC_CMD in +[[\\/*] | ?:[\\/]*]) + lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD="$MAGIC_CMD" + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR +dnl $ac_dummy forces splitting on constant user-supplied paths. +dnl POSIX.2 word splitting is done only on the output of word expansions, +dnl not every word. This closes a longstanding sh security hole. + ac_dummy="ifelse([$2], , $PATH, [$2])" + for ac_dir in $ac_dummy; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f $ac_dir/$1; then + lt_cv_path_MAGIC_CMD="$ac_dir/$1" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. 
Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +EOF + fi ;; + esac + fi + break + fi + done + IFS="$lt_save_ifs" + MAGIC_CMD="$lt_save_MAGIC_CMD" + ;; +esac]) +MAGIC_CMD="$lt_cv_path_MAGIC_CMD" +if test -n "$MAGIC_CMD"; then + AC_MSG_RESULT($MAGIC_CMD) +else + AC_MSG_RESULT(no) +fi +])# AC_PATH_TOOL_PREFIX + + +# AC_PATH_MAGIC +# ------------- +# find a file program which can recognise a shared library +AC_DEFUN([AC_PATH_MAGIC], +[AC_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH) +if test -z "$lt_cv_path_MAGIC_CMD"; then + if test -n "$ac_tool_prefix"; then + AC_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH) + else + MAGIC_CMD=: + fi +fi +])# AC_PATH_MAGIC + + +# AC_PROG_LD +# ---------- +# find the pathname to the GNU or non-GNU linker +AC_DEFUN([AC_PROG_LD], +[AC_ARG_WITH([gnu-ld], + [AC_HELP_STRING([--with-gnu-ld], + [assume the C compiler uses GNU ld @<:@default=no@:>@])], + [test "$withval" = no || with_gnu_ld=yes], + [with_gnu_ld=no]) +AC_REQUIRE([LT_AC_PROG_SED])dnl +AC_REQUIRE([AC_PROG_CC])dnl +AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_CANONICAL_BUILD])dnl +ac_prog=ld +if test "$GCC" = yes; then + # Check if gcc -print-prog-name=ld gives a path. + AC_MSG_CHECKING([for ld used by $CC]) + case $host in + *-*-mingw*) + # gcc leaves a trailing carriage return which upsets mingw + ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; + esac + case $ac_prog in + # Accept absolute paths. + [[\\/]]* | ?:[[\\/]]*) + re_direlt='/[[^/]][[^/]]*/\.\./' + # Canonicalize the pathname of ld + ac_prog=`echo $ac_prog| $SED 's%\\\\%/%g'` + while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do + ac_prog=`echo $ac_prog| $SED "s%$re_direlt%/%"` + done + test -z "$LD" && LD="$ac_prog" + ;; + "") + # If it fails, then pretend we aren't using GCC. + ac_prog=ld + ;; + *) + # If it is relative, then search for the first ld in PATH. + with_gnu_ld=unknown + ;; + esac +elif test "$with_gnu_ld" = yes; then + AC_MSG_CHECKING([for GNU ld]) +else + AC_MSG_CHECKING([for non-GNU ld]) +fi +AC_CACHE_VAL(lt_cv_path_LD, +[if test -z "$LD"; then + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then + lt_cv_path_LD="$ac_dir/$ac_prog" + # Check to see if the program is GNU ld. I'd rather use --version, + # but apparently some variants of GNU ld only accept -v. + # Break only if it was the GNU/non-GNU ld that we prefer. + case `"$lt_cv_path_LD" -v 2>&1 &1 /dev/null; then + case $host_cpu in + i*86 ) + # Not sure whether the presence of OpenBSD here was a mistake. + # Let's accept both of them until this is cleared up. 
+ lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` + ;; + esac + else + lt_cv_deplibs_check_method=pass_all + fi + ;; + +gnu*) + lt_cv_deplibs_check_method=pass_all + ;; + +hpux10.20* | hpux11*) + lt_cv_file_magic_cmd=/usr/bin/file + case $host_cpu in + ia64*) + lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64' + lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so + ;; + hppa*64*) + [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - PA-RISC [0-9].[0-9]'] + lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl + ;; + *) + lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]].[[0-9]]) shared library' + lt_cv_file_magic_test_file=/usr/lib/libc.sl + ;; + esac + ;; + +interix3*) + # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|\.a)$' + ;; + +irix5* | irix6* | nonstopux*) + case $LD in + *-32|*"-32 ") libmagic=32-bit;; + *-n32|*"-n32 ") libmagic=N32;; + *-64|*"-64 ") libmagic=64-bit;; + *) libmagic=never-match;; + esac + lt_cv_deplibs_check_method=pass_all + ;; + +# This must be Linux ELF. +linux*) + lt_cv_deplibs_check_method=pass_all + ;; + +netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ > /dev/null; then + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$' + fi + ;; + +newos6*) + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=/usr/lib/libnls.so + ;; + +nto-qnx*) + lt_cv_deplibs_check_method=unknown + ;; + +openbsd*) + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' + fi + ;; + +osf3* | osf4* | osf5*) + lt_cv_deplibs_check_method=pass_all + ;; + +solaris*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv4 | sysv4.3*) + case $host_vendor in + motorola) + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]' + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` + ;; + ncr) + lt_cv_deplibs_check_method=pass_all + ;; + sequent) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )' + ;; + sni) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib" + lt_cv_file_magic_test_file=/lib/libc.so + ;; + siemens) + lt_cv_deplibs_check_method=pass_all + ;; + pc) + lt_cv_deplibs_check_method=pass_all + ;; + esac + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + lt_cv_deplibs_check_method=pass_all + ;; +esac +]) +file_magic_cmd=$lt_cv_file_magic_cmd +deplibs_check_method=$lt_cv_deplibs_check_method +test -z "$deplibs_check_method" && deplibs_check_method=unknown +])# AC_DEPLIBS_CHECK_METHOD + + +# AC_PROG_NM +# ---------- +# find the pathname to a BSD-compatible name lister +AC_DEFUN([AC_PROG_NM], 
+[AC_CACHE_CHECK([for BSD-compatible nm], lt_cv_path_NM, +[if test -n "$NM"; then + # Let the user override the test. + lt_cv_path_NM="$NM" +else + lt_nm_to_check="${ac_tool_prefix}nm" + if test -n "$ac_tool_prefix" && test "$build" = "$host"; then + lt_nm_to_check="$lt_nm_to_check nm" + fi + for lt_tmp_nm in $lt_nm_to_check; do + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + tmp_nm="$ac_dir/$lt_tmp_nm" + if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then + # Check to see if the nm accepts a BSD-compat flag. + # Adding the `sed 1q' prevents false positives on HP-UX, which says: + # nm: unknown option "B" ignored + # Tru64's nm complains that /dev/null is an invalid object file + case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in + */dev/null* | *'Invalid file or object type'*) + lt_cv_path_NM="$tmp_nm -B" + break + ;; + *) + case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in + */dev/null*) + lt_cv_path_NM="$tmp_nm -p" + break + ;; + *) + lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but + continue # so that we can try to find one that supports BSD flags + ;; + esac + ;; + esac + fi + done + IFS="$lt_save_ifs" + done + test -z "$lt_cv_path_NM" && lt_cv_path_NM=nm +fi]) +NM="$lt_cv_path_NM" +])# AC_PROG_NM + + +# AC_CHECK_LIBM +# ------------- +# check for math library +AC_DEFUN([AC_CHECK_LIBM], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +LIBM= +case $host in +*-*-beos* | *-*-cygwin* | *-*-pw32* | *-*-darwin*) + # These system don't have libm, or don't need it + ;; +*-ncr-sysv4.3*) + AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM="-lmw") + AC_CHECK_LIB(m, cos, LIBM="$LIBM -lm") + ;; +*) + AC_CHECK_LIB(m, cos, LIBM="-lm") + ;; +esac +])# AC_CHECK_LIBM + + +# AC_LIBLTDL_CONVENIENCE([DIRECTORY]) +# ----------------------------------- +# sets LIBLTDL to the link flags for the libltdl convenience library and +# LTDLINCL to the include flags for the libltdl header and adds +# --enable-ltdl-convenience to the configure arguments. Note that +# AC_CONFIG_SUBDIRS is not called here. If DIRECTORY is not provided, +# it is assumed to be `libltdl'. LIBLTDL will be prefixed with +# '${top_builddir}/' and LTDLINCL will be prefixed with '${top_srcdir}/' +# (note the single quotes!). If your package is not flat and you're not +# using automake, define top_builddir and top_srcdir appropriately in +# the Makefiles. +AC_DEFUN([AC_LIBLTDL_CONVENIENCE], +[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl + case $enable_ltdl_convenience in + no) AC_MSG_ERROR([this package needs a convenience libltdl]) ;; + "") enable_ltdl_convenience=yes + ac_configure_args="$ac_configure_args --enable-ltdl-convenience" ;; + esac + LIBLTDL='${top_builddir}/'ifelse($#,1,[$1],['libltdl'])/libltdlc.la + LTDLINCL='-I${top_srcdir}/'ifelse($#,1,[$1],['libltdl']) + # For backwards non-gettext consistent compatibility... + INCLTDL="$LTDLINCL" +])# AC_LIBLTDL_CONVENIENCE + + +# AC_LIBLTDL_INSTALLABLE([DIRECTORY]) +# ----------------------------------- +# sets LIBLTDL to the link flags for the libltdl installable library and +# LTDLINCL to the include flags for the libltdl header and adds +# --enable-ltdl-install to the configure arguments. Note that +# AC_CONFIG_SUBDIRS is not called here. If DIRECTORY is not provided, +# and an installed libltdl is not found, it is assumed to be `libltdl'. +# LIBLTDL will be prefixed with '${top_builddir}/'# and LTDLINCL with +# '${top_srcdir}/' (note the single quotes!). 
If your package is not +# flat and you're not using automake, define top_builddir and top_srcdir +# appropriately in the Makefiles. +# In the future, this macro may have to be called after AC_PROG_LIBTOOL. +AC_DEFUN([AC_LIBLTDL_INSTALLABLE], +[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl + AC_CHECK_LIB(ltdl, lt_dlinit, + [test x"$enable_ltdl_install" != xyes && enable_ltdl_install=no], + [if test x"$enable_ltdl_install" = xno; then + AC_MSG_WARN([libltdl not installed, but installation disabled]) + else + enable_ltdl_install=yes + fi + ]) + if test x"$enable_ltdl_install" = x"yes"; then + ac_configure_args="$ac_configure_args --enable-ltdl-install" + LIBLTDL='${top_builddir}/'ifelse($#,1,[$1],['libltdl'])/libltdl.la + LTDLINCL='-I${top_srcdir}/'ifelse($#,1,[$1],['libltdl']) + else + ac_configure_args="$ac_configure_args --enable-ltdl-install=no" + LIBLTDL="-lltdl" + LTDLINCL= + fi + # For backwards non-gettext consistent compatibility... + INCLTDL="$LTDLINCL" +])# AC_LIBLTDL_INSTALLABLE + + +# AC_LIBTOOL_CXX +# -------------- +# enable support for C++ libraries +AC_DEFUN([AC_LIBTOOL_CXX], +[AC_REQUIRE([_LT_AC_LANG_CXX]) +])# AC_LIBTOOL_CXX + + +# _LT_AC_LANG_CXX +# --------------- +AC_DEFUN([_LT_AC_LANG_CXX], +[AC_REQUIRE([AC_PROG_CXX]) +AC_REQUIRE([_LT_AC_PROG_CXXCPP]) +_LT_AC_SHELL_INIT([tagnames=${tagnames+${tagnames},}CXX]) +])# _LT_AC_LANG_CXX + +# _LT_AC_PROG_CXXCPP +# ------------------ +AC_DEFUN([_LT_AC_PROG_CXXCPP], +[ +AC_REQUIRE([AC_PROG_CXX]) +if test -n "$CXX" && ( test "X$CXX" != "Xno" && + ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || + (test "X$CXX" != "Xg++"))) ; then + AC_PROG_CXXCPP +fi +])# _LT_AC_PROG_CXXCPP + +# AC_LIBTOOL_F77 +# -------------- +# enable support for Fortran 77 libraries +AC_DEFUN([AC_LIBTOOL_F77], +[AC_REQUIRE([_LT_AC_LANG_F77]) +])# AC_LIBTOOL_F77 + + +# _LT_AC_LANG_F77 +# --------------- +AC_DEFUN([_LT_AC_LANG_F77], +[AC_REQUIRE([AC_PROG_F77]) +_LT_AC_SHELL_INIT([tagnames=${tagnames+${tagnames},}F77]) +])# _LT_AC_LANG_F77 + + +# AC_LIBTOOL_GCJ +# -------------- +# enable support for GCJ libraries +AC_DEFUN([AC_LIBTOOL_GCJ], +[AC_REQUIRE([_LT_AC_LANG_GCJ]) +])# AC_LIBTOOL_GCJ + + +# _LT_AC_LANG_GCJ +# --------------- +AC_DEFUN([_LT_AC_LANG_GCJ], +[AC_PROVIDE_IFELSE([AC_PROG_GCJ],[], + [AC_PROVIDE_IFELSE([A][M_PROG_GCJ],[], + [AC_PROVIDE_IFELSE([LT_AC_PROG_GCJ],[], + [ifdef([AC_PROG_GCJ],[AC_REQUIRE([AC_PROG_GCJ])], + [ifdef([A][M_PROG_GCJ],[AC_REQUIRE([A][M_PROG_GCJ])], + [AC_REQUIRE([A][C_PROG_GCJ_OR_A][M_PROG_GCJ])])])])])]) +_LT_AC_SHELL_INIT([tagnames=${tagnames+${tagnames},}GCJ]) +])# _LT_AC_LANG_GCJ + + +# AC_LIBTOOL_RC +# ------------- +# enable support for Windows resource files +AC_DEFUN([AC_LIBTOOL_RC], +[AC_REQUIRE([LT_AC_PROG_RC]) +_LT_AC_SHELL_INIT([tagnames=${tagnames+${tagnames},}RC]) +])# AC_LIBTOOL_RC + + +# AC_LIBTOOL_LANG_C_CONFIG +# ------------------------ +# Ensure that the configuration vars for the C compiler are +# suitably defined. Those variables are subsequently used by +# AC_LIBTOOL_CONFIG to write the compiler configuration to `libtool'. +AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG], [_LT_AC_LANG_C_CONFIG]) +AC_DEFUN([_LT_AC_LANG_C_CONFIG], +[lt_save_CC="$CC" +AC_LANG_PUSH(C) + +# Source file extension for C test sources. +ac_ext=c + +# Object file extension for compiled C test sources. 
+objext=o +_LT_AC_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;\n" + +# Code to be used in simple link tests +lt_simple_link_test_code='int main(){return(0);}\n' + +_LT_AC_SYS_COMPILER + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +AC_LIBTOOL_PROG_COMPILER_NO_RTTI($1) +AC_LIBTOOL_PROG_COMPILER_PIC($1) +AC_LIBTOOL_PROG_CC_C_O($1) +AC_LIBTOOL_SYS_HARD_LINK_LOCKS($1) +AC_LIBTOOL_PROG_LD_SHLIBS($1) +AC_LIBTOOL_SYS_DYNAMIC_LINKER($1) +AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH($1) +AC_LIBTOOL_SYS_LIB_STRIP +AC_LIBTOOL_DLOPEN_SELF + +# Report which library types will actually be built +AC_MSG_CHECKING([if libtool supports shared libraries]) +AC_MSG_RESULT([$can_build_shared]) + +AC_MSG_CHECKING([whether to build shared libraries]) +test "$can_build_shared" = "no" && enable_shared=no + +# On AIX, shared libraries and static libraries use the same namespace, and +# are all built from PIC. +case $host_os in +aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + +aix4* | aix5*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; +esac +AC_MSG_RESULT([$enable_shared]) + +AC_MSG_CHECKING([whether to build static libraries]) +# Make sure either enable_shared or enable_static is yes. +test "$enable_shared" = yes || enable_static=yes +AC_MSG_RESULT([$enable_static]) + +AC_LIBTOOL_CONFIG($1) + +AC_LANG_POP +CC="$lt_save_CC" +])# AC_LIBTOOL_LANG_C_CONFIG + + +# AC_LIBTOOL_LANG_CXX_CONFIG +# -------------------------- +# Ensure that the configuration vars for the C compiler are +# suitably defined. Those variables are subsequently used by +# AC_LIBTOOL_CONFIG to write the compiler configuration to `libtool'. +AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG], [_LT_AC_LANG_CXX_CONFIG(CXX)]) +AC_DEFUN([_LT_AC_LANG_CXX_CONFIG], +[AC_LANG_PUSH(C++) +AC_REQUIRE([AC_PROG_CXX]) +AC_REQUIRE([_LT_AC_PROG_CXXCPP]) + +_LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no +_LT_AC_TAGVAR(allow_undefined_flag, $1)= +_LT_AC_TAGVAR(always_export_symbols, $1)=no +_LT_AC_TAGVAR(archive_expsym_cmds, $1)= +_LT_AC_TAGVAR(export_dynamic_flag_spec, $1)= +_LT_AC_TAGVAR(hardcode_direct, $1)=no +_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)= +_LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= +_LT_AC_TAGVAR(hardcode_libdir_separator, $1)= +_LT_AC_TAGVAR(hardcode_minus_L, $1)=no +_LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=unsupported +_LT_AC_TAGVAR(hardcode_automatic, $1)=no +_LT_AC_TAGVAR(module_cmds, $1)= +_LT_AC_TAGVAR(module_expsym_cmds, $1)= +_LT_AC_TAGVAR(link_all_deplibs, $1)=unknown +_LT_AC_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_AC_TAGVAR(no_undefined_flag, $1)= +_LT_AC_TAGVAR(whole_archive_flag_spec, $1)= +_LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=no + +# Dependencies to place before and after the object being linked: +_LT_AC_TAGVAR(predep_objects, $1)= +_LT_AC_TAGVAR(postdep_objects, $1)= +_LT_AC_TAGVAR(predeps, $1)= +_LT_AC_TAGVAR(postdeps, $1)= +_LT_AC_TAGVAR(compiler_lib_search_path, $1)= + +# Source file extension for C++ test sources. +ac_ext=cpp + +# Object file extension for compiled C++ test sources. 
+objext=o +_LT_AC_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;\n" + +# Code to be used in simple link tests +lt_simple_link_test_code='int main(int, char *[[]]) { return(0); }\n' + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. +_LT_AC_SYS_COMPILER + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +# Allow CC to be a program name with arguments. +lt_save_CC=$CC +lt_save_LD=$LD +lt_save_GCC=$GCC +GCC=$GXX +lt_save_with_gnu_ld=$with_gnu_ld +lt_save_path_LD=$lt_cv_path_LD +if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then + lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx +else + $as_unset lt_cv_prog_gnu_ld +fi +if test -n "${lt_cv_path_LDCXX+set}"; then + lt_cv_path_LD=$lt_cv_path_LDCXX +else + $as_unset lt_cv_path_LD +fi +test -z "${LDCXX+set}" || LD=$LDCXX +CC=${CXX-"c++"} +compiler=$CC +_LT_AC_TAGVAR(compiler, $1)=$CC +_LT_CC_BASENAME([$compiler]) + +# We don't want -fno-exception wen compiling C++ code, so set the +# no_builtin_flag separately +if test "$GXX" = yes; then + _LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' +else + _LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= +fi + +if test "$GXX" = yes; then + # Set up default GNU C++ configuration + + AC_PROG_LD + + # Check if GNU C++ uses GNU ld as the underlying linker, since the + # archiving commands below assume that GNU ld is being used. + if test "$with_gnu_ld" = yes; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir' + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + + # If archive_cmds runs LD, not CC, wlarc should be empty + # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to + # investigate it a little bit more. (MM) + wlarc='${wl}' + + # ancient GNU ld didn't support --whole-archive et. al. + if eval "`$CC -print-prog-name=ld` --help 2>&1" | \ + grep 'no-whole-archive' > /dev/null; then + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)= + fi + else + with_gnu_ld=no + wlarc= + + # A generic and very simple default shared library creation + # command for GNU C++ for the case where it uses the native + # linker, instead of GNU ld. If possible, this setting should + # overridden to take advantage of the native linker features on + # the platform it is being used on. + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + fi + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' + +else + GXX=no + with_gnu_ld=no + wlarc= +fi + +# PORTME: fill in a description of your system's C++ link characteristics +AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) +_LT_AC_TAGVAR(ld_shlibs, $1)=yes +case $host_os in + aix3*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + aix4* | aix5*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[[23]]|aix4.[[23]].*|aix5*) + for ld_flag in $LDFLAGS; do + case $ld_flag in + *-brtl*) + aix_use_runtimelinking=yes + break + ;; + esac + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + _LT_AC_TAGVAR(archive_cmds, $1)='' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + + if test "$GXX" = yes; then + case $host_os in aix4.[[012]]|aix4.[[012]].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && \ + strings "$collect2name" | grep resolve_lib_name >/dev/null + then + # We have reworked collect2 + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + else + # We have old collect2 + _LT_AC_TAGVAR(hardcode_direct, $1)=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)= + fi + ;; + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + _LT_AC_TAGVAR(always_export_symbols, $1)=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + _LT_AC_TAGVAR(allow_undefined_flag, $1)='-berok' + # Determine the default libpath from the value encoded in an empty executable. 
+ _LT_AC_SYS_LIBPATH_AIX + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + + _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' + _LT_AC_TAGVAR(allow_undefined_flag, $1)="-z nodefs" + _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an empty executable. + _LT_AC_SYS_LIBPATH_AIX + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + _LT_AC_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' + # Exported symbols can be pulled into shared objects from archives + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='$convenience' + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=yes + # This is similar to how AIX traditionally builds its shared libraries. + _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + beos*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + chorus*) + case $cc_basename in + *) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + cygwin* | mingw* | pw32*) + # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, + # as there is no search path for DLLs. + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_AC_TAGVAR(always_export_symbols, $1)=no + _LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + + if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... 
+ _LT_AC_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + darwin* | rhapsody*) + case $host_os in + rhapsody* | darwin1.[[012]]) + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}suppress' + ;; + *) # Darwin 1.3 on + if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + else + case ${MACOSX_DEPLOYMENT_TARGET} in + 10.[[012]]) + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + ;; + 10.*) + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}dynamic_lookup' + ;; + esac + fi + ;; + esac + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_AC_TAGVAR(hardcode_direct, $1)=no + _LT_AC_TAGVAR(hardcode_automatic, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=unsupported + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='' + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + + if test "$GXX" = yes ; then + lt_int_apple_cc_single_mod=no + output_verbose_link_cmd='echo' + if $CC -dumpspecs 2>&1 | $EGREP 'single_module' >/dev/null ; then + lt_int_apple_cc_single_mod=yes + fi + if test "X$lt_int_apple_cc_single_mod" = Xyes ; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + else + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -r -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + fi + _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds + if test "X$lt_int_apple_cc_single_mod" = Xyes ; then + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -r -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + fi + _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + case $cc_basename in + xlc*) + output_verbose_link_cmd='echo' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj ${wl}-single_module $allow_undefined_flag -o $lib 
$libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring' + _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj ${wl}-single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + ;; + *) + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac + fi + ;; + + dgux*) + case $cc_basename in + ec++*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + ghcx*) + # Green Hills C++ Compiler + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + freebsd[[12]]*) + # C++ shared libraries reported to be fairly broken before switch to ELF + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + freebsd-elf*) + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + ;; + freebsd* | kfreebsd*-gnu | dragonfly*) + # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF + # conventions + _LT_AC_TAGVAR(ld_shlibs, $1)=yes + ;; + gnu*) + ;; + hpux9*) + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, + # but as the default + # location of the library. + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + aCC*) + _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | grep "[[-]]L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + *) + if test "$GXX" = yes; then + _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/$soname~$CC -shared -nostdlib -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + hpux10*|hpux11*) + if test $with_gnu_ld = no; then + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + case $host_cpu in + hppa*64*|ia64*) + _LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='+b $libdir' + ;; + *) + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + ;; + esac + fi + case $host_cpu in + hppa*64*|ia64*) + _LT_AC_TAGVAR(hardcode_direct, $1)=no + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + *) + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, + # but as the default + # location of the library. + ;; + esac + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + aCC*) + case $host_cpu in + hppa*64*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | grep "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + *) + if test "$GXX" = yes; then + if test $with_gnu_ld = no; then + case $host_cpu in + hppa*64*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + fi + else + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + interix3*) + _LT_AC_TAGVAR(hardcode_direct, $1)=no + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + irix5* | irix6*) + case $cc_basename in + CC*) + # SGI C++ + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + + # Archives containing C++ object files must be created using + # "CC -ar", where "CC" is the IRIX C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. 
+ _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs' + ;; + *) + if test "$GXX" = yes; then + if test "$with_gnu_ld" = no; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` -o $lib' + fi + fi + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + ;; + esac + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + ;; + linux*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + _LT_AC_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | grep "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath,$libdir' + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + + # Archives containing C++ object files must be created using + # "CC -Bstatic", where "CC" is the KAI C++ compiler. + _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' + ;; + icpc*) + # Intel C++ + with_gnu_ld=yes + # version 8.0 and above of icpc choke on multiply defined symbols + # if we add $predep_objects and $postdep_objects, however 7.1 and + # earlier do not add the objects themselves. 
+ case `$CC -V 2>&1` in + *"Version 7."*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + *) # Version 8.0 or newer + tmp_idyn= + case $host_cpu in + ia64*) tmp_idyn=' -i_dynamic';; + esac + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + esac + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + ;; + pgCC*) + # Portland Group C++ compiler + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir' + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' + ;; + cxx*) + # Compaq C++ + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols' + + runpath_var=LD_RUN_PATH + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + esac + ;; + lynxos*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + m88k*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + mvs*) + case $cc_basename in + cxx*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' + wlarc= + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + fi + # Workaround some broken pre-1.5 toolchains + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' + ;; + openbsd2*) + # C++ shared libraries are fairly broken + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + openbsd*) + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + fi + output_verbose_link_cmd='echo' + ;; + osf3*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + _LT_AC_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + # Archives containing C++ object files must be created using + # "CC -Bstatic", where "CC" is the KAI C++ compiler. 
+ _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' + + ;; + RCC*) + # Rational C++ 2.4.1 + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + cxx*) + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && echo ${wl}-set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld" | grep -v "ld:"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + *) + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' + + else + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + osf4* | osf5*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + _LT_AC_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + # Archives containing C++ object files must be created using + # the KAI C++ compiler. 
+ _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' + ;; + RCC*) + # Rational C++ 2.4.1 + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + cxx*) + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ + echo "-hidden">> $lib.exp~ + $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname -Wl,-input -Wl,$lib.exp `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~ + $rm $lib.exp' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld" | grep -v "ld:"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + *) + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' + + else + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + psos*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + lcc*) + # Lucid + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + solaris*) + case $cc_basename in + CC*) + # Sun C++ 4.2, 5.x and Centerline C++ + _LT_AC_TAGVAR(archive_cmds_need_lc,$1)=yes + _LT_AC_TAGVAR(no_undefined_flag, $1)=' -zdefs' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + # The C++ compiler is used as linker so we must use $wl + # flag to pass the commands to the underlying system + # linker. We must also pass each convience library through + # to the system linker between allextract/defaultextract. + # The C++ compiler will combine linker options so we + # cannot just pass the convience library names through + # without $wl. + # Supported since Solaris 2.6 (maybe 2.5.1?) + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}-z ${wl}defaultextract' + ;; + esac + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + + output_verbose_link_cmd='echo' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. + _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' + ;; + gcx*) + # Green Hills C++ Compiler + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + + # The C++ compiler must be used to create the archive. 
+ _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs' + ;; + *) + # GNU C++ compiler with Solaris linker + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + _LT_AC_TAGVAR(no_undefined_flag, $1)=' ${wl}-z ${wl}defs' + if $CC --version | grep -v '^2\.7' > /dev/null; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -shared -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd="$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep \"\-L\"" + else + # g++ 2.7 appears to require `-G' NOT `-shared' on this + # platform. + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd="$CC -G $CFLAGS -v conftest.$objext 2>&1 | grep \"\-L\"" + fi + + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $wl$libdir' + fi + ;; + esac + ;; + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) + _LT_AC_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. + # For security reasons, it is highly recommended that you always + # use absolute paths for naming shared libraries, and exclude the + # DT_RUNPATH tag from executables and libraries. But doing so + # requires that you compile everything twice, which is a pain. + # So that behaviour is only enabled if SCOABSPATH is set to a + # non-empty value in the environment. Most likely only useful for + # creating official distributions of packages. 
+ # This is a hack until libtool officially supports absolute path + # names for shared libraries. + _LT_AC_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='`test -z "$SCOABSPATH" && echo ${wl}-R,$libdir`' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + vxworks*) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; +esac +AC_MSG_RESULT([$_LT_AC_TAGVAR(ld_shlibs, $1)]) +test "$_LT_AC_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no + +_LT_AC_TAGVAR(GCC, $1)="$GXX" +_LT_AC_TAGVAR(LD, $1)="$LD" + +AC_LIBTOOL_POSTDEP_PREDEP($1) +AC_LIBTOOL_PROG_COMPILER_PIC($1) +AC_LIBTOOL_PROG_CC_C_O($1) +AC_LIBTOOL_SYS_HARD_LINK_LOCKS($1) +AC_LIBTOOL_PROG_LD_SHLIBS($1) +AC_LIBTOOL_SYS_DYNAMIC_LINKER($1) +AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH($1) + +AC_LIBTOOL_CONFIG($1) + +AC_LANG_POP +CC=$lt_save_CC +LDCXX=$LD +LD=$lt_save_LD +GCC=$lt_save_GCC +with_gnu_ldcxx=$with_gnu_ld +with_gnu_ld=$lt_save_with_gnu_ld +lt_cv_path_LDCXX=$lt_cv_path_LD +lt_cv_path_LD=$lt_save_path_LD +lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld +lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld +])# AC_LIBTOOL_LANG_CXX_CONFIG + +# AC_LIBTOOL_POSTDEP_PREDEP([TAGNAME]) +# ------------------------------------ +# Figure out "hidden" library dependencies from verbose +# compiler output when linking a shared library. +# Parse the compiler output and extract the necessary +# objects, libraries and library flags. +AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP],[ +dnl we can't use the lt_simple_compile_test_code here, +dnl because it contains code intended for an executable, +dnl not a library. It's possible we should let each +dnl tag define a new lt_????_link_test_code variable, +dnl but it's only used here... +ifelse([$1],[],[cat > conftest.$ac_ext < conftest.$ac_ext < conftest.$ac_ext < conftest.$ac_ext <> "$cfgfile" +ifelse([$1], [], +[#! $SHELL + +# `$echo "$cfgfile" | sed 's%^.*/%%'` - Provide generalized library-building support services. +# Generated automatically by $PROGRAM (GNU $PACKAGE $VERSION$TIMESTAMP) +# NOTE: Changes made to this file will be lost: look at ltmain.sh. +# +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001 +# Free Software Foundation, Inc. 
+# +# This file is part of GNU Libtool: +# Originally by Gordon Matzigkeit , 1996 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# A sed program that does not truncate output. +SED=$lt_SED + +# Sed that helps us avoid accidentally triggering echo(1) options like -n. +Xsed="$SED -e 1s/^X//" + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +# The names of the tagged configurations supported by this script. +available_tags= + +# ### BEGIN LIBTOOL CONFIG], +[# ### BEGIN LIBTOOL TAG CONFIG: $tagname]) + +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$_LT_AC_TAGVAR(archive_cmds_need_lc, $1) + +# Whether or not to disallow shared libs when runtime libs are static +allow_libtool_libs_with_static_runtimes=$_LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1) + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# An echo program that does not interpret backslashes. +echo=$lt_echo + +# The archiver. +AR=$lt_AR +AR_FLAGS=$lt_AR_FLAGS + +# A C compiler. +LTCC=$lt_LTCC + +# LTCC compiler flags. +LTCFLAGS=$lt_LTCFLAGS + +# A language-specific compiler. +CC=$lt_[]_LT_AC_TAGVAR(compiler, $1) + +# Is the compiler the GNU C compiler? +with_gcc=$_LT_AC_TAGVAR(GCC, $1) + +gcc_dir=\`gcc -print-file-name=. | $SED 's,/\.$,,'\` +gcc_ver=\`gcc -dumpversion\` + +# An ERE matcher. +EGREP=$lt_EGREP + +# The linker used to build libraries. +LD=$lt_[]_LT_AC_TAGVAR(LD, $1) + +# Whether we need hard or soft links. +LN_S=$lt_LN_S + +# A BSD-compatible nm program. +NM=$lt_NM + +# A symbol stripping program +STRIP=$lt_STRIP + +# Used to examine libraries when file_magic_cmd begins "file" +MAGIC_CMD=$MAGIC_CMD + +# Used on cygwin: DLL creation program. +DLLTOOL="$DLLTOOL" + +# Used on cygwin: object dumper. +OBJDUMP="$OBJDUMP" + +# Used on cygwin: assembler. +AS="$AS" + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# How to create reloadable object files. 
+reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# How to pass a linker flag through the compiler. +wl=$lt_[]_LT_AC_TAGVAR(lt_prog_compiler_wl, $1) + +# Object file suffix (normally "o"). +objext="$ac_objext" + +# Old archive suffix (normally "a"). +libext="$libext" + +# Shared library suffix (normally ".so"). +shrext_cmds='$shrext_cmds' + +# Executable file suffix (normally ""). +exeext="$exeext" + +# Additional compiler flags for building library objects. +pic_flag=$lt_[]_LT_AC_TAGVAR(lt_prog_compiler_pic, $1) +pic_mode=$pic_mode + +# What is the maximum length of a command? +max_cmd_len=$lt_cv_sys_max_cmd_len + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_[]_LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1) + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Do we need the lib prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_[]_LT_AC_TAGVAR(lt_prog_compiler_static, $1) + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_[]_LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_[]_LT_AC_TAGVAR(export_dynamic_flag_spec, $1) + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_[]_LT_AC_TAGVAR(whole_archive_flag_spec, $1) + +# Compiler flag to generate thread-safe objects. +thread_safe_flag_spec=$lt_[]_LT_AC_TAGVAR(thread_safe_flag_spec, $1) + +# Library versioning type. +version_type=$version_type + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME. +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Commands used to build and install an old-style archive. +RANLIB=$lt_RANLIB +old_archive_cmds=$lt_[]_LT_AC_TAGVAR(old_archive_cmds, $1) +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_[]_LT_AC_TAGVAR(old_archive_from_new_cmds, $1) + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_[]_LT_AC_TAGVAR(old_archive_from_expsyms_cmds, $1) + +# Commands used to build and install a shared archive. +archive_cmds=$lt_[]_LT_AC_TAGVAR(archive_cmds, $1) +archive_expsym_cmds=$lt_[]_LT_AC_TAGVAR(archive_expsym_cmds, $1) +postinstall_cmds=$lt_postinstall_cmds +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to build a loadable module (assumed same as above if empty) +module_cmds=$lt_[]_LT_AC_TAGVAR(module_cmds, $1) +module_expsym_cmds=$lt_[]_LT_AC_TAGVAR(module_expsym_cmds, $1) + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + +# Dependencies to place before the objects being linked to create a +# shared library. 
+predep_objects=\`echo $lt_[]_LT_AC_TAGVAR(predep_objects, $1) | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` + +# Dependencies to place after the objects being linked to create a +# shared library. +postdep_objects=\`echo $lt_[]_LT_AC_TAGVAR(postdep_objects, $1) | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` + +# Dependencies to place before the objects being linked to create a +# shared library. +predeps=$lt_[]_LT_AC_TAGVAR(predeps, $1) + +# Dependencies to place after the objects being linked to create a +# shared library. +postdeps=$lt_[]_LT_AC_TAGVAR(postdeps, $1) + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=\`echo $lt_[]_LT_AC_TAGVAR(compiler_lib_search_path, $1) | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method == file_magic. +file_magic_cmd=$lt_file_magic_cmd + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_[]_LT_AC_TAGVAR(allow_undefined_flag, $1) + +# Flag that forces no undefined symbols. +no_undefined_flag=$lt_[]_LT_AC_TAGVAR(no_undefined_flag, $1) + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# Same as above, but a single script fragment to be evaled but not shown. +finish_eval=$lt_finish_eval + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm in a C name address pair +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# This is the shared library runtime path variable. +runpath_var=$runpath_var + +# This is the shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# How to hardcode a shared library path into an executable. +hardcode_action=$_LT_AC_TAGVAR(hardcode_action, $1) + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist. +hardcode_libdir_flag_spec=$lt_[]_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1) + +# If ld is used when linking, flag to hardcode \$libdir into +# a binary during linking. This must work even if \$libdir does +# not exist. +hardcode_libdir_flag_spec_ld=$lt_[]_LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1) + +# Whether we need a single -rpath flag with a separated argument. +hardcode_libdir_separator=$lt_[]_LT_AC_TAGVAR(hardcode_libdir_separator, $1) + +# Set to yes if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the +# resulting binary. +hardcode_direct=$_LT_AC_TAGVAR(hardcode_direct, $1) + +# Set to yes if using the -LDIR flag during linking hardcodes DIR into the +# resulting binary. +hardcode_minus_L=$_LT_AC_TAGVAR(hardcode_minus_L, $1) + +# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into +# the resulting binary. 
+hardcode_shlibpath_var=$_LT_AC_TAGVAR(hardcode_shlibpath_var, $1) + +# Set to yes if building a shared library automatically hardcodes DIR into the library +# and all subsequent libraries and executables linked against it. +hardcode_automatic=$_LT_AC_TAGVAR(hardcode_automatic, $1) + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at relink time. +variables_saved_for_relink="$variables_saved_for_relink" + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$_LT_AC_TAGVAR(link_all_deplibs, $1) + +# Compile-time system search path for libraries +sys_lib_search_path_spec=\`echo $lt_sys_lib_search_path_spec | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` + +# Run-time system search path for libraries +sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec + +# Fix the shell variable \$srcfile for the compiler. +fix_srcfile_path="$_LT_AC_TAGVAR(fix_srcfile_path, $1)" + +# Set to yes if exported symbols are required. +always_export_symbols=$_LT_AC_TAGVAR(always_export_symbols, $1) + +# The commands to list exported symbols. +export_symbols_cmds=$lt_[]_LT_AC_TAGVAR(export_symbols_cmds, $1) + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_[]_LT_AC_TAGVAR(exclude_expsyms, $1) + +# Symbols that must always be exported. +include_expsyms=$lt_[]_LT_AC_TAGVAR(include_expsyms, $1) + +ifelse([$1],[], +[# ### END LIBTOOL CONFIG], +[# ### END LIBTOOL TAG CONFIG: $tagname]) + +__EOF__ + +ifelse([$1],[], [ + case $host_os in + aix3*) + cat <<\EOF >> "$cfgfile" + +# AIX sometimes has problems with the GCC collect2 program. For some +# reason, if we set the COLLECT_NAMES environment variable, the problems +# vanish in a puff of smoke. +if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES +fi +EOF + ;; + esac + + # We use sed instead of cat because bash on DJGPP gets confused if + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? + sed '$q' "$ltmain" >> "$cfgfile" || (rm -f "$cfgfile"; exit 1) + + mv -f "$cfgfile" "$ofile" || \ + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" +]) +else + # If there is no Makefile yet, we rely on a make rule to execute + # `config.status --recheck' to rerun these tests and create the + # libtool script then. 
+ ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` + if test -f "$ltmain_in"; then + test -f Makefile && make "$ltmain" + fi +fi +])# AC_LIBTOOL_CONFIG + + +# AC_LIBTOOL_PROG_COMPILER_NO_RTTI([TAGNAME]) +# ------------------------------------------- +AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], +[AC_REQUIRE([_LT_AC_SYS_COMPILER])dnl + +_LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= + +if test "$GCC" = yes; then + _LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' + + AC_LIBTOOL_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions], + lt_cv_prog_compiler_rtti_exceptions, + [-fno-rtti -fno-exceptions], [], + [_LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"]) +fi +])# AC_LIBTOOL_PROG_COMPILER_NO_RTTI + + +# AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE +# --------------------------------- +AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], +[AC_REQUIRE([AC_CANONICAL_HOST]) +AC_REQUIRE([AC_PROG_NM]) +AC_REQUIRE([AC_OBJEXT]) +# Check for command to grab the raw symbol name followed by C symbol from nm. +AC_MSG_CHECKING([command to parse $NM output from $compiler object]) +AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe], +[ +# These are sane defaults that work on at least a few old systems. +# [They come from Ultrix. What could be older than Ultrix?!! ;)] + +# Character class describing NM global symbol codes. +symcode='[[BCDEGRST]]' + +# Regexp to match symbols that can be accessed directly from C. +sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)' + +# Transform an extracted symbol line into a proper C declaration +lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^. .* \(.*\)$/extern int \1;/p'" + +# Transform an extracted symbol line into symbol name and symbol address +lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'" + +# Define system-specific variables. +case $host_os in +aix*) + symcode='[[BCDT]]' + ;; +cygwin* | mingw* | pw32*) + symcode='[[ABCDGISTW]]' + ;; +hpux*) # Its linker distinguishes data from code symbols + if test "$host_cpu" = ia64; then + symcode='[[ABCDEGRST]]' + fi + lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'" + ;; +linux*) + if test "$host_cpu" = ia64; then + symcode='[[ABCDGIRSTW]]' + lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'" + fi + ;; +irix* | nonstopux*) + symcode='[[BCDEGRST]]' + ;; +osf*) + symcode='[[BCDEGQRST]]' + ;; +solaris*) + symcode='[[BDRT]]' + ;; +sco3.2v5*) + symcode='[[DT]]' + ;; +sysv4.2uw2*) + symcode='[[DT]]' + ;; +sysv5* | sco5v6* | unixware* | OpenUNIX*) + symcode='[[ABDT]]' + ;; +sysv4) + symcode='[[DFNSTU]]' + ;; +esac + +# Handle CRLF in mingw tool chain +opt_cr= +case $build_os in +mingw*) + opt_cr=`echo 'x\{0,1\}' | tr x '\015'` # option cr in regexp + ;; +esac + +# If we're using GNU nm, then use its standard symbol codes. 
+case `$NM -V 2>&1` in +*GNU* | *'with BFD'*) + symcode='[[ABCDGIRSTW]]' ;; +esac + +# Try without a prefix undercore, then with it. +for ac_symprfx in "" "_"; do + + # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. + symxfrm="\\1 $ac_symprfx\\2 \\2" + + # Write the raw and C identifiers. + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + + # Check to see that the pipe works correctly. + pipe_works=no + + rm -f conftest* + cat > conftest.$ac_ext < $nlist) && test -s "$nlist"; then + # Try sorting and uniquifying the output. + if sort "$nlist" | uniq > "$nlist"T; then + mv -f "$nlist"T "$nlist" + else + rm -f "$nlist"T + fi + + # Make sure that we snagged all the symbols we need. + if grep ' nm_test_var$' "$nlist" >/dev/null; then + if grep ' nm_test_func$' "$nlist" >/dev/null; then + cat < conftest.$ac_ext +#ifdef __cplusplus +extern "C" { +#endif + +EOF + # Now generate the symbol file. + eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | grep -v main >> conftest.$ac_ext' + + cat <> conftest.$ac_ext +#if defined (__STDC__) && __STDC__ +# define lt_ptr_t void * +#else +# define lt_ptr_t char * +# define const +#endif + +/* The mapping between symbol names and symbols. */ +const struct { + const char *name; + lt_ptr_t address; +} +lt_preloaded_symbols[[]] = +{ +EOF + $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (lt_ptr_t) \&\2},/" < "$nlist" | grep -v main >> conftest.$ac_ext + cat <<\EOF >> conftest.$ac_ext + {0, (lt_ptr_t) 0} +}; + +#ifdef __cplusplus +} +#endif +EOF + # Now try linking the two files. + mv conftest.$ac_objext conftstm.$ac_objext + lt_save_LIBS="$LIBS" + lt_save_CFLAGS="$CFLAGS" + LIBS="conftstm.$ac_objext" + CFLAGS="$CFLAGS$_LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)" + if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext}; then + pipe_works=yes + fi + LIBS="$lt_save_LIBS" + CFLAGS="$lt_save_CFLAGS" + else + echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD + fi + else + echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD + fi + else + echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD + fi + else + echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD + cat conftest.$ac_ext >&5 + fi + rm -f conftest* conftst* + + # Do not use the global_symbol_pipe unless it works. + if test "$pipe_works" = yes; then + break + else + lt_cv_sys_global_symbol_pipe= + fi +done +]) +if test -z "$lt_cv_sys_global_symbol_pipe"; then + lt_cv_sys_global_symbol_to_cdecl= +fi +if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then + AC_MSG_RESULT(failed) +else + AC_MSG_RESULT(ok) +fi +]) # AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE + + +# AC_LIBTOOL_PROG_COMPILER_PIC([TAGNAME]) +# --------------------------------------- +AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC], +[_LT_AC_TAGVAR(lt_prog_compiler_wl, $1)= +_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= +_LT_AC_TAGVAR(lt_prog_compiler_static, $1)= + +AC_MSG_CHECKING([for $compiler option to produce PIC]) + ifelse([$1],[CXX],[ + # C++ specific cases for pic, static, wl, etc. + if test "$GXX" = yes; then + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static' + + case $host_os in + aix*) + # All AIX code is PIC. 
+ if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; + amigaos*) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' + ;; + beos* | cygwin* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + mingw* | os2* | pw32*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT' + ;; + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' + ;; + *djgpp*) + # DJGPP does not support shared libraries at all + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= + ;; + interix3*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + sysv4*MP*) + if test -d /usr/nec; then + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic + fi + ;; + hpux*) + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + ;; + *) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + ;; + *) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + else + case $host_os in + aix4* | aix5*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + else + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' + fi + ;; + chorus*) + case $cc_basename in + cxch68*) + # Green Hills C++ Compiler + # _LT_AC_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" + ;; + esac + ;; + darwin*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + case $cc_basename in + xlc*) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-qnocommon' + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + ;; + esac + ;; + dgux*) + case $cc_basename in + ec++*) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + ;; + ghcx*) + # Green Hills C++ Compiler + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + *) + ;; + esac + ;; + freebsd* | kfreebsd*-gnu | dragonfly*) + # FreeBSD uses GNU C++ + ;; + hpux9* | hpux10* | hpux11*) + case $cc_basename in + CC*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' + if test "$host_cpu" != ia64; then + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + fi + ;; + aCC*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + ;; + esac + ;; + *) + ;; + esac + ;; + interix*) + # This is c89, which is MS Visual C++ (no shared libs) + # Anyone wants to do a port? + ;; + irix5* | irix6* | nonstopux*) + case $cc_basename in + CC*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + # CC pic flag -KPIC is the default. 
+ ;; + *) + ;; + esac + ;; + linux*) + case $cc_basename in + KCC*) + # KAI C++ Compiler + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + icpc* | ecpc*) + # Intel C++ + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + pgCC*) + # Portland Group C++ compiler. + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + cxx*) + # Compaq C++ + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + *) + ;; + esac + ;; + lynxos*) + ;; + m88k*) + ;; + mvs*) + case $cc_basename in + cxx*) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall' + ;; + *) + ;; + esac + ;; + netbsd*) + ;; + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' + ;; + RCC*) + # Rational C++ 2.4.1 + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + cxx*) + # Digital/Compaq C++ + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + *) + ;; + esac + ;; + psos*) + ;; + solaris*) + case $cc_basename in + CC*) + # Sun C++ 4.2, 5.x and Centerline C++ + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + ;; + gcx*) + # Green Hills C++ Compiler + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + ;; + *) + ;; + esac + ;; + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + lcc*) + # Lucid + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + *) + ;; + esac + ;; + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + ;; + *) + ;; + esac + ;; + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + case $cc_basename in + CC*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + esac + ;; + vxworks*) + ;; + *) + _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + esac + fi +], +[ + if test "$GCC" = yes; then + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; + + amigaos*) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' + ;; + + beos* | cygwin* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. 
+ ;; + + mingw* | pw32* | os2*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT' + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' + ;; + + interix3*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. + _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + enable_shared=no + ;; + + sysv4*MP*) + if test -d /usr/nec; then + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic + fi + ;; + + hpux*) + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + ;; + + *) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. + case $host_os in + aix*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + else + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' + fi + ;; + darwin*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + case $cc_basename in + xlc*) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-qnocommon' + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + ;; + esac + ;; + + mingw* | pw32* | os2*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT' + ;; + + hpux9* | hpux10* | hpux11*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' + ;; + + irix5* | irix6* | nonstopux*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # PIC (with -KPIC) is the default. + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + + newsos6) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + linux*) + case $cc_basename in + icc* | ecc*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + pgcc* | pgf77* | pgf90* | pgf95*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + ccc*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # All Alpha code is PIC. + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + esac + ;; + + osf3* | osf4* | osf5*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # All OSF/1 code is PIC. 
+ _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + + solaris*) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + case $cc_basename in + f77* | f90* | f95*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';; + *) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';; + esac + ;; + + sunos4*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + sysv4 | sysv4.2uw2* | sysv4.3*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + sysv4*MP*) + if test -d /usr/nec ;then + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; + + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + unicos*) + _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + + uts4*) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + *) + _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + esac + fi +]) +AC_MSG_RESULT([$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)]) + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)"; then + AC_LIBTOOL_COMPILER_OPTION([if $compiler PIC flag $_LT_AC_TAGVAR(lt_prog_compiler_pic, $1) works], + _LT_AC_TAGVAR(lt_prog_compiler_pic_works, $1), + [$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)ifelse([$1],[],[ -DPIC],[ifelse([$1],[CXX],[ -DPIC],[])])], [], + [case $_LT_AC_TAGVAR(lt_prog_compiler_pic, $1) in + "" | " "*) ;; + *) _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)" ;; + esac], + [_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no]) +fi +case $host_os in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= + ;; + *) + _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)ifelse([$1],[],[ -DPIC],[ifelse([$1],[CXX],[ -DPIC],[])])" + ;; +esac + +# +# Check to make sure the static flag actually works. +# +wl=$_LT_AC_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_AC_TAGVAR(lt_prog_compiler_static, $1)\" +AC_LIBTOOL_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works], + _LT_AC_TAGVAR(lt_prog_compiler_static_works, $1), + $lt_tmp_static_flag, + [], + [_LT_AC_TAGVAR(lt_prog_compiler_static, $1)=]) +]) + + +# AC_LIBTOOL_PROG_LD_SHLIBS([TAGNAME]) +# ------------------------------------ +# See if the linker supports building shared libraries. +AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS], +[AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) +ifelse([$1],[CXX],[ + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + case $host_os in + aix4* | aix5*) + # If we're using GNU nm, then we don't want the "-C" option. 
+ # -C means demangle to AIX nm, but means don't demangle with GNU nm + if $NM -V 2>&1 | grep 'GNU' > /dev/null; then + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\[$]2 == "T") || (\[$]2 == "D") || (\[$]2 == "B")) && ([substr](\[$]3,1,1) != ".")) { print \[$]3 } }'\'' | sort -u > $export_symbols' + else + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\[$]2 == "T") || (\[$]2 == "D") || (\[$]2 == "B")) && ([substr](\[$]3,1,1) != ".")) { print \[$]3 } }'\'' | sort -u > $export_symbols' + fi + ;; + pw32*) + _LT_AC_TAGVAR(export_symbols_cmds, $1)="$ltdll_cmds" + ;; + cygwin* | mingw*) + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]] /s/.* \([[^ ]]*\)/\1 DATA/;/^.* __nm__/s/^.* __nm__\([[^ ]]*\) [[^ ]]*/\1 DATA/;/^I /d;/^[[AITW]] /s/.* //'\'' | sort | uniq > $export_symbols' + ;; + *) + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + ;; + esac +],[ + runpath_var= + _LT_AC_TAGVAR(allow_undefined_flag, $1)= + _LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=no + _LT_AC_TAGVAR(archive_cmds, $1)= + _LT_AC_TAGVAR(archive_expsym_cmds, $1)= + _LT_AC_TAGVAR(old_archive_From_new_cmds, $1)= + _LT_AC_TAGVAR(old_archive_from_expsyms_cmds, $1)= + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)= + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)= + _LT_AC_TAGVAR(thread_safe_flag_spec, $1)= + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)= + _LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)= + _LT_AC_TAGVAR(hardcode_direct, $1)=no + _LT_AC_TAGVAR(hardcode_minus_L, $1)=no + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=unsupported + _LT_AC_TAGVAR(link_all_deplibs, $1)=unknown + _LT_AC_TAGVAR(hardcode_automatic, $1)=no + _LT_AC_TAGVAR(module_cmds, $1)= + _LT_AC_TAGVAR(module_expsym_cmds, $1)= + _LT_AC_TAGVAR(always_export_symbols, $1)=no + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + _LT_AC_TAGVAR(include_expsyms, $1)= + # exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ` (' and `)$', so one must not match beginning or + # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', + # as well as any symbol that contains `d'. + _LT_AC_TAGVAR(exclude_expsyms, $1)="_GLOBAL_OFFSET_TABLE_" + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + extract_expsyms_cmds= + # Just being paranoid about ensuring that cc_basename is set. + _LT_CC_BASENAME([$compiler]) + case $host_os in + cygwin* | mingw* | pw32*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. 
+ if test "$GCC" != yes; then + with_gnu_ld=no + fi + ;; + interix*) + # we just hope/assume this is gcc and not c89 (= MSVC++) + with_gnu_ld=yes + ;; + openbsd*) + with_gnu_ld=no + ;; + esac + + _LT_AC_TAGVAR(ld_shlibs, $1)=yes + if test "$with_gnu_ld" = yes; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='${wl}' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir' + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. + if $LD --help 2>&1 | grep 'no-whole-archive' > /dev/null; then + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)= + fi + supports_anon_versioning=no + case `$LD -v 2>/dev/null` in + *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + + # See if GNU ld supports shared libraries. + case $host_os in + aix3* | aix4* | aix5*) + # On AIX/PPC, the GNU linker is very broken + if test "$host_cpu" != ia64; then + _LT_AC_TAGVAR(ld_shlibs, $1)=no + cat <&2 + +*** Warning: the GNU linker, at least up to release 2.9.1, is reported +*** to be unable to reliably create shared libraries on AIX. +*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to modify your PATH +*** so that a non-GNU linker is found, and then restart. + +EOF + fi + ;; + + amigaos*) + _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + + # Samuel A. Falvo II reports + # that the semantics of dynamic libraries on AmigaOS, at least up + # to version 4, is to share data among multiple programs linked + # with the same dynamic library. Since this doesn't match the + # behavior of shared libraries on other platforms, we can't use + # them. + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + + beos*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + cygwin* | mingw* | pw32*) + # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, + # as there is no search path for DLLs. 
+ _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_AC_TAGVAR(always_export_symbols, $1)=no + _LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]] /s/.* \([[^ ]]*\)/\1 DATA/'\'' | $SED -e '\''/^[[AITW]] /s/.* //'\'' | sort | uniq > $export_symbols' + + if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + interix3*) + _LT_AC_TAGVAR(hardcode_direct, $1)=no + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
+ _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + + linux*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + tmp_addflag= + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag' + ;; + pgf77* | pgf90* | pgf95*) # Portland Group f77 and f90 compilers + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + esac + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + + if test $supports_anon_versioning = yes; then + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + $echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + else + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + + solaris*) + if $LD -v 2>&1 | grep 'BFD 2\.8' > /dev/null; then + _LT_AC_TAGVAR(ld_shlibs, $1)=no + cat <&2 + +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. 
+ +EOF + elif $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) + case `$LD -v 2>&1` in + *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.1[[0-5]].*) + _LT_AC_TAGVAR(ld_shlibs, $1)=no + cat <<_LT_EOF 1>&2 + +*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not +*** reliably create shared libraries on SCO systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.16.91.0.3 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + ;; + *) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='`test -z "$SCOABSPATH" && echo ${wl}-rpath,$libdir`' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname,\${SCOABSPATH:+${install_libdir}/}$soname,-retain-symbols-file,$export_symbols -o $lib' + else + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + sunos4*) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + *) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_AC_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + + if test "$_LT_AC_TAGVAR(ld_shlibs, $1)" = no; then + runpath_var= + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)= + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)= + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_AC_TAGVAR(always_export_symbols, $1)=yes + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + _LT_AC_TAGVAR(hardcode_direct, $1)=unsupported + fi + ;; + + aix4* | aix5*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + # If we're using GNU nm, then we don't want the "-C" option. 
+ # -C means demangle to AIX nm, but means don't demangle with GNU nm + if $NM -V 2>&1 | grep 'GNU' > /dev/null; then + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\[$]2 == "T") || (\[$]2 == "D") || (\[$]2 == "B")) && ([substr](\[$]3,1,1) != ".")) { print \[$]3 } }'\'' | sort -u > $export_symbols' + else + _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\[$]2 == "T") || (\[$]2 == "D") || (\[$]2 == "B")) && ([substr](\[$]3,1,1) != ".")) { print \[$]3 } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[[23]]|aix4.[[23]].*|aix5*) + for ld_flag in $LDFLAGS; do + if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then + aix_use_runtimelinking=yes + break + fi + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + _LT_AC_TAGVAR(archive_cmds, $1)='' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + + if test "$GCC" = yes; then + case $host_os in aix4.[[012]]|aix4.[[012]].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && \ + strings "$collect2name" | grep resolve_lib_name >/dev/null + then + # We have reworked collect2 + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + else + # We have old collect2 + _LT_AC_TAGVAR(hardcode_direct, $1)=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)= + fi + ;; + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + _LT_AC_TAGVAR(always_export_symbols, $1)=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + _LT_AC_TAGVAR(allow_undefined_flag, $1)='-berok' + # Determine the default libpath from the value encoded in an empty executable. 
+ _LT_AC_SYS_LIBPATH_AIX + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' + _LT_AC_TAGVAR(allow_undefined_flag, $1)="-z nodefs" + _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an empty executable. + _LT_AC_SYS_LIBPATH_AIX + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + _LT_AC_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' + # Exported symbols can be pulled into shared objects from archives + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='$convenience' + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=yes + # This is similar to how AIX traditionally builds its shared libraries. + _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + amigaos*) + _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + # see comment about different semantics on the GNU ld section + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + + bsdi[[45]]*) + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic + ;; + + cygwin* | mingw* | pw32*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' + _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | $SED -e '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + _LT_AC_TAGVAR(old_archive_From_new_cmds, $1)='true' + # FIXME: Should let the user specify the lib program. 
+ _LT_AC_TAGVAR(old_archive_cmds, $1)='lib /OUT:$oldlib$oldobjs$old_deplibs' + _LT_AC_TAGVAR(fix_srcfile_path, $1)='`cygpath -w "$srcfile"`' + _LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + ;; + + darwin* | rhapsody*) + case $host_os in + rhapsody* | darwin1.[[012]]) + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}suppress' + ;; + *) # Darwin 1.3 on + if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + else + case ${MACOSX_DEPLOYMENT_TARGET} in + 10.[[012]]) + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + ;; + 10.*) + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}dynamic_lookup' + ;; + esac + fi + ;; + esac + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_AC_TAGVAR(hardcode_direct, $1)=no + _LT_AC_TAGVAR(hardcode_automatic, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=unsupported + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='' + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + if test "$GCC" = yes ; then + output_verbose_link_cmd='echo' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + case $cc_basename in + xlc*) + output_verbose_link_cmd='echo' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring' + _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + ;; + *) + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac + fi + ;; + + dgux*) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + freebsd1*) + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + + # 
FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). + freebsd2.2*) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + # Unfortunately, older versions of FreeBSD 2 do not have this feature. + freebsd2*) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. + freebsd* | kfreebsd*-gnu | dragonfly*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + hpux9*) + if test "$GCC" = yes; then + _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + ;; + + hpux10*) + if test "$GCC" = yes -a "$with_gnu_ld" = no; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi + if test "$with_gnu_ld" = no; then + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + fi + ;; + + hpux11*) + if test "$GCC" = yes -a "$with_gnu_ld" = no; then + case $host_cpu in + hppa*64*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case $host_cpu in + hppa*64*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + fi + if test "$with_gnu_ld" = no; then + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + + case $host_cpu in + hppa*64*|ia64*) + _LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='+b $libdir' + _LT_AC_TAGVAR(hardcode_direct, $1)=no + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + *) + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + ;; + esac + fi + ;; + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='-rpath $libdir' + fi + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + newsos6) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + openbsd*) + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs 
$deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + else + case $host_os in + openbsd[[01]].* | openbsd2.[[0-7]] | openbsd2.[[0-7]].*) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + ;; + *) + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + ;; + esac + fi + ;; + + os2*) + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_AC_TAGVAR(archive_cmds, $1)='$echo "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$echo "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$echo DATA >> $output_objdir/$libname.def~$echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~$echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' + _LT_AC_TAGVAR(old_archive_From_new_cmds, $1)='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' + ;; + + osf3*) + if test "$GCC" = yes; then + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + fi + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: + ;; + + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + else + _LT_AC_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; echo "-hidden">> $lib.exp~ + $LD -shared${allow_undefined_flag} -input $lib.exp $linker_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~$rm $lib.exp' + + # Both c and cxx compiler support -rpath directly + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + fi + _LT_AC_TAGVAR(hardcode_libdir_separator, 
$1)=: + ;; + + solaris*) + _LT_AC_TAGVAR(no_undefined_flag, $1)=' -z text' + if test "$GCC" = yes; then + wlarc='${wl}' + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -shared ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$rm $lib.exp' + else + wlarc='' + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp' + fi + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + # The compiler driver will combine linker options so we + # cannot just pass the convience library names through + # without $wl, iff we do not link with $LD. + # Luckily, gcc supports the same syntax we need for Sun Studio. + # Supported since Solaris 2.6 (maybe 2.5.1?) + case $wlarc in + '') + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' ;; + *) + _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}-z ${wl}defaultextract' ;; + esac ;; + esac + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + ;; + + sunos4*) + if test "x$host_vendor" = xsequent; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes + _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + sysv4) + case $host_vendor in + sni) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_direct, $1)=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. 
+ _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs' + _LT_AC_TAGVAR(hardcode_direct, $1)=no + ;; + motorola) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_direct, $1)=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + sysv4.3*) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + _LT_AC_TAGVAR(ld_shlibs, $1)=yes + fi + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7*) + _LT_AC_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
+ _LT_AC_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='`test -z "$SCOABSPATH" && echo ${wl}-R,$libdir`' + _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_AC_TAGVAR(link_all_deplibs, $1)=yes + _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + uts4*) + _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + *) + _LT_AC_TAGVAR(ld_shlibs, $1)=no + ;; + esac + fi +]) +AC_MSG_RESULT([$_LT_AC_TAGVAR(ld_shlibs, $1)]) +test "$_LT_AC_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no + +# +# Do we need to explicitly link libc? +# +case "x$_LT_AC_TAGVAR(archive_cmds_need_lc, $1)" in +x|xyes) + # Assume -lc should be added + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $_LT_AC_TAGVAR(archive_cmds, $1) in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + AC_MSG_CHECKING([whether -lc should be explicitly linked in]) + $rm conftest* + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + if AC_TRY_EVAL(ac_compile) 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$_LT_AC_TAGVAR(lt_prog_compiler_wl, $1) + pic_flag=$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1) + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$_LT_AC_TAGVAR(allow_undefined_flag, $1) + _LT_AC_TAGVAR(allow_undefined_flag, $1)= + if AC_TRY_EVAL(_LT_AC_TAGVAR(archive_cmds, $1) 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1) + then + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + else + _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=yes + fi + _LT_AC_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $rm conftest* + AC_MSG_RESULT([$_LT_AC_TAGVAR(archive_cmds_need_lc, $1)]) + ;; + esac + fi + ;; +esac +])# AC_LIBTOOL_PROG_LD_SHLIBS + + +# _LT_AC_FILE_LTDLL_C +# ------------------- +# Be careful that the start marker always follows a newline. 
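# --- Illustrative sketch (editorial; not part of the vendored m4 above) ---
# The "Do we need to explicitly link libc?" probe that closes
# AC_LIBTOOL_PROG_LD_SHLIBS above compiles a throwaway object, replays the
# shared-link command verbosely, and greps the driver output for " -lc ".
# A minimal standalone rendering of the same idea, assuming gcc and a POSIX
# shell (the real macro substitutes the configured $CC and $archive_cmds):
#
#   workdir=`mktemp -d` && cd "$workdir"
#   echo 'int lt_dummy;' > conftest.c
#   gcc -c -fPIC conftest.c -o conftest.o
#   if gcc -v -shared conftest.o -o libconftest.so 2>&1 | grep ' -lc ' >/dev/null
#   then archive_cmds_need_lc=no    # the driver already passes -lc to ld
#   else archive_cmds_need_lc=yes   # libtool must append -lc itself
#   fi
#   cd / && rm -rf "$workdir"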
+AC_DEFUN([_LT_AC_FILE_LTDLL_C], [ +# /* ltdll.c starts here */ +# #define WIN32_LEAN_AND_MEAN +# #include +# #undef WIN32_LEAN_AND_MEAN +# #include +# +# #ifndef __CYGWIN__ +# # ifdef __CYGWIN32__ +# # define __CYGWIN__ __CYGWIN32__ +# # endif +# #endif +# +# #ifdef __cplusplus +# extern "C" { +# #endif +# BOOL APIENTRY DllMain (HINSTANCE hInst, DWORD reason, LPVOID reserved); +# #ifdef __cplusplus +# } +# #endif +# +# #ifdef __CYGWIN__ +# #include +# DECLARE_CYGWIN_DLL( DllMain ); +# #endif +# HINSTANCE __hDllInstance_base; +# +# BOOL APIENTRY +# DllMain (HINSTANCE hInst, DWORD reason, LPVOID reserved) +# { +# __hDllInstance_base = hInst; +# return TRUE; +# } +# /* ltdll.c ends here */ +])# _LT_AC_FILE_LTDLL_C + + +# _LT_AC_TAGVAR(VARNAME, [TAGNAME]) +# --------------------------------- +AC_DEFUN([_LT_AC_TAGVAR], [ifelse([$2], [], [$1], [$1_$2])]) + + +# old names +AC_DEFUN([AM_PROG_LIBTOOL], [AC_PROG_LIBTOOL]) +AC_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)]) +AC_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)]) +AC_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)]) +AC_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)]) +AC_DEFUN([AM_PROG_LD], [AC_PROG_LD]) +AC_DEFUN([AM_PROG_NM], [AC_PROG_NM]) + +# This is just to silence aclocal about the macro not being used +ifelse([AC_DISABLE_FAST_INSTALL]) + +AC_DEFUN([LT_AC_PROG_GCJ], +[AC_CHECK_TOOL(GCJ, gcj, no) + test "x${GCJFLAGS+set}" = xset || GCJFLAGS="-g -O2" + AC_SUBST(GCJFLAGS) +]) + +AC_DEFUN([LT_AC_PROG_RC], +[AC_CHECK_TOOL(RC, windres, no) +]) + +# NOTE: This macro has been submitted for inclusion into # +# GNU Autoconf as AC_PROG_SED. When it is available in # +# a released version of Autoconf we should remove this # +# macro and use it instead. # +# LT_AC_PROG_SED +# -------------- +# Check for a fully-functional sed program, that truncates +# as few characters as possible. Prefer GNU sed if found. +AC_DEFUN([LT_AC_PROG_SED], +[AC_MSG_CHECKING([for a sed that does not truncate output]) +AC_CACHE_VAL(lt_cv_path_SED, +[# Loop through the user's path and test for sed and gsed. +# Then use that list of sed's as ones to test for truncation. +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for lt_ac_prog in sed gsed; do + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then + lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext" + fi + done + done +done +IFS=$as_save_IFS +lt_ac_max=0 +lt_ac_count=0 +# Add /usr/xpg4/bin/sed as it is typically found on Solaris +# along with /bin/sed that truncates output. +for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do + test ! -f $lt_ac_sed && continue + cat /dev/null > conftest.in + lt_ac_count=0 + echo $ECHO_N "0123456789$ECHO_C" >conftest.in + # Check for GNU sed and select it if it is found. 
+ if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then + lt_cv_path_SED=$lt_ac_sed + break + fi + while true; do + cat conftest.in conftest.in >conftest.tmp + mv conftest.tmp conftest.in + cp conftest.in conftest.nl + echo >>conftest.nl + $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break + cmp -s conftest.out conftest.nl || break + # 10000 chars as input seems more than enough + test $lt_ac_count -gt 10 && break + lt_ac_count=`expr $lt_ac_count + 1` + if test $lt_ac_count -gt $lt_ac_max; then + lt_ac_max=$lt_ac_count + lt_cv_path_SED=$lt_ac_sed + fi + done +done +]) +SED=$lt_cv_path_SED +AC_SUBST([SED]) +AC_MSG_RESULT([$SED]) +]) + +# Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_AUTOMAKE_VERSION(VERSION) +# ---------------------------- +# Automake X.Y traces this macro to ensure aclocal.m4 has been +# generated from the m4 files accompanying Automake X.Y. +AC_DEFUN([AM_AUTOMAKE_VERSION], [am__api_version="1.9"]) + +# AM_SET_CURRENT_AUTOMAKE_VERSION +# ------------------------------- +# Call AM_AUTOMAKE_VERSION so it can be traced. +# This function is AC_REQUIREd by AC_INIT_AUTOMAKE. +AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], + [AM_AUTOMAKE_VERSION([1.9.6])]) + +# AM_AUX_DIR_EXPAND -*- Autoconf -*- + +# Copyright (C) 2001, 2003, 2005 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets +# $ac_aux_dir to `$srcdir/foo'. In other projects, it is set to +# `$srcdir', `$srcdir/..', or `$srcdir/../..'. +# +# Of course, Automake must honor this variable whenever it calls a +# tool from the auxiliary directory. The problem is that $srcdir (and +# therefore $ac_aux_dir as well) can be either absolute or relative, +# depending on how configure is run. This is pretty annoying, since +# it makes $ac_aux_dir quite unusable in subdirectories: in the top +# source directory, any form will work fine, but in subdirectories a +# relative path needs to be adjusted first. +# +# $ac_aux_dir/missing +# fails when called from a subdirectory if $ac_aux_dir is relative +# $top_srcdir/$ac_aux_dir/missing +# fails if $ac_aux_dir is absolute, +# fails when called from a subdirectory in a VPATH build with +# a relative $ac_aux_dir +# +# The reason of the latter failure is that $top_srcdir and $ac_aux_dir +# are both prefixed by $srcdir. In an in-source build this is usually +# harmless because $srcdir is `.', but things will broke when you +# start a VPATH build or use an absolute $srcdir. +# +# So we could use something similar to $top_srcdir/$ac_aux_dir/missing, +# iff we strip the leading $srcdir from $ac_aux_dir. That would be: +# am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"` +# and then we would define $MISSING as +# MISSING="\${SHELL} $am_aux_dir/missing" +# This will work as long as MISSING is not called from configure, because +# unfortunately $(top_srcdir) has no meaning in configure. +# However there are other variables, like CC, which are often used in +# configure, and could therefore not use this "fixed" $ac_aux_dir. 
+# +# Another solution, used here, is to always expand $ac_aux_dir to an +# absolute PATH. The drawback is that using absolute paths prevent a +# configured tree to be moved without reconfiguration. + +AC_DEFUN([AM_AUX_DIR_EXPAND], +[dnl Rely on autoconf to set up CDPATH properly. +AC_PREREQ([2.50])dnl +# expand $ac_aux_dir to an absolute path +am_aux_dir=`cd $ac_aux_dir && pwd` +]) + +# AM_CONDITIONAL -*- Autoconf -*- + +# Copyright (C) 1997, 2000, 2001, 2003, 2004, 2005 +# Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 7 + +# AM_CONDITIONAL(NAME, SHELL-CONDITION) +# ------------------------------------- +# Define a conditional. +AC_DEFUN([AM_CONDITIONAL], +[AC_PREREQ(2.52)dnl + ifelse([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])], + [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl +AC_SUBST([$1_TRUE]) +AC_SUBST([$1_FALSE]) +if $2; then + $1_TRUE= + $1_FALSE='#' +else + $1_TRUE='#' + $1_FALSE= +fi +AC_CONFIG_COMMANDS_PRE( +[if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then + AC_MSG_ERROR([[conditional "$1" was never defined. +Usually this means the macro was only invoked conditionally.]]) +fi])]) + + +# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005 +# Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 8 + +# There are a few dirty hacks below to avoid letting `AC_PROG_CC' be +# written in clear, in which case automake, when reading aclocal.m4, +# will think it sees a *use*, and therefore will trigger all it's +# C support machinery. Also note that it means that autoscan, seeing +# CC etc. in the Makefile, will ask for an AC_PROG_CC use... + + +# _AM_DEPENDENCIES(NAME) +# ---------------------- +# See how the compiler implements dependency checking. +# NAME is "CC", "CXX", "GCJ", or "OBJC". +# We try a few techniques and use that to set a single cache variable. +# +# We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was +# modified to invoke _AM_DEPENDENCIES(CC); we would have a circular +# dependency, and given that the user is not expected to run this macro, +# just rely on AC_PROG_CC. +AC_DEFUN([_AM_DEPENDENCIES], +[AC_REQUIRE([AM_SET_DEPDIR])dnl +AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl +AC_REQUIRE([AM_MAKE_INCLUDE])dnl +AC_REQUIRE([AM_DEP_TRACK])dnl + +ifelse([$1], CC, [depcc="$CC" am_compiler_list=], + [$1], CXX, [depcc="$CXX" am_compiler_list=], + [$1], OBJC, [depcc="$OBJC" am_compiler_list='gcc3 gcc'], + [$1], GCJ, [depcc="$GCJ" am_compiler_list='gcc3 gcc'], + [depcc="$$1" am_compiler_list=]) + +AC_CACHE_CHECK([dependency style of $depcc], + [am_cv_$1_dependencies_compiler_type], +[if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. For + # instance it was reported that on HP-UX the gcc test will end up + # making a dummy file named `D' -- because `-MD' means `put the output + # in D'. + mkdir conftest.dir + # Copy depcomp to subdir because otherwise we won't find it if we're + # using a relative directory. 
+ cp "$am_depcomp" conftest.dir + cd conftest.dir + # We will build objects and dependencies in a subdirectory because + # it helps to detect inapplicable dependency modes. For instance + # both Tru64's cc and ICC support -MD to output dependencies as a + # side effect of compilation, but ICC will put the dependencies in + # the current directory while Tru64 will put them in the object + # directory. + mkdir sub + + am_cv_$1_dependencies_compiler_type=none + if test "$am_compiler_list" = ""; then + am_compiler_list=`sed -n ['s/^#*\([a-zA-Z0-9]*\))$/\1/p'] < ./depcomp` + fi + for depmode in $am_compiler_list; do + # Setup a source with many dependencies, because some compilers + # like to wrap large dependency lists on column 80 (with \), and + # we should not choose a depcomp mode which is confused by this. + # + # We need to recreate these files for each test, as the compiler may + # overwrite some of them when testing with obscure command lines. + # This happens at least with the AIX C compiler. + : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c + # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with + # Solaris 8's {/usr,}/bin/sh. + touch sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + + case $depmode in + nosideeffect) + # after this tag, mechanisms are not by side-effect, so they'll + # only be used when explicitly requested + if test "x$enable_dependency_tracking" = xyes; then + continue + else + break + fi + ;; + none) break ;; + esac + # We check with `-c' and `-o' for the sake of the "dashmstdout" + # mode. It turns out that the SunPro C++ compiler does not properly + # handle `-M -o', and we need to detect this. + if depmode=$depmode \ + source=sub/conftest.c object=sub/conftest.${OBJEXT-o} \ + depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ + $SHELL ./depcomp $depcc -c -o sub/conftest.${OBJEXT-o} sub/conftest.c \ + >/dev/null 2>conftest.err && + grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && + grep sub/conftest.${OBJEXT-o} sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings + # or remarks (even with -Werror). So we grep stderr for any message + # that says an option was ignored or not supported. + # When given -MP, icc 7.0 and 7.1 complain thusly: + # icc: Command line warning: ignoring option '-M'; no argument required + # The diagnosis changed in icc 8.0: + # icc: Command line remark: option '-MP' not supported + if (grep 'ignoring option' conftest.err || + grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_$1_dependencies_compiler_type=$depmode + break + fi + fi + done + + cd .. + rm -rf conftest.dir +else + am_cv_$1_dependencies_compiler_type=none +fi +]) +AC_SUBST([$1DEPMODE], [depmode=$am_cv_$1_dependencies_compiler_type]) +AM_CONDITIONAL([am__fastdep$1], [ + test "x$enable_dependency_tracking" != xno \ + && test "$am_cv_$1_dependencies_compiler_type" = gcc3]) +]) + + +# AM_SET_DEPDIR +# ------------- +# Choose a directory name for dependency files. 
+# This macro is AC_REQUIREd in _AM_DEPENDENCIES +AC_DEFUN([AM_SET_DEPDIR], +[AC_REQUIRE([AM_SET_LEADING_DOT])dnl +AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl +]) + + +# AM_DEP_TRACK +# ------------ +AC_DEFUN([AM_DEP_TRACK], +[AC_ARG_ENABLE(dependency-tracking, +[ --disable-dependency-tracking speeds up one-time build + --enable-dependency-tracking do not reject slow dependency extractors]) +if test "x$enable_dependency_tracking" != xno; then + am_depcomp="$ac_aux_dir/depcomp" + AMDEPBACKSLASH='\' +fi +AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno]) +AC_SUBST([AMDEPBACKSLASH]) +]) + +# Generate code to set up dependency tracking. -*- Autoconf -*- + +# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005 +# Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +#serial 3 + +# _AM_OUTPUT_DEPENDENCY_COMMANDS +# ------------------------------ +AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS], +[for mf in $CONFIG_FILES; do + # Strip MF so we end up with the name of the file. + mf=`echo "$mf" | sed -e 's/:.*$//'` + # Check whether this is an Automake generated Makefile or not. + # We used to match only the files named `Makefile.in', but + # some people rename them; so instead we look at the file content. + # Grep'ing the first line is not enough: some people post-process + # each Makefile.in and add a new line on top of each file to say so. + # So let's grep whole file. + if grep '^#.*generated by automake' $mf > /dev/null 2>&1; then + dirpart=`AS_DIRNAME("$mf")` + else + continue + fi + # Extract the definition of DEPDIR, am__include, and am__quote + # from the Makefile without running `make'. + DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` + test -z "$DEPDIR" && continue + am__include=`sed -n 's/^am__include = //p' < "$mf"` + test -z "am__include" && continue + am__quote=`sed -n 's/^am__quote = //p' < "$mf"` + # When using ansi2knr, U may be empty or an underscore; expand it + U=`sed -n 's/^U = //p' < "$mf"` + # Find all dependency output files, they are included files with + # $(DEPDIR) in their names. We invoke sed twice because it is the + # simplest approach to changing $(DEPDIR) to its actual value in the + # expansion. + for file in `sed -n " + s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ + sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do + # Make sure the directory exists. + test -f "$dirpart/$file" && continue + fdir=`AS_DIRNAME(["$file"])` + AS_MKDIR_P([$dirpart/$fdir]) + # echo "creating $dirpart/$file" + echo '# dummy' > "$dirpart/$file" + done +done +])# _AM_OUTPUT_DEPENDENCY_COMMANDS + + +# AM_OUTPUT_DEPENDENCY_COMMANDS +# ----------------------------- +# This macro should only be invoked once -- use via AC_REQUIRE. +# +# This code is only required when automatic dependency tracking +# is enabled. FIXME. This creates each `.P' file that we will +# need in order to bootstrap the dependency handling code. +AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS], +[AC_CONFIG_COMMANDS([depfiles], + [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS], + [AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"]) +]) + +# Do all the work for Automake. -*- Autoconf -*- + +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005 +# Free Software Foundation, Inc. 
+# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 12 + +# This macro actually does too much. Some checks are only needed if +# your package does certain things. But this isn't really a big deal. + +# AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE]) +# AM_INIT_AUTOMAKE([OPTIONS]) +# ----------------------------------------------- +# The call with PACKAGE and VERSION arguments is the old style +# call (pre autoconf-2.50), which is being phased out. PACKAGE +# and VERSION should now be passed to AC_INIT and removed from +# the call to AM_INIT_AUTOMAKE. +# We support both call styles for the transition. After +# the next Automake release, Autoconf can make the AC_INIT +# arguments mandatory, and then we can depend on a new Autoconf +# release and drop the old call support. +AC_DEFUN([AM_INIT_AUTOMAKE], +[AC_PREREQ([2.58])dnl +dnl Autoconf wants to disallow AM_ names. We explicitly allow +dnl the ones we care about. +m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl +AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl +AC_REQUIRE([AC_PROG_INSTALL])dnl +# test to see if srcdir already configured +if test "`cd $srcdir && pwd`" != "`pwd`" && + test -f $srcdir/config.status; then + AC_MSG_ERROR([source directory already configured; run "make distclean" there first]) +fi + +# test whether we have cygpath +if test -z "$CYGPATH_W"; then + if (cygpath --version) >/dev/null 2>/dev/null; then + CYGPATH_W='cygpath -w' + else + CYGPATH_W=echo + fi +fi +AC_SUBST([CYGPATH_W]) + +# Define the identity of the package. +dnl Distinguish between old-style and new-style calls. +m4_ifval([$2], +[m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl + AC_SUBST([PACKAGE], [$1])dnl + AC_SUBST([VERSION], [$2])], +[_AM_SET_OPTIONS([$1])dnl + AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl + AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl + +_AM_IF_OPTION([no-define],, +[AC_DEFINE_UNQUOTED(PACKAGE, "$PACKAGE", [Name of package]) + AC_DEFINE_UNQUOTED(VERSION, "$VERSION", [Version number of package])])dnl + +# Some tools Automake needs. +AC_REQUIRE([AM_SANITY_CHECK])dnl +AC_REQUIRE([AC_ARG_PROGRAM])dnl +AM_MISSING_PROG(ACLOCAL, aclocal-${am__api_version}) +AM_MISSING_PROG(AUTOCONF, autoconf) +AM_MISSING_PROG(AUTOMAKE, automake-${am__api_version}) +AM_MISSING_PROG(AUTOHEADER, autoheader) +AM_MISSING_PROG(MAKEINFO, makeinfo) +AM_PROG_INSTALL_SH +AM_PROG_INSTALL_STRIP +AC_REQUIRE([AM_PROG_MKDIR_P])dnl +# We need awk for the "check" target. The system "awk" is bad on +# some platforms. +AC_REQUIRE([AC_PROG_AWK])dnl +AC_REQUIRE([AC_PROG_MAKE_SET])dnl +AC_REQUIRE([AM_SET_LEADING_DOT])dnl +_AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])], + [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])], + [_AM_PROG_TAR([v7])])]) +_AM_IF_OPTION([no-dependencies],, +[AC_PROVIDE_IFELSE([AC_PROG_CC], + [_AM_DEPENDENCIES(CC)], + [define([AC_PROG_CC], + defn([AC_PROG_CC])[_AM_DEPENDENCIES(CC)])])dnl +AC_PROVIDE_IFELSE([AC_PROG_CXX], + [_AM_DEPENDENCIES(CXX)], + [define([AC_PROG_CXX], + defn([AC_PROG_CXX])[_AM_DEPENDENCIES(CXX)])])dnl +]) +]) + + +# When config.status generates a header, we must update the stamp-h file. +# This file resides in the same directory as the config header +# that is generated. The stamp files are numbered to have different names. 
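# --- Illustrative sketch (editorial; not part of the vendored m4 above) ---
# One small but easy-to-miss piece of AM_INIT_AUTOMAKE above is the guard
# against configuring a separate build tree while the source tree still
# carries a config.status from an earlier in-source run, which would mix the
# two configurations.  Reduced to plain shell (assuming $srcdir is set the
# way configure sets it), the guard is just:
#
#   if test "`cd $srcdir && pwd`" != "`pwd`" &&
#      test -f "$srcdir/config.status"; then
#     echo "source directory already configured; run \"make distclean\" there first" >&2
#     exit 1
#   fi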
+ +# Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the +# loop where config.status creates the headers, so we can generate +# our stamp files there. +AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK], +[# Compute $1's index in $config_headers. +_am_stamp_count=1 +for _am_header in $config_headers :; do + case $_am_header in + $1 | $1:* ) + break ;; + * ) + _am_stamp_count=`expr $_am_stamp_count + 1` ;; + esac +done +echo "timestamp for $1" >`AS_DIRNAME([$1])`/stamp-h[]$_am_stamp_count]) + +# Copyright (C) 2001, 2003, 2005 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_PROG_INSTALL_SH +# ------------------ +# Define $install_sh. +AC_DEFUN([AM_PROG_INSTALL_SH], +[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl +install_sh=${install_sh-"$am_aux_dir/install-sh"} +AC_SUBST(install_sh)]) + +# Copyright (C) 2003, 2005 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 2 + +# Check whether the underlying file-system supports filenames +# with a leading dot. For instance MS-DOS doesn't. +AC_DEFUN([AM_SET_LEADING_DOT], +[rm -rf .tst 2>/dev/null +mkdir .tst 2>/dev/null +if test -d .tst; then + am__leading_dot=. +else + am__leading_dot=_ +fi +rmdir .tst 2>/dev/null +AC_SUBST([am__leading_dot])]) + +# Check to see how 'make' treats includes. -*- Autoconf -*- + +# Copyright (C) 2001, 2002, 2003, 2005 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 3 + +# AM_MAKE_INCLUDE() +# ----------------- +# Check to see how make treats includes. +AC_DEFUN([AM_MAKE_INCLUDE], +[am_make=${MAKE-make} +cat > confinc << 'END' +am__doit: + @echo done +.PHONY: am__doit +END +# If we don't find an include directive, just comment out the code. +AC_MSG_CHECKING([for style of include used by $am_make]) +am__include="#" +am__quote= +_am_result=none +# First try GNU make style include. +echo "include confinc" > confmf +# We grep out `Entering directory' and `Leaving directory' +# messages which can occur if `w' ends up in MAKEFLAGS. +# In particular we don't look at `^make:' because GNU make might +# be invoked under some other name (usually "gmake"), in which +# case it prints its new name instead of `make'. +if test "`$am_make -s -f confmf 2> /dev/null | grep -v 'ing directory'`" = "done"; then + am__include=include + am__quote= + _am_result=GNU +fi +# Now try BSD make style include. +if test "$am__include" = "#"; then + echo '.include "confinc"' > confmf + if test "`$am_make -s -f confmf 2> /dev/null`" = "done"; then + am__include=.include + am__quote="\"" + _am_result=BSD + fi +fi +AC_SUBST([am__include]) +AC_SUBST([am__quote]) +AC_MSG_RESULT([$_am_result]) +rm -f confinc confmf +]) + +# Fake the existence of programs that GNU maintainers use. -*- Autoconf -*- + +# Copyright (C) 1997, 1999, 2000, 2001, 2003, 2005 +# Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. 
+ +# serial 4 + +# AM_MISSING_PROG(NAME, PROGRAM) +# ------------------------------ +AC_DEFUN([AM_MISSING_PROG], +[AC_REQUIRE([AM_MISSING_HAS_RUN]) +$1=${$1-"${am_missing_run}$2"} +AC_SUBST($1)]) + + +# AM_MISSING_HAS_RUN +# ------------------ +# Define MISSING if not defined so far and test if it supports --run. +# If it does, set am_missing_run to use it, otherwise, to nothing. +AC_DEFUN([AM_MISSING_HAS_RUN], +[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl +test x"${MISSING+set}" = xset || MISSING="\${SHELL} $am_aux_dir/missing" +# Use eval to expand $SHELL +if eval "$MISSING --run true"; then + am_missing_run="$MISSING --run " +else + am_missing_run= + AC_MSG_WARN([`missing' script is too old or missing]) +fi +]) + +# Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_PROG_MKDIR_P +# --------------- +# Check whether `mkdir -p' is supported, fallback to mkinstalldirs otherwise. +# +# Automake 1.8 used `mkdir -m 0755 -p --' to ensure that directories +# created by `make install' are always world readable, even if the +# installer happens to have an overly restrictive umask (e.g. 077). +# This was a mistake. There are at least two reasons why we must not +# use `-m 0755': +# - it causes special bits like SGID to be ignored, +# - it may be too restrictive (some setups expect 775 directories). +# +# Do not use -m 0755 and let people choose whatever they expect by +# setting umask. +# +# We cannot accept any implementation of `mkdir' that recognizes `-p'. +# Some implementations (such as Solaris 8's) are not thread-safe: if a +# parallel make tries to run `mkdir -p a/b' and `mkdir -p a/c' +# concurrently, both version can detect that a/ is missing, but only +# one can create it and the other will error out. Consequently we +# restrict ourselves to GNU make (using the --version option ensures +# this.) +AC_DEFUN([AM_PROG_MKDIR_P], +[if mkdir -p --version . >/dev/null 2>&1 && test ! -d ./--version; then + # We used to keeping the `.' as first argument, in order to + # allow $(mkdir_p) to be used without argument. As in + # $(mkdir_p) $(somedir) + # where $(somedir) is conditionally defined. However this is wrong + # for two reasons: + # 1. if the package is installed by a user who cannot write `.' + # make install will fail, + # 2. the above comment should most certainly read + # $(mkdir_p) $(DESTDIR)$(somedir) + # so it does not work when $(somedir) is undefined and + # $(DESTDIR) is not. + # To support the latter case, we have to write + # test -z "$(somedir)" || $(mkdir_p) $(DESTDIR)$(somedir), + # so the `.' trick is pointless. + mkdir_p='mkdir -p --' +else + # On NextStep and OpenStep, the `mkdir' command does not + # recognize any option. It will interpret all options as + # directories to create, and then abort because `.' already + # exists. + for d in ./-p ./--version; + do + test -d $d && rmdir $d + done + # $(mkinstalldirs) is defined by Automake if mkinstalldirs exists. + if test -f "$ac_aux_dir/mkinstalldirs"; then + mkdir_p='$(mkinstalldirs)' + else + mkdir_p='$(install_sh) -d' + fi +fi +AC_SUBST([mkdir_p])]) + +# Helper functions for option handling. -*- Autoconf -*- + +# Copyright (C) 2001, 2002, 2003, 2005 Free Software Foundation, Inc. 
+# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 3 + +# _AM_MANGLE_OPTION(NAME) +# ----------------------- +AC_DEFUN([_AM_MANGLE_OPTION], +[[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])]) + +# _AM_SET_OPTION(NAME) +# ------------------------------ +# Set option NAME. Presently that only means defining a flag for this option. +AC_DEFUN([_AM_SET_OPTION], +[m4_define(_AM_MANGLE_OPTION([$1]), 1)]) + +# _AM_SET_OPTIONS(OPTIONS) +# ---------------------------------- +# OPTIONS is a space-separated list of Automake options. +AC_DEFUN([_AM_SET_OPTIONS], +[AC_FOREACH([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])]) + +# _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET]) +# ------------------------------------------- +# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. +AC_DEFUN([_AM_IF_OPTION], +[m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])]) + +# Check to make sure that the build environment is sane. -*- Autoconf -*- + +# Copyright (C) 1996, 1997, 2000, 2001, 2003, 2005 +# Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 4 + +# AM_SANITY_CHECK +# --------------- +AC_DEFUN([AM_SANITY_CHECK], +[AC_MSG_CHECKING([whether build environment is sane]) +# Just in case +sleep 1 +echo timestamp > conftest.file +# Do `set' in a subshell so we don't clobber the current shell's +# arguments. Must try -L first in case configure is actually a +# symlink; some systems play weird games with the mod time of symlinks +# (eg FreeBSD returns the mod time of the symlink's containing +# directory). +if ( + set X `ls -Lt $srcdir/configure conftest.file 2> /dev/null` + if test "$[*]" = "X"; then + # -L didn't work. + set X `ls -t $srcdir/configure conftest.file` + fi + rm -f conftest.file + if test "$[*]" != "X $srcdir/configure conftest.file" \ + && test "$[*]" != "X conftest.file $srcdir/configure"; then + + # If neither matched, then we have a broken ls. This can happen + # if, for instance, CONFIG_SHELL is bash and it inherits a + # broken ls alias from the environment. This has actually + # happened. Such a system could not be considered "sane". + AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken +alias in your environment]) + fi + + test "$[2]" = conftest.file + ) +then + # Ok. + : +else + AC_MSG_ERROR([newly created file is older than distributed files! +Check your system clock]) +fi +AC_MSG_RESULT(yes)]) + +# Copyright (C) 2001, 2003, 2005 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_PROG_INSTALL_STRIP +# --------------------- +# One issue with vendor `install' (even GNU) is that you can't +# specify the program used to strip binaries. This is especially +# annoying in cross-compiling environments, where the build's strip +# is unlikely to handle the host's binaries. +# Fortunately install-sh will honor a STRIPPROG variable, so we +# always use install-sh in `make install-strip', and initialize +# STRIPPROG with the value of the STRIP variable (set by the user). 
+AC_DEFUN([AM_PROG_INSTALL_STRIP], +[AC_REQUIRE([AM_PROG_INSTALL_SH])dnl +# Installed binaries are usually stripped using `strip' when the user +# run `make install-strip'. However `strip' might not be the right +# tool to use in cross-compilation environments, therefore Automake +# will honor the `STRIP' environment variable to overrule this program. +dnl Don't test for $cross_compiling = yes, because it might be `maybe'. +if test "$cross_compiling" != no; then + AC_CHECK_TOOL([STRIP], [strip], :) +fi +INSTALL_STRIP_PROGRAM="\${SHELL} \$(install_sh) -c -s" +AC_SUBST([INSTALL_STRIP_PROGRAM])]) + +# Check how to create a tarball. -*- Autoconf -*- + +# Copyright (C) 2004, 2005 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 2 + +# _AM_PROG_TAR(FORMAT) +# -------------------- +# Check how to create a tarball in format FORMAT. +# FORMAT should be one of `v7', `ustar', or `pax'. +# +# Substitute a variable $(am__tar) that is a command +# writing to stdout a FORMAT-tarball containing the directory +# $tardir. +# tardir=directory && $(am__tar) > result.tar +# +# Substitute a variable $(am__untar) that extract such +# a tarball read from stdin. +# $(am__untar) < result.tar +AC_DEFUN([_AM_PROG_TAR], +[# Always define AMTAR for backward compatibility. +AM_MISSING_PROG([AMTAR], [tar]) +m4_if([$1], [v7], + [am__tar='${AMTAR} chof - "$$tardir"'; am__untar='${AMTAR} xf -'], + [m4_case([$1], [ustar],, [pax],, + [m4_fatal([Unknown tar format])]) +AC_MSG_CHECKING([how to create a $1 tar archive]) +# Loop over all known methods to create a tar archive until one works. +_am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none' +_am_tools=${am_cv_prog_tar_$1-$_am_tools} +# Do not fold the above two line into one, because Tru64 sh and +# Solaris sh will not grok spaces in the rhs of `-'. +for _am_tool in $_am_tools +do + case $_am_tool in + gnutar) + for _am_tar in tar gnutar gtar; + do + AM_RUN_LOG([$_am_tar --version]) && break + done + am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"' + am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"' + am__untar="$_am_tar -xf -" + ;; + plaintar) + # Must skip GNU tar: if it does not support --format= it doesn't create + # ustar tarball either. + (tar --version) >/dev/null 2>&1 && continue + am__tar='tar chf - "$$tardir"' + am__tar_='tar chf - "$tardir"' + am__untar='tar xf -' + ;; + pax) + am__tar='pax -L -x $1 -w "$$tardir"' + am__tar_='pax -L -x $1 -w "$tardir"' + am__untar='pax -r' + ;; + cpio) + am__tar='find "$$tardir" -print | cpio -o -H $1 -L' + am__tar_='find "$tardir" -print | cpio -o -H $1 -L' + am__untar='cpio -i -H $1 -d' + ;; + none) + am__tar=false + am__tar_=false + am__untar=false + ;; + esac + + # If the value was cached, stop now. We just wanted to have am__tar + # and am__untar set. 
+ test -n "${am_cv_prog_tar_$1}" && break + + # tar/untar a dummy directory, and stop if the command works + rm -rf conftest.dir + mkdir conftest.dir + echo GrepMe > conftest.dir/file + AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar]) + rm -rf conftest.dir + if test -s conftest.tar; then + AM_RUN_LOG([$am__untar /dev/null 2>&1 && break + fi +done +rm -rf conftest.dir + +AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool]) +AC_MSG_RESULT([$am_cv_prog_tar_$1])]) +AC_SUBST([am__tar]) +AC_SUBST([am__untar]) +]) # _AM_PROG_TAR + +m4_include([acinclude.m4]) diff --git a/src/native/config.h.in b/src/native/config.h.in new file mode 100644 index 0000000..d1e05d4 --- /dev/null +++ b/src/native/config.h.in @@ -0,0 +1,91 @@ +/* config.h.in. Generated from configure.ac by autoheader. */ + +/* The 'actual' dynamic-library for '-llzma' */ +#undef HADOOP_LZMA_LIBRARY + +/* The 'actual' dynamic-library for '-lz' */ +#undef HADOOP_ZLIB_LIBRARY + +/* Define to 1 if you have the header file. */ +#undef HAVE_DLFCN_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_INTTYPES_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_JNI_H + +/* Define to 1 if you have the `dl' library (-ldl). */ +#undef HAVE_LIBDL + +/* Define to 1 if you have the `jvm' library (-ljvm). */ +#undef HAVE_LIBJVM + +/* Define to 1 if you have the header file. */ +#undef HAVE_LZMA_LZMA_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_MEMORY_H + +/* Define to 1 if you have the `memset' function. */ +#undef HAVE_MEMSET + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDDEF_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDINT_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDIO_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDLIB_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STRINGS_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STRING_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_STAT_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_TYPES_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_UNISTD_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_ZCONF_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_ZLIB_H + +/* Name of package */ +#undef PACKAGE + +/* Define to the address where bug reports for this package should be sent. */ +#undef PACKAGE_BUGREPORT + +/* Define to the full name of this package. */ +#undef PACKAGE_NAME + +/* Define to the full name and version of this package. */ +#undef PACKAGE_STRING + +/* Define to the one symbol short name of this package. */ +#undef PACKAGE_TARNAME + +/* Define to the version of this package. */ +#undef PACKAGE_VERSION + +/* Define to 1 if you have the ANSI C header files. */ +#undef STDC_HEADERS + +/* Version number of package */ +#undef VERSION + +/* Define to empty if `const' does not conform to ANSI C. */ +#undef const diff --git a/src/native/config/config.guess b/src/native/config/config.guess new file mode 100755 index 0000000..2c957d5 --- /dev/null +++ b/src/native/config/config.guess @@ -0,0 +1,1477 @@ +#! /bin/sh +# Attempt to guess a canonical system name. +# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, +# 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc. 
+ +timestamp='2005-02-10' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# Originally written by Per Bothner . +# Please send patches to . Submit a context +# diff and a properly formatted ChangeLog entry. +# +# This script attempts to guess a canonical system name similar to +# config.sub. If it succeeds, it prints the system name on stdout, and +# exits with 0. Otherwise, it exits with 1. +# +# The plan is that this can be called by configure scripts if you +# don't specify an explicit build system type. + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] + +Output the configuration name of the system \`$me' is run on. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.guess ($timestamp) + +Originally written by Per Bothner. +Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005 +Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit 0 ;; + --version | -v ) + echo "$version" ; exit 0 ;; + --help | --h* | -h ) + echo "$usage"; exit 0 ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" >&2 + exit 1 ;; + * ) + break ;; + esac +done + +if test $# != 0; then + echo "$me: too many arguments$help" >&2 + exit 1 +fi + +trap 'exit 1' 1 2 15 + +# CC_FOR_BUILD -- compiler used by this script. Note that the use of a +# compiler to aid in system detection is discouraged as it requires +# temporary files to be created and, as you can see below, it is a +# headache to deal with in a portable fashion. + +# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still +# use `HOST_CC' if defined, but it is deprecated. + +# Portable tmp directory creation inspired by the Autoconf team. 
+ +set_cc_for_build=' +trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; +trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; +: ${TMPDIR=/tmp} ; + { tmp=`(umask 077 && mktemp -d -q "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || + { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || + { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || + { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; +dummy=$tmp/dummy ; +tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; +case $CC_FOR_BUILD,$HOST_CC,$CC in + ,,) echo "int x;" > $dummy.c ; + for c in cc gcc c89 c99 ; do + if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then + CC_FOR_BUILD="$c"; break ; + fi ; + done ; + if test x"$CC_FOR_BUILD" = x ; then + CC_FOR_BUILD=no_compiler_found ; + fi + ;; + ,,*) CC_FOR_BUILD=$CC ;; + ,*,*) CC_FOR_BUILD=$HOST_CC ;; +esac ;' + +# This is needed to find uname on a Pyramid OSx when run in the BSD universe. +# (ghazi@noc.rutgers.edu 1994-08-24) +if (test -f /.attbin/uname) >/dev/null 2>&1 ; then + PATH=$PATH:/.attbin ; export PATH +fi + +UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown +UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown +UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown +UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown + +if [ "${UNAME_SYSTEM}" = "Linux" ] ; then + eval $set_cc_for_build + cat << EOF > $dummy.c + #include + #ifdef __UCLIBC__ + # ifdef __UCLIBC_CONFIG_VERSION__ + LIBC=uclibc __UCLIBC_CONFIG_VERSION__ + # else + LIBC=uclibc + # endif + #else + LIBC=gnu + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep LIBC= | sed -e 's: ::g'` +fi + +# Note: order is significant - the case branches are not exclusive. + +case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in + *:NetBSD:*:*) + # NetBSD (nbsd) targets should (where applicable) match one or + # more of the tupples: *-*-netbsdelf*, *-*-netbsdaout*, + # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently + # switched to ELF, *-*-netbsd* would select the old + # object file format. This provides both forward + # compatibility and a consistent mechanism for selecting the + # object file format. + # + # Note: NetBSD doesn't particularly care about the vendor + # portion of the name. We always set it to "unknown". + sysctl="sysctl -n hw.machine_arch" + UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \ + /usr/sbin/$sysctl 2>/dev/null || echo unknown)` + case "${UNAME_MACHINE_ARCH}" in + armeb) machine=armeb-unknown ;; + arm*) machine=arm-unknown ;; + sh3el) machine=shl-unknown ;; + sh3eb) machine=sh-unknown ;; + *) machine=${UNAME_MACHINE_ARCH}-unknown ;; + esac + # The Operating System including object format, if it has switched + # to ELF recently, or will in the future. + case "${UNAME_MACHINE_ARCH}" in + arm*|i386|m68k|ns32k|sh3*|sparc|vax) + eval $set_cc_for_build + if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep __ELF__ >/dev/null + then + # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). + # Return netbsd for either. FIX? + os=netbsd + else + os=netbsdelf + fi + ;; + *) + os=netbsd + ;; + esac + # The OS release + # Debian GNU/NetBSD machines have a different userland, and + # thus, need a distinct triplet. 
However, they do not need + # kernel version information, so it can be replaced with a + # suitable tag, in the style of linux-gnu. + case "${UNAME_VERSION}" in + Debian*) + release='-gnu' + ;; + *) + release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'` + ;; + esac + # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: + # contains redundant information, the shorter form: + # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. + echo "${machine}-${os}${release}" + exit 0 ;; + amd64:OpenBSD:*:*) + echo x86_64-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + amiga:OpenBSD:*:*) + echo m68k-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + cats:OpenBSD:*:*) + echo arm-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + hp300:OpenBSD:*:*) + echo m68k-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + luna88k:OpenBSD:*:*) + echo m88k-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + mac68k:OpenBSD:*:*) + echo m68k-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + macppc:OpenBSD:*:*) + echo powerpc-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + mvme68k:OpenBSD:*:*) + echo m68k-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + mvme88k:OpenBSD:*:*) + echo m88k-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + mvmeppc:OpenBSD:*:*) + echo powerpc-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + sgi:OpenBSD:*:*) + echo mips64-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + sun3:OpenBSD:*:*) + echo m68k-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + *:OpenBSD:*:*) + echo ${UNAME_MACHINE}-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + *:ekkoBSD:*:*) + echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} + exit 0 ;; + macppc:MirBSD:*:*) + echo powerppc-unknown-mirbsd${UNAME_RELEASE} + exit 0 ;; + *:MirBSD:*:*) + echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} + exit 0 ;; + alpha:OSF1:*:*) + case $UNAME_RELEASE in + *4.0) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` + ;; + *5.*) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` + ;; + esac + # According to Compaq, /usr/sbin/psrinfo has been available on + # OSF/1 and Tru64 systems produced since 1995. I hope that + # covers most systems running today. This code pipes the CPU + # types through head -n 1, so we only detect the type of CPU 0. + ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` + case "$ALPHA_CPU_TYPE" in + "EV4 (21064)") + UNAME_MACHINE="alpha" ;; + "EV4.5 (21064)") + UNAME_MACHINE="alpha" ;; + "LCA4 (21066/21068)") + UNAME_MACHINE="alpha" ;; + "EV5 (21164)") + UNAME_MACHINE="alphaev5" ;; + "EV5.6 (21164A)") + UNAME_MACHINE="alphaev56" ;; + "EV5.6 (21164PC)") + UNAME_MACHINE="alphapca56" ;; + "EV5.7 (21164PC)") + UNAME_MACHINE="alphapca57" ;; + "EV6 (21264)") + UNAME_MACHINE="alphaev6" ;; + "EV6.7 (21264A)") + UNAME_MACHINE="alphaev67" ;; + "EV6.8CB (21264C)") + UNAME_MACHINE="alphaev68" ;; + "EV6.8AL (21264B)") + UNAME_MACHINE="alphaev68" ;; + "EV6.8CX (21264D)") + UNAME_MACHINE="alphaev68" ;; + "EV6.9A (21264/EV69A)") + UNAME_MACHINE="alphaev69" ;; + "EV7 (21364)") + UNAME_MACHINE="alphaev7" ;; + "EV7.9 (21364A)") + UNAME_MACHINE="alphaev79" ;; + esac + # A Pn.n version is a patched version. + # A Vn.n version is a released version. + # A Tn.n version is a released field test version. + # A Xn.n version is an unreleased experimental baselevel. + # 1.2 uses "1.2" for uname -r. 
+ echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` + exit 0 ;; + Alpha\ *:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # Should we change UNAME_MACHINE based on the output of uname instead + # of the specific Alpha model? + echo alpha-pc-interix + exit 0 ;; + 21064:Windows_NT:50:3) + echo alpha-dec-winnt3.5 + exit 0 ;; + Amiga*:UNIX_System_V:4.0:*) + echo m68k-unknown-sysv4 + exit 0;; + *:[Aa]miga[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-amigaos + exit 0 ;; + *:[Mm]orph[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-morphos + exit 0 ;; + *:OS/390:*:*) + echo i370-ibm-openedition + exit 0 ;; + *:z/VM:*:*) + echo s390-ibm-zvmoe + exit 0 ;; + *:OS400:*:*) + echo powerpc-ibm-os400 + exit 0 ;; + arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) + echo arm-acorn-riscix${UNAME_RELEASE} + exit 0;; + SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) + echo hppa1.1-hitachi-hiuxmpp + exit 0;; + Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) + # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. + if test "`(/bin/universe) 2>/dev/null`" = att ; then + echo pyramid-pyramid-sysv3 + else + echo pyramid-pyramid-bsd + fi + exit 0 ;; + NILE*:*:*:dcosx) + echo pyramid-pyramid-svr4 + exit 0 ;; + DRS?6000:unix:4.0:6*) + echo sparc-icl-nx6 + exit 0 ;; + DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) + case `/usr/bin/uname -p` in + sparc) echo sparc-icl-nx7 && exit 0 ;; + esac ;; + sun4H:SunOS:5.*:*) + echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit 0 ;; + sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) + echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit 0 ;; + i86pc:SunOS:5.*:*) + echo i386-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit 0 ;; + sun4*:SunOS:6*:*) + # According to config.sub, this is the proper way to canonicalize + # SunOS6. Hard to guess exactly what SunOS6 will be like, but + # it's likely to be more like Solaris than SunOS4. + echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit 0 ;; + sun4*:SunOS:*:*) + case "`/usr/bin/arch -k`" in + Series*|S4*) + UNAME_RELEASE=`uname -v` + ;; + esac + # Japanese Language versions have a version number like `4.1.3-JL'. + echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` + exit 0 ;; + sun3*:SunOS:*:*) + echo m68k-sun-sunos${UNAME_RELEASE} + exit 0 ;; + sun*:*:4.2BSD:*) + UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` + test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3 + case "`/bin/arch`" in + sun3) + echo m68k-sun-sunos${UNAME_RELEASE} + ;; + sun4) + echo sparc-sun-sunos${UNAME_RELEASE} + ;; + esac + exit 0 ;; + aushp:SunOS:*:*) + echo sparc-auspex-sunos${UNAME_RELEASE} + exit 0 ;; + # The situation for MiNT is a little confusing. The machine name + # can be virtually everything (everything which is not + # "atarist" or "atariste" at least should have a processor + # > m68000). The system name ranges from "MiNT" over "FreeMiNT" + # to the lowercase version "mint" (or "freemint"). Finally + # the system name "TOS" denotes a system which is actually not + # MiNT. But MiNT is downward compatible to TOS, so this should + # be no problem. 
+ atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit 0 ;; + atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit 0 ;; + *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit 0 ;; + milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) + echo m68k-milan-mint${UNAME_RELEASE} + exit 0 ;; + hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) + echo m68k-hades-mint${UNAME_RELEASE} + exit 0 ;; + *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) + echo m68k-unknown-mint${UNAME_RELEASE} + exit 0 ;; + m68k:machten:*:*) + echo m68k-apple-machten${UNAME_RELEASE} + exit 0 ;; + powerpc:machten:*:*) + echo powerpc-apple-machten${UNAME_RELEASE} + exit 0 ;; + RISC*:Mach:*:*) + echo mips-dec-mach_bsd4.3 + exit 0 ;; + RISC*:ULTRIX:*:*) + echo mips-dec-ultrix${UNAME_RELEASE} + exit 0 ;; + VAX*:ULTRIX*:*:*) + echo vax-dec-ultrix${UNAME_RELEASE} + exit 0 ;; + 2020:CLIX:*:* | 2430:CLIX:*:*) + echo clipper-intergraph-clix${UNAME_RELEASE} + exit 0 ;; + mips:*:*:UMIPS | mips:*:*:RISCos) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c +#ifdef __cplusplus +#include /* for printf() prototype */ + int main (int argc, char *argv[]) { +#else + int main (argc, argv) int argc; char *argv[]; { +#endif + #if defined (host_mips) && defined (MIPSEB) + #if defined (SYSTYPE_SYSV) + printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_SVR4) + printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) + printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); + #endif + #endif + exit (-1); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c \ + && $dummy `echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` \ + && exit 0 + echo mips-mips-riscos${UNAME_RELEASE} + exit 0 ;; + Motorola:PowerMAX_OS:*:*) + echo powerpc-motorola-powermax + exit 0 ;; + Motorola:*:4.3:PL8-*) + echo powerpc-harris-powermax + exit 0 ;; + Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) + echo powerpc-harris-powermax + exit 0 ;; + Night_Hawk:Power_UNIX:*:*) + echo powerpc-harris-powerunix + exit 0 ;; + m88k:CX/UX:7*:*) + echo m88k-harris-cxux7 + exit 0 ;; + m88k:*:4*:R4*) + echo m88k-motorola-sysv4 + exit 0 ;; + m88k:*:3*:R3*) + echo m88k-motorola-sysv3 + exit 0 ;; + AViiON:dgux:*:*) + # DG/UX returns AViiON for all architectures + UNAME_PROCESSOR=`/usr/bin/uname -p` + if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] + then + if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ + [ ${TARGET_BINARY_INTERFACE}x = x ] + then + echo m88k-dg-dgux${UNAME_RELEASE} + else + echo m88k-dg-dguxbcs${UNAME_RELEASE} + fi + else + echo i586-dg-dgux${UNAME_RELEASE} + fi + exit 0 ;; + M88*:DolphinOS:*:*) # DolphinOS (SVR3) + echo m88k-dolphin-sysv3 + exit 0 ;; + M88*:*:R3*:*) + # Delta 88k system running SVR3 + echo m88k-motorola-sysv3 + exit 0 ;; + XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) + echo m88k-tektronix-sysv3 + exit 0 ;; + Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) + echo m68k-tektronix-bsd + exit 0 ;; + *:IRIX*:*:*) + echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` + exit 0 ;; + ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. 
+ echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id + exit 0 ;; # Note that: echo "'`uname -s`'" gives 'AIX ' + i*86:AIX:*:*) + echo i386-ibm-aix + exit 0 ;; + ia64:AIX:*:*) + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} + exit 0 ;; + *:AIX:2:3) + if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + + main() + { + if (!__power_pc()) + exit(1); + puts("powerpc-ibm-aix3.2.5"); + exit(0); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && $dummy && exit 0 + echo rs6000-ibm-aix3.2.5 + elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then + echo rs6000-ibm-aix3.2.4 + else + echo rs6000-ibm-aix3.2 + fi + exit 0 ;; + *:AIX:*:[45]) + IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` + if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then + IBM_ARCH=rs6000 + else + IBM_ARCH=powerpc + fi + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${IBM_ARCH}-ibm-aix${IBM_REV} + exit 0 ;; + *:AIX:*:*) + echo rs6000-ibm-aix + exit 0 ;; + ibmrt:4.4BSD:*|romp-ibm:BSD:*) + echo romp-ibm-bsd4.4 + exit 0 ;; + ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and + echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to + exit 0 ;; # report: romp-ibm BSD 4.3 + *:BOSX:*:*) + echo rs6000-bull-bosx + exit 0 ;; + DPX/2?00:B.O.S.:*:*) + echo m68k-bull-sysv3 + exit 0 ;; + 9000/[34]??:4.3bsd:1.*:*) + echo m68k-hp-bsd + exit 0 ;; + hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) + echo m68k-hp-bsd4.4 + exit 0 ;; + 9000/[34678]??:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + case "${UNAME_MACHINE}" in + 9000/31? ) HP_ARCH=m68000 ;; + 9000/[34]?? 
) HP_ARCH=m68k ;; + 9000/[678][0-9][0-9]) + if [ -x /usr/bin/getconf ]; then + sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` + sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` + case "${sc_cpu_version}" in + 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0 + 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1 + 532) # CPU_PA_RISC2_0 + case "${sc_kernel_bits}" in + 32) HP_ARCH="hppa2.0n" ;; + 64) HP_ARCH="hppa2.0w" ;; + '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20 + esac ;; + esac + fi + if [ "${HP_ARCH}" = "" ]; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + + #define _HPUX_SOURCE + #include + #include + + int main () + { + #if defined(_SC_KERNEL_BITS) + long bits = sysconf(_SC_KERNEL_BITS); + #endif + long cpu = sysconf (_SC_CPU_VERSION); + + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1"); break; + case CPU_PA_RISC2_0: + #if defined(_SC_KERNEL_BITS) + switch (bits) + { + case 64: puts ("hppa2.0w"); break; + case 32: puts ("hppa2.0n"); break; + default: puts ("hppa2.0"); break; + } break; + #else /* !defined(_SC_KERNEL_BITS) */ + puts ("hppa2.0"); break; + #endif + default: puts ("hppa1.0"); break; + } + exit (0); + } +EOF + (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` + test -z "$HP_ARCH" && HP_ARCH=hppa + fi ;; + esac + if [ ${HP_ARCH} = "hppa2.0w" ] + then + # avoid double evaluation of $set_cc_for_build + test -n "$CC_FOR_BUILD" || eval $set_cc_for_build + if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E -) | grep __LP64__ >/dev/null + then + HP_ARCH="hppa2.0w" + else + HP_ARCH="hppa64" + fi + fi + echo ${HP_ARCH}-hp-hpux${HPUX_REV} + exit 0 ;; + ia64:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + echo ia64-hp-hpux${HPUX_REV} + exit 0 ;; + 3050*:HI-UX:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + int + main () + { + long cpu = sysconf (_SC_CPU_VERSION); + /* The order matters, because CPU_IS_HP_MC68K erroneously returns + true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct + results, however. 
*/ + if (CPU_IS_PA_RISC (cpu)) + { + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; + case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; + default: puts ("hppa-hitachi-hiuxwe2"); break; + } + } + else if (CPU_IS_HP_MC68K (cpu)) + puts ("m68k-hitachi-hiuxwe2"); + else puts ("unknown-hitachi-hiuxwe2"); + exit (0); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && $dummy && exit 0 + echo unknown-hitachi-hiuxwe2 + exit 0 ;; + 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) + echo hppa1.1-hp-bsd + exit 0 ;; + 9000/8??:4.3bsd:*:*) + echo hppa1.0-hp-bsd + exit 0 ;; + *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) + echo hppa1.0-hp-mpeix + exit 0 ;; + hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) + echo hppa1.1-hp-osf + exit 0 ;; + hp8??:OSF1:*:*) + echo hppa1.0-hp-osf + exit 0 ;; + i*86:OSF1:*:*) + if [ -x /usr/sbin/sysversion ] ; then + echo ${UNAME_MACHINE}-unknown-osf1mk + else + echo ${UNAME_MACHINE}-unknown-osf1 + fi + exit 0 ;; + parisc*:Lites*:*:*) + echo hppa1.1-hp-lites + exit 0 ;; + C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) + echo c1-convex-bsd + exit 0 ;; + C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit 0 ;; + C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) + echo c34-convex-bsd + exit 0 ;; + C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) + echo c38-convex-bsd + exit 0 ;; + C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) + echo c4-convex-bsd + exit 0 ;; + CRAY*Y-MP:*:*:*) + echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit 0 ;; + CRAY*[A-Z]90:*:*:*) + echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ + | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ + -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ + -e 's/\.[^.]*$/.X/' + exit 0 ;; + CRAY*TS:*:*:*) + echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit 0 ;; + CRAY*T3E:*:*:*) + echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit 0 ;; + CRAY*SV1:*:*:*) + echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit 0 ;; + *:UNICOS/mp:*:*) + echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit 0 ;; + F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) + FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` + FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` + echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit 0 ;; + 5000:UNIX_System_V:4.*:*) + FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` + echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit 0 ;; + i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) + echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} + exit 0 ;; + sparc*:BSD/OS:*:*) + echo sparc-unknown-bsdi${UNAME_RELEASE} + exit 0 ;; + *:BSD/OS:*:*) + echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} + exit 0 ;; + *:FreeBSD:*:*) + echo ${UNAME_MACHINE}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + exit 0 ;; + i*:CYGWIN*:*) + echo ${UNAME_MACHINE}-pc-cygwin + exit 0 ;; + i*:MINGW*:*) + echo ${UNAME_MACHINE}-pc-mingw32 + exit 0 ;; + i*:PW*:*) + echo ${UNAME_MACHINE}-pc-pw32 + exit 0 ;; + x86:Interix*:[34]*) + echo 
i586-pc-interix${UNAME_RELEASE}|sed -e 's/\..*//' + exit 0 ;; + [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) + echo i${UNAME_MACHINE}-pc-mks + exit 0 ;; + i*:Windows_NT*:* | Pentium*:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we + # UNAME_MACHINE based on the output of uname instead of i386? + echo i586-pc-interix + exit 0 ;; + i*:UWIN*:*) + echo ${UNAME_MACHINE}-pc-uwin + exit 0 ;; + amd64:CYGWIN*:*:*) + echo x86_64-unknown-cygwin + exit 0 ;; + p*:CYGWIN*:*) + echo powerpcle-unknown-cygwin + exit 0 ;; + prep*:SunOS:5.*:*) + echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit 0 ;; + *:GNU:*:*) + # the GNU system + echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` + exit 0 ;; + *:GNU/*:*:*) + # other systems with GNU libc and userland + echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu + exit 0 ;; + i*86:Minix:*:*) + echo ${UNAME_MACHINE}-pc-minix + exit 0 ;; + arm*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit 0 ;; + cris:Linux:*:*) + echo cris-axis-linux-${LIBC} + exit 0 ;; + crisv32:Linux:*:*) + echo crisv32-axis-linux-${LIBC} + exit 0 ;; + frv:Linux:*:*) + echo frv-unknown-linux-${LIBC} + exit 0 ;; + ia64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit 0 ;; + m32r*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit 0 ;; + m68*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit 0 ;; + mips:Linux:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #undef CPU + #undef mips + #undef mipsel + #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) + CPU=mipsel + #else + #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) + CPU=mips + #else + CPU= + #endif + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^CPU=` + test x"${CPU}" != x && echo "${CPU}-unknown-linux-${LIBC}" && exit 0 + ;; + mips64:Linux:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #undef CPU + #undef mips64 + #undef mips64el + #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) + CPU=mips64el + #else + #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) + CPU=mips64 + #else + CPU= + #endif + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^CPU=` + test x"${CPU}" != x && echo "${CPU}-unknown-linux-${LIBC}" && exit 0 + ;; + ppc:Linux:*:*) + echo powerpc-unknown-linux-${LIBC} + exit 0 ;; + ppc64:Linux:*:*) + echo powerpc64-unknown-linux-${LIBC} + exit 0 ;; + alpha:Linux:*:*) + case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in + EV5) UNAME_MACHINE=alphaev5 ;; + EV56) UNAME_MACHINE=alphaev56 ;; + PCA56) UNAME_MACHINE=alphapca56 ;; + PCA57) UNAME_MACHINE=alphapca56 ;; + EV6) UNAME_MACHINE=alphaev6 ;; + EV67) UNAME_MACHINE=alphaev67 ;; + EV68*) UNAME_MACHINE=alphaev68 ;; + esac + objdump --private-headers /bin/sh | grep ld.so.1 >/dev/null + if test "$?" 
= 0 ; then LIBC="gnulibc1" ; fi + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit 0 ;; + parisc:Linux:*:* | hppa:Linux:*:*) + # Look for CPU level + case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in + PA7*) echo hppa1.1-unknown-linux-${LIBC} ;; + PA8*) echo hppa2.0-unknown-linux-${LIBC} ;; + *) echo hppa-unknown-linux-${LIBC} ;; + esac + exit 0 ;; + parisc64:Linux:*:* | hppa64:Linux:*:*) + echo hppa64-unknown-linux-${LIBC} + exit 0 ;; + s390:Linux:*:* | s390x:Linux:*:*) + echo ${UNAME_MACHINE}-ibm-linux + exit 0 ;; + sh64*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit 0 ;; + sh*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit 0 ;; + sparc:Linux:*:* | sparc64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit 0 ;; + x86_64:Linux:*:*) + echo x86_64-unknown-linux-${LIBC} + exit 0 ;; + i*86:Linux:*:*) + # The BFD linker knows what the default object file format is, so + # first see if it will tell us. cd to the root directory to prevent + # problems with other programs or directories called `ld' in the path. + # Set LC_ALL=C to ensure ld outputs messages in English. + ld_supported_targets=`cd /; LC_ALL=C ld --help 2>&1 \ + | sed -ne '/supported targets:/!d + s/[ ][ ]*/ /g + s/.*supported targets: *// + s/ .*// + p'` + case "$ld_supported_targets" in + elf32-i386) + TENTATIVE="${UNAME_MACHINE}-pc-linux-${LIBC}" + ;; + a.out-i386-linux) + echo "${UNAME_MACHINE}-pc-linux-${LIBC}aout" + exit 0 ;; + coff-i386) + echo "${UNAME_MACHINE}-pc-linux-${LIBC}coff" + exit 0 ;; + "") + # Either a pre-BFD a.out linker (linux-gnuoldld) or + # one that does not give us useful --help. + echo "${UNAME_MACHINE}-pc-linux-${LIBC}oldld" + exit 0 ;; + esac + if [ "`echo $LIBC | sed -e 's:uclibc::'`" != "$LIBC" ] ; then echo "$TENTATIVE" && exit 0 ; fi + # Determine whether the default compiler is a.out or elf + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + #ifdef __ELF__ + # ifdef __GLIBC__ + # if __GLIBC__ >= 2 + LIBC=gnu + # else + LIBC=gnulibc1 + # endif + # else + LIBC=gnulibc1 + # endif + #else + #ifdef __INTEL_COMPILER + LIBC=gnu + #else + LIBC=gnuaout + #endif + #endif + #ifdef __dietlibc__ + LIBC=dietlibc + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^LIBC=` + test x"${LIBC}" != x && echo "${UNAME_MACHINE}-pc-linux-${LIBC}" && exit 0 + test x"${TENTATIVE}" != x && echo "${TENTATIVE}" && exit 0 + ;; + i*86:DYNIX/ptx:4*:*) + # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. + # earlier versions are messed up and put the nodename in both + # sysname and nodename. + echo i386-sequent-sysv4 + exit 0 ;; + i*86:UNIX_SV:4.2MP:2.*) + # Unixware is an offshoot of SVR4, but it has its own version + # number series starting with 2... + # I am not positive that other SVR4 systems won't match this, + # I just have to hope. -- rms. + # Use sysv4.2uw... so that sysv4* matches it. + echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} + exit 0 ;; + i*86:OS/2:*:*) + # If we were able to find `uname', then EMX Unix compatibility + # is probably installed. 
+ echo ${UNAME_MACHINE}-pc-os2-emx + exit 0 ;; + i*86:XTS-300:*:STOP) + echo ${UNAME_MACHINE}-unknown-stop + exit 0 ;; + i*86:atheos:*:*) + echo ${UNAME_MACHINE}-unknown-atheos + exit 0 ;; + i*86:syllable:*:*) + echo ${UNAME_MACHINE}-pc-syllable + exit 0 ;; + i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.0*:*) + echo i386-unknown-lynxos${UNAME_RELEASE} + exit 0 ;; + i*86:*DOS:*:*) + echo ${UNAME_MACHINE}-pc-msdosdjgpp + exit 0 ;; + i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) + UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` + if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then + echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} + else + echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} + fi + exit 0 ;; + i*86:*:5:[78]*) + case `/bin/uname -X | grep "^Machine"` in + *486*) UNAME_MACHINE=i486 ;; + *Pentium) UNAME_MACHINE=i586 ;; + *Pent*|*Celeron) UNAME_MACHINE=i686 ;; + esac + echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} + exit 0 ;; + i*86:*:3.2:*) + if test -f /usr/options/cb.name; then + UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then + UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` + (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 + (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ + && UNAME_MACHINE=i586 + (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ + && UNAME_MACHINE=i686 + (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ + && UNAME_MACHINE=i686 + echo ${UNAME_MACHINE}-pc-sco$UNAME_REL + else + echo ${UNAME_MACHINE}-pc-sysv32 + fi + exit 0 ;; + pc:*:*:*) + # Left here for compatibility: + # uname -m prints for DJGPP always 'pc', but it prints nothing about + # the processor, so we play safe by assuming i386. + echo i386-pc-msdosdjgpp + exit 0 ;; + Intel:Mach:3*:*) + echo i386-pc-mach3 + exit 0 ;; + paragon:*:*:*) + echo i860-intel-osf1 + exit 0 ;; + i860:*:4.*:*) # i860-SVR4 + if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then + echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 + else # Add other i860-SVR4 vendors below as they are discovered. 
+ echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 + fi + exit 0 ;; + mini*:CTIX:SYS*5:*) + # "miniframe" + echo m68010-convergent-sysv + exit 0 ;; + mc68k:UNIX:SYSTEM5:3.51m) + echo m68k-convergent-sysv + exit 0 ;; + M680?0:D-NIX:5.3:*) + echo m68k-diab-dnix + exit 0 ;; + M68*:*:R3V[5678]*:*) + test -r /sysV68 && echo 'm68k-motorola-sysv' && exit 0 ;; + 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) + OS_REL='' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && echo i486-ncr-sysv4.3${OS_REL} && exit 0 + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && echo i586-ncr-sysv4.3${OS_REL} && exit 0 ;; + 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && echo i486-ncr-sysv4 && exit 0 ;; + m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) + echo m68k-unknown-lynxos${UNAME_RELEASE} + exit 0 ;; + mc68030:UNIX_System_V:4.*:*) + echo m68k-atari-sysv4 + exit 0 ;; + TSUNAMI:LynxOS:2.*:*) + echo sparc-unknown-lynxos${UNAME_RELEASE} + exit 0 ;; + rs6000:LynxOS:2.*:*) + echo rs6000-unknown-lynxos${UNAME_RELEASE} + exit 0 ;; + PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.0*:*) + echo powerpc-unknown-lynxos${UNAME_RELEASE} + exit 0 ;; + SM[BE]S:UNIX_SV:*:*) + echo mips-dde-sysv${UNAME_RELEASE} + exit 0 ;; + RM*:ReliantUNIX-*:*:*) + echo mips-sni-sysv4 + exit 0 ;; + RM*:SINIX-*:*:*) + echo mips-sni-sysv4 + exit 0 ;; + *:SINIX-*:*:*) + if uname -p 2>/dev/null >/dev/null ; then + UNAME_MACHINE=`(uname -p) 2>/dev/null` + echo ${UNAME_MACHINE}-sni-sysv4 + else + echo ns32k-sni-sysv + fi + exit 0 ;; + PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort + # says + echo i586-unisys-sysv4 + exit 0 ;; + *:UNIX_System_V:4*:FTX*) + # From Gerald Hewes . + # How about differentiating between stratus architectures? -djm + echo hppa1.1-stratus-sysv4 + exit 0 ;; + *:*:*:FTX*) + # From seanf@swdc.stratus.com. + echo i860-stratus-sysv4 + exit 0 ;; + *:VOS:*:*) + # From Paul.Green@stratus.com. + echo hppa1.1-stratus-vos + exit 0 ;; + mc68*:A/UX:*:*) + echo m68k-apple-aux${UNAME_RELEASE} + exit 0 ;; + news*:NEWS-OS:6*:*) + echo mips-sony-newsos6 + exit 0 ;; + R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) + if [ -d /usr/nec ]; then + echo mips-nec-sysv${UNAME_RELEASE} + else + echo mips-unknown-sysv${UNAME_RELEASE} + fi + exit 0 ;; + BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. + echo powerpc-be-beos + exit 0 ;; + BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. + echo powerpc-apple-beos + exit 0 ;; + BePC:BeOS:*:*) # BeOS running on Intel PC compatible. 
+ echo i586-pc-beos + exit 0 ;; + SX-4:SUPER-UX:*:*) + echo sx4-nec-superux${UNAME_RELEASE} + exit 0 ;; + SX-5:SUPER-UX:*:*) + echo sx5-nec-superux${UNAME_RELEASE} + exit 0 ;; + SX-6:SUPER-UX:*:*) + echo sx6-nec-superux${UNAME_RELEASE} + exit 0 ;; + Power*:Rhapsody:*:*) + echo powerpc-apple-rhapsody${UNAME_RELEASE} + exit 0 ;; + *:Rhapsody:*:*) + echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} + exit 0 ;; + *:Darwin:*:*) + UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown + case $UNAME_PROCESSOR in + *86) UNAME_PROCESSOR=i686 ;; + unknown) UNAME_PROCESSOR=powerpc ;; + esac + echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} + exit 0 ;; + *:procnto*:*:* | *:QNX:[0123456789]*:*) + UNAME_PROCESSOR=`uname -p` + if test "$UNAME_PROCESSOR" = "x86"; then + UNAME_PROCESSOR=i386 + UNAME_MACHINE=pc + fi + echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} + exit 0 ;; + *:QNX:*:4*) + echo i386-pc-qnx + exit 0 ;; + NSE-?:NONSTOP_KERNEL:*:*) + echo nse-tandem-nsk${UNAME_RELEASE} + exit 0 ;; + NSR-?:NONSTOP_KERNEL:*:*) + echo nsr-tandem-nsk${UNAME_RELEASE} + exit 0 ;; + *:NonStop-UX:*:*) + echo mips-compaq-nonstopux + exit 0 ;; + BS2000:POSIX*:*:*) + echo bs2000-siemens-sysv + exit 0 ;; + DS/*:UNIX_System_V:*:*) + echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} + exit 0 ;; + *:Plan9:*:*) + # "uname -m" is not consistent, so use $cputype instead. 386 + # is converted to i386 for consistency with other x86 + # operating systems. + if test "$cputype" = "386"; then + UNAME_MACHINE=i386 + else + UNAME_MACHINE="$cputype" + fi + echo ${UNAME_MACHINE}-unknown-plan9 + exit 0 ;; + *:TOPS-10:*:*) + echo pdp10-unknown-tops10 + exit 0 ;; + *:TENEX:*:*) + echo pdp10-unknown-tenex + exit 0 ;; + KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) + echo pdp10-dec-tops20 + exit 0 ;; + XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) + echo pdp10-xkl-tops20 + exit 0 ;; + *:TOPS-20:*:*) + echo pdp10-unknown-tops20 + exit 0 ;; + *:ITS:*:*) + echo pdp10-unknown-its + exit 0 ;; + SEI:*:*:SEIUX) + echo mips-sei-seiux${UNAME_RELEASE} + exit 0 ;; + *:DragonFly:*:*) + echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + exit 0 ;; + *:*VMS:*:*) + UNAME_MACHINE=`(uname -p) 2>/dev/null` + case "${UNAME_MACHINE}" in + A*) echo alpha-dec-vms && exit 0 ;; + I*) echo ia64-dec-vms && exit 0 ;; + V*) echo vax-dec-vms && exit 0 ;; + esac ;; + *:XENIX:*:SysV) + echo i386-pc-xenix + exit 0 ;; +esac + +#echo '(No uname command or uname output not recognized.)' 1>&2 +#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2 + +eval $set_cc_for_build +cat >$dummy.c < +# include +#endif +main () +{ +#if defined (sony) +#if defined (MIPSEB) + /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed, + I don't know.... 
*/ + printf ("mips-sony-bsd\n"); exit (0); +#else +#include + printf ("m68k-sony-newsos%s\n", +#ifdef NEWSOS4 + "4" +#else + "" +#endif + ); exit (0); +#endif +#endif + +#if defined (__arm) && defined (__acorn) && defined (__unix) + printf ("arm-acorn-riscix"); exit (0); +#endif + +#if defined (hp300) && !defined (hpux) + printf ("m68k-hp-bsd\n"); exit (0); +#endif + +#if defined (NeXT) +#if !defined (__ARCHITECTURE__) +#define __ARCHITECTURE__ "m68k" +#endif + int version; + version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`; + if (version < 4) + printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version); + else + printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version); + exit (0); +#endif + +#if defined (MULTIMAX) || defined (n16) +#if defined (UMAXV) + printf ("ns32k-encore-sysv\n"); exit (0); +#else +#if defined (CMU) + printf ("ns32k-encore-mach\n"); exit (0); +#else + printf ("ns32k-encore-bsd\n"); exit (0); +#endif +#endif +#endif + +#if defined (__386BSD__) + printf ("i386-pc-bsd\n"); exit (0); +#endif + +#if defined (sequent) +#if defined (i386) + printf ("i386-sequent-dynix\n"); exit (0); +#endif +#if defined (ns32000) + printf ("ns32k-sequent-dynix\n"); exit (0); +#endif +#endif + +#if defined (_SEQUENT_) + struct utsname un; + + uname(&un); + + if (strncmp(un.version, "V2", 2) == 0) { + printf ("i386-sequent-ptx2\n"); exit (0); + } + if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */ + printf ("i386-sequent-ptx1\n"); exit (0); + } + printf ("i386-sequent-ptx\n"); exit (0); + +#endif + +#if defined (vax) +# if !defined (ultrix) +# include +# if defined (BSD) +# if BSD == 43 + printf ("vax-dec-bsd4.3\n"); exit (0); +# else +# if BSD == 199006 + printf ("vax-dec-bsd4.3reno\n"); exit (0); +# else + printf ("vax-dec-bsd\n"); exit (0); +# endif +# endif +# else + printf ("vax-dec-bsd\n"); exit (0); +# endif +# else + printf ("vax-dec-ultrix\n"); exit (0); +# endif +#endif + +#if defined (alliant) && defined (i860) + printf ("i860-alliant-bsd\n"); exit (0); +#endif + + exit (1); +} +EOF + +$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && $dummy && exit 0 + +# Apollos put the system type in the environment. + +test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit 0; } + +# Convex versions that predate uname can use getsysinfo(1) + +if [ -x /usr/convex/getsysinfo ] +then + case `getsysinfo -f cpu_type` in + c1*) + echo c1-convex-bsd + exit 0 ;; + c2*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit 0 ;; + c34*) + echo c34-convex-bsd + exit 0 ;; + c38*) + echo c38-convex-bsd + exit 0 ;; + c4*) + echo c4-convex-bsd + exit 0 ;; + esac +fi + +cat >&2 < in order to provide the needed +information to handle your system. 
+ +config.guess timestamp = $timestamp + +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null` + +hostinfo = `(hostinfo) 2>/dev/null` +/bin/universe = `(/bin/universe) 2>/dev/null` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` +/bin/arch = `(/bin/arch) 2>/dev/null` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` + +UNAME_MACHINE = ${UNAME_MACHINE} +UNAME_RELEASE = ${UNAME_RELEASE} +UNAME_SYSTEM = ${UNAME_SYSTEM} +UNAME_VERSION = ${UNAME_VERSION} +EOF + +exit 1 + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/src/native/config/config.sub b/src/native/config/config.sub new file mode 100755 index 0000000..d8fd2f8 --- /dev/null +++ b/src/native/config/config.sub @@ -0,0 +1,1566 @@ +#! /bin/sh +# Configuration validation subroutine script. +# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, +# 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc. + +timestamp='2005-02-10' + +# This file is (in principle) common to ALL GNU software. +# The presence of a machine in this file suggests that SOME GNU software +# can handle that machine. It does not imply ALL GNU software can. +# +# This file is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, +# Boston, MA 02111-1307, USA. + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# Please send patches to <config-patches@gnu.org>. Submit a context +# diff and a properly formatted ChangeLog entry. +# +# Configuration subroutine to validate and canonicalize a configuration type. +# Supply the specified configuration type as an argument. +# If it is invalid, we print an error message on stderr and exit with code 1. +# Otherwise, we print the canonical config type on stdout and succeed. + +# This file is supposed to be the same for all GNU packages +# and recognize all the CPU types, system types and aliases +# that are meaningful with *any* GNU software. +# Each package is responsible for reporting which valid configurations +# it does not support. The user should be able to distinguish +# a failure to support a valid configuration from a meaningless +# configuration.
+ +# The goal of this file is to map all the various variations of a given +# machine specification into a single specification in the form: +# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM +# or in some cases, the newer four-part form: +# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM +# It is wrong to echo any other type of specification. + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] CPU-MFR-OPSYS + $0 [OPTION] ALIAS + +Canonicalize a configuration name. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.sub ($timestamp) + +Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005 +Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit 0 ;; + --version | -v ) + echo "$version" ; exit 0 ;; + --help | --h* | -h ) + echo "$usage"; exit 0 ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" + exit 1 ;; + + *local*) + # First pass through any local machine types. + echo $1 + exit 0;; + + * ) + break ;; + esac +done + +case $# in + 0) echo "$me: missing argument$help" >&2 + exit 1;; + 1) ;; + *) echo "$me: too many arguments$help" >&2 + exit 1;; +esac + +# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). +# Here we must recognize all the valid KERNEL-OS combinations. +maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` +case $maybe_os in + nto-qnx* | linux-gnu* | linux-dietlibc | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | \ + kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* | storm-chaos* | os2-emx* | rtmk-nova*) + os=-$maybe_os + basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` + ;; + *) + basic_machine=`echo $1 | sed 's/-[^-]*$//'` + if [ $basic_machine != $1 ] + then os=`echo $1 | sed 's/.*-/-/'` + else os=; fi + ;; +esac + +### Let's recognize common machines as not being operating systems so +### that things like config.sub decstation-3100 work. We also +### recognize some manufacturers as not being operating systems, so we +### can provide default operating systems below. +case $os in + -sun*os*) + # Prevent following clause from handling this invalid input. 
+ ;; + -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ + -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ + -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ + -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ + -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ + -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ + -apple | -axis | -knuth | -cray) + os= + basic_machine=$1 + ;; + -sim | -cisco | -oki | -wec | -winbond) + os= + basic_machine=$1 + ;; + -scout) + ;; + -wrs) + os=-vxworks + basic_machine=$1 + ;; + -chorusos*) + os=-chorusos + basic_machine=$1 + ;; + -chorusrdb) + os=-chorusrdb + basic_machine=$1 + ;; + -hiux*) + os=-hiuxwe2 + ;; + -sco5) + os=-sco3.2v5 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco4) + os=-sco3.2v4 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2.[4-9]*) + os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2v[4-9]*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco*) + os=-sco3.2v2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -udk*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -isc) + os=-isc2.2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -clix*) + basic_machine=clipper-intergraph + ;; + -isc*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -lynx*) + os=-lynxos + ;; + -ptx*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` + ;; + -windowsnt*) + os=`echo $os | sed -e 's/windowsnt/winnt/'` + ;; + -psos*) + os=-psos + ;; + -mint | -mint[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; +esac + +# Decode aliases for certain CPU-COMPANY combinations. +case $basic_machine in + # Recognize the basic CPU types without company name. + # Some are omitted here because they have special meanings below. 
+ 1750a | 580 \ + | a29k \ + | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ + | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ + | am33_2.0 \ + | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr \ + | c4x | clipper \ + | d10v | d30v | dlx | dsp16xx \ + | fr30 | frv \ + | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ + | i370 | i860 | i960 | ia64 \ + | ip2k | iq2000 \ + | m32r | m32rle | m68000 | m68k | m88k | maxq | mcore \ + | mips | mipsbe | mipseb | mipsel | mipsle \ + | mips16 \ + | mips64 | mips64el \ + | mips64vr | mips64vrel \ + | mips64orion | mips64orionel \ + | mips64vr4100 | mips64vr4100el \ + | mips64vr4300 | mips64vr4300el \ + | mips64vr5000 | mips64vr5000el \ + | mipsisa32 | mipsisa32el \ + | mipsisa32r2 | mipsisa32r2el \ + | mipsisa64 | mipsisa64el \ + | mipsisa64r2 | mipsisa64r2el \ + | mipsisa64sb1 | mipsisa64sb1el \ + | mipsisa64sr71k | mipsisa64sr71kel \ + | mipstx39 | mipstx39el \ + | mn10200 | mn10300 \ + | msp430 \ + | ns16k | ns32k \ + | openrisc | or32 \ + | pdp10 | pdp11 | pj | pjl \ + | powerpc | powerpc64 | powerpc64le | powerpcle | ppcbe \ + | pyramid \ + | sh | sh[1234] | sh[23]e | sh[34]eb | shbe | shle | sh[1234]le | sh3ele \ + | sh64 | sh64le \ + | sparc | sparc64 | sparc86x | sparclet | sparclite | sparcv8 | sparcv9 | sparcv9b \ + | strongarm \ + | tahoe | thumb | tic4x | tic80 | tron \ + | v850 | v850e \ + | we32k \ + | x86 | xscale | xscalee[bl] | xstormy16 | xtensa \ + | z8k) + basic_machine=$basic_machine-unknown + ;; + m6811 | m68hc11 | m6812 | m68hc12) + # Motorola 68HC11/12. + basic_machine=$basic_machine-unknown + os=-none + ;; + m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) + ;; + + # We use `pc' rather than `unknown' + # because (1) that's what they normally are, and + # (2) the word "unknown" tends to confuse beginning users. + i*86 | x86_64) + basic_machine=$basic_machine-pc + ;; + # Object if more than one company name word. + *-*-*) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; + # Recognize the basic CPU types with company name. 
+ 580-* \ + | a29k-* \ + | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ + | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ + | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \ + | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ + | avr-* \ + | bs2000-* \ + | c[123]* | c30-* | [cjt]90-* | c4x-* | c54x-* | c55x-* | c6x-* \ + | clipper-* | craynv-* | cydra-* \ + | d10v-* | d30v-* | dlx-* \ + | elxsi-* \ + | f30[01]-* | f700-* | fr30-* | frv-* | fx80-* \ + | h8300-* | h8500-* \ + | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ + | i*86-* | i860-* | i960-* | ia64-* \ + | ip2k-* | iq2000-* \ + | m32r-* | m32rle-* \ + | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ + | m88110-* | m88k-* | maxq-* | mcore-* \ + | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ + | mips16-* \ + | mips64-* | mips64el-* \ + | mips64vr-* | mips64vrel-* \ + | mips64orion-* | mips64orionel-* \ + | mips64vr4100-* | mips64vr4100el-* \ + | mips64vr4300-* | mips64vr4300el-* \ + | mips64vr5000-* | mips64vr5000el-* \ + | mipsisa32-* | mipsisa32el-* \ + | mipsisa32r2-* | mipsisa32r2el-* \ + | mipsisa64-* | mipsisa64el-* \ + | mipsisa64r2-* | mipsisa64r2el-* \ + | mipsisa64sb1-* | mipsisa64sb1el-* \ + | mipsisa64sr71k-* | mipsisa64sr71kel-* \ + | mipstx39-* | mipstx39el-* \ + | mmix-* \ + | msp430-* \ + | none-* | np1-* | ns16k-* | ns32k-* \ + | orion-* \ + | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ + | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \ + | pyramid-* \ + | romp-* | rs6000-* \ + | sh-* | sh[1234]-* | sh[23]e-* | sh[34]eb-* | shbe-* \ + | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ + | sparc-* | sparc64-* | sparc86x-* | sparclet-* | sparclite-* \ + | sparcv8-* | sparcv9-* | sparcv9b-* | strongarm-* | sv1-* | sx?-* \ + | tahoe-* | thumb-* \ + | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ + | tron-* \ + | v850-* | v850e-* | vax-* \ + | we32k-* \ + | x86-* | x86_64-* | xps100-* | xscale-* | xscalee[bl]-* \ + | xstormy16-* | xtensa-* \ + | ymp-* \ + | z8k-*) + ;; + # Recognize the various machine names and aliases which stand + # for a CPU type and a company and sometimes even an OS. 
+ 386bsd) + basic_machine=i386-unknown + os=-bsd + ;; + 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) + basic_machine=m68000-att + ;; + 3b*) + basic_machine=we32k-att + ;; + a29khif) + basic_machine=a29k-amd + os=-udi + ;; + abacus) + basic_machine=abacus-unknown + ;; + adobe68k) + basic_machine=m68010-adobe + os=-scout + ;; + alliant | fx80) + basic_machine=fx80-alliant + ;; + altos | altos3068) + basic_machine=m68k-altos + ;; + am29k) + basic_machine=a29k-none + os=-bsd + ;; + amd64) + basic_machine=x86_64-pc + ;; + amd64-*) + basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + amdahl) + basic_machine=580-amdahl + os=-sysv + ;; + amiga | amiga-*) + basic_machine=m68k-unknown + ;; + amigaos | amigados) + basic_machine=m68k-unknown + os=-amigaos + ;; + amigaunix | amix) + basic_machine=m68k-unknown + os=-sysv4 + ;; + apollo68) + basic_machine=m68k-apollo + os=-sysv + ;; + apollo68bsd) + basic_machine=m68k-apollo + os=-bsd + ;; + aux) + basic_machine=m68k-apple + os=-aux + ;; + balance) + basic_machine=ns32k-sequent + os=-dynix + ;; + c90) + basic_machine=c90-cray + os=-unicos + ;; + convex-c1) + basic_machine=c1-convex + os=-bsd + ;; + convex-c2) + basic_machine=c2-convex + os=-bsd + ;; + convex-c32) + basic_machine=c32-convex + os=-bsd + ;; + convex-c34) + basic_machine=c34-convex + os=-bsd + ;; + convex-c38) + basic_machine=c38-convex + os=-bsd + ;; + cray | j90) + basic_machine=j90-cray + os=-unicos + ;; + craynv) + basic_machine=craynv-cray + os=-unicosmp + ;; + cr16c) + basic_machine=cr16c-unknown + os=-elf + ;; + crds | unos) + basic_machine=m68k-crds + ;; + crisv32 | crisv32-* | etraxfs*) + basic_machine=crisv32-axis + ;; + cris | cris-* | etrax*) + basic_machine=cris-axis + ;; + crx) + basic_machine=crx-unknown + os=-elf + ;; + da30 | da30-*) + basic_machine=m68k-da30 + ;; + decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) + basic_machine=mips-dec + ;; + decsystem10* | dec10*) + basic_machine=pdp10-dec + os=-tops10 + ;; + decsystem20* | dec20*) + basic_machine=pdp10-dec + os=-tops20 + ;; + delta | 3300 | motorola-3300 | motorola-delta \ + | 3300-motorola | delta-motorola) + basic_machine=m68k-motorola + ;; + delta88) + basic_machine=m88k-motorola + os=-sysv3 + ;; + djgpp) + basic_machine=i586-pc + os=-msdosdjgpp + ;; + dpx20 | dpx20-*) + basic_machine=rs6000-bull + os=-bosx + ;; + dpx2* | dpx2*-bull) + basic_machine=m68k-bull + os=-sysv3 + ;; + ebmon29k) + basic_machine=a29k-amd + os=-ebmon + ;; + elxsi) + basic_machine=elxsi-elxsi + os=-bsd + ;; + encore | umax | mmax) + basic_machine=ns32k-encore + ;; + es1800 | OSE68k | ose68k | ose | OSE) + basic_machine=m68k-ericsson + os=-ose + ;; + fx2800) + basic_machine=i860-alliant + ;; + genix) + basic_machine=ns32k-ns + ;; + gmicro) + basic_machine=tron-gmicro + os=-sysv + ;; + go32) + basic_machine=i386-pc + os=-go32 + ;; + h3050r* | hiux*) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + h8300hms) + basic_machine=h8300-hitachi + os=-hms + ;; + h8300xray) + basic_machine=h8300-hitachi + os=-xray + ;; + h8500hms) + basic_machine=h8500-hitachi + os=-hms + ;; + harris) + basic_machine=m88k-harris + os=-sysv3 + ;; + hp300-*) + basic_machine=m68k-hp + ;; + hp300bsd) + basic_machine=m68k-hp + os=-bsd + ;; + hp300hpux) + basic_machine=m68k-hp + os=-hpux + ;; + hp3k9[0-9][0-9] | hp9[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k2[0-9][0-9] | hp9k31[0-9]) + basic_machine=m68000-hp + ;; + hp9k3[2-9][0-9]) + basic_machine=m68k-hp + ;; + hp9k6[0-9][0-9] | hp6[0-9][0-9]) + 
basic_machine=hppa1.0-hp + ;; + hp9k7[0-79][0-9] | hp7[0-79][0-9]) + basic_machine=hppa1.1-hp + ;; + hp9k78[0-9] | hp78[0-9]) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][13679] | hp8[0-9][13679]) + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][0-9] | hp8[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hppa-next) + os=-nextstep3 + ;; + hppaosf) + basic_machine=hppa1.1-hp + os=-osf + ;; + hppro) + basic_machine=hppa1.1-hp + os=-proelf + ;; + i370-ibm* | ibm*) + basic_machine=i370-ibm + ;; +# I'm not sure what "Sysv32" means. Should this be sysv3.2? + i*86v32) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv32 + ;; + i*86v4*) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv4 + ;; + i*86v) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv + ;; + i*86sol2) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-solaris2 + ;; + i386mach) + basic_machine=i386-mach + os=-mach + ;; + i386-vsta | vsta) + basic_machine=i386-unknown + os=-vsta + ;; + iris | iris4d) + basic_machine=mips-sgi + case $os in + -irix*) + ;; + *) + os=-irix4 + ;; + esac + ;; + isi68 | isi) + basic_machine=m68k-isi + os=-sysv + ;; + m88k-omron*) + basic_machine=m88k-omron + ;; + magnum | m3230) + basic_machine=mips-mips + os=-sysv + ;; + merlin) + basic_machine=ns32k-utek + os=-sysv + ;; + mingw32) + basic_machine=i386-pc + os=-mingw32 + ;; + miniframe) + basic_machine=m68000-convergent + ;; + *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; + mips3*-*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` + ;; + mips3*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown + ;; + monitor) + basic_machine=m68k-rom68k + os=-coff + ;; + morphos) + basic_machine=powerpc-unknown + os=-morphos + ;; + msdos) + basic_machine=i386-pc + os=-msdos + ;; + mvs) + basic_machine=i370-ibm + os=-mvs + ;; + ncr3000) + basic_machine=i486-ncr + os=-sysv4 + ;; + netbsd386) + basic_machine=i386-unknown + os=-netbsd + ;; + netwinder) + basic_machine=armv4l-rebel + os=-linux + ;; + news | news700 | news800 | news900) + basic_machine=m68k-sony + os=-newsos + ;; + news1000) + basic_machine=m68030-sony + os=-newsos + ;; + news-3600 | risc-news) + basic_machine=mips-sony + os=-newsos + ;; + necv70) + basic_machine=v70-nec + os=-sysv + ;; + next | m*-next ) + basic_machine=m68k-next + case $os in + -nextstep* ) + ;; + -ns2*) + os=-nextstep2 + ;; + *) + os=-nextstep3 + ;; + esac + ;; + nh3000) + basic_machine=m68k-harris + os=-cxux + ;; + nh[45]000) + basic_machine=m88k-harris + os=-cxux + ;; + nindy960) + basic_machine=i960-intel + os=-nindy + ;; + mon960) + basic_machine=i960-intel + os=-mon960 + ;; + nonstopux) + basic_machine=mips-compaq + os=-nonstopux + ;; + np1) + basic_machine=np1-gould + ;; + nsr-tandem) + basic_machine=nsr-tandem + ;; + op50n-* | op60c-*) + basic_machine=hppa1.1-oki + os=-proelf + ;; + or32 | or32-*) + basic_machine=or32-unknown + os=-coff + ;; + os400) + basic_machine=powerpc-ibm + os=-os400 + ;; + OSE68000 | ose68000) + basic_machine=m68000-ericsson + os=-ose + ;; + os68k) + basic_machine=m68k-none + os=-os68k + ;; + pa-hitachi) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + paragon) + basic_machine=i860-intel + os=-osf + ;; + pbd) + basic_machine=sparc-tti + ;; + pbb) + basic_machine=m68k-tti + ;; + pc532 | pc532-*) + 
basic_machine=ns32k-pc532 + ;; + pentium | p5 | k5 | k6 | nexgen | viac3) + basic_machine=i586-pc + ;; + pentiumpro | p6 | 6x86 | athlon | athlon_*) + basic_machine=i686-pc + ;; + pentiumii | pentium2 | pentiumiii | pentium3) + basic_machine=i686-pc + ;; + pentium4) + basic_machine=i786-pc + ;; + pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) + basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumpro-* | p6-* | 6x86-* | athlon-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium4-*) + basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pn) + basic_machine=pn-gould + ;; + power) basic_machine=power-ibm + ;; + ppc) basic_machine=powerpc-unknown + ;; + ppc-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppcle | powerpclittle | ppc-le | powerpc-little) + basic_machine=powerpcle-unknown + ;; + ppcle-* | powerpclittle-*) + basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64) basic_machine=powerpc64-unknown + ;; + ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64le | powerpc64little | ppc64-le | powerpc64-little) + basic_machine=powerpc64le-unknown + ;; + ppc64le-* | powerpc64little-*) + basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ps2) + basic_machine=i386-ibm + ;; + pw32) + basic_machine=i586-unknown + os=-pw32 + ;; + rom68k) + basic_machine=m68k-rom68k + os=-coff + ;; + rm[46]00) + basic_machine=mips-siemens + ;; + rtpc | rtpc-*) + basic_machine=romp-ibm + ;; + s390 | s390-*) + basic_machine=s390-ibm + ;; + s390x | s390x-*) + basic_machine=s390x-ibm + ;; + sa29200) + basic_machine=a29k-amd + os=-udi + ;; + sb1) + basic_machine=mipsisa64sb1-unknown + ;; + sb1el) + basic_machine=mipsisa64sb1el-unknown + ;; + sei) + basic_machine=mips-sei + os=-seiux + ;; + sequent) + basic_machine=i386-sequent + ;; + sh) + basic_machine=sh-hitachi + os=-hms + ;; + sh64) + basic_machine=sh64-unknown + ;; + sparclite-wrs | simso-wrs) + basic_machine=sparclite-wrs + os=-vxworks + ;; + sps7) + basic_machine=m68k-bull + os=-sysv2 + ;; + spur) + basic_machine=spur-unknown + ;; + st2000) + basic_machine=m68k-tandem + ;; + stratus) + basic_machine=i860-stratus + os=-sysv4 + ;; + sun2) + basic_machine=m68000-sun + ;; + sun2os3) + basic_machine=m68000-sun + os=-sunos3 + ;; + sun2os4) + basic_machine=m68000-sun + os=-sunos4 + ;; + sun3os3) + basic_machine=m68k-sun + os=-sunos3 + ;; + sun3os4) + basic_machine=m68k-sun + os=-sunos4 + ;; + sun4os3) + basic_machine=sparc-sun + os=-sunos3 + ;; + sun4os4) + basic_machine=sparc-sun + os=-sunos4 + ;; + sun4sol2) + basic_machine=sparc-sun + os=-solaris2 + ;; + sun3 | sun3-*) + basic_machine=m68k-sun + ;; + sun4) + basic_machine=sparc-sun + ;; + sun386 | sun386i | roadrunner) + basic_machine=i386-sun + ;; + sv1) + basic_machine=sv1-cray + os=-unicos + ;; + symmetry) + basic_machine=i386-sequent + os=-dynix + ;; + t3e) + basic_machine=alphaev5-cray + os=-unicos + ;; + t90) + basic_machine=t90-cray + os=-unicos + ;; + tic54x | c54x*) + basic_machine=tic54x-unknown + os=-coff + ;; + tic55x | c55x*) + basic_machine=tic55x-unknown + os=-coff + ;; + tic6x | c6x*) + basic_machine=tic6x-unknown + os=-coff + ;; + tx39) + basic_machine=mipstx39-unknown + ;; + tx39el) + basic_machine=mipstx39el-unknown + ;; + toad1) + basic_machine=pdp10-xkl + os=-tops20 + ;; + tower | tower-32) + 
basic_machine=m68k-ncr + ;; + tpf) + basic_machine=s390x-ibm + os=-tpf + ;; + udi29k) + basic_machine=a29k-amd + os=-udi + ;; + ultra3) + basic_machine=a29k-nyu + os=-sym1 + ;; + v810 | necv810) + basic_machine=v810-nec + os=-none + ;; + vaxv) + basic_machine=vax-dec + os=-sysv + ;; + vms) + basic_machine=vax-dec + os=-vms + ;; + vpp*|vx|vx-*) + basic_machine=f301-fujitsu + ;; + vxworks960) + basic_machine=i960-wrs + os=-vxworks + ;; + vxworks68) + basic_machine=m68k-wrs + os=-vxworks + ;; + vxworks29k) + basic_machine=a29k-wrs + os=-vxworks + ;; + w65*) + basic_machine=w65-wdc + os=-none + ;; + w89k-*) + basic_machine=hppa1.1-winbond + os=-proelf + ;; + xbox) + basic_machine=i686-pc + os=-mingw32 + ;; + xps | xps100) + basic_machine=xps100-honeywell + ;; + ymp) + basic_machine=ymp-cray + os=-unicos + ;; + z8k-*-coff) + basic_machine=z8k-unknown + os=-sim + ;; + none) + basic_machine=none-none + os=-none + ;; + +# Here we handle the default manufacturer of certain CPU types. It is in +# some cases the only manufacturer, in others, it is the most popular. + w89k) + basic_machine=hppa1.1-winbond + ;; + op50n) + basic_machine=hppa1.1-oki + ;; + op60c) + basic_machine=hppa1.1-oki + ;; + romp) + basic_machine=romp-ibm + ;; + mmix) + basic_machine=mmix-knuth + ;; + rs6000) + basic_machine=rs6000-ibm + ;; + vax) + basic_machine=vax-dec + ;; + pdp10) + # there are many clones, so DEC is not a safe bet + basic_machine=pdp10-unknown + ;; + pdp11) + basic_machine=pdp11-dec + ;; + we32k) + basic_machine=we32k-att + ;; + sh3 | sh4 | sh[34]eb | sh[1234]le | sh[23]ele) + basic_machine=sh-unknown + ;; + sh64) + basic_machine=sh64-unknown + ;; + sparc | sparcv8 | sparcv9 | sparcv9b) + basic_machine=sparc-sun + ;; + cydra) + basic_machine=cydra-cydrome + ;; + orion) + basic_machine=orion-highlevel + ;; + orion105) + basic_machine=clipper-highlevel + ;; + mac | mpw | mac-mpw) + basic_machine=m68k-apple + ;; + pmac | pmac-mpw) + basic_machine=powerpc-apple + ;; + *-unknown) + # Make sure to match an already-canonicalized machine name. + ;; + *) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; +esac + +# Here we canonicalize certain aliases for manufacturers. +case $basic_machine in + *-digital*) + basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` + ;; + *-commodore*) + basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` + ;; + *) + ;; +esac + +# Decode manufacturer-specific aliases for certain operating systems. + +if [ x"$os" != x"" ] +then +case $os in + # First match some system type aliases + # that might get confused with valid system types. + # -solaris* is a basic system type, with this one exception. + -solaris1 | -solaris1.*) + os=`echo $os | sed -e 's|solaris1|sunos4|'` + ;; + -solaris) + os=-solaris2 + ;; + -svr4*) + os=-sysv4 + ;; + -unixware*) + os=-sysv4.2uw + ;; + -gnu/linux*) + os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` + ;; + # First accept the basic system types. + # The portable systems comes first. + # Each alternative MUST END IN A *, to match a version number. + # -sysv* is not here because it comes later, after sysvr4. 
+ -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ + | -*vms* | -sco* | -esix* | -isc* | -aix* | -sunos | -sunos[34]*\ + | -hpux* | -unos* | -osf* | -luna* | -dgux* | -solaris* | -sym* \ + | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ + | -aos* \ + | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ + | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ + | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* | -openbsd* \ + | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ + | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ + | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ + | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ + | -chorusos* | -chorusrdb* \ + | -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ + | -mingw32* | -linux-gnu* | -linux-uclibc* | -uxpv* | -beos* | -mpeix* | -udk* \ + | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ + | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ + | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ + | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ + | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ + | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly*) + # Remember, each alternative MUST END IN *, to match a version number. + ;; + -qnx*) + case $basic_machine in + x86-* | i*86-*) + ;; + *) + os=-nto$os + ;; + esac + ;; + -nto-qnx*) + ;; + -nto*) + os=`echo $os | sed -e 's|nto|nto-qnx|'` + ;; + -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ + | -windows* | -osx | -abug | -netware* | -os9* | -beos* \ + | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) + ;; + -mac*) + os=`echo $os | sed -e 's|mac|macos|'` + ;; + -linux-dietlibc) + os=-linux-dietlibc + ;; + -linux*) + os=`echo $os | sed -e 's|linux|linux-gnu|'` + ;; + -sunos5*) + os=`echo $os | sed -e 's|sunos5|solaris2|'` + ;; + -sunos6*) + os=`echo $os | sed -e 's|sunos6|solaris3|'` + ;; + -opened*) + os=-openedition + ;; + -os400*) + os=-os400 + ;; + -wince*) + os=-wince + ;; + -osfrose*) + os=-osfrose + ;; + -osf*) + os=-osf + ;; + -utek*) + os=-bsd + ;; + -dynix*) + os=-bsd + ;; + -acis*) + os=-aos + ;; + -atheos*) + os=-atheos + ;; + -syllable*) + os=-syllable + ;; + -386bsd) + os=-bsd + ;; + -ctix* | -uts*) + os=-sysv + ;; + -nova*) + os=-rtmk-nova + ;; + -ns2 ) + os=-nextstep2 + ;; + -nsk*) + os=-nsk + ;; + # Preserve the version number of sinix5. + -sinix5.*) + os=`echo $os | sed -e 's|sinix|sysv|'` + ;; + -sinix*) + os=-sysv4 + ;; + -tpf*) + os=-tpf + ;; + -triton*) + os=-sysv3 + ;; + -oss*) + os=-sysv3 + ;; + -svr4) + os=-sysv4 + ;; + -svr3) + os=-sysv3 + ;; + -sysvr4) + os=-sysv4 + ;; + # This must come after -sysvr4. + -sysv*) + ;; + -ose*) + os=-ose + ;; + -es1800*) + os=-ose + ;; + -xenix) + os=-xenix + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + os=-mint + ;; + -aros*) + os=-aros + ;; + -kaos*) + os=-kaos + ;; + -zvmoe) + os=-zvmoe + ;; + -none) + ;; + *) + # Get rid of the `-' at the beginning of $os. + os=`echo $os | sed 's/[^-]*-//'` + echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 + exit 1 + ;; +esac +else + +# Here we handle the default operating systems that come with various machines. +# The value should be what the vendor currently ships out the door with their +# machine or put another way, the most popular os provided with the machine. 
+ +# Note that if you're going to try to match "-MANUFACTURER" here (say, +# "-sun"), then you have to tell the case statement up towards the top +# that MANUFACTURER isn't an operating system. Otherwise, code above +# will signal an error saying that MANUFACTURER isn't an operating +# system, and we'll never get to this point. + +case $basic_machine in + *-acorn) + os=-riscix1.2 + ;; + arm*-rebel) + os=-linux + ;; + arm*-semi) + os=-aout + ;; + c4x-* | tic4x-*) + os=-coff + ;; + # This must come before the *-dec entry. + pdp10-*) + os=-tops20 + ;; + pdp11-*) + os=-none + ;; + *-dec | vax-*) + os=-ultrix4.2 + ;; + m68*-apollo) + os=-domain + ;; + i386-sun) + os=-sunos4.0.2 + ;; + m68000-sun) + os=-sunos3 + # This also exists in the configure program, but was not the + # default. + # os=-sunos4 + ;; + m68*-cisco) + os=-aout + ;; + mips*-cisco) + os=-elf + ;; + mips*-*) + os=-elf + ;; + or32-*) + os=-coff + ;; + *-tti) # must be before sparc entry or we get the wrong os. + os=-sysv3 + ;; + sparc-* | *-sun) + os=-sunos4.1.1 + ;; + *-be) + os=-beos + ;; + *-ibm) + os=-aix + ;; + *-knuth) + os=-mmixware + ;; + *-wec) + os=-proelf + ;; + *-winbond) + os=-proelf + ;; + *-oki) + os=-proelf + ;; + *-hp) + os=-hpux + ;; + *-hitachi) + os=-hiux + ;; + i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) + os=-sysv + ;; + *-cbm) + os=-amigaos + ;; + *-dg) + os=-dgux + ;; + *-dolphin) + os=-sysv3 + ;; + m68k-ccur) + os=-rtu + ;; + m88k-omron*) + os=-luna + ;; + *-next ) + os=-nextstep + ;; + *-sequent) + os=-ptx + ;; + *-crds) + os=-unos + ;; + *-ns) + os=-genix + ;; + i370-*) + os=-mvs + ;; + *-next) + os=-nextstep3 + ;; + *-gould) + os=-sysv + ;; + *-highlevel) + os=-bsd + ;; + *-encore) + os=-bsd + ;; + *-sgi) + os=-irix + ;; + *-siemens) + os=-sysv4 + ;; + *-masscomp) + os=-rtu + ;; + f30[01]-fujitsu | f700-fujitsu) + os=-uxpv + ;; + *-rom68k) + os=-coff + ;; + *-*bug) + os=-coff + ;; + *-apple) + os=-macos + ;; + *-atari*) + os=-mint + ;; + *) + os=-none + ;; +esac +fi + +# Here we handle the case where we know the os, and the CPU type, but not the +# manufacturer. We pick the logical manufacturer. +vendor=unknown +case $basic_machine in + *-unknown) + case $os in + -riscix*) + vendor=acorn + ;; + -sunos*) + vendor=sun + ;; + -aix*) + vendor=ibm + ;; + -beos*) + vendor=be + ;; + -hpux*) + vendor=hp + ;; + -mpeix*) + vendor=hp + ;; + -hiux*) + vendor=hitachi + ;; + -unos*) + vendor=crds + ;; + -dgux*) + vendor=dg + ;; + -luna*) + vendor=omron + ;; + -genix*) + vendor=ns + ;; + -mvs* | -opened*) + vendor=ibm + ;; + -os400*) + vendor=ibm + ;; + -ptx*) + vendor=sequent + ;; + -tpf*) + vendor=ibm + ;; + -vxsim* | -vxworks* | -windiss*) + vendor=wrs + ;; + -aux*) + vendor=apple + ;; + -hms*) + vendor=hitachi + ;; + -mpw* | -macos*) + vendor=apple + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + vendor=atari + ;; + -vos*) + vendor=stratus + ;; + esac + basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` + ;; +esac + +echo $basic_machine$os +exit 0 + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/src/native/config/depcomp b/src/native/config/depcomp new file mode 100755 index 0000000..04701da --- /dev/null +++ b/src/native/config/depcomp @@ -0,0 +1,530 @@ +#! 
/bin/sh +# depcomp - compile a program generating dependencies as side-effects + +scriptversion=2005-07-09.11 + +# Copyright (C) 1999, 2000, 2003, 2004, 2005 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# Originally written by Alexandre Oliva . + +case $1 in + '') + echo "$0: No command. Try \`$0 --help' for more information." 1>&2 + exit 1; + ;; + -h | --h*) + cat <<\EOF +Usage: depcomp [--help] [--version] PROGRAM [ARGS] + +Run PROGRAMS ARGS to compile a file, generating dependencies +as side-effects. + +Environment variables: + depmode Dependency tracking mode. + source Source file read by `PROGRAMS ARGS'. + object Object file output by `PROGRAMS ARGS'. + DEPDIR directory where to store dependencies. + depfile Dependency file to output. + tmpdepfile Temporary file to use when outputing dependencies. + libtool Whether libtool is used (yes/no). + +Report bugs to . +EOF + exit $? + ;; + -v | --v*) + echo "depcomp $scriptversion" + exit $? + ;; +esac + +if test -z "$depmode" || test -z "$source" || test -z "$object"; then + echo "depcomp: Variables source, object and depmode must be set" 1>&2 + exit 1 +fi + +# Dependencies for sub/bar.o or sub/bar.obj go into sub/.deps/bar.Po. +depfile=${depfile-`echo "$object" | + sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`} +tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`} + +rm -f "$tmpdepfile" + +# Some modes work just like other modes, but use different flags. We +# parameterize here, but still list the modes in the big case below, +# to make depend.m4 easier to write. Note that we *cannot* use a case +# here, because this file can only contain one case statement. +if test "$depmode" = hp; then + # HP compiler uses -M and no extra arg. + gccflag=-M + depmode=gcc +fi + +if test "$depmode" = dashXmstdout; then + # This is just like dashmstdout with a different argument. + dashmflag=-xM + depmode=dashmstdout +fi + +case "$depmode" in +gcc3) +## gcc 3 implements dependency tracking that does exactly what +## we want. Yay! Note: for some reason libtool 1.4 doesn't like +## it if -MD -MP comes after the -MF stuff. Hmm. + "$@" -MT "$object" -MD -MP -MF "$tmpdepfile" + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile" + exit $stat + fi + mv "$tmpdepfile" "$depfile" + ;; + +gcc) +## There are various ways to get dependency output from gcc. Here's +## why we pick this rather obscure method: +## - Don't want to use -MD because we'd like the dependencies to end +## up in a subdir. Having to rename by hand is ugly. 
+## (We might end up doing this anyway to support other compilers.) +## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like +## -MM, not -M (despite what the docs say). +## - Using -M directly means running the compiler twice (even worse +## than renaming). + if test -z "$gccflag"; then + gccflag=-MD, + fi + "$@" -Wp,"$gccflag$tmpdepfile" + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + echo "$object : \\" > "$depfile" + alpha=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz +## The second -e expression handles DOS-style file names with drive letters. + sed -e 's/^[^:]*: / /' \ + -e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile" +## This next piece of magic avoids the `deleted header file' problem. +## The problem is that when a header file which appears in a .P file +## is deleted, the dependency causes make to die (because there is +## typically no way to rebuild the header). We avoid this by adding +## dummy dependencies for each header file. Too bad gcc doesn't do +## this for us directly. + tr ' ' ' +' < "$tmpdepfile" | +## Some versions of gcc put a space before the `:'. On the theory +## that the space means something, we add a space to the output as +## well. +## Some versions of the HPUX 10.20 sed can't process this invocation +## correctly. Breaking it into two sed invocations is a workaround. + sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +hp) + # This case exists only to let depend.m4 do its work. It works by + # looking at the text of this script. This case will never be run, + # since it is checked for above. + exit 1 + ;; + +sgi) + if test "$libtool" = yes; then + "$@" "-Wp,-MDupdate,$tmpdepfile" + else + "$@" -MDupdate "$tmpdepfile" + fi + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + + if test -f "$tmpdepfile"; then # yes, the sourcefile depend on other files + echo "$object : \\" > "$depfile" + + # Clip off the initial element (the dependent). Don't try to be + # clever and replace this with sed code, as IRIX sed won't handle + # lines with more than a fixed number of characters (4096 in + # IRIX 6.2 sed, 8192 in IRIX 6.5). We also remove comment lines; + # the IRIX cc adds comments like `#:fec' to the end of the + # dependency line. + tr ' ' ' +' < "$tmpdepfile" \ + | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' | \ + tr ' +' ' ' >> $depfile + echo >> $depfile + + # The second pass generates a dummy entry for each header file. + tr ' ' ' +' < "$tmpdepfile" \ + | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \ + >> $depfile + else + # The sourcefile does not contain any dependencies, so just + # store a dummy comment line, to avoid errors with the Makefile + # "include basename.Plo" scheme. + echo "#dummy" > "$depfile" + fi + rm -f "$tmpdepfile" + ;; + +aix) + # The C for AIX Compiler uses -M and outputs the dependencies + # in a .u file. In older versions, this file always lives in the + # current directory. Also, the AIX compiler puts `$object:' at the + # start of each line; $object doesn't have directory information. + # Version 6 uses the directory in both cases. + stripped=`echo "$object" | sed 's/\(.*\)\..*$/\1/'` + tmpdepfile="$stripped.u" + if test "$libtool" = yes; then + "$@" -Wc,-M + else + "$@" -M + fi + stat=$? 
+ + if test -f "$tmpdepfile"; then : + else + stripped=`echo "$stripped" | sed 's,^.*/,,'` + tmpdepfile="$stripped.u" + fi + + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile" + exit $stat + fi + + if test -f "$tmpdepfile"; then + outname="$stripped.o" + # Each line is of the form `foo.o: dependent.h'. + # Do two passes, one to just change these to + # `$object: dependent.h' and one to simply `dependent.h:'. + sed -e "s,^$outname:,$object :," < "$tmpdepfile" > "$depfile" + sed -e "s,^$outname: \(.*\)$,\1:," < "$tmpdepfile" >> "$depfile" + else + # The sourcefile does not contain any dependencies, so just + # store a dummy comment line, to avoid errors with the Makefile + # "include basename.Plo" scheme. + echo "#dummy" > "$depfile" + fi + rm -f "$tmpdepfile" + ;; + +icc) + # Intel's C compiler understands `-MD -MF file'. However on + # icc -MD -MF foo.d -c -o sub/foo.o sub/foo.c + # ICC 7.0 will fill foo.d with something like + # foo.o: sub/foo.c + # foo.o: sub/foo.h + # which is wrong. We want: + # sub/foo.o: sub/foo.c + # sub/foo.o: sub/foo.h + # sub/foo.c: + # sub/foo.h: + # ICC 7.1 will output + # foo.o: sub/foo.c sub/foo.h + # and will wrap long lines using \ : + # foo.o: sub/foo.c ... \ + # sub/foo.h ... \ + # ... + + "$@" -MD -MF "$tmpdepfile" + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + # Each line is of the form `foo.o: dependent.h', + # or `foo.o: dep1.h dep2.h \', or ` dep3.h dep4.h \'. + # Do two passes, one to just change these to + # `$object: dependent.h' and one to simply `dependent.h:'. + sed "s,^[^:]*:,$object :," < "$tmpdepfile" > "$depfile" + # Some versions of the HPUX 10.20 sed can't process this invocation + # correctly. Breaking it into two sed invocations is a workaround. + sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" | + sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +tru64) + # The Tru64 compiler uses -MD to generate dependencies as a side + # effect. `cc -MD -o foo.o ...' puts the dependencies into `foo.o.d'. + # At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put + # dependencies in `foo.d' instead, so we check for that too. + # Subdirectories are respected. + dir=`echo "$object" | sed -e 's|/[^/]*$|/|'` + test "x$dir" = "x$object" && dir= + base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'` + + if test "$libtool" = yes; then + # With Tru64 cc, shared objects can also be used to make a + # static library. This mecanism is used in libtool 1.4 series to + # handle both shared and static libraries in a single compilation. + # With libtool 1.4, dependencies were output in $dir.libs/$base.lo.d. + # + # With libtool 1.5 this exception was removed, and libtool now + # generates 2 separate objects for the 2 libraries. These two + # compilations output dependencies in in $dir.libs/$base.o.d and + # in $dir$base.o.d. We have to check for both files, because + # one of the two compilations can be disabled. We should prefer + # $dir$base.o.d over $dir.libs/$base.o.d because the latter is + # automatically cleaned when .libs/ is deleted, while ignoring + # the former would cause a distcleancheck panic. 
+ tmpdepfile1=$dir.libs/$base.lo.d # libtool 1.4 + tmpdepfile2=$dir$base.o.d # libtool 1.5 + tmpdepfile3=$dir.libs/$base.o.d # libtool 1.5 + tmpdepfile4=$dir.libs/$base.d # Compaq CCC V6.2-504 + "$@" -Wc,-MD + else + tmpdepfile1=$dir$base.o.d + tmpdepfile2=$dir$base.d + tmpdepfile3=$dir$base.d + tmpdepfile4=$dir$base.d + "$@" -MD + fi + + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4" + exit $stat + fi + + for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4" + do + test -f "$tmpdepfile" && break + done + if test -f "$tmpdepfile"; then + sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile" + # That's a tab and a space in the []. + sed -e 's,^.*\.[a-z]*:[ ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile" + else + echo "#dummy" > "$depfile" + fi + rm -f "$tmpdepfile" + ;; + +#nosideeffect) + # This comment above is used by automake to tell side-effect + # dependency tracking mechanisms from slower ones. + +dashmstdout) + # Important note: in order to support this mode, a compiler *must* + # always write the preprocessed file to stdout, regardless of -o. + "$@" || exit $? + + # Remove the call to Libtool. + if test "$libtool" = yes; then + while test $1 != '--mode=compile'; do + shift + done + shift + fi + + # Remove `-o $object'. + IFS=" " + for arg + do + case $arg in + -o) + shift + ;; + $object) + shift + ;; + *) + set fnord "$@" "$arg" + shift # fnord + shift # $arg + ;; + esac + done + + test -z "$dashmflag" && dashmflag=-M + # Require at least two characters before searching for `:' + # in the target name. This is to cope with DOS-style filenames: + # a dependency such as `c:/foo/bar' could be seen as target `c' otherwise. + "$@" $dashmflag | + sed 's:^[ ]*[^: ][^:][^:]*\:[ ]*:'"$object"'\: :' > "$tmpdepfile" + rm -f "$depfile" + cat < "$tmpdepfile" > "$depfile" + tr ' ' ' +' < "$tmpdepfile" | \ +## Some versions of the HPUX 10.20 sed can't process this invocation +## correctly. Breaking it into two sed invocations is a workaround. + sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +dashXmstdout) + # This case only exists to satisfy depend.m4. It is never actually + # run, as this mode is specially recognized in the preamble. + exit 1 + ;; + +makedepend) + "$@" || exit $? + # Remove any Libtool call + if test "$libtool" = yes; then + while test $1 != '--mode=compile'; do + shift + done + shift + fi + # X makedepend + shift + cleared=no + for arg in "$@"; do + case $cleared in + no) + set ""; shift + cleared=yes ;; + esac + case "$arg" in + -D*|-I*) + set fnord "$@" "$arg"; shift ;; + # Strip any option that makedepend may not understand. Remove + # the object too, otherwise makedepend will parse it as a source file. + -*|$object) + ;; + *) + set fnord "$@" "$arg"; shift ;; + esac + done + obj_suffix="`echo $object | sed 's/^.*\././'`" + touch "$tmpdepfile" + ${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@" + rm -f "$depfile" + cat < "$tmpdepfile" > "$depfile" + sed '1,2d' "$tmpdepfile" | tr ' ' ' +' | \ +## Some versions of the HPUX 10.20 sed can't process this invocation +## correctly. Breaking it into two sed invocations is a workaround. + sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" "$tmpdepfile".bak + ;; + +cpp) + # Important note: in order to support this mode, a compiler *must* + # always write the preprocessed file to stdout. + "$@" || exit $? 
+ + # Remove the call to Libtool. + if test "$libtool" = yes; then + while test $1 != '--mode=compile'; do + shift + done + shift + fi + + # Remove `-o $object'. + IFS=" " + for arg + do + case $arg in + -o) + shift + ;; + $object) + shift + ;; + *) + set fnord "$@" "$arg" + shift # fnord + shift # $arg + ;; + esac + done + + "$@" -E | + sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \ + -e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' | + sed '$ s: \\$::' > "$tmpdepfile" + rm -f "$depfile" + echo "$object : \\" > "$depfile" + cat < "$tmpdepfile" >> "$depfile" + sed < "$tmpdepfile" '/^$/d;s/^ //;s/ \\$//;s/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +msvisualcpp) + # Important note: in order to support this mode, a compiler *must* + # always write the preprocessed file to stdout, regardless of -o, + # because we must use -o when running libtool. + "$@" || exit $? + IFS=" " + for arg + do + case "$arg" in + "-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI") + set fnord "$@" + shift + shift + ;; + *) + set fnord "$@" "$arg" + shift + shift + ;; + esac + done + "$@" -E | + sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::echo "`cygpath -u \\"\1\\"`":p' | sort | uniq > "$tmpdepfile" + rm -f "$depfile" + echo "$object : \\" > "$depfile" + . "$tmpdepfile" | sed 's% %\\ %g' | sed -n '/^\(.*\)$/ s:: \1 \\:p' >> "$depfile" + echo " " >> "$depfile" + . "$tmpdepfile" | sed 's% %\\ %g' | sed -n '/^\(.*\)$/ s::\1\::p' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +none) + exec "$@" + ;; + +*) + echo "Unknown depmode $depmode" 1>&2 + exit 1 + ;; +esac + +exit 0 + +# Local Variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-end: "$" +# End: diff --git a/src/native/config/install-sh b/src/native/config/install-sh new file mode 100755 index 0000000..4d4a951 --- /dev/null +++ b/src/native/config/install-sh @@ -0,0 +1,323 @@ +#!/bin/sh +# install - install a program, script, or datafile + +scriptversion=2005-05-14.22 + +# This originates from X11R5 (mit/util/scripts/install.sh), which was +# later released in X11R6 (xc/config/util/install.sh) with the +# following copyright and license. +# +# Copyright (C) 1994 X Consortium +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN +# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- +# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+# +# Except as contained in this notice, the name of the X Consortium shall not +# be used in advertising or otherwise to promote the sale, use or other deal- +# ings in this Software without prior written authorization from the X Consor- +# tium. +# +# +# FSF changes to this file are in the public domain. +# +# Calling this script install-sh is preferred over install.sh, to prevent +# `make' implicit rules from creating a file called install from it +# when there is no Makefile. +# +# This script is compatible with the BSD install script, but was written +# from scratch. It can only install one file at a time, a restriction +# shared with many OS's install programs. + +# set DOITPROG to echo to test this script + +# Don't use :- since 4.3BSD and earlier shells don't like it. +doit="${DOITPROG-}" + +# put in absolute paths if you don't have them in your path; or use env. vars. + +mvprog="${MVPROG-mv}" +cpprog="${CPPROG-cp}" +chmodprog="${CHMODPROG-chmod}" +chownprog="${CHOWNPROG-chown}" +chgrpprog="${CHGRPPROG-chgrp}" +stripprog="${STRIPPROG-strip}" +rmprog="${RMPROG-rm}" +mkdirprog="${MKDIRPROG-mkdir}" + +chmodcmd="$chmodprog 0755" +chowncmd= +chgrpcmd= +stripcmd= +rmcmd="$rmprog -f" +mvcmd="$mvprog" +src= +dst= +dir_arg= +dstarg= +no_target_directory= + +usage="Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE + or: $0 [OPTION]... SRCFILES... DIRECTORY + or: $0 [OPTION]... -t DIRECTORY SRCFILES... + or: $0 [OPTION]... -d DIRECTORIES... + +In the 1st form, copy SRCFILE to DSTFILE. +In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. +In the 4th, create DIRECTORIES. + +Options: +-c (ignored) +-d create directories instead of installing files. +-g GROUP $chgrpprog installed files to GROUP. +-m MODE $chmodprog installed files to MODE. +-o USER $chownprog installed files to USER. +-s $stripprog installed files. +-t DIRECTORY install into DIRECTORY. +-T report an error if DSTFILE is a directory. +--help display this help and exit. +--version display version info and exit. + +Environment variables override the default commands: + CHGRPPROG CHMODPROG CHOWNPROG CPPROG MKDIRPROG MVPROG RMPROG STRIPPROG +" + +while test -n "$1"; do + case $1 in + -c) shift + continue;; + + -d) dir_arg=true + shift + continue;; + + -g) chgrpcmd="$chgrpprog $2" + shift + shift + continue;; + + --help) echo "$usage"; exit $?;; + + -m) chmodcmd="$chmodprog $2" + shift + shift + continue;; + + -o) chowncmd="$chownprog $2" + shift + shift + continue;; + + -s) stripcmd=$stripprog + shift + continue;; + + -t) dstarg=$2 + shift + shift + continue;; + + -T) no_target_directory=true + shift + continue;; + + --version) echo "$0 $scriptversion"; exit $?;; + + *) # When -d is used, all remaining arguments are directories to create. + # When -t is used, the destination is already specified. + test -n "$dir_arg$dstarg" && break + # Otherwise, the last argument is the destination. Remove it from $@. + for arg + do + if test -n "$dstarg"; then + # $@ is not empty: it contains at least $arg. + set fnord "$@" "$dstarg" + shift # fnord + fi + shift # arg + dstarg=$arg + done + break;; + esac +done + +if test -z "$1"; then + if test -z "$dir_arg"; then + echo "$0: no input file specified." >&2 + exit 1 + fi + # It's OK to call `install-sh -d' without argument. + # This can happen when creating conditional directories. + exit 0 +fi + +for src +do + # Protect names starting with `-'. 
+ case $src in + -*) src=./$src ;; + esac + + if test -n "$dir_arg"; then + dst=$src + src= + + if test -d "$dst"; then + mkdircmd=: + chmodcmd= + else + mkdircmd=$mkdirprog + fi + else + # Waiting for this to be detected by the "$cpprog $src $dsttmp" command + # might cause directories to be created, which would be especially bad + # if $src (and thus $dsttmp) contains '*'. + if test ! -f "$src" && test ! -d "$src"; then + echo "$0: $src does not exist." >&2 + exit 1 + fi + + if test -z "$dstarg"; then + echo "$0: no destination specified." >&2 + exit 1 + fi + + dst=$dstarg + # Protect names starting with `-'. + case $dst in + -*) dst=./$dst ;; + esac + + # If destination is a directory, append the input filename; won't work + # if double slashes aren't ignored. + if test -d "$dst"; then + if test -n "$no_target_directory"; then + echo "$0: $dstarg: Is a directory" >&2 + exit 1 + fi + dst=$dst/`basename "$src"` + fi + fi + + # This sed command emulates the dirname command. + dstdir=`echo "$dst" | sed -e 's,/*$,,;s,[^/]*$,,;s,/*$,,;s,^$,.,'` + + # Make sure that the destination directory exists. + + # Skip lots of stat calls in the usual case. + if test ! -d "$dstdir"; then + defaultIFS=' + ' + IFS="${IFS-$defaultIFS}" + + oIFS=$IFS + # Some sh's can't handle IFS=/ for some reason. + IFS='%' + set x `echo "$dstdir" | sed -e 's@/@%@g' -e 's@^%@/@'` + shift + IFS=$oIFS + + pathcomp= + + while test $# -ne 0 ; do + pathcomp=$pathcomp$1 + shift + if test ! -d "$pathcomp"; then + $mkdirprog "$pathcomp" + # mkdir can fail with a `File exist' error in case several + # install-sh are creating the directory concurrently. This + # is OK. + test -d "$pathcomp" || exit + fi + pathcomp=$pathcomp/ + done + fi + + if test -n "$dir_arg"; then + $doit $mkdircmd "$dst" \ + && { test -z "$chowncmd" || $doit $chowncmd "$dst"; } \ + && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } \ + && { test -z "$stripcmd" || $doit $stripcmd "$dst"; } \ + && { test -z "$chmodcmd" || $doit $chmodcmd "$dst"; } + + else + dstfile=`basename "$dst"` + + # Make a couple of temp file names in the proper directory. + dsttmp=$dstdir/_inst.$$_ + rmtmp=$dstdir/_rm.$$_ + + # Trap to clean up those temp files at exit. + trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 + trap '(exit $?); exit' 1 2 13 15 + + # Copy the file name to the temp name. + $doit $cpprog "$src" "$dsttmp" && + + # and set any options; do chmod last to preserve setuid bits. + # + # If any of these fail, we abort the whole thing. If we want to + # ignore errors from any of these, just make sure not to ignore + # errors from the above "$doit $cpprog $src $dsttmp" command. + # + { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } \ + && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } \ + && { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } \ + && { test -z "$chmodcmd" || $doit $chmodcmd "$dsttmp"; } && + + # Now rename the file to the real destination. + { $doit $mvcmd -f "$dsttmp" "$dstdir/$dstfile" 2>/dev/null \ + || { + # The rename failed, perhaps because mv can't rename something else + # to itself, or perhaps because mv is so ancient that it does not + # support -f. + + # Now remove or move aside any old file at destination location. + # We try this two ways since rm can't unlink itself on some + # systems and the destination file might be busy for other + # reasons. In this case, the final cleanup might fail but the new + # file should still install successfully. 
+ { + if test -f "$dstdir/$dstfile"; then + $doit $rmcmd -f "$dstdir/$dstfile" 2>/dev/null \ + || $doit $mvcmd -f "$dstdir/$dstfile" "$rmtmp" 2>/dev/null \ + || { + echo "$0: cannot unlink or rename $dstdir/$dstfile" >&2 + (exit 1); exit 1 + } + else + : + fi + } && + + # Now rename the file to the real destination. + $doit $mvcmd "$dsttmp" "$dstdir/$dstfile" + } + } + fi || { (exit 1); exit 1; } +done + +# The final little trick to "correctly" pass the exit status to the exit trap. +{ + (exit 0); exit 0 +} + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-end: "$" +# End: diff --git a/src/native/config/ltmain.sh b/src/native/config/ltmain.sh new file mode 100644 index 0000000..8f7a6ac --- /dev/null +++ b/src/native/config/ltmain.sh @@ -0,0 +1,6971 @@ +# ltmain.sh - Provide generalized library-building support services. +# NOTE: Changing this file will not affect anything until you rerun configure. +# +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005 +# Free Software Foundation, Inc. +# Originally by Gordon Matzigkeit , 1996 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +basename="s,^.*/,,g" + +# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh +# is ksh but when the shell is invoked as "sh" and the current value of +# the _XPG environment variable is not equal to 1 (one), the special +# positional parameter $0, within a function call, is the name of the +# function. +progpath="$0" + +# define SED for historic ltconfig's generated by Libtool 1.3 +test -z "$SED" && SED=sed + +# The name of this program: +progname=`echo "$progpath" | $SED $basename` +modename="$progname" + +# Global variables: +EXIT_SUCCESS=0 +EXIT_FAILURE=1 + +PROGRAM=ltmain.sh +PACKAGE=libtool +VERSION=1.5.22 +TIMESTAMP=" (1.1220.2.365 2005/12/18 22:14:06)" + +# See if we are running on zsh, and set the options which allow our +# commands through without removal of \ escapes. +if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST +fi +# Same for EGREP, and just to be sure, do LTCC as well +if test "X$EGREP" = X ; then + EGREP=egrep +fi +if test "X$LTCC" = X ; then + LTCC=${CC-gcc} +fi + +# Check that we have a working $echo. +if test "X$1" = X--no-reexec; then + # Discard the --no-reexec flag, and continue. + shift +elif test "X$1" = X--fallback-echo; then + # Avoid inline document here, it may be left over + : +elif test "X`($echo '\t') 2>/dev/null`" = 'X\t'; then + # Yippee, $echo works! 
+ : +else + # Restart under the correct shell, and then maybe $echo will work. + exec $SHELL "$progpath" --no-reexec ${1+"$@"} +fi + +if test "X$1" = X--fallback-echo; then + # used as fallback echo + shift + cat <&2 + $echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2 + exit $EXIT_FAILURE +fi + +# Global variables. +mode=$default_mode +nonopt= +prev= +prevopt= +run= +show="$echo" +show_help= +execute_dlfiles= +duplicate_deps=no +preserve_args= +lo2o="s/\\.lo\$/.${objext}/" +o2lo="s/\\.${objext}\$/.lo/" + +if test -z "$max_cmd_len"; then + i=0 + testring="ABCD" + new_result= + + # If test is not a shell built-in, we'll probably end up computing a + # maximum length that is only half of the actual maximum length, but + # we can't tell. + while (test "X"`$SHELL $0 --fallback-echo "X$testring" 2>/dev/null` \ + = "XX$testring") >/dev/null 2>&1 && + new_result=`expr "X$testring" : ".*" 2>&1` && + max_cmd_len="$new_result" && + test "$i" != 17 # 1/2 MB should be enough + do + i=`expr $i + 1` + testring="$testring$testring" + done + testring= + # Add a significant safety factor because C++ compilers can tack on massive + # amounts of additional arguments before passing them to the linker. + # It appears as though 1/2 is a usable value. + max_cmd_len=`expr $max_cmd_len \/ 2` +fi + +##################################### +# Shell function definitions: +# This seems to be the best place for them + +# func_mktempdir [string] +# Make a temporary directory that won't clash with other running +# libtool processes, and avoids race conditions if possible. If +# given, STRING is the basename for that directory. +func_mktempdir () +{ + my_template="${TMPDIR-/tmp}/${1-$progname}" + + if test "$run" = ":"; then + # Return a directory name, but don't create it in dry-run mode + my_tmpdir="${my_template}-$$" + else + + # If mktemp works, use that first and foremost + my_tmpdir=`mktemp -d "${my_template}-XXXXXXXX" 2>/dev/null` + + if test ! -d "$my_tmpdir"; then + # Failing that, at least try and use $RANDOM to avoid a race + my_tmpdir="${my_template}-${RANDOM-0}$$" + + save_mktempdir_umask=`umask` + umask 0077 + $mkdir "$my_tmpdir" + umask $save_mktempdir_umask + fi + + # If we're not in dry-run mode, bomb out on failure + test -d "$my_tmpdir" || { + $echo "cannot create temporary directory \`$my_tmpdir'" 1>&2 + exit $EXIT_FAILURE + } + fi + + $echo "X$my_tmpdir" | $Xsed +} + + +# func_win32_libid arg +# return the library type of file 'arg' +# +# Need a lot of goo to handle *both* DLLs and import libs +# Has to be a shell function in order to 'eat' the argument +# that is supplied when $file_magic_command is called. +func_win32_libid () +{ + win32_libid_type="unknown" + win32_fileres=`file -L $1 2>/dev/null` + case $win32_fileres in + *ar\ archive\ import\ library*) # definitely import + win32_libid_type="x86 archive import" + ;; + *ar\ archive*) # could be an import, or static + if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | \ + $EGREP -e 'file format pe-i386(.*architecture: i386)?' >/dev/null ; then + win32_nmres=`eval $NM -f posix -A $1 | \ + $SED -n -e '1,100{/ I /{s,.*,import,;p;q;};}'` + case $win32_nmres in + import*) win32_libid_type="x86 archive import";; + *) win32_libid_type="x86 archive static";; + esac + fi + ;; + *DLL*) + win32_libid_type="x86 DLL" + ;; + *executable*) # but shell scripts are "executable" too... 
+ case $win32_fileres in + *MS\ Windows\ PE\ Intel*) + win32_libid_type="x86 DLL" + ;; + esac + ;; + esac + $echo $win32_libid_type +} + + +# func_infer_tag arg +# Infer tagged configuration to use if any are available and +# if one wasn't chosen via the "--tag" command line option. +# Only attempt this if the compiler in the base compile +# command doesn't match the default compiler. +# arg is usually of the form 'gcc ...' +func_infer_tag () +{ + if test -n "$available_tags" && test -z "$tagname"; then + CC_quoted= + for arg in $CC; do + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + CC_quoted="$CC_quoted $arg" + done + case $@ in + # Blanks in the command may have been stripped by the calling shell, + # but not from the CC environment variable when configure was run. + " $CC "* | "$CC "* | " `$echo $CC` "* | "`$echo $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$echo $CC_quoted` "* | "`$echo $CC_quoted` "*) ;; + # Blanks at the start of $base_compile will cause this to fail + # if we don't check for them as well. + *) + for z in $available_tags; do + if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then + # Evaluate the configuration. + eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`" + CC_quoted= + for arg in $CC; do + # Double-quote args containing other shell metacharacters. + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + CC_quoted="$CC_quoted $arg" + done + # user sometimes does CC=-gcc so we need to match that to 'gcc' + trimedcc=`echo ${CC} | $SED -e "s/${host}-//g"` + # and sometimes libtool has CC=-gcc but user does CC=gcc + extendcc=${host}-${CC} + # and sometimes libtool has CC=-gcc but user has CC=-gcc + # (Gentoo-specific hack because we always export $CHOST) + mungedcc=${CHOST-${host}}-${trimedcc} + case "$@ " in + "cc "* | " cc "* | "${host}-cc "* | " ${host}-cc "*|\ + "gcc "* | " gcc "* | "${host}-gcc "* | " ${host}-gcc "*) + tagname=CC + break ;; + "$trimedcc "* | " $trimedcc "* | "`$echo $trimedcc` "* | " `$echo $trimedcc` "*|\ + "$extendcc "* | " $extendcc "* | "`$echo $extendcc` "* | " `$echo $extendcc` "*|\ + "$mungedcc "* | " $mungedcc "* | "`$echo $mungedcc` "* | " `$echo $mungedcc` "*|\ + " $CC "* | "$CC "* | " `$echo $CC` "* | "`$echo $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$echo $CC_quoted` "* | "`$echo $CC_quoted` "*) + # The compiler in the base compile command matches + # the one in the tagged configuration. + # Assume this is the tagged configuration we want. + tagname=$z + break + ;; + esac + fi + done + # If $tagname still isn't set, then no tagged configuration + # was found and let the user know that the "--tag" command + # line option must be used. + if test -z "$tagname"; then + $echo "$modename: unable to infer tagged configuration" + $echo "$modename: specify a tag with \`--tag'" 1>&2 + exit $EXIT_FAILURE +# else +# $echo "$modename: using $tagname tagged configuration" + fi + ;; + esac + fi +} + + +# func_extract_an_archive dir oldlib +func_extract_an_archive () +{ + f_ex_an_ar_dir="$1"; shift + f_ex_an_ar_oldlib="$1" + + $show "(cd $f_ex_an_ar_dir && $AR x $f_ex_an_ar_oldlib)" + $run eval "(cd \$f_ex_an_ar_dir && $AR x \$f_ex_an_ar_oldlib)" || exit $? 
+ if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then + : + else + $echo "$modename: ERROR: object name conflicts: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib" 1>&2 + exit $EXIT_FAILURE + fi +} + +# func_extract_archives gentop oldlib ... +func_extract_archives () +{ + my_gentop="$1"; shift + my_oldlibs=${1+"$@"} + my_oldobjs="" + my_xlib="" + my_xabs="" + my_xdir="" + my_status="" + + $show "${rm}r $my_gentop" + $run ${rm}r "$my_gentop" + $show "$mkdir $my_gentop" + $run $mkdir "$my_gentop" + my_status=$? + if test "$my_status" -ne 0 && test ! -d "$my_gentop"; then + exit $my_status + fi + + for my_xlib in $my_oldlibs; do + # Extract the objects. + case $my_xlib in + [\\/]* | [A-Za-z]:[\\/]*) my_xabs="$my_xlib" ;; + *) my_xabs=`pwd`"/$my_xlib" ;; + esac + my_xlib=`$echo "X$my_xlib" | $Xsed -e 's%^.*/%%'` + my_xdir="$my_gentop/$my_xlib" + + $show "${rm}r $my_xdir" + $run ${rm}r "$my_xdir" + $show "$mkdir $my_xdir" + $run $mkdir "$my_xdir" + exit_status=$? + if test "$exit_status" -ne 0 && test ! -d "$my_xdir"; then + exit $exit_status + fi + case $host in + *-darwin*) + $show "Extracting $my_xabs" + # Do not bother doing anything if just a dry run + if test -z "$run"; then + darwin_orig_dir=`pwd` + cd $my_xdir || exit $? + darwin_archive=$my_xabs + darwin_curdir=`pwd` + darwin_base_archive=`$echo "X$darwin_archive" | $Xsed -e 's%^.*/%%'` + darwin_arches=`lipo -info "$darwin_archive" 2>/dev/null | $EGREP Architectures 2>/dev/null` + if test -n "$darwin_arches"; then + darwin_arches=`echo "$darwin_arches" | $SED -e 's/.*are://'` + darwin_arch= + $show "$darwin_base_archive has multiple architectures $darwin_arches" + for darwin_arch in $darwin_arches ; do + mkdir -p "unfat-$$/${darwin_base_archive}-${darwin_arch}" + lipo -thin $darwin_arch -output "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" "${darwin_archive}" + cd "unfat-$$/${darwin_base_archive}-${darwin_arch}" + func_extract_an_archive "`pwd`" "${darwin_base_archive}" + cd "$darwin_curdir" + $rm "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" + done # $darwin_arches + ## Okay now we have a bunch of thin objects, gotta fatten them up :) + darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print| xargs basename | sort -u | $NL2SP` + darwin_file= + darwin_files= + for darwin_file in $darwin_filelist; do + darwin_files=`find unfat-$$ -name $darwin_file -print | $NL2SP` + lipo -create -output "$darwin_file" $darwin_files + done # $darwin_filelist + ${rm}r unfat-$$ + cd "$darwin_orig_dir" + else + cd "$darwin_orig_dir" + func_extract_an_archive "$my_xdir" "$my_xabs" + fi # $darwin_arches + fi # $run + ;; + *) + func_extract_an_archive "$my_xdir" "$my_xabs" + ;; + esac + my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | $NL2SP` + done + func_extract_archives_result="$my_oldobjs" +} +# End of Shell function definitions +##################################### + +# Darwin sucks +eval std_shrext=\"$shrext_cmds\" + +disable_libs=no + +# Parse our command line options once, thoroughly. +while test "$#" -gt 0 +do + arg="$1" + shift + + case $arg in + -*=*) optarg=`$echo "X$arg" | $Xsed -e 's/[-_a-zA-Z0-9]*=//'` ;; + *) optarg= ;; + esac + + # If the previous option needs an argument, assign it. 
+ if test -n "$prev"; then + case $prev in + execute_dlfiles) + execute_dlfiles="$execute_dlfiles $arg" + ;; + tag) + tagname="$arg" + preserve_args="${preserve_args}=$arg" + + # Check whether tagname contains only valid characters + case $tagname in + *[!-_A-Za-z0-9,/]*) + $echo "$progname: invalid tag name: $tagname" 1>&2 + exit $EXIT_FAILURE + ;; + esac + + case $tagname in + CC) + # Don't test for the "default" C tag, as we know, it's there, but + # not specially marked. + ;; + *) + if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$" < "$progpath" > /dev/null; then + taglist="$taglist $tagname" + # Evaluate the configuration. + eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$tagname'$/,/^# ### END LIBTOOL TAG CONFIG: '$tagname'$/p' < $progpath`" + else + $echo "$progname: ignoring unknown tag $tagname" 1>&2 + fi + ;; + esac + ;; + *) + eval "$prev=\$arg" + ;; + esac + + prev= + prevopt= + continue + fi + + # Have we seen a non-optional argument yet? + case $arg in + --help) + show_help=yes + ;; + + --version) + $echo "$PROGRAM (GNU $PACKAGE) $VERSION$TIMESTAMP" + $echo + $echo "Copyright (C) 2005 Free Software Foundation, Inc." + $echo "This is free software; see the source for copying conditions. There is NO" + $echo "warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + exit $? + ;; + + --config) + ${SED} -e '1,/^# ### BEGIN LIBTOOL CONFIG/d' -e '/^# ### END LIBTOOL CONFIG/,$d' $progpath + # Now print the configurations for the tags. + for tagname in $taglist; do + ${SED} -n -e "/^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$/,/^# ### END LIBTOOL TAG CONFIG: $tagname$/p" < "$progpath" + done + exit $? + ;; + + --debug) + $echo "$progname: enabling shell trace mode" + set -x + preserve_args="$preserve_args $arg" + ;; + + --dry-run | -n) + run=: + ;; + + --features) + $echo "host: $host" + if test "$build_libtool_libs" = yes; then + $echo "enable shared libraries" + else + $echo "disable shared libraries" + fi + if test "$build_old_libs" = yes; then + $echo "enable static libraries" + else + $echo "disable static libraries" + fi + exit $? + ;; + + --finish) mode="finish" ;; + + --mode) prevopt="--mode" prev=mode ;; + --mode=*) mode="$optarg" ;; + + --preserve-dup-deps) duplicate_deps="yes" ;; + + --quiet | --silent) + show=: + preserve_args="$preserve_args $arg" + ;; + + --tag) + prevopt="--tag" + prev=tag + preserve_args="$preserve_args --tag" + ;; + --tag=*) + set tag "$optarg" ${1+"$@"} + shift + prev=tag + preserve_args="$preserve_args --tag" + ;; + + -dlopen) + prevopt="-dlopen" + prev=execute_dlfiles + ;; + + -*) + $echo "$modename: unrecognized option \`$arg'" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + ;; + + *) + nonopt="$arg" + break + ;; + esac +done + +if test -n "$prevopt"; then + $echo "$modename: option \`$prevopt' requires an argument" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE +fi + +case $disable_libs in +no) + ;; +shared) + build_libtool_libs=no + build_old_libs=yes + ;; +static) + build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac` + ;; +esac + +# If this variable is set in any of the actions, the command in it +# will be execed at the end. This prevents here-documents from being +# left over by shells. +exec_cmd= + +if test -z "$show_help"; then + + # Infer the operation mode. + if test -z "$mode"; then + $echo "*** Warning: inferring the mode of operation is deprecated." 1>&2 + $echo "*** Future versions of Libtool will require --mode=MODE be specified." 
1>&2 + case $nonopt in + *cc | cc* | *++ | gcc* | *-gcc* | g++* | xlc*) + mode=link + for arg + do + case $arg in + -c) + mode=compile + break + ;; + esac + done + ;; + *db | *dbx | *strace | *truss) + mode=execute + ;; + *install*|cp|mv) + mode=install + ;; + *rm) + mode=uninstall + ;; + *) + # If we have no mode, but dlfiles were specified, then do execute mode. + test -n "$execute_dlfiles" && mode=execute + + # Just use the default operation mode. + if test -z "$mode"; then + if test -n "$nonopt"; then + $echo "$modename: warning: cannot infer operation mode from \`$nonopt'" 1>&2 + else + $echo "$modename: warning: cannot infer operation mode without MODE-ARGS" 1>&2 + fi + fi + ;; + esac + fi + + # Only execute mode is allowed to have -dlopen flags. + if test -n "$execute_dlfiles" && test "$mode" != execute; then + $echo "$modename: unrecognized option \`-dlopen'" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + # Change the help message to a mode-specific one. + generic_help="$help" + help="Try \`$modename --help --mode=$mode' for more information." + + # These modes are in order of execution frequency so that they run quickly. + case $mode in + # libtool compile mode + compile) + modename="$modename: compile" + # Get the compilation command and the source file. + base_compile= + srcfile="$nonopt" # always keep a non-empty value in "srcfile" + suppress_opt=yes + suppress_output= + arg_mode=normal + libobj= + later= + + for arg + do + case $arg_mode in + arg ) + # do not "continue". Instead, add this to base_compile + lastarg="$arg" + arg_mode=normal + ;; + + target ) + libobj="$arg" + arg_mode=normal + continue + ;; + + normal ) + # Accept any command-line options. + case $arg in + -o) + if test -n "$libobj" ; then + $echo "$modename: you cannot specify \`-o' more than once" 1>&2 + exit $EXIT_FAILURE + fi + arg_mode=target + continue + ;; + + -static | -prefer-pic | -prefer-non-pic) + later="$later $arg" + continue + ;; + + -no-suppress) + suppress_opt=no + continue + ;; + + -Xcompiler) + arg_mode=arg # the next one goes into the "base_compile" arg list + continue # The current "srcfile" will either be retained or + ;; # replaced later. I would guess that would be a bug. + + -Wc,*) + args=`$echo "X$arg" | $Xsed -e "s/^-Wc,//"` + lastarg= + save_ifs="$IFS"; IFS=',' + for arg in $args; do + IFS="$save_ifs" + + # Double-quote args containing other shell metacharacters. + # Many Bourne shells cannot handle close brackets correctly + # in scan sets, so we specify it separately. + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + lastarg="$lastarg $arg" + done + IFS="$save_ifs" + lastarg=`$echo "X$lastarg" | $Xsed -e "s/^ //"` + + # Add the arguments to base_compile. + base_compile="$base_compile $lastarg" + continue + ;; + + * ) + # Accept the current argument as the source file. + # The previous "srcfile" becomes the current argument. + # + lastarg="$srcfile" + srcfile="$arg" + ;; + esac # case $arg + ;; + esac # case $arg_mode + + # Aesthetically quote the previous argument. + lastarg=`$echo "X$lastarg" | $Xsed -e "$sed_quote_subst"` + + case $lastarg in + # Double-quote args containing other shell metacharacters. + # Many Bourne shells cannot handle close brackets correctly + # in scan sets, and some SunOS ksh mistreat backslash-escaping + # in scan sets (worked around with variable expansion), + # and furthermore cannot handle '|' '&' '(' ')' in scan sets + # at all, so we specify them separately. 
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + lastarg="\"$lastarg\"" + ;; + esac + + base_compile="$base_compile $lastarg" + done # for arg + + case $arg_mode in + arg) + $echo "$modename: you must specify an argument for -Xcompile" + exit $EXIT_FAILURE + ;; + target) + $echo "$modename: you must specify a target with \`-o'" 1>&2 + exit $EXIT_FAILURE + ;; + *) + # Get the name of the library object. + [ -z "$libobj" ] && libobj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%'` + ;; + esac + + # Recognize several different file suffixes. + # If the user specifies -o file.o, it is replaced with file.lo + xform='[cCFSifmso]' + case $libobj in + *.ada) xform=ada ;; + *.adb) xform=adb ;; + *.ads) xform=ads ;; + *.asm) xform=asm ;; + *.c++) xform=c++ ;; + *.cc) xform=cc ;; + *.ii) xform=ii ;; + *.class) xform=class ;; + *.cpp) xform=cpp ;; + *.cxx) xform=cxx ;; + *.f90) xform=f90 ;; + *.for) xform=for ;; + *.java) xform=java ;; + esac + + libobj=`$echo "X$libobj" | $Xsed -e "s/\.$xform$/.lo/"` + + case $libobj in + *.lo) obj=`$echo "X$libobj" | $Xsed -e "$lo2o"` ;; + *) + $echo "$modename: cannot determine name of library object from \`$libobj'" 1>&2 + exit $EXIT_FAILURE + ;; + esac + + func_infer_tag $base_compile + + for arg in $later; do + case $arg in + -static) + build_old_libs=yes + continue + ;; + + -prefer-pic) + pic_mode=yes + continue + ;; + + -prefer-non-pic) + pic_mode=no + continue + ;; + esac + done + + qlibobj=`$echo "X$libobj" | $Xsed -e "$sed_quote_subst"` + case $qlibobj in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + qlibobj="\"$qlibobj\"" ;; + esac + test "X$libobj" != "X$qlibobj" \ + && $echo "X$libobj" | grep '[]~#^*{};<>?"'"'"' &()|`$[]' \ + && $echo "$modename: libobj name \`$libobj' may not contain shell special characters." + objname=`$echo "X$obj" | $Xsed -e 's%^.*/%%'` + xdir=`$echo "X$obj" | $Xsed -e 's%/[^/]*$%%'` + if test "X$xdir" = "X$obj"; then + xdir= + else + xdir=$xdir/ + fi + lobj=${xdir}$objdir/$objname + + if test -z "$base_compile"; then + $echo "$modename: you must specify a compilation command" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + # Delete any leftover library objects. 
+ if test "$build_old_libs" = yes; then + removelist="$obj $lobj $libobj ${libobj}T" + else + removelist="$lobj $libobj ${libobj}T" + fi + + $run $rm $removelist + trap "$run $rm $removelist; exit $EXIT_FAILURE" 1 2 15 + + # On Cygwin there's no "real" PIC flag so we must build both object types + case $host_os in + cygwin* | mingw* | pw32* | os2*) + pic_mode=default + ;; + esac + if test "$pic_mode" = no && test "$deplibs_check_method" != pass_all; then + # non-PIC code in shared libraries is not supported + pic_mode=default + fi + + # Calculate the filename of the output object if compiler does + # not support -o with -c + if test "$compiler_c_o" = no; then + output_obj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%' -e 's%\.[^.]*$%%'`.${objext} + lockfile="$output_obj.lock" + removelist="$removelist $output_obj $lockfile" + trap "$run $rm $removelist; exit $EXIT_FAILURE" 1 2 15 + else + output_obj= + need_locks=no + lockfile= + fi + + # Lock this critical section if it is needed + # We use this script file to make the link, it avoids creating a new file + if test "$need_locks" = yes; then + until $run ln "$srcfile" "$lockfile" 2>/dev/null; do + $show "Waiting for $lockfile to be removed" + sleep 2 + done + elif test "$need_locks" = warn; then + if test -f "$lockfile"; then + $echo "\ +*** ERROR, $lockfile exists and contains: +`cat $lockfile 2>/dev/null` + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support \`-c' and \`-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $run $rm $removelist + exit $EXIT_FAILURE + fi + $echo "$srcfile" > "$lockfile" + fi + + if test -n "$fix_srcfile_path"; then + eval srcfile=\"$fix_srcfile_path\" + fi + qsrcfile=`$echo "X$srcfile" | $Xsed -e "$sed_quote_subst"` + case $qsrcfile in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + qsrcfile="\"$qsrcfile\"" ;; + esac + + $run $rm "$libobj" "${libobj}T" + + # Create a libtool object file (analogous to a ".la" file), + # but don't create it if we're doing a dry run. + test -z "$run" && cat > ${libobj}T </dev/null`" != "X$srcfile"; then + $echo "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support \`-c' and \`-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $run $rm $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed, then go on to compile the next one + if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then + $show "$mv $output_obj $lobj" + if $run $mv $output_obj $lobj; then : + else + error=$? + $run $rm $removelist + exit $error + fi + fi + + # Append the name of the PIC object to the libtool object file. + test -z "$run" && cat >> ${libobj}T <> ${libobj}T </dev/null`" != "X$srcfile"; then + $echo "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support \`-c' and \`-o' together. 
If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $run $rm $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed + if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then + $show "$mv $output_obj $obj" + if $run $mv $output_obj $obj; then : + else + error=$? + $run $rm $removelist + exit $error + fi + fi + + # Append the name of the non-PIC object the libtool object file. + # Only append if the libtool object file exists. + test -z "$run" && cat >> ${libobj}T <> ${libobj}T <&2 + fi + if test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=yes + else + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=built + fi + build_libtool_libs=no + build_old_libs=yes + break + ;; + esac + done + + # See if our shared archives depend on static archives. + test -n "$old_archive_from_new_cmds" && build_old_libs=yes + + # Go through the arguments, transforming them on the way. + while test "$#" -gt 0; do + arg="$1" + shift + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + qarg=\"`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`\" ### testsuite: skip nested quoting test + ;; + *) qarg=$arg ;; + esac + libtool_args="$libtool_args $qarg" + + # If the previous option needs an argument, assign it. + if test -n "$prev"; then + case $prev in + output) + compile_command="$compile_command @OUTPUT@" + finalize_command="$finalize_command @OUTPUT@" + ;; + esac + + case $prev in + dlfiles|dlprefiles) + if test "$preload" = no; then + # Add the symbol object into the linking commands. + compile_command="$compile_command @SYMFILE@" + finalize_command="$finalize_command @SYMFILE@" + preload=yes + fi + case $arg in + *.la | *.lo) ;; # We handle these cases below. + force) + if test "$dlself" = no; then + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + self) + if test "$prev" = dlprefiles; then + dlself=yes + elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then + dlself=yes + else + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + *) + if test "$prev" = dlfiles; then + dlfiles="$dlfiles $arg" + else + dlprefiles="$dlprefiles $arg" + fi + prev= + continue + ;; + esac + ;; + expsyms) + export_symbols="$arg" + if test ! -f "$arg"; then + $echo "$modename: symbol file \`$arg' does not exist" + exit $EXIT_FAILURE + fi + prev= + continue + ;; + expsyms_regex) + export_symbols_regex="$arg" + prev= + continue + ;; + inst_prefix) + inst_prefix_dir="$arg" + prev= + continue + ;; + precious_regex) + precious_files_regex="$arg" + prev= + continue + ;; + release) + release="-$arg" + prev= + continue + ;; + objectlist) + if test -f "$arg"; then + save_arg=$arg + moreargs= + for fil in `cat $save_arg` + do +# moreargs="$moreargs $fil" + arg=$fil + # A libtool-controlled object. + + # Check to see that this really is a libtool object. + if (${SED} -e '2q' $arg | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then + pic_object= + non_pic_object= + + # Read the .lo file + # If there is no directory component, then add one. + case $arg in + */* | *\\*) . $arg ;; + *) . 
./$arg ;; + esac + + if test -z "$pic_object" || \ + test -z "$non_pic_object" || + test "$pic_object" = none && \ + test "$non_pic_object" = none; then + $echo "$modename: cannot find name of object for \`$arg'" 1>&2 + exit $EXIT_FAILURE + fi + + # Extract subdirectory from the argument. + xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'` + if test "X$xdir" = "X$arg"; then + xdir= + else + xdir="$xdir/" + fi + + if test "$pic_object" != none; then + # Prepend the subdirectory the object is found in. + pic_object="$xdir$pic_object" + + if test "$prev" = dlfiles; then + if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then + dlfiles="$dlfiles $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test "$prev" = dlprefiles; then + # Preload the old-style object. + dlprefiles="$dlprefiles $pic_object" + prev= + fi + + # A PIC object. + libobjs="$libobjs $pic_object" + arg="$pic_object" + fi + + # Non-PIC object. + if test "$non_pic_object" != none; then + # Prepend the subdirectory the object is found in. + non_pic_object="$xdir$non_pic_object" + + # A standard non-PIC object + non_pic_objects="$non_pic_objects $non_pic_object" + if test -z "$pic_object" || test "$pic_object" = none ; then + arg="$non_pic_object" + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object="$pic_object" + non_pic_objects="$non_pic_objects $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if test -z "$run"; then + $echo "$modename: \`$arg' is not a valid libtool object" 1>&2 + exit $EXIT_FAILURE + else + # Dry-run case. + + # Extract subdirectory from the argument. + xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'` + if test "X$xdir" = "X$arg"; then + xdir= + else + xdir="$xdir/" + fi + + pic_object=`$echo "X${xdir}${objdir}/${arg}" | $Xsed -e "$lo2o"` + non_pic_object=`$echo "X${xdir}${arg}" | $Xsed -e "$lo2o"` + libobjs="$libobjs $pic_object" + non_pic_objects="$non_pic_objects $non_pic_object" + fi + fi + done + else + $echo "$modename: link input file \`$save_arg' does not exist" + exit $EXIT_FAILURE + fi + arg=$save_arg + prev= + continue + ;; + rpath | xrpath) + # We need an absolute path. 
+ case $arg in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + $echo "$modename: only absolute run-paths are allowed" 1>&2 + exit $EXIT_FAILURE + ;; + esac + if test "$prev" = rpath; then + case "$rpath " in + *" $arg "*) ;; + *) rpath="$rpath $arg" ;; + esac + else + case "$xrpath " in + *" $arg "*) ;; + *) xrpath="$xrpath $arg" ;; + esac + fi + prev= + continue + ;; + xcompiler) + compiler_flags="$compiler_flags $qarg" + prev= + compile_command="$compile_command $qarg" + finalize_command="$finalize_command $qarg" + continue + ;; + xlinker) + linker_flags="$linker_flags $qarg" + compiler_flags="$compiler_flags $wl$qarg" + prev= + compile_command="$compile_command $wl$qarg" + finalize_command="$finalize_command $wl$qarg" + continue + ;; + xcclinker) + linker_flags="$linker_flags $qarg" + compiler_flags="$compiler_flags $qarg" + prev= + compile_command="$compile_command $qarg" + finalize_command="$finalize_command $qarg" + continue + ;; + shrext) + shrext_cmds="$arg" + prev= + continue + ;; + darwin_framework|darwin_framework_skip) + test "$prev" = "darwin_framework" && compiler_flags="$compiler_flags $arg" + compile_command="$compile_command $arg" + finalize_command="$finalize_command $arg" + prev= + continue + ;; + *) + eval "$prev=\"\$arg\"" + prev= + continue + ;; + esac + fi # test -n "$prev" + + prevarg="$arg" + + case $arg in + -all-static) + if test -n "$link_static_flag"; then + compile_command="$compile_command $link_static_flag" + finalize_command="$finalize_command $link_static_flag" + fi + continue + ;; + + -allow-undefined) + # FIXME: remove this flag sometime in the future. + $echo "$modename: \`-allow-undefined' is deprecated because it is the default" 1>&2 + continue + ;; + + -avoid-version) + avoid_version=yes + continue + ;; + + -dlopen) + prev=dlfiles + continue + ;; + + -dlpreopen) + prev=dlprefiles + continue + ;; + + -export-dynamic) + export_dynamic=yes + continue + ;; + + -export-symbols | -export-symbols-regex) + if test -n "$export_symbols" || test -n "$export_symbols_regex"; then + $echo "$modename: more than one -exported-symbols argument is not allowed" + exit $EXIT_FAILURE + fi + if test "X$arg" = "X-export-symbols"; then + prev=expsyms + else + prev=expsyms_regex + fi + continue + ;; + + -framework|-arch|-isysroot) + case " $CC " in + *" ${arg} ${1} "* | *" ${arg} ${1} "*) + prev=darwin_framework_skip ;; + *) compiler_flags="$compiler_flags $arg" + prev=darwin_framework ;; + esac + compile_command="$compile_command $arg" + finalize_command="$finalize_command $arg" + continue + ;; + + -inst-prefix-dir) + prev=inst_prefix + continue + ;; + + # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:* + # so, if we see these flags be careful not to treat them like -L + -L[A-Z][A-Z]*:*) + case $with_gcc/$host in + no/*-*-irix* | /*-*-irix*) + compile_command="$compile_command $arg" + finalize_command="$finalize_command $arg" + ;; + esac + continue + ;; + + -L*) + dir=`$echo "X$arg" | $Xsed -e 's/^-L//'` + # We need an absolute path. 
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + absdir=`cd "$dir" && pwd` + if test -z "$absdir"; then + $echo "$modename: cannot determine absolute directory name of \`$dir'" 1>&2 + absdir="$dir" + notinst_path="$notinst_path $dir" + fi + dir="$absdir" + ;; + esac + case "$deplibs " in + *" -L$dir "*) ;; + *) + deplibs="$deplibs -L$dir" + lib_search_path="$lib_search_path $dir" + ;; + esac + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*) + testbindir=`$echo "X$dir" | $Xsed -e 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$dir:"*) ;; + *) dllsearchpath="$dllsearchpath:$dir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + *) dllsearchpath="$dllsearchpath:$testbindir";; + esac + ;; + esac + continue + ;; + + -l*) + if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos*) + # These systems don't actually have a C or math library (as such) + continue + ;; + *-*-os2*) + # These systems don't actually have a C library (as such) + test "X$arg" = "X-lc" && continue + ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc due to us having libc/libc_r. + test "X$arg" = "X-lc" && continue + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C and math libraries are in the System framework + deplibs="$deplibs -framework System" + continue + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + test "X$arg" = "X-lc" && continue + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + test "X$arg" = "X-lc" && continue + ;; + esac + elif test "X$arg" = "X-lc_r"; then + case $host in + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc_r directly, use -pthread flag. + continue + ;; + esac + fi + deplibs="$deplibs $arg" + continue + ;; + + # Tru64 UNIX uses -model [arg] to determine the layout of C++ + # classes, name mangling, and exception handling. + -model) + compile_command="$compile_command $arg" + compiler_flags="$compiler_flags $arg" + finalize_command="$finalize_command $arg" + prev=xcompiler + continue + ;; + + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe) + compiler_flags="$compiler_flags $arg" + compile_command="$compile_command $arg" + finalize_command="$finalize_command $arg" + continue + ;; + + -module) + module=yes + continue + ;; + + # -64, -mips[0-9] enable 64-bit mode on the SGI compiler + # -r[0-9][0-9]* specifies the processor on the SGI compiler + # -xarch=*, -xtarget=* enable 64-bit mode on the Sun compiler + # +DA*, +DD* enable 64-bit mode on the HP compiler + # -q* pass through compiler args for the IBM compiler + # -m* pass through architecture-specific compiler args for GCC + # -m*, -t[45]*, -txscale* pass through architecture-specific + # compiler args for GCC + # -pg pass through profiling flag for GCC + # @file GCC response files + -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*|-pg| \ + -t[45]*|-txscale*|@*) + + # Unknown arguments in both finalize_command and compile_command need + # to be aesthetically quoted because they are evaled later. 
+ arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + compile_command="$compile_command $arg" + finalize_command="$finalize_command $arg" + compiler_flags="$compiler_flags $arg" + continue + ;; + + -shrext) + prev=shrext + continue + ;; + + -no-fast-install) + fast_install=no + continue + ;; + + -no-install) + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*) + # The PATH hackery in wrapper scripts is required on Windows + # in order for the loader to find any dlls it needs. + $echo "$modename: warning: \`-no-install' is ignored for $host" 1>&2 + $echo "$modename: warning: assuming \`-no-fast-install' instead" 1>&2 + fast_install=no + ;; + *) no_install=yes ;; + esac + continue + ;; + + -no-undefined) + allow_undefined=no + continue + ;; + + -objectlist) + prev=objectlist + continue + ;; + + -o) prev=output ;; + + -precious-files-regex) + prev=precious_regex + continue + ;; + + -release) + prev=release + continue + ;; + + -rpath) + prev=rpath + continue + ;; + + -R) + prev=xrpath + continue + ;; + + -R*) + dir=`$echo "X$arg" | $Xsed -e 's/^-R//'` + # We need an absolute path. + case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + $echo "$modename: only absolute run-paths are allowed" 1>&2 + exit $EXIT_FAILURE + ;; + esac + case "$xrpath " in + *" $dir "*) ;; + *) xrpath="$xrpath $dir" ;; + esac + continue + ;; + + -static) + # The effects of -static are defined in a previous loop. + # We used to do the same as -all-static on platforms that + # didn't have a PIC flag, but the assumption that the effects + # would be equivalent was wrong. It would break on at least + # Digital Unix and AIX. + continue + ;; + + -thread-safe) + thread_safe=yes + continue + ;; + + -version-info) + prev=vinfo + continue + ;; + -version-number) + prev=vinfo + vinfo_number=yes + continue + ;; + + -Wc,*) + args=`$echo "X$arg" | $Xsed -e "$sed_quote_subst" -e 's/^-Wc,//'` + arg= + save_ifs="$IFS"; IFS=',' + for flag in $args; do + IFS="$save_ifs" + case $flag in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + flag="\"$flag\"" + ;; + esac + arg="$arg $wl$flag" + compiler_flags="$compiler_flags $flag" + done + IFS="$save_ifs" + arg=`$echo "X$arg" | $Xsed -e "s/^ //"` + ;; + + -Wl,*) + args=`$echo "X$arg" | $Xsed -e "$sed_quote_subst" -e 's/^-Wl,//'` + arg= + save_ifs="$IFS"; IFS=',' + for flag in $args; do + IFS="$save_ifs" + case $flag in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + flag="\"$flag\"" + ;; + esac + arg="$arg $wl$flag" + compiler_flags="$compiler_flags $wl$flag" + linker_flags="$linker_flags $flag" + done + IFS="$save_ifs" + arg=`$echo "X$arg" | $Xsed -e "s/^ //"` + ;; + + -Xcompiler) + prev=xcompiler + continue + ;; + + -Xlinker) + prev=xlinker + continue + ;; + + -XCClinker) + prev=xcclinker + continue + ;; + + # Some other compiler flag. + -* | +*) + # Unknown arguments in both finalize_command and compile_command need + # to be aesthetically quoted because they are evaled later. + arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + ;; + + *.$objext) + # A standard object. + objs="$objs $arg" + ;; + + *.lo) + # A libtool-controlled object. + + # Check to see that this really is a libtool object. + if (${SED} -e '2q' $arg | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then + pic_object= + non_pic_object= + + # Read the .lo file + # If there is no directory component, then add one. 
+ case $arg in + */* | *\\*) . $arg ;; + *) . ./$arg ;; + esac + + if test -z "$pic_object" || \ + test -z "$non_pic_object" || + test "$pic_object" = none && \ + test "$non_pic_object" = none; then + $echo "$modename: cannot find name of object for \`$arg'" 1>&2 + exit $EXIT_FAILURE + fi + + # Extract subdirectory from the argument. + xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'` + if test "X$xdir" = "X$arg"; then + xdir= + else + xdir="$xdir/" + fi + + if test "$pic_object" != none; then + # Prepend the subdirectory the object is found in. + pic_object="$xdir$pic_object" + + if test "$prev" = dlfiles; then + if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then + dlfiles="$dlfiles $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test "$prev" = dlprefiles; then + # Preload the old-style object. + dlprefiles="$dlprefiles $pic_object" + prev= + fi + + # A PIC object. + libobjs="$libobjs $pic_object" + arg="$pic_object" + fi + + # Non-PIC object. + if test "$non_pic_object" != none; then + # Prepend the subdirectory the object is found in. + non_pic_object="$xdir$non_pic_object" + + # A standard non-PIC object + non_pic_objects="$non_pic_objects $non_pic_object" + if test -z "$pic_object" || test "$pic_object" = none ; then + arg="$non_pic_object" + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object="$pic_object" + non_pic_objects="$non_pic_objects $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if test -z "$run"; then + $echo "$modename: \`$arg' is not a valid libtool object" 1>&2 + exit $EXIT_FAILURE + else + # Dry-run case. + + # Extract subdirectory from the argument. + xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'` + if test "X$xdir" = "X$arg"; then + xdir= + else + xdir="$xdir/" + fi + + pic_object=`$echo "X${xdir}${objdir}/${arg}" | $Xsed -e "$lo2o"` + non_pic_object=`$echo "X${xdir}${arg}" | $Xsed -e "$lo2o"` + libobjs="$libobjs $pic_object" + non_pic_objects="$non_pic_objects $non_pic_object" + fi + fi + ;; + + *.$libext) + # An archive. + deplibs="$deplibs $arg" + old_deplibs="$old_deplibs $arg" + continue + ;; + + *.la) + # A libtool-controlled library. + + if test "$prev" = dlfiles; then + # This library was specified with -dlopen. + dlfiles="$dlfiles $arg" + prev= + elif test "$prev" = dlprefiles; then + # The library was specified with -dlpreopen. + dlprefiles="$dlprefiles $arg" + prev= + else + deplibs="$deplibs $arg" + fi + continue + ;; + + # Some other compiler argument. + *) + # Unknown arguments in both finalize_command and compile_command need + # to be aesthetically quoted because they are evaled later. + arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + ;; + esac # arg + + # Now actually substitute the argument into the commands. 
+ if test -n "$arg"; then + compile_command="$compile_command $arg" + finalize_command="$finalize_command $arg" + fi + done # argument parsing loop + + if test -n "$prev"; then + $echo "$modename: the \`$prevarg' option requires an argument" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then + eval arg=\"$export_dynamic_flag_spec\" + compile_command="$compile_command $arg" + finalize_command="$finalize_command $arg" + fi + + oldlibs= + # calculate the name of the file, without its directory + outputname=`$echo "X$output" | $Xsed -e 's%^.*/%%'` + libobjs_save="$libobjs" + + if test -n "$shlibpath_var"; then + # get the directories listed in $shlibpath_var + eval shlib_search_path=\`\$echo \"X\${$shlibpath_var}\" \| \$Xsed -e \'s/:/ /g\'\` + else + shlib_search_path= + fi + eval sys_lib_search_path=\"$sys_lib_search_path_spec\" + eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\" + + output_objdir=`$echo "X$output" | $Xsed -e 's%/[^/]*$%%'` + if test "X$output_objdir" = "X$output"; then + output_objdir="$objdir" + else + output_objdir="$output_objdir/$objdir" + fi + # Create the object directory. + if test ! -d "$output_objdir"; then + $show "$mkdir $output_objdir" + $run $mkdir $output_objdir + exit_status=$? + if test "$exit_status" -ne 0 && test ! -d "$output_objdir"; then + exit $exit_status + fi + fi + + # Determine the type of output + case $output in + "") + $echo "$modename: you must specify an output file" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + ;; + *.$libext) linkmode=oldlib ;; + *.lo | *.$objext) linkmode=obj ;; + *.la) linkmode=lib ;; + *) linkmode=prog ;; # Anything else should be a program. + esac + + case $host in + *cygwin* | *mingw* | *pw32*) + # don't eliminate duplications in $postdeps and $predeps + duplicate_compiler_generated_deps=yes + ;; + *) + duplicate_compiler_generated_deps=$duplicate_deps + ;; + esac + specialdeplibs= + + libs= + # Find all interdependent deplibs by searching for libraries + # that are linked more than once (e.g. -la -lb -la) + for deplib in $deplibs; do + if test "X$duplicate_deps" = "Xyes" ; then + case "$libs " in + *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; + esac + fi + libs="$libs $deplib" + done + + if test "$linkmode" = lib; then + libs="$predeps $libs $compiler_lib_search_path $postdeps" + + # Compute libraries that are listed more than once in $predeps + # $postdeps and mark them as special (i.e., whose duplicates are + # not to be eliminated). 
+ pre_post_deps= + if test "X$duplicate_compiler_generated_deps" = "Xyes" ; then + for pre_post_dep in $predeps $postdeps; do + case "$pre_post_deps " in + *" $pre_post_dep "*) specialdeplibs="$specialdeplibs $pre_post_deps" ;; + esac + pre_post_deps="$pre_post_deps $pre_post_dep" + done + fi + pre_post_deps= + fi + + deplibs= + newdependency_libs= + newlib_search_path= + need_relink=no # whether we're linking any uninstalled libtool libraries + notinst_deplibs= # not-installed libtool libraries + case $linkmode in + lib) + passes="conv link" + for file in $dlfiles $dlprefiles; do + case $file in + *.la) ;; + *) + $echo "$modename: libraries can \`-dlopen' only libtool libraries: $file" 1>&2 + exit $EXIT_FAILURE + ;; + esac + done + ;; + prog) + compile_deplibs= + finalize_deplibs= + alldeplibs=no + newdlfiles= + newdlprefiles= + passes="conv scan dlopen dlpreopen link" + ;; + *) passes="conv" + ;; + esac + for pass in $passes; do + if test "$linkmode,$pass" = "lib,link" || + test "$linkmode,$pass" = "prog,scan"; then + libs="$deplibs" + deplibs= + fi + if test "$linkmode" = prog; then + case $pass in + dlopen) libs="$dlfiles" ;; + dlpreopen) libs="$dlprefiles" ;; + link) libs="$deplibs %DEPLIBS% $dependency_libs" ;; + esac + fi + if test "$pass" = dlopen; then + # Collect dlpreopened libraries + save_deplibs="$deplibs" + deplibs= + fi + for deplib in $libs; do + lib= + found=no + case $deplib in + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe) + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + compiler_flags="$compiler_flags $deplib" + fi + continue + ;; + -l*) + if test "$linkmode" != lib && test "$linkmode" != prog; then + $echo "$modename: warning: \`-l' is ignored for archives/objects" 1>&2 + continue + fi + name=`$echo "X$deplib" | $Xsed -e 's/^-l//'` + for searchdir in $newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path; do + for search_ext in .la $std_shrext .so .a; do + # Search the libtool library + lib="$searchdir/lib${name}${search_ext}" + if test -f "$lib"; then + if test "$search_ext" = ".la"; then + found=yes + else + found=no + fi + break 2 + fi + done + done + if test "$found" != yes; then + # deplib doesn't seem to be a libtool library + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" + fi + continue + else # deplib is a libtool library + # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib, + # We need to do some special things here, and not later. + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + case " $predeps $postdeps " in + *" $deplib "*) + if (${SED} -e '2q' $lib | + grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then + library_names= + old_library= + case $lib in + */* | *\\*) . $lib ;; + *) . ./$lib ;; + esac + for l in $old_library $library_names; do + ll="$l" + done + if test "X$ll" = "X$old_library" ; then # only static version available + found=no + ladir=`$echo "X$lib" | $Xsed -e 's%/[^/]*$%%'` + test "X$ladir" = "X$lib" && ladir="." 
+ lib=$ladir/$old_library + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" + fi + continue + fi + fi + ;; + *) ;; + esac + fi + fi + ;; # -l + -L*) + case $linkmode in + lib) + deplibs="$deplib $deplibs" + test "$pass" = conv && continue + newdependency_libs="$deplib $newdependency_libs" + newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'` + ;; + prog) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + continue + fi + if test "$pass" = scan; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'` + ;; + *) + $echo "$modename: warning: \`-L' is ignored for archives/objects" 1>&2 + ;; + esac # linkmode + continue + ;; # -L + -R*) + if test "$pass" = link; then + dir=`$echo "X$deplib" | $Xsed -e 's/^-R//'` + # Make sure the xrpath contains only unique directories. + case "$xrpath " in + *" $dir "*) ;; + *) xrpath="$xrpath $dir" ;; + esac + fi + deplibs="$deplib $deplibs" + continue + ;; + *.la) lib="$deplib" ;; + *.$libext) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + continue + fi + case $linkmode in + lib) + valid_a_lib=no + case $deplibs_check_method in + match_pattern*) + set dummy $deplibs_check_method + match_pattern_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"` + if eval $echo \"$deplib\" 2>/dev/null \ + | $SED 10q \ + | $EGREP "$match_pattern_regex" > /dev/null; then + valid_a_lib=yes + fi + ;; + pass_all) + valid_a_lib=yes + ;; + esac + if test "$valid_a_lib" != yes; then + $echo + $echo "*** Warning: Trying to link with static lib archive $deplib." + $echo "*** I have the capability to make that library automatically link in when" + $echo "*** you link to this library. But I can only do this if you have a" + $echo "*** shared version of the library, which you do not appear to have" + $echo "*** because the file extensions .$libext of this argument makes me believe" + $echo "*** that it is just a static archive that I should not used here." + else + $echo + $echo "*** Warning: Linking the shared library $output against the" + $echo "*** static library $deplib is not portable!" + deplibs="$deplib $deplibs" + fi + continue + ;; + prog) + if test "$pass" != link; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + continue + ;; + esac # linkmode + ;; # *.$libext + *.lo | *.$objext) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + elif test "$linkmode" = prog; then + if test "$pass" = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then + # If there is no dlopen support or we're linking statically, + # we need to preload. + newdlprefiles="$newdlprefiles $deplib" + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + newdlfiles="$newdlfiles $deplib" + fi + fi + continue + ;; + %DEPLIBS%) + alldeplibs=yes + continue + ;; + esac # case $deplib + if test "$found" = yes || test -f "$lib"; then : + else + $echo "$modename: cannot find the library \`$lib' or unhandled argument \`$deplib'" 1>&2 + exit $EXIT_FAILURE + fi + + # Check to see that this really is a libtool archive. 
+ if (${SED} -e '2q' $lib | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then : + else + $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2 + exit $EXIT_FAILURE + fi + + ladir=`$echo "X$lib" | $Xsed -e 's%/[^/]*$%%'` + test "X$ladir" = "X$lib" && ladir="." + + dlname= + dlopen= + dlpreopen= + libdir= + library_names= + old_library= + # If the library was installed with an old release of libtool, + # it will not redefine variables installed, or shouldnotlink + installed=yes + shouldnotlink=no + avoidtemprpath= + + + # Read the .la file + case $lib in + */* | *\\*) . $lib ;; + *) . ./$lib ;; + esac + + if test "$linkmode,$pass" = "lib,link" || + test "$linkmode,$pass" = "prog,scan" || + { test "$linkmode" != prog && test "$linkmode" != lib; }; then + test -n "$dlopen" && dlfiles="$dlfiles $dlopen" + test -n "$dlpreopen" && dlprefiles="$dlprefiles $dlpreopen" + fi + + if test "$pass" = conv; then + # Only check for convenience libraries + deplibs="$lib $deplibs" + if test -z "$libdir"; then + if test -z "$old_library"; then + $echo "$modename: cannot find name of link library for \`$lib'" 1>&2 + exit $EXIT_FAILURE + fi + # It is a libtool convenience library, so add in its objects. + convenience="$convenience $ladir/$objdir/$old_library" + old_convenience="$old_convenience $ladir/$objdir/$old_library" + tmp_libs= + for deplib in $dependency_libs; do + deplibs="$deplib $deplibs" + if test "X$duplicate_deps" = "Xyes" ; then + case "$tmp_libs " in + *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; + esac + fi + tmp_libs="$tmp_libs $deplib" + done + elif test "$linkmode" != prog && test "$linkmode" != lib; then + $echo "$modename: \`$lib' is not a convenience library" 1>&2 + exit $EXIT_FAILURE + fi + continue + fi # $pass = conv + + + # Get the name of the library we link against. + linklib= + for l in $old_library $library_names; do + linklib="$l" + done + if test -z "$linklib"; then + $echo "$modename: cannot find name of link library for \`$lib'" 1>&2 + exit $EXIT_FAILURE + fi + + # This library was specified with -dlopen. + if test "$pass" = dlopen; then + if test -z "$libdir"; then + $echo "$modename: cannot -dlopen a convenience library: \`$lib'" 1>&2 + exit $EXIT_FAILURE + fi + if test -z "$dlname" || + test "$dlopen_support" != yes || + test "$build_libtool_libs" = no; then + # If there is no dlname, no dlopen support or we're linking + # statically, we need to preload. We also need to preload any + # dependent libraries so libltdl's deplib preloader doesn't + # bomb out in the load deplibs phase. + dlprefiles="$dlprefiles $lib $dependency_libs" + else + newdlfiles="$newdlfiles $lib" + fi + continue + fi # $pass = dlopen + + # We need an absolute path. + case $ladir in + [\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;; + *) + abs_ladir=`cd "$ladir" && pwd` + if test -z "$abs_ladir"; then + $echo "$modename: warning: cannot determine absolute directory name of \`$ladir'" 1>&2 + $echo "$modename: passing it literally to the linker, although it might fail" 1>&2 + abs_ladir="$ladir" + fi + ;; + esac + laname=`$echo "X$lib" | $Xsed -e 's%^.*/%%'` + + # Find the relevant object directory and library name. + if test "X$installed" = Xyes; then + if test ! -f "$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then + $echo "$modename: warning: library \`$lib' was moved." 1>&2 + dir="$ladir" + absdir="$abs_ladir" + libdir="$abs_ladir" + else + dir="$libdir" + absdir="$libdir" + fi + test "X$hardcode_automatic" = Xyes && avoidtemprpath=yes + else + if test ! 
-f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then + dir="$ladir" + absdir="$abs_ladir" + # Remove this search path later + notinst_path="$notinst_path $abs_ladir" + else + dir="$ladir/$objdir" + absdir="$abs_ladir/$objdir" + # Remove this search path later + notinst_path="$notinst_path $abs_ladir" + fi + fi # $installed = yes + name=`$echo "X$laname" | $Xsed -e 's/\.la$//' -e 's/^lib//'` + + # This library was specified with -dlpreopen. + if test "$pass" = dlpreopen; then + if test -z "$libdir"; then + $echo "$modename: cannot -dlpreopen a convenience library: \`$lib'" 1>&2 + exit $EXIT_FAILURE + fi + # Prefer using a static library (so that no silly _DYNAMIC symbols + # are required to link). + if test -n "$old_library"; then + newdlprefiles="$newdlprefiles $dir/$old_library" + # Otherwise, use the dlname, so that lt_dlopen finds it. + elif test -n "$dlname"; then + newdlprefiles="$newdlprefiles $dir/$dlname" + else + newdlprefiles="$newdlprefiles $dir/$linklib" + fi + fi # $pass = dlpreopen + + if test -z "$libdir"; then + # Link the convenience library + if test "$linkmode" = lib; then + deplibs="$dir/$old_library $deplibs" + elif test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$dir/$old_library $compile_deplibs" + finalize_deplibs="$dir/$old_library $finalize_deplibs" + else + deplibs="$lib $deplibs" # used for prog,scan pass + fi + continue + fi + + + if test "$linkmode" = prog && test "$pass" != link; then + newlib_search_path="$newlib_search_path $ladir" + deplibs="$lib $deplibs" + + linkalldeplibs=no + if test "$link_all_deplibs" != no || test -z "$library_names" || + test "$build_libtool_libs" = no; then + linkalldeplibs=yes + fi + + tmp_libs= + for deplib in $dependency_libs; do + case $deplib in + -L*) newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`;; ### testsuite: skip nested quoting test + esac + # Need to link against all dependency_libs? + if test "$linkalldeplibs" = yes; then + deplibs="$deplib $deplibs" + else + # Need to hardcode shared library paths + # or/and link against static libraries + newdependency_libs="$deplib $newdependency_libs" + fi + if test "X$duplicate_deps" = "Xyes" ; then + case "$tmp_libs " in + *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; + esac + fi + tmp_libs="$tmp_libs $deplib" + done # for deplib + continue + fi # $linkmode = prog... + + if test "$linkmode,$pass" = "prog,link"; then + if test -n "$library_names" && + { test "$prefer_static_libs" = no || test -z "$old_library"; }; then + # We need to hardcode the library path + if test -n "$shlibpath_var" && test -z "$avoidtemprpath" ; then + # Make sure the rpath contains only unique directories. + case "$temp_rpath " in + *" $dir "*) ;; + *" $absdir "*) ;; + *) temp_rpath="$temp_rpath $absdir" ;; + esac + fi + + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. + case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) compile_rpath="$compile_rpath $absdir" + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) finalize_rpath="$finalize_rpath $libdir" + esac + ;; + esac + fi # $linkmode,$pass = prog,link... 
+ + if test "$alldeplibs" = yes && + { test "$deplibs_check_method" = pass_all || + { test "$build_libtool_libs" = yes && + test -n "$library_names"; }; }; then + # We only need to search for static libraries + continue + fi + fi + + link_static=no # Whether the deplib will be linked statically + use_static_libs=$prefer_static_libs + if test "$use_static_libs" = built && test "$installed" = yes ; then + use_static_libs=no + fi + if test -n "$library_names" && + { test "$use_static_libs" = no || test -z "$old_library"; }; then + if test "$installed" = no; then + notinst_deplibs="$notinst_deplibs $lib" + need_relink=yes + fi + # This is a shared library + + # Warn about portability, can't link against -module's on + # some systems (darwin) + if test "$shouldnotlink" = yes && test "$pass" = link ; then + $echo + if test "$linkmode" = prog; then + $echo "*** Warning: Linking the executable $output against the loadable module" + else + $echo "*** Warning: Linking the shared library $output against the loadable module" + fi + $echo "*** $linklib is not portable!" + fi + if test "$linkmode" = lib && + test "$hardcode_into_libs" = yes; then + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. + case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) compile_rpath="$compile_rpath $absdir" + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) finalize_rpath="$finalize_rpath $libdir" + esac + ;; + esac + fi + + if test -n "$old_archive_from_expsyms_cmds"; then + # figure out the soname + set dummy $library_names + realname="$2" + shift; shift + libname=`eval \\$echo \"$libname_spec\"` + # use dlname if we got it. it's perfectly good, no? + if test -n "$dlname"; then + soname="$dlname" + elif test -n "$soname_spec"; then + # bleh windows + case $host in + *cygwin* | mingw*) + major=`expr $current - $age` + versuffix="-$major" + ;; + esac + eval soname=\"$soname_spec\" + else + soname="$realname" + fi + + # Make a new name for the extract_expsyms_cmds to use + soroot="$soname" + soname=`$echo $soroot | ${SED} -e 's/^.*\///'` + newlib="libimp-`$echo $soname | ${SED} 's/^lib//;s/\.dll$//'`.a" + + # If the library has no export list, then create one now + if test -f "$output_objdir/$soname-def"; then : + else + $show "extracting exported symbol list from \`$soname'" + save_ifs="$IFS"; IFS='~' + cmds=$extract_expsyms_cmds + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" || exit $? + done + IFS="$save_ifs" + fi + + # Create $newlib + if test -f "$output_objdir/$newlib"; then :; else + $show "generating import library for \`$soname'" + save_ifs="$IFS"; IFS='~' + cmds=$old_archive_from_expsyms_cmds + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" || exit $? 
+ done + IFS="$save_ifs" + fi + # make sure the library variables are pointing to the new library + dir=$output_objdir + linklib=$newlib + fi # test -n "$old_archive_from_expsyms_cmds" + + if test "$linkmode" = prog || test "$mode" != relink; then + add_shlibpath= + add_dir= + add= + lib_linked=yes + case $hardcode_action in + immediate | unsupported) + if test "$hardcode_direct" = no; then + add="$dir/$linklib" + case $host in + *-*-sco3.2v5.0.[024]*) add_dir="-L$dir" ;; + *-*-sysv4*uw2*) add_dir="-L$dir" ;; + *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \ + *-*-unixware7*) add_dir="-L$dir" ;; + *-*-darwin* ) + # if the lib is a module then we can not link against + # it, someone is ignoring the new warnings I added + if /usr/bin/file -L $add 2> /dev/null | + $EGREP ": [^:]* bundle" >/dev/null ; then + $echo "** Warning, lib $linklib is a module, not a shared library" + if test -z "$old_library" ; then + $echo + $echo "** And there doesn't seem to be a static archive available" + $echo "** The link will probably fail, sorry" + else + add="$dir/$old_library" + fi + fi + esac + elif test "$hardcode_minus_L" = no; then + case $host in + *-*-sunos*) add_shlibpath="$dir" ;; + esac + add_dir="-L$dir" + add="-l$name" + elif test "$hardcode_shlibpath_var" = no; then + add_shlibpath="$dir" + add="-l$name" + else + lib_linked=no + fi + ;; + relink) + if test "$hardcode_direct" = yes; then + add="$dir/$linklib" + elif test "$hardcode_minus_L" = yes; then + add_dir="-L$dir" + # Try looking first in the location we're being installed to. + if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + add_dir="$add_dir -L$inst_prefix_dir$libdir" + ;; + esac + fi + add="-l$name" + elif test "$hardcode_shlibpath_var" = yes; then + add_shlibpath="$dir" + add="-l$name" + else + lib_linked=no + fi + ;; + *) lib_linked=no ;; + esac + + if test "$lib_linked" != yes; then + $echo "$modename: configuration error: unsupported hardcode properties" + exit $EXIT_FAILURE + fi + + if test -n "$add_shlibpath"; then + case :$compile_shlibpath: in + *":$add_shlibpath:"*) ;; + *) compile_shlibpath="$compile_shlibpath$add_shlibpath:" ;; + esac + fi + if test "$linkmode" = prog; then + test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs" + test -n "$add" && compile_deplibs="$add $compile_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + if test "$hardcode_direct" != yes && \ + test "$hardcode_minus_L" != yes && \ + test "$hardcode_shlibpath_var" = yes; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;; + esac + fi + fi + fi + + if test "$linkmode" = prog || test "$mode" = relink; then + add_shlibpath= + add_dir= + add= + # Finalize command for both is simple: just hardcode it. + if test "$hardcode_direct" = yes; then + add="$libdir/$linklib" + elif test "$hardcode_minus_L" = yes; then + add_dir="-L$libdir" + add="-l$name" + elif test "$hardcode_shlibpath_var" = yes; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;; + esac + add="-l$name" + elif test "$hardcode_automatic" = yes; then + if test -n "$inst_prefix_dir" && + test -f "$inst_prefix_dir$libdir/$linklib" ; then + add="$inst_prefix_dir$libdir/$linklib" + else + add="$libdir/$linklib" + fi + else + # We cannot seem to hardcode it, guess we'll fake it. + add_dir="-L$libdir" + # Try looking first in the location we're being installed to. 
+ if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + add_dir="$add_dir -L$inst_prefix_dir$libdir" + ;; + esac + fi + add="-l$name" + fi + + if test "$linkmode" = prog; then + test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs" + test -n "$add" && finalize_deplibs="$add $finalize_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + fi + fi + elif test "$linkmode" = prog; then + # Here we assume that one of hardcode_direct or hardcode_minus_L + # is not unsupported. This is valid on all known static and + # shared platforms. + if test "$hardcode_direct" != unsupported; then + test -n "$old_library" && linklib="$old_library" + compile_deplibs="$dir/$linklib $compile_deplibs" + finalize_deplibs="$dir/$linklib $finalize_deplibs" + else + compile_deplibs="-l$name -L$dir $compile_deplibs" + finalize_deplibs="-l$name -L$dir $finalize_deplibs" + fi + elif test "$build_libtool_libs" = yes; then + # Not a shared library + if test "$deplibs_check_method" != pass_all; then + # We're trying link a shared library against a static one + # but the system doesn't support it. + + # Just print a warning and add the library to dependency_libs so + # that the program can be linked against the static library. + $echo + $echo "*** Warning: This system can not link to static lib archive $lib." + $echo "*** I have the capability to make that library automatically link in when" + $echo "*** you link to this library. But I can only do this if you have a" + $echo "*** shared version of the library, which you do not appear to have." + if test "$module" = yes; then + $echo "*** But as you try to build a module library, libtool will still create " + $echo "*** a static module, that should work as long as the dlopening application" + $echo "*** is linked with the -dlopen flag to resolve symbols at runtime." + if test -z "$global_symbol_pipe"; then + $echo + $echo "*** However, this would only work if libtool was able to extract symbol" + $echo "*** lists from a program, using \`nm' or equivalent, but libtool could" + $echo "*** not find such a program. So, this module is probably useless." + $echo "*** \`nm' from GNU binutils and a full rebuild may help." + fi + if test "$build_old_libs" = no; then + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + else + deplibs="$dir/$old_library $deplibs" + link_static=yes + fi + fi # link shared/static library? + + if test "$linkmode" = lib; then + if test -n "$dependency_libs" && + { test "$hardcode_into_libs" != yes || + test "$build_old_libs" = yes || + test "$link_static" = yes; }; then + # Extract -R from dependency_libs + temp_deplibs= + for libdir in $dependency_libs; do + case $libdir in + -R*) temp_xrpath=`$echo "X$libdir" | $Xsed -e 's/^-R//'` + case " $xrpath " in + *" $temp_xrpath "*) ;; + *) xrpath="$xrpath $temp_xrpath";; + esac;; + *) temp_deplibs="$temp_deplibs $libdir";; + esac + done + dependency_libs="$temp_deplibs" + fi + + newlib_search_path="$newlib_search_path $absdir" + # Link against this library + test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs" + # ... 
and its dependency_libs + tmp_libs= + for deplib in $dependency_libs; do + newdependency_libs="$deplib $newdependency_libs" + if test "X$duplicate_deps" = "Xyes" ; then + case "$tmp_libs " in + *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; + esac + fi + tmp_libs="$tmp_libs $deplib" + done + + if test "$link_all_deplibs" != no; then + # Add the search paths of all dependency libraries + for deplib in $dependency_libs; do + case $deplib in + -L*) path="$deplib" ;; + *.la) + dir=`$echo "X$deplib" | $Xsed -e 's%/[^/]*$%%'` + test "X$dir" = "X$deplib" && dir="." + # We need an absolute path. + case $dir in + [\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;; + *) + absdir=`cd "$dir" && pwd` + if test -z "$absdir"; then + $echo "$modename: warning: cannot determine absolute directory name of \`$dir'" 1>&2 + absdir="$dir" + fi + ;; + esac + if grep "^installed=no" $deplib > /dev/null; then + path="$absdir/$objdir" + else + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` + if test -z "$libdir"; then + $echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2 + exit $EXIT_FAILURE + fi + if test "$absdir" != "$libdir"; then + $echo "$modename: warning: \`$deplib' seems to be moved" 1>&2 + fi + path="$absdir" + fi + depdepl= + case $host in + *-*-darwin*) + # we do not want to link against static libs, + # but need to link against shared + eval deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` + if test -n "$deplibrary_names" ; then + for tmp in $deplibrary_names ; do + depdepl=$tmp + done + if test -f "$path/$depdepl" ; then + depdepl="$path/$depdepl" + fi + # do not add paths which are already there + case " $newlib_search_path " in + *" $path "*) ;; + *) newlib_search_path="$newlib_search_path $path";; + esac + fi + path="" + ;; + *) + path="-L$path" + ;; + esac + ;; + -l*) + case $host in + *-*-darwin*) + # Again, we only want to link against shared libraries + eval tmp_libs=`$echo "X$deplib" | $Xsed -e "s,^\-l,,"` + for tmp in $newlib_search_path ; do + if test -f "$tmp/lib$tmp_libs.dylib" ; then + eval depdepl="$tmp/lib$tmp_libs.dylib" + break + fi + done + path="" + ;; + *) continue ;; + esac + ;; + *) continue ;; + esac + case " $deplibs " in + *" $path "*) ;; + *) deplibs="$path $deplibs" ;; + esac + case " $deplibs " in + *" $depdepl "*) ;; + *) deplibs="$depdepl $deplibs" ;; + esac + done + fi # link_all_deplibs != no + fi # linkmode = lib + done # for deplib in $libs + dependency_libs="$newdependency_libs" + if test "$pass" = dlpreopen; then + # Link the dlpreopened libraries before other libraries + for deplib in $save_deplibs; do + deplibs="$deplib $deplibs" + done + fi + if test "$pass" != dlopen; then + if test "$pass" != conv; then + # Make sure lib_search_path contains only unique directories. 
+ lib_search_path= + for dir in $newlib_search_path; do + case "$lib_search_path " in + *" $dir "*) ;; + *) lib_search_path="$lib_search_path $dir" ;; + esac + done + newlib_search_path= + fi + + if test "$linkmode,$pass" != "prog,link"; then + vars="deplibs" + else + vars="compile_deplibs finalize_deplibs" + fi + for var in $vars dependency_libs; do + # Add libraries to $var in reverse order + eval tmp_libs=\"\$$var\" + new_libs= + for deplib in $tmp_libs; do + # FIXME: Pedantically, this is the right thing to do, so + # that some nasty dependency loop isn't accidentally + # broken: + #new_libs="$deplib $new_libs" + # Pragmatically, this seems to cause very few problems in + # practice: + case $deplib in + -L*) new_libs="$deplib $new_libs" ;; + -R*) ;; + *) + # And here is the reason: when a library appears more + # than once as an explicit dependence of a library, or + # is implicitly linked in more than once by the + # compiler, it is considered special, and multiple + # occurrences thereof are not removed. Compare this + # with having the same library being listed as a + # dependency of multiple other libraries: in this case, + # we know (pedantically, we assume) the library does not + # need to be listed more than once, so we keep only the + # last copy. This is not always right, but it is rare + # enough that we require users that really mean to play + # such unportable linking tricks to link the library + # using -Wl,-lname, so that libtool does not consider it + # for duplicate removal. + case " $specialdeplibs " in + *" $deplib "*) new_libs="$deplib $new_libs" ;; + *) + case " $new_libs " in + *" $deplib "*) ;; + *) new_libs="$deplib $new_libs" ;; + esac + ;; + esac + ;; + esac + done + tmp_libs= + for deplib in $new_libs; do + case $deplib in + -L*) + case " $tmp_libs " in + *" $deplib "*) ;; + *) tmp_libs="$tmp_libs $deplib" ;; + esac + ;; + *) tmp_libs="$tmp_libs $deplib" ;; + esac + done + eval $var=\"$tmp_libs\" + done # for var + fi + # Last step: remove runtime libs from dependency_libs + # (they stay in deplibs) + tmp_libs= + for i in $dependency_libs ; do + case " $predeps $postdeps $compiler_lib_search_path " in + *" $i "*) + i="" + ;; + esac + if test -n "$i" ; then + tmp_libs="$tmp_libs $i" + fi + done + dependency_libs=$tmp_libs + done # for pass + if test "$linkmode" = prog; then + dlfiles="$newdlfiles" + dlprefiles="$newdlprefiles" + fi + + case $linkmode in + oldlib) + if test -n "$deplibs"; then + $echo "$modename: warning: \`-l' and \`-L' are ignored for archives" 1>&2 + fi + + if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then + $echo "$modename: warning: \`-dlopen' is ignored for archives" 1>&2 + fi + + if test -n "$rpath"; then + $echo "$modename: warning: \`-rpath' is ignored for archives" 1>&2 + fi + + if test -n "$xrpath"; then + $echo "$modename: warning: \`-R' is ignored for archives" 1>&2 + fi + + if test -n "$vinfo"; then + $echo "$modename: warning: \`-version-info/-version-number' is ignored for archives" 1>&2 + fi + + if test -n "$release"; then + $echo "$modename: warning: \`-release' is ignored for archives" 1>&2 + fi + + if test -n "$export_symbols" || test -n "$export_symbols_regex"; then + $echo "$modename: warning: \`-export-symbols' is ignored for archives" 1>&2 + fi + + # Now set the variables for building old libraries. + build_libtool_libs=no + oldlibs="$output" + objs="$objs$old_deplibs" + ;; + + lib) + # Make sure we only generate libraries of the form `libNAME.la'. 
+ case $outputname in + lib*) + name=`$echo "X$outputname" | $Xsed -e 's/\.la$//' -e 's/^lib//'` + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + ;; + *) + if test "$module" = no; then + $echo "$modename: libtool library \`$output' must begin with \`lib'" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + if test "$need_lib_prefix" != no; then + # Add the "lib" prefix for modules if required + name=`$echo "X$outputname" | $Xsed -e 's/\.la$//'` + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + else + libname=`$echo "X$outputname" | $Xsed -e 's/\.la$//'` + fi + ;; + esac + + if test -n "$objs"; then + if test "$deplibs_check_method" != pass_all; then + $echo "$modename: cannot build libtool library \`$output' from non-libtool objects on this host:$objs" 2>&1 + exit $EXIT_FAILURE + else + $echo + $echo "*** Warning: Linking the shared library $output against the non-libtool" + $echo "*** objects $objs is not portable!" + libobjs="$libobjs $objs" + fi + fi + + if test "$dlself" != no; then + $echo "$modename: warning: \`-dlopen self' is ignored for libtool libraries" 1>&2 + fi + + set dummy $rpath + if test "$#" -gt 2; then + $echo "$modename: warning: ignoring multiple \`-rpath's for a libtool library" 1>&2 + fi + install_libdir="$2" + + oldlibs= + if test -z "$rpath"; then + if test "$build_libtool_libs" = yes; then + # Building a libtool convenience library. + # Some compilers have problems with a `.al' extension so + # convenience libraries should have the same extension an + # archive normally would. + oldlibs="$output_objdir/$libname.$libext $oldlibs" + build_libtool_libs=convenience + build_old_libs=yes + fi + + if test -n "$vinfo"; then + $echo "$modename: warning: \`-version-info/-version-number' is ignored for convenience libraries" 1>&2 + fi + + if test -n "$release"; then + $echo "$modename: warning: \`-release' is ignored for convenience libraries" 1>&2 + fi + else + + # Parse the version information argument. + save_ifs="$IFS"; IFS=':' + set dummy $vinfo 0 0 0 + IFS="$save_ifs" + + if test -n "$8"; then + $echo "$modename: too many parameters to \`-version-info'" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + # convert absolute version numbers to libtool ages + # this retains compatibility with .la files and attempts + # to make the code below a bit more comprehensible + + case $vinfo_number in + yes) + number_major="$2" + number_minor="$3" + number_revision="$4" + # + # There are really only two kinds -- those that + # use the current revision as the major version + # and those that subtract age and use age as + # a minor version. But, then there is irix + # which has an extra 1 added just for fun + # + case $version_type in + darwin|linux|osf|windows) + current=`expr $number_major + $number_minor` + age="$number_minor" + revision="$number_revision" + ;; + freebsd-aout|freebsd-elf|sunos) + current="$number_major" + revision="$number_minor" + age="0" + ;; + irix|nonstopux) + current=`expr $number_major + $number_minor - 1` + age="$number_minor" + revision="$number_minor" + ;; + esac + ;; + no) + current="$2" + revision="$3" + age="$4" + ;; + esac + + # Check that each of the things are valid numbers. 
+ case $current in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + $echo "$modename: CURRENT \`$current' must be a nonnegative integer" 1>&2 + $echo "$modename: \`$vinfo' is not valid version information" 1>&2 + exit $EXIT_FAILURE + ;; + esac + + case $revision in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + $echo "$modename: REVISION \`$revision' must be a nonnegative integer" 1>&2 + $echo "$modename: \`$vinfo' is not valid version information" 1>&2 + exit $EXIT_FAILURE + ;; + esac + + case $age in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + $echo "$modename: AGE \`$age' must be a nonnegative integer" 1>&2 + $echo "$modename: \`$vinfo' is not valid version information" 1>&2 + exit $EXIT_FAILURE + ;; + esac + + if test "$age" -gt "$current"; then + $echo "$modename: AGE \`$age' is greater than the current interface number \`$current'" 1>&2 + $echo "$modename: \`$vinfo' is not valid version information" 1>&2 + exit $EXIT_FAILURE + fi + + # Calculate the version variables. + major= + versuffix= + verstring= + case $version_type in + none) ;; + + darwin) + # Like Linux, but with the current version available in + # verstring for coding it into the library header + major=.`expr $current - $age` + versuffix="$major.$age.$revision" + # Darwin ld doesn't like 0 for these options... + minor_current=`expr $current + 1` + verstring="${wl}-compatibility_version ${wl}$minor_current ${wl}-current_version ${wl}$minor_current.$revision" + ;; + + freebsd-aout) + major=".$current" + versuffix=".$current.$revision"; + ;; + + freebsd-elf) + major=".$current" + versuffix=".$current"; + ;; + + irix | nonstopux) + major=`expr $current - $age + 1` + + case $version_type in + nonstopux) verstring_prefix=nonstopux ;; + *) verstring_prefix=sgi ;; + esac + verstring="$verstring_prefix$major.$revision" + + # Add in all the interfaces that we are compatible with. + loop=$revision + while test "$loop" -ne 0; do + iface=`expr $revision - $loop` + loop=`expr $loop - 1` + verstring="$verstring_prefix$major.$iface:$verstring" + done + + # Before this point, $major must not contain `.'. + major=.$major + versuffix="$major.$revision" + ;; + + linux) + major=.`expr $current - $age` + versuffix="$major.$age.$revision" + ;; + + osf) + major=.`expr $current - $age` + versuffix=".$current.$age.$revision" + verstring="$current.$age.$revision" + + # Add in all the interfaces that we are compatible with. + loop=$age + while test "$loop" -ne 0; do + iface=`expr $current - $loop` + loop=`expr $loop - 1` + verstring="$verstring:${iface}.0" + done + + # Make executables depend on our current version. + verstring="$verstring:${current}.0" + ;; + + sunos) + major=".$current" + versuffix=".$current.$revision" + ;; + + windows) + # Use '-' rather than '.', since we only want one + # extension on DOS 8.3 filesystems. + major=`expr $current - $age` + versuffix="-$major" + ;; + + *) + $echo "$modename: unknown library version type \`$version_type'" 1>&2 + $echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2 + exit $EXIT_FAILURE + ;; + esac + + # Clear the version info if we defaulted, and they specified a release. 
+ if test -z "$vinfo" && test -n "$release"; then + major= + case $version_type in + darwin) + # we can't check for "0.0" in archive_cmds due to quoting + # problems, so we reset it completely + verstring= + ;; + *) + verstring="0.0" + ;; + esac + if test "$need_version" = no; then + versuffix= + else + versuffix=".0.0" + fi + fi + + # Remove version info from name if versioning should be avoided + if test "$avoid_version" = yes && test "$need_version" = no; then + major= + versuffix= + verstring="" + fi + + # Check to see if the archive will have undefined symbols. + if test "$allow_undefined" = yes; then + if test "$allow_undefined_flag" = unsupported; then + $echo "$modename: warning: undefined symbols not allowed in $host shared libraries" 1>&2 + build_libtool_libs=no + build_old_libs=yes + fi + else + # Don't allow undefined symbols. + allow_undefined_flag="$no_undefined_flag" + fi + fi + + if test "$mode" != relink; then + # Remove our outputs, but don't remove object files since they + # may have been created when compiling PIC objects. + removelist= + tempremovelist=`$echo "$output_objdir/*"` + for p in $tempremovelist; do + case $p in + *.$objext) + ;; + $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/${libname}${release}.*) + if test "X$precious_files_regex" != "X"; then + if echo $p | $EGREP -e "$precious_files_regex" >/dev/null 2>&1 + then + continue + fi + fi + removelist="$removelist $p" + ;; + *) ;; + esac + done + if test -n "$removelist"; then + $show "${rm}r $removelist" + $run ${rm}r $removelist + fi + fi + + # Now set the variables for building old libraries. + if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then + oldlibs="$oldlibs $output_objdir/$libname.$libext" + + # Transform .lo files to .o files. + oldobjs="$objs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e "$lo2o" | $NL2SP` + fi + + # Eliminate all temporary directories. + for path in $notinst_path; do + lib_search_path=`$echo "$lib_search_path " | ${SED} -e "s% $path % %g"` + deplibs=`$echo "$deplibs " | ${SED} -e "s% -L$path % %g"` + dependency_libs=`$echo "$dependency_libs " | ${SED} -e "s% -L$path % %g"` + done + + if test -n "$xrpath"; then + # If the user specified any rpath flags, then add them. + temp_xrpath= + for libdir in $xrpath; do + temp_xrpath="$temp_xrpath -R$libdir" + case "$finalize_rpath " in + *" $libdir "*) ;; + *) finalize_rpath="$finalize_rpath $libdir" ;; + esac + done + if test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes; then + dependency_libs="$temp_xrpath $dependency_libs" + fi + fi + + # Make sure dlfiles contains only unique files that won't be dlpreopened + old_dlfiles="$dlfiles" + dlfiles= + for lib in $old_dlfiles; do + case " $dlprefiles $dlfiles " in + *" $lib "*) ;; + *) dlfiles="$dlfiles $lib" ;; + esac + done + + # Make sure dlprefiles contains only unique files + old_dlprefiles="$dlprefiles" + dlprefiles= + for lib in $old_dlprefiles; do + case "$dlprefiles " in + *" $lib "*) ;; + *) dlprefiles="$dlprefiles $lib" ;; + esac + done + + if test "$build_libtool_libs" = yes; then + if test -n "$rpath"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos*) + # these systems don't actually have a c library (as such)! + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C library is in the System framework + deplibs="$deplibs -framework System" + ;; + *-*-netbsd*) + # Don't link with libc until the a.out ld.so is fixed. 
+ ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc due to us having libc/libc_r. + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + ;; + *) + # Add libc to deplibs on all other systems if necessary. + if test "$build_libtool_need_lc" = "yes"; then + deplibs="$deplibs -lc" + fi + ;; + esac + fi + + # Transform deplibs into only deplibs that can be linked in shared. + name_save=$name + libname_save=$libname + release_save=$release + versuffix_save=$versuffix + major_save=$major + # I'm not sure if I'm treating the release correctly. I think + # release should show up in the -l (ie -lgmp5) so we don't want to + # add it in twice. Is that correct? + release="" + versuffix="" + major="" + newdeplibs= + droppeddeps=no + case $deplibs_check_method in + pass_all) + # Don't check for shared/static. Everything works. + # This might be a little naive. We might want to check + # whether the library exists or not. But this is on + # osf3 & osf4 and I'm not really sure... Just + # implementing what was already the behavior. + newdeplibs=$deplibs + ;; + test_compile) + # This code stresses the "libraries are programs" paradigm to its + # limits. Maybe even breaks it. We compile a program, linking it + # against the deplibs as a proxy for the library. Then we can check + # whether they linked in statically or dynamically with ldd. + $rm conftest.c + cat > conftest.c </dev/null` + for potent_lib in $potential_libs; do + # Follow soft links. + if ls -lLd "$potent_lib" 2>/dev/null \ + | grep " -> " >/dev/null; then + continue + fi + # The statement above tries to avoid entering an + # endless loop below, in case of cyclic links. + # We might still enter an endless loop, since a link + # loop can be closed while we follow links, + # but so what? + potlib="$potent_lib" + while test -h "$potlib" 2>/dev/null; do + potliblink=`ls -ld $potlib | ${SED} 's/.* -> //'` + case $potliblink in + [\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";; + *) potlib=`$echo "X$potlib" | $Xsed -e 's,[^/]*$,,'`"$potliblink";; + esac + done + # It is ok to link against an archive when + # building a shared library. + if $AR -t $potlib > /dev/null 2>&1; then + newdeplibs="$newdeplibs $a_deplib" + a_deplib="" + break 2 + fi + if eval $file_magic_cmd \"\$potlib\" 2>/dev/null \ + | ${SED} 10q \ + | $EGREP "$file_magic_regex" > /dev/null; then + newdeplibs="$newdeplibs $a_deplib" + a_deplib="" + break 2 + fi + done + done + fi + if test -n "$a_deplib" ; then + droppeddeps=yes + $echo + $echo "*** Warning: linker path does not have real file for library $a_deplib." + $echo "*** I have the capability to make that library automatically link in when" + $echo "*** you link to this library. But I can only do this if you have a" + $echo "*** shared version of the library, which you do not appear to have" + $echo "*** because I did check the linker path looking for a file starting" + if test -z "$potlib" ; then + $echo "*** with $libname but no candidates were found. (...for file magic test)" + else + $echo "*** with $libname and none of the candidates passed a file format test" + $echo "*** using a file magic. Last file checked: $potlib" + fi + fi + else + # Add a -L argument. + newdeplibs="$newdeplibs $a_deplib" + fi + done # Gone through all deplibs. 
+ ;; + match_pattern*) + set dummy $deplibs_check_method + match_pattern_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"` + for a_deplib in $deplibs; do + name=`expr $a_deplib : '-l\(.*\)'` + # If $name is empty we are operating on a -L argument. + if test -n "$name" && test "$name" != "0"; then + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + case " $predeps $postdeps " in + *" $a_deplib "*) + newdeplibs="$newdeplibs $a_deplib" + a_deplib="" + ;; + esac + fi + if test -n "$a_deplib" ; then + libname=`eval \\$echo \"$libname_spec\"` + for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do + potential_libs=`ls $i/$libname[.-]* 2>/dev/null` + for potent_lib in $potential_libs; do + potlib="$potent_lib" # see symlink-check above in file_magic test + if eval $echo \"$potent_lib\" 2>/dev/null \ + | ${SED} 10q \ + | $EGREP "$match_pattern_regex" > /dev/null; then + newdeplibs="$newdeplibs $a_deplib" + a_deplib="" + break 2 + fi + done + done + fi + if test -n "$a_deplib" ; then + droppeddeps=yes + $echo + $echo "*** Warning: linker path does not have real file for library $a_deplib." + $echo "*** I have the capability to make that library automatically link in when" + $echo "*** you link to this library. But I can only do this if you have a" + $echo "*** shared version of the library, which you do not appear to have" + $echo "*** because I did check the linker path looking for a file starting" + if test -z "$potlib" ; then + $echo "*** with $libname but no candidates were found. (...for regex pattern test)" + else + $echo "*** with $libname and none of the candidates passed a file format test" + $echo "*** using a regex pattern. Last file checked: $potlib" + fi + fi + else + # Add a -L argument. + newdeplibs="$newdeplibs $a_deplib" + fi + done # Gone through all deplibs. + ;; + none | unknown | *) + newdeplibs="" + tmp_deplibs=`$echo "X $deplibs" | $Xsed -e 's/ -lc$//' \ + -e 's/ -[LR][^ ]*//g'` + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + for i in $predeps $postdeps ; do + # can't use Xsed below, because $i might contain '/' + tmp_deplibs=`$echo "X $tmp_deplibs" | ${SED} -e "1s,^X,," -e "s,$i,,"` + done + fi + if $echo "X $tmp_deplibs" | $Xsed -e 's/[ ]//g' \ + | grep . >/dev/null; then + $echo + if test "X$deplibs_check_method" = "Xnone"; then + $echo "*** Warning: inter-library dependencies are not supported in this platform." + else + $echo "*** Warning: inter-library dependencies are not known to be supported." + fi + $echo "*** All declared inter-library dependencies are being dropped." + droppeddeps=yes + fi + ;; + esac + versuffix=$versuffix_save + major=$major_save + release=$release_save + libname=$libname_save + name=$name_save + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library is the System framework + newdeplibs=`$echo "X $newdeplibs" | $Xsed -e 's/ -lc / -framework System /'` + ;; + esac + + if test "$droppeddeps" = yes; then + if test "$module" = yes; then + $echo + $echo "*** Warning: libtool could not satisfy all declared inter-library" + $echo "*** dependencies of module $libname. Therefore, libtool will create" + $echo "*** a static module, that should work as long as the dlopening" + $echo "*** application is linked with the -dlopen flag." 
+ if test -z "$global_symbol_pipe"; then + $echo + $echo "*** However, this would only work if libtool was able to extract symbol" + $echo "*** lists from a program, using \`nm' or equivalent, but libtool could" + $echo "*** not find such a program. So, this module is probably useless." + $echo "*** \`nm' from GNU binutils and a full rebuild may help." + fi + if test "$build_old_libs" = no; then + oldlibs="$output_objdir/$libname.$libext" + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + else + $echo "*** The inter-library dependencies that have been dropped here will be" + $echo "*** automatically added whenever a program is linked with this library" + $echo "*** or is declared to -dlopen it." + + if test "$allow_undefined" = no; then + $echo + $echo "*** Since this library must not contain undefined symbols," + $echo "*** because either the platform does not support them or" + $echo "*** it was explicitly requested with -no-undefined," + $echo "*** libtool will only create a static version of it." + if test "$build_old_libs" = no; then + oldlibs="$output_objdir/$libname.$libext" + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + fi + fi + # Done checking deplibs! + deplibs=$newdeplibs + fi + + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $deplibs " in + *" -L$path/$objdir "*) + new_libs="$new_libs -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) new_libs="$new_libs $deplib" ;; + esac + ;; + *) new_libs="$new_libs $deplib" ;; + esac + done + deplibs="$new_libs" + + + # All the library-specific variables (install_libdir is set above). + library_names= + old_library= + dlname= + + # Test again, we may have decided not to build it any more + if test "$build_libtool_libs" = yes; then + if test "$hardcode_into_libs" = yes; then + # Hardcode the library paths + hardcode_libdirs= + dep_rpath= + rpath="$finalize_rpath" + test "$mode" != relink && rpath="$compile_rpath$rpath" + for libdir in $rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + dep_rpath="$dep_rpath $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) perm_rpath="$perm_rpath $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + if test -n "$hardcode_libdir_flag_spec_ld"; then + eval dep_rpath=\"$hardcode_libdir_flag_spec_ld\" + else + eval dep_rpath=\"$hardcode_libdir_flag_spec\" + fi + fi + if test -n "$runpath_var" && test -n "$perm_rpath"; then + # We should set the runpath_var. 
+ rpath= + for dir in $perm_rpath; do + rpath="$rpath$dir:" + done + eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var" + fi + test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs" + fi + + shlibpath="$finalize_shlibpath" + test "$mode" != relink && shlibpath="$compile_shlibpath$shlibpath" + if test -n "$shlibpath"; then + eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var" + fi + + # Get the real and link names of the library. + eval shared_ext=\"$shrext_cmds\" + eval library_names=\"$library_names_spec\" + set dummy $library_names + realname="$2" + shift; shift + + if test -n "$soname_spec"; then + eval soname=\"$soname_spec\" + else + soname="$realname" + fi + if test -z "$dlname"; then + dlname=$soname + fi + + lib="$output_objdir/$realname" + linknames= + for link + do + linknames="$linknames $link" + done + + # Use standard objects if they are pic + test -z "$pic_flag" && libobjs=`$echo "X$libobjs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then + $show "generating symbol list for \`$libname.la'" + export_symbols="$output_objdir/$libname.exp" + $run $rm $export_symbols + cmds=$export_symbols_cmds + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + if len=`expr "X$cmd" : ".*"` && + test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then + $show "$cmd" + $run eval "$cmd" || exit $? + skipped_export=false + else + # The command line is too long to execute in one step. + $show "using reloadable object file for export list..." + skipped_export=: + # Break out early, otherwise skipped_export may be + # set to false by a later but shorter cmd. + break + fi + done + IFS="$save_ifs" + if test -n "$export_symbols_regex"; then + $show "$EGREP -e \"$export_symbols_regex\" \"$export_symbols\" > \"${export_symbols}T\"" + $run eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + $show "$mv \"${export_symbols}T\" \"$export_symbols\"" + $run eval '$mv "${export_symbols}T" "$export_symbols"' + fi + fi + fi + + if test -n "$export_symbols" && test -n "$include_expsyms"; then + $run eval '$echo "X$include_expsyms" | $SP2NL >> "$export_symbols"' + fi + + tmp_deplibs= + for test_deplib in $deplibs; do + case " $convenience " in + *" $test_deplib "*) ;; + *) + tmp_deplibs="$tmp_deplibs $test_deplib" + ;; + esac + done + deplibs="$tmp_deplibs" + + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + else + gentop="$output_objdir/${outputname}x" + generated="$generated $gentop" + + func_extract_archives $gentop $convenience + libobjs="$libobjs $func_extract_archives_result" + fi + fi + + if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then + eval flag=\"$thread_safe_flag_spec\" + linker_flags="$linker_flags $flag" + fi + + # Make a backup of the uninstalled library when relinking + if test "$mode" = relink; then + $run eval '(cd $output_objdir && $rm ${realname}U && $mv $realname ${realname}U)' || exit $? + fi + + # Do each of the archive commands. 
+ if test "$module" = yes && test -n "$module_cmds" ; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + eval test_cmds=\"$module_expsym_cmds\" + cmds=$module_expsym_cmds + else + eval test_cmds=\"$module_cmds\" + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + eval test_cmds=\"$archive_expsym_cmds\" + cmds=$archive_expsym_cmds + else + eval test_cmds=\"$archive_cmds\" + cmds=$archive_cmds + fi + fi + + if test "X$skipped_export" != "X:" && + len=`expr "X$test_cmds" : ".*" 2>/dev/null` && + test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then + : + else + # The command line is too long to link in one step, link piecewise. + $echo "creating reloadable object files..." + + # Save the value of $output and $libobjs because we want to + # use them later. If we have whole_archive_flag_spec, we + # want to use save_libobjs as it was before + # whole_archive_flag_spec was expanded, because we can't + # assume the linker understands whole_archive_flag_spec. + # This may have to be revisited, in case too many + # convenience libraries get linked in and end up exceeding + # the spec. + if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + fi + save_output=$output + output_la=`$echo "X$output" | $Xsed -e "$basename"` + + # Clear the reloadable object creation command queue and + # initialize k to one. + test_cmds= + concat_cmds= + objlist= + delfiles= + last_robj= + k=1 + output=$output_objdir/$output_la-${k}.$objext + # Loop over the list of objects to be linked. + for obj in $save_libobjs + do + eval test_cmds=\"$reload_cmds $objlist $last_robj\" + if test "X$objlist" = X || + { len=`expr "X$test_cmds" : ".*" 2>/dev/null` && + test "$len" -le "$max_cmd_len"; }; then + objlist="$objlist $obj" + else + # The command $test_cmds is almost too long, add a + # command to the queue. + if test "$k" -eq 1 ; then + # The first file doesn't have a previous command to add. + eval concat_cmds=\"$reload_cmds $objlist $last_robj\" + else + # All subsequent reloadable object files will link in + # the last one created. + eval concat_cmds=\"\$concat_cmds~$reload_cmds $objlist $last_robj\" + fi + last_robj=$output_objdir/$output_la-${k}.$objext + k=`expr $k + 1` + output=$output_objdir/$output_la-${k}.$objext + objlist=$obj + len=1 + fi + done + # Handle the remaining objects by creating one last + # reloadable object file. All subsequent reloadable object + # files will link in the last one created. + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\${concat_cmds}$reload_cmds $objlist $last_robj\" + + if ${skipped_export-false}; then + $show "generating symbol list for \`$libname.la'" + export_symbols="$output_objdir/$libname.exp" + $run $rm $export_symbols + libobjs=$output + # Append the command to create the export file. + eval concat_cmds=\"\$concat_cmds~$export_symbols_cmds\" + fi + + # Set up a command to remove the reloadable object files + # after they are used. + i=0 + while test "$i" -lt "$k" + do + i=`expr $i + 1` + delfiles="$delfiles $output_objdir/$output_la-${i}.$objext" + done + + $echo "creating a temporary reloadable object file: $output" + + # Loop through the commands generated above and execute them. + save_ifs="$IFS"; IFS='~' + for cmd in $concat_cmds; do + IFS="$save_ifs" + $show "$cmd" + $run eval "$cmd" || exit $? + done + IFS="$save_ifs" + + libobjs=$output + # Restore the value of output. 
+ output=$save_output + + if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + fi + # Expand the library linking commands again to reset the + # value of $libobjs for piecewise linking. + + # Do each of the archive commands. + if test "$module" = yes && test -n "$module_cmds" ; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + cmds=$module_expsym_cmds + else + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + cmds=$archive_expsym_cmds + else + cmds=$archive_cmds + fi + fi + + # Append the command to remove the reloadable object files + # to the just-reset $cmds. + eval cmds=\"\$cmds~\$rm $delfiles\" + fi + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test "$mode" = relink; then + $run eval '(cd $output_objdir && $rm ${realname}T && $mv ${realname}U $realname)' + fi + + exit $lt_exit + } + done + IFS="$save_ifs" + + # Restore the uninstalled library and exit + if test "$mode" = relink; then + $run eval '(cd $output_objdir && $rm ${realname}T && $mv $realname ${realname}T && $mv "$realname"U $realname)' || exit $? + + if test -n "$convenience"; then + if test -z "$whole_archive_flag_spec"; then + $show "${rm}r $gentop" + $run ${rm}r "$gentop" + fi + fi + + exit $EXIT_SUCCESS + fi + + # Create links to the real library. + for linkname in $linknames; do + if test "$realname" != "$linkname"; then + $show "(cd $output_objdir && $rm $linkname && $LN_S $realname $linkname)" + $run eval '(cd $output_objdir && $rm $linkname && $LN_S $realname $linkname)' || exit $? + fi + done + + # If -module or -export-dynamic was specified, set the dlname. + if test "$module" = yes || test "$export_dynamic" = yes; then + # On all known operating systems, these are identical. + dlname="$soname" + fi + fi + ;; + + obj) + if test -n "$deplibs"; then + $echo "$modename: warning: \`-l' and \`-L' are ignored for objects" 1>&2 + fi + + if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then + $echo "$modename: warning: \`-dlopen' is ignored for objects" 1>&2 + fi + + if test -n "$rpath"; then + $echo "$modename: warning: \`-rpath' is ignored for objects" 1>&2 + fi + + if test -n "$xrpath"; then + $echo "$modename: warning: \`-R' is ignored for objects" 1>&2 + fi + + if test -n "$vinfo"; then + $echo "$modename: warning: \`-version-info' is ignored for objects" 1>&2 + fi + + if test -n "$release"; then + $echo "$modename: warning: \`-release' is ignored for objects" 1>&2 + fi + + case $output in + *.lo) + if test -n "$objs$old_deplibs"; then + $echo "$modename: cannot build library object \`$output' from non-libtool objects" 1>&2 + exit $EXIT_FAILURE + fi + libobj="$output" + obj=`$echo "X$output" | $Xsed -e "$lo2o"` + ;; + *) + libobj= + obj="$output" + ;; + esac + + # Delete the old objects. + $run $rm $obj $libobj + + # Objects from convenience libraries. This assumes + # single-version convenience libraries. Whenever we create + # different ones for PIC/non-PIC, this we'll have to duplicate + # the extraction. 
+ reload_conv_objs= + gentop= + # reload_cmds runs $LD directly, so let us get rid of + # -Wl from whole_archive_flag_spec + wl= + + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec"; then + eval reload_conv_objs=\"\$reload_objs $whole_archive_flag_spec\" + else + gentop="$output_objdir/${obj}x" + generated="$generated $gentop" + + func_extract_archives $gentop $convenience + reload_conv_objs="$reload_objs $func_extract_archives_result" + fi + fi + + # Create the old-style object. + reload_objs="$objs$old_deplibs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}$'/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test + + output="$obj" + cmds=$reload_cmds + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" || exit $? + done + IFS="$save_ifs" + + # Exit if we aren't doing a library object file. + if test -z "$libobj"; then + if test -n "$gentop"; then + $show "${rm}r $gentop" + $run ${rm}r $gentop + fi + + exit $EXIT_SUCCESS + fi + + if test "$build_libtool_libs" != yes; then + if test -n "$gentop"; then + $show "${rm}r $gentop" + $run ${rm}r $gentop + fi + + # Create an invalid libtool object if no PIC, so that we don't + # accidentally link it into a program. + # $show "echo timestamp > $libobj" + # $run eval "echo timestamp > $libobj" || exit $? + exit $EXIT_SUCCESS + fi + + if test -n "$pic_flag" || test "$pic_mode" != default; then + # Only do commands if we really have different PIC objects. + reload_objs="$libobjs $reload_conv_objs" + output="$libobj" + cmds=$reload_cmds + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" || exit $? + done + IFS="$save_ifs" + fi + + if test -n "$gentop"; then + $show "${rm}r $gentop" + $run ${rm}r $gentop + fi + + exit $EXIT_SUCCESS + ;; + + prog) + case $host in + *cygwin*) output=`$echo $output | ${SED} -e 's,.exe$,,;s,$,.exe,'` ;; + esac + if test -n "$vinfo"; then + $echo "$modename: warning: \`-version-info' is ignored for programs" 1>&2 + fi + + if test -n "$release"; then + $echo "$modename: warning: \`-release' is ignored for programs" 1>&2 + fi + + if test "$preload" = yes; then + if test "$dlopen_support" = unknown && test "$dlopen_self" = unknown && + test "$dlopen_self_static" = unknown; then + $echo "$modename: warning: \`AC_LIBTOOL_DLOPEN' not used. Assuming no dlopen support." 
+ fi + fi + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library is the System framework + compile_deplibs=`$echo "X $compile_deplibs" | $Xsed -e 's/ -lc / -framework System /'` + finalize_deplibs=`$echo "X $finalize_deplibs" | $Xsed -e 's/ -lc / -framework System /'` + ;; + esac + + case $host in + *darwin*) + # Don't allow lazy linking, it breaks C++ global constructors + if test "$tagname" = CXX ; then + compile_command="$compile_command ${wl}-bind_at_load" + finalize_command="$finalize_command ${wl}-bind_at_load" + fi + ;; + esac + + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $compile_deplibs " in + *" -L$path/$objdir "*) + new_libs="$new_libs -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $compile_deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) new_libs="$new_libs $deplib" ;; + esac + ;; + *) new_libs="$new_libs $deplib" ;; + esac + done + compile_deplibs="$new_libs" + + + compile_command="$compile_command $compile_deplibs" + finalize_command="$finalize_command $finalize_deplibs" + + if test -n "$rpath$xrpath"; then + # If the user specified any rpath flags, then add them. + for libdir in $rpath $xrpath; do + # This is the magic to use -rpath. + case "$finalize_rpath " in + *" $libdir "*) ;; + *) finalize_rpath="$finalize_rpath $libdir" ;; + esac + done + fi + + # Now hardcode the library paths + rpath= + hardcode_libdirs= + for libdir in $compile_rpath $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + rpath="$rpath $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) perm_rpath="$perm_rpath $libdir" ;; + esac + fi + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*) + testbindir=`$echo "X$libdir" | $Xsed -e 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$libdir:"*) ;; + *) dllsearchpath="$dllsearchpath:$libdir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + *) dllsearchpath="$dllsearchpath:$testbindir";; + esac + ;; + esac + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + compile_rpath="$rpath" + + rpath= + hardcode_libdirs= + for libdir in $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. 
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + rpath="$rpath $flag" + fi + elif test -n "$runpath_var"; then + case "$finalize_perm_rpath " in + *" $libdir "*) ;; + *) finalize_perm_rpath="$finalize_perm_rpath $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + finalize_rpath="$rpath" + + if test -n "$libobjs" && test "$build_old_libs" = yes; then + # Transform all the library objects into standard objects. + compile_command=`$echo "X$compile_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` + finalize_command=`$echo "X$finalize_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` + fi + + dlsyms= + if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then + if test -n "$NM" && test -n "$global_symbol_pipe"; then + dlsyms="${outputname}S.c" + else + $echo "$modename: not configured to extract global symbols from dlpreopened files" 1>&2 + fi + fi + + if test -n "$dlsyms"; then + case $dlsyms in + "") ;; + *.c) + # Discover the nlist of each of the dlfiles. + nlist="$output_objdir/${outputname}.nm" + + $show "$rm $nlist ${nlist}S ${nlist}T" + $run $rm "$nlist" "${nlist}S" "${nlist}T" + + # Parse the name list into a source file. + $show "creating $output_objdir/$dlsyms" + + test -z "$run" && $echo > "$output_objdir/$dlsyms" "\ +/* $dlsyms - symbol resolution table for \`$outputname' dlsym emulation. */ +/* Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP */ + +#ifdef __cplusplus +extern \"C\" { +#endif + +/* Prevent the only kind of declaration conflicts we can make. */ +#define lt_preloaded_symbols some_other_symbol + +/* External symbol declarations for the compiler. */\ +" + + if test "$dlself" = yes; then + $show "generating symbol list for \`$output'" + + test -z "$run" && $echo ': @PROGRAM@ ' > "$nlist" + + # Add our own program objects to the symbol list. 
+ progfiles=`$echo "X$objs$old_deplibs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` + for arg in $progfiles; do + $show "extracting global C symbols from \`$arg'" + $run eval "$NM $arg | $global_symbol_pipe >> '$nlist'" + done + + if test -n "$exclude_expsyms"; then + $run eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T' + $run eval '$mv "$nlist"T "$nlist"' + fi + + if test -n "$export_symbols_regex"; then + $run eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T' + $run eval '$mv "$nlist"T "$nlist"' + fi + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + export_symbols="$output_objdir/$outputname.exp" + $run $rm $export_symbols + $run eval "${SED} -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"' + case $host in + *cygwin* | *mingw* ) + $run eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' + $run eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"' + ;; + esac + else + $run eval "${SED} -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"' + $run eval 'grep -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T' + $run eval 'mv "$nlist"T "$nlist"' + case $host in + *cygwin* | *mingw* ) + $run eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' + $run eval 'cat "$nlist" >> "$output_objdir/$outputname.def"' + ;; + esac + fi + fi + + for arg in $dlprefiles; do + $show "extracting global C symbols from \`$arg'" + name=`$echo "$arg" | ${SED} -e 's%^.*/%%'` + $run eval '$echo ": $name " >> "$nlist"' + $run eval "$NM $arg | $global_symbol_pipe >> '$nlist'" + done + + if test -z "$run"; then + # Make sure we have at least an empty file. + test -f "$nlist" || : > "$nlist" + + if test -n "$exclude_expsyms"; then + $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T + $mv "$nlist"T "$nlist" + fi + + # Try sorting and uniquifying the output. + if grep -v "^: " < "$nlist" | + if sort -k 3 /dev/null 2>&1; then + sort -k 3 + else + sort +2 + fi | + uniq > "$nlist"S; then + : + else + grep -v "^: " < "$nlist" > "$nlist"S + fi + + if test -f "$nlist"S; then + eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$dlsyms"' + else + $echo '/* NONE */' >> "$output_objdir/$dlsyms" + fi + + $echo >> "$output_objdir/$dlsyms" "\ + +#undef lt_preloaded_symbols + +#if defined (__STDC__) && __STDC__ +# define lt_ptr void * +#else +# define lt_ptr char * +# define const +#endif + +/* The mapping between symbol names and symbols. */ +" + + case $host in + *cygwin* | *mingw* ) + $echo >> "$output_objdir/$dlsyms" "\ +/* DATA imports from DLLs on WIN32 can't be const, because + runtime relocations are performed -- see ld's documentation + on pseudo-relocs */ +struct { +" + ;; + * ) + $echo >> "$output_objdir/$dlsyms" "\ +const struct { +" + ;; + esac + + + $echo >> "$output_objdir/$dlsyms" "\ + const char *name; + lt_ptr address; +} +lt_preloaded_symbols[] = +{\ +" + + eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$dlsyms" + + $echo >> "$output_objdir/$dlsyms" "\ + {0, (lt_ptr) 0} +}; + +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt_preloaded_symbols; +} +#endif + +#ifdef __cplusplus +} +#endif\ +" + fi + + pic_flag_for_symtable= + case $host in + # compiling the symbol table file with pic_flag works around + # a FreeBSD bug that causes programs to crash when -lm is + # linked before any other PIC object. 
But we must not use + # pic_flag when linking with -static. The problem exists in + # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1. + *-*-freebsd2*|*-*-freebsd3.0*|*-*-freebsdelf3.0*) + case "$compile_command " in + *" -static "*) ;; + *) pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND";; + esac;; + *-*-hpux*) + case "$compile_command " in + *" -static "*) ;; + *) pic_flag_for_symtable=" $pic_flag";; + esac + esac + + # Now compile the dynamic symbol file. + $show "(cd $output_objdir && $LTCC $LTCFLAGS -c$no_builtin_flag$pic_flag_for_symtable \"$dlsyms\")" + $run eval '(cd $output_objdir && $LTCC $LTCFLAGS -c$no_builtin_flag$pic_flag_for_symtable "$dlsyms")' || exit $? + + # Clean up the generated files. + $show "$rm $output_objdir/$dlsyms $nlist ${nlist}S ${nlist}T" + $run $rm "$output_objdir/$dlsyms" "$nlist" "${nlist}S" "${nlist}T" + + # Transform the symbol file into the correct name. + case $host in + *cygwin* | *mingw* ) + if test -f "$output_objdir/${outputname}.def" ; then + compile_command=`$echo "X$compile_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}.def $output_objdir/${outputname}S.${objext}%"` + finalize_command=`$echo "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}.def $output_objdir/${outputname}S.${objext}%"` + else + compile_command=`$echo "X$compile_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"` + finalize_command=`$echo "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"` + fi + ;; + * ) + compile_command=`$echo "X$compile_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"` + finalize_command=`$echo "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"` + ;; + esac + ;; + *) + $echo "$modename: unknown suffix for \`$dlsyms'" 1>&2 + exit $EXIT_FAILURE + ;; + esac + else + # We keep going just in case the user didn't refer to + # lt_preloaded_symbols. The linker will fail if global_symbol_pipe + # really was required. + + # Nullify the symbol file. + compile_command=`$echo "X$compile_command" | $Xsed -e "s% @SYMFILE@%%"` + finalize_command=`$echo "X$finalize_command" | $Xsed -e "s% @SYMFILE@%%"` + fi + + if test "$need_relink" = no || test "$build_libtool_libs" != yes; then + # Replace the output file specification. + compile_command=`$echo "X$compile_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'` + link_command="$compile_command$compile_rpath" + + # We have no uninstalled library dependencies, so finalize right now. + $show "$link_command" + $run eval "$link_command" + exit_status=$? + + # Delete the generated files. + if test -n "$dlsyms"; then + $show "$rm $output_objdir/${outputname}S.${objext}" + $run $rm "$output_objdir/${outputname}S.${objext}" + fi + + exit $exit_status + fi + + if test -n "$shlibpath_var"; then + # We should set the shlibpath_var + rpath= + for dir in $temp_rpath; do + case $dir in + [\\/]* | [A-Za-z]:[\\/]*) + # Absolute path. + rpath="$rpath$dir:" + ;; + *) + # Relative path: add a thisdir entry. 
+ rpath="$rpath\$thisdir/$dir:" + ;; + esac + done + temp_rpath="$rpath" + fi + + if test -n "$compile_shlibpath$finalize_shlibpath"; then + compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command" + fi + if test -n "$finalize_shlibpath"; then + finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command" + fi + + compile_var= + finalize_var= + if test -n "$runpath_var"; then + if test -n "$perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do + rpath="$rpath$dir:" + done + compile_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + if test -n "$finalize_perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $finalize_perm_rpath; do + rpath="$rpath$dir:" + done + finalize_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + fi + + if test "$no_install" = yes; then + # We don't need to create a wrapper script. + link_command="$compile_var$compile_command$compile_rpath" + # Replace the output file specification. + link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'` + # Delete the old output file. + $run $rm $output + # Link the executable and exit + $show "$link_command" + $run eval "$link_command" || exit $? + exit $EXIT_SUCCESS + fi + + if test "$hardcode_action" = relink; then + # Fast installation is not supported + link_command="$compile_var$compile_command$compile_rpath" + relink_command="$finalize_var$finalize_command$finalize_rpath" + + $echo "$modename: warning: this platform does not like uninstalled shared libraries" 1>&2 + $echo "$modename: \`$output' will be relinked during installation" 1>&2 + else + if test "$fast_install" != no; then + link_command="$finalize_var$compile_command$finalize_rpath" + if test "$fast_install" = yes; then + relink_command=`$echo "X$compile_var$compile_command$compile_rpath" | $Xsed -e 's%@OUTPUT@%\$progdir/\$file%g'` + else + # fast_install is set to needless + relink_command= + fi + else + link_command="$compile_var$compile_command$compile_rpath" + relink_command="$finalize_var$finalize_command$finalize_rpath" + fi + fi + + # Replace the output file specification. + link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'` + + # Delete the old output files. + $run $rm $output $output_objdir/$outputname $output_objdir/lt-$outputname + + $show "$link_command" + $run eval "$link_command" || exit $? + + # Now create the wrapper script. + $show "creating $output" + + # Quote the relink command for shipping. + if test -n "$relink_command"; then + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + var_value=`$echo "X$var_value" | $Xsed -e "$sed_quote_subst"` + relink_command="$var=\"$var_value\"; export $var; $relink_command" + fi + done + relink_command="(cd `pwd`; $relink_command)" + relink_command=`$echo "X$relink_command" | $Xsed -e "$sed_quote_subst"` + fi + + # Quote $echo for shipping. 
+ if test "X$echo" = "X$SHELL $progpath --fallback-echo"; then
+ case $progpath in
+ [\\/]* | [A-Za-z]:[\\/]*) qecho="$SHELL $progpath --fallback-echo";;
+ *) qecho="$SHELL `pwd`/$progpath --fallback-echo";;
+ esac
+ qecho=`$echo "X$qecho" | $Xsed -e "$sed_quote_subst"`
+ else
+ qecho=`$echo "X$echo" | $Xsed -e "$sed_quote_subst"`
+ fi
+
+ # Only actually do things if our run command is non-null.
+ if test -z "$run"; then
+ # win32 will think the script is a binary if it has
+ # a .exe suffix, so we strip it off here.
+ case $output in
+ *.exe) output=`$echo $output|${SED} 's,.exe$,,'` ;;
+ esac
+ # test for cygwin because mv fails w/o .exe extensions
+ case $host in
+ *cygwin*)
+ exeext=.exe
+ outputname=`$echo $outputname|${SED} 's,.exe$,,'` ;;
+ *) exeext= ;;
+ esac
+ case $host in
+ *cygwin* | *mingw* )
+ output_name=`basename $output`
+ output_path=`dirname $output`
+ cwrappersource="$output_path/$objdir/lt-$output_name.c"
+ cwrapper="$output_path/$output_name.exe"
+ $rm $cwrappersource $cwrapper
+ trap "$rm $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15
+
+ cat > $cwrappersource <<EOF
+
+/* $cwrappersource - temporary wrapper executable for $objdir/$outputname
+ Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP
+
+ The $output program cannot be directly executed until all the libtool
+ libraries that it depends on are installed.
+
+ This wrapper executable should never be moved out of the build directory.
+ If it is, it will not operate correctly.
+*/
+EOF
+ cat >> $cwrappersource<<"EOF"
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <malloc.h>
+#include <stdarg.h>
+#include <assert.h>
+#include <string.h>
+#include <ctype.h>
+#include <sys/stat.h>
+
+#if defined(PATH_MAX)
+# define LT_PATHMAX PATH_MAX
+#elif defined(MAXPATHLEN)
+# define LT_PATHMAX MAXPATHLEN
+#else
+# define LT_PATHMAX 1024
+#endif
+
+#ifndef DIR_SEPARATOR
+# define DIR_SEPARATOR '/'
+# define PATH_SEPARATOR ':'
+#endif
+
+#if defined (_WIN32) || defined (__MSDOS__) || defined (__DJGPP__) || \
+ defined (__OS2__)
+# define HAVE_DOS_BASED_FILE_SYSTEM
+# ifndef DIR_SEPARATOR_2
+# define DIR_SEPARATOR_2 '\\'
+# endif
+# ifndef PATH_SEPARATOR_2
+# define PATH_SEPARATOR_2 ';'
+# endif
+#endif
+
+#ifndef DIR_SEPARATOR_2
+# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR)
+#else /* DIR_SEPARATOR_2 */
+# define IS_DIR_SEPARATOR(ch) \
+ (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2))
+#endif /* DIR_SEPARATOR_2 */
+
+#ifndef PATH_SEPARATOR_2
+# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR)
+#else /* PATH_SEPARATOR_2 */
+# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2)
+#endif /* PATH_SEPARATOR_2 */
+
+#define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type)))
+#define XFREE(stale) do { \
+ if (stale) { free ((void *) stale); stale = 0; } \
+} while (0)
+
+/* -DDEBUG is fairly common in CFLAGS. */
+#undef DEBUG
+#if defined DEBUGWRAPPER
+# define DEBUG(format, ...) fprintf(stderr, format, __VA_ARGS__)
+#else
+# define DEBUG(format, ...)
+#endif
+
+const char *program_name = NULL;
+
+void * xmalloc (size_t num);
+char * xstrdup (const char *string);
+const char * base_name (const char *name);
+char * find_executable(const char *wrapper);
+int check_executable(const char *path);
+char * strendzap(char *str, const char *pat);
+void lt_fatal (const char *message, ...);
+
+int
+main (int argc, char *argv[])
+{
+ char **newargz;
+ int i;
+
+ program_name = (char *) xstrdup (base_name (argv[0]));
+ DEBUG("(main) argv[0] : %s\n",argv[0]);
+ DEBUG("(main) program_name : %s\n",program_name);
+ newargz = XMALLOC(char *, argc+2);
+EOF
+
+ cat >> $cwrappersource <<EOF
+ newargz[0] = "$SHELL";
+EOF
+
+ cat >> $cwrappersource <<"EOF"
+ newargz[1] = find_executable(argv[0]);
+ if (newargz[1] == NULL)
+ lt_fatal("Couldn't find %s", argv[0]);
+ DEBUG("(main) found exe at : %s\n",newargz[1]);
+ /* we know the script has the same name, without the .exe */
+ /* so make sure newargz[1] doesn't end in .exe */
+ strendzap(newargz[1],".exe");
+ for (i = 1; i < argc; i++)
+ newargz[i+1] = xstrdup(argv[i]);
+ newargz[argc+1] = NULL;
+
+ for (i=0; i<argc+1; i++)
+ {
+ DEBUG("(main) newargz[%d] : %s\n",i,newargz[i]);
+ ;
+ }
+EOF
+
+ case $host_os in
+ mingw*)
+ cat >> $cwrappersource <<EOF
+ execv("$SHELL",(char const **)newargz);
+EOF
+ ;;
+ *)
+ cat >> $cwrappersource <<EOF
+ execv("$SHELL",newargz);
+EOF
+ ;;
+ esac
+
+ cat >> $cwrappersource <<"EOF"
+ return 127;
+}
+
+void *
+xmalloc (size_t num)
+{
+ void * p = (void *) malloc (num);
+ if (!p)
+ lt_fatal ("Memory exhausted");
+
+ return p;
+}
+
+char *
+xstrdup (const char *string)
+{
+ return string ? strcpy ((char *) xmalloc (strlen (string) + 1), string) : NULL
+;
+}
+
+const char *
+base_name (const char *name)
+{
+ const char *base;
+
+#if defined (HAVE_DOS_BASED_FILE_SYSTEM)
+ /* Skip over the disk name in MSDOS pathnames. */
+ if (isalpha ((unsigned char)name[0]) && name[1] == ':')
+ name += 2;
+#endif
+
+ for (base = name; *name; name++)
+ if (IS_DIR_SEPARATOR (*name))
+ base = name + 1;
+ return base;
+}
+
+int
+check_executable(const char * path)
+{
+ struct stat st;
+
+ DEBUG("(check_executable) : %s\n", path ? (*path ? path : "EMPTY!") : "NULL!");
+ if ((!path) || (!*path))
+ return 0;
+
+ if ((stat (path, &st) >= 0) &&
+ (
+ /* MinGW & native WIN32 do not support S_IXOTH or S_IXGRP */
+#if defined (S_IXOTH)
+ ((st.st_mode & S_IXOTH) == S_IXOTH) ||
+#endif
+#if defined (S_IXGRP)
+ ((st.st_mode & S_IXGRP) == S_IXGRP) ||
+#endif
+ ((st.st_mode & S_IXUSR) == S_IXUSR))
+ )
+ return 1;
+ else
+ return 0;
+}
+
+/* Searches for the full path of the wrapper. Returns
+ newly allocated full path name if found, NULL otherwise */
+char *
+find_executable (const char* wrapper)
+{
+ int has_slash = 0;
+ const char* p;
+ const char* p_next;
+ /* static buffer for getcwd */
+ char tmp[LT_PATHMAX + 1];
+ int tmp_len;
+ char* concat_name;
+
+ DEBUG("(find_executable) : %s\n", wrapper ? (*wrapper ? wrapper : "EMPTY!") : "NULL!");
+
+ if ((wrapper == NULL) || (*wrapper == '\0'))
+ return NULL;
+
+ /* Absolute path?
*/ +#if defined (HAVE_DOS_BASED_FILE_SYSTEM) + if (isalpha ((unsigned char)wrapper[0]) && wrapper[1] == ':') + { + concat_name = xstrdup (wrapper); + if (check_executable(concat_name)) + return concat_name; + XFREE(concat_name); + } + else + { +#endif + if (IS_DIR_SEPARATOR (wrapper[0])) + { + concat_name = xstrdup (wrapper); + if (check_executable(concat_name)) + return concat_name; + XFREE(concat_name); + } +#if defined (HAVE_DOS_BASED_FILE_SYSTEM) + } +#endif + + for (p = wrapper; *p; p++) + if (*p == '/') + { + has_slash = 1; + break; + } + if (!has_slash) + { + /* no slashes; search PATH */ + const char* path = getenv ("PATH"); + if (path != NULL) + { + for (p = path; *p; p = p_next) + { + const char* q; + size_t p_len; + for (q = p; *q; q++) + if (IS_PATH_SEPARATOR(*q)) + break; + p_len = q - p; + p_next = (*q == '\0' ? q : q + 1); + if (p_len == 0) + { + /* empty path: current directory */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal ("getcwd failed"); + tmp_len = strlen(tmp); + concat_name = XMALLOC(char, tmp_len + 1 + strlen(wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + } + else + { + concat_name = XMALLOC(char, p_len + 1 + strlen(wrapper) + 1); + memcpy (concat_name, p, p_len); + concat_name[p_len] = '/'; + strcpy (concat_name + p_len + 1, wrapper); + } + if (check_executable(concat_name)) + return concat_name; + XFREE(concat_name); + } + } + /* not found in PATH; assume curdir */ + } + /* Relative path | not found in path: prepend cwd */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal ("getcwd failed"); + tmp_len = strlen(tmp); + concat_name = XMALLOC(char, tmp_len + 1 + strlen(wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + + if (check_executable(concat_name)) + return concat_name; + XFREE(concat_name); + return NULL; +} + +char * +strendzap(char *str, const char *pat) +{ + size_t len, patlen; + + assert(str != NULL); + assert(pat != NULL); + + len = strlen(str); + patlen = strlen(pat); + + if (patlen <= len) + { + str += len - patlen; + if (strcmp(str, pat) == 0) + *str = '\0'; + } + return str; +} + +static void +lt_error_core (int exit_status, const char * mode, + const char * message, va_list ap) +{ + fprintf (stderr, "%s: %s: ", program_name, mode); + vfprintf (stderr, message, ap); + fprintf (stderr, ".\n"); + + if (exit_status >= 0) + exit (exit_status); +} + +void +lt_fatal (const char *message, ...) +{ + va_list ap; + va_start (ap, message); + lt_error_core (EXIT_FAILURE, "FATAL", message, ap); + va_end (ap); +} +EOF + # we should really use a build-platform specific compiler + # here, but OTOH, the wrappers (shell script and this C one) + # are only useful if you want to execute the "real" binary. + # Since the "real" binary is built for $host, then this + # wrapper might as well be built for $host, too. + $run $LTCC $LTCFLAGS -s -o $cwrapper $cwrappersource + ;; + esac + $rm $output + trap "$rm $output; exit $EXIT_FAILURE" 1 2 15 + + $echo > $output "\ +#! $SHELL + +# $output - temporary wrapper script for $objdir/$outputname +# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP +# +# The $output program cannot be directly executed until all the libtool +# libraries that it depends on are installed. +# +# This wrapper script should never be moved out of the build directory. +# If it is, it will not operate correctly. + +# Sed substitution that helps us do robust quoting. 
It backslashifies +# metacharacters that are still active within double-quoted strings. +Xsed='${SED} -e 1s/^X//' +sed_quote_subst='$sed_quote_subst' + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +relink_command=\"$relink_command\" + +# This environment variable determines our operation mode. +if test \"\$libtool_install_magic\" = \"$magic\"; then + # install mode needs the following variable: + notinst_deplibs='$notinst_deplibs' +else + # When we are sourced in execute mode, \$file and \$echo are already set. + if test \"\$libtool_execute_magic\" != \"$magic\"; then + echo=\"$qecho\" + file=\"\$0\" + # Make sure echo works. + if test \"X\$1\" = X--no-reexec; then + # Discard the --no-reexec flag, and continue. + shift + elif test \"X\`(\$echo '\t') 2>/dev/null\`\" = 'X\t'; then + # Yippee, \$echo works! + : + else + # Restart under the correct shell, and then maybe \$echo will work. + exec $SHELL \"\$0\" --no-reexec \${1+\"\$@\"} + fi + fi\ +" + $echo >> $output "\ + + # Find the directory that this script lives in. + thisdir=\`\$echo \"X\$file\" | \$Xsed -e 's%/[^/]*$%%'\` + test \"x\$thisdir\" = \"x\$file\" && thisdir=. + + # Follow symbolic links until we get to the real thisdir. + file=\`ls -ld \"\$file\" | ${SED} -n 's/.*-> //p'\` + while test -n \"\$file\"; do + destdir=\`\$echo \"X\$file\" | \$Xsed -e 's%/[^/]*\$%%'\` + + # If there was a directory component, then change thisdir. + if test \"x\$destdir\" != \"x\$file\"; then + case \"\$destdir\" in + [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;; + *) thisdir=\"\$thisdir/\$destdir\" ;; + esac + fi + + file=\`\$echo \"X\$file\" | \$Xsed -e 's%^.*/%%'\` + file=\`ls -ld \"\$thisdir/\$file\" | ${SED} -n 's/.*-> //p'\` + done + + # Try to get the absolute directory name. + absdir=\`cd \"\$thisdir\" && pwd\` + test -n \"\$absdir\" && thisdir=\"\$absdir\" +" + + if test "$fast_install" = yes; then + $echo >> $output "\ + program=lt-'$outputname'$exeext + progdir=\"\$thisdir/$objdir\" + + if test ! -f \"\$progdir/\$program\" || \\ + { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | ${SED} 1q\`; \\ + test \"X\$file\" != \"X\$progdir/\$program\"; }; then + + file=\"\$\$-\$program\" + + if test ! -d \"\$progdir\"; then + $mkdir \"\$progdir\" + else + $rm \"\$progdir/\$file\" + fi" + + $echo >> $output "\ + + # relink executable if necessary + if test -n \"\$relink_command\"; then + if relink_command_output=\`eval \$relink_command 2>&1\`; then : + else + $echo \"\$relink_command_output\" >&2 + $rm \"\$progdir/\$file\" + exit $EXIT_FAILURE + fi + fi + + $mv \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null || + { $rm \"\$progdir/\$program\"; + $mv \"\$progdir/\$file\" \"\$progdir/\$program\"; } + $rm \"\$progdir/\$file\" + fi" + else + $echo >> $output "\ + program='$outputname' + progdir=\"\$thisdir/$objdir\" +" + fi + + $echo >> $output "\ + + if test -f \"\$progdir/\$program\"; then" + + # Export our shlibpath_var if we have one. 
+ if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then + $echo >> $output "\ + # Add our own library path to $shlibpath_var + $shlibpath_var=\"$temp_rpath\$$shlibpath_var\" + + # Some systems cannot cope with colon-terminated $shlibpath_var + # The second colon is a workaround for a bug in BeOS R4 sed + $shlibpath_var=\`\$echo \"X\$$shlibpath_var\" | \$Xsed -e 's/::*\$//'\` + + export $shlibpath_var +" + fi + + # fixup the dll searchpath if we need to. + if test -n "$dllsearchpath"; then + $echo >> $output "\ + # Add the dll search path components to the executable PATH + PATH=$dllsearchpath:\$PATH +" + fi + + $echo >> $output "\ + if test \"\$libtool_execute_magic\" != \"$magic\"; then + # Run the actual program with our arguments. + + # Make sure env LD_LIBRARY_PATH does not mess us up + if test -n \"\${LD_LIBRARY_PATH+set}\"; then + export LD_LIBRARY_PATH=\$progdir:\$LD_LIBRARY_PATH + fi +" + case $host in + # Backslashes separate directories on plain windows + *-*-mingw | *-*-os2*) + $echo >> $output "\ + exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} +" + ;; + + *) + $echo >> $output "\ + exec \"\$progdir/\$program\" \${1+\"\$@\"} +" + ;; + esac + $echo >> $output "\ + \$echo \"\$0: cannot exec \$program \${1+\"\$@\"}\" + exit $EXIT_FAILURE + fi + else + # The program doesn't exist. + \$echo \"\$0: error: \\\`\$progdir/\$program' does not exist\" 1>&2 + \$echo \"This script is just a wrapper for \$program.\" 1>&2 + $echo \"See the $PACKAGE documentation for more information.\" 1>&2 + exit $EXIT_FAILURE + fi +fi\ +" + chmod +x $output + fi + exit $EXIT_SUCCESS + ;; + esac + + # See if we need to build an old-fashioned archive. + for oldlib in $oldlibs; do + + if test "$build_libtool_libs" = convenience; then + oldobjs="$libobjs_save" + addlibs="$convenience" + build_libtool_libs=no + else + if test "$build_libtool_libs" = module; then + oldobjs="$libobjs_save" + build_libtool_libs=no + else + oldobjs="$old_deplibs $non_pic_objects" + fi + addlibs="$old_convenience" + fi + + if test -n "$addlibs"; then + gentop="$output_objdir/${outputname}x" + generated="$generated $gentop" + + func_extract_archives $gentop $addlibs + oldobjs="$oldobjs $func_extract_archives_result" + fi + + # Do each command in the archive commands. + if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then + cmds=$old_archive_from_new_cmds + else + # POSIX demands no paths to be encoded in archives. We have + # to avoid creating archives with duplicate basenames if we + # might have to extract them afterwards, e.g., when creating a + # static archive out of a convenience library, or when linking + # the entirety of a libtool archive into another (currently + # not supported by libtool). + if (for obj in $oldobjs + do + $echo "X$obj" | $Xsed -e 's%^.*/%%' + done | sort | sort -uc >/dev/null 2>&1); then + : + else + $echo "copying selected object files to avoid basename conflicts..." + + if test -z "$gentop"; then + gentop="$output_objdir/${outputname}x" + generated="$generated $gentop" + + $show "${rm}r $gentop" + $run ${rm}r "$gentop" + $show "$mkdir $gentop" + $run $mkdir "$gentop" + exit_status=$? + if test "$exit_status" -ne 0 && test ! 
-d "$gentop"; then + exit $exit_status + fi + fi + + save_oldobjs=$oldobjs + oldobjs= + counter=1 + for obj in $save_oldobjs + do + objbase=`$echo "X$obj" | $Xsed -e 's%^.*/%%'` + case " $oldobjs " in + " ") oldobjs=$obj ;; + *[\ /]"$objbase "*) + while :; do + # Make sure we don't pick an alternate name that also + # overlaps. + newobj=lt$counter-$objbase + counter=`expr $counter + 1` + case " $oldobjs " in + *[\ /]"$newobj "*) ;; + *) if test ! -f "$gentop/$newobj"; then break; fi ;; + esac + done + $show "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj" + $run ln "$obj" "$gentop/$newobj" || + $run cp "$obj" "$gentop/$newobj" + oldobjs="$oldobjs $gentop/$newobj" + ;; + *) oldobjs="$oldobjs $obj" ;; + esac + done + fi + + eval cmds=\"$old_archive_cmds\" + + if len=`expr "X$cmds" : ".*"` && + test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then + cmds=$old_archive_cmds + else + # the command line is too long to link in one step, link in parts + $echo "using piecewise archive linking..." + save_RANLIB=$RANLIB + RANLIB=: + objlist= + concat_cmds= + save_oldobjs=$oldobjs + + # Is there a better way of finding the last object in the list? + for obj in $save_oldobjs + do + last_oldobj=$obj + done + for obj in $save_oldobjs + do + oldobjs="$objlist $obj" + objlist="$objlist $obj" + eval test_cmds=\"$old_archive_cmds\" + if len=`expr "X$test_cmds" : ".*" 2>/dev/null` && + test "$len" -le "$max_cmd_len"; then + : + else + # the above command should be used before it gets too long + oldobjs=$objlist + if test "$obj" = "$last_oldobj" ; then + RANLIB=$save_RANLIB + fi + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\${concat_cmds}$old_archive_cmds\" + objlist= + fi + done + RANLIB=$save_RANLIB + oldobjs=$objlist + if test "X$oldobjs" = "X" ; then + eval cmds=\"\$concat_cmds\" + else + eval cmds=\"\$concat_cmds~\$old_archive_cmds\" + fi + fi + fi + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + eval cmd=\"$cmd\" + IFS="$save_ifs" + $show "$cmd" + $run eval "$cmd" || exit $? + done + IFS="$save_ifs" + done + + if test -n "$generated"; then + $show "${rm}r$generated" + $run ${rm}r$generated + fi + + # Now create the libtool archive. + case $output in + *.la) + old_library= + test "$build_old_libs" = yes && old_library="$libname.$libext" + $show "creating $output" + + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + var_value=`$echo "X$var_value" | $Xsed -e "$sed_quote_subst"` + relink_command="$var=\"$var_value\"; export $var; $relink_command" + fi + done + # Quote the link command for shipping. + relink_command="(cd `pwd`; $SHELL $progpath $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)" + relink_command=`$echo "X$relink_command" | $Xsed -e "$sed_quote_subst"` + if test "$hardcode_automatic" = yes ; then + relink_command= + fi + + + # Only create the output if not a dry run. 
+ if test -z "$run"; then + for installed in no yes; do + if test "$installed" = yes; then + if test -z "$install_libdir"; then + break + fi + output="$output_objdir/$outputname"i + # Replace all uninstalled libtool libraries with the installed ones + newdependency_libs= + for deplib in $dependency_libs; do + case $deplib in + *.la) + name=`$echo "X$deplib" | $Xsed -e 's%^.*/%%'` + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` + if test -z "$libdir"; then + $echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2 + exit $EXIT_FAILURE + fi + if test "X$EGREP" = X ; then + EGREP=egrep + fi + # We do not want portage's install root ($D) present. Check only for + # this if the .la is being installed. + if test "$installed" = yes && test "$D"; then + eval mynewdependency_lib=`echo "$libdir/$name" |sed -e "s:$D:/:g" -e 's:/\+:/:g'` + else + mynewdependency_lib="$libdir/$name" + fi + # Do not add duplicates + if test "$mynewdependency_lib"; then + my_little_ninja_foo_1=`echo $newdependency_libs |$EGREP -e "$mynewdependency_lib"` + if test -z "$my_little_ninja_foo_1"; then + newdependency_libs="$newdependency_libs $mynewdependency_lib" + fi + fi + ;; + *) + if test "$installed" = yes; then + # Rather use S=WORKDIR if our version of portage supports it. + # This is because some ebuild (gcc) do not use $S as buildroot. + if test "$PWORKDIR"; then + S="$PWORKDIR" + fi + # We do not want portage's build root ($S) present. + my_little_ninja_foo_2=`echo $deplib |$EGREP -e "$S"` + # We do not want portage's install root ($D) present. + my_little_ninja_foo_3=`echo $deplib |$EGREP -e "$D"` + if test -n "$my_little_ninja_foo_2" && test "$S"; then + mynewdependency_lib="" + elif test -n "$my_little_ninja_foo_3" && test "$D"; then + eval mynewdependency_lib=`echo "$deplib" |sed -e "s:$D:/:g" -e 's:/\+:/:g'` + else + mynewdependency_lib="$deplib" + fi + else + mynewdependency_lib="$deplib" + fi + # Do not add duplicates + if test "$mynewdependency_lib"; then + my_little_ninja_foo_4=`echo $newdependency_libs |$EGREP -e "$mynewdependency_lib"` + if test -z "$my_little_ninja_foo_4"; then + newdependency_libs="$newdependency_libs $mynewdependency_lib" + fi + fi + ;; + esac + done + dependency_libs="$newdependency_libs" + newdlfiles= + for lib in $dlfiles; do + name=`$echo "X$lib" | $Xsed -e 's%^.*/%%'` + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + if test -z "$libdir"; then + $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2 + exit $EXIT_FAILURE + fi + newdlfiles="$newdlfiles $libdir/$name" + done + dlfiles="$newdlfiles" + newdlprefiles= + for lib in $dlprefiles; do + name=`$echo "X$lib" | $Xsed -e 's%^.*/%%'` + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + if test -z "$libdir"; then + $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2 + exit $EXIT_FAILURE + fi + newdlprefiles="$newdlprefiles $libdir/$name" + done + dlprefiles="$newdlprefiles" + else + newdlfiles= + for lib in $dlfiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; + *) abs=`pwd`"/$lib" ;; + esac + newdlfiles="$newdlfiles $abs" + done + dlfiles="$newdlfiles" + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; + *) abs=`pwd`"/$lib" ;; + esac + newdlprefiles="$newdlprefiles $abs" + done + dlprefiles="$newdlprefiles" + fi + $rm $output + # place dlname in correct position for cygwin + tdlname=$dlname + case $host,$output,$installed,$module,$dlname in + *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll) 
tdlname=../bin/$dlname ;; + esac + # Do not add duplicates + if test "$installed" = yes && test "$D"; then + install_libdir=`echo "$install_libdir" |sed -e "s:$D:/:g" -e 's:/\+:/:g'` + fi + $echo > $output "\ +# $outputname - a libtool library file +# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# The name that we can dlopen(3). +dlname='$tdlname' + +# Names of this library. +library_names='$library_names' + +# The name of the static archive. +old_library='$old_library' + +# Libraries that this one depends upon. +dependency_libs='$dependency_libs' + +# Version information for $libname. +current=$current +age=$age +revision=$revision + +# Is this an already installed library? +installed=$installed + +# Should we warn about portability when linking against -modules? +shouldnotlink=$module + +# Files to dlopen/dlpreopen +dlopen='$dlfiles' +dlpreopen='$dlprefiles' + +# Directory that this library needs to be installed in: +libdir='$install_libdir'" + if test "$installed" = no && test "$need_relink" = yes; then + $echo >> $output "\ +relink_command=\"$relink_command\"" + fi + done + fi + + # Do a symbolic link so that the libtool archive can be found in + # LD_LIBRARY_PATH before the program is installed. + $show "(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)" + $run eval '(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)' || exit $? + ;; + esac + exit $EXIT_SUCCESS + ;; + + # libtool install mode + install) + modename="$modename: install" + + # There may be an optional sh(1) argument at the beginning of + # install_prog (especially on Windows NT). + if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh || + # Allow the use of GNU shtool's install command. + $echo "X$nonopt" | grep shtool > /dev/null; then + # Aesthetically quote it. + arg=`$echo "X$nonopt" | $Xsed -e "$sed_quote_subst"` + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + install_prog="$arg " + arg="$1" + shift + else + install_prog= + arg=$nonopt + fi + + # The real first argument should be the name of the installation program. + # Aesthetically quote it. + arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + install_prog="$install_prog$arg" + + # We need to accept at least all the BSD install flags. + dest= + files= + opts= + prev= + install_type= + isdir=no + stripme= + for arg + do + if test -n "$dest"; then + files="$files $dest" + dest=$arg + continue + fi + + case $arg in + -d) isdir=yes ;; + -f) + case " $install_prog " in + *[\\\ /]cp\ *) ;; + *) prev=$arg ;; + esac + ;; + -g | -m | -o) prev=$arg ;; + -s) + stripme=" -s" + continue + ;; + -*) + ;; + *) + # If the previous option needed an argument, then skip it. + if test -n "$prev"; then + prev= + else + dest=$arg + continue + fi + ;; + esac + + # Aesthetically quote the argument. 
+ arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` + case $arg in + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + arg="\"$arg\"" + ;; + esac + install_prog="$install_prog $arg" + done + + if test -z "$install_prog"; then + $echo "$modename: you must specify an install program" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + if test -n "$prev"; then + $echo "$modename: the \`$prev' option requires an argument" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + if test -z "$files"; then + if test -z "$dest"; then + $echo "$modename: no file or destination specified" 1>&2 + else + $echo "$modename: you must specify a destination" 1>&2 + fi + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + # Strip any trailing slash from the destination. + dest=`$echo "X$dest" | $Xsed -e 's%/$%%'` + + # Check to see that the destination is a directory. + test -d "$dest" && isdir=yes + if test "$isdir" = yes; then + destdir="$dest" + destname= + else + destdir=`$echo "X$dest" | $Xsed -e 's%/[^/]*$%%'` + test "X$destdir" = "X$dest" && destdir=. + destname=`$echo "X$dest" | $Xsed -e 's%^.*/%%'` + + # Not a directory, so check to see that there is only one file specified. + set dummy $files + if test "$#" -gt 2; then + $echo "$modename: \`$dest' is not a directory" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + fi + case $destdir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + for file in $files; do + case $file in + *.lo) ;; + *) + $echo "$modename: \`$destdir' must be an absolute directory name" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + ;; + esac + done + ;; + esac + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic="$magic" + + staticlibs= + future_libdirs= + current_libdirs= + for file in $files; do + + # Do each installation. + case $file in + *.$libext) + # Do the static libraries later. + staticlibs="$staticlibs $file" + ;; + + *.la) + # Check to see that this really is a libtool archive. + if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then : + else + $echo "$modename: \`$file' is not a valid libtool archive" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + library_names= + old_library= + relink_command= + # If there is no directory component, then add one. + case $file in + */* | *\\*) . $file ;; + *) . ./$file ;; + esac + + # Add the libdir to current_libdirs if it is the destination. + if test "X$destdir" = "X$libdir"; then + case "$current_libdirs " in + *" $libdir "*) ;; + *) current_libdirs="$current_libdirs $libdir" ;; + esac + else + # Note the libdir as a future libdir. + case "$future_libdirs " in + *" $libdir "*) ;; + *) future_libdirs="$future_libdirs $libdir" ;; + esac + fi + + dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`/ + test "X$dir" = "X$file/" && dir= + dir="$dir$objdir" + + if test -n "$relink_command"; then + # Determine the prefix the user has applied to our future dir. + inst_prefix_dir=`$echo "$destdir" | $SED "s%$libdir\$%%"` + + # Don't allow the user to place us outside of our expected + # location b/c this prevents finding dependent libraries that + # are installed to the same prefix. + # At present, this check doesn't affect windows .dll's that + # are installed into $libdir/../bin (currently, that works fine) + # but it's something to keep an eye on. 
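+ # Illustrative sketch only (paths here are hypothetical): for a staged
+ # install with destdir=/tmp/stage/usr/lib and libdir=/usr/lib, the sed
+ # above strips the trailing $libdir and leaves inst_prefix_dir=/tmp/stage,
+ # which is later spliced into the relink command via @inst_prefix_dir@;
+ # for a direct install (destdir=/usr/lib) inst_prefix_dir stays empty, and
+ # if destdir does not end in $libdir at all, the check below rejects it.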
+ if test "$inst_prefix_dir" = "$destdir"; then + $echo "$modename: error: cannot install \`$file' to a directory not ending in $libdir" 1>&2 + exit $EXIT_FAILURE + fi + + if test -n "$inst_prefix_dir"; then + # Stick the inst_prefix_dir data into the link command. + relink_command=`$echo "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"` + else + relink_command=`$echo "$relink_command" | $SED "s%@inst_prefix_dir@%%"` + fi + + $echo "$modename: warning: relinking \`$file'" 1>&2 + $show "$relink_command" + if $run eval "$relink_command"; then : + else + $echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2 + exit $EXIT_FAILURE + fi + fi + + # See the names of the shared library. + set dummy $library_names + if test -n "$2"; then + realname="$2" + shift + shift + + srcname="$realname" + test -n "$relink_command" && srcname="$realname"T + + # Install the shared library and build the symlinks. + $show "$install_prog $dir/$srcname $destdir/$realname" + $run eval "$install_prog $dir/$srcname $destdir/$realname" || exit $? + if test -n "$stripme" && test -n "$striplib"; then + $show "$striplib $destdir/$realname" + $run eval "$striplib $destdir/$realname" || exit $? + fi + + if test "$#" -gt 0; then + # Delete the old symlinks, and create new ones. + # Try `ln -sf' first, because the `ln' binary might depend on + # the symlink we replace! Solaris /bin/ln does not understand -f, + # so we also need to try rm && ln -s. + for linkname + do + if test "$linkname" != "$realname"; then + $show "(cd $destdir && { $LN_S -f $realname $linkname || { $rm $linkname && $LN_S $realname $linkname; }; })" + $run eval "(cd $destdir && { $LN_S -f $realname $linkname || { $rm $linkname && $LN_S $realname $linkname; }; })" + fi + done + fi + + # Do each command in the postinstall commands. + lib="$destdir/$realname" + cmds=$postinstall_cmds + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test "$mode" = relink; then + $run eval '(cd $output_objdir && $rm ${realname}T && $mv ${realname}U $realname)' + fi + + exit $lt_exit + } + done + IFS="$save_ifs" + fi + + # Install the pseudo-library for information purposes. + name=`$echo "X$file" | $Xsed -e 's%^.*/%%'` + instname="$dir/$name"i + $show "$install_prog $instname $destdir/$name" + $run eval "$install_prog $instname $destdir/$name" || exit $? + + # Maybe install the static library, too. + test -n "$old_library" && staticlibs="$staticlibs $dir/$old_library" + ;; + + *.lo) + # Install (i.e. copy) a libtool object. + + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile="$destdir/$destname" + else + destfile=`$echo "X$file" | $Xsed -e 's%^.*/%%'` + destfile="$destdir/$destfile" + fi + + # Deduce the name of the destination old-style object file. + case $destfile in + *.lo) + staticdest=`$echo "X$destfile" | $Xsed -e "$lo2o"` + ;; + *.$objext) + staticdest="$destfile" + destfile= + ;; + *) + $echo "$modename: cannot copy a libtool object to \`$destfile'" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + ;; + esac + + # Install the libtool object if requested. + if test -n "$destfile"; then + $show "$install_prog $file $destfile" + $run eval "$install_prog $file $destfile" || exit $? + fi + + # Install the old object if enabled. 
+ if test "$build_old_libs" = yes; then + # Deduce the name of the old-style object file. + staticobj=`$echo "X$file" | $Xsed -e "$lo2o"` + + $show "$install_prog $staticobj $staticdest" + $run eval "$install_prog \$staticobj \$staticdest" || exit $? + fi + exit $EXIT_SUCCESS + ;; + + *) + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile="$destdir/$destname" + else + destfile=`$echo "X$file" | $Xsed -e 's%^.*/%%'` + destfile="$destdir/$destfile" + fi + + # If the file is missing, and there is a .exe on the end, strip it + # because it is most likely a libtool script we actually want to + # install + stripped_ext="" + case $file in + *.exe) + if test ! -f "$file"; then + file=`$echo $file|${SED} 's,.exe$,,'` + stripped_ext=".exe" + fi + ;; + esac + + # Do a test to see if this is really a libtool program. + case $host in + *cygwin*|*mingw*) + wrapper=`$echo $file | ${SED} -e 's,.exe$,,'` + ;; + *) + wrapper=$file + ;; + esac + if (${SED} -e '4q' $wrapper | grep "^# Generated by .*$PACKAGE")>/dev/null 2>&1; then + notinst_deplibs= + relink_command= + + # Note that it is not necessary on cygwin/mingw to append a dot to + # foo even if both foo and FILE.exe exist: automatic-append-.exe + # behavior happens only for exec(3), not for open(2)! Also, sourcing + # `FILE.' does not work on cygwin managed mounts. + # + # If there is no directory component, then add one. + case $wrapper in + */* | *\\*) . ${wrapper} ;; + *) . ./${wrapper} ;; + esac + + # Check the variables that should have been set. + if test -z "$notinst_deplibs"; then + $echo "$modename: invalid libtool wrapper script \`$wrapper'" 1>&2 + exit $EXIT_FAILURE + fi + + finalize=yes + for lib in $notinst_deplibs; do + # Check to see that each library is installed. + libdir= + if test -f "$lib"; then + # If there is no directory component, then add one. + case $lib in + */* | *\\*) . $lib ;; + *) . ./$lib ;; + esac + fi + libfile="$libdir/"`$echo "X$lib" | $Xsed -e 's%^.*/%%g'` ### testsuite: skip nested quoting test + if test -n "$libdir" && test ! -f "$libfile"; then + $echo "$modename: warning: \`$lib' has not been installed in \`$libdir'" 1>&2 + finalize=no + fi + done + + relink_command= + # Note that it is not necessary on cygwin/mingw to append a dot to + # foo even if both foo and FILE.exe exist: automatic-append-.exe + # behavior happens only for exec(3), not for open(2)! Also, sourcing + # `FILE.' does not work on cygwin managed mounts. + # + # If there is no directory component, then add one. + case $wrapper in + */* | *\\*) . ${wrapper} ;; + *) . ./${wrapper} ;; + esac + + outputname= + if test "$fast_install" = no && test -n "$relink_command"; then + if test "$finalize" = yes && test -z "$run"; then + tmpdir=`func_mktempdir` + file=`$echo "X$file$stripped_ext" | $Xsed -e 's%^.*/%%'` + outputname="$tmpdir/$file" + # Replace the output file specification. + relink_command=`$echo "X$relink_command" | $Xsed -e 's%@OUTPUT@%'"$outputname"'%g'` + + $show "$relink_command" + if $run eval "$relink_command"; then : + else + $echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2 + ${rm}r "$tmpdir" + continue + fi + file="$outputname" + else + $echo "$modename: warning: cannot relink \`$file'" 1>&2 + fi + else + # Install the binary that we compiled earlier. 
+ file=`$echo "X$file$stripped_ext" | $Xsed -e "s%\([^/]*\)$%$objdir/\1%"` + fi + fi + + # remove .exe since cygwin /usr/bin/install will append another + # one anyway + case $install_prog,$host in + */usr/bin/install*,*cygwin*) + case $file:$destfile in + *.exe:*.exe) + # this is ok + ;; + *.exe:*) + destfile=$destfile.exe + ;; + *:*.exe) + destfile=`$echo $destfile | ${SED} -e 's,.exe$,,'` + ;; + esac + ;; + esac + $show "$install_prog$stripme $file $destfile" + $run eval "$install_prog\$stripme \$file \$destfile" || exit $? + test -n "$outputname" && ${rm}r "$tmpdir" + ;; + esac + done + + for file in $staticlibs; do + name=`$echo "X$file" | $Xsed -e 's%^.*/%%'` + + # Set up the ranlib parameters. + oldlib="$destdir/$name" + + $show "$install_prog $file $oldlib" + $run eval "$install_prog \$file \$oldlib" || exit $? + + if test -n "$stripme" && test -n "$old_striplib"; then + $show "$old_striplib $oldlib" + $run eval "$old_striplib $oldlib" || exit $? + fi + + # Do each command in the postinstall commands. + cmds=$old_postinstall_cmds + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" || exit $? + done + IFS="$save_ifs" + done + + if test -n "$future_libdirs"; then + $echo "$modename: warning: remember to run \`$progname --finish$future_libdirs'" 1>&2 + fi + + if test -n "$current_libdirs"; then + # Maybe just do a dry run. + test -n "$run" && current_libdirs=" -n$current_libdirs" + exec_cmd='$SHELL $progpath $preserve_args --finish$current_libdirs' + else + exit $EXIT_SUCCESS + fi + ;; + + # libtool finish mode + finish) + modename="$modename: finish" + libdirs="$nonopt" + admincmds= + + if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then + for dir + do + libdirs="$libdirs $dir" + done + + for libdir in $libdirs; do + if test -n "$finish_cmds"; then + # Do each command in the finish commands. + cmds=$finish_cmds + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" || admincmds="$admincmds + $cmd" + done + IFS="$save_ifs" + fi + if test -n "$finish_eval"; then + # Do the single finish_eval. + eval cmds=\"$finish_eval\" + $run eval "$cmds" || admincmds="$admincmds + $cmds" + fi + done + fi + + # Exit here if they wanted silent mode. 
+ test "$show" = : && exit $EXIT_SUCCESS + + $echo "X----------------------------------------------------------------------" | $Xsed + $echo "Libraries have been installed in:" + for libdir in $libdirs; do + $echo " $libdir" + done + $echo + $echo "If you ever happen to want to link against installed libraries" + $echo "in a given directory, LIBDIR, you must either use libtool, and" + $echo "specify the full pathname of the library, or use the \`-LLIBDIR'" + $echo "flag during linking and do at least one of the following:" + if test -n "$shlibpath_var"; then + $echo " - add LIBDIR to the \`$shlibpath_var' environment variable" + $echo " during execution" + fi + if test -n "$runpath_var"; then + $echo " - add LIBDIR to the \`$runpath_var' environment variable" + $echo " during linking" + fi + if test -n "$hardcode_libdir_flag_spec"; then + libdir=LIBDIR + eval flag=\"$hardcode_libdir_flag_spec\" + + $echo " - use the \`$flag' linker flag" + fi + if test -n "$admincmds"; then + $echo " - have your system administrator run these commands:$admincmds" + fi + if test -f /etc/ld.so.conf; then + $echo " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'" + fi + $echo + $echo "See any operating system documentation about shared libraries for" + $echo "more information, such as the ld(1) and ld.so(8) manual pages." + $echo "X----------------------------------------------------------------------" | $Xsed + exit $EXIT_SUCCESS + ;; + + # libtool execute mode + execute) + modename="$modename: execute" + + # The first argument is the command name. + cmd="$nonopt" + if test -z "$cmd"; then + $echo "$modename: you must specify a COMMAND" 1>&2 + $echo "$help" + exit $EXIT_FAILURE + fi + + # Handle -dlopen flags immediately. + for file in $execute_dlfiles; do + if test ! -f "$file"; then + $echo "$modename: \`$file' is not a file" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + dir= + case $file in + *.la) + # Check to see that this really is a libtool archive. + if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then : + else + $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + # Read the libtool library. + dlname= + library_names= + + # If there is no directory component, then add one. + case $file in + */* | *\\*) . $file ;; + *) . ./$file ;; + esac + + # Skip this library if it cannot be dlopened. + if test -z "$dlname"; then + # Warn if it was a shared library. + test -n "$library_names" && $echo "$modename: warning: \`$file' was not linked with \`-export-dynamic'" + continue + fi + + dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'` + test "X$dir" = "X$file" && dir=. + + if test -f "$dir/$objdir/$dlname"; then + dir="$dir/$objdir" + else + $echo "$modename: cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'" 1>&2 + exit $EXIT_FAILURE + fi + ;; + + *.lo) + # Just add the directory containing the .lo file. + dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'` + test "X$dir" = "X$file" && dir=. + ;; + + *) + $echo "$modename: warning \`-dlopen' is ignored for non-libtool libraries and objects" 1>&2 + continue + ;; + esac + + # Get the absolute pathname. + absdir=`cd "$dir" && pwd` + test -n "$absdir" && dir="$absdir" + + # Now add the directory to shlibpath_var. 
+ if eval "test -z \"\$$shlibpath_var\""; then + eval "$shlibpath_var=\"\$dir\"" + else + eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\"" + fi + done + + # This variable tells wrapper scripts just to set shlibpath_var + # rather than running their programs. + libtool_execute_magic="$magic" + + # Check if any of the arguments is a wrapper script. + args= + for file + do + case $file in + -*) ;; + *) + # Do a test to see if this is really a libtool program. + if (${SED} -e '4q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then + # If there is no directory component, then add one. + case $file in + */* | *\\*) . $file ;; + *) . ./$file ;; + esac + + # Transform arg to wrapped name. + file="$progdir/$program" + fi + ;; + esac + # Quote arguments (to preserve shell metacharacters). + file=`$echo "X$file" | $Xsed -e "$sed_quote_subst"` + args="$args \"$file\"" + done + + if test -z "$run"; then + if test -n "$shlibpath_var"; then + # Export the shlibpath_var. + eval "export $shlibpath_var" + fi + + # Restore saved environment variables + if test "${save_LC_ALL+set}" = set; then + LC_ALL="$save_LC_ALL"; export LC_ALL + fi + if test "${save_LANG+set}" = set; then + LANG="$save_LANG"; export LANG + fi + + # Now prepare to actually exec the command. + exec_cmd="\$cmd$args" + else + # Display what would be done. + if test -n "$shlibpath_var"; then + eval "\$echo \"\$shlibpath_var=\$$shlibpath_var\"" + $echo "export $shlibpath_var" + fi + $echo "$cmd$args" + exit $EXIT_SUCCESS + fi + ;; + + # libtool clean and uninstall mode + clean | uninstall) + modename="$modename: $mode" + rm="$nonopt" + files= + rmforce= + exit_status=0 + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic="$magic" + + for arg + do + case $arg in + -f) rm="$rm $arg"; rmforce=yes ;; + -*) rm="$rm $arg" ;; + *) files="$files $arg" ;; + esac + done + + if test -z "$rm"; then + $echo "$modename: you must specify an RM program" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + fi + + rmdirs= + + origobjdir="$objdir" + for file in $files; do + dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'` + if test "X$dir" = "X$file"; then + dir=. + objdir="$origobjdir" + else + objdir="$dir/$origobjdir" + fi + name=`$echo "X$file" | $Xsed -e 's%^.*/%%'` + test "$mode" = uninstall && objdir="$dir" + + # Remember objdir for removal later, being careful to avoid duplicates + if test "$mode" = clean; then + case " $rmdirs " in + *" $objdir "*) ;; + *) rmdirs="$rmdirs $objdir" ;; + esac + fi + + # Don't error if the file doesn't exist and rm -f was used. + if (test -L "$file") >/dev/null 2>&1 \ + || (test -h "$file") >/dev/null 2>&1 \ + || test -f "$file"; then + : + elif test -d "$file"; then + exit_status=1 + continue + elif test "$rmforce" = yes; then + continue + fi + + rmfiles="$file" + + case $name in + *.la) + # Possibly a libtool archive, so verify it. + if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then + . $dir/$name + + # Delete the libtool libraries and symlinks. 
+ for n in $library_names; do + rmfiles="$rmfiles $objdir/$n" + done + test -n "$old_library" && rmfiles="$rmfiles $objdir/$old_library" + + case "$mode" in + clean) + case " $library_names " in + # " " in the beginning catches empty $dlname + *" $dlname "*) ;; + *) rmfiles="$rmfiles $objdir/$dlname" ;; + esac + test -n "$libdir" && rmfiles="$rmfiles $objdir/$name $objdir/${name}i" + ;; + uninstall) + if test -n "$library_names"; then + # Do each command in the postuninstall commands. + cmds=$postuninstall_cmds + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" + if test "$?" -ne 0 && test "$rmforce" != yes; then + exit_status=1 + fi + done + IFS="$save_ifs" + fi + + if test -n "$old_library"; then + # Do each command in the old_postuninstall commands. + cmds=$old_postuninstall_cmds + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $show "$cmd" + $run eval "$cmd" + if test "$?" -ne 0 && test "$rmforce" != yes; then + exit_status=1 + fi + done + IFS="$save_ifs" + fi + # FIXME: should reinstall the best remaining shared library. + ;; + esac + fi + ;; + + *.lo) + # Possibly a libtool object, so verify it. + if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then + + # Read the .lo file + . $dir/$name + + # Add PIC object to the list of files to remove. + if test -n "$pic_object" \ + && test "$pic_object" != none; then + rmfiles="$rmfiles $dir/$pic_object" + fi + + # Add non-PIC object to the list of files to remove. + if test -n "$non_pic_object" \ + && test "$non_pic_object" != none; then + rmfiles="$rmfiles $dir/$non_pic_object" + fi + fi + ;; + + *) + if test "$mode" = clean ; then + noexename=$name + case $file in + *.exe) + file=`$echo $file|${SED} 's,.exe$,,'` + noexename=`$echo $name|${SED} 's,.exe$,,'` + # $file with .exe has already been added to rmfiles, + # add $file without .exe + rmfiles="$rmfiles $file" + ;; + esac + # Do a test to see if this is a libtool program. + if (${SED} -e '4q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then + relink_command= + . $dir/$noexename + + # note $name still contains .exe if it was in $file originally + # as does the version of $file that was added into $rmfiles + rmfiles="$rmfiles $objdir/$name $objdir/${name}S.${objext}" + if test "$fast_install" = yes && test -n "$relink_command"; then + rmfiles="$rmfiles $objdir/lt-$name" + fi + if test "X$noexename" != "X$name" ; then + rmfiles="$rmfiles $objdir/lt-${noexename}.c" + fi + fi + fi + ;; + esac + $show "$rm $rmfiles" + $run $rm $rmfiles || exit_status=1 + done + objdir="$origobjdir" + + # Try to remove the ${objdir}s in the directories where we deleted files + for dir in $rmdirs; do + if test -d "$dir"; then + $show "rmdir $dir" + $run rmdir $dir >/dev/null 2>&1 + fi + done + + exit $exit_status + ;; + + "") + $echo "$modename: you must specify a MODE" 1>&2 + $echo "$generic_help" 1>&2 + exit $EXIT_FAILURE + ;; + esac + + if test -z "$exec_cmd"; then + $echo "$modename: invalid operation mode \`$mode'" 1>&2 + $echo "$generic_help" 1>&2 + exit $EXIT_FAILURE + fi +fi # test -z "$show_help" + +if test -n "$exec_cmd"; then + eval exec $exec_cmd + exit $EXIT_FAILURE +fi + +# We need to display help for each of the modes. +case $mode in +"") $echo \ +"Usage: $modename [OPTION]... [MODE-ARG]... + +Provide generalized library-building support services. 
+
+ --config show all configuration variables
+ --debug enable verbose shell tracing
+-n, --dry-run display commands without modifying any files
+ --features display basic configuration information and exit
+ --finish same as \`--mode=finish'
+ --help display this help message and exit
+ --mode=MODE use operation mode MODE [default=inferred from MODE-ARGS]
+ --quiet same as \`--silent'
+ --silent don't print informational messages
+ --tag=TAG use configuration variables from tag TAG
+ --version print version information
+
+MODE must be one of the following:
+
+ clean remove files from the build directory
+ compile compile a source file into a libtool object
+ execute automatically set library path, then run a program
+ finish complete the installation of libtool libraries
+ install install libraries or executables
+ link create a library or an executable
+ uninstall remove libraries from an installed directory
+
+MODE-ARGS vary depending on the MODE. Try \`$modename --help --mode=MODE' for
+a more detailed description of MODE.
+
+Report bugs to <bug-libtool@gnu.org>."
+ exit $EXIT_SUCCESS
+ ;;
+
+clean)
+ $echo \
+"Usage: $modename [OPTION]... --mode=clean RM [RM-OPTION]... FILE...
+
+Remove files from the build directory.
+
+RM is the name of the program to use to delete files associated with each FILE
+(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed
+to RM.
+
+If FILE is a libtool library, object or program, all the files associated
+with it are deleted. Otherwise, only FILE itself is deleted using RM."
+ ;;
+
+compile)
+ $echo \
+"Usage: $modename [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE
+
+Compile a source file into a libtool library object.
+
+This mode accepts the following additional options:
+
+ -o OUTPUT-FILE set the output file name to OUTPUT-FILE
+ -prefer-pic try to build PIC objects only
+ -prefer-non-pic try to build non-PIC objects only
+ -static always build a \`.o' file suitable for static linking
+
+COMPILE-COMMAND is a command to be used in creating a \`standard' object file
+from the given SOURCEFILE.
+
+The output file name is determined by removing the directory component from
+SOURCEFILE, then substituting the C source code suffix \`.c' with the
+library object suffix, \`.lo'."
+ ;;
+
+execute)
+ $echo \
+"Usage: $modename [OPTION]... --mode=execute COMMAND [ARGS]...
+
+Automatically set library path, then run a program.
+
+This mode accepts the following additional options:
+
+ -dlopen FILE add the directory containing FILE to the library path
+
+This mode sets the library path environment variable according to \`-dlopen'
+flags.
+
+If any of the ARGS are libtool executable wrappers, then they are translated
+into their corresponding uninstalled binary, and any of their required library
+directories are added to the library path.
+
+Then, COMMAND is executed, with ARGS as arguments."
+ ;;
+
+finish)
+ $echo \
+"Usage: $modename [OPTION]... --mode=finish [LIBDIR]...
+
+Complete the installation of libtool libraries.
+
+Each LIBDIR is a directory that contains libtool libraries.
+
+The commands that this mode executes may require superuser privileges. Use
+the \`--dry-run' option if you just want to see what would be executed."
+ ;;
+
+install)
+ $echo \
+"Usage: $modename [OPTION]... --mode=install INSTALL-COMMAND...
+
+Install executables or libraries.
+
+INSTALL-COMMAND is the installation command. The first component should be
+either the \`install' or \`cp' program.
+ +The rest of the components are interpreted as arguments to that command (only +BSD-compatible install options are recognized)." + ;; + +link) + $echo \ +"Usage: $modename [OPTION]... --mode=link LINK-COMMAND... + +Link object files or libraries together to form another library, or to +create an executable program. + +LINK-COMMAND is a command using the C compiler that you would use to create +a program from several object files. + +The following components of LINK-COMMAND are treated specially: + + -all-static do not do any dynamic linking at all + -avoid-version do not add a version suffix if possible + -dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime + -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols + -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3) + -export-symbols SYMFILE + try to export only the symbols listed in SYMFILE + -export-symbols-regex REGEX + try to export only the symbols matching REGEX + -LLIBDIR search LIBDIR for required installed libraries + -lNAME OUTPUT-FILE requires the installed library libNAME + -module build a library that can dlopened + -no-fast-install disable the fast-install mode + -no-install link a not-installable executable + -no-undefined declare that a library does not refer to external symbols + -o OUTPUT-FILE create OUTPUT-FILE from the specified objects + -objectlist FILE Use a list of object files found in FILE to specify objects + -precious-files-regex REGEX + don't remove output files matching REGEX + -release RELEASE specify package release information + -rpath LIBDIR the created library will eventually be installed in LIBDIR + -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries + -static do not do any dynamic linking of libtool libraries + -version-info CURRENT[:REVISION[:AGE]] + specify library version info [each variable defaults to 0] + +All other options (arguments beginning with \`-') are ignored. + +Every other argument is treated as a filename. Files ending in \`.la' are +treated as uninstalled libtool libraries, other files are standard or library +object files. + +If the OUTPUT-FILE ends in \`.la', then a libtool library is created, +only library objects (\`.lo' files) may be specified, and \`-rpath' is +required, except when creating a convenience library. + +If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created +using \`ar' and \`ranlib', or on Windows using \`lib'. + +If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file +is created, otherwise an executable program is created." + ;; + +uninstall) + $echo \ +"Usage: $modename [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE... + +Remove libraries from an installation directory. + +RM is the name of the program to use to delete files associated with each FILE +(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed +to RM. + +If FILE is a libtool library, all the files associated with it are deleted. +Otherwise, only FILE itself is deleted using RM." + ;; + +*) + $echo "$modename: invalid operation mode \`$mode'" 1>&2 + $echo "$help" 1>&2 + exit $EXIT_FAILURE + ;; +esac + +$echo +$echo "Try \`$modename --help' for more information about other modes." + +exit $? + +# The TAGs below are defined such that we never get into a situation +# in which we disable both kinds of libraries. 
Given conflicting +# choices, we go for a static library, that is the most portable, +# since we can't tell whether shared libraries were disabled because +# the user asked for that or because the platform doesn't support +# them. This is particularly important on AIX, because we don't +# support having both static and shared libraries enabled at the same +# time on that platform, so we default to a shared-only configuration. +# If a disable-shared tag is given, we'll fallback to a static-only +# configuration. But we'll never go from static-only to shared-only. + +# ### BEGIN LIBTOOL TAG CONFIG: disable-shared +disable_libs=shared +# ### END LIBTOOL TAG CONFIG: disable-shared + +# ### BEGIN LIBTOOL TAG CONFIG: disable-static +disable_libs=static +# ### END LIBTOOL TAG CONFIG: disable-static + +# Local Variables: +# mode:shell-script +# sh-indentation:2 +# End: diff --git a/src/native/config/missing b/src/native/config/missing new file mode 100755 index 0000000..894e786 --- /dev/null +++ b/src/native/config/missing @@ -0,0 +1,360 @@ +#! /bin/sh +# Common stub for a few missing GNU programs while installing. + +scriptversion=2005-06-08.21 + +# Copyright (C) 1996, 1997, 1999, 2000, 2002, 2003, 2004, 2005 +# Free Software Foundation, Inc. +# Originally by Fran,cois Pinard , 1996. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +if test $# -eq 0; then + echo 1>&2 "Try \`$0 --help' for more information" + exit 1 +fi + +run=: + +# In the cases where this matters, `missing' is being run in the +# srcdir already. +if test -f configure.ac; then + configure_ac=configure.ac +else + configure_ac=configure.in +fi + +msg="missing on your system" + +case "$1" in +--run) + # Try to run requested program, and just exit if it succeeds. + run= + shift + "$@" && exit 0 + # Exit code 63 means version mismatch. This often happens + # when the user try to use an ancient version of a tool on + # a file that requires a minimum version. In this case we + # we should proceed has if the program had been absent, or + # if --run hadn't been passed. + if test $? = 63; then + run=: + msg="probably too old" + fi + ;; + + -h|--h|--he|--hel|--help) + echo "\ +$0 [OPTION]... PROGRAM [ARGUMENT]... + +Handle \`PROGRAM [ARGUMENT]...' for when PROGRAM is missing, or return an +error status if there is no known handling for PROGRAM. 
+ +Options: + -h, --help display this help and exit + -v, --version output version information and exit + --run try to run the given command, and emulate it if it fails + +Supported PROGRAM values: + aclocal touch file \`aclocal.m4' + autoconf touch file \`configure' + autoheader touch file \`config.h.in' + automake touch all \`Makefile.in' files + bison create \`y.tab.[ch]', if possible, from existing .[ch] + flex create \`lex.yy.c', if possible, from existing .c + help2man touch the output file + lex create \`lex.yy.c', if possible, from existing .c + makeinfo touch the output file + tar try tar, gnutar, gtar, then tar without non-portable flags + yacc create \`y.tab.[ch]', if possible, from existing .[ch] + +Send bug reports to ." + exit $? + ;; + + -v|--v|--ve|--ver|--vers|--versi|--versio|--version) + echo "missing $scriptversion (GNU Automake)" + exit $? + ;; + + -*) + echo 1>&2 "$0: Unknown \`$1' option" + echo 1>&2 "Try \`$0 --help' for more information" + exit 1 + ;; + +esac + +# Now exit if we have it, but it failed. Also exit now if we +# don't have it and --version was passed (most likely to detect +# the program). +case "$1" in + lex|yacc) + # Not GNU programs, they don't have --version. + ;; + + tar) + if test -n "$run"; then + echo 1>&2 "ERROR: \`tar' requires --run" + exit 1 + elif test "x$2" = "x--version" || test "x$2" = "x--help"; then + exit 1 + fi + ;; + + *) + if test -z "$run" && ($1 --version) > /dev/null 2>&1; then + # We have it, but it failed. + exit 1 + elif test "x$2" = "x--version" || test "x$2" = "x--help"; then + # Could not run --version or --help. This is probably someone + # running `$TOOL --version' or `$TOOL --help' to check whether + # $TOOL exists and not knowing $TOOL uses missing. + exit 1 + fi + ;; +esac + +# If it does not exist, or fails to run (possibly an outdated version), +# try to emulate it. +case "$1" in + aclocal*) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified \`acinclude.m4' or \`${configure_ac}'. You might want + to install the \`Automake' and \`Perl' packages. Grab them from + any GNU archive site." + touch aclocal.m4 + ;; + + autoconf) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified \`${configure_ac}'. You might want to install the + \`Autoconf' and \`GNU m4' packages. Grab them from any GNU + archive site." + touch configure + ;; + + autoheader) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified \`acconfig.h' or \`${configure_ac}'. You might want + to install the \`Autoconf' and \`GNU m4' packages. Grab them + from any GNU archive site." + files=`sed -n 's/^[ ]*A[CM]_CONFIG_HEADER(\([^)]*\)).*/\1/p' ${configure_ac}` + test -z "$files" && files="config.h" + touch_files= + for f in $files; do + case "$f" in + *:*) touch_files="$touch_files "`echo "$f" | + sed -e 's/^[^:]*://' -e 's/:.*//'`;; + *) touch_files="$touch_files $f.in";; + esac + done + touch $touch_files + ;; + + automake*) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified \`Makefile.am', \`acinclude.m4' or \`${configure_ac}'. + You might want to install the \`Automake' and \`Perl' packages. + Grab them from any GNU archive site." + find . -type f -name Makefile.am -print | + sed 's/\.am$/.in/' | + while read f; do touch "$f"; done + ;; + + autom4te) + echo 1>&2 "\ +WARNING: \`$1' is needed, but is $msg. + You might have modified some files without having the + proper tools for further handling them. 
+ You can get \`$1' as part of \`Autoconf' from any GNU + archive site." + + file=`echo "$*" | sed -n 's/.*--output[ =]*\([^ ]*\).*/\1/p'` + test -z "$file" && file=`echo "$*" | sed -n 's/.*-o[ ]*\([^ ]*\).*/\1/p'` + if test -f "$file"; then + touch $file + else + test -z "$file" || exec >$file + echo "#! /bin/sh" + echo "# Created by GNU Automake missing as a replacement of" + echo "# $ $@" + echo "exit 0" + chmod +x $file + exit 1 + fi + ;; + + bison|yacc) + echo 1>&2 "\ +WARNING: \`$1' $msg. You should only need it if + you modified a \`.y' file. You may need the \`Bison' package + in order for those modifications to take effect. You can get + \`Bison' from any GNU archive site." + rm -f y.tab.c y.tab.h + if [ $# -ne 1 ]; then + eval LASTARG="\${$#}" + case "$LASTARG" in + *.y) + SRCFILE=`echo "$LASTARG" | sed 's/y$/c/'` + if [ -f "$SRCFILE" ]; then + cp "$SRCFILE" y.tab.c + fi + SRCFILE=`echo "$LASTARG" | sed 's/y$/h/'` + if [ -f "$SRCFILE" ]; then + cp "$SRCFILE" y.tab.h + fi + ;; + esac + fi + if [ ! -f y.tab.h ]; then + echo >y.tab.h + fi + if [ ! -f y.tab.c ]; then + echo 'main() { return 0; }' >y.tab.c + fi + ;; + + lex|flex) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified a \`.l' file. You may need the \`Flex' package + in order for those modifications to take effect. You can get + \`Flex' from any GNU archive site." + rm -f lex.yy.c + if [ $# -ne 1 ]; then + eval LASTARG="\${$#}" + case "$LASTARG" in + *.l) + SRCFILE=`echo "$LASTARG" | sed 's/l$/c/'` + if [ -f "$SRCFILE" ]; then + cp "$SRCFILE" lex.yy.c + fi + ;; + esac + fi + if [ ! -f lex.yy.c ]; then + echo 'main() { return 0; }' >lex.yy.c + fi + ;; + + help2man) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified a dependency of a manual page. You may need the + \`Help2man' package in order for those modifications to take + effect. You can get \`Help2man' from any GNU archive site." + + file=`echo "$*" | sed -n 's/.*-o \([^ ]*\).*/\1/p'` + if test -z "$file"; then + file=`echo "$*" | sed -n 's/.*--output=\([^ ]*\).*/\1/p'` + fi + if [ -f "$file" ]; then + touch $file + else + test -z "$file" || exec >$file + echo ".ab help2man is required to generate this page" + exit 1 + fi + ;; + + makeinfo) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified a \`.texi' or \`.texinfo' file, or any other file + indirectly affecting the aspect of the manual. The spurious + call might also be the consequence of using a buggy \`make' (AIX, + DU, IRIX). You might want to install the \`Texinfo' package or + the \`GNU make' package. Grab either from any GNU archive site." + # The file to touch is that specified with -o ... + file=`echo "$*" | sed -n 's/.*-o \([^ ]*\).*/\1/p'` + if test -z "$file"; then + # ... or it is the one specified with @setfilename ... + infile=`echo "$*" | sed 's/.* \([^ ]*\) *$/\1/'` + file=`sed -n '/^@setfilename/ { s/.* \([^ ]*\) *$/\1/; p; q; }' $infile` + # ... or it is derived from the source name (dir/f.texi becomes f.info) + test -z "$file" && file=`echo "$infile" | sed 's,.*/,,;s,.[^.]*$,,'`.info + fi + # If the file does not exist, the user really needs makeinfo; + # let's fail without touching anything. + test -f $file || exit 1 + touch $file + ;; + + tar) + shift + + # We have already tried tar in the generic part. + # Look for gnutar/gtar before invocation to avoid ugly error + # messages. 
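+    # Illustrative note (not part of the upstream Automake `missing' stub):
+    # for a hypothetical invocation such as
+    #
+    #   ./missing --run tar xof native-libs.tar
+    #
+    # the fallback below tries `gnutar xof native-libs.tar', then
+    # `gtar xof native-libs.tar', and finally plain `tar' again with the
+    # non-portable `o'/`h' flags stripped from the first argument.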
+ if (gnutar --version > /dev/null 2>&1); then + gnutar "$@" && exit 0 + fi + if (gtar --version > /dev/null 2>&1); then + gtar "$@" && exit 0 + fi + firstarg="$1" + if shift; then + case "$firstarg" in + *o*) + firstarg=`echo "$firstarg" | sed s/o//` + tar "$firstarg" "$@" && exit 0 + ;; + esac + case "$firstarg" in + *h*) + firstarg=`echo "$firstarg" | sed s/h//` + tar "$firstarg" "$@" && exit 0 + ;; + esac + fi + + echo 1>&2 "\ +WARNING: I can't seem to be able to run \`tar' with the given arguments. + You may want to install GNU tar or Free paxutils, or check the + command line arguments." + exit 1 + ;; + + *) + echo 1>&2 "\ +WARNING: \`$1' is needed, and is $msg. + You might have modified some files without having the + proper tools for further handling them. Check the \`README' file, + it often tells you about the needed prerequisites for installing + this package. You may also peek at any GNU archive site, in case + some other package would contain this missing \`$1' program." + exit 1 + ;; +esac + +exit 0 + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-end: "$" +# End: diff --git a/src/native/configure b/src/native/configure new file mode 100755 index 0000000..5041e06 --- /dev/null +++ b/src/native/configure @@ -0,0 +1,21284 @@ +#! /bin/sh +# Guess values for system-dependent variables and create Makefiles. +# Generated by GNU Autoconf 2.59. +# +# Copyright (C) 2003 Free Software Foundation, Inc. +# This configure script is free software; the Free Software Foundation +# gives unlimited permission to copy, distribute and modify it. +## --------------------- ## +## M4sh Initialization. ## +## --------------------- ## + +# Be Bourne compatible +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' +elif test -n "${BASH_VERSION+set}" && (set -o posix) >/dev/null 2>&1; then + set -o posix +fi +DUALCASE=1; export DUALCASE # for MKS sh + +# Support unset when possible. +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + as_unset=unset +else + as_unset=false +fi + + +# Work around bugs in pre-3.0 UWIN ksh. +$as_unset ENV MAIL MAILPATH +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +for as_var in \ + LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \ + LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \ + LC_TELEPHONE LC_TIME +do + if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then + eval $as_var=C; export $as_var + else + $as_unset $as_var + fi +done + +# Required to use basename. +if expr a : '\(a\)' >/dev/null 2>&1; then + as_expr=expr +else + as_expr=false +fi + +if (basename /) >/dev/null 2>&1 && test "X`basename / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + + +# Name of the executable. +as_me=`$as_basename "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)$' \| \ + . : '\(.\)' 2>/dev/null || +echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/; q; } + /^X\/\(\/\/\)$/{ s//\1/; q; } + /^X\/\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + + +# PATH needs CR, and LINENO needs CR and PATH. +# Avoid depending upon Character Ranges. 
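+# Illustrative note (not part of the generated script): the alphabets below
+# are spelled out character by character because a bracket range such as
+# [a-z] follows the locale's collation order in older sed/tr implementations.
+# The variables feed sed programs later in this script, for example the
+# C-preprocessor name mangling
+#
+#   sed "y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g"
+#
+# which upper-cases letters (mapping `*' to `P') and rewrites any character
+# that is not alphanumeric or `_' to `_'.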
+as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + echo "#! /bin/sh" >conf$$.sh + echo "exit 0" >>conf$$.sh + chmod +x conf$$.sh + if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then + PATH_SEPARATOR=';' + else + PATH_SEPARATOR=: + fi + rm -f conf$$.sh +fi + + + as_lineno_1=$LINENO + as_lineno_2=$LINENO + as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null` + test "x$as_lineno_1" != "x$as_lineno_2" && + test "x$as_lineno_3" = "x$as_lineno_2" || { + # Find who we are. Look in the path if we contain no path at all + # relative or not. + case $0 in + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break +done + + ;; + esac + # We did not find ourselves, most probably we were run as `sh COMMAND' + # in which case we are not to be found in the path. + if test "x$as_myself" = x; then + as_myself=$0 + fi + if test ! -f "$as_myself"; then + { echo "$as_me: error: cannot find myself; rerun with an absolute path" >&2 + { (exit 1); exit 1; }; } + fi + case $CONFIG_SHELL in + '') + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for as_base in sh bash ksh sh5; do + case $as_dir in + /*) + if ("$as_dir/$as_base" -c ' + as_lineno_1=$LINENO + as_lineno_2=$LINENO + as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null` + test "x$as_lineno_1" != "x$as_lineno_2" && + test "x$as_lineno_3" = "x$as_lineno_2" ') 2>/dev/null; then + $as_unset BASH_ENV || test "${BASH_ENV+set}" != set || { BASH_ENV=; export BASH_ENV; } + $as_unset ENV || test "${ENV+set}" != set || { ENV=; export ENV; } + CONFIG_SHELL=$as_dir/$as_base + export CONFIG_SHELL + exec "$CONFIG_SHELL" "$0" ${1+"$@"} + fi;; + esac + done +done +;; + esac + + # Create $as_me.lineno as a copy of $as_myself, but with $LINENO + # uniformly replaced by the line number. The first 'sed' inserts a + # line-number line before each line; the second 'sed' does the real + # work. The second script uses 'N' to pair each line-number line + # with the numbered line, and appends trailing '-' during + # substitution so that $LINENO is not a special case at line end. + # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the + # second 'sed' script. Blame Lee E. McMahon for sed's syntax. :-) + sed '=' <$as_myself | + sed ' + N + s,$,-, + : loop + s,^\(['$as_cr_digits']*\)\(.*\)[$]LINENO\([^'$as_cr_alnum'_]\),\1\2\1\3, + t loop + s,-$,, + s,^['$as_cr_digits']*\n,, + ' >$as_me.lineno && + chmod +x $as_me.lineno || + { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2 + { (exit 1); exit 1; }; } + + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensible to this). + . ./$as_me.lineno + # Exit status is that of the last command. 
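+  # Illustrative note (not part of the generated script): the two sed passes
+  # above hard-code line numbers for shells that lack $LINENO, so a
+  # hypothetical line 42 reading
+  #
+  #   echo "configure error near line $LINENO"
+  #
+  # appears in $as_me.lineno as
+  #
+  #   echo "configure error near line 42"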
+ exit +} + + +case `echo "testing\c"; echo 1,2,3`,`echo -n testing; echo 1,2,3` in + *c*,-n*) ECHO_N= ECHO_C=' +' ECHO_T=' ' ;; + *c*,* ) ECHO_N=-n ECHO_C= ECHO_T= ;; + *) ECHO_N= ECHO_C='\c' ECHO_T= ;; +esac + +if expr a : '\(a\)' >/dev/null 2>&1; then + as_expr=expr +else + as_expr=false +fi + +rm -f conf$$ conf$$.exe conf$$.file +echo >conf$$.file +if ln -s conf$$.file conf$$ 2>/dev/null; then + # We could just check for DJGPP; but this test a) works b) is more generic + # and c) will remain valid once DJGPP supports symlinks (DJGPP 2.04). + if test -f conf$$.exe; then + # Don't use ln at all; we don't have any links + as_ln_s='cp -p' + else + as_ln_s='ln -s' + fi +elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln +else + as_ln_s='cp -p' +fi +rm -f conf$$ conf$$.exe conf$$.file + +if mkdir -p . 2>/dev/null; then + as_mkdir_p=: +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +as_executable_p="test -f" + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +# IFS +# We need space, tab and new line, in precisely that order. +as_nl=' +' +IFS=" $as_nl" + +# CDPATH. +$as_unset CDPATH + + + +# Check that we are running under the correct shell. +SHELL=${CONFIG_SHELL-/bin/sh} + +case X$ECHO in +X*--fallback-echo) + # Remove one level of quotation (which was required for Make). + ECHO=`echo "$ECHO" | sed 's,\\\\\$\\$0,'$0','` + ;; +esac + +echo=${ECHO-echo} +if test "X$1" = X--no-reexec; then + # Discard the --no-reexec flag, and continue. + shift +elif test "X$1" = X--fallback-echo; then + # Avoid inline document here, it may be left over + : +elif test "X`($echo '\t') 2>/dev/null`" = 'X\t' ; then + # Yippee, $echo works! + : +else + # Restart under the correct shell. + exec $SHELL "$0" --no-reexec ${1+"$@"} +fi + +if test "X$1" = X--fallback-echo; then + # used as fallback echo + shift + cat </dev/null 2>&1 && unset CDPATH + +if test -z "$ECHO"; then +if test "X${echo_test_string+set}" != Xset; then +# find a string as large as possible, as long as the shell can cope with it + for cmd in 'sed 50q "$0"' 'sed 20q "$0"' 'sed 10q "$0"' 'sed 2q "$0"' 'echo test'; do + # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ... + if (echo_test_string=`eval $cmd`) 2>/dev/null && + echo_test_string=`eval $cmd` && + (test "X$echo_test_string" = "X$echo_test_string") 2>/dev/null + then + break + fi + done +fi + +if test "X`($echo '\t') 2>/dev/null`" = 'X\t' && + echo_testing_string=`($echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + : +else + # The Solaris, AIX, and Digital Unix default echo programs unquote + # backslashes. This makes it impossible to quote backslashes using + # echo "$something" | sed 's/\\/\\\\/g' + # + # So, first we look for a working echo in the user's PATH. + + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for dir in $PATH /usr/ucb; do + IFS="$lt_save_ifs" + if (test -f $dir/echo || test -f $dir/echo$ac_exeext) && + test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' && + echo_testing_string=`($dir/echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + echo="$dir/echo" + break + fi + done + IFS="$lt_save_ifs" + + if test "X$echo" = Xecho; then + # We didn't find a better echo, so look for alternatives. 
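+    # Illustrative note (not part of the generated script): the probe being
+    # repeated here is whether
+    #
+    #   $echo '\t'
+    #
+    # prints the two characters backslash-t (what this script needs) or a
+    # literal tab (what SysV-style echos such as Solaris /bin/echo emit).
+    # When the default echo fails the test, the branches below fall back to
+    # `print -r', a ksh re-exec, or `printf %s\n'.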
+ if test "X`(print -r '\t') 2>/dev/null`" = 'X\t' && + echo_testing_string=`(print -r "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + # This shell has a builtin print -r that does the trick. + echo='print -r' + elif (test -f /bin/ksh || test -f /bin/ksh$ac_exeext) && + test "X$CONFIG_SHELL" != X/bin/ksh; then + # If we have ksh, try running configure again with it. + ORIGINAL_CONFIG_SHELL=${CONFIG_SHELL-/bin/sh} + export ORIGINAL_CONFIG_SHELL + CONFIG_SHELL=/bin/ksh + export CONFIG_SHELL + exec $CONFIG_SHELL "$0" --no-reexec ${1+"$@"} + else + # Try using printf. + echo='printf %s\n' + if test "X`($echo '\t') 2>/dev/null`" = 'X\t' && + echo_testing_string=`($echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + # Cool, printf works + : + elif echo_testing_string=`($ORIGINAL_CONFIG_SHELL "$0" --fallback-echo '\t') 2>/dev/null` && + test "X$echo_testing_string" = 'X\t' && + echo_testing_string=`($ORIGINAL_CONFIG_SHELL "$0" --fallback-echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + CONFIG_SHELL=$ORIGINAL_CONFIG_SHELL + export CONFIG_SHELL + SHELL="$CONFIG_SHELL" + export SHELL + echo="$CONFIG_SHELL $0 --fallback-echo" + elif echo_testing_string=`($CONFIG_SHELL "$0" --fallback-echo '\t') 2>/dev/null` && + test "X$echo_testing_string" = 'X\t' && + echo_testing_string=`($CONFIG_SHELL "$0" --fallback-echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + echo="$CONFIG_SHELL $0 --fallback-echo" + else + # maybe with a smaller string... + prev=: + + for cmd in 'echo test' 'sed 2q "$0"' 'sed 10q "$0"' 'sed 20q "$0"' 'sed 50q "$0"'; do + if (test "X$echo_test_string" = "X`eval $cmd`") 2>/dev/null + then + break + fi + prev="$cmd" + done + + if test "$prev" != 'sed 50q "$0"'; then + echo_test_string=`eval $prev` + export echo_test_string + exec ${ORIGINAL_CONFIG_SHELL-${CONFIG_SHELL-/bin/sh}} "$0" ${1+"$@"} + else + # Oops. We lost completely, so just stick with echo. + echo=echo + fi + fi + fi + fi +fi +fi + +# Copy echo and quote the copy suitably for passing to libtool from +# the Makefile, instead of quoting the original, which is used later. +ECHO=$echo +if test "X$ECHO" = "X$CONFIG_SHELL $0 --fallback-echo"; then + ECHO="$CONFIG_SHELL \\\$\$0 --fallback-echo" +fi + + + + +tagnames=${tagnames+${tagnames},}CXX + +tagnames=${tagnames+${tagnames},}F77 + +# Name of the host. +# hostname on some systems (SVR3.2, Linux) returns a bogus exit status, +# so uname gets run too. +ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` + +exec 6>&1 + +# +# Initializations. +# +ac_default_prefix=/usr/local +ac_config_libobj_dir=. +cross_compiling=no +subdirs= +MFLAGS= +MAKEFLAGS= +SHELL=${CONFIG_SHELL-/bin/sh} + +# Maximum number of lines to put in a shell here document. +# This variable seems obsolete. It should probably be removed, and +# only ac_max_sed_lines should be used. +: ${ac_max_here_lines=38} + +# Identity of this package. +PACKAGE_NAME= +PACKAGE_TARNAME= +PACKAGE_VERSION= +PACKAGE_STRING= +PACKAGE_BUGREPORT= + +ac_unique_file="src/org_apache_hadoop.h" +ac_unique_file="src/org_apache_hadoop.h" +# Factoring default headers for most tests. 
+ac_includes_default="\ +#include +#if HAVE_SYS_TYPES_H +# include +#endif +#if HAVE_SYS_STAT_H +# include +#endif +#if STDC_HEADERS +# include +# include +#else +# if HAVE_STDLIB_H +# include +# endif +#endif +#if HAVE_STRING_H +# if !STDC_HEADERS && HAVE_MEMORY_H +# include +# endif +# include +#endif +#if HAVE_STRINGS_H +# include +#endif +#if HAVE_INTTYPES_H +# include +#else +# if HAVE_STDINT_H +# include +# endif +#endif +#if HAVE_UNISTD_H +# include +#endif" + +ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS INSTALL_PROGRAM INSTALL_SCRIPT INSTALL_DATA CYGPATH_W PACKAGE VERSION ACLOCAL AUTOCONF AUTOMAKE AUTOHEADER MAKEINFO install_sh STRIP ac_ct_STRIP INSTALL_STRIP_PROGRAM mkdir_p AWK SET_MAKE am__leading_dot AMTAR am__tar am__untar CC CFLAGS LDFLAGS CPPFLAGS ac_ct_CC EXEEXT OBJEXT DEPDIR am__include am__quote AMDEP_TRUE AMDEP_FALSE AMDEPBACKSLASH CCDEPMODE am__fastdepCC_TRUE am__fastdepCC_FALSE build build_cpu build_vendor build_os host host_cpu host_vendor host_os SED EGREP LN_S ECHO AR ac_ct_AR RANLIB ac_ct_RANLIB CPP CXX CXXFLAGS ac_ct_CXX CXXDEPMODE am__fastdepCXX_TRUE am__fastdepCXX_FALSE CXXCPP F77 FFLAGS ac_ct_F77 LIBTOOL JNI_LDFLAGS JNI_CPPFLAGS LIBOBJS LTLIBOBJS' +ac_subst_files='' + +# Initialize some variables set by options. +ac_init_help= +ac_init_version=false +# The variables have the same names as the options, with +# dashes changed to underlines. +cache_file=/dev/null +exec_prefix=NONE +no_create= +no_recursion= +prefix=NONE +program_prefix=NONE +program_suffix=NONE +program_transform_name=s,x,x, +silent= +site= +srcdir= +verbose= +x_includes=NONE +x_libraries=NONE + +# Installation directory options. +# These are left unexpanded so users can "make install exec_prefix=/foo" +# and all the variables that are supposed to be based on exec_prefix +# by default will actually change. +# Use braces instead of parens because sh, perl, etc. also accept them. +bindir='${exec_prefix}/bin' +sbindir='${exec_prefix}/sbin' +libexecdir='${exec_prefix}/libexec' +datadir='${prefix}/share' +sysconfdir='${prefix}/etc' +sharedstatedir='${prefix}/com' +localstatedir='${prefix}/var' +libdir='${exec_prefix}/lib' +includedir='${prefix}/include' +oldincludedir='/usr/include' +infodir='${prefix}/info' +mandir='${prefix}/man' + +ac_prev= +for ac_option +do + # If the previous option needs an argument, assign it. + if test -n "$ac_prev"; then + eval "$ac_prev=\$ac_option" + ac_prev= + continue + fi + + ac_optarg=`expr "x$ac_option" : 'x[^=]*=\(.*\)'` + + # Accept the important Cygnus configure options, so we can diagnose typos. 
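+  # Illustrative note (not part of the generated script): the case statement
+  # below is what handles an invocation such as the hypothetical
+  #
+  #   ./configure --prefix=/usr/local --enable-static --with-pic CFLAGS=-O2
+  #
+  # --prefix sets $prefix, --enable-static becomes enable_static=yes,
+  # --with-pic becomes with_pic=yes, and CFLAGS=-O2 is exported so the
+  # precious-variable checks further down can record it.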
+ + case $ac_option in + + -bindir | --bindir | --bindi | --bind | --bin | --bi) + ac_prev=bindir ;; + -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) + bindir=$ac_optarg ;; + + -build | --build | --buil | --bui | --bu) + ac_prev=build_alias ;; + -build=* | --build=* | --buil=* | --bui=* | --bu=*) + build_alias=$ac_optarg ;; + + -cache-file | --cache-file | --cache-fil | --cache-fi \ + | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) + ac_prev=cache_file ;; + -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ + | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) + cache_file=$ac_optarg ;; + + --config-cache | -C) + cache_file=config.cache ;; + + -datadir | --datadir | --datadi | --datad | --data | --dat | --da) + ac_prev=datadir ;; + -datadir=* | --datadir=* | --datadi=* | --datad=* | --data=* | --dat=* \ + | --da=*) + datadir=$ac_optarg ;; + + -disable-* | --disable-*) + ac_feature=`expr "x$ac_option" : 'x-*disable-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_feature" : ".*[^-_$as_cr_alnum]" >/dev/null && + { echo "$as_me: error: invalid feature name: $ac_feature" >&2 + { (exit 1); exit 1; }; } + ac_feature=`echo $ac_feature | sed 's/-/_/g'` + eval "enable_$ac_feature=no" ;; + + -enable-* | --enable-*) + ac_feature=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_feature" : ".*[^-_$as_cr_alnum]" >/dev/null && + { echo "$as_me: error: invalid feature name: $ac_feature" >&2 + { (exit 1); exit 1; }; } + ac_feature=`echo $ac_feature | sed 's/-/_/g'` + case $ac_option in + *=*) ac_optarg=`echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"`;; + *) ac_optarg=yes ;; + esac + eval "enable_$ac_feature='$ac_optarg'" ;; + + -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ + | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ + | --exec | --exe | --ex) + ac_prev=exec_prefix ;; + -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ + | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ + | --exec=* | --exe=* | --ex=*) + exec_prefix=$ac_optarg ;; + + -gas | --gas | --ga | --g) + # Obsolete; use --with-gas. 
+ with_gas=yes ;; + + -help | --help | --hel | --he | -h) + ac_init_help=long ;; + -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) + ac_init_help=recursive ;; + -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) + ac_init_help=short ;; + + -host | --host | --hos | --ho) + ac_prev=host_alias ;; + -host=* | --host=* | --hos=* | --ho=*) + host_alias=$ac_optarg ;; + + -includedir | --includedir | --includedi | --included | --include \ + | --includ | --inclu | --incl | --inc) + ac_prev=includedir ;; + -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ + | --includ=* | --inclu=* | --incl=* | --inc=*) + includedir=$ac_optarg ;; + + -infodir | --infodir | --infodi | --infod | --info | --inf) + ac_prev=infodir ;; + -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) + infodir=$ac_optarg ;; + + -libdir | --libdir | --libdi | --libd) + ac_prev=libdir ;; + -libdir=* | --libdir=* | --libdi=* | --libd=*) + libdir=$ac_optarg ;; + + -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ + | --libexe | --libex | --libe) + ac_prev=libexecdir ;; + -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ + | --libexe=* | --libex=* | --libe=*) + libexecdir=$ac_optarg ;; + + -localstatedir | --localstatedir | --localstatedi | --localstated \ + | --localstate | --localstat | --localsta | --localst \ + | --locals | --local | --loca | --loc | --lo) + ac_prev=localstatedir ;; + -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ + | --localstate=* | --localstat=* | --localsta=* | --localst=* \ + | --locals=* | --local=* | --loca=* | --loc=* | --lo=*) + localstatedir=$ac_optarg ;; + + -mandir | --mandir | --mandi | --mand | --man | --ma | --m) + ac_prev=mandir ;; + -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) + mandir=$ac_optarg ;; + + -nfp | --nfp | --nf) + # Obsolete; use --without-fp. 
+ with_fp=no ;; + + -no-create | --no-create | --no-creat | --no-crea | --no-cre \ + | --no-cr | --no-c | -n) + no_create=yes ;; + + -no-recursion | --no-recursion | --no-recursio | --no-recursi \ + | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) + no_recursion=yes ;; + + -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ + | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ + | --oldin | --oldi | --old | --ol | --o) + ac_prev=oldincludedir ;; + -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ + | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ + | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) + oldincludedir=$ac_optarg ;; + + -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) + ac_prev=prefix ;; + -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) + prefix=$ac_optarg ;; + + -program-prefix | --program-prefix | --program-prefi | --program-pref \ + | --program-pre | --program-pr | --program-p) + ac_prev=program_prefix ;; + -program-prefix=* | --program-prefix=* | --program-prefi=* \ + | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) + program_prefix=$ac_optarg ;; + + -program-suffix | --program-suffix | --program-suffi | --program-suff \ + | --program-suf | --program-su | --program-s) + ac_prev=program_suffix ;; + -program-suffix=* | --program-suffix=* | --program-suffi=* \ + | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) + program_suffix=$ac_optarg ;; + + -program-transform-name | --program-transform-name \ + | --program-transform-nam | --program-transform-na \ + | --program-transform-n | --program-transform- \ + | --program-transform | --program-transfor \ + | --program-transfo | --program-transf \ + | --program-trans | --program-tran \ + | --progr-tra | --program-tr | --program-t) + ac_prev=program_transform_name ;; + -program-transform-name=* | --program-transform-name=* \ + | --program-transform-nam=* | --program-transform-na=* \ + | --program-transform-n=* | --program-transform-=* \ + | --program-transform=* | --program-transfor=* \ + | --program-transfo=* | --program-transf=* \ + | --program-trans=* | --program-tran=* \ + | --progr-tra=* | --program-tr=* | --program-t=*) + program_transform_name=$ac_optarg ;; + + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + silent=yes ;; + + -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) + ac_prev=sbindir ;; + -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ + | --sbi=* | --sb=*) + sbindir=$ac_optarg ;; + + -sharedstatedir | --sharedstatedir | --sharedstatedi \ + | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ + | --sharedst | --shareds | --shared | --share | --shar \ + | --sha | --sh) + ac_prev=sharedstatedir ;; + -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ + | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ + | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ + | --sha=* | --sh=*) + sharedstatedir=$ac_optarg ;; + + -site | --site | --sit) + ac_prev=site ;; + -site=* | --site=* | --sit=*) + site=$ac_optarg ;; + + -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) + ac_prev=srcdir ;; + -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) + srcdir=$ac_optarg ;; + + -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ + | --syscon | --sysco | --sysc | --sys | --sy) + 
ac_prev=sysconfdir ;; + -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ + | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) + sysconfdir=$ac_optarg ;; + + -target | --target | --targe | --targ | --tar | --ta | --t) + ac_prev=target_alias ;; + -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) + target_alias=$ac_optarg ;; + + -v | -verbose | --verbose | --verbos | --verbo | --verb) + verbose=yes ;; + + -version | --version | --versio | --versi | --vers | -V) + ac_init_version=: ;; + + -with-* | --with-*) + ac_package=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_package" : ".*[^-_$as_cr_alnum]" >/dev/null && + { echo "$as_me: error: invalid package name: $ac_package" >&2 + { (exit 1); exit 1; }; } + ac_package=`echo $ac_package| sed 's/-/_/g'` + case $ac_option in + *=*) ac_optarg=`echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"`;; + *) ac_optarg=yes ;; + esac + eval "with_$ac_package='$ac_optarg'" ;; + + -without-* | --without-*) + ac_package=`expr "x$ac_option" : 'x-*without-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_package" : ".*[^-_$as_cr_alnum]" >/dev/null && + { echo "$as_me: error: invalid package name: $ac_package" >&2 + { (exit 1); exit 1; }; } + ac_package=`echo $ac_package | sed 's/-/_/g'` + eval "with_$ac_package=no" ;; + + --x) + # Obsolete; use --with-x. + with_x=yes ;; + + -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ + | --x-incl | --x-inc | --x-in | --x-i) + ac_prev=x_includes ;; + -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ + | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) + x_includes=$ac_optarg ;; + + -x-libraries | --x-libraries | --x-librarie | --x-librari \ + | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) + ac_prev=x_libraries ;; + -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ + | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) + x_libraries=$ac_optarg ;; + + -*) { echo "$as_me: error: unrecognized option: $ac_option +Try \`$0 --help' for more information." >&2 + { (exit 1); exit 1; }; } + ;; + + *=*) + ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` + # Reject names that are not valid shell variable names. + expr "x$ac_envvar" : ".*[^_$as_cr_alnum]" >/dev/null && + { echo "$as_me: error: invalid variable name: $ac_envvar" >&2 + { (exit 1); exit 1; }; } + ac_optarg=`echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` + eval "$ac_envvar='$ac_optarg'" + export $ac_envvar ;; + + *) + # FIXME: should be removed in autoconf 3.0. + echo "$as_me: WARNING: you should use --build, --host, --target" >&2 + expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && + echo "$as_me: WARNING: invalid host type: $ac_option" >&2 + : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option} + ;; + + esac +done + +if test -n "$ac_prev"; then + ac_option=--`echo $ac_prev | sed 's/_/-/g'` + { echo "$as_me: error: missing argument to $ac_option" >&2 + { (exit 1); exit 1; }; } +fi + +# Be sure to have absolute paths. +for ac_var in exec_prefix prefix +do + eval ac_val=$`echo $ac_var` + case $ac_val in + [\\/$]* | ?:[\\/]* | NONE | '' ) ;; + *) { echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2 + { (exit 1); exit 1; }; };; + esac +done + +# Be sure to have absolute paths. 
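+# Illustrative note (not part of the generated script): the loop below rejects
+# relative directory arguments, so a hypothetical
+#
+#   ./configure --bindir=build/bin
+#
+# stops with "expected an absolute directory name for --bindir: build/bin",
+# while values beginning with `/' (or `${exec_prefix}'-style references) pass.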
+for ac_var in bindir sbindir libexecdir datadir sysconfdir sharedstatedir \ + localstatedir libdir includedir oldincludedir infodir mandir +do + eval ac_val=$`echo $ac_var` + case $ac_val in + [\\/$]* | ?:[\\/]* ) ;; + *) { echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2 + { (exit 1); exit 1; }; };; + esac +done + +# There might be people who depend on the old broken behavior: `$host' +# used to hold the argument of --host etc. +# FIXME: To remove some day. +build=$build_alias +host=$host_alias +target=$target_alias + +# FIXME: To remove some day. +if test "x$host_alias" != x; then + if test "x$build_alias" = x; then + cross_compiling=maybe + echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host. + If a cross compiler is detected then cross compile mode will be used." >&2 + elif test "x$build_alias" != "x$host_alias"; then + cross_compiling=yes + fi +fi + +ac_tool_prefix= +test -n "$host_alias" && ac_tool_prefix=$host_alias- + +test "$silent" = yes && exec 6>/dev/null + + +# Find the source files, if location was not specified. +if test -z "$srcdir"; then + ac_srcdir_defaulted=yes + # Try the directory containing this script, then its parent. + ac_confdir=`(dirname "$0") 2>/dev/null || +$as_expr X"$0" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$0" : 'X\(//\)[^/]' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || +echo X"$0" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + srcdir=$ac_confdir + if test ! -r $srcdir/$ac_unique_file; then + srcdir=.. + fi +else + ac_srcdir_defaulted=no +fi +if test ! -r $srcdir/$ac_unique_file; then + if test "$ac_srcdir_defaulted" = yes; then + { echo "$as_me: error: cannot find sources ($ac_unique_file) in $ac_confdir or .." 
>&2 + { (exit 1); exit 1; }; } + else + { echo "$as_me: error: cannot find sources ($ac_unique_file) in $srcdir" >&2 + { (exit 1); exit 1; }; } + fi +fi +(cd $srcdir && test -r ./$ac_unique_file) 2>/dev/null || + { echo "$as_me: error: sources are in $srcdir, but \`cd $srcdir' does not work" >&2 + { (exit 1); exit 1; }; } +srcdir=`echo "$srcdir" | sed 's%\([^\\/]\)[\\/]*$%\1%'` +ac_env_build_alias_set=${build_alias+set} +ac_env_build_alias_value=$build_alias +ac_cv_env_build_alias_set=${build_alias+set} +ac_cv_env_build_alias_value=$build_alias +ac_env_host_alias_set=${host_alias+set} +ac_env_host_alias_value=$host_alias +ac_cv_env_host_alias_set=${host_alias+set} +ac_cv_env_host_alias_value=$host_alias +ac_env_target_alias_set=${target_alias+set} +ac_env_target_alias_value=$target_alias +ac_cv_env_target_alias_set=${target_alias+set} +ac_cv_env_target_alias_value=$target_alias +ac_env_CC_set=${CC+set} +ac_env_CC_value=$CC +ac_cv_env_CC_set=${CC+set} +ac_cv_env_CC_value=$CC +ac_env_CFLAGS_set=${CFLAGS+set} +ac_env_CFLAGS_value=$CFLAGS +ac_cv_env_CFLAGS_set=${CFLAGS+set} +ac_cv_env_CFLAGS_value=$CFLAGS +ac_env_LDFLAGS_set=${LDFLAGS+set} +ac_env_LDFLAGS_value=$LDFLAGS +ac_cv_env_LDFLAGS_set=${LDFLAGS+set} +ac_cv_env_LDFLAGS_value=$LDFLAGS +ac_env_CPPFLAGS_set=${CPPFLAGS+set} +ac_env_CPPFLAGS_value=$CPPFLAGS +ac_cv_env_CPPFLAGS_set=${CPPFLAGS+set} +ac_cv_env_CPPFLAGS_value=$CPPFLAGS +ac_env_CPP_set=${CPP+set} +ac_env_CPP_value=$CPP +ac_cv_env_CPP_set=${CPP+set} +ac_cv_env_CPP_value=$CPP +ac_env_CXX_set=${CXX+set} +ac_env_CXX_value=$CXX +ac_cv_env_CXX_set=${CXX+set} +ac_cv_env_CXX_value=$CXX +ac_env_CXXFLAGS_set=${CXXFLAGS+set} +ac_env_CXXFLAGS_value=$CXXFLAGS +ac_cv_env_CXXFLAGS_set=${CXXFLAGS+set} +ac_cv_env_CXXFLAGS_value=$CXXFLAGS +ac_env_CXXCPP_set=${CXXCPP+set} +ac_env_CXXCPP_value=$CXXCPP +ac_cv_env_CXXCPP_set=${CXXCPP+set} +ac_cv_env_CXXCPP_value=$CXXCPP +ac_env_F77_set=${F77+set} +ac_env_F77_value=$F77 +ac_cv_env_F77_set=${F77+set} +ac_cv_env_F77_value=$F77 +ac_env_FFLAGS_set=${FFLAGS+set} +ac_env_FFLAGS_value=$FFLAGS +ac_cv_env_FFLAGS_set=${FFLAGS+set} +ac_cv_env_FFLAGS_value=$FFLAGS + +# +# Report the --help message. +# +if test "$ac_init_help" = "long"; then + # Omit some internal or obsolete options to make the list less imposing. + # This message is too long to be a string in the A/UX 3.1 sh. + cat <<_ACEOF +\`configure' configures this package to adapt to many kinds of systems. + +Usage: $0 [OPTION]... [VAR=VALUE]... + +To assign environment variables (e.g., CC, CFLAGS...), specify them as +VAR=VALUE. See below for descriptions of some of the useful variables. + +Defaults for the options are specified in brackets. + +Configuration: + -h, --help display this help and exit + --help=short display options specific to this package + --help=recursive display the short help of all the included packages + -V, --version display version information and exit + -q, --quiet, --silent do not print \`checking...' messages + --cache-file=FILE cache test results in FILE [disabled] + -C, --config-cache alias for \`--cache-file=config.cache' + -n, --no-create do not create output files + --srcdir=DIR find the sources in DIR [configure dir or \`..'] + +_ACEOF + + cat <<_ACEOF +Installation directories: + --prefix=PREFIX install architecture-independent files in PREFIX + [$ac_default_prefix] + --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX + [PREFIX] + +By default, \`make install' will install all the files in +\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. 
You can specify +an installation prefix other than \`$ac_default_prefix' using \`--prefix', +for instance \`--prefix=\$HOME'. + +For better control, use the options below. + +Fine tuning of the installation directories: + --bindir=DIR user executables [EPREFIX/bin] + --sbindir=DIR system admin executables [EPREFIX/sbin] + --libexecdir=DIR program executables [EPREFIX/libexec] + --datadir=DIR read-only architecture-independent data [PREFIX/share] + --sysconfdir=DIR read-only single-machine data [PREFIX/etc] + --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] + --localstatedir=DIR modifiable single-machine data [PREFIX/var] + --libdir=DIR object code libraries [EPREFIX/lib] + --includedir=DIR C header files [PREFIX/include] + --oldincludedir=DIR C header files for non-gcc [/usr/include] + --infodir=DIR info documentation [PREFIX/info] + --mandir=DIR man documentation [PREFIX/man] +_ACEOF + + cat <<\_ACEOF + +Program names: + --program-prefix=PREFIX prepend PREFIX to installed program names + --program-suffix=SUFFIX append SUFFIX to installed program names + --program-transform-name=PROGRAM run sed PROGRAM on installed program names + +System types: + --build=BUILD configure for building on BUILD [guessed] + --host=HOST cross-compile to build programs to run on HOST [BUILD] +_ACEOF +fi + +if test -n "$ac_init_help"; then + + cat <<\_ACEOF + +Optional Features: + --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) + --enable-FEATURE[=ARG] include FEATURE [ARG=yes] + --disable-dependency-tracking speeds up one-time build + --enable-dependency-tracking do not reject slow dependency extractors + --enable-shared[=PKGS] + build shared libraries [default=yes] + --enable-static[=PKGS] + build static libraries [default=yes] + --enable-fast-install[=PKGS] + optimize for fast installation [default=yes] + --disable-libtool-lock avoid locking (might break parallel builds) + +Optional Packages: + --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] + --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) + --with-gnu-ld assume the C compiler uses GNU ld [default=no] + --with-pic try to use only PIC/non-PIC objects [default=use + both] + --with-tags[=TAGS] + include additional configurations [automatic] + +Some influential environment variables: + CC C compiler command + CFLAGS C compiler flags + LDFLAGS linker flags, e.g. -L if you have libraries in a + nonstandard directory + CPPFLAGS C/C++ preprocessor flags, e.g. -I if you have + headers in a nonstandard directory + CPP C preprocessor + CXX C++ compiler command + CXXFLAGS C++ compiler flags + CXXCPP C++ preprocessor + F77 Fortran 77 compiler command + FFLAGS Fortran 77 compiler flags + +Use these variables to override the choices made by `configure' or to help +it to find libraries and programs with nonstandard names/locations. + +_ACEOF +fi + +if test "$ac_init_help" = "recursive"; then + # If there are subdirs, report their specific --help. + ac_popdir=`pwd` + for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue + test -d $ac_dir || continue + ac_builddir=. + +if test "$ac_dir" != .; then + ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'` + # A "../" for each directory in $ac_dir_suffix. + ac_top_builddir=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,../,g'` +else + ac_dir_suffix= ac_top_builddir= +fi + +case $srcdir in + .) # No --srcdir option. We are building in place. + ac_srcdir=. + if test -z "$ac_top_builddir"; then + ac_top_srcdir=. 
+ else + ac_top_srcdir=`echo $ac_top_builddir | sed 's,/$,,'` + fi ;; + [\\/]* | ?:[\\/]* ) # Absolute path. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir ;; + *) # Relative path. + ac_srcdir=$ac_top_builddir$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_builddir$srcdir ;; +esac + +# Do not use `cd foo && pwd` to compute absolute paths, because +# the directories may not exist. +case `pwd` in +.) ac_abs_builddir="$ac_dir";; +*) + case "$ac_dir" in + .) ac_abs_builddir=`pwd`;; + [\\/]* | ?:[\\/]* ) ac_abs_builddir="$ac_dir";; + *) ac_abs_builddir=`pwd`/"$ac_dir";; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_top_builddir=${ac_top_builddir}.;; +*) + case ${ac_top_builddir}. in + .) ac_abs_top_builddir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_top_builddir=${ac_top_builddir}.;; + *) ac_abs_top_builddir=$ac_abs_builddir/${ac_top_builddir}.;; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_srcdir=$ac_srcdir;; +*) + case $ac_srcdir in + .) ac_abs_srcdir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_srcdir=$ac_srcdir;; + *) ac_abs_srcdir=$ac_abs_builddir/$ac_srcdir;; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_top_srcdir=$ac_top_srcdir;; +*) + case $ac_top_srcdir in + .) ac_abs_top_srcdir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_top_srcdir=$ac_top_srcdir;; + *) ac_abs_top_srcdir=$ac_abs_builddir/$ac_top_srcdir;; + esac;; +esac + + cd $ac_dir + # Check for guested configure; otherwise get Cygnus style configure. + if test -f $ac_srcdir/configure.gnu; then + echo + $SHELL $ac_srcdir/configure.gnu --help=recursive + elif test -f $ac_srcdir/configure; then + echo + $SHELL $ac_srcdir/configure --help=recursive + elif test -f $ac_srcdir/configure.ac || + test -f $ac_srcdir/configure.in; then + echo + $ac_configure --help + else + echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 + fi + cd $ac_popdir + done +fi + +test -n "$ac_init_help" && exit 0 +if $ac_init_version; then + cat <<\_ACEOF + +Copyright (C) 2003 Free Software Foundation, Inc. +This configure script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it. +_ACEOF + exit 0 +fi +exec 5>config.log +cat >&5 <<_ACEOF +This file contains any messages produced by compilers while +running configure, to aid debugging if configure makes a mistake. + +It was created by $as_me, which was +generated by GNU Autoconf 2.59. Invocation command line was + + $ $0 $@ + +_ACEOF +{ +cat <<_ASUNAME +## --------- ## +## Platform. ## +## --------- ## + +hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` + +/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` +hostinfo = `(hostinfo) 2>/dev/null || echo unknown` +/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` +/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` + +_ASUNAME + +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ echo "PATH: $as_dir" +done + +} >&5 + +cat >&5 <<_ACEOF + + +## ----------- ## +## Core tests. ## +## ----------- ## + +_ACEOF + + +# Keep a trace of the command line. +# Strip out --no-create and --no-recursion so they do not pile up. +# Strip out --silent because we don't want to record it for future runs. +# Also quote any args containing shell meta-characters. +# Make two passes to allow for proper duplicate-argument suppression. +ac_configure_args= +ac_configure_args0= +ac_configure_args1= +ac_sep= +ac_must_keep_next=false +for ac_pass in 1 2 +do + for ac_arg + do + case $ac_arg in + -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + continue ;; + *" "*|*" "*|*[\[\]\~\#\$\^\&\*\(\)\{\}\\\|\;\<\>\?\"\']*) + ac_arg=`echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + case $ac_pass in + 1) ac_configure_args0="$ac_configure_args0 '$ac_arg'" ;; + 2) + ac_configure_args1="$ac_configure_args1 '$ac_arg'" + if test $ac_must_keep_next = true; then + ac_must_keep_next=false # Got value, back to normal. + else + case $ac_arg in + *=* | --config-cache | -C | -disable-* | --disable-* \ + | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ + | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ + | -with-* | --with-* | -without-* | --without-* | --x) + case "$ac_configure_args0 " in + "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; + esac + ;; + -* ) ac_must_keep_next=true ;; + esac + fi + ac_configure_args="$ac_configure_args$ac_sep'$ac_arg'" + # Get rid of the leading space. + ac_sep=" " + ;; + esac + done +done +$as_unset ac_configure_args0 || test "${ac_configure_args0+set}" != set || { ac_configure_args0=; export ac_configure_args0; } +$as_unset ac_configure_args1 || test "${ac_configure_args1+set}" != set || { ac_configure_args1=; export ac_configure_args1; } + +# When interrupted or exit'd, cleanup temporary files, and complete +# config.log. We remove comments because anyway the quotes in there +# would cause problems or look ugly. +# WARNING: Be sure not to use single quotes in there, as some shells, +# such as our DU 5.0 friend, will then `close' the trap. +trap 'exit_status=$? + # Save into config.log some information that might help in debugging. + { + echo + + cat <<\_ASBOX +## ---------------- ## +## Cache variables. ## +## ---------------- ## +_ASBOX + echo + # The following way of writing the cache mishandles newlines in values, +{ + (set) 2>&1 | + case `(ac_space='"'"' '"'"'; set | grep ac_space) 2>&1` in + *ac_space=\ *) + sed -n \ + "s/'"'"'/'"'"'\\\\'"'"''"'"'/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='"'"'\\2'"'"'/p" + ;; + *) + sed -n \ + "s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1=\\2/p" + ;; + esac; +} + echo + + cat <<\_ASBOX +## ----------------- ## +## Output variables. ## +## ----------------- ## +_ASBOX + echo + for ac_var in $ac_subst_vars + do + eval ac_val=$`echo $ac_var` + echo "$ac_var='"'"'$ac_val'"'"'" + done | sort + echo + + if test -n "$ac_subst_files"; then + cat <<\_ASBOX +## ------------- ## +## Output files. ## +## ------------- ## +_ASBOX + echo + for ac_var in $ac_subst_files + do + eval ac_val=$`echo $ac_var` + echo "$ac_var='"'"'$ac_val'"'"'" + done | sort + echo + fi + + if test -s confdefs.h; then + cat <<\_ASBOX +## ----------- ## +## confdefs.h. 
## +## ----------- ## +_ASBOX + echo + sed "/^$/d" confdefs.h | sort + echo + fi + test "$ac_signal" != 0 && + echo "$as_me: caught signal $ac_signal" + echo "$as_me: exit $exit_status" + } >&5 + rm -f core *.core && + rm -rf conftest* confdefs* conf$$* $ac_clean_files && + exit $exit_status + ' 0 +for ac_signal in 1 2 13 15; do + trap 'ac_signal='$ac_signal'; { (exit 1); exit 1; }' $ac_signal +done +ac_signal=0 + +# confdefs.h avoids OS command line length limits that DEFS can exceed. +rm -rf conftest* confdefs.h +# AIX cpp loses on an empty file, so make sure it contains at least a newline. +echo >confdefs.h + +# Predefined preprocessor variables. + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_NAME "$PACKAGE_NAME" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_TARNAME "$PACKAGE_TARNAME" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_VERSION "$PACKAGE_VERSION" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_STRING "$PACKAGE_STRING" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" +_ACEOF + + +# Let the site file select an alternate cache file if it wants to. +# Prefer explicitly selected file to automatically selected ones. +if test -z "$CONFIG_SITE"; then + if test "x$prefix" != xNONE; then + CONFIG_SITE="$prefix/share/config.site $prefix/etc/config.site" + else + CONFIG_SITE="$ac_default_prefix/share/config.site $ac_default_prefix/etc/config.site" + fi +fi +for ac_site_file in $CONFIG_SITE; do + if test -r "$ac_site_file"; then + { echo "$as_me:$LINENO: loading site script $ac_site_file" >&5 +echo "$as_me: loading site script $ac_site_file" >&6;} + sed 's/^/| /' "$ac_site_file" >&5 + . "$ac_site_file" + fi +done + +if test -r "$cache_file"; then + # Some versions of bash will fail to source /dev/null (special + # files actually), so we avoid doing that. + if test -f "$cache_file"; then + { echo "$as_me:$LINENO: loading cache $cache_file" >&5 +echo "$as_me: loading cache $cache_file" >&6;} + case $cache_file in + [\\/]* | ?:[\\/]* ) . $cache_file;; + *) . ./$cache_file;; + esac + fi +else + { echo "$as_me:$LINENO: creating cache $cache_file" >&5 +echo "$as_me: creating cache $cache_file" >&6;} + >$cache_file +fi + +# Check that the precious variables saved in the cache have kept the same +# value. +ac_cache_corrupted=false +for ac_var in `(set) 2>&1 | + sed -n 's/^ac_env_\([a-zA-Z_0-9]*\)_set=.*/\1/p'`; do + eval ac_old_set=\$ac_cv_env_${ac_var}_set + eval ac_new_set=\$ac_env_${ac_var}_set + eval ac_old_val="\$ac_cv_env_${ac_var}_value" + eval ac_new_val="\$ac_env_${ac_var}_value" + case $ac_old_set,$ac_new_set in + set,) + { echo "$as_me:$LINENO: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 +echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,set) + { echo "$as_me:$LINENO: error: \`$ac_var' was not set in the previous run" >&5 +echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,);; + *) + if test "x$ac_old_val" != "x$ac_new_val"; then + { echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5 +echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} + { echo "$as_me:$LINENO: former value: $ac_old_val" >&5 +echo "$as_me: former value: $ac_old_val" >&2;} + { echo "$as_me:$LINENO: current value: $ac_new_val" >&5 +echo "$as_me: current value: $ac_new_val" >&2;} + ac_cache_corrupted=: + fi;; + esac + # Pass precious variables to config.status. 
+ if test "$ac_new_set" = set; then + case $ac_new_val in + *" "*|*" "*|*[\[\]\~\#\$\^\&\*\(\)\{\}\\\|\;\<\>\?\"\']*) + ac_arg=$ac_var=`echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; + *) ac_arg=$ac_var=$ac_new_val ;; + esac + case " $ac_configure_args " in + *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. + *) ac_configure_args="$ac_configure_args '$ac_arg'" ;; + esac + fi +done +if $ac_cache_corrupted; then + { echo "$as_me:$LINENO: error: changes in the environment can compromise the build" >&5 +echo "$as_me: error: changes in the environment can compromise the build" >&2;} + { { echo "$as_me:$LINENO: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&5 +echo "$as_me: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&2;} + { (exit 1); exit 1; }; } +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + + + + + + + + + + + + + + + + + + +ac_aux_dir= +for ac_dir in config $srcdir/config; do + if test -f $ac_dir/install-sh; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install-sh -c" + break + elif test -f $ac_dir/install.sh; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install.sh -c" + break + elif test -f $ac_dir/shtool; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/shtool install -c" + break + fi +done +if test -z "$ac_aux_dir"; then + { { echo "$as_me:$LINENO: error: cannot find install-sh or install.sh in config $srcdir/config" >&5 +echo "$as_me: error: cannot find install-sh or install.sh in config $srcdir/config" >&2;} + { (exit 1); exit 1; }; } +fi +ac_config_guess="$SHELL $ac_aux_dir/config.guess" +ac_config_sub="$SHELL $ac_aux_dir/config.sub" +ac_configure="$SHELL $ac_aux_dir/configure" # This should be Cygnus configure. + + ac_config_headers="$ac_config_headers config.h" + + +am__api_version="1.9" +# Find a good install program. We prefer a C program (faster), +# so one script is as good as another. But avoid the broken or +# incompatible versions: +# SysV /etc/install, /usr/sbin/install +# SunOS /usr/etc/install +# IRIX /sbin/install +# AIX /bin/install +# AmigaOS /C/install, which installs bootblocks on floppy discs +# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag +# AFS /usr/afsws/bin/install, which mishandles nonexistent args +# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" +# OS/2's system install, which has a completely different semantic +# ./install, which can be erroneously created by make from ./install.sh. +echo "$as_me:$LINENO: checking for a BSD-compatible install" >&5 +echo $ECHO_N "checking for a BSD-compatible install... $ECHO_C" >&6 +if test -z "$INSTALL"; then +if test "${ac_cv_path_install+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + # Account for people who put trailing slashes in PATH elements. +case $as_dir/ in + ./ | .// | /cC/* | \ + /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ + ?:\\/os2\\/install\\/* | ?:\\/OS2\\/INSTALL\\/* | \ + /usr/ucb/* ) ;; + *) + # OSF1 and SCO ODT 3.0 have their own names for install. + # Don't use installbsd from OSF since it installs stuff as root + # by default. 
+ for ac_prog in ginstall scoinst install; do + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then + if test $ac_prog = install && + grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # AIX install. It has an incompatible calling convention. + : + elif test $ac_prog = install && + grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # program-specific install script used by HP pwplus--don't use. + : + else + ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" + break 3 + fi + fi + done + done + ;; +esac +done + + +fi + if test "${ac_cv_path_install+set}" = set; then + INSTALL=$ac_cv_path_install + else + # As a last resort, use the slow shell script. We don't cache a + # path for INSTALL within a source directory, because that will + # break other packages using the cache if that directory is + # removed, or if the path is relative. + INSTALL=$ac_install_sh + fi +fi +echo "$as_me:$LINENO: result: $INSTALL" >&5 +echo "${ECHO_T}$INSTALL" >&6 + +# Use test -z because SunOS4 sh mishandles braces in ${var-val}. +# It thinks the first close brace ends the variable substitution. +test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' + +test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' + +test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' + +echo "$as_me:$LINENO: checking whether build environment is sane" >&5 +echo $ECHO_N "checking whether build environment is sane... $ECHO_C" >&6 +# Just in case +sleep 1 +echo timestamp > conftest.file +# Do `set' in a subshell so we don't clobber the current shell's +# arguments. Must try -L first in case configure is actually a +# symlink; some systems play weird games with the mod time of symlinks +# (eg FreeBSD returns the mod time of the symlink's containing +# directory). +if ( + set X `ls -Lt $srcdir/configure conftest.file 2> /dev/null` + if test "$*" = "X"; then + # -L didn't work. + set X `ls -t $srcdir/configure conftest.file` + fi + rm -f conftest.file + if test "$*" != "X $srcdir/configure conftest.file" \ + && test "$*" != "X conftest.file $srcdir/configure"; then + + # If neither matched, then we have a broken ls. This can happen + # if, for instance, CONFIG_SHELL is bash and it inherits a + # broken ls alias from the environment. This has actually + # happened. Such a system could not be considered "sane". + { { echo "$as_me:$LINENO: error: ls -t appears to fail. Make sure there is not a broken +alias in your environment" >&5 +echo "$as_me: error: ls -t appears to fail. Make sure there is not a broken +alias in your environment" >&2;} + { (exit 1); exit 1; }; } + fi + + test "$2" = conftest.file + ) +then + # Ok. + : +else + { { echo "$as_me:$LINENO: error: newly created file is older than distributed files! +Check your system clock" >&5 +echo "$as_me: error: newly created file is older than distributed files! +Check your system clock" >&2;} + { (exit 1); exit 1; }; } +fi +echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 +test "$program_prefix" != NONE && + program_transform_name="s,^,$program_prefix,;$program_transform_name" +# Use a double $ so make ignores it. +test "$program_suffix" != NONE && + program_transform_name="s,\$,$program_suffix,;$program_transform_name" +# Double any \ or $. echo might interpret backslashes. +# By default was `s,x,x', remove it if useless. 
+cat <<\_ACEOF >conftest.sed +s/[\\$]/&&/g;s/;s,x,x,$// +_ACEOF +program_transform_name=`echo $program_transform_name | sed -f conftest.sed` +rm conftest.sed + +# expand $ac_aux_dir to an absolute path +am_aux_dir=`cd $ac_aux_dir && pwd` + +test x"${MISSING+set}" = xset || MISSING="\${SHELL} $am_aux_dir/missing" +# Use eval to expand $SHELL +if eval "$MISSING --run true"; then + am_missing_run="$MISSING --run " +else + am_missing_run= + { echo "$as_me:$LINENO: WARNING: \`missing' script is too old or missing" >&5 +echo "$as_me: WARNING: \`missing' script is too old or missing" >&2;} +fi + +if mkdir -p --version . >/dev/null 2>&1 && test ! -d ./--version; then + # We used to keeping the `.' as first argument, in order to + # allow $(mkdir_p) to be used without argument. As in + # $(mkdir_p) $(somedir) + # where $(somedir) is conditionally defined. However this is wrong + # for two reasons: + # 1. if the package is installed by a user who cannot write `.' + # make install will fail, + # 2. the above comment should most certainly read + # $(mkdir_p) $(DESTDIR)$(somedir) + # so it does not work when $(somedir) is undefined and + # $(DESTDIR) is not. + # To support the latter case, we have to write + # test -z "$(somedir)" || $(mkdir_p) $(DESTDIR)$(somedir), + # so the `.' trick is pointless. + mkdir_p='mkdir -p --' +else + # On NextStep and OpenStep, the `mkdir' command does not + # recognize any option. It will interpret all options as + # directories to create, and then abort because `.' already + # exists. + for d in ./-p ./--version; + do + test -d $d && rmdir $d + done + # $(mkinstalldirs) is defined by Automake if mkinstalldirs exists. + if test -f "$ac_aux_dir/mkinstalldirs"; then + mkdir_p='$(mkinstalldirs)' + else + mkdir_p='$(install_sh) -d' + fi +fi + +for ac_prog in gawk mawk nawk awk +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_AWK+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$AWK"; then + ac_cv_prog_AWK="$AWK" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_AWK="$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +AWK=$ac_cv_prog_AWK +if test -n "$AWK"; then + echo "$as_me:$LINENO: result: $AWK" >&5 +echo "${ECHO_T}$AWK" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + test -n "$AWK" && break +done + +echo "$as_me:$LINENO: checking whether ${MAKE-make} sets \$(MAKE)" >&5 +echo $ECHO_N "checking whether ${MAKE-make} sets \$(MAKE)... $ECHO_C" >&6 +set dummy ${MAKE-make}; ac_make=`echo "$2" | sed 'y,:./+-,___p_,'` +if eval "test \"\${ac_cv_prog_make_${ac_make}_set+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.make <<\_ACEOF +all: + @echo 'ac_maketemp="$(MAKE)"' +_ACEOF +# GNU make sometimes prints "make[1]: Entering...", which would confuse us. 
+eval `${MAKE-make} -f conftest.make 2>/dev/null | grep temp=` +if test -n "$ac_maketemp"; then + eval ac_cv_prog_make_${ac_make}_set=yes +else + eval ac_cv_prog_make_${ac_make}_set=no +fi +rm -f conftest.make +fi +if eval "test \"`echo '$ac_cv_prog_make_'${ac_make}_set`\" = yes"; then + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 + SET_MAKE= +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 + SET_MAKE="MAKE=${MAKE-make}" +fi + +rm -rf .tst 2>/dev/null +mkdir .tst 2>/dev/null +if test -d .tst; then + am__leading_dot=. +else + am__leading_dot=_ +fi +rmdir .tst 2>/dev/null + +# test to see if srcdir already configured +if test "`cd $srcdir && pwd`" != "`pwd`" && + test -f $srcdir/config.status; then + { { echo "$as_me:$LINENO: error: source directory already configured; run \"make distclean\" there first" >&5 +echo "$as_me: error: source directory already configured; run \"make distclean\" there first" >&2;} + { (exit 1); exit 1; }; } +fi + +# test whether we have cygpath +if test -z "$CYGPATH_W"; then + if (cygpath --version) >/dev/null 2>/dev/null; then + CYGPATH_W='cygpath -w' + else + CYGPATH_W=echo + fi +fi + + +# Define the identity of the package. + PACKAGE=hadoop + VERSION=1.0.0 + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE "$PACKAGE" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define VERSION "$VERSION" +_ACEOF + +# Some tools Automake needs. + +ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"} + + +AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"} + + +AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"} + + +AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"} + + +MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"} + +install_sh=${install_sh-"$am_aux_dir/install-sh"} + +# Installed binaries are usually stripped using `strip' when the user +# run `make install-strip'. However `strip' might not be the right +# tool to use in cross-compilation environments, therefore Automake +# will honor the `STRIP' environment variable to overrule this program. +if test "$cross_compiling" != no; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. +set dummy ${ac_tool_prefix}strip; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_STRIP+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$STRIP"; then + ac_cv_prog_STRIP="$STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_STRIP="${ac_tool_prefix}strip" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +STRIP=$ac_cv_prog_STRIP +if test -n "$STRIP"; then + echo "$as_me:$LINENO: result: $STRIP" >&5 +echo "${ECHO_T}$STRIP" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + +fi +if test -z "$ac_cv_prog_STRIP"; then + ac_ct_STRIP=$STRIP + # Extract the first word of "strip", so it can be a program name with args. +set dummy strip; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... 
$ECHO_C" >&6 +if test "${ac_cv_prog_ac_ct_STRIP+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_STRIP"; then + ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_STRIP="strip" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + + test -z "$ac_cv_prog_ac_ct_STRIP" && ac_cv_prog_ac_ct_STRIP=":" +fi +fi +ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP +if test -n "$ac_ct_STRIP"; then + echo "$as_me:$LINENO: result: $ac_ct_STRIP" >&5 +echo "${ECHO_T}$ac_ct_STRIP" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + STRIP=$ac_ct_STRIP +else + STRIP="$ac_cv_prog_STRIP" +fi + +fi +INSTALL_STRIP_PROGRAM="\${SHELL} \$(install_sh) -c -s" + +# We need awk for the "check" target. The system "awk" is bad on +# some platforms. +# Always define AMTAR for backward compatibility. + +AMTAR=${AMTAR-"${am_missing_run}tar"} + +am__tar='${AMTAR} chof - "$$tardir"'; am__untar='${AMTAR} xf -' + + + + + + +# Checks for programs. +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. +set dummy ${ac_tool_prefix}gcc; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}gcc" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + echo "$as_me:$LINENO: result: $CC" >&5 +echo "${ECHO_T}$CC" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + +fi +if test -z "$ac_cv_prog_CC"; then + ac_ct_CC=$CC + # Extract the first word of "gcc", so it can be a program name with args. +set dummy gcc; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_ac_ct_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="gcc" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + echo "$as_me:$LINENO: result: $ac_ct_CC" >&5 +echo "${ECHO_T}$ac_ct_CC" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + CC=$ac_ct_CC +else + CC="$ac_cv_prog_CC" +fi + +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. +set dummy ${ac_tool_prefix}cc; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}cc" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + echo "$as_me:$LINENO: result: $CC" >&5 +echo "${ECHO_T}$CC" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + +fi +if test -z "$ac_cv_prog_CC"; then + ac_ct_CC=$CC + # Extract the first word of "cc", so it can be a program name with args. +set dummy cc; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_ac_ct_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="cc" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + echo "$as_me:$LINENO: result: $ac_ct_CC" >&5 +echo "${ECHO_T}$ac_ct_CC" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + CC=$ac_ct_CC +else + CC="$ac_cv_prog_CC" +fi + +fi +if test -z "$CC"; then + # Extract the first word of "cc", so it can be a program name with args. +set dummy cc; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else + ac_prog_rejected=no +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then + ac_prog_rejected=yes + continue + fi + ac_cv_prog_CC="cc" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +if test $ac_prog_rejected = yes; then + # We found a bogon in the path, so make sure we never use it. + set dummy $ac_cv_prog_CC + shift + if test $# != 0; then + # We chose a different compiler from the bogus one. + # However, it has the same basename, so the bogon will be chosen + # first if we set CC to just the basename; use the full file name. + shift + ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" + fi +fi +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + echo "$as_me:$LINENO: result: $CC" >&5 +echo "${ECHO_T}$CC" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + +fi +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + for ac_prog in cl + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="$ac_tool_prefix$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + echo "$as_me:$LINENO: result: $CC" >&5 +echo "${ECHO_T}$CC" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + test -n "$CC" && break + done +fi +if test -z "$CC"; then + ac_ct_CC=$CC + for ac_prog in cl +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_ac_ct_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + echo "$as_me:$LINENO: result: $ac_ct_CC" >&5 +echo "${ECHO_T}$ac_ct_CC" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + test -n "$ac_ct_CC" && break +done + + CC=$ac_ct_CC +fi + +fi + + +test -z "$CC" && { { echo "$as_me:$LINENO: error: no acceptable C compiler found in \$PATH +See \`config.log' for more details." >&5 +echo "$as_me: error: no acceptable C compiler found in \$PATH +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } + +# Provide some information about the compiler. 
+echo "$as_me:$LINENO:" \ + "checking for C compiler version" >&5 +ac_compiler=`set X $ac_compile; echo $2` +{ (eval echo "$as_me:$LINENO: \"$ac_compiler --version </dev/null >&5\"") >&5 + (eval $ac_compiler --version </dev/null >&5) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +{ (eval echo "$as_me:$LINENO: \"$ac_compiler -v </dev/null >&5\"") >&5 + (eval $ac_compiler -v </dev/null >&5) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +{ (eval echo "$as_me:$LINENO: \"$ac_compiler -V </dev/null >&5\"") >&5 + (eval $ac_compiler -V </dev/null >&5) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files a.out a.exe b.out" +# Try to create an executable without -o first, disregard a.out. +# It will help us diagnose broken compilers, and finding out an intuition +# of exeext. +echo "$as_me:$LINENO: checking for C compiler default output file name" >&5 +echo $ECHO_N "checking for C compiler default output file name... $ECHO_C" >&6 +ac_link_default=`echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` +if { (eval echo "$as_me:$LINENO: \"$ac_link_default\"") >&5 + (eval $ac_link_default) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + # Find the output, starting from the most likely. This scheme is +# not robust to junk in `.', hence go to wildcards (a.*) only as a last +# resort. + +# Be careful to initialize this variable, since it used to be cached. +# Otherwise an old cache value of `no' led to `EXEEXT = no' in a Makefile. +ac_cv_exeext= +# b.out is created by i960 compilers. +for ac_file in a_out.exe a.exe conftest.exe a.out conftest a.* conftest.* b.out +do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.o | *.obj ) + ;; + conftest.$ac_ext ) + # This is the source file. + ;; + [ab].out ) + # We found the default executable, but exeext='' is most + # certainly right. + break;; + *.* ) + ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + # FIXME: I believe we export ac_cv_exeext for Libtool, + # but it would be cool to find out if it's true. Does anybody + # maintain Libtool? --akim. + export ac_cv_exeext + break;; + * ) + break;; + esac +done +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { echo "$as_me:$LINENO: error: C compiler cannot create executables +See \`config.log' for more details." >&5 +echo "$as_me: error: C compiler cannot create executables +See \`config.log' for more details." >&2;} + { (exit 77); exit 77; }; } +fi + +ac_exeext=$ac_cv_exeext +echo "$as_me:$LINENO: result: $ac_file" >&5 +echo "${ECHO_T}$ac_file" >&6 + +# Check the compiler produces executables we can run. If not, either +# the compiler is broken, or we cross compile. +echo "$as_me:$LINENO: checking whether the C compiler works" >&5 +echo $ECHO_N "checking whether the C compiler works... $ECHO_C" >&6 +# FIXME: These cross compiler hacks should be removed for Autoconf 3.0 +# If not cross compiling, check that we can run a simple program. +if test "$cross_compiling" != yes; then + if { ac_try='./$ac_file' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + cross_compiling=no + else + if test "$cross_compiling" = maybe; then + cross_compiling=yes + else + { { echo "$as_me:$LINENO: error: cannot run C compiled programs. +If you meant to cross compile, use \`--host'. +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot run C compiled programs. +If you meant to cross compile, use \`--host'. +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } + fi + fi +fi +echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 + +rm -f a.out a.exe conftest$ac_cv_exeext b.out +ac_clean_files=$ac_clean_files_save +# Check the compiler produces executables we can run. If not, either +# the compiler is broken, or we cross compile. +echo "$as_me:$LINENO: checking whether we are cross compiling" >&5 +echo $ECHO_N "checking whether we are cross compiling... $ECHO_C" >&6 +echo "$as_me:$LINENO: result: $cross_compiling" >&5 +echo "${ECHO_T}$cross_compiling" >&6 + +echo "$as_me:$LINENO: checking for suffix of executables" >&5 +echo $ECHO_N "checking for suffix of executables... $ECHO_C" >&6 +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + # If both `conftest.exe' and `conftest' are `present' (well, observable) +# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will +# work properly (i.e., refer to `conftest.exe'), while it won't with +# `rm'. +for ac_file in conftest.exe conftest conftest.*; do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.o | *.obj ) ;; + *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + export ac_cv_exeext + break;; + * ) break;; + esac +done +else + { { echo "$as_me:$LINENO: error: cannot compute suffix of executables: cannot compile and link +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute suffix of executables: cannot compile and link +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } +fi + +rm -f conftest$ac_cv_exeext +echo "$as_me:$LINENO: result: $ac_cv_exeext" >&5 +echo "${ECHO_T}$ac_cv_exeext" >&6 + +rm -f conftest.$ac_ext +EXEEXT=$ac_cv_exeext +ac_exeext=$EXEEXT +echo "$as_me:$LINENO: checking for suffix of object files" >&5 +echo $ECHO_N "checking for suffix of object files... $ECHO_C" >&6 +if test "${ac_cv_objext+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.o conftest.obj +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + for ac_file in `(ls conftest.o conftest.obj; ls conftest.*) 2>/dev/null`; do + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg ) ;; + *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` + break;; + esac +done +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { echo "$as_me:$LINENO: error: cannot compute suffix of object files: cannot compile +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute suffix of object files: cannot compile +See \`config.log' for more details." 
>&2;} + { (exit 1); exit 1; }; } +fi + +rm -f conftest.$ac_cv_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_objext" >&5 +echo "${ECHO_T}$ac_cv_objext" >&6 +OBJEXT=$ac_cv_objext +ac_objext=$OBJEXT +echo "$as_me:$LINENO: checking whether we are using the GNU C compiler" >&5 +echo $ECHO_N "checking whether we are using the GNU C compiler... $ECHO_C" >&6 +if test "${ac_cv_c_compiler_gnu+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_compiler_gnu=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_compiler_gnu=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_c_compiler_gnu=$ac_compiler_gnu + +fi +echo "$as_me:$LINENO: result: $ac_cv_c_compiler_gnu" >&5 +echo "${ECHO_T}$ac_cv_c_compiler_gnu" >&6 +GCC=`test $ac_compiler_gnu = yes && echo yes` +ac_test_CFLAGS=${CFLAGS+set} +ac_save_CFLAGS=$CFLAGS +CFLAGS="-g" +echo "$as_me:$LINENO: checking whether $CC accepts -g" >&5 +echo $ECHO_N "checking whether $CC accepts -g... $ECHO_C" >&6 +if test "${ac_cv_prog_cc_g+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_prog_cc_g=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_prog_cc_g=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_prog_cc_g" >&5 +echo "${ECHO_T}$ac_cv_prog_cc_g" >&6 +if test "$ac_test_CFLAGS" = set; then + CFLAGS=$ac_save_CFLAGS +elif test $ac_cv_prog_cc_g = yes; then + if test "$GCC" = yes; then + CFLAGS="-g -O2" + else + CFLAGS="-g" + fi +else + if test "$GCC" = yes; then + CFLAGS="-O2" + else + CFLAGS= + fi +fi +echo "$as_me:$LINENO: checking for $CC option to accept ANSI C" >&5 +echo $ECHO_N "checking for $CC option to accept ANSI C... $ECHO_C" >&6 +if test "${ac_cv_prog_cc_stdc+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_cv_prog_cc_stdc=no +ac_save_CC=$CC +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include <stdarg.h> +#include <stdio.h> +#include <sys/types.h> +#include <sys/stat.h> +/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ +struct buf { int x; }; +FILE * (*rcsopen) (struct buf *, struct stat *, int); +static char *e (p, i) + char **p; + int i; +{ + return p[i]; +} +static char *f (char * (*g) (char **, int), char **p, ...) +{ + char *s; + va_list v; + va_start (v,p); + s = g (p, va_arg (v,int)); + va_end (v); + return s; +} + +/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has + function prototypes and stuff, but not '\xHH' hex character constants. + These don't provoke an error unfortunately, instead are silently treated + as 'x'. The following induces an error, until -std1 is added to get + proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an + array size at least. It's necessary to write '\x00'==0 to get something + that's true only with -std1. */ +int osf4_cc_array ['\x00' == 0 ? 1 : -1]; + +int test (int i, double x); +struct s1 {int (*f) (int a);}; +struct s2 {int (*f) (double a);}; +int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); +int argc; +char **argv; +int +main () +{ +return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; + ; + return 0; +} +_ACEOF +# Don't try gcc -ansi; that turns off useful extensions and +# breaks some systems' header files. +# AIX -qlanglvl=ansi +# Ultrix and OSF/1 -std1 +# HP-UX 10.20 and later -Ae +# HP-UX older versions -Aa -D_HPUX_SOURCE +# SVR4 -Xc -D__EXTENSIONS__ +for ac_arg in "" -qlanglvl=ansi -std1 -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" +do + CC="$ac_save_CC $ac_arg" + rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_prog_cc_stdc=$ac_arg +break +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext +done +rm -f conftest.$ac_ext conftest.$ac_objext +CC=$ac_save_CC + +fi + +case "x$ac_cv_prog_cc_stdc" in + x|xno) + echo "$as_me:$LINENO: result: none needed" >&5 +echo "${ECHO_T}none needed" >&6 ;; + *) + echo "$as_me:$LINENO: result: $ac_cv_prog_cc_stdc" >&5 +echo "${ECHO_T}$ac_cv_prog_cc_stdc" >&6 + CC="$CC $ac_cv_prog_cc_stdc" ;; +esac + +# Some people use a C++ compiler to compile C. Since we use `exit', +# in C++ we need to declare it. In case someone uses the same compiler +# for both compiling C and C++ we need to have the C++ compiler decide +# the declaration of exit, since it's the most demanding environment. +cat >conftest.$ac_ext <<_ACEOF +#ifndef __cplusplus + choke me +#endif +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + for ac_declaration in \ + '' \ + 'extern "C" void std::exit (int) throw (); using std::exit;' \ + 'extern "C" void std::exit (int); using std::exit;' \ + 'extern "C" void exit (int) throw ();' \ + 'extern "C" void exit (int);' \ + 'void exit (int);' +do + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_declaration +#include <stdlib.h> +int +main () +{ +exit (42); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + : +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +continue +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_declaration +int +main () +{ +exit (42); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + break +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +done +rm -f conftest* +if test -n "$ac_declaration"; then + echo '#ifdef __cplusplus' >>confdefs.h + echo $ac_declaration >>confdefs.h + echo '#endif' >>confdefs.h +fi + +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +DEPDIR="${am__leading_dot}deps" + + ac_config_commands="$ac_config_commands depfiles" + + +am_make=${MAKE-make} +cat > confinc << 'END' +am__doit: + @echo done +.PHONY: am__doit +END +# If we don't find an include directive, just comment out the code. +echo "$as_me:$LINENO: checking for style of include used by $am_make" >&5 +echo $ECHO_N "checking for style of include used by $am_make... $ECHO_C" >&6 +am__include="#" +am__quote= +_am_result=none +# First try GNU make style include. +echo "include confinc" > confmf +# We grep out `Entering directory' and `Leaving directory' +# messages which can occur if `w' ends up in MAKEFLAGS. +# In particular we don't look at `^make:' because GNU make might +# be invoked under some other name (usually "gmake"), in which +# case it prints its new name instead of `make'. +if test "`$am_make -s -f confmf 2> /dev/null | grep -v 'ing directory'`" = "done"; then + am__include=include + am__quote= + _am_result=GNU +fi +# Now try BSD make style include. +if test "$am__include" = "#"; then + echo '.include "confinc"' > confmf + if test "`$am_make -s -f confmf 2> /dev/null`" = "done"; then + am__include=.include + am__quote="\"" + _am_result=BSD + fi +fi + + +echo "$as_me:$LINENO: result: $_am_result" >&5 +echo "${ECHO_T}$_am_result" >&6 +rm -f confinc confmf + +# Check whether --enable-dependency-tracking or --disable-dependency-tracking was given. +if test "${enable_dependency_tracking+set}" = set; then + enableval="$enable_dependency_tracking" + +fi; +if test "x$enable_dependency_tracking" != xno; then + am_depcomp="$ac_aux_dir/depcomp" + AMDEPBACKSLASH='\' +fi + + +if test "x$enable_dependency_tracking" != xno; then + AMDEP_TRUE= + AMDEP_FALSE='#' +else + AMDEP_TRUE='#' + AMDEP_FALSE= +fi + + + + +depcc="$CC" am_compiler_list= + +echo "$as_me:$LINENO: checking dependency style of $depcc" >&5 +echo $ECHO_N "checking dependency style of $depcc... $ECHO_C" >&6 +if test "${am_cv_CC_dependencies_compiler_type+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. 
For + # instance it was reported that on HP-UX the gcc test will end up + # making a dummy file named `D' -- because `-MD' means `put the output + # in D'. + mkdir conftest.dir + # Copy depcomp to subdir because otherwise we won't find it if we're + # using a relative directory. + cp "$am_depcomp" conftest.dir + cd conftest.dir + # We will build objects and dependencies in a subdirectory because + # it helps to detect inapplicable dependency modes. For instance + # both Tru64's cc and ICC support -MD to output dependencies as a + # side effect of compilation, but ICC will put the dependencies in + # the current directory while Tru64 will put them in the object + # directory. + mkdir sub + + am_cv_CC_dependencies_compiler_type=none + if test "$am_compiler_list" = ""; then + am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` + fi + for depmode in $am_compiler_list; do + # Setup a source with many dependencies, because some compilers + # like to wrap large dependency lists on column 80 (with \), and + # we should not choose a depcomp mode which is confused by this. + # + # We need to recreate these files for each test, as the compiler may + # overwrite some of them when testing with obscure command lines. + # This happens at least with the AIX C compiler. + : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c + # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with + # Solaris 8's {/usr,}/bin/sh. + touch sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + + case $depmode in + nosideeffect) + # after this tag, mechanisms are not by side-effect, so they'll + # only be used when explicitly requested + if test "x$enable_dependency_tracking" = xyes; then + continue + else + break + fi + ;; + none) break ;; + esac + # We check with `-c' and `-o' for the sake of the "dashmstdout" + # mode. It turns out that the SunPro C++ compiler does not properly + # handle `-M -o', and we need to detect this. + if depmode=$depmode \ + source=sub/conftest.c object=sub/conftest.${OBJEXT-o} \ + depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ + $SHELL ./depcomp $depcc -c -o sub/conftest.${OBJEXT-o} sub/conftest.c \ + >/dev/null 2>conftest.err && + grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && + grep sub/conftest.${OBJEXT-o} sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings + # or remarks (even with -Werror). So we grep stderr for any message + # that says an option was ignored or not supported. + # When given -MP, icc 7.0 and 7.1 complain thusly: + # icc: Command line warning: ignoring option '-M'; no argument required + # The diagnosis changed in icc 8.0: + # icc: Command line remark: option '-MP' not supported + if (grep 'ignoring option' conftest.err || + grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_CC_dependencies_compiler_type=$depmode + break + fi + fi + done + + cd .. 
+ rm -rf conftest.dir +else + am_cv_CC_dependencies_compiler_type=none +fi + +fi +echo "$as_me:$LINENO: result: $am_cv_CC_dependencies_compiler_type" >&5 +echo "${ECHO_T}$am_cv_CC_dependencies_compiler_type" >&6 +CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type + + + +if + test "x$enable_dependency_tracking" != xno \ + && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then + am__fastdepCC_TRUE= + am__fastdepCC_FALSE='#' +else + am__fastdepCC_TRUE='#' + am__fastdepCC_FALSE= +fi + + +# Check whether --enable-shared or --disable-shared was given. +if test "${enable_shared+set}" = set; then + enableval="$enable_shared" + p=${PACKAGE-default} + case $enableval in + yes) enable_shared=yes ;; + no) enable_shared=no ;; + *) + enable_shared=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_shared=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac +else + enable_shared=yes +fi; + +# Check whether --enable-static or --disable-static was given. +if test "${enable_static+set}" = set; then + enableval="$enable_static" + p=${PACKAGE-default} + case $enableval in + yes) enable_static=yes ;; + no) enable_static=no ;; + *) + enable_static=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_static=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac +else + enable_static=yes +fi; + +# Check whether --enable-fast-install or --disable-fast-install was given. +if test "${enable_fast_install+set}" = set; then + enableval="$enable_fast_install" + p=${PACKAGE-default} + case $enableval in + yes) enable_fast_install=yes ;; + no) enable_fast_install=no ;; + *) + enable_fast_install=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_fast_install=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac +else + enable_fast_install=yes +fi; + +# Make sure we can run config.sub. +$ac_config_sub sun4 >/dev/null 2>&1 || + { { echo "$as_me:$LINENO: error: cannot run $ac_config_sub" >&5 +echo "$as_me: error: cannot run $ac_config_sub" >&2;} + { (exit 1); exit 1; }; } + +echo "$as_me:$LINENO: checking build system type" >&5 +echo $ECHO_N "checking build system type... 
$ECHO_C" >&6 +if test "${ac_cv_build+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_cv_build_alias=$build_alias +test -z "$ac_cv_build_alias" && + ac_cv_build_alias=`$ac_config_guess` +test -z "$ac_cv_build_alias" && + { { echo "$as_me:$LINENO: error: cannot guess build type; you must specify one" >&5 +echo "$as_me: error: cannot guess build type; you must specify one" >&2;} + { (exit 1); exit 1; }; } +ac_cv_build=`$ac_config_sub $ac_cv_build_alias` || + { { echo "$as_me:$LINENO: error: $ac_config_sub $ac_cv_build_alias failed" >&5 +echo "$as_me: error: $ac_config_sub $ac_cv_build_alias failed" >&2;} + { (exit 1); exit 1; }; } + +fi +echo "$as_me:$LINENO: result: $ac_cv_build" >&5 +echo "${ECHO_T}$ac_cv_build" >&6 +build=$ac_cv_build +build_cpu=`echo $ac_cv_build | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'` +build_vendor=`echo $ac_cv_build | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'` +build_os=`echo $ac_cv_build | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'` + + +echo "$as_me:$LINENO: checking host system type" >&5 +echo $ECHO_N "checking host system type... $ECHO_C" >&6 +if test "${ac_cv_host+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_cv_host_alias=$host_alias +test -z "$ac_cv_host_alias" && + ac_cv_host_alias=$ac_cv_build_alias +ac_cv_host=`$ac_config_sub $ac_cv_host_alias` || + { { echo "$as_me:$LINENO: error: $ac_config_sub $ac_cv_host_alias failed" >&5 +echo "$as_me: error: $ac_config_sub $ac_cv_host_alias failed" >&2;} + { (exit 1); exit 1; }; } + +fi +echo "$as_me:$LINENO: result: $ac_cv_host" >&5 +echo "${ECHO_T}$ac_cv_host" >&6 +host=$ac_cv_host +host_cpu=`echo $ac_cv_host | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'` +host_vendor=`echo $ac_cv_host | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'` +host_os=`echo $ac_cv_host | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'` + + +echo "$as_me:$LINENO: checking for a sed that does not truncate output" >&5 +echo $ECHO_N "checking for a sed that does not truncate output... $ECHO_C" >&6 +if test "${lt_cv_path_SED+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + # Loop through the user's path and test for sed and gsed. +# Then use that list of sed's as ones to test for truncation. +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for lt_ac_prog in sed gsed; do + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then + lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext" + fi + done + done +done +IFS=$as_save_IFS +lt_ac_max=0 +lt_ac_count=0 +# Add /usr/xpg4/bin/sed as it is typically found on Solaris +# along with /bin/sed that truncates output. +for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do + test ! -f $lt_ac_sed && continue + cat /dev/null > conftest.in + lt_ac_count=0 + echo $ECHO_N "0123456789$ECHO_C" >conftest.in + # Check for GNU sed and select it if it is found. 
+ if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then + lt_cv_path_SED=$lt_ac_sed + break + fi + while true; do + cat conftest.in conftest.in >conftest.tmp + mv conftest.tmp conftest.in + cp conftest.in conftest.nl + echo >>conftest.nl + $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break + cmp -s conftest.out conftest.nl || break + # 10000 chars as input seems more than enough + test $lt_ac_count -gt 10 && break + lt_ac_count=`expr $lt_ac_count + 1` + if test $lt_ac_count -gt $lt_ac_max; then + lt_ac_max=$lt_ac_count + lt_cv_path_SED=$lt_ac_sed + fi + done +done + +fi + +SED=$lt_cv_path_SED + +echo "$as_me:$LINENO: result: $SED" >&5 +echo "${ECHO_T}$SED" >&6 + +echo "$as_me:$LINENO: checking for egrep" >&5 +echo $ECHO_N "checking for egrep... $ECHO_C" >&6 +if test "${ac_cv_prog_egrep+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if echo a | (grep -E '(a|b)') >/dev/null 2>&1 + then ac_cv_prog_egrep='grep -E' + else ac_cv_prog_egrep='egrep' + fi +fi +echo "$as_me:$LINENO: result: $ac_cv_prog_egrep" >&5 +echo "${ECHO_T}$ac_cv_prog_egrep" >&6 + EGREP=$ac_cv_prog_egrep + + + +# Check whether --with-gnu-ld or --without-gnu-ld was given. +if test "${with_gnu_ld+set}" = set; then + withval="$with_gnu_ld" + test "$withval" = no || with_gnu_ld=yes +else + with_gnu_ld=no +fi; +ac_prog=ld +if test "$GCC" = yes; then + # Check if gcc -print-prog-name=ld gives a path. + echo "$as_me:$LINENO: checking for ld used by $CC" >&5 +echo $ECHO_N "checking for ld used by $CC... $ECHO_C" >&6 + case $host in + *-*-mingw*) + # gcc leaves a trailing carriage return which upsets mingw + ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; + esac + case $ac_prog in + # Accept absolute paths. + [\\/]* | ?:[\\/]*) + re_direlt='/[^/][^/]*/\.\./' + # Canonicalize the pathname of ld + ac_prog=`echo $ac_prog| $SED 's%\\\\%/%g'` + while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do + ac_prog=`echo $ac_prog| $SED "s%$re_direlt%/%"` + done + test -z "$LD" && LD="$ac_prog" + ;; + "") + # If it fails, then pretend we aren't using GCC. + ac_prog=ld + ;; + *) + # If it is relative, then search for the first ld in PATH. + with_gnu_ld=unknown + ;; + esac +elif test "$with_gnu_ld" = yes; then + echo "$as_me:$LINENO: checking for GNU ld" >&5 +echo $ECHO_N "checking for GNU ld... $ECHO_C" >&6 +else + echo "$as_me:$LINENO: checking for non-GNU ld" >&5 +echo $ECHO_N "checking for non-GNU ld... $ECHO_C" >&6 +fi +if test "${lt_cv_path_LD+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -z "$LD"; then + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then + lt_cv_path_LD="$ac_dir/$ac_prog" + # Check to see if the program is GNU ld. I'd rather use --version, + # but apparently some variants of GNU ld only accept -v. + # Break only if it was the GNU/non-GNU ld that we prefer. + case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in + *GNU* | *'with BFD'*) + test "$with_gnu_ld" != no && break + ;; + *) + test "$with_gnu_ld" != yes && break + ;; + esac + fi + done + IFS="$lt_save_ifs" +else + lt_cv_path_LD="$LD" # Let the user override the test with a path. +fi +fi + +LD="$lt_cv_path_LD" +if test -n "$LD"; then + echo "$as_me:$LINENO: result: $LD" >&5 +echo "${ECHO_T}$LD" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi +test -z "$LD" && { { echo "$as_me:$LINENO: error: no acceptable ld found in \$PATH" >&5 +echo "$as_me: error: no acceptable ld found in \$PATH" >&2;} + { (exit 1); exit 1; }; } +echo "$as_me:$LINENO: checking if the linker ($LD) is GNU ld" >&5 +echo $ECHO_N "checking if the linker ($LD) is GNU ld... 
$ECHO_C" >&6 +if test "${lt_cv_prog_gnu_ld+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + # I'd rather use --version here, but apparently some GNU lds only accept -v. +case `$LD -v 2>&1 &5 +echo "${ECHO_T}$lt_cv_prog_gnu_ld" >&6 +with_gnu_ld=$lt_cv_prog_gnu_ld + + +echo "$as_me:$LINENO: checking for $LD option to reload object files" >&5 +echo $ECHO_N "checking for $LD option to reload object files... $ECHO_C" >&6 +if test "${lt_cv_ld_reload_flag+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_ld_reload_flag='-r' +fi +echo "$as_me:$LINENO: result: $lt_cv_ld_reload_flag" >&5 +echo "${ECHO_T}$lt_cv_ld_reload_flag" >&6 +reload_flag=$lt_cv_ld_reload_flag +case $reload_flag in +"" | " "*) ;; +*) reload_flag=" $reload_flag" ;; +esac +reload_cmds='$LD$reload_flag -o $output$reload_objs' +case $host_os in + darwin*) + if test "$GCC" = yes; then + reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' + else + reload_cmds='$LD$reload_flag -o $output$reload_objs' + fi + ;; +esac + +echo "$as_me:$LINENO: checking for BSD-compatible nm" >&5 +echo $ECHO_N "checking for BSD-compatible nm... $ECHO_C" >&6 +if test "${lt_cv_path_NM+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$NM"; then + # Let the user override the test. + lt_cv_path_NM="$NM" +else + lt_nm_to_check="${ac_tool_prefix}nm" + if test -n "$ac_tool_prefix" && test "$build" = "$host"; then + lt_nm_to_check="$lt_nm_to_check nm" + fi + for lt_tmp_nm in $lt_nm_to_check; do + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + tmp_nm="$ac_dir/$lt_tmp_nm" + if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then + # Check to see if the nm accepts a BSD-compat flag. + # Adding the `sed 1q' prevents false positives on HP-UX, which says: + # nm: unknown option "B" ignored + # Tru64's nm complains that /dev/null is an invalid object file + case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in + */dev/null* | *'Invalid file or object type'*) + lt_cv_path_NM="$tmp_nm -B" + break + ;; + *) + case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in + */dev/null*) + lt_cv_path_NM="$tmp_nm -p" + break + ;; + *) + lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but + continue # so that we can try to find one that supports BSD flags + ;; + esac + ;; + esac + fi + done + IFS="$lt_save_ifs" + done + test -z "$lt_cv_path_NM" && lt_cv_path_NM=nm +fi +fi +echo "$as_me:$LINENO: result: $lt_cv_path_NM" >&5 +echo "${ECHO_T}$lt_cv_path_NM" >&6 +NM="$lt_cv_path_NM" + +echo "$as_me:$LINENO: checking whether ln -s works" >&5 +echo $ECHO_N "checking whether ln -s works... $ECHO_C" >&6 +LN_S=$as_ln_s +if test "$LN_S" = "ln -s"; then + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 +else + echo "$as_me:$LINENO: result: no, using $LN_S" >&5 +echo "${ECHO_T}no, using $LN_S" >&6 +fi + +echo "$as_me:$LINENO: checking how to recognise dependent libraries" >&5 +echo $ECHO_N "checking how to recognise dependent libraries... $ECHO_C" >&6 +if test "${lt_cv_deplibs_check_method+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_file_magic_cmd='$MAGIC_CMD' +lt_cv_file_magic_test_file= +lt_cv_deplibs_check_method='unknown' +# Need to set the preceding variable on all platforms that support +# interlibrary dependencies. +# 'none' -- dependencies not supported. +# `unknown' -- same as none, but documents that we really don't know. 
+# 'pass_all' -- all dependencies passed with no checks. +# 'test_compile' -- check by making test program. +# 'file_magic [[regex]]' -- check by looking for files in library path +# which responds to the $file_magic_cmd with a given extended regex. +# If you have `file' or equivalent on your system and you're not sure +# whether `pass_all' will *always* work, you probably want this one. + +case $host_os in +aix4* | aix5*) + lt_cv_deplibs_check_method=pass_all + ;; + +beos*) + lt_cv_deplibs_check_method=pass_all + ;; + +bsdi[45]*) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)' + lt_cv_file_magic_cmd='/usr/bin/file -L' + lt_cv_file_magic_test_file=/shlib/libc.so + ;; + +cygwin*) + # func_win32_libid is a shell function defined in ltmain.sh + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + ;; + +mingw* | pw32*) + # Base MSYS/MinGW do not provide the 'file' command needed by + # func_win32_libid shell function, so use a weaker test based on 'objdump'. + lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' + lt_cv_file_magic_cmd='$OBJDUMP -f' + ;; + +darwin* | rhapsody*) + lt_cv_deplibs_check_method=pass_all + ;; + +freebsd* | kfreebsd*-gnu | dragonfly*) + if echo __ELF__ | $CC -E - | grep __ELF__ > /dev/null; then + case $host_cpu in + i*86 ) + # Not sure whether the presence of OpenBSD here was a mistake. + # Let's accept both of them until this is cleared up. + lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` + ;; + esac + else + lt_cv_deplibs_check_method=pass_all + fi + ;; + +gnu*) + lt_cv_deplibs_check_method=pass_all + ;; + +hpux10.20* | hpux11*) + lt_cv_file_magic_cmd=/usr/bin/file + case $host_cpu in + ia64*) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64' + lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so + ;; + hppa*64*) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - PA-RISC [0-9].[0-9]' + lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl + ;; + *) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9].[0-9]) shared library' + lt_cv_file_magic_test_file=/usr/lib/libc.sl + ;; + esac + ;; + +interix3*) + # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$' + ;; + +irix5* | irix6* | nonstopux*) + case $LD in + *-32|*"-32 ") libmagic=32-bit;; + *-n32|*"-n32 ") libmagic=N32;; + *-64|*"-64 ") libmagic=64-bit;; + *) libmagic=never-match;; + esac + lt_cv_deplibs_check_method=pass_all + ;; + +# This must be Linux ELF. 
+linux*) + lt_cv_deplibs_check_method=pass_all + ;; + +netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ > /dev/null; then + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$' + fi + ;; + +newos6*) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=/usr/lib/libnls.so + ;; + +nto-qnx*) + lt_cv_deplibs_check_method=unknown + ;; + +openbsd*) + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' + fi + ;; + +osf3* | osf4* | osf5*) + lt_cv_deplibs_check_method=pass_all + ;; + +solaris*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv4 | sysv4.3*) + case $host_vendor in + motorola) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]' + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` + ;; + ncr) + lt_cv_deplibs_check_method=pass_all + ;; + sequent) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' + ;; + sni) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib" + lt_cv_file_magic_test_file=/lib/libc.so + ;; + siemens) + lt_cv_deplibs_check_method=pass_all + ;; + pc) + lt_cv_deplibs_check_method=pass_all + ;; + esac + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + lt_cv_deplibs_check_method=pass_all + ;; +esac + +fi +echo "$as_me:$LINENO: result: $lt_cv_deplibs_check_method" >&5 +echo "${ECHO_T}$lt_cv_deplibs_check_method" >&6 +file_magic_cmd=$lt_cv_file_magic_cmd +deplibs_check_method=$lt_cv_deplibs_check_method +test -z "$deplibs_check_method" && deplibs_check_method=unknown + + + + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + + +# Check whether --enable-libtool-lock or --disable-libtool-lock was given. +if test "${enable_libtool_lock+set}" = set; then + enableval="$enable_libtool_lock" + +fi; +test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes + +# Some flags need to be propagated to the compiler or linker for good +# libtool support. +case $host in +ia64-*-hpux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + case `/usr/bin/file conftest.$ac_objext` in + *ELF-32*) + HPUX_IA64_MODE="32" + ;; + *ELF-64*) + HPUX_IA64_MODE="64" + ;; + esac + fi + rm -rf conftest* + ;; +*-*-irix6*) + # Find out which ABI we are using. + echo '#line 3667 "configure"' > conftest.$ac_ext + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; then + if test "$lt_cv_prog_gnu_ld" = yes; then + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -melf32bsmip" + ;; + *N32*) + LD="${LD-ld} -melf32bmipn32" + ;; + *64-bit*) + LD="${LD-ld} -melf64bmip" + ;; + esac + else + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -32" + ;; + *N32*) + LD="${LD-ld} -n32" + ;; + *64-bit*) + LD="${LD-ld} -64" + ;; + esac + fi + fi + rm -rf conftest* + ;; + +x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*|s390*-*linux*|sparc*-*linux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + case `/usr/bin/file conftest.o` in + *32-bit*) + case $host in + x86_64-*linux*) + LD="${LD-ld} -m elf_i386" + ;; + ppc64-*linux*|powerpc64-*linux*) + LD="${LD-ld} -m elf32ppclinux" + ;; + s390x-*linux*) + LD="${LD-ld} -m elf_s390" + ;; + sparc64-*linux*) + LD="${LD-ld} -m elf32_sparc" + ;; + esac + ;; + *64-bit*) + case $host in + x86_64-*linux*) + LD="${LD-ld} -m elf_x86_64" + ;; + ppc*-*linux*|powerpc*-*linux*) + LD="${LD-ld} -m elf64ppc" + ;; + s390*-*linux*) + LD="${LD-ld} -m elf64_s390" + ;; + sparc*-*linux*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; + +*-*-sco3.2v5*) + # On SCO OpenServer 5, we need -belf to get full-featured binaries. + SAVE_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -belf" + echo "$as_me:$LINENO: checking whether the C compiler needs -belf" >&5 +echo $ECHO_N "checking whether the C compiler needs -belf... $ECHO_C" >&6 +if test "${lt_cv_cc_needs_belf+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + lt_cv_cc_needs_belf=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +lt_cv_cc_needs_belf=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +fi +echo "$as_me:$LINENO: result: $lt_cv_cc_needs_belf" >&5 +echo "${ECHO_T}$lt_cv_cc_needs_belf" >&6 + if test x"$lt_cv_cc_needs_belf" != x"yes"; then + # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf + CFLAGS="$SAVE_CFLAGS" + fi + ;; +sparc*-*solaris*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + case `/usr/bin/file conftest.o` in + *64-bit*) + case $lt_cv_prog_gnu_ld in + yes*) LD="${LD-ld} -m elf64_sparc" ;; + *) LD="${LD-ld} -64" ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; + + +esac + +need_locks="$enable_libtool_lock" + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +echo "$as_me:$LINENO: checking how to run the C preprocessor" >&5 +echo $ECHO_N "checking how to run the C preprocessor... $ECHO_C" >&6 +# On Suns, sometimes $CPP names a directory. +if test -n "$CPP" && test -d "$CPP"; then + CPP= +fi +if test -z "$CPP"; then + if test "${ac_cv_prog_CPP+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + # Double quotes because CPP needs to be expanded + for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" + do + ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_c_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_c_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + : +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.$ac_ext + + # OK, works on sane cases. Now check whether non-existent headers + # can be detected and how. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ +#include +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_c_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_c_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + # Broken: success on invalid input. +continue +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.err conftest.$ac_ext +if $ac_preproc_ok; then + break +fi + + done + ac_cv_prog_CPP=$CPP + +fi + CPP=$ac_cv_prog_CPP +else + ac_cv_prog_CPP=$CPP +fi +echo "$as_me:$LINENO: result: $CPP" >&5 +echo "${ECHO_T}$CPP" >&6 +ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_c_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_c_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + : +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.$ac_ext + + # OK, works on sane cases. Now check whether non-existent headers + # can be detected and how. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_c_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_c_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + # Broken: success on invalid input. +continue +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. 
+rm -f conftest.err conftest.$ac_ext +if $ac_preproc_ok; then + : +else + { { echo "$as_me:$LINENO: error: C preprocessor \"$CPP\" fails sanity check +See \`config.log' for more details." >&5 +echo "$as_me: error: C preprocessor \"$CPP\" fails sanity check +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +echo "$as_me:$LINENO: checking for ANSI C header files" >&5 +echo $ECHO_N "checking for ANSI C header files... $ECHO_C" >&6 +if test "${ac_cv_header_stdc+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +#include +#include +#include + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_header_stdc=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_header_stdc=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext + +if test $ac_cv_header_stdc = yes; then + # SunOS 4.x string.h does not declare mem*, contrary to ANSI. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "memchr" >/dev/null 2>&1; then + : +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "free" >/dev/null 2>&1; then + : +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. + if test "$cross_compiling" = yes; then + : +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +#if ((' ' & 0x0FF) == 0x020) +# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') +# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) +#else +# define ISLOWER(c) \ + (('a' <= (c) && (c) <= 'i') \ + || ('j' <= (c) && (c) <= 'r') \ + || ('s' <= (c) && (c) <= 'z')) +# define TOUPPER(c) (ISLOWER(c) ? 
((c) | 0x40) : (c)) +#endif + +#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) +int +main () +{ + int i; + for (i = 0; i < 256; i++) + if (XOR (islower (i), ISLOWER (i)) + || toupper (i) != TOUPPER (i)) + exit(2); + exit (0); +} +_ACEOF +rm -f conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { ac_try='./conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + : +else + echo "$as_me: program exited with status $ac_status" >&5 +echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +( exit $ac_status ) +ac_cv_header_stdc=no +fi +rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +fi +fi +fi +echo "$as_me:$LINENO: result: $ac_cv_header_stdc" >&5 +echo "${ECHO_T}$ac_cv_header_stdc" >&6 +if test $ac_cv_header_stdc = yes; then + +cat >>confdefs.h <<\_ACEOF +#define STDC_HEADERS 1 +_ACEOF + +fi + +# On IRIX 5.3, sys/types and inttypes.h are conflicting. + + + + + + + + + +for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ + inttypes.h stdint.h unistd.h +do +as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh` +echo "$as_me:$LINENO: checking for $ac_header" >&5 +echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6 +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default + +#include <$ac_header> +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + eval "$as_ac_Header=yes" +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +eval "$as_ac_Header=no" +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 +if test `eval echo '${'$as_ac_Header'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + + + +for ac_header in dlfcn.h +do +as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh` +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo "$as_me:$LINENO: checking for $ac_header" >&5 +echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6 +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 +else + # Is the header compilable? 
+echo "$as_me:$LINENO: checking $ac_header usability" >&5 +echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +#include <$ac_header> +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_header_compiler=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_header_compiler=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 +echo "${ECHO_T}$ac_header_compiler" >&6 + +# Is the header present? +echo "$as_me:$LINENO: checking $ac_header presence" >&5 +echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include <$ac_header> +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_c_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_c_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + ac_header_preproc=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_header_preproc=no +fi +rm -f conftest.err conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 +echo "${ECHO_T}$ac_header_preproc" >&6 + +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in + yes:no: ) + { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 +echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 +echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} + ac_header_preproc=yes + ;; + no:yes:* ) + { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 +echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 +echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" 
>&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 +echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 +echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 +echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 +echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} + ( + cat <<\_ASBOX +## ------------------------------------------ ## +## Report this to the AC_PACKAGE_NAME lists. ## +## ------------------------------------------ ## +_ASBOX + ) | + sed "s/^/$as_me: WARNING: /" >&2 + ;; +esac +echo "$as_me:$LINENO: checking for $ac_header" >&5 +echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6 +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + eval "$as_ac_Header=\$ac_header_preproc" +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 + +fi +if test `eval echo '${'$as_ac_Header'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + +ac_ext=cc +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +if test -n "$ac_tool_prefix"; then + for ac_prog in $CCC g++ c++ gpp aCC CC cxx cc++ cl FCC KCC RCC xlC_r xlC + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_CXX+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$CXX"; then + ac_cv_prog_CXX="$CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +CXX=$ac_cv_prog_CXX +if test -n "$CXX"; then + echo "$as_me:$LINENO: result: $CXX" >&5 +echo "${ECHO_T}$CXX" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + test -n "$CXX" && break + done +fi +if test -z "$CXX"; then + ac_ct_CXX=$CXX + for ac_prog in $CCC g++ c++ gpp aCC CC cxx cc++ cl FCC KCC RCC xlC_r xlC +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_ac_ct_CXX+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_CXX"; then + ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CXX="$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +ac_ct_CXX=$ac_cv_prog_ac_ct_CXX +if test -n "$ac_ct_CXX"; then + echo "$as_me:$LINENO: result: $ac_ct_CXX" >&5 +echo "${ECHO_T}$ac_ct_CXX" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + test -n "$ac_ct_CXX" && break +done +test -n "$ac_ct_CXX" || ac_ct_CXX="g++" + + CXX=$ac_ct_CXX +fi + + +# Provide some information about the compiler. +echo "$as_me:$LINENO:" \ + "checking for C++ compiler version" >&5 +ac_compiler=`set X $ac_compile; echo $2` +{ (eval echo "$as_me:$LINENO: \"$ac_compiler --version &5\"") >&5 + (eval $ac_compiler --version &5) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +{ (eval echo "$as_me:$LINENO: \"$ac_compiler -v &5\"") >&5 + (eval $ac_compiler -v &5) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +{ (eval echo "$as_me:$LINENO: \"$ac_compiler -V &5\"") >&5 + (eval $ac_compiler -V &5) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + +echo "$as_me:$LINENO: checking whether we are using the GNU C++ compiler" >&5 +echo $ECHO_N "checking whether we are using the GNU C++ compiler... $ECHO_C" >&6 +if test "${ac_cv_cxx_compiler_gnu+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_compiler_gnu=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_compiler_gnu=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_cxx_compiler_gnu=$ac_compiler_gnu + +fi +echo "$as_me:$LINENO: result: $ac_cv_cxx_compiler_gnu" >&5 +echo "${ECHO_T}$ac_cv_cxx_compiler_gnu" >&6 +GXX=`test $ac_compiler_gnu = yes && echo yes` +ac_test_CXXFLAGS=${CXXFLAGS+set} +ac_save_CXXFLAGS=$CXXFLAGS +CXXFLAGS="-g" +echo "$as_me:$LINENO: checking whether $CXX accepts -g" >&5 +echo $ECHO_N "checking whether $CXX accepts -g... $ECHO_C" >&6 +if test "${ac_cv_prog_cxx_g+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? 
+ grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_prog_cxx_g=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_prog_cxx_g=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_prog_cxx_g" >&5 +echo "${ECHO_T}$ac_cv_prog_cxx_g" >&6 +if test "$ac_test_CXXFLAGS" = set; then + CXXFLAGS=$ac_save_CXXFLAGS +elif test $ac_cv_prog_cxx_g = yes; then + if test "$GXX" = yes; then + CXXFLAGS="-g -O2" + else + CXXFLAGS="-g" + fi +else + if test "$GXX" = yes; then + CXXFLAGS="-O2" + else + CXXFLAGS= + fi +fi +for ac_declaration in \ + '' \ + 'extern "C" void std::exit (int) throw (); using std::exit;' \ + 'extern "C" void std::exit (int); using std::exit;' \ + 'extern "C" void exit (int) throw ();' \ + 'extern "C" void exit (int);' \ + 'void exit (int);' +do + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_declaration +#include +int +main () +{ +exit (42); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + : +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +continue +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_declaration +int +main () +{ +exit (42); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + break +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +done +rm -f conftest* +if test -n "$ac_declaration"; then + echo '#ifdef __cplusplus' >>confdefs.h + echo $ac_declaration >>confdefs.h + echo '#endif' >>confdefs.h +fi + +ac_ext=cc +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +depcc="$CXX" am_compiler_list= + +echo "$as_me:$LINENO: checking dependency style of $depcc" >&5 +echo $ECHO_N "checking dependency style of $depcc... $ECHO_C" >&6 +if test "${am_cv_CXX_dependencies_compiler_type+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. For + # instance it was reported that on HP-UX the gcc test will end up + # making a dummy file named `D' -- because `-MD' means `put the output + # in D'. + mkdir conftest.dir + # Copy depcomp to subdir because otherwise we won't find it if we're + # using a relative directory. + cp "$am_depcomp" conftest.dir + cd conftest.dir + # We will build objects and dependencies in a subdirectory because + # it helps to detect inapplicable dependency modes. For instance + # both Tru64's cc and ICC support -MD to output dependencies as a + # side effect of compilation, but ICC will put the dependencies in + # the current directory while Tru64 will put them in the object + # directory. + mkdir sub + + am_cv_CXX_dependencies_compiler_type=none + if test "$am_compiler_list" = ""; then + am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` + fi + for depmode in $am_compiler_list; do + # Setup a source with many dependencies, because some compilers + # like to wrap large dependency lists on column 80 (with \), and + # we should not choose a depcomp mode which is confused by this. + # + # We need to recreate these files for each test, as the compiler may + # overwrite some of them when testing with obscure command lines. + # This happens at least with the AIX C compiler. + : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c + # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with + # Solaris 8's {/usr,}/bin/sh. + touch sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + + case $depmode in + nosideeffect) + # after this tag, mechanisms are not by side-effect, so they'll + # only be used when explicitly requested + if test "x$enable_dependency_tracking" = xyes; then + continue + else + break + fi + ;; + none) break ;; + esac + # We check with `-c' and `-o' for the sake of the "dashmstdout" + # mode. It turns out that the SunPro C++ compiler does not properly + # handle `-M -o', and we need to detect this. 
+ if depmode=$depmode \ + source=sub/conftest.c object=sub/conftest.${OBJEXT-o} \ + depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ + $SHELL ./depcomp $depcc -c -o sub/conftest.${OBJEXT-o} sub/conftest.c \ + >/dev/null 2>conftest.err && + grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && + grep sub/conftest.${OBJEXT-o} sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings + # or remarks (even with -Werror). So we grep stderr for any message + # that says an option was ignored or not supported. + # When given -MP, icc 7.0 and 7.1 complain thusly: + # icc: Command line warning: ignoring option '-M'; no argument required + # The diagnosis changed in icc 8.0: + # icc: Command line remark: option '-MP' not supported + if (grep 'ignoring option' conftest.err || + grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_CXX_dependencies_compiler_type=$depmode + break + fi + fi + done + + cd .. + rm -rf conftest.dir +else + am_cv_CXX_dependencies_compiler_type=none +fi + +fi +echo "$as_me:$LINENO: result: $am_cv_CXX_dependencies_compiler_type" >&5 +echo "${ECHO_T}$am_cv_CXX_dependencies_compiler_type" >&6 +CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type + + + +if + test "x$enable_dependency_tracking" != xno \ + && test "$am_cv_CXX_dependencies_compiler_type" = gcc3; then + am__fastdepCXX_TRUE= + am__fastdepCXX_FALSE='#' +else + am__fastdepCXX_TRUE='#' + am__fastdepCXX_FALSE= +fi + + + + +if test -n "$CXX" && ( test "X$CXX" != "Xno" && + ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || + (test "X$CXX" != "Xg++"))) ; then + ac_ext=cc +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +echo "$as_me:$LINENO: checking how to run the C++ preprocessor" >&5 +echo $ECHO_N "checking how to run the C++ preprocessor... $ECHO_C" >&6 +if test -z "$CXXCPP"; then + if test "${ac_cv_prog_CXXCPP+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + # Double quotes because CXXCPP needs to be expanded + for CXXCPP in "$CXX -E" "/lib/cpp" + do + ac_preproc_ok=false +for ac_cxx_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_cxx_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_cxx_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + : +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Broken: fails on valid input. 
+continue +fi +rm -f conftest.err conftest.$ac_ext + + # OK, works on sane cases. Now check whether non-existent headers + # can be detected and how. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_cxx_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_cxx_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + # Broken: success on invalid input. +continue +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.err conftest.$ac_ext +if $ac_preproc_ok; then + break +fi + + done + ac_cv_prog_CXXCPP=$CXXCPP + +fi + CXXCPP=$ac_cv_prog_CXXCPP +else + ac_cv_prog_CXXCPP=$CXXCPP +fi +echo "$as_me:$LINENO: result: $CXXCPP" >&5 +echo "${ECHO_T}$CXXCPP" >&6 +ac_preproc_ok=false +for ac_cxx_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_cxx_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_cxx_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + : +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.$ac_ext + + # OK, works on sane cases. Now check whether non-existent headers + # can be detected and how. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_cxx_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_cxx_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + # Broken: success on invalid input. 
+continue +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.err conftest.$ac_ext +if $ac_preproc_ok; then + : +else + { { echo "$as_me:$LINENO: error: C++ preprocessor \"$CXXCPP\" fails sanity check +See \`config.log' for more details." >&5 +echo "$as_me: error: C++ preprocessor \"$CXXCPP\" fails sanity check +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } +fi + +ac_ext=cc +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +fi + + +ac_ext=f +ac_compile='$F77 -c $FFLAGS conftest.$ac_ext >&5' +ac_link='$F77 -o conftest$ac_exeext $FFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_f77_compiler_gnu +if test -n "$ac_tool_prefix"; then + for ac_prog in g77 f77 xlf frt pgf77 fort77 fl32 af77 f90 xlf90 pgf90 epcf90 f95 fort xlf95 ifc efc pgf95 lf95 gfortran + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_F77+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$F77"; then + ac_cv_prog_F77="$F77" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_F77="$ac_tool_prefix$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +F77=$ac_cv_prog_F77 +if test -n "$F77"; then + echo "$as_me:$LINENO: result: $F77" >&5 +echo "${ECHO_T}$F77" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + test -n "$F77" && break + done +fi +if test -z "$F77"; then + ac_ct_F77=$F77 + for ac_prog in g77 f77 xlf frt pgf77 fort77 fl32 af77 f90 xlf90 pgf90 epcf90 f95 fort xlf95 ifc efc pgf95 lf95 gfortran +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_ac_ct_F77+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_F77"; then + ac_cv_prog_ac_ct_F77="$ac_ct_F77" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_F77="$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +ac_ct_F77=$ac_cv_prog_ac_ct_F77 +if test -n "$ac_ct_F77"; then + echo "$as_me:$LINENO: result: $ac_ct_F77" >&5 +echo "${ECHO_T}$ac_ct_F77" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + test -n "$ac_ct_F77" && break +done + + F77=$ac_ct_F77 +fi + + +# Provide some information about the compiler. 
+echo "$as_me:5266:" \ + "checking for Fortran 77 compiler version" >&5 +ac_compiler=`set X $ac_compile; echo $2` +{ (eval echo "$as_me:$LINENO: \"$ac_compiler --version &5\"") >&5 + (eval $ac_compiler --version &5) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +{ (eval echo "$as_me:$LINENO: \"$ac_compiler -v &5\"") >&5 + (eval $ac_compiler -v &5) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +{ (eval echo "$as_me:$LINENO: \"$ac_compiler -V &5\"") >&5 + (eval $ac_compiler -V &5) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +rm -f a.out + +# If we don't use `.F' as extension, the preprocessor is not run on the +# input file. (Note that this only needs to work for GNU compilers.) +ac_save_ext=$ac_ext +ac_ext=F +echo "$as_me:$LINENO: checking whether we are using the GNU Fortran 77 compiler" >&5 +echo $ECHO_N "checking whether we are using the GNU Fortran 77 compiler... $ECHO_C" >&6 +if test "${ac_cv_f77_compiler_gnu+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF + program main +#ifndef __GNUC__ + choke me +#endif + + end +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_f77_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_compiler_gnu=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_compiler_gnu=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_f77_compiler_gnu=$ac_compiler_gnu + +fi +echo "$as_me:$LINENO: result: $ac_cv_f77_compiler_gnu" >&5 +echo "${ECHO_T}$ac_cv_f77_compiler_gnu" >&6 +ac_ext=$ac_save_ext +ac_test_FFLAGS=${FFLAGS+set} +ac_save_FFLAGS=$FFLAGS +FFLAGS= +echo "$as_me:$LINENO: checking whether $F77 accepts -g" >&5 +echo $ECHO_N "checking whether $F77 accepts -g... $ECHO_C" >&6 +if test "${ac_cv_prog_f77_g+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + FFLAGS=-g +cat >conftest.$ac_ext <<_ACEOF + program main + + end +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_f77_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_prog_f77_g=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_prog_f77_g=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +echo "$as_me:$LINENO: result: $ac_cv_prog_f77_g" >&5 +echo "${ECHO_T}$ac_cv_prog_f77_g" >&6 +if test "$ac_test_FFLAGS" = set; then + FFLAGS=$ac_save_FFLAGS +elif test $ac_cv_prog_f77_g = yes; then + if test "x$ac_cv_f77_compiler_gnu" = xyes; then + FFLAGS="-g -O2" + else + FFLAGS="-g" + fi +else + if test "x$ac_cv_f77_compiler_gnu" = xyes; then + FFLAGS="-O2" + else + FFLAGS= + fi +fi + +G77=`test $ac_compiler_gnu = yes && echo yes` +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + +# Autoconf 2.13's AC_OBJEXT and AC_EXEEXT macros only works for C compilers! + +# find the maximum length of command line arguments +echo "$as_me:$LINENO: checking the maximum length of command line arguments" >&5 +echo $ECHO_N "checking the maximum length of command line arguments... $ECHO_C" >&6 +if test "${lt_cv_sys_max_cmd_len+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + i=0 + teststring="ABCD" + + case $build_os in + msdosdjgpp*) + # On DJGPP, this test can blow up pretty badly due to problems in libc + # (any single argument exceeding 2000 bytes causes a buffer overrun + # during glob expansion). Even if it were fixed, the result of this + # check would be larger than it should be. + lt_cv_sys_max_cmd_len=12288; # 12K is about right + ;; + + gnu*) + # Under GNU Hurd, this test is not required because there is + # no limit to the length of command line arguments. + # Libtool will interpret -1 as no limit whatsoever + lt_cv_sys_max_cmd_len=-1; + ;; + + cygwin* | mingw*) + # On Win9x/ME, this test blows up -- it succeeds, but takes + # about 5 minutes as the teststring grows exponentially. + # Worse, since 9x/ME are not pre-emptively multitasking, + # you end up with a "frozen" computer, even though with patience + # the test eventually succeeds (with a max line length of 256k). + # Instead, let's just punt: use the minimum linelength reported by + # all of the supported platforms: 8192 (on NT/2K/XP). + lt_cv_sys_max_cmd_len=8192; + ;; + + amigaos*) + # On AmigaOS with pdksh, this test takes hours, literally. + # So we just punt and use a minimum line length of 8192. + lt_cv_sys_max_cmd_len=8192; + ;; + + netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) + # This has been around since 386BSD, at least. Likely further. + if test -x /sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` + elif test -x /usr/sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` + else + lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs + fi + # And add a safety zone + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + ;; + + interix*) + # We know the value 262144 and hardcode it with a safety zone (like BSD) + lt_cv_sys_max_cmd_len=196608 + ;; + + osf*) + # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure + # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not + # nice to cause kernel panics so lets avoid the loop below. + # First set a reasonable default. 
+ lt_cv_sys_max_cmd_len=16384 + # + if test -x /sbin/sysconfig; then + case `/sbin/sysconfig -q proc exec_disable_arg_limit` in + *1*) lt_cv_sys_max_cmd_len=-1 ;; + esac + fi + ;; + sco3.2v5*) + lt_cv_sys_max_cmd_len=102400 + ;; + sysv5* | sco5v6* | sysv4.2uw2*) + kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` + if test -n "$kargmax"; then + lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[ ]//'` + else + lt_cv_sys_max_cmd_len=32768 + fi + ;; + *) + # If test is not a shell built-in, we'll probably end up computing a + # maximum length that is only half of the actual maximum length, but + # we can't tell. + SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} + while (test "X"`$SHELL $0 --fallback-echo "X$teststring" 2>/dev/null` \ + = "XX$teststring") >/dev/null 2>&1 && + new_result=`expr "X$teststring" : ".*" 2>&1` && + lt_cv_sys_max_cmd_len=$new_result && + test $i != 17 # 1/2 MB should be enough + do + i=`expr $i + 1` + teststring=$teststring$teststring + done + teststring= + # Add a significant safety factor because C++ compilers can tack on massive + # amounts of additional arguments before passing them to the linker. + # It appears as though 1/2 is a usable value. + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` + ;; + esac + +fi + +if test -n $lt_cv_sys_max_cmd_len ; then + echo "$as_me:$LINENO: result: $lt_cv_sys_max_cmd_len" >&5 +echo "${ECHO_T}$lt_cv_sys_max_cmd_len" >&6 +else + echo "$as_me:$LINENO: result: none" >&5 +echo "${ECHO_T}none" >&6 +fi + + + + +# Check for command to grab the raw symbol name followed by C symbol from nm. +echo "$as_me:$LINENO: checking command to parse $NM output from $compiler object" >&5 +echo $ECHO_N "checking command to parse $NM output from $compiler object... $ECHO_C" >&6 +if test "${lt_cv_sys_global_symbol_pipe+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + +# These are sane defaults that work on at least a few old systems. +# [They come from Ultrix. What could be older than Ultrix?!! ;)] + +# Character class describing NM global symbol codes. +symcode='[BCDEGRST]' + +# Regexp to match symbols that can be accessed directly from C. +sympat='\([_A-Za-z][_A-Za-z0-9]*\)' + +# Transform an extracted symbol line into a proper C declaration +lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^. .* \(.*\)$/extern int \1;/p'" + +# Transform an extracted symbol line into symbol name and symbol address +lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode \([^ ]*\) \([^ ]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'" + +# Define system-specific variables. 
+case $host_os in
+aix*)
+ symcode='[BCDT]'
+ ;;
+cygwin* | mingw* | pw32*)
+ symcode='[ABCDGISTW]'
+ ;;
+hpux*) # Its linker distinguishes data from code symbols
+ if test "$host_cpu" = ia64; then
+ symcode='[ABCDEGRST]'
+ fi
+ lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'"
+ lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'"
+ ;;
+linux*)
+ if test "$host_cpu" = ia64; then
+ symcode='[ABCDGIRSTW]'
+ lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'"
+ lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'"
+ fi
+ ;;
+irix* | nonstopux*)
+ symcode='[BCDEGRST]'
+ ;;
+osf*)
+ symcode='[BCDEGQRST]'
+ ;;
+solaris*)
+ symcode='[BDRT]'
+ ;;
+sco3.2v5*)
+ symcode='[DT]'
+ ;;
+sysv4.2uw2*)
+ symcode='[DT]'
+ ;;
+sysv5* | sco5v6* | unixware* | OpenUNIX*)
+ symcode='[ABDT]'
+ ;;
+sysv4)
+ symcode='[DFNSTU]'
+ ;;
+esac
+
+# Handle CRLF in mingw tool chain
+opt_cr=
+case $build_os in
+mingw*)
+ opt_cr=`echo 'x\{0,1\}' | tr x '\015'` # option cr in regexp
+ ;;
+esac
+
+# If we're using GNU nm, then use its standard symbol codes.
+case `$NM -V 2>&1` in
+*GNU* | *'with BFD'*)
+ symcode='[ABCDGIRSTW]' ;;
+esac
+
+# Try without a prefix underscore, then with it.
+for ac_symprfx in "" "_"; do
+
+ # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol.
+ symxfrm="\\1 $ac_symprfx\\2 \\2"
+
+ # Write the raw and C identifiers.
+ lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'"
+
+ # Check to see that the pipe works correctly.
+ pipe_works=no
+
+ rm -f conftest*
+ cat > conftest.$ac_ext <<EOF
+#ifdef __cplusplus
+extern "C" {
+#endif
+char nm_test_var;
+void nm_test_func(){}
+#ifdef __cplusplus
+}
+#endif
+int main(){nm_test_var='a';nm_test_func();return(0);}
+EOF
+
+ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ # Now try to grab the symbols.
+ nlist=conftest.nm
+ if { (eval echo "$as_me:$LINENO: \"$NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist\"") >&5
+ (eval $NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && test -s "$nlist"; then
+ # Try sorting and uniquifying the output.
+ if sort "$nlist" | uniq > "$nlist"T; then
+ mv -f "$nlist"T "$nlist"
+ else
+ rm -f "$nlist"T
+ fi
+
+ # Make sure that we snagged all the symbols we need.
+ if grep ' nm_test_var$' "$nlist" >/dev/null; then
+ if grep ' nm_test_func$' "$nlist" >/dev/null; then
+ cat <<EOF > conftest.$ac_ext
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+EOF
+ # Now generate the symbol file.
+ eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | grep -v main >> conftest.$ac_ext'
+
+ cat <<EOF >> conftest.$ac_ext
+#if defined (__STDC__) && __STDC__
+# define lt_ptr_t void *
+#else
+# define lt_ptr_t char *
+# define const
+#endif
+
+/* The mapping between symbol names and symbols. */
+const struct {
+ const char *name;
+ lt_ptr_t address;
+}
+lt_preloaded_symbols[] =
+{
+EOF
+ $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (lt_ptr_t) \&\2},/" < "$nlist" | grep -v main >> conftest.$ac_ext
+ cat <<\EOF >> conftest.$ac_ext
+ {0, (lt_ptr_t) 0}
+};
+
+#ifdef __cplusplus
+}
+#endif
+EOF
+ # Now try linking the two files.
+ mv conftest.$ac_objext conftstm.$ac_objext + lt_save_LIBS="$LIBS" + lt_save_CFLAGS="$CFLAGS" + LIBS="conftstm.$ac_objext" + CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" + if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && test -s conftest${ac_exeext}; then + pipe_works=yes + fi + LIBS="$lt_save_LIBS" + CFLAGS="$lt_save_CFLAGS" + else + echo "cannot find nm_test_func in $nlist" >&5 + fi + else + echo "cannot find nm_test_var in $nlist" >&5 + fi + else + echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5 + fi + else + echo "$progname: failed program was:" >&5 + cat conftest.$ac_ext >&5 + fi + rm -f conftest* conftst* + + # Do not use the global_symbol_pipe unless it works. + if test "$pipe_works" = yes; then + break + else + lt_cv_sys_global_symbol_pipe= + fi +done + +fi + +if test -z "$lt_cv_sys_global_symbol_pipe"; then + lt_cv_sys_global_symbol_to_cdecl= +fi +if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then + echo "$as_me:$LINENO: result: failed" >&5 +echo "${ECHO_T}failed" >&6 +else + echo "$as_me:$LINENO: result: ok" >&5 +echo "${ECHO_T}ok" >&6 +fi + +echo "$as_me:$LINENO: checking for objdir" >&5 +echo $ECHO_N "checking for objdir... $ECHO_C" >&6 +if test "${lt_cv_objdir+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + rm -f .libs 2>/dev/null +mkdir .libs 2>/dev/null +if test -d .libs; then + lt_cv_objdir=.libs +else + # MS-DOS does not allow filenames that begin with a dot. + lt_cv_objdir=_libs +fi +rmdir .libs 2>/dev/null +fi +echo "$as_me:$LINENO: result: $lt_cv_objdir" >&5 +echo "${ECHO_T}$lt_cv_objdir" >&6 +objdir=$lt_cv_objdir + + + + + +case $host_os in +aix3*) + # AIX sometimes has problems with the GCC collect2 program. For some + # reason, if we set the COLLECT_NAMES environment variable, the problems + # vanish in a puff of smoke. + if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES + fi + ;; +esac + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +Xsed='sed -e 1s/^X//' +sed_quote_subst='s/\([\\"\\`$\\\\]\)/\\\1/g' + +# Same as above, but do not quote variable references. +double_quote_subst='s/\([\\"\\`\\\\]\)/\\\1/g' + +# Sed substitution to delay expansion of an escaped shell variable in a +# double_quote_subst'ed string. +delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' + +# Sed substitution to avoid accidental globbing in evaled expressions +no_glob_subst='s/\*/\\\*/g' + +# Constants: +rm="rm -f" + +# Global variables: +default_ofile=libtool +can_build_shared=yes + +# All known linkers require a `.a' archive for static linking (except MSVC, +# which needs '.lib'). +libext=a +ltmain="$ac_aux_dir/ltmain.sh" +ofile="$default_ofile" +with_gnu_ld="$lt_cv_prog_gnu_ld" + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. +set dummy ${ac_tool_prefix}ar; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_AR+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$AR"; then + ac_cv_prog_AR="$AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_AR="${ac_tool_prefix}ar" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +AR=$ac_cv_prog_AR +if test -n "$AR"; then + echo "$as_me:$LINENO: result: $AR" >&5 +echo "${ECHO_T}$AR" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + +fi +if test -z "$ac_cv_prog_AR"; then + ac_ct_AR=$AR + # Extract the first word of "ar", so it can be a program name with args. +set dummy ar; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_ac_ct_AR+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_AR"; then + ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_AR="ar" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + + test -z "$ac_cv_prog_ac_ct_AR" && ac_cv_prog_ac_ct_AR="false" +fi +fi +ac_ct_AR=$ac_cv_prog_ac_ct_AR +if test -n "$ac_ct_AR"; then + echo "$as_me:$LINENO: result: $ac_ct_AR" >&5 +echo "${ECHO_T}$ac_ct_AR" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + AR=$ac_ct_AR +else + AR="$ac_cv_prog_AR" +fi + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. +set dummy ${ac_tool_prefix}ranlib; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_RANLIB+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$RANLIB"; then + ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +RANLIB=$ac_cv_prog_RANLIB +if test -n "$RANLIB"; then + echo "$as_me:$LINENO: result: $RANLIB" >&5 +echo "${ECHO_T}$RANLIB" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + +fi +if test -z "$ac_cv_prog_RANLIB"; then + ac_ct_RANLIB=$RANLIB + # Extract the first word of "ranlib", so it can be a program name with args. +set dummy ranlib; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_ac_ct_RANLIB+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_RANLIB"; then + ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_RANLIB="ranlib" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + + test -z "$ac_cv_prog_ac_ct_RANLIB" && ac_cv_prog_ac_ct_RANLIB=":" +fi +fi +ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB +if test -n "$ac_ct_RANLIB"; then + echo "$as_me:$LINENO: result: $ac_ct_RANLIB" >&5 +echo "${ECHO_T}$ac_ct_RANLIB" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + RANLIB=$ac_ct_RANLIB +else + RANLIB="$ac_cv_prog_RANLIB" +fi + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. +set dummy ${ac_tool_prefix}strip; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_STRIP+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$STRIP"; then + ac_cv_prog_STRIP="$STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_STRIP="${ac_tool_prefix}strip" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + +fi +fi +STRIP=$ac_cv_prog_STRIP +if test -n "$STRIP"; then + echo "$as_me:$LINENO: result: $STRIP" >&5 +echo "${ECHO_T}$STRIP" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + +fi +if test -z "$ac_cv_prog_STRIP"; then + ac_ct_STRIP=$STRIP + # Extract the first word of "strip", so it can be a program name with args. +set dummy strip; ac_word=$2 +echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6 +if test "${ac_cv_prog_ac_ct_STRIP+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_STRIP"; then + ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_STRIP="strip" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done + + test -z "$ac_cv_prog_ac_ct_STRIP" && ac_cv_prog_ac_ct_STRIP=":" +fi +fi +ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP +if test -n "$ac_ct_STRIP"; then + echo "$as_me:$LINENO: result: $ac_ct_STRIP" >&5 +echo "${ECHO_T}$ac_ct_STRIP" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + STRIP=$ac_ct_STRIP +else + STRIP="$ac_cv_prog_STRIP" +fi + + +old_CC="$CC" +old_CFLAGS="$CFLAGS" + +# Set sane defaults for various variables +test -z "$AR" && AR=ar +test -z "$AR_FLAGS" && AR_FLAGS=cru +test -z "$AS" && AS=as +test -z "$CC" && CC=cc +test -z "$LTCC" && LTCC=$CC +test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS +test -z "$DLLTOOL" && DLLTOOL=dlltool +test -z "$LD" && LD=ld +test -z "$LN_S" && LN_S="ln -s" +test -z "$MAGIC_CMD" && MAGIC_CMD=file +test -z "$NM" && NM=nm +test -z "$SED" && SED=sed +test -z "$OBJDUMP" && OBJDUMP=objdump +test -z "$RANLIB" && RANLIB=: +test -z "$STRIP" && STRIP=: +test -z "$ac_objext" && ac_objext=o + +# Determine commands to create old-style static archives. 
+old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs$old_deplibs' +old_postinstall_cmds='chmod 644 $oldlib' +old_postuninstall_cmds= + +if test -n "$RANLIB"; then + case $host_os in + openbsd*) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$oldlib" + ;; + *) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$oldlib" + ;; + esac + old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib" +fi + +for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` + + +# Only perform the check for file, if the check method requires it +case $deplibs_check_method in +file_magic*) + if test "$file_magic_cmd" = '$MAGIC_CMD'; then + echo "$as_me:$LINENO: checking for ${ac_tool_prefix}file" >&5 +echo $ECHO_N "checking for ${ac_tool_prefix}file... $ECHO_C" >&6 +if test "${lt_cv_path_MAGIC_CMD+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + case $MAGIC_CMD in +[\\/*] | ?:[\\/]*) + lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD="$MAGIC_CMD" + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" + for ac_dir in $ac_dummy; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f $ac_dir/${ac_tool_prefix}file; then + lt_cv_path_MAGIC_CMD="$ac_dir/${ac_tool_prefix}file" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +EOF + fi ;; + esac + fi + break + fi + done + IFS="$lt_save_ifs" + MAGIC_CMD="$lt_save_MAGIC_CMD" + ;; +esac +fi + +MAGIC_CMD="$lt_cv_path_MAGIC_CMD" +if test -n "$MAGIC_CMD"; then + echo "$as_me:$LINENO: result: $MAGIC_CMD" >&5 +echo "${ECHO_T}$MAGIC_CMD" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + +if test -z "$lt_cv_path_MAGIC_CMD"; then + if test -n "$ac_tool_prefix"; then + echo "$as_me:$LINENO: checking for file" >&5 +echo $ECHO_N "checking for file... $ECHO_C" >&6 +if test "${lt_cv_path_MAGIC_CMD+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + case $MAGIC_CMD in +[\\/*] | ?:[\\/]*) + lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD="$MAGIC_CMD" + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" + for ac_dir in $ac_dummy; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. 
+ if test -f $ac_dir/file; then + lt_cv_path_MAGIC_CMD="$ac_dir/file" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +EOF + fi ;; + esac + fi + break + fi + done + IFS="$lt_save_ifs" + MAGIC_CMD="$lt_save_MAGIC_CMD" + ;; +esac +fi + +MAGIC_CMD="$lt_cv_path_MAGIC_CMD" +if test -n "$MAGIC_CMD"; then + echo "$as_me:$LINENO: result: $MAGIC_CMD" >&5 +echo "${ECHO_T}$MAGIC_CMD" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + + else + MAGIC_CMD=: + fi +fi + + fi + ;; +esac + +enable_dlopen=no +enable_win32_dll=no + +# Check whether --enable-libtool-lock or --disable-libtool-lock was given. +if test "${enable_libtool_lock+set}" = set; then + enableval="$enable_libtool_lock" + +fi; +test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes + + +# Check whether --with-pic or --without-pic was given. +if test "${with_pic+set}" = set; then + withval="$with_pic" + pic_mode="$withval" +else + pic_mode=default +fi; +test -z "$pic_mode" && pic_mode=default + +# Use C for the default configuration in the libtool script +tagname= +lt_save_CC="$CC" +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +# Source file extension for C test sources. +ac_ext=c + +# Object file extension for compiled C test sources. +objext=o +objext=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;\n" + +# Code to be used in simple link tests +lt_simple_link_test_code='int main(){return(0);}\n' + + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + + +# save warnings/boilerplate of simple test code +ac_outfile=conftest.$ac_objext +printf "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$rm conftest* + +ac_outfile=conftest.$ac_objext +printf "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$rm conftest* + + + +lt_prog_compiler_no_builtin_flag= + +if test "$GCC" = yes; then + lt_prog_compiler_no_builtin_flag=' -fno-builtin' + + +echo "$as_me:$LINENO: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 +echo $ECHO_N "checking if $compiler supports -fno-rtti -fno-exceptions... 
$ECHO_C" >&6 +if test "${lt_cv_prog_compiler_rtti_exceptions+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_prog_compiler_rtti_exceptions=no + ac_outfile=conftest.$ac_objext + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="-fno-rtti -fno-exceptions" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:6329: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:6333: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_rtti_exceptions=yes + fi + fi + $rm conftest* + +fi +echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 +echo "${ECHO_T}$lt_cv_prog_compiler_rtti_exceptions" >&6 + +if test x"$lt_cv_prog_compiler_rtti_exceptions" = xyes; then + lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions" +else + : +fi + +fi + +lt_prog_compiler_wl= +lt_prog_compiler_pic= +lt_prog_compiler_static= + +echo "$as_me:$LINENO: checking for $compiler option to produce PIC" >&5 +echo $ECHO_N "checking for $compiler option to produce PIC... $ECHO_C" >&6 + + if test "$GCC" = yes; then + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_static='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static='-Bstatic' + fi + ;; + + amigaos*) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4' + ;; + + beos* | cygwin* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + + mingw* | pw32* | os2*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic='-DDLL_EXPORT' + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic='-fno-common' + ;; + + interix3*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. + lt_prog_compiler_can_build_shared=no + enable_shared=no + ;; + + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic=-Kconform_pic + fi + ;; + + hpux*) + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. 
+ case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic='-fPIC' + ;; + esac + ;; + + *) + lt_prog_compiler_pic='-fPIC' + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. + case $host_os in + aix*) + lt_prog_compiler_wl='-Wl,' + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static='-Bstatic' + else + lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp' + fi + ;; + darwin*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + case $cc_basename in + xlc*) + lt_prog_compiler_pic='-qnocommon' + lt_prog_compiler_wl='-Wl,' + ;; + esac + ;; + + mingw* | pw32* | os2*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic='-DDLL_EXPORT' + ;; + + hpux9* | hpux10* | hpux11*) + lt_prog_compiler_wl='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? + lt_prog_compiler_static='${wl}-a ${wl}archive' + ;; + + irix5* | irix6* | nonstopux*) + lt_prog_compiler_wl='-Wl,' + # PIC (with -KPIC) is the default. + lt_prog_compiler_static='-non_shared' + ;; + + newsos6) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + linux*) + case $cc_basename in + icc* | ecc*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-static' + ;; + pgcc* | pgf77* | pgf90* | pgf95*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fpic' + lt_prog_compiler_static='-Bstatic' + ;; + ccc*) + lt_prog_compiler_wl='-Wl,' + # All Alpha code is PIC. + lt_prog_compiler_static='-non_shared' + ;; + esac + ;; + + osf3* | osf4* | osf5*) + lt_prog_compiler_wl='-Wl,' + # All OSF/1 code is PIC. + lt_prog_compiler_static='-non_shared' + ;; + + solaris*) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + case $cc_basename in + f77* | f90* | f95*) + lt_prog_compiler_wl='-Qoption ld ';; + *) + lt_prog_compiler_wl='-Wl,';; + esac + ;; + + sunos4*) + lt_prog_compiler_wl='-Qoption ld ' + lt_prog_compiler_pic='-PIC' + lt_prog_compiler_static='-Bstatic' + ;; + + sysv4 | sysv4.2uw2* | sysv4.3*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + sysv4*MP*) + if test -d /usr/nec ;then + lt_prog_compiler_pic='-Kconform_pic' + lt_prog_compiler_static='-Bstatic' + fi + ;; + + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + unicos*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_can_build_shared=no + ;; + + uts4*) + lt_prog_compiler_pic='-pic' + lt_prog_compiler_static='-Bstatic' + ;; + + *) + lt_prog_compiler_can_build_shared=no + ;; + esac + fi + +echo "$as_me:$LINENO: result: $lt_prog_compiler_pic" >&5 +echo "${ECHO_T}$lt_prog_compiler_pic" >&6 + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$lt_prog_compiler_pic"; then + +echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 +echo $ECHO_N "checking if $compiler PIC flag $lt_prog_compiler_pic works... 
$ECHO_C" >&6 +if test "${lt_prog_compiler_pic_works+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_prog_compiler_pic_works=no + ac_outfile=conftest.$ac_objext + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic -DPIC" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:6597: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:6601: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_prog_compiler_pic_works=yes + fi + fi + $rm conftest* + +fi +echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_works" >&5 +echo "${ECHO_T}$lt_prog_compiler_pic_works" >&6 + +if test x"$lt_prog_compiler_pic_works" = xyes; then + case $lt_prog_compiler_pic in + "" | " "*) ;; + *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; + esac +else + lt_prog_compiler_pic= + lt_prog_compiler_can_build_shared=no +fi + +fi +case $host_os in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic= + ;; + *) + lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" + ;; +esac + +# +# Check to make sure the static flag actually works. +# +wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" +echo "$as_me:$LINENO: checking if $compiler static flag $lt_tmp_static_flag works" >&5 +echo $ECHO_N "checking if $compiler static flag $lt_tmp_static_flag works... $ECHO_C" >&6 +if test "${lt_prog_compiler_static_works+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_prog_compiler_static_works=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $lt_tmp_static_flag" + printf "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. + cat conftest.err 1>&5 + $echo "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_prog_compiler_static_works=yes + fi + else + lt_prog_compiler_static_works=yes + fi + fi + $rm conftest* + LDFLAGS="$save_LDFLAGS" + +fi +echo "$as_me:$LINENO: result: $lt_prog_compiler_static_works" >&5 +echo "${ECHO_T}$lt_prog_compiler_static_works" >&6 + +if test x"$lt_prog_compiler_static_works" = xyes; then + : +else + lt_prog_compiler_static= +fi + + +echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5 +echo $ECHO_N "checking if $compiler supports -c -o file.$ac_objext... 
$ECHO_C" >&6 +if test "${lt_cv_prog_compiler_c_o+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_prog_compiler_c_o=no + $rm -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:6701: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:6705: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o=yes + fi + fi + chmod u+w . 2>&5 + $rm conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $rm out/ii_files/* && rmdir out/ii_files + $rm out/* && rmdir out + cd .. + rmdir conftest + $rm conftest* + +fi +echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_c_o" >&5 +echo "${ECHO_T}$lt_cv_prog_compiler_c_o" >&6 + + +hard_links="nottested" +if test "$lt_cv_prog_compiler_c_o" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + echo "$as_me:$LINENO: checking if we can lock with hard links" >&5 +echo $ECHO_N "checking if we can lock with hard links... $ECHO_C" >&6 + hard_links=yes + $rm conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + echo "$as_me:$LINENO: result: $hard_links" >&5 +echo "${ECHO_T}$hard_links" >&6 + if test "$hard_links" = no; then + { echo "$as_me:$LINENO: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 +echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi + +echo "$as_me:$LINENO: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +echo $ECHO_N "checking whether the $compiler linker ($LD) supports shared libraries... 
$ECHO_C" >&6 + + runpath_var= + allow_undefined_flag= + enable_shared_with_static_runtimes=no + archive_cmds= + archive_expsym_cmds= + old_archive_From_new_cmds= + old_archive_from_expsyms_cmds= + export_dynamic_flag_spec= + whole_archive_flag_spec= + thread_safe_flag_spec= + hardcode_libdir_flag_spec= + hardcode_libdir_flag_spec_ld= + hardcode_libdir_separator= + hardcode_direct=no + hardcode_minus_L=no + hardcode_shlibpath_var=unsupported + link_all_deplibs=unknown + hardcode_automatic=no + module_cmds= + module_expsym_cmds= + always_export_symbols=no + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + include_expsyms= + # exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ` (' and `)$', so one must not match beginning or + # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', + # as well as any symbol that contains `d'. + exclude_expsyms="_GLOBAL_OFFSET_TABLE_" + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + extract_expsyms_cmds= + # Just being paranoid about ensuring that cc_basename is set. + for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` + + case $host_os in + cygwin* | mingw* | pw32*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + if test "$GCC" != yes; then + with_gnu_ld=no + fi + ;; + interix*) + # we just hope/assume this is gcc and not c89 (= MSVC++) + with_gnu_ld=yes + ;; + openbsd*) + with_gnu_ld=no + ;; + esac + + ld_shlibs=yes + if test "$with_gnu_ld" = yes; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='${wl}' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec='${wl}--rpath ${wl}$libdir' + export_dynamic_flag_spec='${wl}--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. + if $LD --help 2>&1 | grep 'no-whole-archive' > /dev/null; then + whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + whole_archive_flag_spec= + fi + supports_anon_versioning=no + case `$LD -v 2>/dev/null` in + *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + + # See if GNU ld supports shared libraries. 
+ case $host_os in + aix3* | aix4* | aix5*) + # On AIX/PPC, the GNU linker is very broken + if test "$host_cpu" != ia64; then + ld_shlibs=no + cat <&2 + +*** Warning: the GNU linker, at least up to release 2.9.1, is reported +*** to be unable to reliably create shared libraries on AIX. +*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to modify your PATH +*** so that a non-GNU linker is found, and then restart. + +EOF + fi + ;; + + amigaos*) + archive_cmds='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + + # Samuel A. Falvo II reports + # that the semantics of dynamic libraries on AmigaOS, at least up + # to version 4, is to share data among multiple programs linked + # with the same dynamic library. Since this doesn't match the + # behavior of shared libraries on other platforms, we can't use + # them. + ld_shlibs=no + ;; + + beos*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + allow_undefined_flag=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + ld_shlibs=no + fi + ;; + + cygwin* | mingw* | pw32*) + # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless, + # as there is no search path for DLLs. + hardcode_libdir_flag_spec='-L$libdir' + allow_undefined_flag=unsupported + always_export_symbols=no + enable_shared_with_static_runtimes=yes + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS] /s/.* \([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW] /s/.* //'\'' | sort | uniq > $export_symbols' + + if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... + archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + ld_shlibs=no + fi + ;; + + interix3*) + hardcode_direct=no + hardcode_shlibpath_var=no + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + export_dynamic_flag_spec='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + archive_expsym_cmds='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + + linux*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + tmp_addflag= + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag' + ;; + pgf77* | pgf90* | pgf95*) # Portland Group f77 and f90 compilers + whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + esac + archive_cmds='$CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + + if test $supports_anon_versioning = yes; then + archive_expsym_cmds='$echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + $echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + else + ld_shlibs=no + fi + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + + solaris*) + if $LD -v 2>&1 | grep 'BFD 2\.8' > /dev/null; then + ld_shlibs=no + cat <&2 + +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +EOF + elif $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) + case `$LD -v 2>&1` in + *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not +*** reliably create shared libraries on SCO systems. 
Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.16.91.0.3 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + ;; + *) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + hardcode_libdir_flag_spec='`test -z "$SCOABSPATH" && echo ${wl}-rpath,$libdir`' + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname,\${SCOABSPATH:+${install_libdir}/}$soname,-retain-symbols-file,$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + esac + ;; + + sunos4*) + archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + *) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + esac + + if test "$ld_shlibs" = no; then + runpath_var= + hardcode_libdir_flag_spec= + export_dynamic_flag_spec= + whole_archive_flag_spec= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + allow_undefined_flag=unsupported + always_export_symbols=yes + archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. + hardcode_minus_L=yes + if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + hardcode_direct=unsupported + fi + ;; + + aix4* | aix5*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to AIX nm, but means don't demangle with GNU nm + if $NM -V 2>&1 | grep 'GNU' > /dev/null; then + export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[23]|aix4.[23].*|aix5*) + for ld_flag in $LDFLAGS; do + if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then + aix_use_runtimelinking=yes + break + fi + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. 
If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + archive_cmds='' + hardcode_direct=yes + hardcode_libdir_separator=':' + link_all_deplibs=yes + + if test "$GCC" = yes; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && \ + strings "$collect2name" | grep resolve_lib_name >/dev/null + then + # We have reworked collect2 + hardcode_direct=yes + else + # We have old collect2 + hardcode_direct=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L=yes + hardcode_libdir_flag_spec='-L$libdir' + hardcode_libdir_separator= + fi + ;; + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + always_export_symbols=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + allow_undefined_flag='-berok' + # Determine the default libpath from the value encoded in an empty executable. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. 
+if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + archive_expsym_cmds="\$CC"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib' + allow_undefined_flag="-z nodefs" + archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an empty executable. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. +if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + no_undefined_flag=' ${wl}-bernotok' + allow_undefined_flag=' ${wl}-berok' + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec='$convenience' + archive_cmds_need_lc=yes + # This is similar to how AIX traditionally builds its shared libraries. 
+ archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + amigaos*) + archive_cmds='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + # see comment about different semantics on the GNU ld section + ld_shlibs=no + ;; + + bsdi[45]*) + export_dynamic_flag_spec=-rdynamic + ;; + + cygwin* | mingw* | pw32*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + hardcode_libdir_flag_spec=' ' + allow_undefined_flag=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. + archive_cmds='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | $SED -e '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + old_archive_From_new_cmds='true' + # FIXME: Should let the user specify the lib program. + old_archive_cmds='lib /OUT:$oldlib$oldobjs$old_deplibs' + fix_srcfile_path='`cygpath -w "$srcfile"`' + enable_shared_with_static_runtimes=yes + ;; + + darwin* | rhapsody*) + case $host_os in + rhapsody* | darwin1.[012]) + allow_undefined_flag='${wl}-undefined ${wl}suppress' + ;; + *) # Darwin 1.3 on + if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then + allow_undefined_flag='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + else + case ${MACOSX_DEPLOYMENT_TARGET} in + 10.[012]) + allow_undefined_flag='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + ;; + 10.*) + allow_undefined_flag='${wl}-undefined ${wl}dynamic_lookup' + ;; + esac + fi + ;; + esac + archive_cmds_need_lc=no + hardcode_direct=no + hardcode_automatic=yes + hardcode_shlibpath_var=unsupported + whole_archive_flag_spec='' + link_all_deplibs=yes + if test "$GCC" = yes ; then + output_verbose_link_cmd='echo' + archive_cmds='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + module_cmds='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds + archive_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + case $cc_basename in + xlc*) + output_verbose_link_cmd='echo' + archive_cmds='$CC -qmkshrobj $allow_undefined_flag -o $lib 
$libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring' + module_cmds='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds + archive_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + ;; + *) + ld_shlibs=no + ;; + esac + fi + ;; + + dgux*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_shlibpath_var=no + ;; + + freebsd1*) + ld_shlibs=no + ;; + + # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). + freebsd2.2*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + # Unfortunately, older versions of FreeBSD 2 do not have this feature. + freebsd2*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes + hardcode_minus_L=yes + hardcode_shlibpath_var=no + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. + freebsd* | kfreebsd*-gnu | dragonfly*) + archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + hpux9*) + if test "$GCC" = yes; then + archive_cmds='$rm $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + archive_cmds='$rm $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + export_dynamic_flag_spec='${wl}-E' + ;; + + hpux10*) + if test "$GCC" = yes -a "$with_gnu_ld" = no; then + archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi + if test "$with_gnu_ld" = no; then + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + + hardcode_direct=yes + export_dynamic_flag_spec='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ hardcode_minus_L=yes + fi + ;; + + hpux11*) + if test "$GCC" = yes -a "$with_gnu_ld" = no; then + case $host_cpu in + hppa*64*) + archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds='$CC -shared ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case $host_cpu in + hppa*64*) + archive_cmds='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + fi + if test "$with_gnu_ld" = no; then + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + + case $host_cpu in + hppa*64*|ia64*) + hardcode_libdir_flag_spec_ld='+b $libdir' + hardcode_direct=no + hardcode_shlibpath_var=no + ;; + *) + hardcode_direct=yes + export_dynamic_flag_spec='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + ;; + esac + fi + ;; + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec_ld='-rpath $libdir' + fi + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + link_all_deplibs=yes + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + newsos6) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_shlibpath_var=no + ;; + + openbsd*) + hardcode_direct=yes + hardcode_shlibpath_var=no + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + export_dynamic_flag_spec='${wl}-E' + else + case $host_os in + openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-R$libdir' + ;; + *) + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + ;; + esac + fi + ;; + + os2*) + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + allow_undefined_flag=unsupported + archive_cmds='$echo "LIBRARY $libname 
INITINSTANCE" > $output_objdir/$libname.def~$echo "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$echo DATA >> $output_objdir/$libname.def~$echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~$echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' + old_archive_From_new_cmds='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' + ;; + + osf3*) + if test "$GCC" = yes; then + allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + allow_undefined_flag=' -expect_unresolved \*' + archive_cmds='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + fi + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + ;; + + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + else + allow_undefined_flag=' -expect_unresolved \*' + archive_cmds='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; echo "-hidden">> $lib.exp~ + $LD -shared${allow_undefined_flag} -input $lib.exp $linker_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~$rm $lib.exp' + + # Both c and cxx compiler support -rpath directly + hardcode_libdir_flag_spec='-rpath $libdir' + fi + hardcode_libdir_separator=: + ;; + + solaris*) + no_undefined_flag=' -z text' + if test "$GCC" = yes; then + wlarc='${wl}' + archive_cmds='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -shared ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$rm $lib.exp' + else + wlarc='' + archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp' + fi + hardcode_libdir_flag_spec='-R$libdir' + hardcode_shlibpath_var=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The compiler driver will combine linker options so we + # cannot just pass the convience library names through + # without $wl, iff we do not link with $LD. 
+ # Luckily, gcc supports the same syntax we need for Sun Studio. + # Supported since Solaris 2.6 (maybe 2.5.1?) + case $wlarc in + '') + whole_archive_flag_spec='-z allextract$convenience -z defaultextract' ;; + *) + whole_archive_flag_spec='${wl}-z ${wl}allextract`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}-z ${wl}defaultextract' ;; + esac ;; + esac + link_all_deplibs=yes + ;; + + sunos4*) + if test "x$host_vendor" = xsequent; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. + archive_cmds='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + hardcode_libdir_flag_spec='-L$libdir' + hardcode_direct=yes + hardcode_minus_L=yes + hardcode_shlibpath_var=no + ;; + + sysv4) + case $host_vendor in + sni) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. + archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags' + reload_cmds='$CC -r -o $output$reload_objs' + hardcode_direct=no + ;; + motorola) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + hardcode_shlibpath_var=no + ;; + + sysv4.3*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var=no + export_dynamic_flag_spec='-Bexport' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + ld_shlibs=yes + fi + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7*) + no_undefined_flag='${wl}-z,text' + archive_cmds_need_lc=no + hardcode_shlibpath_var=no + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
+ no_undefined_flag='${wl}-z,text' + allow_undefined_flag='${wl}-z,nodefs' + archive_cmds_need_lc=no + hardcode_shlibpath_var=no + hardcode_libdir_flag_spec='`test -z "$SCOABSPATH" && echo ${wl}-R,$libdir`' + hardcode_libdir_separator=':' + link_all_deplibs=yes + export_dynamic_flag_spec='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + archive_cmds='$CC -shared ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$CC -G ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + uts4*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_shlibpath_var=no + ;; + + *) + ld_shlibs=no + ;; + esac + fi + +echo "$as_me:$LINENO: result: $ld_shlibs" >&5 +echo "${ECHO_T}$ld_shlibs" >&6 +test "$ld_shlibs" = no && can_build_shared=no + +# +# Do we need to explicitly link libc? +# +case "x$archive_cmds_need_lc" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $archive_cmds in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + echo "$as_me:$LINENO: checking whether -lc should be explicitly linked in" >&5 +echo $ECHO_N "checking whether -lc should be explicitly linked in... $ECHO_C" >&6 + $rm conftest* + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl + pic_flag=$lt_prog_compiler_pic + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag + allow_undefined_flag= + if { (eval echo "$as_me:$LINENO: \"$archive_cmds 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1\"") >&5 + (eval $archive_cmds 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + then + archive_cmds_need_lc=no + else + archive_cmds_need_lc=yes + fi + allow_undefined_flag=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $rm conftest* + echo "$as_me:$LINENO: result: $archive_cmds_need_lc" >&5 +echo "${ECHO_T}$archive_cmds_need_lc" >&6 + ;; + esac + fi + ;; +esac + +echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5 +echo $ECHO_N "checking dynamic linker characteristics... 
$ECHO_C" >&6 +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';' >/dev/null ; then + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix4* | aix5*) + version_type=linux + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | grep yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. 
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[45]*) + version_type=linux + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$host_os in + yes,cygwin* | yes,mingw* | yes,pw32*) + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i;echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $rm \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" + ;; + mingw*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';[c-zC-Z]:/' >/dev/null; then + # It is most probably a Windows format PATH printed by + # mingw gcc, but we are running on Cygwin. Gcc prints its search + # path with ; separators, and with drive letters. We can handle the + # drive letters (cygwin fileutils understands them), so leave them, + # especially as we might pass files found there to a mingw objdump, + # which wouldn't understand a cygwinified path. Ahh. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac + ;; + + *) + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' + ;; + esac + dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${versuffix}$shared_ext ${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' + # Apple's gcc prints 'gcc -print-search-dirs' doesn't operate the same. + if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"` + else + sys_lib_search_path_spec='/lib /usr/lib /usr/local/lib' + fi + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd1*) + dynamic_linker=no + ;; + +kfreebsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. + if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[123]*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[01]* | freebsdelf3.[01]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ + freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + freebsd*) # from 4.6 on + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +gnu*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. 
+ version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555. + postinstall_cmds='chmod 555 $lib' + ;; + +interix3*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be Linux ELF. 
+linux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # find out which ABI we are using + libsuff= + case "$host_cpu" in + x86_64*|s390x*|powerpc64*) + echo '#line 8170 "configure"' > conftest.$ac_ext + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + case `/usr/bin/file conftest.$ac_objext` in + *64-bit*) + libsuff=64 + sys_lib_search_path_spec="/lib${libsuff} /usr/lib${libsuff} /usr/local/lib${libsuff}" + ;; + esac + fi + rm -rf conftest* + ;; + esac + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib${libsuff} /usr/lib${libsuff} $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. 
+ dynamic_linker='GNU/Linux ld.so' + ;; + +knetbsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +nto-qnx*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +openbsd*) + version_type=sunos + sys_lib_dlsearch_path_spec="/usr/lib" + need_lib_prefix=no + # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
+ case $host_os in + openbsd3.3 | openbsd3.3.*) need_version=yes ;; + *) need_version=no ;; + esac + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[89] | openbsd2.[89].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +solaris*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + export_dynamic_flag_spec='${wl}-Blargedynsym' + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=freebsd-elf + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + if test "$with_gnu_ld" = yes; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + shlibpath_overrides_runpath=no + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + shlibpath_overrides_runpath=yes + case $host_os in + 
sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +uts4*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +echo "$as_me:$LINENO: result: $dynamic_linker" >&5 +echo "${ECHO_T}$dynamic_linker" >&6 +test "$dynamic_linker" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 +echo $ECHO_N "checking how to hardcode library paths into programs... $ECHO_C" >&6 +hardcode_action= +if test -n "$hardcode_libdir_flag_spec" || \ + test -n "$runpath_var" || \ + test "X$hardcode_automatic" = "Xyes" ; then + + # We can hardcode non-existant directories. + if test "$hardcode_direct" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, )" != no && + test "$hardcode_minus_L" != no; then + # Linking always hardcodes the temporary library directory. + hardcode_action=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + hardcode_action=unsupported +fi +echo "$as_me:$LINENO: result: $hardcode_action" >&5 +echo "${ECHO_T}$hardcode_action" >&6 + +if test "$hardcode_action" = relink; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi + +striplib= +old_striplib= +echo "$as_me:$LINENO: checking whether stripping libraries is possible" >&5 +echo $ECHO_N "checking whether stripping libraries is possible... 
$ECHO_C" >&6 +if test -n "$STRIP" && $STRIP -V 2>&1 | grep "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP" ; then + striplib="$STRIP -x" + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 + else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi + ;; + *) + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 + ;; + esac +fi + +if test "x$enable_dlopen" != xyes; then + enable_dlopen=unknown + enable_dlopen_self=unknown + enable_dlopen_self_static=unknown +else + lt_cv_dlopen=no + lt_cv_dlopen_libs= + + case $host_os in + beos*) + lt_cv_dlopen="load_add_on" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ;; + + mingw* | pw32*) + lt_cv_dlopen="LoadLibrary" + lt_cv_dlopen_libs= + ;; + + cygwin*) + lt_cv_dlopen="dlopen" + lt_cv_dlopen_libs= + ;; + + darwin*) + # if libdl is installed we need to link against it + echo "$as_me:$LINENO: checking for dlopen in -ldl" >&5 +echo $ECHO_N "checking for dlopen in -ldl... $ECHO_C" >&6 +if test "${ac_cv_lib_dl_dlopen+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char dlopen (); +int +main () +{ +dlopen (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_dl_dlopen=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_dl_dlopen=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5 +echo "${ECHO_T}$ac_cv_lib_dl_dlopen" >&6 +if test $ac_cv_lib_dl_dlopen = yes; then + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" +else + + lt_cv_dlopen="dyld" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + +fi + + ;; + + *) + echo "$as_me:$LINENO: checking for shl_load" >&5 +echo $ECHO_N "checking for shl_load... $ECHO_C" >&6 +if test "${ac_cv_func_shl_load+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ +/* Define shl_load to an innocuous variant, in case declares shl_load. + For example, HP-UX 11i declares gettimeofday. */ +#define shl_load innocuous_shl_load + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char shl_load (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef shl_load + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +{ +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char shl_load (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined (__stub_shl_load) || defined (__stub___shl_load) +choke me +#else +char (*f) () = shl_load; +#endif +#ifdef __cplusplus +} +#endif + +int +main () +{ +return f != shl_load; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_func_shl_load=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_func_shl_load=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_func_shl_load" >&5 +echo "${ECHO_T}$ac_cv_func_shl_load" >&6 +if test $ac_cv_func_shl_load = yes; then + lt_cv_dlopen="shl_load" +else + echo "$as_me:$LINENO: checking for shl_load in -ldld" >&5 +echo $ECHO_N "checking for shl_load in -ldld... $ECHO_C" >&6 +if test "${ac_cv_lib_dld_shl_load+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char shl_load (); +int +main () +{ +shl_load (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_dld_shl_load=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_dld_shl_load=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_dld_shl_load" >&5 +echo "${ECHO_T}$ac_cv_lib_dld_shl_load" >&6 +if test $ac_cv_lib_dld_shl_load = yes; then + lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-dld" +else + echo "$as_me:$LINENO: checking for dlopen" >&5 +echo $ECHO_N "checking for dlopen... $ECHO_C" >&6 +if test "${ac_cv_func_dlopen+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +/* Define dlopen to an innocuous variant, in case declares dlopen. + For example, HP-UX 11i declares gettimeofday. */ +#define dlopen innocuous_dlopen + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char dlopen (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef dlopen + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +{ +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char dlopen (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined (__stub_dlopen) || defined (__stub___dlopen) +choke me +#else +char (*f) () = dlopen; +#endif +#ifdef __cplusplus +} +#endif + +int +main () +{ +return f != dlopen; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_func_dlopen=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_func_dlopen=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: $ac_cv_func_dlopen" >&5 +echo "${ECHO_T}$ac_cv_func_dlopen" >&6 +if test $ac_cv_func_dlopen = yes; then + lt_cv_dlopen="dlopen" +else + echo "$as_me:$LINENO: checking for dlopen in -ldl" >&5 +echo $ECHO_N "checking for dlopen in -ldl... 
$ECHO_C" >&6 +if test "${ac_cv_lib_dl_dlopen+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char dlopen (); +int +main () +{ +dlopen (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_dl_dlopen=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_dl_dlopen=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5 +echo "${ECHO_T}$ac_cv_lib_dl_dlopen" >&6 +if test $ac_cv_lib_dl_dlopen = yes; then + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" +else + echo "$as_me:$LINENO: checking for dlopen in -lsvld" >&5 +echo $ECHO_N "checking for dlopen in -lsvld... $ECHO_C" >&6 +if test "${ac_cv_lib_svld_dlopen+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsvld $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char dlopen (); +int +main () +{ +dlopen (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_svld_dlopen=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_svld_dlopen=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_svld_dlopen" >&5 +echo "${ECHO_T}$ac_cv_lib_svld_dlopen" >&6 +if test $ac_cv_lib_svld_dlopen = yes; then + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld" +else + echo "$as_me:$LINENO: checking for dld_link in -ldld" >&5 +echo $ECHO_N "checking for dld_link in -ldld... $ECHO_C" >&6 +if test "${ac_cv_lib_dld_dld_link+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char dld_link (); +int +main () +{ +dld_link (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_dld_dld_link=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_dld_dld_link=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_dld_dld_link" >&5 +echo "${ECHO_T}$ac_cv_lib_dld_dld_link" >&6 +if test $ac_cv_lib_dld_dld_link = yes; then + lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-dld" +fi + + +fi + + +fi + + +fi + + +fi + + +fi + + ;; + esac + + if test "x$lt_cv_dlopen" != xno; then + enable_dlopen=yes + else + enable_dlopen=no + fi + + case $lt_cv_dlopen in + dlopen) + save_CPPFLAGS="$CPPFLAGS" + test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" + + save_LDFLAGS="$LDFLAGS" + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" + + save_LIBS="$LIBS" + LIBS="$lt_cv_dlopen_libs $LIBS" + + echo "$as_me:$LINENO: checking whether a program can dlopen itself" >&5 +echo $ECHO_N "checking whether a program can dlopen itself... 
$ECHO_C" >&6 +if test "${lt_cv_dlopen_self+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test "$cross_compiling" = yes; then : + lt_cv_dlopen_self=cross +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext < +#endif + +#include + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. */ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +#ifdef __cplusplus +extern "C" void exit (int); +#endif + +void fnord() { int i=42;} +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + /* dlclose (self); */ + } + else + puts (dlerror ()); + + exit (status); +} +EOF + if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) >&5 2>/dev/null + lt_status=$? + case x$lt_status in + x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;; + x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;; + x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;; + esac + else : + # compilation failed + lt_cv_dlopen_self=no + fi +fi +rm -fr conftest* + + +fi +echo "$as_me:$LINENO: result: $lt_cv_dlopen_self" >&5 +echo "${ECHO_T}$lt_cv_dlopen_self" >&6 + + if test "x$lt_cv_dlopen_self" = xyes; then + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" + echo "$as_me:$LINENO: checking whether a statically linked program can dlopen itself" >&5 +echo $ECHO_N "checking whether a statically linked program can dlopen itself... $ECHO_C" >&6 +if test "${lt_cv_dlopen_self_static+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test "$cross_compiling" = yes; then : + lt_cv_dlopen_self_static=cross +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext < +#endif + +#include + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. 
*/ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +#ifdef __cplusplus +extern "C" void exit (int); +#endif + +void fnord() { int i=42;} +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + /* dlclose (self); */ + } + else + puts (dlerror ()); + + exit (status); +} +EOF + if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) >&5 2>/dev/null + lt_status=$? + case x$lt_status in + x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;; + esac + else : + # compilation failed + lt_cv_dlopen_self_static=no + fi +fi +rm -fr conftest* + + +fi +echo "$as_me:$LINENO: result: $lt_cv_dlopen_self_static" >&5 +echo "${ECHO_T}$lt_cv_dlopen_self_static" >&6 + fi + + CPPFLAGS="$save_CPPFLAGS" + LDFLAGS="$save_LDFLAGS" + LIBS="$save_LIBS" + ;; + esac + + case $lt_cv_dlopen_self in + yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; + *) enable_dlopen_self=unknown ;; + esac + + case $lt_cv_dlopen_self_static in + yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; + *) enable_dlopen_self_static=unknown ;; + esac +fi + + +# Report which library types will actually be built +echo "$as_me:$LINENO: checking if libtool supports shared libraries" >&5 +echo $ECHO_N "checking if libtool supports shared libraries... $ECHO_C" >&6 +echo "$as_me:$LINENO: result: $can_build_shared" >&5 +echo "${ECHO_T}$can_build_shared" >&6 + +echo "$as_me:$LINENO: checking whether to build shared libraries" >&5 +echo $ECHO_N "checking whether to build shared libraries... $ECHO_C" >&6 +test "$can_build_shared" = "no" && enable_shared=no + +# On AIX, shared libraries and static libraries use the same namespace, and +# are all built from PIC. +case $host_os in +aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + +aix4* | aix5*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; +esac +echo "$as_me:$LINENO: result: $enable_shared" >&5 +echo "${ECHO_T}$enable_shared" >&6 + +echo "$as_me:$LINENO: checking whether to build static libraries" >&5 +echo $ECHO_N "checking whether to build static libraries... $ECHO_C" >&6 +# Make sure either enable_shared or enable_static is yes. +test "$enable_shared" = yes || enable_static=yes +echo "$as_me:$LINENO: result: $enable_static" >&5 +echo "${ECHO_T}$enable_static" >&6 + +# The else clause should only fire when bootstrapping the +# libtool distribution, otherwise you forgot to ship ltmain.sh +# with your package, and you will get complaints that there are +# no rules to generate ltmain.sh. +if test -f "$ltmain"; then + # See if we are running on zsh, and set the options which allow our commands through + # without removal of \ escapes. 
+ if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + # Now quote all the things that may contain metacharacters while being + # careful not to overquote the AC_SUBSTed values. We take copies of the + # variables and quote the copies for generation of the libtool script. + for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC LTCFLAGS NM \ + SED SHELL STRIP \ + libname_spec library_names_spec soname_spec extract_expsyms_cmds \ + old_striplib striplib file_magic_cmd finish_cmds finish_eval \ + deplibs_check_method reload_flag reload_cmds need_locks \ + lt_cv_sys_global_symbol_pipe lt_cv_sys_global_symbol_to_cdecl \ + lt_cv_sys_global_symbol_to_c_name_address \ + sys_lib_search_path_spec sys_lib_dlsearch_path_spec \ + old_postinstall_cmds old_postuninstall_cmds \ + compiler \ + CC \ + LD \ + lt_prog_compiler_wl \ + lt_prog_compiler_pic \ + lt_prog_compiler_static \ + lt_prog_compiler_no_builtin_flag \ + export_dynamic_flag_spec \ + thread_safe_flag_spec \ + whole_archive_flag_spec \ + enable_shared_with_static_runtimes \ + old_archive_cmds \ + old_archive_from_new_cmds \ + predep_objects \ + postdep_objects \ + predeps \ + postdeps \ + compiler_lib_search_path \ + archive_cmds \ + archive_expsym_cmds \ + postinstall_cmds \ + postuninstall_cmds \ + old_archive_from_expsyms_cmds \ + allow_undefined_flag \ + no_undefined_flag \ + export_symbols_cmds \ + hardcode_libdir_flag_spec \ + hardcode_libdir_flag_spec_ld \ + hardcode_libdir_separator \ + hardcode_automatic \ + module_cmds \ + module_expsym_cmds \ + lt_cv_prog_compiler_c_o \ + exclude_expsyms \ + include_expsyms; do + + case $var in + old_archive_cmds | \ + old_archive_from_new_cmds | \ + archive_cmds | \ + archive_expsym_cmds | \ + module_cmds | \ + module_expsym_cmds | \ + old_archive_from_expsyms_cmds | \ + export_symbols_cmds | \ + extract_expsyms_cmds | reload_cmds | finish_cmds | \ + postinstall_cmds | postuninstall_cmds | \ + old_postinstall_cmds | old_postuninstall_cmds | \ + sys_lib_search_path_spec | sys_lib_dlsearch_path_spec) + # Double-quote double-evaled strings. + eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\"" + ;; + *) + eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\"" + ;; + esac + done + + case $lt_echo in + *'\$0 --fallback-echo"') + lt_echo=`$echo "X$lt_echo" | $Xsed -e 's/\\\\\\\$0 --fallback-echo"$/$0 --fallback-echo"/'` + ;; + esac + +cfgfile="${ofile}T" + trap "$rm \"$cfgfile\"; exit 1" 1 2 15 + $rm -f "$cfgfile" + { echo "$as_me:$LINENO: creating $ofile" >&5 +echo "$as_me: creating $ofile" >&6;} + + cat <<__EOF__ >> "$cfgfile" +#! $SHELL + +# `$echo "$cfgfile" | sed 's%^.*/%%'` - Provide generalized library-building support services. +# Generated automatically by $PROGRAM (GNU $PACKAGE $VERSION$TIMESTAMP) +# NOTE: Changes made to this file will be lost: look at ltmain.sh. +# +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001 +# Free Software Foundation, Inc. +# +# This file is part of GNU Libtool: +# Originally by Gordon Matzigkeit , 1996 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# A sed program that does not truncate output. +SED=$lt_SED + +# Sed that helps us avoid accidentally triggering echo(1) options like -n. +Xsed="$SED -e 1s/^X//" + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +# The names of the tagged configurations supported by this script. +available_tags= + +# ### BEGIN LIBTOOL CONFIG + +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc + +# Whether or not to disallow shared libs when runtime libs are static +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# An echo program that does not interpret backslashes. +echo=$lt_echo + +# The archiver. +AR=$lt_AR +AR_FLAGS=$lt_AR_FLAGS + +# A C compiler. +LTCC=$lt_LTCC + +# LTCC compiler flags. +LTCFLAGS=$lt_LTCFLAGS + +# A language-specific compiler. +CC=$lt_compiler + +# Is the compiler the GNU C compiler? +with_gcc=$GCC + +gcc_dir=\`gcc -print-file-name=. | $SED 's,/\.$,,'\` +gcc_ver=\`gcc -dumpversion\` + +# An ERE matcher. +EGREP=$lt_EGREP + +# The linker used to build libraries. +LD=$lt_LD + +# Whether we need hard or soft links. +LN_S=$lt_LN_S + +# A BSD-compatible nm program. +NM=$lt_NM + +# A symbol stripping program +STRIP=$lt_STRIP + +# Used to examine libraries when file_magic_cmd begins "file" +MAGIC_CMD=$MAGIC_CMD + +# Used on cygwin: DLL creation program. +DLLTOOL="$DLLTOOL" + +# Used on cygwin: object dumper. +OBJDUMP="$OBJDUMP" + +# Used on cygwin: assembler. +AS="$AS" + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl + +# Object file suffix (normally "o"). +objext="$ac_objext" + +# Old archive suffix (normally "a"). +libext="$libext" + +# Shared library suffix (normally ".so"). +shrext_cmds='$shrext_cmds' + +# Executable file suffix (normally ""). +exeext="$exeext" + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic +pic_mode=$pic_mode + +# What is the maximum length of a command? 
+max_cmd_len=$lt_cv_sys_max_cmd_len + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Do we need the lib prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec + +# Compiler flag to generate thread-safe objects. +thread_safe_flag_spec=$lt_thread_safe_flag_spec + +# Library versioning type. +version_type=$version_type + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME. +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Commands used to build and install an old-style archive. +RANLIB=$lt_RANLIB +old_archive_cmds=$lt_old_archive_cmds +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds + +# Commands used to build and install a shared archive. +archive_cmds=$lt_archive_cmds +archive_expsym_cmds=$lt_archive_expsym_cmds +postinstall_cmds=$lt_postinstall_cmds +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to build a loadable module (assumed same as above if empty) +module_cmds=$lt_module_cmds +module_expsym_cmds=$lt_module_expsym_cmds + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + +# Dependencies to place before the objects being linked to create a +# shared library. +predep_objects=\`echo $lt_predep_objects | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` + +# Dependencies to place after the objects being linked to create a +# shared library. +postdep_objects=\`echo $lt_postdep_objects | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` + +# Dependencies to place before the objects being linked to create a +# shared library. +predeps=$lt_predeps + +# Dependencies to place after the objects being linked to create a +# shared library. +postdeps=$lt_postdeps + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=\`echo $lt_compiler_lib_search_path | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method == file_magic. 
+file_magic_cmd=$lt_file_magic_cmd + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag + +# Flag that forces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# Same as above, but a single script fragment to be evaled but not shown. +finish_eval=$lt_finish_eval + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm in a C name address pair +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# This is the shared library runtime path variable. +runpath_var=$runpath_var + +# This is the shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist. +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec + +# If ld is used when linking, flag to hardcode \$libdir into +# a binary during linking. This must work even if \$libdir does +# not exist. +hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld + +# Whether we need a single -rpath flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator + +# Set to yes if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the +# resulting binary. +hardcode_direct=$hardcode_direct + +# Set to yes if using the -LDIR flag during linking hardcodes DIR into the +# resulting binary. +hardcode_minus_L=$hardcode_minus_L + +# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into +# the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var + +# Set to yes if building a shared library automatically hardcodes DIR into the library +# and all subsequent libraries and executables linked against it. +hardcode_automatic=$hardcode_automatic + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at relink time. +variables_saved_for_relink="$variables_saved_for_relink" + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs + +# Compile-time system search path for libraries +sys_lib_search_path_spec=\`echo $lt_sys_lib_search_path_spec | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` + +# Run-time system search path for libraries +sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec + +# Fix the shell variable \$srcfile for the compiler. +fix_srcfile_path="$fix_srcfile_path" + +# Set to yes if exported symbols are required. +always_export_symbols=$always_export_symbols + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Symbols that should not be listed in the preloaded symbols. 
+exclude_expsyms=$lt_exclude_expsyms + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms + +# ### END LIBTOOL CONFIG + +__EOF__ + + + case $host_os in + aix3*) + cat <<\EOF >> "$cfgfile" + +# AIX sometimes has problems with the GCC collect2 program. For some +# reason, if we set the COLLECT_NAMES environment variable, the problems +# vanish in a puff of smoke. +if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES +fi +EOF + ;; + esac + + # We use sed instead of cat because bash on DJGPP gets confused if + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? + sed '$q' "$ltmain" >> "$cfgfile" || (rm -f "$cfgfile"; exit 1) + + mv -f "$cfgfile" "$ofile" || \ + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" + +else + # If there is no Makefile yet, we rely on a make rule to execute + # `config.status --recheck' to rerun these tests and create the + # libtool script then. + ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` + if test -f "$ltmain_in"; then + test -f Makefile && make "$ltmain" + fi +fi + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +CC="$lt_save_CC" + + +# Check whether --with-tags or --without-tags was given. +if test "${with_tags+set}" = set; then + withval="$with_tags" + tagnames="$withval" +fi; + +if test -f "$ltmain" && test -n "$tagnames"; then + if test ! -f "${ofile}"; then + { echo "$as_me:$LINENO: WARNING: output file \`$ofile' does not exist" >&5 +echo "$as_me: WARNING: output file \`$ofile' does not exist" >&2;} + fi + + if test -z "$LTCC"; then + eval "`$SHELL ${ofile} --config | grep '^LTCC='`" + if test -z "$LTCC"; then + { echo "$as_me:$LINENO: WARNING: output file \`$ofile' does not look like a libtool script" >&5 +echo "$as_me: WARNING: output file \`$ofile' does not look like a libtool script" >&2;} + else + { echo "$as_me:$LINENO: WARNING: using \`LTCC=$LTCC', extracted from \`$ofile'" >&5 +echo "$as_me: WARNING: using \`LTCC=$LTCC', extracted from \`$ofile'" >&2;} + fi + fi + if test -z "$LTCFLAGS"; then + eval "`$SHELL ${ofile} --config | grep '^LTCFLAGS='`" + fi + + # Extract list of available tagged configurations in $ofile. + # Note that this assumes the entire list is on one line. + available_tags=`grep "^available_tags=" "${ofile}" | $SED -e 's/available_tags=\(.*$\)/\1/' -e 's/\"//g'` + + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for tagname in $tagnames; do + IFS="$lt_save_ifs" + # Check whether tagname contains only valid characters + case `$echo "X$tagname" | $Xsed -e 's:[-_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890,/]::g'` in + "") ;; + *) { { echo "$as_me:$LINENO: error: invalid tag name: $tagname" >&5 +echo "$as_me: error: invalid tag name: $tagname" >&2;} + { (exit 1); exit 1; }; } + ;; + esac + + if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$" < "${ofile}" > /dev/null + then + { { echo "$as_me:$LINENO: error: tag name \"$tagname\" already exists" >&5 +echo "$as_me: error: tag name \"$tagname\" already exists" >&2;} + { (exit 1); exit 1; }; } + fi + + # Update the list of available tags. 
+ if test -n "$tagname"; then + echo appending configuration tag \"$tagname\" to $ofile + + case $tagname in + CXX) + if test -n "$CXX" && ( test "X$CXX" != "Xno" && + ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || + (test "X$CXX" != "Xg++"))) ; then + ac_ext=cc +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + + + +archive_cmds_need_lc_CXX=no +allow_undefined_flag_CXX= +always_export_symbols_CXX=no +archive_expsym_cmds_CXX= +export_dynamic_flag_spec_CXX= +hardcode_direct_CXX=no +hardcode_libdir_flag_spec_CXX= +hardcode_libdir_flag_spec_ld_CXX= +hardcode_libdir_separator_CXX= +hardcode_minus_L_CXX=no +hardcode_shlibpath_var_CXX=unsupported +hardcode_automatic_CXX=no +module_cmds_CXX= +module_expsym_cmds_CXX= +link_all_deplibs_CXX=unknown +old_archive_cmds_CXX=$old_archive_cmds +no_undefined_flag_CXX= +whole_archive_flag_spec_CXX= +enable_shared_with_static_runtimes_CXX=no + +# Dependencies to place before and after the object being linked: +predep_objects_CXX= +postdep_objects_CXX= +predeps_CXX= +postdeps_CXX= +compiler_lib_search_path_CXX= + +# Source file extension for C++ test sources. +ac_ext=cpp + +# Object file extension for compiled C++ test sources. +objext=o +objext_CXX=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;\n" + +# Code to be used in simple link tests +lt_simple_link_test_code='int main(int, char *[]) { return(0); }\n' + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + + +# save warnings/boilerplate of simple test code +ac_outfile=conftest.$ac_objext +printf "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$rm conftest* + +ac_outfile=conftest.$ac_objext +printf "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$rm conftest* + + +# Allow CC to be a program name with arguments. +lt_save_CC=$CC +lt_save_LD=$LD +lt_save_GCC=$GCC +GCC=$GXX +lt_save_with_gnu_ld=$with_gnu_ld +lt_save_path_LD=$lt_cv_path_LD +if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then + lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx +else + $as_unset lt_cv_prog_gnu_ld +fi +if test -n "${lt_cv_path_LDCXX+set}"; then + lt_cv_path_LD=$lt_cv_path_LDCXX +else + $as_unset lt_cv_path_LD +fi +test -z "${LDCXX+set}" || LD=$LDCXX +CC=${CXX-"c++"} +compiler=$CC +compiler_CXX=$CC +for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` + + +# We don't want -fno-exception wen compiling C++ code, so set the +# no_builtin_flag separately +if test "$GXX" = yes; then + lt_prog_compiler_no_builtin_flag_CXX=' -fno-builtin' +else + lt_prog_compiler_no_builtin_flag_CXX= +fi + +if test "$GXX" = yes; then + # Set up default GNU C++ configuration + + +# Check whether --with-gnu-ld or --without-gnu-ld was given. 
+if test "${with_gnu_ld+set}" = set; then + withval="$with_gnu_ld" + test "$withval" = no || with_gnu_ld=yes +else + with_gnu_ld=no +fi; +ac_prog=ld +if test "$GCC" = yes; then + # Check if gcc -print-prog-name=ld gives a path. + echo "$as_me:$LINENO: checking for ld used by $CC" >&5 +echo $ECHO_N "checking for ld used by $CC... $ECHO_C" >&6 + case $host in + *-*-mingw*) + # gcc leaves a trailing carriage return which upsets mingw + ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; + esac + case $ac_prog in + # Accept absolute paths. + [\\/]* | ?:[\\/]*) + re_direlt='/[^/][^/]*/\.\./' + # Canonicalize the pathname of ld + ac_prog=`echo $ac_prog| $SED 's%\\\\%/%g'` + while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do + ac_prog=`echo $ac_prog| $SED "s%$re_direlt%/%"` + done + test -z "$LD" && LD="$ac_prog" + ;; + "") + # If it fails, then pretend we aren't using GCC. + ac_prog=ld + ;; + *) + # If it is relative, then search for the first ld in PATH. + with_gnu_ld=unknown + ;; + esac +elif test "$with_gnu_ld" = yes; then + echo "$as_me:$LINENO: checking for GNU ld" >&5 +echo $ECHO_N "checking for GNU ld... $ECHO_C" >&6 +else + echo "$as_me:$LINENO: checking for non-GNU ld" >&5 +echo $ECHO_N "checking for non-GNU ld... $ECHO_C" >&6 +fi +if test "${lt_cv_path_LD+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -z "$LD"; then + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then + lt_cv_path_LD="$ac_dir/$ac_prog" + # Check to see if the program is GNU ld. I'd rather use --version, + # but apparently some variants of GNU ld only accept -v. + # Break only if it was the GNU/non-GNU ld that we prefer. + case `"$lt_cv_path_LD" -v 2>&1 &5 +echo "${ECHO_T}$LD" >&6 +else + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 +fi +test -z "$LD" && { { echo "$as_me:$LINENO: error: no acceptable ld found in \$PATH" >&5 +echo "$as_me: error: no acceptable ld found in \$PATH" >&2;} + { (exit 1); exit 1; }; } +echo "$as_me:$LINENO: checking if the linker ($LD) is GNU ld" >&5 +echo $ECHO_N "checking if the linker ($LD) is GNU ld... $ECHO_C" >&6 +if test "${lt_cv_prog_gnu_ld+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + # I'd rather use --version here, but apparently some GNU lds only accept -v. +case `$LD -v 2>&1 &5 +echo "${ECHO_T}$lt_cv_prog_gnu_ld" >&6 +with_gnu_ld=$lt_cv_prog_gnu_ld + + + + # Check if GNU C++ uses GNU ld as the underlying linker, since the + # archiving commands below assume that GNU ld is being used. + if test "$with_gnu_ld" = yes; then + archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + + hardcode_libdir_flag_spec_CXX='${wl}--rpath ${wl}$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + + # If archive_cmds runs LD, not CC, wlarc should be empty + # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to + # investigate it a little bit more. (MM) + wlarc='${wl}' + + # ancient GNU ld didn't support --whole-archive et. al. 
+ if eval "`$CC -print-prog-name=ld` --help 2>&1" | \ + grep 'no-whole-archive' > /dev/null; then + whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + whole_archive_flag_spec_CXX= + fi + else + with_gnu_ld=no + wlarc= + + # A generic and very simple default shared library creation + # command for GNU C++ for the case where it uses the native + # linker, instead of GNU ld. If possible, this setting should + # overridden to take advantage of the native linker features on + # the platform it is being used on. + archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + fi + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' + +else + GXX=no + with_gnu_ld=no + wlarc= +fi + +# PORTME: fill in a description of your system's C++ link characteristics +echo "$as_me:$LINENO: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +echo $ECHO_N "checking whether the $compiler linker ($LD) supports shared libraries... $ECHO_C" >&6 +ld_shlibs_CXX=yes +case $host_os in + aix3*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + aix4* | aix5*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[23]|aix4.[23].*|aix5*) + for ld_flag in $LDFLAGS; do + case $ld_flag in + *-brtl*) + aix_use_runtimelinking=yes + break + ;; + esac + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + archive_cmds_CXX='' + hardcode_direct_CXX=yes + hardcode_libdir_separator_CXX=':' + link_all_deplibs_CXX=yes + + if test "$GXX" = yes; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && \ + strings "$collect2name" | grep resolve_lib_name >/dev/null + then + # We have reworked collect2 + hardcode_direct_CXX=yes + else + # We have old collect2 + hardcode_direct_CXX=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L_CXX=yes + hardcode_libdir_flag_spec_CXX='-L$libdir' + hardcode_libdir_separator_CXX= + fi + ;; + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. 
The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + always_export_symbols_CXX=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + allow_undefined_flag_CXX='-berok' + # Determine the default libpath from the value encoded in an empty executable. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. +if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" + + archive_expsym_cmds_CXX="\$CC"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + hardcode_libdir_flag_spec_CXX='${wl}-R $libdir:/usr/lib:/lib' + allow_undefined_flag_CXX="-z nodefs" + archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an empty executable. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_cxx_werror_flag" + || test ! 
-s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. +if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + no_undefined_flag_CXX=' ${wl}-bernotok' + allow_undefined_flag_CXX=' ${wl}-berok' + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec_CXX='$convenience' + archive_cmds_need_lc_CXX=yes + # This is similar to how AIX traditionally builds its shared libraries. + archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + beos*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + allow_undefined_flag_CXX=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + archive_cmds_CXX='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + ld_shlibs_CXX=no + fi + ;; + + chorus*) + case $cc_basename in + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + cygwin* | mingw* | pw32*) + # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless, + # as there is no search path for DLLs. + hardcode_libdir_flag_spec_CXX='-L$libdir' + allow_undefined_flag_CXX=unsupported + always_export_symbols_CXX=no + enable_shared_with_static_runtimes_CXX=yes + + if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then + archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... 
+ archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + ld_shlibs_CXX=no + fi + ;; + darwin* | rhapsody*) + case $host_os in + rhapsody* | darwin1.[012]) + allow_undefined_flag_CXX='${wl}-undefined ${wl}suppress' + ;; + *) # Darwin 1.3 on + if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then + allow_undefined_flag_CXX='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + else + case ${MACOSX_DEPLOYMENT_TARGET} in + 10.[012]) + allow_undefined_flag_CXX='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + ;; + 10.*) + allow_undefined_flag_CXX='${wl}-undefined ${wl}dynamic_lookup' + ;; + esac + fi + ;; + esac + archive_cmds_need_lc_CXX=no + hardcode_direct_CXX=no + hardcode_automatic_CXX=yes + hardcode_shlibpath_var_CXX=unsupported + whole_archive_flag_spec_CXX='' + link_all_deplibs_CXX=yes + + if test "$GXX" = yes ; then + lt_int_apple_cc_single_mod=no + output_verbose_link_cmd='echo' + if $CC -dumpspecs 2>&1 | $EGREP 'single_module' >/dev/null ; then + lt_int_apple_cc_single_mod=yes + fi + if test "X$lt_int_apple_cc_single_mod" = Xyes ; then + archive_cmds_CXX='$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + else + archive_cmds_CXX='$CC -r -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + fi + module_cmds_CXX='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds + if test "X$lt_int_apple_cc_single_mod" = Xyes ; then + archive_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + archive_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -r -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + fi + module_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + case $cc_basename in + xlc*) + output_verbose_link_cmd='echo' + archive_cmds_CXX='$CC -qmkshrobj ${wl}-single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring' + module_cmds_CXX='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds + 
archive_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj ${wl}-single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + ;; + *) + ld_shlibs_CXX=no + ;; + esac + fi + ;; + + dgux*) + case $cc_basename in + ec++*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + ghcx*) + # Green Hills C++ Compiler + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + freebsd[12]*) + # C++ shared libraries reported to be fairly broken before switch to ELF + ld_shlibs_CXX=no + ;; + freebsd-elf*) + archive_cmds_need_lc_CXX=no + ;; + freebsd* | kfreebsd*-gnu | dragonfly*) + # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF + # conventions + ld_shlibs_CXX=yes + ;; + gnu*) + ;; + hpux9*) + hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_CXX=: + export_dynamic_flag_spec_CXX='${wl}-E' + hardcode_direct_CXX=yes + hardcode_minus_L_CXX=yes # Not in the search PATH, + # but as the default + # location of the library. + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + aCC*) + archive_cmds_CXX='$rm $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | grep "[-]L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + *) + if test "$GXX" = yes; then + archive_cmds_CXX='$rm $output_objdir/$soname~$CC -shared -nostdlib -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + hpux10*|hpux11*) + if test $with_gnu_ld = no; then + hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_CXX=: + + case $host_cpu in + hppa*64*|ia64*) + hardcode_libdir_flag_spec_ld_CXX='+b $libdir' + ;; + *) + export_dynamic_flag_spec_CXX='${wl}-E' + ;; + esac + fi + case $host_cpu in + hppa*64*|ia64*) + hardcode_direct_CXX=no + hardcode_shlibpath_var_CXX=no + ;; + *) + hardcode_direct_CXX=yes + hardcode_minus_L_CXX=yes # Not in the search PATH, + # but as the default + # location of the library. 
+ ;; + esac + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + aCC*) + case $host_cpu in + hppa*64*) + archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | grep "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + *) + if test "$GXX" = yes; then + if test $with_gnu_ld = no; then + case $host_cpu in + hppa*64*) + archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + fi + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + interix3*) + hardcode_direct_CXX=no + hardcode_shlibpath_var_CXX=no + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + export_dynamic_flag_spec_CXX='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. + archive_cmds_CXX='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + archive_expsym_cmds_CXX='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + irix5* | irix6*) + case $cc_basename in + CC*) + # SGI C++ + archive_cmds_CXX='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + + # Archives containing C++ object files must be created using + # "CC -ar", where "CC" is the IRIX C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. 
+ old_archive_cmds_CXX='$CC -ar -WR,-u -o $oldlib $oldobjs' + ;; + *) + if test "$GXX" = yes; then + if test "$with_gnu_ld" = no; then + archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` -o $lib' + fi + fi + link_all_deplibs_CXX=yes + ;; + esac + hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_CXX=: + ;; + linux*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + archive_expsym_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | grep "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + + hardcode_libdir_flag_spec_CXX='${wl}--rpath,$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + + # Archives containing C++ object files must be created using + # "CC -Bstatic", where "CC" is the KAI C++ compiler. + old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' + ;; + icpc*) + # Intel C++ + with_gnu_ld=yes + # version 8.0 and above of icpc choke on multiply defined symbols + # if we add $predep_objects and $postdep_objects, however 7.1 and + # earlier do not add the objects themselves. 
+ case `$CC -V 2>&1` in + *"Version 7."*) + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + *) # Version 8.0 or newer + tmp_idyn= + case $host_cpu in + ia64*) tmp_idyn=' -i_dynamic';; + esac + archive_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + esac + archive_cmds_need_lc_CXX=no + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + whole_archive_flag_spec_CXX='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + ;; + pgCC*) + # Portland Group C++ compiler + archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' + + hardcode_libdir_flag_spec_CXX='${wl}--rpath ${wl}$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + whole_archive_flag_spec_CXX='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' + ;; + cxx*) + # Compaq C++ + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols' + + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec_CXX='-rpath $libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + esac + ;; + lynxos*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + m88k*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + mvs*) + case $cc_basename in + cxx*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + archive_cmds_CXX='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' + wlarc= + hardcode_libdir_flag_spec_CXX='-R$libdir' + hardcode_direct_CXX=yes + hardcode_shlibpath_var_CXX=no + fi + # Workaround some broken pre-1.5 toolchains + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' + ;; + openbsd2*) + # C++ shared libraries are fairly broken + ld_shlibs_CXX=no + ;; + openbsd*) + hardcode_direct_CXX=yes + hardcode_shlibpath_var_CXX=no + archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' + export_dynamic_flag_spec_CXX='${wl}-E' + whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + fi + output_verbose_link_cmd='echo' + ;; + osf3*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + hardcode_libdir_separator_CXX=: + + # Archives containing C++ object files must be created using + # "CC -Bstatic", where "CC" is the KAI C++ compiler. + old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' + + ;; + RCC*) + # Rational C++ 2.4.1 + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + cxx*) + allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && echo ${wl}-set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + + hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld" | grep -v "ld:"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + *) + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds_CXX='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + + hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' + + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + osf4* | osf5*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + hardcode_libdir_separator_CXX=: + + # Archives containing C++ object files must be created using + # the KAI C++ compiler. + old_archive_cmds_CXX='$CC -o $oldlib $oldobjs' + ;; + RCC*) + # Rational C++ 2.4.1 + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + cxx*) + allow_undefined_flag_CXX=' -expect_unresolved \*' + archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds_CXX='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ + echo "-hidden">> $lib.exp~ + $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname -Wl,-input -Wl,$lib.exp `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~ + $rm $lib.exp' + + hardcode_libdir_flag_spec_CXX='-rpath $libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld" | grep -v "ld:"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + ;; + *) + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds_CXX='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + + hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' + + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + psos*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + lcc*) + # Lucid + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + solaris*) + case $cc_basename in + CC*) + # Sun C++ 4.2, 5.x and Centerline C++ + archive_cmds_need_lc_CXX=yes + no_undefined_flag_CXX=' -zdefs' + archive_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + archive_expsym_cmds_CXX='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' + + hardcode_libdir_flag_spec_CXX='-R$libdir' + hardcode_shlibpath_var_CXX=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The C++ compiler is used as linker so we must use $wl + # flag to pass the commands to the underlying system + # linker. We must also pass each convience library through + # to the system linker between allextract/defaultextract. + # The C++ compiler will combine linker options so we + # cannot just pass the convience library names through + # without $wl. + # Supported since Solaris 2.6 (maybe 2.5.1?) + whole_archive_flag_spec_CXX='${wl}-z ${wl}allextract`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}-z ${wl}defaultextract' + ;; + esac + link_all_deplibs_CXX=yes + + output_verbose_link_cmd='echo' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. + old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' + ;; + gcx*) + # Green Hills C++ Compiler + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + + # The C++ compiler must be used to create the archive. 
+ old_archive_cmds_CXX='$CC $LDFLAGS -archive -o $oldlib $oldobjs' + ;; + *) + # GNU C++ compiler with Solaris linker + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + no_undefined_flag_CXX=' ${wl}-z ${wl}defs' + if $CC --version | grep -v '^2\.7' > /dev/null; then + archive_cmds_CXX='$CC -shared -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + archive_expsym_cmds_CXX='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -shared -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd="$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep \"\-L\"" + else + # g++ 2.7 appears to require `-G' NOT `-shared' on this + # platform. + archive_cmds_CXX='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + archive_expsym_cmds_CXX='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd="$CC -G $CFLAGS -v conftest.$objext 2>&1 | grep \"\-L\"" + fi + + hardcode_libdir_flag_spec_CXX='${wl}-R $wl$libdir' + fi + ;; + esac + ;; + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) + no_undefined_flag_CXX='${wl}-z,text' + archive_cmds_need_lc_CXX=no + hardcode_shlibpath_var_CXX=no + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + archive_cmds_CXX='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds_CXX='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. + # For security reasons, it is highly recommended that you always + # use absolute paths for naming shared libraries, and exclude the + # DT_RUNPATH tag from executables and libraries. But doing so + # requires that you compile everything twice, which is a pain. + # So that behaviour is only enabled if SCOABSPATH is set to a + # non-empty value in the environment. Most likely only useful for + # creating official distributions of packages. + # This is a hack until libtool officially supports absolute path + # names for shared libraries. 
+ no_undefined_flag_CXX='${wl}-z,text' + allow_undefined_flag_CXX='${wl}-z,nodefs' + archive_cmds_need_lc_CXX=no + hardcode_shlibpath_var_CXX=no + hardcode_libdir_flag_spec_CXX='`test -z "$SCOABSPATH" && echo ${wl}-R,$libdir`' + hardcode_libdir_separator_CXX=':' + link_all_deplibs_CXX=yes + export_dynamic_flag_spec_CXX='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + archive_cmds_CXX='$CC -G ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds_CXX='$CC -shared ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + vxworks*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; +esac +echo "$as_me:$LINENO: result: $ld_shlibs_CXX" >&5 +echo "${ECHO_T}$ld_shlibs_CXX" >&6 +test "$ld_shlibs_CXX" = no && can_build_shared=no + +GCC_CXX="$GXX" +LD_CXX="$LD" + + +cat > conftest.$ac_ext <&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + # Parse the compiler output and extract the necessary + # objects, libraries and library flags. + + # Sentinel used to keep track of whether or not we are before + # the conftest object file. + pre_test_object_deps_done=no + + # The `*' in the case matches for architectures that use `case' in + # $output_verbose_cmd can trigger glob expansion during the loop + # eval without this substitution. + output_verbose_link_cmd=`$echo "X$output_verbose_link_cmd" | $Xsed -e "$no_glob_subst"` + + for p in `eval $output_verbose_link_cmd`; do + case $p in + + -L* | -R* | -l*) + # Some compilers place space between "-{L,R}" and the path. + # Remove the space. + if test $p = "-L" \ + || test $p = "-R"; then + prev=$p + continue + else + prev= + fi + + if test "$pre_test_object_deps_done" = no; then + case $p in + -L* | -R*) + # Internal compiler library paths should come after those + # provided the user. The postdeps already come after the + # user supplied libs so there is no need to process them. + if test -z "$compiler_lib_search_path_CXX"; then + compiler_lib_search_path_CXX="${prev}${p}" + else + compiler_lib_search_path_CXX="${compiler_lib_search_path_CXX} ${prev}${p}" + fi + ;; + # The "-l" case would never come before the object being + # linked, so don't bother handling this case. + esac + else + if test -z "$postdeps_CXX"; then + postdeps_CXX="${prev}${p}" + else + postdeps_CXX="${postdeps_CXX} ${prev}${p}" + fi + fi + ;; + + *.$objext) + # This assumes that the test object file only shows up + # once in the compiler output. 
+ if test "$p" = "conftest.$objext"; then + pre_test_object_deps_done=yes + continue + fi + + if test "$pre_test_object_deps_done" = no; then + if test -z "$predep_objects_CXX"; then + predep_objects_CXX="$p" + else + predep_objects_CXX="$predep_objects_CXX $p" + fi + else + if test -z "$postdep_objects_CXX"; then + postdep_objects_CXX="$p" + else + postdep_objects_CXX="$postdep_objects_CXX $p" + fi + fi + ;; + + *) ;; # Ignore the rest. + + esac + done + + # Clean up. + rm -f a.out a.exe +else + echo "libtool.m4: error: problem compiling CXX test program" +fi + +$rm -f confest.$objext + +# PORTME: override above test on systems where it is broken +case $host_os in +interix3*) + # Interix 3.5 installs completely hosed .la files for C++, so rather than + # hack all around it, let's just trust "g++" to DTRT. + predep_objects_CXX= + postdep_objects_CXX= + postdeps_CXX= + ;; + +solaris*) + case $cc_basename in + CC*) + # Adding this requires a known-good setup of shared libraries for + # Sun compiler versions before 5.6, else PIC objects from an old + # archive will be linked into the output, leading to subtle bugs. + postdeps_CXX='-lCstd -lCrun' + ;; + esac + ;; +esac + + +case " $postdeps_CXX " in +*" -lc "*) archive_cmds_need_lc_CXX=no ;; +esac + +lt_prog_compiler_wl_CXX= +lt_prog_compiler_pic_CXX= +lt_prog_compiler_static_CXX= + +echo "$as_me:$LINENO: checking for $compiler option to produce PIC" >&5 +echo $ECHO_N "checking for $compiler option to produce PIC... $ECHO_C" >&6 + + # C++ specific cases for pic, static, wl, etc. + if test "$GXX" = yes; then + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_CXX='-Bstatic' + fi + ;; + amigaos*) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + lt_prog_compiler_pic_CXX='-m68020 -resident32 -malways-restore-a4' + ;; + beos* | cygwin* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + mingw* | os2* | pw32*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic_CXX='-DDLL_EXPORT' + ;; + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic_CXX='-fno-common' + ;; + *djgpp*) + # DJGPP does not support shared libraries at all + lt_prog_compiler_pic_CXX= + ;; + interix3*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic_CXX=-Kconform_pic + fi + ;; + hpux*) + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + ;; + *) + lt_prog_compiler_pic_CXX='-fPIC' + ;; + esac + ;; + *) + lt_prog_compiler_pic_CXX='-fPIC' + ;; + esac + else + case $host_os in + aix4* | aix5*) + # All AIX code is PIC. 
+ if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_CXX='-Bstatic' + else + lt_prog_compiler_static_CXX='-bnso -bI:/lib/syscalls.exp' + fi + ;; + chorus*) + case $cc_basename in + cxch68*) + # Green Hills C++ Compiler + # _LT_AC_TAGVAR(lt_prog_compiler_static, CXX)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" + ;; + esac + ;; + darwin*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + case $cc_basename in + xlc*) + lt_prog_compiler_pic_CXX='-qnocommon' + lt_prog_compiler_wl_CXX='-Wl,' + ;; + esac + ;; + dgux*) + case $cc_basename in + ec++*) + lt_prog_compiler_pic_CXX='-KPIC' + ;; + ghcx*) + # Green Hills C++ Compiler + lt_prog_compiler_pic_CXX='-pic' + ;; + *) + ;; + esac + ;; + freebsd* | kfreebsd*-gnu | dragonfly*) + # FreeBSD uses GNU C++ + ;; + hpux9* | hpux10* | hpux11*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='${wl}-a ${wl}archive' + if test "$host_cpu" != ia64; then + lt_prog_compiler_pic_CXX='+Z' + fi + ;; + aCC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='${wl}-a ${wl}archive' + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic_CXX='+Z' + ;; + esac + ;; + *) + ;; + esac + ;; + interix*) + # This is c89, which is MS Visual C++ (no shared libs) + # Anyone wants to do a port? + ;; + irix5* | irix6* | nonstopux*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='-non_shared' + # CC pic flag -KPIC is the default. + ;; + *) + ;; + esac + ;; + linux*) + case $cc_basename in + KCC*) + # KAI C++ Compiler + lt_prog_compiler_wl_CXX='--backend -Wl,' + lt_prog_compiler_pic_CXX='-fPIC' + ;; + icpc* | ecpc*) + # Intel C++ + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-static' + ;; + pgCC*) + # Portland Group C++ compiler. + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-fpic' + lt_prog_compiler_static_CXX='-Bstatic' + ;; + cxx*) + # Compaq C++ + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + lt_prog_compiler_pic_CXX= + lt_prog_compiler_static_CXX='-non_shared' + ;; + *) + ;; + esac + ;; + lynxos*) + ;; + m88k*) + ;; + mvs*) + case $cc_basename in + cxx*) + lt_prog_compiler_pic_CXX='-W c,exportall' + ;; + *) + ;; + esac + ;; + netbsd*) + ;; + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + lt_prog_compiler_wl_CXX='--backend -Wl,' + ;; + RCC*) + # Rational C++ 2.4.1 + lt_prog_compiler_pic_CXX='-pic' + ;; + cxx*) + # Digital/Compaq C++ + lt_prog_compiler_wl_CXX='-Wl,' + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. 
+ lt_prog_compiler_pic_CXX= + lt_prog_compiler_static_CXX='-non_shared' + ;; + *) + ;; + esac + ;; + psos*) + ;; + solaris*) + case $cc_basename in + CC*) + # Sun C++ 4.2, 5.x and Centerline C++ + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-Bstatic' + lt_prog_compiler_wl_CXX='-Qoption ld ' + ;; + gcx*) + # Green Hills C++ Compiler + lt_prog_compiler_pic_CXX='-PIC' + ;; + *) + ;; + esac + ;; + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + lt_prog_compiler_pic_CXX='-pic' + lt_prog_compiler_static_CXX='-Bstatic' + ;; + lcc*) + # Lucid + lt_prog_compiler_pic_CXX='-pic' + ;; + *) + ;; + esac + ;; + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + lt_prog_compiler_pic_CXX='-KPIC' + ;; + *) + ;; + esac + ;; + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-Bstatic' + ;; + esac + ;; + vxworks*) + ;; + *) + lt_prog_compiler_can_build_shared_CXX=no + ;; + esac + fi + +echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_CXX" >&5 +echo "${ECHO_T}$lt_prog_compiler_pic_CXX" >&6 + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$lt_prog_compiler_pic_CXX"; then + +echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5 +echo $ECHO_N "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... $ECHO_C" >&6 +if test "${lt_prog_compiler_pic_works_CXX+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_prog_compiler_pic_works_CXX=no + ac_outfile=conftest.$ac_objext + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic_CXX -DPIC" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:11510: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:11514: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_prog_compiler_pic_works_CXX=yes + fi + fi + $rm conftest* + +fi +echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_works_CXX" >&5 +echo "${ECHO_T}$lt_prog_compiler_pic_works_CXX" >&6 + +if test x"$lt_prog_compiler_pic_works_CXX" = xyes; then + case $lt_prog_compiler_pic_CXX in + "" | " "*) ;; + *) lt_prog_compiler_pic_CXX=" $lt_prog_compiler_pic_CXX" ;; + esac +else + lt_prog_compiler_pic_CXX= + lt_prog_compiler_can_build_shared_CXX=no +fi + +fi +case $host_os in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic_CXX= + ;; + *) + lt_prog_compiler_pic_CXX="$lt_prog_compiler_pic_CXX -DPIC" + ;; +esac + +# +# Check to make sure the static flag actually works. 
+# +wl=$lt_prog_compiler_wl_CXX eval lt_tmp_static_flag=\"$lt_prog_compiler_static_CXX\" +echo "$as_me:$LINENO: checking if $compiler static flag $lt_tmp_static_flag works" >&5 +echo $ECHO_N "checking if $compiler static flag $lt_tmp_static_flag works... $ECHO_C" >&6 +if test "${lt_prog_compiler_static_works_CXX+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_prog_compiler_static_works_CXX=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $lt_tmp_static_flag" + printf "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. + cat conftest.err 1>&5 + $echo "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_prog_compiler_static_works_CXX=yes + fi + else + lt_prog_compiler_static_works_CXX=yes + fi + fi + $rm conftest* + LDFLAGS="$save_LDFLAGS" + +fi +echo "$as_me:$LINENO: result: $lt_prog_compiler_static_works_CXX" >&5 +echo "${ECHO_T}$lt_prog_compiler_static_works_CXX" >&6 + +if test x"$lt_prog_compiler_static_works_CXX" = xyes; then + : +else + lt_prog_compiler_static_CXX= +fi + + +echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5 +echo $ECHO_N "checking if $compiler supports -c -o file.$ac_objext... $ECHO_C" >&6 +if test "${lt_cv_prog_compiler_c_o_CXX+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_prog_compiler_c_o_CXX=no + $rm -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:11614: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:11618: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o_CXX=yes + fi + fi + chmod u+w . 2>&5 + $rm conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $rm out/ii_files/* && rmdir out/ii_files + $rm out/* && rmdir out + cd .. 
+ rmdir conftest + $rm conftest* + +fi +echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_c_o_CXX" >&5 +echo "${ECHO_T}$lt_cv_prog_compiler_c_o_CXX" >&6 + + +hard_links="nottested" +if test "$lt_cv_prog_compiler_c_o_CXX" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + echo "$as_me:$LINENO: checking if we can lock with hard links" >&5 +echo $ECHO_N "checking if we can lock with hard links... $ECHO_C" >&6 + hard_links=yes + $rm conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + echo "$as_me:$LINENO: result: $hard_links" >&5 +echo "${ECHO_T}$hard_links" >&6 + if test "$hard_links" = no; then + { echo "$as_me:$LINENO: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 +echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi + +echo "$as_me:$LINENO: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +echo $ECHO_N "checking whether the $compiler linker ($LD) supports shared libraries... $ECHO_C" >&6 + + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + case $host_os in + aix4* | aix5*) + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to AIX nm, but means don't demangle with GNU nm + if $NM -V 2>&1 | grep 'GNU' > /dev/null; then + export_symbols_cmds_CXX='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds_CXX='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' + fi + ;; + pw32*) + export_symbols_cmds_CXX="$ltdll_cmds" + ;; + cygwin* | mingw*) + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS] /s/.* \([^ ]*\)/\1 DATA/;/^.* __nm__/s/^.* __nm__\([^ ]*\) [^ ]*/\1 DATA/;/^I /d;/^[AITW] /s/.* //'\'' | sort | uniq > $export_symbols' + ;; + *) + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + ;; + esac + +echo "$as_me:$LINENO: result: $ld_shlibs_CXX" >&5 +echo "${ECHO_T}$ld_shlibs_CXX" >&6 +test "$ld_shlibs_CXX" = no && can_build_shared=no + +# +# Do we need to explicitly link libc? +# +case "x$archive_cmds_need_lc_CXX" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc_CXX=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $archive_cmds_CXX in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + echo "$as_me:$LINENO: checking whether -lc should be explicitly linked in" >&5 +echo $ECHO_N "checking whether -lc should be explicitly linked in... $ECHO_C" >&6 + $rm conftest* + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl_CXX + pic_flag=$lt_prog_compiler_pic_CXX + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag_CXX + allow_undefined_flag_CXX= + if { (eval echo "$as_me:$LINENO: \"$archive_cmds_CXX 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1\"") >&5 + (eval $archive_cmds_CXX 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + then + archive_cmds_need_lc_CXX=no + else + archive_cmds_need_lc_CXX=yes + fi + allow_undefined_flag_CXX=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $rm conftest* + echo "$as_me:$LINENO: result: $archive_cmds_need_lc_CXX" >&5 +echo "${ECHO_T}$archive_cmds_need_lc_CXX" >&6 + ;; + esac + fi + ;; +esac + +echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5 +echo $ECHO_N "checking dynamic linker characteristics... $ECHO_C" >&6 +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';' >/dev/null ; then + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix4* | aix5*) + version_type=linux + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. 
+ case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | grep yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. + finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[45]*) + version_type=linux + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$host_os in + yes,cygwin* | yes,mingw* | yes,pw32*) + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i;echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $rm \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" + ;; + mingw*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';[c-zC-Z]:/' >/dev/null; then + # It is most probably a Windows format PATH printed by + # mingw gcc, but we are running on Cygwin. Gcc prints its search + # path with ; separators, and with drive letters. We can handle the + # drive letters (cygwin fileutils understands them), so leave them, + # especially as we might pass files found there to a mingw objdump, + # which wouldn't understand a cygwinified path. Ahh. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac + ;; + + *) + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' + ;; + esac + dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${versuffix}$shared_ext ${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' + # Apple's gcc prints 'gcc -print-search-dirs' doesn't operate the same. + if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"` + else + sys_lib_search_path_spec='/lib /usr/lib /usr/local/lib' + fi + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd1*) + dynamic_linker=no + ;; + +kfreebsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. 
+ if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[123]*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[01]* | freebsdelf3.[01]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ + freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + freebsd*) # from 4.6 on + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +gnu*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555. 
+ postinstall_cmds='chmod 555 $lib' + ;; + +interix3*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be Linux ELF. +linux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # find out which ABI we are using + libsuff= + case "$host_cpu" in + x86_64*|s390x*|powerpc64*) + echo '#line 12150 "configure"' > conftest.$ac_ext + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + case `/usr/bin/file conftest.$ac_objext` in + *64-bit*) + libsuff=64 + sys_lib_search_path_spec="/lib${libsuff} /usr/lib${libsuff} /usr/local/lib${libsuff}" + ;; + esac + fi + rm -rf conftest* + ;; + esac + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib${libsuff} /usr/lib${libsuff} $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. 
Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. + dynamic_linker='GNU/Linux ld.so' + ;; + +knetbsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +nto-qnx*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +openbsd*) + version_type=sunos + sys_lib_dlsearch_path_spec="/usr/lib" + need_lib_prefix=no + # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
+ case $host_os in + openbsd3.3 | openbsd3.3.*) need_version=yes ;; + *) need_version=no ;; + esac + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[89] | openbsd2.[89].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +solaris*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + export_dynamic_flag_spec='${wl}-Blargedynsym' + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=freebsd-elf + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + if test "$with_gnu_ld" = yes; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + shlibpath_overrides_runpath=no + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + shlibpath_overrides_runpath=yes + case $host_os in + 
sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +uts4*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +echo "$as_me:$LINENO: result: $dynamic_linker" >&5 +echo "${ECHO_T}$dynamic_linker" >&6 +test "$dynamic_linker" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 +echo $ECHO_N "checking how to hardcode library paths into programs... $ECHO_C" >&6 +hardcode_action_CXX= +if test -n "$hardcode_libdir_flag_spec_CXX" || \ + test -n "$runpath_var_CXX" || \ + test "X$hardcode_automatic_CXX" = "Xyes" ; then + + # We can hardcode non-existant directories. + if test "$hardcode_direct_CXX" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, CXX)" != no && + test "$hardcode_minus_L_CXX" != no; then + # Linking always hardcodes the temporary library directory. + hardcode_action_CXX=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action_CXX=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + hardcode_action_CXX=unsupported +fi +echo "$as_me:$LINENO: result: $hardcode_action_CXX" >&5 +echo "${ECHO_T}$hardcode_action_CXX" >&6 + +if test "$hardcode_action_CXX" = relink; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi + + +# The else clause should only fire when bootstrapping the +# libtool distribution, otherwise you forgot to ship ltmain.sh +# with your package, and you will get complaints that there are +# no rules to generate ltmain.sh. +if test -f "$ltmain"; then + # See if we are running on zsh, and set the options which allow our commands through + # without removal of \ escapes. + if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + # Now quote all the things that may contain metacharacters while being + # careful not to overquote the AC_SUBSTed values. We take copies of the + # variables and quote the copies for generation of the libtool script. 
+ for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC LTCFLAGS NM \ + SED SHELL STRIP \ + libname_spec library_names_spec soname_spec extract_expsyms_cmds \ + old_striplib striplib file_magic_cmd finish_cmds finish_eval \ + deplibs_check_method reload_flag reload_cmds need_locks \ + lt_cv_sys_global_symbol_pipe lt_cv_sys_global_symbol_to_cdecl \ + lt_cv_sys_global_symbol_to_c_name_address \ + sys_lib_search_path_spec sys_lib_dlsearch_path_spec \ + old_postinstall_cmds old_postuninstall_cmds \ + compiler_CXX \ + CC_CXX \ + LD_CXX \ + lt_prog_compiler_wl_CXX \ + lt_prog_compiler_pic_CXX \ + lt_prog_compiler_static_CXX \ + lt_prog_compiler_no_builtin_flag_CXX \ + export_dynamic_flag_spec_CXX \ + thread_safe_flag_spec_CXX \ + whole_archive_flag_spec_CXX \ + enable_shared_with_static_runtimes_CXX \ + old_archive_cmds_CXX \ + old_archive_from_new_cmds_CXX \ + predep_objects_CXX \ + postdep_objects_CXX \ + predeps_CXX \ + postdeps_CXX \ + compiler_lib_search_path_CXX \ + archive_cmds_CXX \ + archive_expsym_cmds_CXX \ + postinstall_cmds_CXX \ + postuninstall_cmds_CXX \ + old_archive_from_expsyms_cmds_CXX \ + allow_undefined_flag_CXX \ + no_undefined_flag_CXX \ + export_symbols_cmds_CXX \ + hardcode_libdir_flag_spec_CXX \ + hardcode_libdir_flag_spec_ld_CXX \ + hardcode_libdir_separator_CXX \ + hardcode_automatic_CXX \ + module_cmds_CXX \ + module_expsym_cmds_CXX \ + lt_cv_prog_compiler_c_o_CXX \ + exclude_expsyms_CXX \ + include_expsyms_CXX; do + + case $var in + old_archive_cmds_CXX | \ + old_archive_from_new_cmds_CXX | \ + archive_cmds_CXX | \ + archive_expsym_cmds_CXX | \ + module_cmds_CXX | \ + module_expsym_cmds_CXX | \ + old_archive_from_expsyms_cmds_CXX | \ + export_symbols_cmds_CXX | \ + extract_expsyms_cmds | reload_cmds | finish_cmds | \ + postinstall_cmds | postuninstall_cmds | \ + old_postinstall_cmds | old_postuninstall_cmds | \ + sys_lib_search_path_spec | sys_lib_dlsearch_path_spec) + # Double-quote double-evaled strings. + eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\"" + ;; + *) + eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\"" + ;; + esac + done + + case $lt_echo in + *'\$0 --fallback-echo"') + lt_echo=`$echo "X$lt_echo" | $Xsed -e 's/\\\\\\\$0 --fallback-echo"$/$0 --fallback-echo"/'` + ;; + esac + +cfgfile="$ofile" + + cat <<__EOF__ >> "$cfgfile" +# ### BEGIN LIBTOOL TAG CONFIG: $tagname + +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc_CXX + +# Whether or not to disallow shared libs when runtime libs are static +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_CXX + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# An echo program that does not interpret backslashes. +echo=$lt_echo + +# The archiver. +AR=$lt_AR +AR_FLAGS=$lt_AR_FLAGS + +# A C compiler. +LTCC=$lt_LTCC + +# LTCC compiler flags. +LTCFLAGS=$lt_LTCFLAGS + +# A language-specific compiler. 
+CC=$lt_compiler_CXX + +# Is the compiler the GNU C compiler? +with_gcc=$GCC_CXX + +gcc_dir=\`gcc -print-file-name=. | $SED 's,/\.$,,'\` +gcc_ver=\`gcc -dumpversion\` + +# An ERE matcher. +EGREP=$lt_EGREP + +# The linker used to build libraries. +LD=$lt_LD_CXX + +# Whether we need hard or soft links. +LN_S=$lt_LN_S + +# A BSD-compatible nm program. +NM=$lt_NM + +# A symbol stripping program +STRIP=$lt_STRIP + +# Used to examine libraries when file_magic_cmd begins "file" +MAGIC_CMD=$MAGIC_CMD + +# Used on cygwin: DLL creation program. +DLLTOOL="$DLLTOOL" + +# Used on cygwin: object dumper. +OBJDUMP="$OBJDUMP" + +# Used on cygwin: assembler. +AS="$AS" + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl_CXX + +# Object file suffix (normally "o"). +objext="$ac_objext" + +# Old archive suffix (normally "a"). +libext="$libext" + +# Shared library suffix (normally ".so"). +shrext_cmds='$shrext_cmds' + +# Executable file suffix (normally ""). +exeext="$exeext" + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic_CXX +pic_mode=$pic_mode + +# What is the maximum length of a command? +max_cmd_len=$lt_cv_sys_max_cmd_len + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o_CXX + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Do we need the lib prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static_CXX + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_CXX + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec_CXX + +# Compiler flag to generate thread-safe objects. +thread_safe_flag_spec=$lt_thread_safe_flag_spec_CXX + +# Library versioning type. +version_type=$version_type + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME. +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Commands used to build and install an old-style archive. +RANLIB=$lt_RANLIB +old_archive_cmds=$lt_old_archive_cmds_CXX +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_CXX + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_CXX + +# Commands used to build and install a shared archive. 
+archive_cmds=$lt_archive_cmds_CXX +archive_expsym_cmds=$lt_archive_expsym_cmds_CXX +postinstall_cmds=$lt_postinstall_cmds +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to build a loadable module (assumed same as above if empty) +module_cmds=$lt_module_cmds_CXX +module_expsym_cmds=$lt_module_expsym_cmds_CXX + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + +# Dependencies to place before the objects being linked to create a +# shared library. +predep_objects=\`echo $lt_predep_objects_CXX | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` + +# Dependencies to place after the objects being linked to create a +# shared library. +postdep_objects=\`echo $lt_postdep_objects_CXX | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` + +# Dependencies to place before the objects being linked to create a +# shared library. +predeps=$lt_predeps_CXX + +# Dependencies to place after the objects being linked to create a +# shared library. +postdeps=$lt_postdeps_CXX + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=\`echo $lt_compiler_lib_search_path_CXX | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method == file_magic. +file_magic_cmd=$lt_file_magic_cmd + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag_CXX + +# Flag that forces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag_CXX + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# Same as above, but a single script fragment to be evaled but not shown. +finish_eval=$lt_finish_eval + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm in a C name address pair +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# This is the shared library runtime path variable. +runpath_var=$runpath_var + +# This is the shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action_CXX + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist. +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_CXX + +# If ld is used when linking, flag to hardcode \$libdir into +# a binary during linking. This must work even if \$libdir does +# not exist. +hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld_CXX + +# Whether we need a single -rpath flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator_CXX + +# Set to yes if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the +# resulting binary. 
+hardcode_direct=$hardcode_direct_CXX + +# Set to yes if using the -LDIR flag during linking hardcodes DIR into the +# resulting binary. +hardcode_minus_L=$hardcode_minus_L_CXX + +# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into +# the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var_CXX + +# Set to yes if building a shared library automatically hardcodes DIR into the library +# and all subsequent libraries and executables linked against it. +hardcode_automatic=$hardcode_automatic_CXX + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at relink time. +variables_saved_for_relink="$variables_saved_for_relink" + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs_CXX + +# Compile-time system search path for libraries +sys_lib_search_path_spec=\`echo $lt_sys_lib_search_path_spec | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` + +# Run-time system search path for libraries +sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec + +# Fix the shell variable \$srcfile for the compiler. +fix_srcfile_path="$fix_srcfile_path_CXX" + +# Set to yes if exported symbols are required. +always_export_symbols=$always_export_symbols_CXX + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds_CXX + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms_CXX + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms_CXX + +# ### END LIBTOOL TAG CONFIG: $tagname + +__EOF__ + + +else + # If there is no Makefile yet, we rely on a make rule to execute + # `config.status --recheck' to rerun these tests and create the + # libtool script then. + ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` + if test -f "$ltmain_in"; then + test -f Makefile && make "$ltmain" + fi +fi + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +CC=$lt_save_CC +LDCXX=$LD +LD=$lt_save_LD +GCC=$lt_save_GCC +with_gnu_ldcxx=$with_gnu_ld +with_gnu_ld=$lt_save_with_gnu_ld +lt_cv_path_LDCXX=$lt_cv_path_LD +lt_cv_path_LD=$lt_save_path_LD +lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld +lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld + + else + tagname="" + fi + ;; + + F77) + if test -n "$F77" && test "X$F77" != "Xno"; then + +ac_ext=f +ac_compile='$F77 -c $FFLAGS conftest.$ac_ext >&5' +ac_link='$F77 -o conftest$ac_exeext $FFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_f77_compiler_gnu + + +archive_cmds_need_lc_F77=no +allow_undefined_flag_F77= +always_export_symbols_F77=no +archive_expsym_cmds_F77= +export_dynamic_flag_spec_F77= +hardcode_direct_F77=no +hardcode_libdir_flag_spec_F77= +hardcode_libdir_flag_spec_ld_F77= +hardcode_libdir_separator_F77= +hardcode_minus_L_F77=no +hardcode_automatic_F77=no +module_cmds_F77= +module_expsym_cmds_F77= +link_all_deplibs_F77=unknown +old_archive_cmds_F77=$old_archive_cmds +no_undefined_flag_F77= +whole_archive_flag_spec_F77= +enable_shared_with_static_runtimes_F77=no + +# Source file extension for f77 test sources. +ac_ext=f + +# Object file extension for compiled f77 test sources. 
+objext=o +objext_F77=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code=" subroutine t\n return\n end\n" + +# Code to be used in simple link tests +lt_simple_link_test_code=" program t\n end\n" + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + + +# save warnings/boilerplate of simple test code +ac_outfile=conftest.$ac_objext +printf "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$rm conftest* + +ac_outfile=conftest.$ac_objext +printf "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$rm conftest* + + +# Allow CC to be a program name with arguments. +lt_save_CC="$CC" +CC=${F77-"f77"} +compiler=$CC +compiler_F77=$CC +for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` + + +echo "$as_me:$LINENO: checking if libtool supports shared libraries" >&5 +echo $ECHO_N "checking if libtool supports shared libraries... $ECHO_C" >&6 +echo "$as_me:$LINENO: result: $can_build_shared" >&5 +echo "${ECHO_T}$can_build_shared" >&6 + +echo "$as_me:$LINENO: checking whether to build shared libraries" >&5 +echo $ECHO_N "checking whether to build shared libraries... $ECHO_C" >&6 +test "$can_build_shared" = "no" && enable_shared=no + +# On AIX, shared libraries and static libraries use the same namespace, and +# are all built from PIC. +case $host_os in +aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; +aix4* | aix5*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; +esac +echo "$as_me:$LINENO: result: $enable_shared" >&5 +echo "${ECHO_T}$enable_shared" >&6 + +echo "$as_me:$LINENO: checking whether to build static libraries" >&5 +echo $ECHO_N "checking whether to build static libraries... $ECHO_C" >&6 +# Make sure either enable_shared or enable_static is yes. +test "$enable_shared" = yes || enable_static=yes +echo "$as_me:$LINENO: result: $enable_static" >&5 +echo "${ECHO_T}$enable_static" >&6 + +GCC_F77="$G77" +LD_F77="$LD" + +lt_prog_compiler_wl_F77= +lt_prog_compiler_pic_F77= +lt_prog_compiler_static_F77= + +echo "$as_me:$LINENO: checking for $compiler option to produce PIC" >&5 +echo $ECHO_N "checking for $compiler option to produce PIC... $ECHO_C" >&6 + + if test "$GCC" = yes; then + lt_prog_compiler_wl_F77='-Wl,' + lt_prog_compiler_static_F77='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_F77='-Bstatic' + fi + ;; + + amigaos*) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. 
+ lt_prog_compiler_pic_F77='-m68020 -resident32 -malways-restore-a4' + ;; + + beos* | cygwin* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + + mingw* | pw32* | os2*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic_F77='-DDLL_EXPORT' + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic_F77='-fno-common' + ;; + + interix3*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. + lt_prog_compiler_can_build_shared_F77=no + enable_shared=no + ;; + + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic_F77=-Kconform_pic + fi + ;; + + hpux*) + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic_F77='-fPIC' + ;; + esac + ;; + + *) + lt_prog_compiler_pic_F77='-fPIC' + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. + case $host_os in + aix*) + lt_prog_compiler_wl_F77='-Wl,' + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_F77='-Bstatic' + else + lt_prog_compiler_static_F77='-bnso -bI:/lib/syscalls.exp' + fi + ;; + darwin*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + case $cc_basename in + xlc*) + lt_prog_compiler_pic_F77='-qnocommon' + lt_prog_compiler_wl_F77='-Wl,' + ;; + esac + ;; + + mingw* | pw32* | os2*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic_F77='-DDLL_EXPORT' + ;; + + hpux9* | hpux10* | hpux11*) + lt_prog_compiler_wl_F77='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic_F77='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? + lt_prog_compiler_static_F77='${wl}-a ${wl}archive' + ;; + + irix5* | irix6* | nonstopux*) + lt_prog_compiler_wl_F77='-Wl,' + # PIC (with -KPIC) is the default. + lt_prog_compiler_static_F77='-non_shared' + ;; + + newsos6) + lt_prog_compiler_pic_F77='-KPIC' + lt_prog_compiler_static_F77='-Bstatic' + ;; + + linux*) + case $cc_basename in + icc* | ecc*) + lt_prog_compiler_wl_F77='-Wl,' + lt_prog_compiler_pic_F77='-KPIC' + lt_prog_compiler_static_F77='-static' + ;; + pgcc* | pgf77* | pgf90* | pgf95*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + lt_prog_compiler_wl_F77='-Wl,' + lt_prog_compiler_pic_F77='-fpic' + lt_prog_compiler_static_F77='-Bstatic' + ;; + ccc*) + lt_prog_compiler_wl_F77='-Wl,' + # All Alpha code is PIC. + lt_prog_compiler_static_F77='-non_shared' + ;; + esac + ;; + + osf3* | osf4* | osf5*) + lt_prog_compiler_wl_F77='-Wl,' + # All OSF/1 code is PIC. 
+ lt_prog_compiler_static_F77='-non_shared' + ;; + + solaris*) + lt_prog_compiler_pic_F77='-KPIC' + lt_prog_compiler_static_F77='-Bstatic' + case $cc_basename in + f77* | f90* | f95*) + lt_prog_compiler_wl_F77='-Qoption ld ';; + *) + lt_prog_compiler_wl_F77='-Wl,';; + esac + ;; + + sunos4*) + lt_prog_compiler_wl_F77='-Qoption ld ' + lt_prog_compiler_pic_F77='-PIC' + lt_prog_compiler_static_F77='-Bstatic' + ;; + + sysv4 | sysv4.2uw2* | sysv4.3*) + lt_prog_compiler_wl_F77='-Wl,' + lt_prog_compiler_pic_F77='-KPIC' + lt_prog_compiler_static_F77='-Bstatic' + ;; + + sysv4*MP*) + if test -d /usr/nec ;then + lt_prog_compiler_pic_F77='-Kconform_pic' + lt_prog_compiler_static_F77='-Bstatic' + fi + ;; + + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + lt_prog_compiler_wl_F77='-Wl,' + lt_prog_compiler_pic_F77='-KPIC' + lt_prog_compiler_static_F77='-Bstatic' + ;; + + unicos*) + lt_prog_compiler_wl_F77='-Wl,' + lt_prog_compiler_can_build_shared_F77=no + ;; + + uts4*) + lt_prog_compiler_pic_F77='-pic' + lt_prog_compiler_static_F77='-Bstatic' + ;; + + *) + lt_prog_compiler_can_build_shared_F77=no + ;; + esac + fi + +echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_F77" >&5 +echo "${ECHO_T}$lt_prog_compiler_pic_F77" >&6 + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$lt_prog_compiler_pic_F77"; then + +echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic_F77 works" >&5 +echo $ECHO_N "checking if $compiler PIC flag $lt_prog_compiler_pic_F77 works... $ECHO_C" >&6 +if test "${lt_prog_compiler_pic_works_F77+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_prog_compiler_pic_works_F77=no + ac_outfile=conftest.$ac_objext + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic_F77" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:13208: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:13212: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_prog_compiler_pic_works_F77=yes + fi + fi + $rm conftest* + +fi +echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_works_F77" >&5 +echo "${ECHO_T}$lt_prog_compiler_pic_works_F77" >&6 + +if test x"$lt_prog_compiler_pic_works_F77" = xyes; then + case $lt_prog_compiler_pic_F77 in + "" | " "*) ;; + *) lt_prog_compiler_pic_F77=" $lt_prog_compiler_pic_F77" ;; + esac +else + lt_prog_compiler_pic_F77= + lt_prog_compiler_can_build_shared_F77=no +fi + +fi +case $host_os in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic_F77= + ;; + *) + lt_prog_compiler_pic_F77="$lt_prog_compiler_pic_F77" + ;; +esac + +# +# Check to make sure the static flag actually works. +# +wl=$lt_prog_compiler_wl_F77 eval lt_tmp_static_flag=\"$lt_prog_compiler_static_F77\" +echo "$as_me:$LINENO: checking if $compiler static flag $lt_tmp_static_flag works" >&5 +echo $ECHO_N "checking if $compiler static flag $lt_tmp_static_flag works... $ECHO_C" >&6 +if test "${lt_prog_compiler_static_works_F77+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_prog_compiler_static_works_F77=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $lt_tmp_static_flag" + printf "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. + cat conftest.err 1>&5 + $echo "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_prog_compiler_static_works_F77=yes + fi + else + lt_prog_compiler_static_works_F77=yes + fi + fi + $rm conftest* + LDFLAGS="$save_LDFLAGS" + +fi +echo "$as_me:$LINENO: result: $lt_prog_compiler_static_works_F77" >&5 +echo "${ECHO_T}$lt_prog_compiler_static_works_F77" >&6 + +if test x"$lt_prog_compiler_static_works_F77" = xyes; then + : +else + lt_prog_compiler_static_F77= +fi + + +echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5 +echo $ECHO_N "checking if $compiler supports -c -o file.$ac_objext... $ECHO_C" >&6 +if test "${lt_cv_prog_compiler_c_o_F77+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_prog_compiler_c_o_F77=no + $rm -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:13312: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:13316: \$? 
= $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o_F77=yes + fi + fi + chmod u+w . 2>&5 + $rm conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $rm out/ii_files/* && rmdir out/ii_files + $rm out/* && rmdir out + cd .. + rmdir conftest + $rm conftest* + +fi +echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_c_o_F77" >&5 +echo "${ECHO_T}$lt_cv_prog_compiler_c_o_F77" >&6 + + +hard_links="nottested" +if test "$lt_cv_prog_compiler_c_o_F77" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + echo "$as_me:$LINENO: checking if we can lock with hard links" >&5 +echo $ECHO_N "checking if we can lock with hard links... $ECHO_C" >&6 + hard_links=yes + $rm conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + echo "$as_me:$LINENO: result: $hard_links" >&5 +echo "${ECHO_T}$hard_links" >&6 + if test "$hard_links" = no; then + { echo "$as_me:$LINENO: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 +echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi + +echo "$as_me:$LINENO: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +echo $ECHO_N "checking whether the $compiler linker ($LD) supports shared libraries... $ECHO_C" >&6 + + runpath_var= + allow_undefined_flag_F77= + enable_shared_with_static_runtimes_F77=no + archive_cmds_F77= + archive_expsym_cmds_F77= + old_archive_From_new_cmds_F77= + old_archive_from_expsyms_cmds_F77= + export_dynamic_flag_spec_F77= + whole_archive_flag_spec_F77= + thread_safe_flag_spec_F77= + hardcode_libdir_flag_spec_F77= + hardcode_libdir_flag_spec_ld_F77= + hardcode_libdir_separator_F77= + hardcode_direct_F77=no + hardcode_minus_L_F77=no + hardcode_shlibpath_var_F77=unsupported + link_all_deplibs_F77=unknown + hardcode_automatic_F77=no + module_cmds_F77= + module_expsym_cmds_F77= + always_export_symbols_F77=no + export_symbols_cmds_F77='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + include_expsyms_F77= + # exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ` (' and `)$', so one must not match beginning or + # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', + # as well as any symbol that contains `d'. + exclude_expsyms_F77="_GLOBAL_OFFSET_TABLE_" + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + extract_expsyms_cmds= + # Just being paranoid about ensuring that cc_basename is set. 
+ for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` + + case $host_os in + cygwin* | mingw* | pw32*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + if test "$GCC" != yes; then + with_gnu_ld=no + fi + ;; + interix*) + # we just hope/assume this is gcc and not c89 (= MSVC++) + with_gnu_ld=yes + ;; + openbsd*) + with_gnu_ld=no + ;; + esac + + ld_shlibs_F77=yes + if test "$with_gnu_ld" = yes; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='${wl}' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec_F77='${wl}--rpath ${wl}$libdir' + export_dynamic_flag_spec_F77='${wl}--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. + if $LD --help 2>&1 | grep 'no-whole-archive' > /dev/null; then + whole_archive_flag_spec_F77="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + whole_archive_flag_spec_F77= + fi + supports_anon_versioning=no + case `$LD -v 2>/dev/null` in + *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + + # See if GNU ld supports shared libraries. + case $host_os in + aix3* | aix4* | aix5*) + # On AIX/PPC, the GNU linker is very broken + if test "$host_cpu" != ia64; then + ld_shlibs_F77=no + cat <<EOF 1>&2 + +*** Warning: the GNU linker, at least up to release 2.9.1, is reported +*** to be unable to reliably create shared libraries on AIX. +*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to modify your PATH +*** so that a non-GNU linker is found, and then restart. + +EOF + fi + ;; + + amigaos*) + archive_cmds_F77='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec_F77='-L$libdir' + hardcode_minus_L_F77=yes + + # Samuel A. Falvo II reports + # that the semantics of dynamic libraries on AmigaOS, at least up + # to version 4, is to share data among multiple programs linked + # with the same dynamic library. Since this doesn't match the + # behavior of shared libraries on other platforms, we can't use + # them. + ld_shlibs_F77=no + ;; + + beos*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + allow_undefined_flag_F77=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation.
FIXME + archive_cmds_F77='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + ld_shlibs_F77=no + fi + ;; + + cygwin* | mingw* | pw32*) + # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, F77) is actually meaningless, + # as there is no search path for DLLs. + hardcode_libdir_flag_spec_F77='-L$libdir' + allow_undefined_flag_F77=unsupported + always_export_symbols_F77=no + enable_shared_with_static_runtimes_F77=yes + export_symbols_cmds_F77='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS] /s/.* \([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW] /s/.* //'\'' | sort | uniq > $export_symbols' + + if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then + archive_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... + archive_expsym_cmds_F77='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + ld_shlibs_F77=no + fi + ;; + + interix3*) + hardcode_direct_F77=no + hardcode_shlibpath_var_F77=no + hardcode_libdir_flag_spec_F77='${wl}-rpath,$libdir' + export_dynamic_flag_spec_F77='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
+ archive_cmds_F77='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + archive_expsym_cmds_F77='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + + linux*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + tmp_addflag= + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + whole_archive_flag_spec_F77='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag' + ;; + pgf77* | pgf90* | pgf95*) # Portland Group f77 and f90 compilers + whole_archive_flag_spec_F77='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + esac + archive_cmds_F77='$CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + + if test $supports_anon_versioning = yes; then + archive_expsym_cmds_F77='$echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + $echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + else + ld_shlibs_F77=no + fi + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + archive_cmds_F77='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + archive_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + + solaris*) + if $LD -v 2>&1 | grep 'BFD 2\.8' > /dev/null; then + ld_shlibs_F77=no + cat <<EOF 1>&2 + +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart.
+ +EOF + elif $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + archive_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs_F77=no + fi + ;; + + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) + case `$LD -v 2>&1` in + *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) + ld_shlibs_F77=no + cat <<_LT_EOF 1>&2 + +*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not +*** reliably create shared libraries on SCO systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.16.91.0.3 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + ;; + *) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + hardcode_libdir_flag_spec_F77='`test -z "$SCOABSPATH" && echo ${wl}-rpath,$libdir`' + archive_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib' + archive_expsym_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname,\${SCOABSPATH:+${install_libdir}/}$soname,-retain-symbols-file,$export_symbols -o $lib' + else + ld_shlibs_F77=no + fi + ;; + esac + ;; + + sunos4*) + archive_cmds_F77='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + hardcode_direct_F77=yes + hardcode_shlibpath_var_F77=no + ;; + + *) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + archive_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs_F77=no + fi + ;; + esac + + if test "$ld_shlibs_F77" = no; then + runpath_var= + hardcode_libdir_flag_spec_F77= + export_dynamic_flag_spec_F77= + whole_archive_flag_spec_F77= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + allow_undefined_flag_F77=unsupported + always_export_symbols_F77=yes + archive_expsym_cmds_F77='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. + hardcode_minus_L_F77=yes + if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + hardcode_direct_F77=unsupported + fi + ;; + + aix4* | aix5*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + # If we're using GNU nm, then we don't want the "-C" option. 
+ # -C means demangle to AIX nm, but means don't demangle with GNU nm + if $NM -V 2>&1 | grep 'GNU' > /dev/null; then + export_symbols_cmds_F77='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds_F77='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[23]|aix4.[23].*|aix5*) + for ld_flag in $LDFLAGS; do + if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then + aix_use_runtimelinking=yes + break + fi + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + archive_cmds_F77='' + hardcode_direct_F77=yes + hardcode_libdir_separator_F77=':' + link_all_deplibs_F77=yes + + if test "$GCC" = yes; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && \ + strings "$collect2name" | grep resolve_lib_name >/dev/null + then + # We have reworked collect2 + hardcode_direct_F77=yes + else + # We have old collect2 + hardcode_direct_F77=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L_F77=yes + hardcode_libdir_flag_spec_F77='-L$libdir' + hardcode_libdir_separator_F77= + fi + ;; + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + always_export_symbols_F77=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + allow_undefined_flag_F77='-berok' + # Determine the default libpath from the value encoded in an empty executable. + cat >conftest.$ac_ext <<_ACEOF + program main + + end +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_f77_werror_flag" + || test ! 
-s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. +if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hardcode_libdir_flag_spec_F77='${wl}-blibpath:$libdir:'"$aix_libpath" + archive_expsym_cmds_F77="\$CC"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + hardcode_libdir_flag_spec_F77='${wl}-R $libdir:/usr/lib:/lib' + allow_undefined_flag_F77="-z nodefs" + archive_expsym_cmds_F77="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an empty executable. + cat >conftest.$ac_ext <<_ACEOF + program main + + end +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_f77_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. +if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hardcode_libdir_flag_spec_F77='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. 
+ no_undefined_flag_F77=' ${wl}-bernotok' + allow_undefined_flag_F77=' ${wl}-berok' + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec_F77='$convenience' + archive_cmds_need_lc_F77=yes + # This is similar to how AIX traditionally builds its shared libraries. + archive_expsym_cmds_F77="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + amigaos*) + archive_cmds_F77='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec_F77='-L$libdir' + hardcode_minus_L_F77=yes + # see comment about different semantics on the GNU ld section + ld_shlibs_F77=no + ;; + + bsdi[45]*) + export_dynamic_flag_spec_F77=-rdynamic + ;; + + cygwin* | mingw* | pw32*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + hardcode_libdir_flag_spec_F77=' ' + allow_undefined_flag_F77=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. + archive_cmds_F77='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | $SED -e '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + old_archive_From_new_cmds_F77='true' + # FIXME: Should let the user specify the lib program. 
+ old_archive_cmds_F77='lib /OUT:$oldlib$oldobjs$old_deplibs' + fix_srcfile_path_F77='`cygpath -w "$srcfile"`' + enable_shared_with_static_runtimes_F77=yes + ;; + + darwin* | rhapsody*) + case $host_os in + rhapsody* | darwin1.[012]) + allow_undefined_flag_F77='${wl}-undefined ${wl}suppress' + ;; + *) # Darwin 1.3 on + if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then + allow_undefined_flag_F77='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + else + case ${MACOSX_DEPLOYMENT_TARGET} in + 10.[012]) + allow_undefined_flag_F77='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + ;; + 10.*) + allow_undefined_flag_F77='${wl}-undefined ${wl}dynamic_lookup' + ;; + esac + fi + ;; + esac + archive_cmds_need_lc_F77=no + hardcode_direct_F77=no + hardcode_automatic_F77=yes + hardcode_shlibpath_var_F77=unsupported + whole_archive_flag_spec_F77='' + link_all_deplibs_F77=yes + if test "$GCC" = yes ; then + output_verbose_link_cmd='echo' + archive_cmds_F77='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + module_cmds_F77='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds + archive_expsym_cmds_F77='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds_F77='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + case $cc_basename in + xlc*) + output_verbose_link_cmd='echo' + archive_cmds_F77='$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring' + module_cmds_F77='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds + archive_expsym_cmds_F77='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds_F77='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + ;; + *) + ld_shlibs_F77=no + ;; + esac + fi + ;; + + dgux*) + archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec_F77='-L$libdir' + hardcode_shlibpath_var_F77=no + ;; + + freebsd1*) + ld_shlibs_F77=no + ;; + + # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). 
+ freebsd2.2*) + archive_cmds_F77='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + hardcode_libdir_flag_spec_F77='-R$libdir' + hardcode_direct_F77=yes + hardcode_shlibpath_var_F77=no + ;; + + # Unfortunately, older versions of FreeBSD 2 do not have this feature. + freebsd2*) + archive_cmds_F77='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct_F77=yes + hardcode_minus_L_F77=yes + hardcode_shlibpath_var_F77=no + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. + freebsd* | kfreebsd*-gnu | dragonfly*) + archive_cmds_F77='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec_F77='-R$libdir' + hardcode_direct_F77=yes + hardcode_shlibpath_var_F77=no + ;; + + hpux9*) + if test "$GCC" = yes; then + archive_cmds_F77='$rm $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + archive_cmds_F77='$rm $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi + hardcode_libdir_flag_spec_F77='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_F77=: + hardcode_direct_F77=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L_F77=yes + export_dynamic_flag_spec_F77='${wl}-E' + ;; + + hpux10*) + if test "$GCC" = yes -a "$with_gnu_ld" = no; then + archive_cmds_F77='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds_F77='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi + if test "$with_gnu_ld" = no; then + hardcode_libdir_flag_spec_F77='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_F77=: + + hardcode_direct_F77=yes + export_dynamic_flag_spec_F77='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L_F77=yes + fi + ;; + + hpux11*) + if test "$GCC" = yes -a "$with_gnu_ld" = no; then + case $host_cpu in + hppa*64*) + archive_cmds_F77='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds_F77='$CC -shared ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds_F77='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case $host_cpu in + hppa*64*) + archive_cmds_F77='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds_F77='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds_F77='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + fi + if test "$with_gnu_ld" = no; then + hardcode_libdir_flag_spec_F77='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_F77=: + + case $host_cpu in + hppa*64*|ia64*) + hardcode_libdir_flag_spec_ld_F77='+b $libdir' + hardcode_direct_F77=no + hardcode_shlibpath_var_F77=no + ;; + *) + hardcode_direct_F77=yes + export_dynamic_flag_spec_F77='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ hardcode_minus_L_F77=yes + ;; + esac + fi + ;; + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then + archive_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + archive_cmds_F77='$LD -shared $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec_ld_F77='-rpath $libdir' + fi + hardcode_libdir_flag_spec_F77='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_F77=: + link_all_deplibs_F77=yes + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + archive_cmds_F77='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + archive_cmds_F77='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + hardcode_libdir_flag_spec_F77='-R$libdir' + hardcode_direct_F77=yes + hardcode_shlibpath_var_F77=no + ;; + + newsos6) + archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct_F77=yes + hardcode_libdir_flag_spec_F77='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_F77=: + hardcode_shlibpath_var_F77=no + ;; + + openbsd*) + hardcode_direct_F77=yes + hardcode_shlibpath_var_F77=no + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + archive_cmds_F77='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_F77='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' + hardcode_libdir_flag_spec_F77='${wl}-rpath,$libdir' + export_dynamic_flag_spec_F77='${wl}-E' + else + case $host_os in + openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) + archive_cmds_F77='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec_F77='-R$libdir' + ;; + *) + archive_cmds_F77='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec_F77='${wl}-rpath,$libdir' + ;; + esac + fi + ;; + + os2*) + hardcode_libdir_flag_spec_F77='-L$libdir' + hardcode_minus_L_F77=yes + allow_undefined_flag_F77=unsupported + archive_cmds_F77='$echo "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$echo "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$echo DATA >> $output_objdir/$libname.def~$echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~$echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' + old_archive_From_new_cmds_F77='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' + ;; + + osf3*) + if test "$GCC" = yes; then + allow_undefined_flag_F77=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds_F77='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + allow_undefined_flag_F77=' -expect_unresolved \*' + archive_cmds_F77='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + fi + hardcode_libdir_flag_spec_F77='${wl}-rpath ${wl}$libdir' + 
hardcode_libdir_separator_F77=: + ;; + + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + allow_undefined_flag_F77=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds_F77='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec_F77='${wl}-rpath ${wl}$libdir' + else + allow_undefined_flag_F77=' -expect_unresolved \*' + archive_cmds_F77='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds_F77='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; echo "-hidden">> $lib.exp~ + $LD -shared${allow_undefined_flag} -input $lib.exp $linker_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~$rm $lib.exp' + + # Both c and cxx compiler support -rpath directly + hardcode_libdir_flag_spec_F77='-rpath $libdir' + fi + hardcode_libdir_separator_F77=: + ;; + + solaris*) + no_undefined_flag_F77=' -z text' + if test "$GCC" = yes; then + wlarc='${wl}' + archive_cmds_F77='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_F77='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -shared ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$rm $lib.exp' + else + wlarc='' + archive_cmds_F77='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + archive_expsym_cmds_F77='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp' + fi + hardcode_libdir_flag_spec_F77='-R$libdir' + hardcode_shlibpath_var_F77=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The compiler driver will combine linker options so we + # cannot just pass the convience library names through + # without $wl, iff we do not link with $LD. + # Luckily, gcc supports the same syntax we need for Sun Studio. + # Supported since Solaris 2.6 (maybe 2.5.1?) + case $wlarc in + '') + whole_archive_flag_spec_F77='-z allextract$convenience -z defaultextract' ;; + *) + whole_archive_flag_spec_F77='${wl}-z ${wl}allextract`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}-z ${wl}defaultextract' ;; + esac ;; + esac + link_all_deplibs_F77=yes + ;; + + sunos4*) + if test "x$host_vendor" = xsequent; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. 
+ archive_cmds_F77='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds_F77='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + hardcode_libdir_flag_spec_F77='-L$libdir' + hardcode_direct_F77=yes + hardcode_minus_L_F77=yes + hardcode_shlibpath_var_F77=no + ;; + + sysv4) + case $host_vendor in + sni) + archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct_F77=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. + archive_cmds_F77='$LD -G -o $lib $libobjs $deplibs $linker_flags' + reload_cmds_F77='$CC -r -o $output$reload_objs' + hardcode_direct_F77=no + ;; + motorola) + archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct_F77=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + hardcode_shlibpath_var_F77=no + ;; + + sysv4.3*) + archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var_F77=no + export_dynamic_flag_spec_F77='-Bexport' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var_F77=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + ld_shlibs_F77=yes + fi + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7*) + no_undefined_flag_F77='${wl}-z,text' + archive_cmds_need_lc_F77=no + hardcode_shlibpath_var_F77=no + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + archive_cmds_F77='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_F77='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds_F77='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_F77='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
+ no_undefined_flag_F77='${wl}-z,text' + allow_undefined_flag_F77='${wl}-z,nodefs' + archive_cmds_need_lc_F77=no + hardcode_shlibpath_var_F77=no + hardcode_libdir_flag_spec_F77='`test -z "$SCOABSPATH" && echo ${wl}-R,$libdir`' + hardcode_libdir_separator_F77=':' + link_all_deplibs_F77=yes + export_dynamic_flag_spec_F77='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + archive_cmds_F77='$CC -shared ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_F77='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds_F77='$CC -G ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_F77='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + uts4*) + archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec_F77='-L$libdir' + hardcode_shlibpath_var_F77=no + ;; + + *) + ld_shlibs_F77=no + ;; + esac + fi + +echo "$as_me:$LINENO: result: $ld_shlibs_F77" >&5 +echo "${ECHO_T}$ld_shlibs_F77" >&6 +test "$ld_shlibs_F77" = no && can_build_shared=no + +# +# Do we need to explicitly link libc? +# +case "x$archive_cmds_need_lc_F77" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc_F77=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $archive_cmds_F77 in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + echo "$as_me:$LINENO: checking whether -lc should be explicitly linked in" >&5 +echo $ECHO_N "checking whether -lc should be explicitly linked in... $ECHO_C" >&6 + $rm conftest* + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl_F77 + pic_flag=$lt_prog_compiler_pic_F77 + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag_F77 + allow_undefined_flag_F77= + if { (eval echo "$as_me:$LINENO: \"$archive_cmds_F77 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1\"") >&5 + (eval $archive_cmds_F77 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + then + archive_cmds_need_lc_F77=no + else + archive_cmds_need_lc_F77=yes + fi + allow_undefined_flag_F77=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $rm conftest* + echo "$as_me:$LINENO: result: $archive_cmds_need_lc_F77" >&5 +echo "${ECHO_T}$archive_cmds_need_lc_F77" >&6 + ;; + esac + fi + ;; +esac + +echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5 +echo $ECHO_N "checking dynamic linker characteristics... 
$ECHO_C" >&6 +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';' >/dev/null ; then + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix4* | aix5*) + version_type=linux + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | grep yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. 
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[45]*) + version_type=linux + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$host_os in + yes,cygwin* | yes,mingw* | yes,pw32*) + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i;echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $rm \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" + ;; + mingw*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';[c-zC-Z]:/' >/dev/null; then + # It is most probably a Windows format PATH printed by + # mingw gcc, but we are running on Cygwin. Gcc prints its search + # path with ; separators, and with drive letters. We can handle the + # drive letters (cygwin fileutils understands them), so leave them, + # especially as we might pass files found there to a mingw objdump, + # which wouldn't understand a cygwinified path. Ahh. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac + ;; + + *) + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' + ;; + esac + dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${versuffix}$shared_ext ${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' + # Apple's gcc prints 'gcc -print-search-dirs' doesn't operate the same. + if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"` + else + sys_lib_search_path_spec='/lib /usr/lib /usr/local/lib' + fi + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd1*) + dynamic_linker=no + ;; + +kfreebsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. + if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[123]*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[01]* | freebsdelf3.[01]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ + freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + freebsd*) # from 4.6 on + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +gnu*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. 
+ version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555. + postinstall_cmds='chmod 555 $lib' + ;; + +interix3*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be Linux ELF. 
+linux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # find out which ABI we are using + libsuff= + case "$host_cpu" in + x86_64*|s390x*|powerpc64*) + echo '#line 14761 "configure"' > conftest.$ac_ext + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + case `/usr/bin/file conftest.$ac_objext` in + *64-bit*) + libsuff=64 + sys_lib_search_path_spec="/lib${libsuff} /usr/lib${libsuff} /usr/local/lib${libsuff}" + ;; + esac + fi + rm -rf conftest* + ;; + esac + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib${libsuff} /usr/lib${libsuff} $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. 
+ dynamic_linker='GNU/Linux ld.so' + ;; + +knetbsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +nto-qnx*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +openbsd*) + version_type=sunos + sys_lib_dlsearch_path_spec="/usr/lib" + need_lib_prefix=no + # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
+ case $host_os in + openbsd3.3 | openbsd3.3.*) need_version=yes ;; + *) need_version=no ;; + esac + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[89] | openbsd2.[89].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +solaris*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + export_dynamic_flag_spec='${wl}-Blargedynsym' + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=freebsd-elf + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + if test "$with_gnu_ld" = yes; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + shlibpath_overrides_runpath=no + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + shlibpath_overrides_runpath=yes + case $host_os in + 
sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +uts4*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +echo "$as_me:$LINENO: result: $dynamic_linker" >&5 +echo "${ECHO_T}$dynamic_linker" >&6 +test "$dynamic_linker" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 +echo $ECHO_N "checking how to hardcode library paths into programs... $ECHO_C" >&6 +hardcode_action_F77= +if test -n "$hardcode_libdir_flag_spec_F77" || \ + test -n "$runpath_var_F77" || \ + test "X$hardcode_automatic_F77" = "Xyes" ; then + + # We can hardcode non-existant directories. + if test "$hardcode_direct_F77" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, F77)" != no && + test "$hardcode_minus_L_F77" != no; then + # Linking always hardcodes the temporary library directory. + hardcode_action_F77=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action_F77=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + hardcode_action_F77=unsupported +fi +echo "$as_me:$LINENO: result: $hardcode_action_F77" >&5 +echo "${ECHO_T}$hardcode_action_F77" >&6 + +if test "$hardcode_action_F77" = relink; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi + + +# The else clause should only fire when bootstrapping the +# libtool distribution, otherwise you forgot to ship ltmain.sh +# with your package, and you will get complaints that there are +# no rules to generate ltmain.sh. +if test -f "$ltmain"; then + # See if we are running on zsh, and set the options which allow our commands through + # without removal of \ escapes. + if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + # Now quote all the things that may contain metacharacters while being + # careful not to overquote the AC_SUBSTed values. We take copies of the + # variables and quote the copies for generation of the libtool script. 
+ for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC LTCFLAGS NM \ + SED SHELL STRIP \ + libname_spec library_names_spec soname_spec extract_expsyms_cmds \ + old_striplib striplib file_magic_cmd finish_cmds finish_eval \ + deplibs_check_method reload_flag reload_cmds need_locks \ + lt_cv_sys_global_symbol_pipe lt_cv_sys_global_symbol_to_cdecl \ + lt_cv_sys_global_symbol_to_c_name_address \ + sys_lib_search_path_spec sys_lib_dlsearch_path_spec \ + old_postinstall_cmds old_postuninstall_cmds \ + compiler_F77 \ + CC_F77 \ + LD_F77 \ + lt_prog_compiler_wl_F77 \ + lt_prog_compiler_pic_F77 \ + lt_prog_compiler_static_F77 \ + lt_prog_compiler_no_builtin_flag_F77 \ + export_dynamic_flag_spec_F77 \ + thread_safe_flag_spec_F77 \ + whole_archive_flag_spec_F77 \ + enable_shared_with_static_runtimes_F77 \ + old_archive_cmds_F77 \ + old_archive_from_new_cmds_F77 \ + predep_objects_F77 \ + postdep_objects_F77 \ + predeps_F77 \ + postdeps_F77 \ + compiler_lib_search_path_F77 \ + archive_cmds_F77 \ + archive_expsym_cmds_F77 \ + postinstall_cmds_F77 \ + postuninstall_cmds_F77 \ + old_archive_from_expsyms_cmds_F77 \ + allow_undefined_flag_F77 \ + no_undefined_flag_F77 \ + export_symbols_cmds_F77 \ + hardcode_libdir_flag_spec_F77 \ + hardcode_libdir_flag_spec_ld_F77 \ + hardcode_libdir_separator_F77 \ + hardcode_automatic_F77 \ + module_cmds_F77 \ + module_expsym_cmds_F77 \ + lt_cv_prog_compiler_c_o_F77 \ + exclude_expsyms_F77 \ + include_expsyms_F77; do + + case $var in + old_archive_cmds_F77 | \ + old_archive_from_new_cmds_F77 | \ + archive_cmds_F77 | \ + archive_expsym_cmds_F77 | \ + module_cmds_F77 | \ + module_expsym_cmds_F77 | \ + old_archive_from_expsyms_cmds_F77 | \ + export_symbols_cmds_F77 | \ + extract_expsyms_cmds | reload_cmds | finish_cmds | \ + postinstall_cmds | postuninstall_cmds | \ + old_postinstall_cmds | old_postuninstall_cmds | \ + sys_lib_search_path_spec | sys_lib_dlsearch_path_spec) + # Double-quote double-evaled strings. + eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\"" + ;; + *) + eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\"" + ;; + esac + done + + case $lt_echo in + *'\$0 --fallback-echo"') + lt_echo=`$echo "X$lt_echo" | $Xsed -e 's/\\\\\\\$0 --fallback-echo"$/$0 --fallback-echo"/'` + ;; + esac + +cfgfile="$ofile" + + cat <<__EOF__ >> "$cfgfile" +# ### BEGIN LIBTOOL TAG CONFIG: $tagname + +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc_F77 + +# Whether or not to disallow shared libs when runtime libs are static +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_F77 + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# An echo program that does not interpret backslashes. +echo=$lt_echo + +# The archiver. +AR=$lt_AR +AR_FLAGS=$lt_AR_FLAGS + +# A C compiler. +LTCC=$lt_LTCC + +# LTCC compiler flags. +LTCFLAGS=$lt_LTCFLAGS + +# A language-specific compiler. 
+CC=$lt_compiler_F77 + +# Is the compiler the GNU C compiler? +with_gcc=$GCC_F77 + +gcc_dir=\`gcc -print-file-name=. | $SED 's,/\.$,,'\` +gcc_ver=\`gcc -dumpversion\` + +# An ERE matcher. +EGREP=$lt_EGREP + +# The linker used to build libraries. +LD=$lt_LD_F77 + +# Whether we need hard or soft links. +LN_S=$lt_LN_S + +# A BSD-compatible nm program. +NM=$lt_NM + +# A symbol stripping program +STRIP=$lt_STRIP + +# Used to examine libraries when file_magic_cmd begins "file" +MAGIC_CMD=$MAGIC_CMD + +# Used on cygwin: DLL creation program. +DLLTOOL="$DLLTOOL" + +# Used on cygwin: object dumper. +OBJDUMP="$OBJDUMP" + +# Used on cygwin: assembler. +AS="$AS" + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl_F77 + +# Object file suffix (normally "o"). +objext="$ac_objext" + +# Old archive suffix (normally "a"). +libext="$libext" + +# Shared library suffix (normally ".so"). +shrext_cmds='$shrext_cmds' + +# Executable file suffix (normally ""). +exeext="$exeext" + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic_F77 +pic_mode=$pic_mode + +# What is the maximum length of a command? +max_cmd_len=$lt_cv_sys_max_cmd_len + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o_F77 + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Do we need the lib prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static_F77 + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_F77 + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_F77 + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec_F77 + +# Compiler flag to generate thread-safe objects. +thread_safe_flag_spec=$lt_thread_safe_flag_spec_F77 + +# Library versioning type. +version_type=$version_type + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME. +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Commands used to build and install an old-style archive. +RANLIB=$lt_RANLIB +old_archive_cmds=$lt_old_archive_cmds_F77 +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_F77 + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_F77 + +# Commands used to build and install a shared archive. 
+archive_cmds=$lt_archive_cmds_F77 +archive_expsym_cmds=$lt_archive_expsym_cmds_F77 +postinstall_cmds=$lt_postinstall_cmds +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to build a loadable module (assumed same as above if empty) +module_cmds=$lt_module_cmds_F77 +module_expsym_cmds=$lt_module_expsym_cmds_F77 + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + +# Dependencies to place before the objects being linked to create a +# shared library. +predep_objects=\`echo $lt_predep_objects_F77 | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` + +# Dependencies to place after the objects being linked to create a +# shared library. +postdep_objects=\`echo $lt_postdep_objects_F77 | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` + +# Dependencies to place before the objects being linked to create a +# shared library. +predeps=$lt_predeps_F77 + +# Dependencies to place after the objects being linked to create a +# shared library. +postdeps=$lt_postdeps_F77 + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=\`echo $lt_compiler_lib_search_path_F77 | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method == file_magic. +file_magic_cmd=$lt_file_magic_cmd + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag_F77 + +# Flag that forces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag_F77 + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# Same as above, but a single script fragment to be evaled but not shown. +finish_eval=$lt_finish_eval + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm in a C name address pair +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# This is the shared library runtime path variable. +runpath_var=$runpath_var + +# This is the shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action_F77 + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist. +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_F77 + +# If ld is used when linking, flag to hardcode \$libdir into +# a binary during linking. This must work even if \$libdir does +# not exist. +hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld_F77 + +# Whether we need a single -rpath flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator_F77 + +# Set to yes if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the +# resulting binary. 
+hardcode_direct=$hardcode_direct_F77 + +# Set to yes if using the -LDIR flag during linking hardcodes DIR into the +# resulting binary. +hardcode_minus_L=$hardcode_minus_L_F77 + +# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into +# the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var_F77 + +# Set to yes if building a shared library automatically hardcodes DIR into the library +# and all subsequent libraries and executables linked against it. +hardcode_automatic=$hardcode_automatic_F77 + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at relink time. +variables_saved_for_relink="$variables_saved_for_relink" + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs_F77 + +# Compile-time system search path for libraries +sys_lib_search_path_spec=\`echo $lt_sys_lib_search_path_spec | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` + +# Run-time system search path for libraries +sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec + +# Fix the shell variable \$srcfile for the compiler. +fix_srcfile_path="$fix_srcfile_path_F77" + +# Set to yes if exported symbols are required. +always_export_symbols=$always_export_symbols_F77 + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds_F77 + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms_F77 + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms_F77 + +# ### END LIBTOOL TAG CONFIG: $tagname + +__EOF__ + + +else + # If there is no Makefile yet, we rely on a make rule to execute + # `config.status --recheck' to rerun these tests and create the + # libtool script then. + ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` + if test -f "$ltmain_in"; then + test -f Makefile && make "$ltmain" + fi +fi + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +CC="$lt_save_CC" + + else + tagname="" + fi + ;; + + GCJ) + if test -n "$GCJ" && test "X$GCJ" != "Xno"; then + + + +# Source file extension for Java test sources. +ac_ext=java + +# Object file extension for compiled Java test sources. +objext=o +objext_GCJ=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="class foo {}\n" + +# Code to be used in simple link tests +lt_simple_link_test_code='public class conftest { public static void main(String[] argv) {}; }\n' + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. 
+compiler=$CC + + +# save warnings/boilerplate of simple test code +ac_outfile=conftest.$ac_objext +printf "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$rm conftest* + +ac_outfile=conftest.$ac_objext +printf "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$rm conftest* + + +# Allow CC to be a program name with arguments. +lt_save_CC="$CC" +CC=${GCJ-"gcj"} +compiler=$CC +compiler_GCJ=$CC +for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` + + +# GCJ did not exist at the time GCC didn't implicitly link libc in. +archive_cmds_need_lc_GCJ=no + +old_archive_cmds_GCJ=$old_archive_cmds + + +lt_prog_compiler_no_builtin_flag_GCJ= + +if test "$GCC" = yes; then + lt_prog_compiler_no_builtin_flag_GCJ=' -fno-builtin' + + +echo "$as_me:$LINENO: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 +echo $ECHO_N "checking if $compiler supports -fno-rtti -fno-exceptions... $ECHO_C" >&6 +if test "${lt_cv_prog_compiler_rtti_exceptions+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_prog_compiler_rtti_exceptions=no + ac_outfile=conftest.$ac_objext + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="-fno-rtti -fno-exceptions" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:15539: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:15543: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_rtti_exceptions=yes + fi + fi + $rm conftest* + +fi +echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 +echo "${ECHO_T}$lt_cv_prog_compiler_rtti_exceptions" >&6 + +if test x"$lt_cv_prog_compiler_rtti_exceptions" = xyes; then + lt_prog_compiler_no_builtin_flag_GCJ="$lt_prog_compiler_no_builtin_flag_GCJ -fno-rtti -fno-exceptions" +else + : +fi + +fi + +lt_prog_compiler_wl_GCJ= +lt_prog_compiler_pic_GCJ= +lt_prog_compiler_static_GCJ= + +echo "$as_me:$LINENO: checking for $compiler option to produce PIC" >&5 +echo $ECHO_N "checking for $compiler option to produce PIC... $ECHO_C" >&6 + + if test "$GCC" = yes; then + lt_prog_compiler_wl_GCJ='-Wl,' + lt_prog_compiler_static_GCJ='-static' + + case $host_os in + aix*) + # All AIX code is PIC. 
+ if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_GCJ='-Bstatic' + fi + ;; + + amigaos*) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + lt_prog_compiler_pic_GCJ='-m68020 -resident32 -malways-restore-a4' + ;; + + beos* | cygwin* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + + mingw* | pw32* | os2*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic_GCJ='-DDLL_EXPORT' + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic_GCJ='-fno-common' + ;; + + interix3*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. + lt_prog_compiler_can_build_shared_GCJ=no + enable_shared=no + ;; + + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic_GCJ=-Kconform_pic + fi + ;; + + hpux*) + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic_GCJ='-fPIC' + ;; + esac + ;; + + *) + lt_prog_compiler_pic_GCJ='-fPIC' + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. + case $host_os in + aix*) + lt_prog_compiler_wl_GCJ='-Wl,' + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_GCJ='-Bstatic' + else + lt_prog_compiler_static_GCJ='-bnso -bI:/lib/syscalls.exp' + fi + ;; + darwin*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + case $cc_basename in + xlc*) + lt_prog_compiler_pic_GCJ='-qnocommon' + lt_prog_compiler_wl_GCJ='-Wl,' + ;; + esac + ;; + + mingw* | pw32* | os2*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic_GCJ='-DDLL_EXPORT' + ;; + + hpux9* | hpux10* | hpux11*) + lt_prog_compiler_wl_GCJ='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic_GCJ='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? + lt_prog_compiler_static_GCJ='${wl}-a ${wl}archive' + ;; + + irix5* | irix6* | nonstopux*) + lt_prog_compiler_wl_GCJ='-Wl,' + # PIC (with -KPIC) is the default. + lt_prog_compiler_static_GCJ='-non_shared' + ;; + + newsos6) + lt_prog_compiler_pic_GCJ='-KPIC' + lt_prog_compiler_static_GCJ='-Bstatic' + ;; + + linux*) + case $cc_basename in + icc* | ecc*) + lt_prog_compiler_wl_GCJ='-Wl,' + lt_prog_compiler_pic_GCJ='-KPIC' + lt_prog_compiler_static_GCJ='-static' + ;; + pgcc* | pgf77* | pgf90* | pgf95*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + lt_prog_compiler_wl_GCJ='-Wl,' + lt_prog_compiler_pic_GCJ='-fpic' + lt_prog_compiler_static_GCJ='-Bstatic' + ;; + ccc*) + lt_prog_compiler_wl_GCJ='-Wl,' + # All Alpha code is PIC. 
+ lt_prog_compiler_static_GCJ='-non_shared' + ;; + esac + ;; + + osf3* | osf4* | osf5*) + lt_prog_compiler_wl_GCJ='-Wl,' + # All OSF/1 code is PIC. + lt_prog_compiler_static_GCJ='-non_shared' + ;; + + solaris*) + lt_prog_compiler_pic_GCJ='-KPIC' + lt_prog_compiler_static_GCJ='-Bstatic' + case $cc_basename in + f77* | f90* | f95*) + lt_prog_compiler_wl_GCJ='-Qoption ld ';; + *) + lt_prog_compiler_wl_GCJ='-Wl,';; + esac + ;; + + sunos4*) + lt_prog_compiler_wl_GCJ='-Qoption ld ' + lt_prog_compiler_pic_GCJ='-PIC' + lt_prog_compiler_static_GCJ='-Bstatic' + ;; + + sysv4 | sysv4.2uw2* | sysv4.3*) + lt_prog_compiler_wl_GCJ='-Wl,' + lt_prog_compiler_pic_GCJ='-KPIC' + lt_prog_compiler_static_GCJ='-Bstatic' + ;; + + sysv4*MP*) + if test -d /usr/nec ;then + lt_prog_compiler_pic_GCJ='-Kconform_pic' + lt_prog_compiler_static_GCJ='-Bstatic' + fi + ;; + + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + lt_prog_compiler_wl_GCJ='-Wl,' + lt_prog_compiler_pic_GCJ='-KPIC' + lt_prog_compiler_static_GCJ='-Bstatic' + ;; + + unicos*) + lt_prog_compiler_wl_GCJ='-Wl,' + lt_prog_compiler_can_build_shared_GCJ=no + ;; + + uts4*) + lt_prog_compiler_pic_GCJ='-pic' + lt_prog_compiler_static_GCJ='-Bstatic' + ;; + + *) + lt_prog_compiler_can_build_shared_GCJ=no + ;; + esac + fi + +echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_GCJ" >&5 +echo "${ECHO_T}$lt_prog_compiler_pic_GCJ" >&6 + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$lt_prog_compiler_pic_GCJ"; then + +echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic_GCJ works" >&5 +echo $ECHO_N "checking if $compiler PIC flag $lt_prog_compiler_pic_GCJ works... $ECHO_C" >&6 +if test "${lt_prog_compiler_pic_works_GCJ+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_prog_compiler_pic_works_GCJ=no + ac_outfile=conftest.$ac_objext + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic_GCJ" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:15807: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:15811: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_prog_compiler_pic_works_GCJ=yes + fi + fi + $rm conftest* + +fi +echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_works_GCJ" >&5 +echo "${ECHO_T}$lt_prog_compiler_pic_works_GCJ" >&6 + +if test x"$lt_prog_compiler_pic_works_GCJ" = xyes; then + case $lt_prog_compiler_pic_GCJ in + "" | " "*) ;; + *) lt_prog_compiler_pic_GCJ=" $lt_prog_compiler_pic_GCJ" ;; + esac +else + lt_prog_compiler_pic_GCJ= + lt_prog_compiler_can_build_shared_GCJ=no +fi + +fi +case $host_os in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic_GCJ= + ;; + *) + lt_prog_compiler_pic_GCJ="$lt_prog_compiler_pic_GCJ" + ;; +esac + +# +# Check to make sure the static flag actually works. +# +wl=$lt_prog_compiler_wl_GCJ eval lt_tmp_static_flag=\"$lt_prog_compiler_static_GCJ\" +echo "$as_me:$LINENO: checking if $compiler static flag $lt_tmp_static_flag works" >&5 +echo $ECHO_N "checking if $compiler static flag $lt_tmp_static_flag works... $ECHO_C" >&6 +if test "${lt_prog_compiler_static_works_GCJ+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_prog_compiler_static_works_GCJ=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $lt_tmp_static_flag" + printf "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. + cat conftest.err 1>&5 + $echo "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_prog_compiler_static_works_GCJ=yes + fi + else + lt_prog_compiler_static_works_GCJ=yes + fi + fi + $rm conftest* + LDFLAGS="$save_LDFLAGS" + +fi +echo "$as_me:$LINENO: result: $lt_prog_compiler_static_works_GCJ" >&5 +echo "${ECHO_T}$lt_prog_compiler_static_works_GCJ" >&6 + +if test x"$lt_prog_compiler_static_works_GCJ" = xyes; then + : +else + lt_prog_compiler_static_GCJ= +fi + + +echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5 +echo $ECHO_N "checking if $compiler supports -c -o file.$ac_objext... $ECHO_C" >&6 +if test "${lt_cv_prog_compiler_c_o_GCJ+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_prog_compiler_c_o_GCJ=no + $rm -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:15911: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:15915: \$? 
= $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o_GCJ=yes + fi + fi + chmod u+w . 2>&5 + $rm conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $rm out/ii_files/* && rmdir out/ii_files + $rm out/* && rmdir out + cd .. + rmdir conftest + $rm conftest* + +fi +echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_c_o_GCJ" >&5 +echo "${ECHO_T}$lt_cv_prog_compiler_c_o_GCJ" >&6 + + +hard_links="nottested" +if test "$lt_cv_prog_compiler_c_o_GCJ" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + echo "$as_me:$LINENO: checking if we can lock with hard links" >&5 +echo $ECHO_N "checking if we can lock with hard links... $ECHO_C" >&6 + hard_links=yes + $rm conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + echo "$as_me:$LINENO: result: $hard_links" >&5 +echo "${ECHO_T}$hard_links" >&6 + if test "$hard_links" = no; then + { echo "$as_me:$LINENO: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 +echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi + +echo "$as_me:$LINENO: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +echo $ECHO_N "checking whether the $compiler linker ($LD) supports shared libraries... $ECHO_C" >&6 + + runpath_var= + allow_undefined_flag_GCJ= + enable_shared_with_static_runtimes_GCJ=no + archive_cmds_GCJ= + archive_expsym_cmds_GCJ= + old_archive_From_new_cmds_GCJ= + old_archive_from_expsyms_cmds_GCJ= + export_dynamic_flag_spec_GCJ= + whole_archive_flag_spec_GCJ= + thread_safe_flag_spec_GCJ= + hardcode_libdir_flag_spec_GCJ= + hardcode_libdir_flag_spec_ld_GCJ= + hardcode_libdir_separator_GCJ= + hardcode_direct_GCJ=no + hardcode_minus_L_GCJ=no + hardcode_shlibpath_var_GCJ=unsupported + link_all_deplibs_GCJ=unknown + hardcode_automatic_GCJ=no + module_cmds_GCJ= + module_expsym_cmds_GCJ= + always_export_symbols_GCJ=no + export_symbols_cmds_GCJ='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + include_expsyms_GCJ= + # exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ` (' and `)$', so one must not match beginning or + # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', + # as well as any symbol that contains `d'. + exclude_expsyms_GCJ="_GLOBAL_OFFSET_TABLE_" + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + extract_expsyms_cmds= + # Just being paranoid about ensuring that cc_basename is set. 
+ for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` + + + case $host_os in + cygwin* | mingw* | pw32*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + if test "$GCC" != yes; then + with_gnu_ld=no + fi + ;; + interix*) + # we just hope/assume this is gcc and not c89 (= MSVC++) + with_gnu_ld=yes + ;; + openbsd*) + with_gnu_ld=no + ;; + esac + + ld_shlibs_GCJ=yes + if test "$with_gnu_ld" = yes; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='${wl}' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec_GCJ='${wl}--rpath ${wl}$libdir' + export_dynamic_flag_spec_GCJ='${wl}--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. + if $LD --help 2>&1 | grep 'no-whole-archive' > /dev/null; then + whole_archive_flag_spec_GCJ="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + whole_archive_flag_spec_GCJ= + fi + supports_anon_versioning=no + case `$LD -v 2>/dev/null` in + *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + + # See if GNU ld supports shared libraries. + case $host_os in + aix3* | aix4* | aix5*) + # On AIX/PPC, the GNU linker is very broken + if test "$host_cpu" != ia64; then + ld_shlibs_GCJ=no + cat <<EOF 1>&2 + +*** Warning: the GNU linker, at least up to release 2.9.1, is reported +*** to be unable to reliably create shared libraries on AIX. +*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to modify your PATH +*** so that a non-GNU linker is found, and then restart. + +EOF + fi + ;; + + amigaos*) + archive_cmds_GCJ='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec_GCJ='-L$libdir' + hardcode_minus_L_GCJ=yes + + # Samuel A. Falvo II reports + # that the semantics of dynamic libraries on AmigaOS, at least up + # to version 4, is to share data among multiple programs linked + # with the same dynamic library. Since this doesn't match the + # behavior of shared libraries on other platforms, we can't use + # them. + ld_shlibs_GCJ=no + ;; + + beos*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + allow_undefined_flag_GCJ=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation.
FIXME + archive_cmds_GCJ='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + ld_shlibs_GCJ=no + fi + ;; + + cygwin* | mingw* | pw32*) + # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, GCJ) is actually meaningless, + # as there is no search path for DLLs. + hardcode_libdir_flag_spec_GCJ='-L$libdir' + allow_undefined_flag_GCJ=unsupported + always_export_symbols_GCJ=no + enable_shared_with_static_runtimes_GCJ=yes + export_symbols_cmds_GCJ='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS] /s/.* \([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW] /s/.* //'\'' | sort | uniq > $export_symbols' + + if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then + archive_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... + archive_expsym_cmds_GCJ='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + ld_shlibs_GCJ=no + fi + ;; + + interix3*) + hardcode_direct_GCJ=no + hardcode_shlibpath_var_GCJ=no + hardcode_libdir_flag_spec_GCJ='${wl}-rpath,$libdir' + export_dynamic_flag_spec_GCJ='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
+ archive_cmds_GCJ='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + archive_expsym_cmds_GCJ='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + + linux*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + tmp_addflag= + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + whole_archive_flag_spec_GCJ='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag' + ;; + pgf77* | pgf90* | pgf95*) # Portland Group f77 and f90 compilers + whole_archive_flag_spec_GCJ='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + esac + archive_cmds_GCJ='$CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + + if test $supports_anon_versioning = yes; then + archive_expsym_cmds_GCJ='$echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + $echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + else + ld_shlibs_GCJ=no + fi + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + archive_cmds_GCJ='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + archive_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + + solaris*) + if $LD -v 2>&1 | grep 'BFD 2\.8' > /dev/null; then + ld_shlibs_GCJ=no + cat <<EOF 1>&2 + +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart.
+ +EOF + elif $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + archive_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs_GCJ=no + fi + ;; + + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) + case `$LD -v 2>&1` in + *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) + ld_shlibs_GCJ=no + cat <<_LT_EOF 1>&2 + +*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not +*** reliably create shared libraries on SCO systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.16.91.0.3 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + ;; + *) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + hardcode_libdir_flag_spec_GCJ='`test -z "$SCOABSPATH" && echo ${wl}-rpath,$libdir`' + archive_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib' + archive_expsym_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname,\${SCOABSPATH:+${install_libdir}/}$soname,-retain-symbols-file,$export_symbols -o $lib' + else + ld_shlibs_GCJ=no + fi + ;; + esac + ;; + + sunos4*) + archive_cmds_GCJ='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + hardcode_direct_GCJ=yes + hardcode_shlibpath_var_GCJ=no + ;; + + *) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + archive_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs_GCJ=no + fi + ;; + esac + + if test "$ld_shlibs_GCJ" = no; then + runpath_var= + hardcode_libdir_flag_spec_GCJ= + export_dynamic_flag_spec_GCJ= + whole_archive_flag_spec_GCJ= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + allow_undefined_flag_GCJ=unsupported + always_export_symbols_GCJ=yes + archive_expsym_cmds_GCJ='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. + hardcode_minus_L_GCJ=yes + if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + hardcode_direct_GCJ=unsupported + fi + ;; + + aix4* | aix5*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + # If we're using GNU nm, then we don't want the "-C" option. 
+ # -C means demangle to AIX nm, but means don't demangle with GNU nm + if $NM -V 2>&1 | grep 'GNU' > /dev/null; then + export_symbols_cmds_GCJ='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds_GCJ='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[23]|aix4.[23].*|aix5*) + for ld_flag in $LDFLAGS; do + if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then + aix_use_runtimelinking=yes + break + fi + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + archive_cmds_GCJ='' + hardcode_direct_GCJ=yes + hardcode_libdir_separator_GCJ=':' + link_all_deplibs_GCJ=yes + + if test "$GCC" = yes; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && \ + strings "$collect2name" | grep resolve_lib_name >/dev/null + then + # We have reworked collect2 + hardcode_direct_GCJ=yes + else + # We have old collect2 + hardcode_direct_GCJ=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L_GCJ=yes + hardcode_libdir_flag_spec_GCJ='-L$libdir' + hardcode_libdir_separator_GCJ= + fi + ;; + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + always_export_symbols_GCJ=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + allow_undefined_flag_GCJ='-berok' + # Determine the default libpath from the value encoded in an empty executable. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? 
+ grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. +if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hardcode_libdir_flag_spec_GCJ='${wl}-blibpath:$libdir:'"$aix_libpath" + archive_expsym_cmds_GCJ="\$CC"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + hardcode_libdir_flag_spec_GCJ='${wl}-R $libdir:/usr/lib:/lib' + allow_undefined_flag_GCJ="-z nodefs" + archive_expsym_cmds_GCJ="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an empty executable. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` +# Check for a 64-bit object if we didn't find anything. 
+if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'`; fi +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hardcode_libdir_flag_spec_GCJ='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + no_undefined_flag_GCJ=' ${wl}-bernotok' + allow_undefined_flag_GCJ=' ${wl}-berok' + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec_GCJ='$convenience' + archive_cmds_need_lc_GCJ=yes + # This is similar to how AIX traditionally builds its shared libraries. + archive_expsym_cmds_GCJ="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + amigaos*) + archive_cmds_GCJ='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec_GCJ='-L$libdir' + hardcode_minus_L_GCJ=yes + # see comment about different semantics on the GNU ld section + ld_shlibs_GCJ=no + ;; + + bsdi[45]*) + export_dynamic_flag_spec_GCJ=-rdynamic + ;; + + cygwin* | mingw* | pw32*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + hardcode_libdir_flag_spec_GCJ=' ' + allow_undefined_flag_GCJ=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. + archive_cmds_GCJ='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | $SED -e '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + old_archive_From_new_cmds_GCJ='true' + # FIXME: Should let the user specify the lib program. 
+ old_archive_cmds_GCJ='lib /OUT:$oldlib$oldobjs$old_deplibs' + fix_srcfile_path_GCJ='`cygpath -w "$srcfile"`' + enable_shared_with_static_runtimes_GCJ=yes + ;; + + darwin* | rhapsody*) + case $host_os in + rhapsody* | darwin1.[012]) + allow_undefined_flag_GCJ='${wl}-undefined ${wl}suppress' + ;; + *) # Darwin 1.3 on + if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then + allow_undefined_flag_GCJ='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + else + case ${MACOSX_DEPLOYMENT_TARGET} in + 10.[012]) + allow_undefined_flag_GCJ='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' + ;; + 10.*) + allow_undefined_flag_GCJ='${wl}-undefined ${wl}dynamic_lookup' + ;; + esac + fi + ;; + esac + archive_cmds_need_lc_GCJ=no + hardcode_direct_GCJ=no + hardcode_automatic_GCJ=yes + hardcode_shlibpath_var_GCJ=unsupported + whole_archive_flag_spec_GCJ='' + link_all_deplibs_GCJ=yes + if test "$GCC" = yes ; then + output_verbose_link_cmd='echo' + archive_cmds_GCJ='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' + module_cmds_GCJ='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds + archive_expsym_cmds_GCJ='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds_GCJ='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + else + case $cc_basename in + xlc*) + output_verbose_link_cmd='echo' + archive_cmds_GCJ='$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring' + module_cmds_GCJ='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' + # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds + archive_expsym_cmds_GCJ='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + module_expsym_cmds_GCJ='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + ;; + *) + ld_shlibs_GCJ=no + ;; + esac + fi + ;; + + dgux*) + archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec_GCJ='-L$libdir' + hardcode_shlibpath_var_GCJ=no + ;; + + freebsd1*) + ld_shlibs_GCJ=no + ;; + + # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). 
+ freebsd2.2*) + archive_cmds_GCJ='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + hardcode_libdir_flag_spec_GCJ='-R$libdir' + hardcode_direct_GCJ=yes + hardcode_shlibpath_var_GCJ=no + ;; + + # Unfortunately, older versions of FreeBSD 2 do not have this feature. + freebsd2*) + archive_cmds_GCJ='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct_GCJ=yes + hardcode_minus_L_GCJ=yes + hardcode_shlibpath_var_GCJ=no + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. + freebsd* | kfreebsd*-gnu | dragonfly*) + archive_cmds_GCJ='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec_GCJ='-R$libdir' + hardcode_direct_GCJ=yes + hardcode_shlibpath_var_GCJ=no + ;; + + hpux9*) + if test "$GCC" = yes; then + archive_cmds_GCJ='$rm $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + archive_cmds_GCJ='$rm $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi + hardcode_libdir_flag_spec_GCJ='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_GCJ=: + hardcode_direct_GCJ=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L_GCJ=yes + export_dynamic_flag_spec_GCJ='${wl}-E' + ;; + + hpux10*) + if test "$GCC" = yes -a "$with_gnu_ld" = no; then + archive_cmds_GCJ='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds_GCJ='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi + if test "$with_gnu_ld" = no; then + hardcode_libdir_flag_spec_GCJ='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_GCJ=: + + hardcode_direct_GCJ=yes + export_dynamic_flag_spec_GCJ='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L_GCJ=yes + fi + ;; + + hpux11*) + if test "$GCC" = yes -a "$with_gnu_ld" = no; then + case $host_cpu in + hppa*64*) + archive_cmds_GCJ='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds_GCJ='$CC -shared ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds_GCJ='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case $host_cpu in + hppa*64*) + archive_cmds_GCJ='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds_GCJ='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds_GCJ='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + fi + if test "$with_gnu_ld" = no; then + hardcode_libdir_flag_spec_GCJ='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_GCJ=: + + case $host_cpu in + hppa*64*|ia64*) + hardcode_libdir_flag_spec_ld_GCJ='+b $libdir' + hardcode_direct_GCJ=no + hardcode_shlibpath_var_GCJ=no + ;; + *) + hardcode_direct_GCJ=yes + export_dynamic_flag_spec_GCJ='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ hardcode_minus_L_GCJ=yes + ;; + esac + fi + ;; + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then + archive_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + archive_cmds_GCJ='$LD -shared $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec_ld_GCJ='-rpath $libdir' + fi + hardcode_libdir_flag_spec_GCJ='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_GCJ=: + link_all_deplibs_GCJ=yes + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + archive_cmds_GCJ='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + archive_cmds_GCJ='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + hardcode_libdir_flag_spec_GCJ='-R$libdir' + hardcode_direct_GCJ=yes + hardcode_shlibpath_var_GCJ=no + ;; + + newsos6) + archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct_GCJ=yes + hardcode_libdir_flag_spec_GCJ='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_GCJ=: + hardcode_shlibpath_var_GCJ=no + ;; + + openbsd*) + hardcode_direct_GCJ=yes + hardcode_shlibpath_var_GCJ=no + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + archive_cmds_GCJ='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_GCJ='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' + hardcode_libdir_flag_spec_GCJ='${wl}-rpath,$libdir' + export_dynamic_flag_spec_GCJ='${wl}-E' + else + case $host_os in + openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) + archive_cmds_GCJ='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec_GCJ='-R$libdir' + ;; + *) + archive_cmds_GCJ='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec_GCJ='${wl}-rpath,$libdir' + ;; + esac + fi + ;; + + os2*) + hardcode_libdir_flag_spec_GCJ='-L$libdir' + hardcode_minus_L_GCJ=yes + allow_undefined_flag_GCJ=unsupported + archive_cmds_GCJ='$echo "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$echo "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$echo DATA >> $output_objdir/$libname.def~$echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~$echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' + old_archive_From_new_cmds_GCJ='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' + ;; + + osf3*) + if test "$GCC" = yes; then + allow_undefined_flag_GCJ=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds_GCJ='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + allow_undefined_flag_GCJ=' -expect_unresolved \*' + archive_cmds_GCJ='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + fi + hardcode_libdir_flag_spec_GCJ='${wl}-rpath ${wl}$libdir' + 
hardcode_libdir_separator_GCJ=: + ;; + + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + allow_undefined_flag_GCJ=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds_GCJ='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec_GCJ='${wl}-rpath ${wl}$libdir' + else + allow_undefined_flag_GCJ=' -expect_unresolved \*' + archive_cmds_GCJ='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds_GCJ='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; echo "-hidden">> $lib.exp~ + $LD -shared${allow_undefined_flag} -input $lib.exp $linker_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~$rm $lib.exp' + + # Both c and cxx compiler support -rpath directly + hardcode_libdir_flag_spec_GCJ='-rpath $libdir' + fi + hardcode_libdir_separator_GCJ=: + ;; + + solaris*) + no_undefined_flag_GCJ=' -z text' + if test "$GCC" = yes; then + wlarc='${wl}' + archive_cmds_GCJ='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_GCJ='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $CC -shared ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$rm $lib.exp' + else + wlarc='' + archive_cmds_GCJ='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + archive_expsym_cmds_GCJ='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp' + fi + hardcode_libdir_flag_spec_GCJ='-R$libdir' + hardcode_shlibpath_var_GCJ=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The compiler driver will combine linker options so we + # cannot just pass the convience library names through + # without $wl, iff we do not link with $LD. + # Luckily, gcc supports the same syntax we need for Sun Studio. + # Supported since Solaris 2.6 (maybe 2.5.1?) + case $wlarc in + '') + whole_archive_flag_spec_GCJ='-z allextract$convenience -z defaultextract' ;; + *) + whole_archive_flag_spec_GCJ='${wl}-z ${wl}allextract`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}-z ${wl}defaultextract' ;; + esac ;; + esac + link_all_deplibs_GCJ=yes + ;; + + sunos4*) + if test "x$host_vendor" = xsequent; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. 
+ archive_cmds_GCJ='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds_GCJ='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + hardcode_libdir_flag_spec_GCJ='-L$libdir' + hardcode_direct_GCJ=yes + hardcode_minus_L_GCJ=yes + hardcode_shlibpath_var_GCJ=no + ;; + + sysv4) + case $host_vendor in + sni) + archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct_GCJ=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. + archive_cmds_GCJ='$LD -G -o $lib $libobjs $deplibs $linker_flags' + reload_cmds_GCJ='$CC -r -o $output$reload_objs' + hardcode_direct_GCJ=no + ;; + motorola) + archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct_GCJ=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + hardcode_shlibpath_var_GCJ=no + ;; + + sysv4.3*) + archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var_GCJ=no + export_dynamic_flag_spec_GCJ='-Bexport' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var_GCJ=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + ld_shlibs_GCJ=yes + fi + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7*) + no_undefined_flag_GCJ='${wl}-z,text' + archive_cmds_need_lc_GCJ=no + hardcode_shlibpath_var_GCJ=no + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + archive_cmds_GCJ='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_GCJ='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds_GCJ='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_GCJ='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
+ no_undefined_flag_GCJ='${wl}-z,text' + allow_undefined_flag_GCJ='${wl}-z,nodefs' + archive_cmds_need_lc_GCJ=no + hardcode_shlibpath_var_GCJ=no + hardcode_libdir_flag_spec_GCJ='`test -z "$SCOABSPATH" && echo ${wl}-R,$libdir`' + hardcode_libdir_separator_GCJ=':' + link_all_deplibs_GCJ=yes + export_dynamic_flag_spec_GCJ='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + archive_cmds_GCJ='$CC -shared ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_GCJ='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds_GCJ='$CC -G ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_GCJ='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + uts4*) + archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec_GCJ='-L$libdir' + hardcode_shlibpath_var_GCJ=no + ;; + + *) + ld_shlibs_GCJ=no + ;; + esac + fi + +echo "$as_me:$LINENO: result: $ld_shlibs_GCJ" >&5 +echo "${ECHO_T}$ld_shlibs_GCJ" >&6 +test "$ld_shlibs_GCJ" = no && can_build_shared=no + +# +# Do we need to explicitly link libc? +# +case "x$archive_cmds_need_lc_GCJ" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc_GCJ=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $archive_cmds_GCJ in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + echo "$as_me:$LINENO: checking whether -lc should be explicitly linked in" >&5 +echo $ECHO_N "checking whether -lc should be explicitly linked in... $ECHO_C" >&6 + $rm conftest* + printf "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl_GCJ + pic_flag=$lt_prog_compiler_pic_GCJ + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag_GCJ + allow_undefined_flag_GCJ= + if { (eval echo "$as_me:$LINENO: \"$archive_cmds_GCJ 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1\"") >&5 + (eval $archive_cmds_GCJ 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + then + archive_cmds_need_lc_GCJ=no + else + archive_cmds_need_lc_GCJ=yes + fi + allow_undefined_flag_GCJ=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $rm conftest* + echo "$as_me:$LINENO: result: $archive_cmds_need_lc_GCJ" >&5 +echo "${ECHO_T}$archive_cmds_need_lc_GCJ" >&6 + ;; + esac + fi + ;; +esac + +echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5 +echo $ECHO_N "checking dynamic linker characteristics... 
$ECHO_C" >&6 +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';' >/dev/null ; then + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix4* | aix5*) + version_type=linux + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | grep yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. 
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[45]*) + version_type=linux + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$host_os in + yes,cygwin* | yes,mingw* | yes,pw32*) + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i;echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $rm \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" + ;; + mingw*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if echo "$sys_lib_search_path_spec" | grep ';[c-zC-Z]:/' >/dev/null; then + # It is most probably a Windows format PATH printed by + # mingw gcc, but we are running on Cygwin. Gcc prints its search + # path with ; separators, and with drive letters. We can handle the + # drive letters (cygwin fileutils understands them), so leave them, + # especially as we might pass files found there to a mingw objdump, + # which wouldn't understand a cygwinified path. Ahh. + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac + ;; + + *) + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' + ;; + esac + dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${versuffix}$shared_ext ${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' + # Apple's gcc prints 'gcc -print-search-dirs' doesn't operate the same. + if test "$GCC" = yes; then + sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"` + else + sys_lib_search_path_spec='/lib /usr/lib /usr/local/lib' + fi + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd1*) + dynamic_linker=no + ;; + +kfreebsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. + if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[123]*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[01]* | freebsdelf3.[01]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ + freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + freebsd*) # from 4.6 on + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +gnu*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. 
+ version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555. + postinstall_cmds='chmod 555 $lib' + ;; + +interix3*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be Linux ELF. 
+linux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # find out which ABI we are using + libsuff= + case "$host_cpu" in + x86_64*|s390x*|powerpc64*) + echo '#line 17380 "configure"' > conftest.$ac_ext + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + case `/usr/bin/file conftest.$ac_objext` in + *64-bit*) + libsuff=64 + sys_lib_search_path_spec="/lib${libsuff} /usr/lib${libsuff} /usr/local/lib${libsuff}" + ;; + esac + fi + rm -rf conftest* + ;; + esac + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib${libsuff} /usr/lib${libsuff} $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. 
+ dynamic_linker='GNU/Linux ld.so' + ;; + +knetbsd*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='GNU ld.so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +nto-qnx*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +openbsd*) + version_type=sunos + sys_lib_dlsearch_path_spec="/usr/lib" + need_lib_prefix=no + # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
+ case $host_os in + openbsd3.3 | openbsd3.3.*) need_version=yes ;; + *) need_version=no ;; + esac + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[89] | openbsd2.[89].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +solaris*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + export_dynamic_flag_spec='${wl}-Blargedynsym' + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=freebsd-elf + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + if test "$with_gnu_ld" = yes; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + shlibpath_overrides_runpath=no + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + shlibpath_overrides_runpath=yes + case $host_os in + 
sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +uts4*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +echo "$as_me:$LINENO: result: $dynamic_linker" >&5 +echo "${ECHO_T}$dynamic_linker" >&6 +test "$dynamic_linker" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 +echo $ECHO_N "checking how to hardcode library paths into programs... $ECHO_C" >&6 +hardcode_action_GCJ= +if test -n "$hardcode_libdir_flag_spec_GCJ" || \ + test -n "$runpath_var_GCJ" || \ + test "X$hardcode_automatic_GCJ" = "Xyes" ; then + + # We can hardcode non-existant directories. + if test "$hardcode_direct_GCJ" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, GCJ)" != no && + test "$hardcode_minus_L_GCJ" != no; then + # Linking always hardcodes the temporary library directory. + hardcode_action_GCJ=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action_GCJ=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + hardcode_action_GCJ=unsupported +fi +echo "$as_me:$LINENO: result: $hardcode_action_GCJ" >&5 +echo "${ECHO_T}$hardcode_action_GCJ" >&6 + +if test "$hardcode_action_GCJ" = relink; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi + + +# The else clause should only fire when bootstrapping the +# libtool distribution, otherwise you forgot to ship ltmain.sh +# with your package, and you will get complaints that there are +# no rules to generate ltmain.sh. +if test -f "$ltmain"; then + # See if we are running on zsh, and set the options which allow our commands through + # without removal of \ escapes. + if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + # Now quote all the things that may contain metacharacters while being + # careful not to overquote the AC_SUBSTed values. We take copies of the + # variables and quote the copies for generation of the libtool script. 
+ for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC LTCFLAGS NM \ + SED SHELL STRIP \ + libname_spec library_names_spec soname_spec extract_expsyms_cmds \ + old_striplib striplib file_magic_cmd finish_cmds finish_eval \ + deplibs_check_method reload_flag reload_cmds need_locks \ + lt_cv_sys_global_symbol_pipe lt_cv_sys_global_symbol_to_cdecl \ + lt_cv_sys_global_symbol_to_c_name_address \ + sys_lib_search_path_spec sys_lib_dlsearch_path_spec \ + old_postinstall_cmds old_postuninstall_cmds \ + compiler_GCJ \ + CC_GCJ \ + LD_GCJ \ + lt_prog_compiler_wl_GCJ \ + lt_prog_compiler_pic_GCJ \ + lt_prog_compiler_static_GCJ \ + lt_prog_compiler_no_builtin_flag_GCJ \ + export_dynamic_flag_spec_GCJ \ + thread_safe_flag_spec_GCJ \ + whole_archive_flag_spec_GCJ \ + enable_shared_with_static_runtimes_GCJ \ + old_archive_cmds_GCJ \ + old_archive_from_new_cmds_GCJ \ + predep_objects_GCJ \ + postdep_objects_GCJ \ + predeps_GCJ \ + postdeps_GCJ \ + compiler_lib_search_path_GCJ \ + archive_cmds_GCJ \ + archive_expsym_cmds_GCJ \ + postinstall_cmds_GCJ \ + postuninstall_cmds_GCJ \ + old_archive_from_expsyms_cmds_GCJ \ + allow_undefined_flag_GCJ \ + no_undefined_flag_GCJ \ + export_symbols_cmds_GCJ \ + hardcode_libdir_flag_spec_GCJ \ + hardcode_libdir_flag_spec_ld_GCJ \ + hardcode_libdir_separator_GCJ \ + hardcode_automatic_GCJ \ + module_cmds_GCJ \ + module_expsym_cmds_GCJ \ + lt_cv_prog_compiler_c_o_GCJ \ + exclude_expsyms_GCJ \ + include_expsyms_GCJ; do + + case $var in + old_archive_cmds_GCJ | \ + old_archive_from_new_cmds_GCJ | \ + archive_cmds_GCJ | \ + archive_expsym_cmds_GCJ | \ + module_cmds_GCJ | \ + module_expsym_cmds_GCJ | \ + old_archive_from_expsyms_cmds_GCJ | \ + export_symbols_cmds_GCJ | \ + extract_expsyms_cmds | reload_cmds | finish_cmds | \ + postinstall_cmds | postuninstall_cmds | \ + old_postinstall_cmds | old_postuninstall_cmds | \ + sys_lib_search_path_spec | sys_lib_dlsearch_path_spec) + # Double-quote double-evaled strings. + eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\"" + ;; + *) + eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\"" + ;; + esac + done + + case $lt_echo in + *'\$0 --fallback-echo"') + lt_echo=`$echo "X$lt_echo" | $Xsed -e 's/\\\\\\\$0 --fallback-echo"$/$0 --fallback-echo"/'` + ;; + esac + +cfgfile="$ofile" + + cat <<__EOF__ >> "$cfgfile" +# ### BEGIN LIBTOOL TAG CONFIG: $tagname + +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc_GCJ + +# Whether or not to disallow shared libs when runtime libs are static +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_GCJ + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# An echo program that does not interpret backslashes. +echo=$lt_echo + +# The archiver. +AR=$lt_AR +AR_FLAGS=$lt_AR_FLAGS + +# A C compiler. +LTCC=$lt_LTCC + +# LTCC compiler flags. +LTCFLAGS=$lt_LTCFLAGS + +# A language-specific compiler. 
+CC=$lt_compiler_GCJ + +# Is the compiler the GNU C compiler? +with_gcc=$GCC_GCJ + +gcc_dir=\`gcc -print-file-name=. | $SED 's,/\.$,,'\` +gcc_ver=\`gcc -dumpversion\` + +# An ERE matcher. +EGREP=$lt_EGREP + +# The linker used to build libraries. +LD=$lt_LD_GCJ + +# Whether we need hard or soft links. +LN_S=$lt_LN_S + +# A BSD-compatible nm program. +NM=$lt_NM + +# A symbol stripping program +STRIP=$lt_STRIP + +# Used to examine libraries when file_magic_cmd begins "file" +MAGIC_CMD=$MAGIC_CMD + +# Used on cygwin: DLL creation program. +DLLTOOL="$DLLTOOL" + +# Used on cygwin: object dumper. +OBJDUMP="$OBJDUMP" + +# Used on cygwin: assembler. +AS="$AS" + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl_GCJ + +# Object file suffix (normally "o"). +objext="$ac_objext" + +# Old archive suffix (normally "a"). +libext="$libext" + +# Shared library suffix (normally ".so"). +shrext_cmds='$shrext_cmds' + +# Executable file suffix (normally ""). +exeext="$exeext" + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic_GCJ +pic_mode=$pic_mode + +# What is the maximum length of a command? +max_cmd_len=$lt_cv_sys_max_cmd_len + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o_GCJ + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Do we need the lib prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static_GCJ + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_GCJ + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_GCJ + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec_GCJ + +# Compiler flag to generate thread-safe objects. +thread_safe_flag_spec=$lt_thread_safe_flag_spec_GCJ + +# Library versioning type. +version_type=$version_type + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME. +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Commands used to build and install an old-style archive. +RANLIB=$lt_RANLIB +old_archive_cmds=$lt_old_archive_cmds_GCJ +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_GCJ + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_GCJ + +# Commands used to build and install a shared archive. 
+archive_cmds=$lt_archive_cmds_GCJ +archive_expsym_cmds=$lt_archive_expsym_cmds_GCJ +postinstall_cmds=$lt_postinstall_cmds +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to build a loadable module (assumed same as above if empty) +module_cmds=$lt_module_cmds_GCJ +module_expsym_cmds=$lt_module_expsym_cmds_GCJ + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + +# Dependencies to place before the objects being linked to create a +# shared library. +predep_objects=\`echo $lt_predep_objects_GCJ | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` + +# Dependencies to place after the objects being linked to create a +# shared library. +postdep_objects=\`echo $lt_postdep_objects_GCJ | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` + +# Dependencies to place before the objects being linked to create a +# shared library. +predeps=$lt_predeps_GCJ + +# Dependencies to place after the objects being linked to create a +# shared library. +postdeps=$lt_postdeps_GCJ + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=\`echo $lt_compiler_lib_search_path_GCJ | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method == file_magic. +file_magic_cmd=$lt_file_magic_cmd + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag_GCJ + +# Flag that forces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag_GCJ + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# Same as above, but a single script fragment to be evaled but not shown. +finish_eval=$lt_finish_eval + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm in a C name address pair +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# This is the shared library runtime path variable. +runpath_var=$runpath_var + +# This is the shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action_GCJ + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist. +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_GCJ + +# If ld is used when linking, flag to hardcode \$libdir into +# a binary during linking. This must work even if \$libdir does +# not exist. +hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld_GCJ + +# Whether we need a single -rpath flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator_GCJ + +# Set to yes if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the +# resulting binary. 
+hardcode_direct=$hardcode_direct_GCJ + +# Set to yes if using the -LDIR flag during linking hardcodes DIR into the +# resulting binary. +hardcode_minus_L=$hardcode_minus_L_GCJ + +# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into +# the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var_GCJ + +# Set to yes if building a shared library automatically hardcodes DIR into the library +# and all subsequent libraries and executables linked against it. +hardcode_automatic=$hardcode_automatic_GCJ + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at relink time. +variables_saved_for_relink="$variables_saved_for_relink" + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs_GCJ + +# Compile-time system search path for libraries +sys_lib_search_path_spec=\`echo $lt_sys_lib_search_path_spec | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` + +# Run-time system search path for libraries +sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec + +# Fix the shell variable \$srcfile for the compiler. +fix_srcfile_path="$fix_srcfile_path_GCJ" + +# Set to yes if exported symbols are required. +always_export_symbols=$always_export_symbols_GCJ + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds_GCJ + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms_GCJ + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms_GCJ + +# ### END LIBTOOL TAG CONFIG: $tagname + +__EOF__ + + +else + # If there is no Makefile yet, we rely on a make rule to execute + # `config.status --recheck' to rerun these tests and create the + # libtool script then. + ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` + if test -f "$ltmain_in"; then + test -f Makefile && make "$ltmain" + fi +fi + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +CC="$lt_save_CC" + + else + tagname="" + fi + ;; + + RC) + + + +# Source file extension for RC test sources. +ac_ext=rc + +# Object file extension for compiled RC test sources. +objext=o +objext_RC=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }\n' + +# Code to be used in simple link tests +lt_simple_link_test_code="$lt_simple_compile_test_code" + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. 
+compiler=$CC + + +# save warnings/boilerplate of simple test code +ac_outfile=conftest.$ac_objext +printf "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$rm conftest* + +ac_outfile=conftest.$ac_objext +printf "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$rm conftest* + + +# Allow CC to be a program name with arguments. +lt_save_CC="$CC" +CC=${RC-"windres"} +compiler=$CC +compiler_RC=$CC +for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` + +lt_cv_prog_compiler_c_o_RC=yes + +# The else clause should only fire when bootstrapping the +# libtool distribution, otherwise you forgot to ship ltmain.sh +# with your package, and you will get complaints that there are +# no rules to generate ltmain.sh. +if test -f "$ltmain"; then + # See if we are running on zsh, and set the options which allow our commands through + # without removal of \ escapes. + if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + # Now quote all the things that may contain metacharacters while being + # careful not to overquote the AC_SUBSTed values. We take copies of the + # variables and quote the copies for generation of the libtool script. + for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC LTCFLAGS NM \ + SED SHELL STRIP \ + libname_spec library_names_spec soname_spec extract_expsyms_cmds \ + old_striplib striplib file_magic_cmd finish_cmds finish_eval \ + deplibs_check_method reload_flag reload_cmds need_locks \ + lt_cv_sys_global_symbol_pipe lt_cv_sys_global_symbol_to_cdecl \ + lt_cv_sys_global_symbol_to_c_name_address \ + sys_lib_search_path_spec sys_lib_dlsearch_path_spec \ + old_postinstall_cmds old_postuninstall_cmds \ + compiler_RC \ + CC_RC \ + LD_RC \ + lt_prog_compiler_wl_RC \ + lt_prog_compiler_pic_RC \ + lt_prog_compiler_static_RC \ + lt_prog_compiler_no_builtin_flag_RC \ + export_dynamic_flag_spec_RC \ + thread_safe_flag_spec_RC \ + whole_archive_flag_spec_RC \ + enable_shared_with_static_runtimes_RC \ + old_archive_cmds_RC \ + old_archive_from_new_cmds_RC \ + predep_objects_RC \ + postdep_objects_RC \ + predeps_RC \ + postdeps_RC \ + compiler_lib_search_path_RC \ + archive_cmds_RC \ + archive_expsym_cmds_RC \ + postinstall_cmds_RC \ + postuninstall_cmds_RC \ + old_archive_from_expsyms_cmds_RC \ + allow_undefined_flag_RC \ + no_undefined_flag_RC \ + export_symbols_cmds_RC \ + hardcode_libdir_flag_spec_RC \ + hardcode_libdir_flag_spec_ld_RC \ + hardcode_libdir_separator_RC \ + hardcode_automatic_RC \ + module_cmds_RC \ + module_expsym_cmds_RC \ + lt_cv_prog_compiler_c_o_RC \ + exclude_expsyms_RC \ + include_expsyms_RC; do + + case $var in + old_archive_cmds_RC | \ + old_archive_from_new_cmds_RC | \ + archive_cmds_RC | \ + archive_expsym_cmds_RC | \ + module_cmds_RC | \ + module_expsym_cmds_RC | \ + old_archive_from_expsyms_cmds_RC | \ + export_symbols_cmds_RC | \ + extract_expsyms_cmds | reload_cmds | finish_cmds | \ + postinstall_cmds | postuninstall_cmds | \ + old_postinstall_cmds | old_postuninstall_cmds | \ + sys_lib_search_path_spec | sys_lib_dlsearch_path_spec) + # Double-quote double-evaled strings. 
+ eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\"" + ;; + *) + eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\"" + ;; + esac + done + + case $lt_echo in + *'\$0 --fallback-echo"') + lt_echo=`$echo "X$lt_echo" | $Xsed -e 's/\\\\\\\$0 --fallback-echo"$/$0 --fallback-echo"/'` + ;; + esac + +cfgfile="$ofile" + + cat <<__EOF__ >> "$cfgfile" +# ### BEGIN LIBTOOL TAG CONFIG: $tagname + +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc_RC + +# Whether or not to disallow shared libs when runtime libs are static +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_RC + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# An echo program that does not interpret backslashes. +echo=$lt_echo + +# The archiver. +AR=$lt_AR +AR_FLAGS=$lt_AR_FLAGS + +# A C compiler. +LTCC=$lt_LTCC + +# LTCC compiler flags. +LTCFLAGS=$lt_LTCFLAGS + +# A language-specific compiler. +CC=$lt_compiler_RC + +# Is the compiler the GNU C compiler? +with_gcc=$GCC_RC + +gcc_dir=\`gcc -print-file-name=. | $SED 's,/\.$,,'\` +gcc_ver=\`gcc -dumpversion\` + +# An ERE matcher. +EGREP=$lt_EGREP + +# The linker used to build libraries. +LD=$lt_LD_RC + +# Whether we need hard or soft links. +LN_S=$lt_LN_S + +# A BSD-compatible nm program. +NM=$lt_NM + +# A symbol stripping program +STRIP=$lt_STRIP + +# Used to examine libraries when file_magic_cmd begins "file" +MAGIC_CMD=$MAGIC_CMD + +# Used on cygwin: DLL creation program. +DLLTOOL="$DLLTOOL" + +# Used on cygwin: object dumper. +OBJDUMP="$OBJDUMP" + +# Used on cygwin: assembler. +AS="$AS" + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl_RC + +# Object file suffix (normally "o"). +objext="$ac_objext" + +# Old archive suffix (normally "a"). +libext="$libext" + +# Shared library suffix (normally ".so"). +shrext_cmds='$shrext_cmds' + +# Executable file suffix (normally ""). +exeext="$exeext" + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic_RC +pic_mode=$pic_mode + +# What is the maximum length of a command? +max_cmd_len=$lt_cv_sys_max_cmd_len + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o_RC + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Do we need the lib prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. 
+dlopen_self_static=$enable_dlopen_self_static + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static_RC + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_RC + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_RC + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec_RC + +# Compiler flag to generate thread-safe objects. +thread_safe_flag_spec=$lt_thread_safe_flag_spec_RC + +# Library versioning type. +version_type=$version_type + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME. +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Commands used to build and install an old-style archive. +RANLIB=$lt_RANLIB +old_archive_cmds=$lt_old_archive_cmds_RC +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_RC + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_RC + +# Commands used to build and install a shared archive. +archive_cmds=$lt_archive_cmds_RC +archive_expsym_cmds=$lt_archive_expsym_cmds_RC +postinstall_cmds=$lt_postinstall_cmds +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to build a loadable module (assumed same as above if empty) +module_cmds=$lt_module_cmds_RC +module_expsym_cmds=$lt_module_expsym_cmds_RC + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + +# Dependencies to place before the objects being linked to create a +# shared library. +predep_objects=\`echo $lt_predep_objects_RC | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` + +# Dependencies to place after the objects being linked to create a +# shared library. +postdep_objects=\`echo $lt_postdep_objects_RC | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` + +# Dependencies to place before the objects being linked to create a +# shared library. +predeps=$lt_predeps_RC + +# Dependencies to place after the objects being linked to create a +# shared library. +postdeps=$lt_postdeps_RC + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=\`echo $lt_compiler_lib_search_path_RC | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method == file_magic. +file_magic_cmd=$lt_file_magic_cmd + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag_RC + +# Flag that forces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag_RC + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# Same as above, but a single script fragment to be evaled but not shown. +finish_eval=$lt_finish_eval + +# Take the output of nm and produce a listing of raw symbols and C names. 
+global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm in a C name address pair +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# This is the shared library runtime path variable. +runpath_var=$runpath_var + +# This is the shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action_RC + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist. +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_RC + +# If ld is used when linking, flag to hardcode \$libdir into +# a binary during linking. This must work even if \$libdir does +# not exist. +hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld_RC + +# Whether we need a single -rpath flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator_RC + +# Set to yes if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the +# resulting binary. +hardcode_direct=$hardcode_direct_RC + +# Set to yes if using the -LDIR flag during linking hardcodes DIR into the +# resulting binary. +hardcode_minus_L=$hardcode_minus_L_RC + +# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into +# the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var_RC + +# Set to yes if building a shared library automatically hardcodes DIR into the library +# and all subsequent libraries and executables linked against it. +hardcode_automatic=$hardcode_automatic_RC + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at relink time. +variables_saved_for_relink="$variables_saved_for_relink" + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs_RC + +# Compile-time system search path for libraries +sys_lib_search_path_spec=\`echo $lt_sys_lib_search_path_spec | \$SED -e "s@\${gcc_dir}@\\\${gcc_dir}@g;s@\${gcc_ver}@\\\${gcc_ver}@g"\` + +# Run-time system search path for libraries +sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec + +# Fix the shell variable \$srcfile for the compiler. +fix_srcfile_path="$fix_srcfile_path_RC" + +# Set to yes if exported symbols are required. +always_export_symbols=$always_export_symbols_RC + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds_RC + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms_RC + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms_RC + +# ### END LIBTOOL TAG CONFIG: $tagname + +__EOF__ + + +else + # If there is no Makefile yet, we rely on a make rule to execute + # `config.status --recheck' to rerun these tests and create the + # libtool script then. 
+ ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` + if test -f "$ltmain_in"; then + test -f Makefile && make "$ltmain" + fi +fi + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +CC="$lt_save_CC" + + ;; + + *) + { { echo "$as_me:$LINENO: error: Unsupported tag name: $tagname" >&5 +echo "$as_me: error: Unsupported tag name: $tagname" >&2;} + { (exit 1); exit 1; }; } + ;; + esac + + # Append the new tag name to the list of available tags. + if test -n "$tagname" ; then + available_tags="$available_tags $tagname" + fi + fi + done + IFS="$lt_save_ifs" + + # Now substitute the updated list of available tags. + if eval "sed -e 's/^available_tags=.*\$/available_tags=\"$available_tags\"/' \"$ofile\" > \"${ofile}T\""; then + mv "${ofile}T" "$ofile" + chmod +x "$ofile" + else + rm -f "${ofile}T" + { { echo "$as_me:$LINENO: error: unable to update list of available tagged configurations." >&5 +echo "$as_me: error: unable to update list of available tagged configurations." >&2;} + { (exit 1); exit 1; }; } + fi +fi + + + +# This can be used to rebuild libtool when needed +LIBTOOL_DEPS="$ac_aux_dir/ltmain.sh" + +# Always use our own libtool. +LIBTOOL='$(SHELL) $(top_builddir)/libtool' + +# Prevent multiple expansion + + + + + + + + + + + + + + + + + + + + + +# Checks for libraries. + +echo "$as_me:$LINENO: checking for dlopen in -ldl" >&5 +echo $ECHO_N "checking for dlopen in -ldl... $ECHO_C" >&6 +if test "${ac_cv_lib_dl_dlopen+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char dlopen (); +int +main () +{ +dlopen (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_dl_dlopen=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_dl_dlopen=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5 +echo "${ECHO_T}$ac_cv_lib_dl_dlopen" >&6 +if test $ac_cv_lib_dl_dlopen = yes; then + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBDL 1 +_ACEOF + + LIBS="-ldl $LIBS" + +fi + + +JNI_LDFLAGS="" +if test $JAVA_HOME != "" +then + JNI_LDFLAGS="-L$JAVA_HOME/jre/lib/$OS_ARCH/server" +fi +ldflags_bak=$LDFLAGS +LDFLAGS="$LDFLAGS $JNI_LDFLAGS" + +echo "$as_me:$LINENO: checking for JNI_GetCreatedJavaVMs in -ljvm" >&5 +echo $ECHO_N "checking for JNI_GetCreatedJavaVMs in -ljvm... $ECHO_C" >&6 +if test "${ac_cv_lib_jvm_JNI_GetCreatedJavaVMs+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ljvm $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any gcc2 internal prototype to avoid an error. */ +#ifdef __cplusplus +extern "C" +#endif +/* We use char because int might match the return type of a gcc2 + builtin and then its argument prototype would still apply. */ +char JNI_GetCreatedJavaVMs (); +int +main () +{ +JNI_GetCreatedJavaVMs (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_lib_jvm_JNI_GetCreatedJavaVMs=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_lib_jvm_JNI_GetCreatedJavaVMs=no +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +echo "$as_me:$LINENO: result: $ac_cv_lib_jvm_JNI_GetCreatedJavaVMs" >&5 +echo "${ECHO_T}$ac_cv_lib_jvm_JNI_GetCreatedJavaVMs" >&6 +if test $ac_cv_lib_jvm_JNI_GetCreatedJavaVMs = yes; then + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBJVM 1 +_ACEOF + + LIBS="-ljvm $LIBS" + +fi + +LDFLAGS=$ldflags_bak + + +# Checks for header files. +echo "$as_me:$LINENO: checking for ANSI C header files" >&5 +echo $ECHO_N "checking for ANSI C header files... $ECHO_C" >&6 +if test "${ac_cv_header_stdc+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +#include +#include +#include + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? 
+ grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_header_stdc=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_cv_header_stdc=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext + +if test $ac_cv_header_stdc = yes; then + # SunOS 4.x string.h does not declare mem*, contrary to ANSI. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "memchr" >/dev/null 2>&1; then + : +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "free" >/dev/null 2>&1; then + : +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. + if test "$cross_compiling" = yes; then + : +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +#if ((' ' & 0x0FF) == 0x020) +# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') +# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) +#else +# define ISLOWER(c) \ + (('a' <= (c) && (c) <= 'i') \ + || ('j' <= (c) && (c) <= 'r') \ + || ('s' <= (c) && (c) <= 'z')) +# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) +#endif + +#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) +int +main () +{ + int i; + for (i = 0; i < 256; i++) + if (XOR (islower (i), ISLOWER (i)) + || toupper (i) != TOUPPER (i)) + exit(2); + exit (0); +} +_ACEOF +rm -f conftest$ac_exeext +if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { ac_try='./conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + : +else + echo "$as_me: program exited with status $ac_status" >&5 +echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +( exit $ac_status ) +ac_cv_header_stdc=no +fi +rm -f core *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +fi +fi +fi +echo "$as_me:$LINENO: result: $ac_cv_header_stdc" >&5 +echo "${ECHO_T}$ac_cv_header_stdc" >&6 +if test $ac_cv_header_stdc = yes; then + +cat >>confdefs.h <<\_ACEOF +#define STDC_HEADERS 1 +_ACEOF + +fi + + + + +for ac_header in stdio.h stddef.h +do +as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh` +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo "$as_me:$LINENO: checking for $ac_header" >&5 +echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6 +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 +else + # Is the header compilable? +echo "$as_me:$LINENO: checking $ac_header usability" >&5 +echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +#include <$ac_header> +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_header_compiler=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_header_compiler=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 +echo "${ECHO_T}$ac_header_compiler" >&6 + +# Is the header present? +echo "$as_me:$LINENO: checking $ac_header presence" >&5 +echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include <$ac_header> +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_c_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_c_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + ac_header_preproc=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_header_preproc=no +fi +rm -f conftest.err conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 +echo "${ECHO_T}$ac_header_preproc" >&6 + +# So? What about this header? 
+case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in + yes:no: ) + { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 +echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 +echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} + ac_header_preproc=yes + ;; + no:yes:* ) + { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 +echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 +echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 +echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 +echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 +echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 +echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} + ( + cat <<\_ASBOX +## ------------------------------------------ ## +## Report this to the AC_PACKAGE_NAME lists. ## +## ------------------------------------------ ## +_ASBOX + ) | + sed "s/^/$as_me: WARNING: /" >&2 + ;; +esac +echo "$as_me:$LINENO: checking for $ac_header" >&5 +echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6 +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + eval "$as_ac_Header=\$ac_header_preproc" +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 + +fi +if test `eval echo '${'$as_ac_Header'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +else + { { echo "$as_me:$LINENO: error: Some system headers not found... please ensure their presence on your platform." >&5 +echo "$as_me: error: Some system headers not found... please ensure their presence on your platform." >&2;} + { (exit 1); exit 1; }; } +fi + +done + + +JNI_CPPFLAGS="" +if test $JAVA_HOME != "" +then + for dir in `find $JAVA_HOME/include -follow -type d` + do + JNI_CPPFLAGS="$JNI_CPPFLAGS -I$dir" + done +fi +cppflags_bak=$CPPFLAGS +CPPFLAGS="$CPPFLAGS $JNI_CPPFLAGS" + +for ac_header in jni.h +do +as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh` +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo "$as_me:$LINENO: checking for $ac_header" >&5 +echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6 +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 +else + # Is the header compilable? +echo "$as_me:$LINENO: checking $ac_header usability" >&5 +echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. 
*/ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +#include <$ac_header> +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_header_compiler=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_header_compiler=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 +echo "${ECHO_T}$ac_header_compiler" >&6 + +# Is the header present? +echo "$as_me:$LINENO: checking $ac_header presence" >&5 +echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include <$ac_header> +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_c_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_c_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + ac_header_preproc=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_header_preproc=no +fi +rm -f conftest.err conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 +echo "${ECHO_T}$ac_header_preproc" >&6 + +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in + yes:no: ) + { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 +echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 +echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} + ac_header_preproc=yes + ;; + no:yes:* ) + { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 +echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 +echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" 
>&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 +echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 +echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 +echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 +echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} + ( + cat <<\_ASBOX +## ------------------------------------------ ## +## Report this to the AC_PACKAGE_NAME lists. ## +## ------------------------------------------ ## +_ASBOX + ) | + sed "s/^/$as_me: WARNING: /" >&2 + ;; +esac +echo "$as_me:$LINENO: checking for $ac_header" >&5 +echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6 +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + eval "$as_ac_Header=\$ac_header_preproc" +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 + +fi +if test `eval echo '${'$as_ac_Header'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +else + { { echo "$as_me:$LINENO: error: Native java headers not found. Is \$JAVA_HOME set correctly?" >&5 +echo "$as_me: error: Native java headers not found. Is \$JAVA_HOME set correctly?" >&2;} + { (exit 1); exit 1; }; } +fi + +done + +CPPFLAGS=$cppflags_bak + + + + +for ac_header in zlib.h zconf.h +do +as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh` +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo "$as_me:$LINENO: checking for $ac_header" >&5 +echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6 +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 +else + # Is the header compilable? +echo "$as_me:$LINENO: checking $ac_header usability" >&5 +echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +#include <$ac_header> +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + ac_header_compiler=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_header_compiler=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 +echo "${ECHO_T}$ac_header_compiler" >&6 + +# Is the header present? +echo "$as_me:$LINENO: checking $ac_header presence" >&5 +echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include <$ac_header> +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_c_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_c_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + ac_header_preproc=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_header_preproc=no +fi +rm -f conftest.err conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 +echo "${ECHO_T}$ac_header_preproc" >&6 + +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in + yes:no: ) + { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 +echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 +echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} + ac_header_preproc=yes + ;; + no:yes:* ) + { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 +echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 +echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 +echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 +echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 +echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 +echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} + ( + cat <<\_ASBOX +## ------------------------------------------ ## +## Report this to the AC_PACKAGE_NAME lists. ## +## ------------------------------------------ ## +_ASBOX + ) | + sed "s/^/$as_me: WARNING: /" >&2 + ;; +esac +echo "$as_me:$LINENO: checking for $ac_header" >&5 +echo $ECHO_N "checking for $ac_header... 
$ECHO_C" >&6 +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + eval "$as_ac_Header=\$ac_header_preproc" +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 + +fi +if test `eval echo '${'$as_ac_Header'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +echo "$as_me:$LINENO: checking Checking for the 'actual' dynamic-library for '-lz'" >&5 +echo $ECHO_N "checking Checking for the 'actual' dynamic-library for '-lz'... $ECHO_C" >&6 +if test "${ac_cv_libname_z+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + + echo 'int main(int argc, char **argv){return 0;}' > conftest.c + if test -z "`${CC} ${LDFLAGS} -o conftest conftest.c -lz 2>&1`"; then + if test ! -z "`which objdump | grep -v 'no objdump'`"; then + ac_cv_libname_z="`objdump -p conftest | grep NEEDED | grep z | sed 's/\W*NEEDED\W*\(.*\)\W*$/\"\1\"/'`" + elif test ! -z "`which ldd | grep -v 'no ldd'`"; then + ac_cv_libname_z="`ldd conftest | grep z | sed 's/^[^A-Za-z0-9]*\([A-Za-z0-9\.]*\)[^A-Za-z0-9]*=>.*$/\"\1\"/'`" + else + { { echo "$as_me:$LINENO: error: Can't find either 'objdump' or 'ldd' to compute the dynamic library for '-lz'" >&5 +echo "$as_me: error: Can't find either 'objdump' or 'ldd' to compute the dynamic library for '-lz'" >&2;} + { (exit 1); exit 1; }; } + fi + else + ac_cv_libname_z=libnotfound.so + fi + rm -f conftest* + + +fi +echo "$as_me:$LINENO: result: $ac_cv_libname_z" >&5 +echo "${ECHO_T}$ac_cv_libname_z" >&6 + +cat >>confdefs.h <<_ACEOF +#define HADOOP_ZLIB_LIBRARY ${ac_cv_libname_z} +_ACEOF + + +else + { { echo "$as_me:$LINENO: error: Zlib headers were not found... native-hadoop library needs zlib to build. Please install the requisite zlib development package." >&5 +echo "$as_me: error: Zlib headers were not found... native-hadoop library needs zlib to build. Please install the requisite zlib development package." >&2;} + { (exit 1); exit 1; }; } +fi + +done + + + +for ac_header in lzma/lzma.h +do +as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh` +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo "$as_me:$LINENO: checking for $ac_header" >&5 +echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6 +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 +else + # Is the header compilable? +echo "$as_me:$LINENO: checking $ac_header usability" >&5 +echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +#include <$ac_header> +_ACEOF +rm -f conftest.$ac_objext +if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && + { ac_try='test -z "$ac_c_werror_flag" + || test ! -s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest.$ac_objext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? 
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_header_compiler=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +ac_header_compiler=no +fi +rm -f conftest.err conftest.$ac_objext conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 +echo "${ECHO_T}$ac_header_compiler" >&6 + +# Is the header present? +echo "$as_me:$LINENO: checking $ac_header presence" >&5 +echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6 +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include <$ac_header> +_ACEOF +if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 + (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null; then + if test -s conftest.err; then + ac_cpp_err=$ac_c_preproc_warn_flag + ac_cpp_err=$ac_cpp_err$ac_c_werror_flag + else + ac_cpp_err= + fi +else + ac_cpp_err=yes +fi +if test -z "$ac_cpp_err"; then + ac_header_preproc=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_header_preproc=no +fi +rm -f conftest.err conftest.$ac_ext +echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 +echo "${ECHO_T}$ac_header_preproc" >&6 + +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in + yes:no: ) + { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 +echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 +echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} + ac_header_preproc=yes + ;; + no:yes:* ) + { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 +echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 +echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 +echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 +echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 +echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} + { echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 +echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} + ( + cat <<\_ASBOX +## ------------------------------------------ ## +## Report this to the AC_PACKAGE_NAME lists. ## +## ------------------------------------------ ## +_ASBOX + ) | + sed "s/^/$as_me: WARNING: /" >&2 + ;; +esac +echo "$as_me:$LINENO: checking for $ac_header" >&5 +echo $ECHO_N "checking for $ac_header... 
$ECHO_C" >&6 +if eval "test \"\${$as_ac_Header+set}\" = set"; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + eval "$as_ac_Header=\$ac_header_preproc" +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6 + +fi +if test `eval echo '${'$as_ac_Header'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +echo "$as_me:$LINENO: checking Checking for the 'actual' dynamic-library for '-llzma'" >&5 +echo $ECHO_N "checking Checking for the 'actual' dynamic-library for '-llzma'... $ECHO_C" >&6 +if test "${ac_cv_libname_lzma+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + + echo 'int main(int argc, char **argv){return 0;}' > conftest.c + if test -z "`${CC} ${LDFLAGS} -o conftest conftest.c -llzma 2>&1`"; then + if test ! -z "`which objdump | grep -v 'no objdump'`"; then + ac_cv_libname_lzma="`objdump -p conftest | grep NEEDED | grep lzma | sed 's/\W*NEEDED\W*\(.*\)\W*$/\"\1\"/'`" + elif test ! -z "`which ldd | grep -v 'no ldd'`"; then + ac_cv_libname_lzma="`ldd conftest | grep lzma | sed 's/^[^A-Za-z0-9]*\([A-Za-z0-9\.]*\)[^A-Za-z0-9]*=>.*$/\"\1\"/'`" + else + { { echo "$as_me:$LINENO: error: Can't find either 'objdump' or 'ldd' to compute the dynamic library for '-llzma'" >&5 +echo "$as_me: error: Can't find either 'objdump' or 'ldd' to compute the dynamic library for '-llzma'" >&2;} + { (exit 1); exit 1; }; } + fi + else + ac_cv_libname_lzma=libnotfound.so + fi + rm -f conftest* + + +fi +echo "$as_me:$LINENO: result: $ac_cv_libname_lzma" >&5 +echo "${ECHO_T}$ac_cv_libname_lzma" >&6 + +cat >>confdefs.h <<_ACEOF +#define HADOOP_LZMA_LIBRARY ${ac_cv_libname_lzma} +_ACEOF + + +else + { { echo "$as_me:$LINENO: error: lzma headers were not found... native-hadoop library needs lzma to build. Please install the requisite lzma-4.999.5alpha development package. +" >&5 +echo "$as_me: error: lzma headers were not found... native-hadoop library needs lzma to build. Please install the requisite lzma-4.999.5alpha development package. +" >&2;} + { (exit 1); exit 1; }; } +fi + +done + + +# Checks for typedefs, structures, and compiler characteristics. +echo "$as_me:$LINENO: checking for an ANSI C-conforming const" >&5 +echo $ECHO_N "checking for an ANSI C-conforming const... $ECHO_C" >&6 +if test "${ac_cv_c_const+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ +/* FIXME: Include the comments suggested by Paul. */ +#ifndef __cplusplus + /* Ultrix mips cc rejects this. */ + typedef int charset[2]; + const charset x; + /* SunOS 4.1.1 cc rejects this. */ + char const *const *ccp; + char **p; + /* NEC SVR4.0.2 mips cc rejects this. */ + struct point {int x, y;}; + static struct point const zero = {0,0}; + /* AIX XL C 1.02.0.0 rejects this. + It does not let you subtract one const X* pointer from another in + an arm of an if-expression whose if-part is not a constant + expression */ + const char *g = "string"; + ccp = &g + (g ? g-g : 0); + /* HPUX 7.0 cc rejects these. */ + ++ccp; + p = (char**) ccp; + ccp = (char const *const *) p; + { /* SCO 3.2v4 cc rejects this. */ + char *t; + char const *s = 0 ? (char *) 0 : (char const *) 0; + + *t++ = 0; + } + { /* Someone thinks the Sun supposedly-ANSI compiler will reject this. 
*/
+ int x[] = {25, 17};
+ const int *foo = &x[0];
+ ++foo;
+ }
+ { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. */
+ typedef const int *iptr;
+ iptr p = 0;
+ ++p;
+ }
+ { /* AIX XL C 1.02.0.0 rejects this saying
+ "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */
+ struct s { int j; const int *ap[3]; };
+ struct s *b; b->j = 5;
+ }
+ { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */
+ const int foo = 10;
+ }
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag"
+ || test ! -s conftest.err'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_c_const=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_c_const=no
+fi
+rm -f conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_c_const" >&5
+echo "${ECHO_T}$ac_cv_c_const" >&6
+if test $ac_cv_c_const = no; then
+
+cat >>confdefs.h <<\_ACEOF
+#define const
+_ACEOF
+
+fi
+
+
+# Checks for library functions.
+
+for ac_func in memset
+do
+as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh`
+echo "$as_me:$LINENO: checking for $ac_func" >&5
+echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6
+if eval "test \"\${$as_ac_var+set}\" = set"; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+/* Define $ac_func to an innocuous variant, in case <limits.h> declares $ac_func.
+ For example, HP-UX 11i <limits.h> declares gettimeofday. */
+#define $ac_func innocuous_$ac_func
+
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char $ac_func (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+
+#undef $ac_func
+
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char $ac_func ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined (__stub_$ac_func) || defined (__stub___$ac_func)
+choke me
+#else
+char (*f) () = $ac_func;
+#endif
+#ifdef __cplusplus
+}
+#endif
+
+int
+main ()
+{
+return f != $ac_func;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag"
+ || test !
-s conftest.err' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; } && + { ac_try='test -s conftest$ac_exeext' + { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 + (eval $ac_try) 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + eval "$as_ac_var=yes" +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +eval "$as_ac_var=no" +fi +rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5 +echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6 +if test `eval echo '${'$as_ac_var'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +fi +done + + + ac_config_files="$ac_config_files Makefile src/org/apache/hadoop/io/compress/zlib/Makefile src/org/apache/hadoop/io/compress/lzma/Makefile lib/Makefile" + +cat >confcache <<\_ACEOF +# This file is a shell script that caches the results of configure +# tests run on this system so they can be shared between configure +# scripts and configure runs, see configure's option --config-cache. +# It is not useful on other systems. If it contains results you don't +# want to keep, you may remove or edit it. +# +# config.status only pays attention to the cache file if you give it +# the --recheck option to rerun configure. +# +# `ac_cv_env_foo' variables (set or unset) will be overridden when +# loading this file, other *unset* `ac_cv_foo' will be assigned the +# following values. + +_ACEOF + +# The following way of writing the cache mishandles newlines in values, +# but we know of no workaround that is simple, portable, and efficient. +# So, don't put newlines in cache variables' values. +# Ultrix sh set writes to stderr and can't be redirected directly, +# and sets the high bit in the cache file unless we assign to the vars. +{ + (set) 2>&1 | + case `(ac_space=' '; set | grep ac_space) 2>&1` in + *ac_space=\ *) + # `set' does not quote correctly, so add quotes (double-quote + # substitution turns \\\\ into \\, and sed turns \\ into \). + sed -n \ + "s/'/'\\\\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" + ;; + *) + # `set' quotes correctly as required by POSIX, so do not add quotes. + sed -n \ + "s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1=\\2/p" + ;; + esac; +} | + sed ' + t clear + : clear + s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ + t end + /^ac_cv_env/!s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ + : end' >>confcache +if diff $cache_file confcache >/dev/null 2>&1; then :; else + if test -w $cache_file; then + test "x$cache_file" != "x/dev/null" && echo "updating cache $cache_file" + cat confcache >$cache_file + else + echo "not updating unwritable cache $cache_file" + fi +fi +rm -f confcache + +test "x$prefix" = xNONE && prefix=$ac_default_prefix +# Let make expand exec_prefix. +test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' + +# VPATH may cause trouble with some makes, so we remove $(srcdir), +# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and +# trailing colons and then remove the whole line if VPATH becomes empty +# (actually we leave an empty line to preserve line numbers). 
+if test "x$srcdir" = x.; then + ac_vpsub='/^[ ]*VPATH[ ]*=/{ +s/:*\$(srcdir):*/:/; +s/:*\${srcdir}:*/:/; +s/:*@srcdir@:*/:/; +s/^\([^=]*=[ ]*\):*/\1/; +s/:*$//; +s/^[^=]*=[ ]*$//; +}' +fi + +DEFS=-DHAVE_CONFIG_H + +ac_libobjs= +ac_ltlibobjs= +for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue + # 1. Remove the extension, and $U if already installed. + ac_i=`echo "$ac_i" | + sed 's/\$U\././;s/\.o$//;s/\.obj$//'` + # 2. Add them. + ac_libobjs="$ac_libobjs $ac_i\$U.$ac_objext" + ac_ltlibobjs="$ac_ltlibobjs $ac_i"'$U.lo' +done +LIBOBJS=$ac_libobjs + +LTLIBOBJS=$ac_ltlibobjs + + +if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then + { { echo "$as_me:$LINENO: error: conditional \"AMDEP\" was never defined. +Usually this means the macro was only invoked conditionally." >&5 +echo "$as_me: error: conditional \"AMDEP\" was never defined. +Usually this means the macro was only invoked conditionally." >&2;} + { (exit 1); exit 1; }; } +fi +if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then + { { echo "$as_me:$LINENO: error: conditional \"am__fastdepCC\" was never defined. +Usually this means the macro was only invoked conditionally." >&5 +echo "$as_me: error: conditional \"am__fastdepCC\" was never defined. +Usually this means the macro was only invoked conditionally." >&2;} + { (exit 1); exit 1; }; } +fi +if test -z "${am__fastdepCXX_TRUE}" && test -z "${am__fastdepCXX_FALSE}"; then + { { echo "$as_me:$LINENO: error: conditional \"am__fastdepCXX\" was never defined. +Usually this means the macro was only invoked conditionally." >&5 +echo "$as_me: error: conditional \"am__fastdepCXX\" was never defined. +Usually this means the macro was only invoked conditionally." >&2;} + { (exit 1); exit 1; }; } +fi + +: ${CONFIG_STATUS=./config.status} +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files $CONFIG_STATUS" +{ echo "$as_me:$LINENO: creating $CONFIG_STATUS" >&5 +echo "$as_me: creating $CONFIG_STATUS" >&6;} +cat >$CONFIG_STATUS <<_ACEOF +#! $SHELL +# Generated by $as_me. +# Run this file to recreate the current configuration. +# Compiler output produced by configure, useful for debugging +# configure, is in config.log if it exists. + +debug=false +ac_cs_recheck=false +ac_cs_silent=false +SHELL=\${CONFIG_SHELL-$SHELL} +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF +## --------------------- ## +## M4sh Initialization. ## +## --------------------- ## + +# Be Bourne compatible +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' +elif test -n "${BASH_VERSION+set}" && (set -o posix) >/dev/null 2>&1; then + set -o posix +fi +DUALCASE=1; export DUALCASE # for MKS sh + +# Support unset when possible. +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + as_unset=unset +else + as_unset=false +fi + + +# Work around bugs in pre-3.0 UWIN ksh. +$as_unset ENV MAIL MAILPATH +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +for as_var in \ + LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \ + LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \ + LC_TELEPHONE LC_TIME +do + if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then + eval $as_var=C; export $as_var + else + $as_unset $as_var + fi +done + +# Required to use basename. 
+if expr a : '\(a\)' >/dev/null 2>&1; then + as_expr=expr +else + as_expr=false +fi + +if (basename /) >/dev/null 2>&1 && test "X`basename / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + + +# Name of the executable. +as_me=`$as_basename "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)$' \| \ + . : '\(.\)' 2>/dev/null || +echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/; q; } + /^X\/\(\/\/\)$/{ s//\1/; q; } + /^X\/\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + + +# PATH needs CR, and LINENO needs CR and PATH. +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + echo "#! /bin/sh" >conf$$.sh + echo "exit 0" >>conf$$.sh + chmod +x conf$$.sh + if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then + PATH_SEPARATOR=';' + else + PATH_SEPARATOR=: + fi + rm -f conf$$.sh +fi + + + as_lineno_1=$LINENO + as_lineno_2=$LINENO + as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null` + test "x$as_lineno_1" != "x$as_lineno_2" && + test "x$as_lineno_3" = "x$as_lineno_2" || { + # Find who we are. Look in the path if we contain no path at all + # relative or not. + case $0 in + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break +done + + ;; + esac + # We did not find ourselves, most probably we were run as `sh COMMAND' + # in which case we are not to be found in the path. + if test "x$as_myself" = x; then + as_myself=$0 + fi + if test ! -f "$as_myself"; then + { { echo "$as_me:$LINENO: error: cannot find myself; rerun with an absolute path" >&5 +echo "$as_me: error: cannot find myself; rerun with an absolute path" >&2;} + { (exit 1); exit 1; }; } + fi + case $CONFIG_SHELL in + '') + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for as_base in sh bash ksh sh5; do + case $as_dir in + /*) + if ("$as_dir/$as_base" -c ' + as_lineno_1=$LINENO + as_lineno_2=$LINENO + as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null` + test "x$as_lineno_1" != "x$as_lineno_2" && + test "x$as_lineno_3" = "x$as_lineno_2" ') 2>/dev/null; then + $as_unset BASH_ENV || test "${BASH_ENV+set}" != set || { BASH_ENV=; export BASH_ENV; } + $as_unset ENV || test "${ENV+set}" != set || { ENV=; export ENV; } + CONFIG_SHELL=$as_dir/$as_base + export CONFIG_SHELL + exec "$CONFIG_SHELL" "$0" ${1+"$@"} + fi;; + esac + done +done +;; + esac + + # Create $as_me.lineno as a copy of $as_myself, but with $LINENO + # uniformly replaced by the line number. The first 'sed' inserts a + # line-number line before each line; the second 'sed' does the real + # work. The second script uses 'N' to pair each line-number line + # with the numbered line, and appends trailing '-' during + # substitution so that $LINENO is not a special case at line end. + # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the + # second 'sed' script. Blame Lee E. McMahon for sed's syntax. 
:-) + sed '=' <$as_myself | + sed ' + N + s,$,-, + : loop + s,^\(['$as_cr_digits']*\)\(.*\)[$]LINENO\([^'$as_cr_alnum'_]\),\1\2\1\3, + t loop + s,-$,, + s,^['$as_cr_digits']*\n,, + ' >$as_me.lineno && + chmod +x $as_me.lineno || + { { echo "$as_me:$LINENO: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&5 +echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2;} + { (exit 1); exit 1; }; } + + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensible to this). + . ./$as_me.lineno + # Exit status is that of the last command. + exit +} + + +case `echo "testing\c"; echo 1,2,3`,`echo -n testing; echo 1,2,3` in + *c*,-n*) ECHO_N= ECHO_C=' +' ECHO_T=' ' ;; + *c*,* ) ECHO_N=-n ECHO_C= ECHO_T= ;; + *) ECHO_N= ECHO_C='\c' ECHO_T= ;; +esac + +if expr a : '\(a\)' >/dev/null 2>&1; then + as_expr=expr +else + as_expr=false +fi + +rm -f conf$$ conf$$.exe conf$$.file +echo >conf$$.file +if ln -s conf$$.file conf$$ 2>/dev/null; then + # We could just check for DJGPP; but this test a) works b) is more generic + # and c) will remain valid once DJGPP supports symlinks (DJGPP 2.04). + if test -f conf$$.exe; then + # Don't use ln at all; we don't have any links + as_ln_s='cp -p' + else + as_ln_s='ln -s' + fi +elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln +else + as_ln_s='cp -p' +fi +rm -f conf$$ conf$$.exe conf$$.file + +if mkdir -p . 2>/dev/null; then + as_mkdir_p=: +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +as_executable_p="test -f" + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +# IFS +# We need space, tab and new line, in precisely that order. +as_nl=' +' +IFS=" $as_nl" + +# CDPATH. +$as_unset CDPATH + +exec 6>&1 + +# Open the log real soon, to keep \$[0] and so on meaningful, and to +# report actual input values of CONFIG_FILES etc. instead of their +# values after options handling. Logging --version etc. is OK. +exec 5>>config.log +{ + echo + sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX +## Running $as_me. ## +_ASBOX +} >&5 +cat >&5 <<_CSEOF + +This file was extended by $as_me, which was +generated by GNU Autoconf 2.59. Invocation command line was + + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS + CONFIG_LINKS = $CONFIG_LINKS + CONFIG_COMMANDS = $CONFIG_COMMANDS + $ $0 $@ + +_CSEOF +echo "on `(hostname || uname -n) 2>/dev/null | sed 1q`" >&5 +echo >&5 +_ACEOF + +# Files that config.status was made for. +if test -n "$ac_config_files"; then + echo "config_files=\"$ac_config_files\"" >>$CONFIG_STATUS +fi + +if test -n "$ac_config_headers"; then + echo "config_headers=\"$ac_config_headers\"" >>$CONFIG_STATUS +fi + +if test -n "$ac_config_links"; then + echo "config_links=\"$ac_config_links\"" >>$CONFIG_STATUS +fi + +if test -n "$ac_config_commands"; then + echo "config_commands=\"$ac_config_commands\"" >>$CONFIG_STATUS +fi + +cat >>$CONFIG_STATUS <<\_ACEOF + +ac_cs_usage="\ +\`$as_me' instantiates files from templates according to the +current configuration. + +Usage: $0 [OPTIONS] [FILE]... 
+
+ -h, --help print this help, then exit
+ -V, --version print version number, then exit
+ -q, --quiet do not print progress messages
+ -d, --debug don't remove temporary files
+ --recheck update $as_me by reconfiguring in the same conditions
+ --file=FILE[:TEMPLATE]
+ instantiate the configuration file FILE
+ --header=FILE[:TEMPLATE]
+ instantiate the configuration header FILE
+
+Configuration files:
+$config_files
+
+Configuration headers:
+$config_headers
+
+Configuration commands:
+$config_commands
+
+Report bugs to <bug-autoconf@gnu.org>."
+_ACEOF
+
+cat >>$CONFIG_STATUS <<_ACEOF
+ac_cs_version="\\
+config.status
+configured by $0, generated by GNU Autoconf 2.59,
+ with options \\"`echo "$ac_configure_args" | sed 's/[\\""\`\$]/\\\\&/g'`\\"
+
+Copyright (C) 2003 Free Software Foundation, Inc.
+This config.status script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it."
+srcdir=$srcdir
+INSTALL="$INSTALL"
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+# If no file are specified by the user, then we need to provide default
+# value. By we need to know if files were specified by the user.
+ac_need_defaults=:
+while test $# != 0
+do
+ case $1 in
+ --*=*)
+ ac_option=`expr "x$1" : 'x\([^=]*\)='`
+ ac_optarg=`expr "x$1" : 'x[^=]*=\(.*\)'`
+ ac_shift=:
+ ;;
+ -*)
+ ac_option=$1
+ ac_optarg=$2
+ ac_shift=shift
+ ;;
+ *) # This is not an option, so the user has probably given explicit
+ # arguments.
+ ac_option=$1
+ ac_need_defaults=false;;
+ esac
+
+ case $ac_option in
+ # Handling of the options.
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF
+ -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
+ ac_cs_recheck=: ;;
+ --version | --vers* | -V )
+ echo "$ac_cs_version"; exit 0 ;;
+ --he | --h)
+ # Conflict between --help and --header
+ { { echo "$as_me:$LINENO: error: ambiguous option: $1
+Try \`$0 --help' for more information." >&5
+echo "$as_me: error: ambiguous option: $1
+Try \`$0 --help' for more information." >&2;}
+ { (exit 1); exit 1; }; };;
+ --help | --hel | -h )
+ echo "$ac_cs_usage"; exit 0 ;;
+ --debug | --d* | -d )
+ debug=: ;;
+ --file | --fil | --fi | --f )
+ $ac_shift
+ CONFIG_FILES="$CONFIG_FILES $ac_optarg"
+ ac_need_defaults=false;;
+ --header | --heade | --head | --hea )
+ $ac_shift
+ CONFIG_HEADERS="$CONFIG_HEADERS $ac_optarg"
+ ac_need_defaults=false;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil | --si | --s)
+ ac_cs_silent=: ;;
+
+ # This is an error.
+ -*) { { echo "$as_me:$LINENO: error: unrecognized option: $1
+Try \`$0 --help' for more information." >&5
+echo "$as_me: error: unrecognized option: $1
+Try \`$0 --help' for more information." >&2;}
+ { (exit 1); exit 1; }; } ;;
+
+ *) ac_config_targets="$ac_config_targets $1" ;;
+
+ esac
+ shift
+done
+
+ac_configure_extra_args=
+
+if $ac_cs_silent; then
+ exec 6>/dev/null
+ ac_configure_extra_args="$ac_configure_extra_args --silent"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF
+if \$ac_cs_recheck; then
+ echo "running $SHELL $0 " $ac_configure_args \$ac_configure_extra_args " --no-create --no-recursion" >&6
+ exec $SHELL $0 $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
+fi
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<_ACEOF
+#
+# INIT-COMMANDS section.
+#
+
+AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"
+
+_ACEOF
+
+
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+for ac_config_target in $ac_config_targets
+do
+ case "$ac_config_target" in
+ # Handling of arguments.
+ "Makefile" ) CONFIG_FILES="$CONFIG_FILES Makefile" ;; + "src/org/apache/hadoop/io/compress/zlib/Makefile" ) CONFIG_FILES="$CONFIG_FILES src/org/apache/hadoop/io/compress/zlib/Makefile" ;; + "src/org/apache/hadoop/io/compress/lzma/Makefile" ) CONFIG_FILES="$CONFIG_FILES src/org/apache/hadoop/io/compress/lzma/Makefile" ;; + "lib/Makefile" ) CONFIG_FILES="$CONFIG_FILES lib/Makefile" ;; + "depfiles" ) CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;; + "config.h" ) CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;; + *) { { echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5 +echo "$as_me: error: invalid argument: $ac_config_target" >&2;} + { (exit 1); exit 1; }; };; + esac +done + +# If the user did not use the arguments to specify the items to instantiate, +# then the envvar interface is used. Set only those that are not. +# We use the long form for the default assignment because of an extremely +# bizarre bug on SunOS 4.1.3. +if $ac_need_defaults; then + test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files + test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers + test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands +fi + +# Have a temporary directory for convenience. Make it in the build tree +# simply because there is no reason to put it here, and in addition, +# creating and moving files from /tmp can sometimes cause problems. +# Create a temporary directory, and hook for its removal unless debugging. +$debug || +{ + trap 'exit_status=$?; rm -rf $tmp && exit $exit_status' 0 + trap '{ (exit 1); exit 1; }' 1 2 13 15 +} + +# Create a (secure) tmp directory for tmp files. + +{ + tmp=`(umask 077 && mktemp -d -q "./confstatXXXXXX") 2>/dev/null` && + test -n "$tmp" && test -d "$tmp" +} || +{ + tmp=./confstat$$-$RANDOM + (umask 077 && mkdir $tmp) +} || +{ + echo "$me: cannot create a temporary directory in ." >&2 + { (exit 1); exit 1; } +} + +_ACEOF + +cat >>$CONFIG_STATUS <<_ACEOF + +# +# CONFIG_FILES section. +# + +# No need to generate the scripts if there are no CONFIG_FILES. +# This happens for instance when ./config.status config.h +if test -n "\$CONFIG_FILES"; then + # Protect against being on the right side of a sed subst in config.status. 
+ sed 's/,@/@@/; s/@,/@@/; s/,;t t\$/@;t t/; /@;t t\$/s/[\\\\&,]/\\\\&/g; + s/@@/,@/; s/@@/@,/; s/@;t t\$/,;t t/' >\$tmp/subs.sed <<\\CEOF +s,@SHELL@,$SHELL,;t t +s,@PATH_SEPARATOR@,$PATH_SEPARATOR,;t t +s,@PACKAGE_NAME@,$PACKAGE_NAME,;t t +s,@PACKAGE_TARNAME@,$PACKAGE_TARNAME,;t t +s,@PACKAGE_VERSION@,$PACKAGE_VERSION,;t t +s,@PACKAGE_STRING@,$PACKAGE_STRING,;t t +s,@PACKAGE_BUGREPORT@,$PACKAGE_BUGREPORT,;t t +s,@exec_prefix@,$exec_prefix,;t t +s,@prefix@,$prefix,;t t +s,@program_transform_name@,$program_transform_name,;t t +s,@bindir@,$bindir,;t t +s,@sbindir@,$sbindir,;t t +s,@libexecdir@,$libexecdir,;t t +s,@datadir@,$datadir,;t t +s,@sysconfdir@,$sysconfdir,;t t +s,@sharedstatedir@,$sharedstatedir,;t t +s,@localstatedir@,$localstatedir,;t t +s,@libdir@,$libdir,;t t +s,@includedir@,$includedir,;t t +s,@oldincludedir@,$oldincludedir,;t t +s,@infodir@,$infodir,;t t +s,@mandir@,$mandir,;t t +s,@build_alias@,$build_alias,;t t +s,@host_alias@,$host_alias,;t t +s,@target_alias@,$target_alias,;t t +s,@DEFS@,$DEFS,;t t +s,@ECHO_C@,$ECHO_C,;t t +s,@ECHO_N@,$ECHO_N,;t t +s,@ECHO_T@,$ECHO_T,;t t +s,@LIBS@,$LIBS,;t t +s,@INSTALL_PROGRAM@,$INSTALL_PROGRAM,;t t +s,@INSTALL_SCRIPT@,$INSTALL_SCRIPT,;t t +s,@INSTALL_DATA@,$INSTALL_DATA,;t t +s,@CYGPATH_W@,$CYGPATH_W,;t t +s,@PACKAGE@,$PACKAGE,;t t +s,@VERSION@,$VERSION,;t t +s,@ACLOCAL@,$ACLOCAL,;t t +s,@AUTOCONF@,$AUTOCONF,;t t +s,@AUTOMAKE@,$AUTOMAKE,;t t +s,@AUTOHEADER@,$AUTOHEADER,;t t +s,@MAKEINFO@,$MAKEINFO,;t t +s,@install_sh@,$install_sh,;t t +s,@STRIP@,$STRIP,;t t +s,@ac_ct_STRIP@,$ac_ct_STRIP,;t t +s,@INSTALL_STRIP_PROGRAM@,$INSTALL_STRIP_PROGRAM,;t t +s,@mkdir_p@,$mkdir_p,;t t +s,@AWK@,$AWK,;t t +s,@SET_MAKE@,$SET_MAKE,;t t +s,@am__leading_dot@,$am__leading_dot,;t t +s,@AMTAR@,$AMTAR,;t t +s,@am__tar@,$am__tar,;t t +s,@am__untar@,$am__untar,;t t +s,@CC@,$CC,;t t +s,@CFLAGS@,$CFLAGS,;t t +s,@LDFLAGS@,$LDFLAGS,;t t +s,@CPPFLAGS@,$CPPFLAGS,;t t +s,@ac_ct_CC@,$ac_ct_CC,;t t +s,@EXEEXT@,$EXEEXT,;t t +s,@OBJEXT@,$OBJEXT,;t t +s,@DEPDIR@,$DEPDIR,;t t +s,@am__include@,$am__include,;t t +s,@am__quote@,$am__quote,;t t +s,@AMDEP_TRUE@,$AMDEP_TRUE,;t t +s,@AMDEP_FALSE@,$AMDEP_FALSE,;t t +s,@AMDEPBACKSLASH@,$AMDEPBACKSLASH,;t t +s,@CCDEPMODE@,$CCDEPMODE,;t t +s,@am__fastdepCC_TRUE@,$am__fastdepCC_TRUE,;t t +s,@am__fastdepCC_FALSE@,$am__fastdepCC_FALSE,;t t +s,@build@,$build,;t t +s,@build_cpu@,$build_cpu,;t t +s,@build_vendor@,$build_vendor,;t t +s,@build_os@,$build_os,;t t +s,@host@,$host,;t t +s,@host_cpu@,$host_cpu,;t t +s,@host_vendor@,$host_vendor,;t t +s,@host_os@,$host_os,;t t +s,@SED@,$SED,;t t +s,@EGREP@,$EGREP,;t t +s,@LN_S@,$LN_S,;t t +s,@ECHO@,$ECHO,;t t +s,@AR@,$AR,;t t +s,@ac_ct_AR@,$ac_ct_AR,;t t +s,@RANLIB@,$RANLIB,;t t +s,@ac_ct_RANLIB@,$ac_ct_RANLIB,;t t +s,@CPP@,$CPP,;t t +s,@CXX@,$CXX,;t t +s,@CXXFLAGS@,$CXXFLAGS,;t t +s,@ac_ct_CXX@,$ac_ct_CXX,;t t +s,@CXXDEPMODE@,$CXXDEPMODE,;t t +s,@am__fastdepCXX_TRUE@,$am__fastdepCXX_TRUE,;t t +s,@am__fastdepCXX_FALSE@,$am__fastdepCXX_FALSE,;t t +s,@CXXCPP@,$CXXCPP,;t t +s,@F77@,$F77,;t t +s,@FFLAGS@,$FFLAGS,;t t +s,@ac_ct_F77@,$ac_ct_F77,;t t +s,@LIBTOOL@,$LIBTOOL,;t t +s,@JNI_LDFLAGS@,$JNI_LDFLAGS,;t t +s,@JNI_CPPFLAGS@,$JNI_CPPFLAGS,;t t +s,@LIBOBJS@,$LIBOBJS,;t t +s,@LTLIBOBJS@,$LTLIBOBJS,;t t +CEOF + +_ACEOF + + cat >>$CONFIG_STATUS <<\_ACEOF + # Split the substitutions into bite-sized pieces for seds with + # small command number limits, like on Digital OSF/1 and HP-UX. + ac_max_sed_lines=48 + ac_sed_frag=1 # Number of current file. + ac_beg=1 # First line for current file. 
+ ac_end=$ac_max_sed_lines # Line after last line for current file. + ac_more_lines=: + ac_sed_cmds= + while $ac_more_lines; do + if test $ac_beg -gt 1; then + sed "1,${ac_beg}d; ${ac_end}q" $tmp/subs.sed >$tmp/subs.frag + else + sed "${ac_end}q" $tmp/subs.sed >$tmp/subs.frag + fi + if test ! -s $tmp/subs.frag; then + ac_more_lines=false + else + # The purpose of the label and of the branching condition is to + # speed up the sed processing (if there are no `@' at all, there + # is no need to browse any of the substitutions). + # These are the two extra sed commands mentioned above. + (echo ':t + /@[a-zA-Z_][a-zA-Z_0-9]*@/!b' && cat $tmp/subs.frag) >$tmp/subs-$ac_sed_frag.sed + if test -z "$ac_sed_cmds"; then + ac_sed_cmds="sed -f $tmp/subs-$ac_sed_frag.sed" + else + ac_sed_cmds="$ac_sed_cmds | sed -f $tmp/subs-$ac_sed_frag.sed" + fi + ac_sed_frag=`expr $ac_sed_frag + 1` + ac_beg=$ac_end + ac_end=`expr $ac_end + $ac_max_sed_lines` + fi + done + if test -z "$ac_sed_cmds"; then + ac_sed_cmds=cat + fi +fi # test -n "$CONFIG_FILES" + +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF +for ac_file in : $CONFIG_FILES; do test "x$ac_file" = x: && continue + # Support "outfile[:infile[:infile...]]", defaulting infile="outfile.in". + case $ac_file in + - | *:- | *:-:* ) # input from stdin + cat >$tmp/stdin + ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'` + ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;; + *:* ) ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'` + ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;; + * ) ac_file_in=$ac_file.in ;; + esac + + # Compute @srcdir@, @top_srcdir@, and @INSTALL@ for subdirectories. + ac_dir=`(dirname "$ac_file") 2>/dev/null || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || +echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + { if $as_mkdir_p; then + mkdir -p "$ac_dir" + else + as_dir="$ac_dir" + as_dirs= + while test ! -d "$as_dir"; do + as_dirs="$as_dir $as_dirs" + as_dir=`(dirname "$as_dir") 2>/dev/null || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || +echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + done + test ! -n "$as_dirs" || mkdir $as_dirs + fi || { { echo "$as_me:$LINENO: error: cannot create directory \"$ac_dir\"" >&5 +echo "$as_me: error: cannot create directory \"$ac_dir\"" >&2;} + { (exit 1); exit 1; }; }; } + + ac_builddir=. + +if test "$ac_dir" != .; then + ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'` + # A "../" for each directory in $ac_dir_suffix. + ac_top_builddir=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,../,g'` +else + ac_dir_suffix= ac_top_builddir= +fi + +case $srcdir in + .) # No --srcdir option. We are building in place. + ac_srcdir=. + if test -z "$ac_top_builddir"; then + ac_top_srcdir=. + else + ac_top_srcdir=`echo $ac_top_builddir | sed 's,/$,,'` + fi ;; + [\\/]* | ?:[\\/]* ) # Absolute path. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir ;; + *) # Relative path. 
+ ac_srcdir=$ac_top_builddir$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_builddir$srcdir ;; +esac + +# Do not use `cd foo && pwd` to compute absolute paths, because +# the directories may not exist. +case `pwd` in +.) ac_abs_builddir="$ac_dir";; +*) + case "$ac_dir" in + .) ac_abs_builddir=`pwd`;; + [\\/]* | ?:[\\/]* ) ac_abs_builddir="$ac_dir";; + *) ac_abs_builddir=`pwd`/"$ac_dir";; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_top_builddir=${ac_top_builddir}.;; +*) + case ${ac_top_builddir}. in + .) ac_abs_top_builddir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_top_builddir=${ac_top_builddir}.;; + *) ac_abs_top_builddir=$ac_abs_builddir/${ac_top_builddir}.;; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_srcdir=$ac_srcdir;; +*) + case $ac_srcdir in + .) ac_abs_srcdir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_srcdir=$ac_srcdir;; + *) ac_abs_srcdir=$ac_abs_builddir/$ac_srcdir;; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_top_srcdir=$ac_top_srcdir;; +*) + case $ac_top_srcdir in + .) ac_abs_top_srcdir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_top_srcdir=$ac_top_srcdir;; + *) ac_abs_top_srcdir=$ac_abs_builddir/$ac_top_srcdir;; + esac;; +esac + + + case $INSTALL in + [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; + *) ac_INSTALL=$ac_top_builddir$INSTALL ;; + esac + + if test x"$ac_file" != x-; then + { echo "$as_me:$LINENO: creating $ac_file" >&5 +echo "$as_me: creating $ac_file" >&6;} + rm -f "$ac_file" + fi + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + if test x"$ac_file" = x-; then + configure_input= + else + configure_input="$ac_file. " + fi + configure_input=$configure_input"Generated from `echo $ac_file_in | + sed 's,.*/,,'` by configure." + + # First look for the input files in the build tree, otherwise in the + # src tree. + ac_file_inputs=`IFS=: + for f in $ac_file_in; do + case $f in + -) echo $tmp/stdin ;; + [\\/$]*) + # Absolute (can't be DOS-style, as IFS=:) + test -f "$f" || { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5 +echo "$as_me: error: cannot find input file: $f" >&2;} + { (exit 1); exit 1; }; } + echo "$f";; + *) # Relative + if test -f "$f"; then + # Build tree + echo "$f" + elif test -f "$srcdir/$f"; then + # Source tree + echo "$srcdir/$f" + else + # /dev/null tree + { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5 +echo "$as_me: error: cannot find input file: $f" >&2;} + { (exit 1); exit 1; }; } + fi;; + esac + done` || { (exit 1); exit 1; } +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF + sed "$ac_vpsub +$extrasub +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF +:t +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b +s,@configure_input@,$configure_input,;t t +s,@srcdir@,$ac_srcdir,;t t +s,@abs_srcdir@,$ac_abs_srcdir,;t t +s,@top_srcdir@,$ac_top_srcdir,;t t +s,@abs_top_srcdir@,$ac_abs_top_srcdir,;t t +s,@builddir@,$ac_builddir,;t t +s,@abs_builddir@,$ac_abs_builddir,;t t +s,@top_builddir@,$ac_top_builddir,;t t +s,@abs_top_builddir@,$ac_abs_top_builddir,;t t +s,@INSTALL@,$ac_INSTALL,;t t +" $ac_file_inputs | (eval "$ac_sed_cmds") >$tmp/out + rm -f $tmp/stdin + if test x"$ac_file" != x-; then + mv $tmp/out $ac_file + else + cat $tmp/out + rm -f $tmp/out + fi + +done +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF + +# +# CONFIG_HEADER section. +# + +# These sed commands are passed to sed as "A NAME B NAME C VALUE D", where +# NAME is the cpp macro being defined and VALUE is the value it is being given. 
+# +# ac_d sets the value in "#define NAME VALUE" lines. +ac_dA='s,^\([ ]*\)#\([ ]*define[ ][ ]*\)' +ac_dB='[ ].*$,\1#\2' +ac_dC=' ' +ac_dD=',;t' +# ac_u turns "#undef NAME" without trailing blanks into "#define NAME VALUE". +ac_uA='s,^\([ ]*\)#\([ ]*\)undef\([ ][ ]*\)' +ac_uB='$,\1#\2define\3' +ac_uC=' ' +ac_uD=',;t' + +for ac_file in : $CONFIG_HEADERS; do test "x$ac_file" = x: && continue + # Support "outfile[:infile[:infile...]]", defaulting infile="outfile.in". + case $ac_file in + - | *:- | *:-:* ) # input from stdin + cat >$tmp/stdin + ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'` + ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;; + *:* ) ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'` + ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;; + * ) ac_file_in=$ac_file.in ;; + esac + + test x"$ac_file" != x- && { echo "$as_me:$LINENO: creating $ac_file" >&5 +echo "$as_me: creating $ac_file" >&6;} + + # First look for the input files in the build tree, otherwise in the + # src tree. + ac_file_inputs=`IFS=: + for f in $ac_file_in; do + case $f in + -) echo $tmp/stdin ;; + [\\/$]*) + # Absolute (can't be DOS-style, as IFS=:) + test -f "$f" || { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5 +echo "$as_me: error: cannot find input file: $f" >&2;} + { (exit 1); exit 1; }; } + # Do quote $f, to prevent DOS paths from being IFS'd. + echo "$f";; + *) # Relative + if test -f "$f"; then + # Build tree + echo "$f" + elif test -f "$srcdir/$f"; then + # Source tree + echo "$srcdir/$f" + else + # /dev/null tree + { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5 +echo "$as_me: error: cannot find input file: $f" >&2;} + { (exit 1); exit 1; }; } + fi;; + esac + done` || { (exit 1); exit 1; } + # Remove the trailing spaces. + sed 's/[ ]*$//' $ac_file_inputs >$tmp/in + +_ACEOF + +# Transform confdefs.h into two sed scripts, `conftest.defines' and +# `conftest.undefs', that substitutes the proper values into +# config.h.in to produce config.h. The first handles `#define' +# templates, and the second `#undef' templates. +# And first: Protect against being on the right side of a sed subst in +# config.status. Protect against being in an unquoted here document +# in config.status. +rm -f conftest.defines conftest.undefs +# Using a here document instead of a string reduces the quoting nightmare. +# Putting comments in sed scripts is not portable. +# +# `end' is used to avoid that the second main sed command (meant for +# 0-ary CPP macros) applies to n-ary macro definitions. +# See the Autoconf documentation for `clear'. +cat >confdef2sed.sed <<\_ACEOF +s/[\\&,]/\\&/g +s,[\\$`],\\&,g +t clear +: clear +s,^[ ]*#[ ]*define[ ][ ]*\([^ (][^ (]*\)\(([^)]*)\)[ ]*\(.*\)$,${ac_dA}\1${ac_dB}\1\2${ac_dC}\3${ac_dD},gp +t end +s,^[ ]*#[ ]*define[ ][ ]*\([^ ][^ ]*\)[ ]*\(.*\)$,${ac_dA}\1${ac_dB}\1${ac_dC}\2${ac_dD},gp +: end +_ACEOF +# If some macros were called several times there might be several times +# the same #defines, which is useless. Nevertheless, we may not want to +# sort them, since we want the *last* AC-DEFINE to be honored. +uniq confdefs.h | sed -n -f confdef2sed.sed >conftest.defines +sed 's/ac_d/ac_u/g' conftest.defines >conftest.undefs +rm -f confdef2sed.sed + +# This sed command replaces #undef with comments. This is necessary, for +# example, in the case of _POSIX_SOURCE, which is predefined and required +# on some systems where configure will not decide to define it. 
+cat >>conftest.undefs <<\_ACEOF +s,^[ ]*#[ ]*undef[ ][ ]*[a-zA-Z_][a-zA-Z_0-9]*,/* & */, +_ACEOF + +# Break up conftest.defines because some shells have a limit on the size +# of here documents, and old seds have small limits too (100 cmds). +echo ' # Handle all the #define templates only if necessary.' >>$CONFIG_STATUS +echo ' if grep "^[ ]*#[ ]*define" $tmp/in >/dev/null; then' >>$CONFIG_STATUS +echo ' # If there are no defines, we may have an empty if/fi' >>$CONFIG_STATUS +echo ' :' >>$CONFIG_STATUS +rm -f conftest.tail +while grep . conftest.defines >/dev/null +do + # Write a limited-size here document to $tmp/defines.sed. + echo ' cat >$tmp/defines.sed <<CEOF' >>$CONFIG_STATUS + # Speed up: don't consider the non `#define' lines. + echo '/^[ ]*#[ ]*define/!b' >>$CONFIG_STATUS + # Work around the forget-to-reset-the-flag bug. + echo 't clr' >>$CONFIG_STATUS + echo ': clr' >>$CONFIG_STATUS + sed ${ac_max_here_lines}q conftest.defines >>$CONFIG_STATUS + echo 'CEOF + sed -f $tmp/defines.sed $tmp/in >$tmp/out + rm -f $tmp/in + mv $tmp/out $tmp/in +' >>$CONFIG_STATUS + sed 1,${ac_max_here_lines}d conftest.defines >conftest.tail + rm -f conftest.defines + mv conftest.tail conftest.defines +done +rm -f conftest.defines +echo ' fi # grep' >>$CONFIG_STATUS +echo >>$CONFIG_STATUS + +# Break up conftest.undefs because some shells have a limit on the size +# of here documents, and old seds have small limits too (100 cmds). +echo ' # Handle all the #undef templates' >>$CONFIG_STATUS +rm -f conftest.tail +while grep . conftest.undefs >/dev/null +do + # Write a limited-size here document to $tmp/undefs.sed. + echo ' cat >$tmp/undefs.sed <<CEOF' >>$CONFIG_STATUS + # Speed up: don't consider the non `#undef' + echo '/^[ ]*#[ ]*undef/!b' >>$CONFIG_STATUS + # Work around the forget-to-reset-the-flag bug. + echo 't clr' >>$CONFIG_STATUS + echo ': clr' >>$CONFIG_STATUS + sed ${ac_max_here_lines}q conftest.undefs >>$CONFIG_STATUS + echo 'CEOF + sed -f $tmp/undefs.sed $tmp/in >$tmp/out + rm -f $tmp/in + mv $tmp/out $tmp/in +' >>$CONFIG_STATUS + sed 1,${ac_max_here_lines}d conftest.undefs >conftest.tail + rm -f conftest.undefs + mv conftest.tail conftest.undefs +done +rm -f conftest.undefs + +cat >>$CONFIG_STATUS <<\_ACEOF + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + if test x"$ac_file" = x-; then + echo "/* Generated by configure. */" >$tmp/config.h + else + echo "/* $ac_file. Generated by configure. */" >$tmp/config.h + fi + cat $tmp/in >>$tmp/config.h + rm -f $tmp/in + if test x"$ac_file" != x-; then + if diff $ac_file $tmp/config.h >/dev/null 2>&1; then + { echo "$as_me:$LINENO: $ac_file is unchanged" >&5 +echo "$as_me: $ac_file is unchanged" >&6;} + else + ac_dir=`(dirname "$ac_file") 2>/dev/null || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || +echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + { if $as_mkdir_p; then + mkdir -p "$ac_dir" + else + as_dir="$ac_dir" + as_dirs= + while test !
-d "$as_dir"; do + as_dirs="$as_dir $as_dirs" + as_dir=`(dirname "$as_dir") 2>/dev/null || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || +echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + done + test ! -n "$as_dirs" || mkdir $as_dirs + fi || { { echo "$as_me:$LINENO: error: cannot create directory \"$ac_dir\"" >&5 +echo "$as_me: error: cannot create directory \"$ac_dir\"" >&2;} + { (exit 1); exit 1; }; }; } + + rm -f $ac_file + mv $tmp/config.h $ac_file + fi + else + cat $tmp/config.h + rm -f $tmp/config.h + fi +# Compute $ac_file's index in $config_headers. +_am_stamp_count=1 +for _am_header in $config_headers :; do + case $_am_header in + $ac_file | $ac_file:* ) + break ;; + * ) + _am_stamp_count=`expr $_am_stamp_count + 1` ;; + esac +done +echo "timestamp for $ac_file" >`(dirname $ac_file) 2>/dev/null || +$as_expr X$ac_file : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X$ac_file : 'X\(//\)[^/]' \| \ + X$ac_file : 'X\(//\)$' \| \ + X$ac_file : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || +echo X$ac_file | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'`/stamp-h$_am_stamp_count +done +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF + +# +# CONFIG_COMMANDS section. +# +for ac_file in : $CONFIG_COMMANDS; do test "x$ac_file" = x: && continue + ac_dest=`echo "$ac_file" | sed 's,:.*,,'` + ac_source=`echo "$ac_file" | sed 's,[^:]*:,,'` + ac_dir=`(dirname "$ac_dest") 2>/dev/null || +$as_expr X"$ac_dest" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_dest" : 'X\(//\)[^/]' \| \ + X"$ac_dest" : 'X\(//\)$' \| \ + X"$ac_dest" : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || +echo X"$ac_dest" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + { if $as_mkdir_p; then + mkdir -p "$ac_dir" + else + as_dir="$ac_dir" + as_dirs= + while test ! -d "$as_dir"; do + as_dirs="$as_dir $as_dirs" + as_dir=`(dirname "$as_dir") 2>/dev/null || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || +echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + done + test ! -n "$as_dirs" || mkdir $as_dirs + fi || { { echo "$as_me:$LINENO: error: cannot create directory \"$ac_dir\"" >&5 +echo "$as_me: error: cannot create directory \"$ac_dir\"" >&2;} + { (exit 1); exit 1; }; }; } + + ac_builddir=. + +if test "$ac_dir" != .; then + ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'` + # A "../" for each directory in $ac_dir_suffix. + ac_top_builddir=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,../,g'` +else + ac_dir_suffix= ac_top_builddir= +fi + +case $srcdir in + .) # No --srcdir option. We are building in place. + ac_srcdir=. + if test -z "$ac_top_builddir"; then + ac_top_srcdir=. + else + ac_top_srcdir=`echo $ac_top_builddir | sed 's,/$,,'` + fi ;; + [\\/]* | ?:[\\/]* ) # Absolute path. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir ;; + *) # Relative path. 
+ ac_srcdir=$ac_top_builddir$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_builddir$srcdir ;; +esac + +# Do not use `cd foo && pwd` to compute absolute paths, because +# the directories may not exist. +case `pwd` in +.) ac_abs_builddir="$ac_dir";; +*) + case "$ac_dir" in + .) ac_abs_builddir=`pwd`;; + [\\/]* | ?:[\\/]* ) ac_abs_builddir="$ac_dir";; + *) ac_abs_builddir=`pwd`/"$ac_dir";; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_top_builddir=${ac_top_builddir}.;; +*) + case ${ac_top_builddir}. in + .) ac_abs_top_builddir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_top_builddir=${ac_top_builddir}.;; + *) ac_abs_top_builddir=$ac_abs_builddir/${ac_top_builddir}.;; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_srcdir=$ac_srcdir;; +*) + case $ac_srcdir in + .) ac_abs_srcdir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_srcdir=$ac_srcdir;; + *) ac_abs_srcdir=$ac_abs_builddir/$ac_srcdir;; + esac;; +esac +case $ac_abs_builddir in +.) ac_abs_top_srcdir=$ac_top_srcdir;; +*) + case $ac_top_srcdir in + .) ac_abs_top_srcdir=$ac_abs_builddir;; + [\\/]* | ?:[\\/]* ) ac_abs_top_srcdir=$ac_top_srcdir;; + *) ac_abs_top_srcdir=$ac_abs_builddir/$ac_top_srcdir;; + esac;; +esac + + + { echo "$as_me:$LINENO: executing $ac_dest commands" >&5 +echo "$as_me: executing $ac_dest commands" >&6;} + case $ac_dest in + depfiles ) test x"$AMDEP_TRUE" != x"" || for mf in $CONFIG_FILES; do + # Strip MF so we end up with the name of the file. + mf=`echo "$mf" | sed -e 's/:.*$//'` + # Check whether this is an Automake generated Makefile or not. + # We used to match only the files named `Makefile.in', but + # some people rename them; so instead we look at the file content. + # Grep'ing the first line is not enough: some people post-process + # each Makefile.in and add a new line on top of each file to say so. + # So let's grep whole file. + if grep '^#.*generated by automake' $mf > /dev/null 2>&1; then + dirpart=`(dirname "$mf") 2>/dev/null || +$as_expr X"$mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$mf" : 'X\(//\)[^/]' \| \ + X"$mf" : 'X\(//\)$' \| \ + X"$mf" : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || +echo X"$mf" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + else + continue + fi + # Extract the definition of DEPDIR, am__include, and am__quote + # from the Makefile without running `make'. + DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` + test -z "$DEPDIR" && continue + am__include=`sed -n 's/^am__include = //p' < "$mf"` + test -z "am__include" && continue + am__quote=`sed -n 's/^am__quote = //p' < "$mf"` + # When using ansi2knr, U may be empty or an underscore; expand it + U=`sed -n 's/^U = //p' < "$mf"` + # Find all dependency output files, they are included files with + # $(DEPDIR) in their names. We invoke sed twice because it is the + # simplest approach to changing $(DEPDIR) to its actual value in the + # expansion. + for file in `sed -n " + s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ + sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do + # Make sure the directory exists. + test -f "$dirpart/$file" && continue + fdir=`(dirname "$file") 2>/dev/null || +$as_expr X"$file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$file" : 'X\(//\)[^/]' \| \ + X"$file" : 'X\(//\)$' \| \ + X"$file" : 'X\(/\)' \| \ + . 
: '\(.\)' 2>/dev/null || +echo X"$file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + { if $as_mkdir_p; then + mkdir -p $dirpart/$fdir + else + as_dir=$dirpart/$fdir + as_dirs= + while test ! -d "$as_dir"; do + as_dirs="$as_dir $as_dirs" + as_dir=`(dirname "$as_dir") 2>/dev/null || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| \ + . : '\(.\)' 2>/dev/null || +echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; } + /^X\(\/\/\)[^/].*/{ s//\1/; q; } + /^X\(\/\/\)$/{ s//\1/; q; } + /^X\(\/\).*/{ s//\1/; q; } + s/.*/./; q'` + done + test ! -n "$as_dirs" || mkdir $as_dirs + fi || { { echo "$as_me:$LINENO: error: cannot create directory $dirpart/$fdir" >&5 +echo "$as_me: error: cannot create directory $dirpart/$fdir" >&2;} + { (exit 1); exit 1; }; }; } + + # echo "creating $dirpart/$file" + echo '# dummy' > "$dirpart/$file" + done +done + ;; + esac +done +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF + +{ (exit 0); exit 0; } +_ACEOF +chmod +x $CONFIG_STATUS +ac_clean_files=$ac_clean_files_save + + +# configure is writing to config.log, and then calls config.status. +# config.status does its own redirection, appending to config.log. +# Unfortunately, on DOS this fails, as config.log is still kept open +# by configure, so config.status won't be able to write to it; its +# output is simply discarded. So we exec the FD to /dev/null, +# effectively closing config.log, so it can be properly (re)opened and +# appended to by config.status. When coming back to configure, we +# need to make the FD available again. +if test "$no_create" != yes; then + ac_cs_success=: + ac_config_status_args= + test "$silent" = yes && + ac_config_status_args="$ac_config_status_args --quiet" + exec 5>/dev/null + $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false + exec 5>>config.log + # Use ||, not &&, to avoid exiting from the if with $? = 1, which + # would make configure fail if this is the last instruction. + $ac_cs_success || { (exit 1); exit 1; } +fi + + +# +#vim: sw=2: ts=2: noet +# diff --git a/src/native/configure.ac b/src/native/configure.ac new file mode 100644 index 0000000..0cf70f7 --- /dev/null +++ b/src/native/configure.ac @@ -0,0 +1,107 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +# configure.ac for hadoop native code. +# + +# Notes: +# 1. This configure.ac depends on the following environment variables to function correctly: +# * HADOOP_NATIVE_SRCDIR +# * JAVA_HOME +# * JVM_DATA_MODEL +# * OS_NAME +# * OS_ARCH +# All these are setup by build.xml. 
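For reference, a minimal sketch of providing that environment by hand and running this configure script directly from src/native; the JDK path, data model, and platform values below are illustrative assumptions, not values taken from this patch:

# Illustrative only -- build.xml normally exports these before invoking configure.
export HADOOP_NATIVE_SRCDIR=`pwd`            # run from src/native
export JAVA_HOME=/usr/lib/jvm/java-6-sun     # assumed JDK install location
export JVM_DATA_MODEL=64                     # 32 or 64, matching the target JVM
export OS_NAME=Linux
export OS_ARCH=amd64
sh ./configure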
+ +# -*- Autoconf -*- +# Process this file with autoconf to produce a configure script. +# + +AC_PREREQ(2.59) +AC_INIT(src/org_apache_hadoop.h) +AC_CONFIG_SRCDIR([src/org_apache_hadoop.h]) +AC_CONFIG_AUX_DIR(config) +AC_CONFIG_HEADER([config.h]) + +AM_INIT_AUTOMAKE(hadoop,1.0.0) + +# Checks for programs. +AC_PROG_CC +AC_PROG_LIBTOOL + +# Checks for libraries. +dnl Check for '-ldl' +AC_CHECK_LIB([dl], [dlopen]) + +dnl Check for '-ljvm' +JNI_LDFLAGS="" +if test $JAVA_HOME != "" +then + JNI_LDFLAGS="-L$JAVA_HOME/jre/lib/$OS_ARCH/server" +fi +ldflags_bak=$LDFLAGS +LDFLAGS="$LDFLAGS $JNI_LDFLAGS" +AC_CHECK_LIB([jvm], [JNI_GetCreatedJavaVMs]) +LDFLAGS=$ldflags_bak +AC_SUBST([JNI_LDFLAGS]) + +# Checks for header files. +dnl Check for Ansi C headers +AC_HEADER_STDC + +dnl Check for other standard C headers +AC_CHECK_HEADERS([stdio.h stddef.h], [], AC_MSG_ERROR(Some system headers not found... please ensure their presence on your platform.)) + +dnl Check for JNI headers +JNI_CPPFLAGS="" +if test $JAVA_HOME != "" +then + for dir in `find $JAVA_HOME/include -follow -type d` + do + JNI_CPPFLAGS="$JNI_CPPFLAGS -I$dir" + done +fi +cppflags_bak=$CPPFLAGS +CPPFLAGS="$CPPFLAGS $JNI_CPPFLAGS" +AC_CHECK_HEADERS([jni.h], [], AC_MSG_ERROR([Native java headers not found. Is \$JAVA_HOME set correctly?])) +CPPFLAGS=$cppflags_bak +AC_SUBST([JNI_CPPFLAGS]) + +dnl Check for zlib headers +AC_CHECK_HEADERS([zlib.h zconf.h], AC_COMPUTE_NEEDED_DSO(z,HADOOP_ZLIB_LIBRARY), AC_MSG_ERROR(Zlib headers were not found... native-hadoop library needs zlib to build. Please install the requisite zlib development package.)) + +dnl Check for lzma headers +AC_CHECK_HEADERS([lzma/lzma.h], AC_COMPUTE_NEEDED_DSO(lzma,HADOOP_LZMA_LIBRARY), AC_MSG_ERROR(lzma headers were not found... native-hadoop library needs lzma to build. Please install the requisite lzma-4.999.5alpha development package. +)) + +# Checks for typedefs, structures, and compiler characteristics. +AC_C_CONST + +# Checks for library functions. +AC_CHECK_FUNCS([memset]) + +AC_CONFIG_FILES([Makefile + src/org/apache/hadoop/io/compress/zlib/Makefile + src/org/apache/hadoop/io/compress/lzma/Makefile + lib/Makefile]) +AC_OUTPUT + +# +#vim: sw=2: ts=2: noet +# diff --git a/src/native/lib/Makefile.am b/src/native/lib/Makefile.am new file mode 100644 index 0000000..5d51cc5 --- /dev/null +++ b/src/native/lib/Makefile.am @@ -0,0 +1,44 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +# Makefile template for building libhadoop.so +# + +# +# Notes: +# 1. This makefile is designed to do the actual builds in $(HADOOP_HOME)/build/native/${os.name}-${os.arch}/lib +# 2. 
This makefile depends on the following environment variables to function correctly: +# * HADOOP_NATIVE_SRCDIR +# * JAVA_HOME +# * OS_ARCH +# All these are setup by build.xml and/or the top-level makefile. +# + +# Add .lo files in $(SUBDIRS) to construct libhadoop.so +HADOOP_OBJS = $(foreach path,$(addprefix ../,$(SUBDIRS)),$(wildcard $(path)/*.lo)) +AM_LDFLAGS = @JNI_LDFLAGS@ -m$(JVM_DATA_MODEL) + +lib_LTLIBRARIES = libhadoop.la +libhadoop_la_SOURCES = +libhadoop_la_LDFLAGS = -version-info 1:0:0 +libhadoop_la_LIBADD = $(HADOOP_OBJS) -ldl -ljvm + +# +#vim: sw=4: ts=4: noet +# diff --git a/src/native/lib/Makefile.in b/src/native/lib/Makefile.in new file mode 100644 index 0000000..c43e1fb --- /dev/null +++ b/src/native/lib/Makefile.in @@ -0,0 +1,423 @@ +# Makefile.in generated by automake 1.9.6 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005 Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +# Makefile template for building libhadoop.so +# + +# +# Notes: +# 1. This makefile is designed to do the actual builds in $(HADOOP_HOME)/build/native/${os.name}-${os.arch}/lib +# 2. This makefile depends on the following environment variables to function correctly: +# * HADOOP_NATIVE_SRCDIR +# * JAVA_HOME +# * OS_ARCH +# All these are setup by build.xml and/or the top-level makefile. +# + +srcdir = @srcdir@ +top_srcdir = @top_srcdir@ +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +top_builddir = .. 
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +INSTALL = @INSTALL@ +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = lib +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/acinclude.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; +am__installdirs = "$(DESTDIR)$(libdir)" +libLTLIBRARIES_INSTALL = $(INSTALL) +LTLIBRARIES = $(lib_LTLIBRARIES) +am__DEPENDENCIES_1 = $(foreach path,$(addprefix \ + ../,$(SUBDIRS)),$(wildcard $(path)/*.lo)) +libhadoop_la_DEPENDENCIES = $(am__DEPENDENCIES_1) +am_libhadoop_la_OBJECTS = +libhadoop_la_OBJECTS = $(am_libhadoop_la_OBJECTS) +DEFAULT_INCLUDES = -I. -I$(srcdir) -I$(top_builddir) +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) \ + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CFLAGS) $(CFLAGS) +CCLD = $(CC) +LINK = $(LIBTOOL) --tag=CC --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ + $(AM_LDFLAGS) $(LDFLAGS) -o $@ +SOURCES = $(libhadoop_la_SOURCES) +DIST_SOURCES = $(libhadoop_la_SOURCES) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMDEP_FALSE = @AMDEP_FALSE@ +AMDEP_TRUE = @AMDEP_TRUE@ +AMTAR = @AMTAR@ +AR = @AR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO = @ECHO@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +F77 = @F77@ +FFLAGS = @FFLAGS@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +JNI_CPPFLAGS = @JNI_CPPFLAGS@ +JNI_LDFLAGS = @JNI_LDFLAGS@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAKEINFO = @MAKEINFO@ +OBJEXT = @OBJEXT@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +VERSION = @VERSION@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_F77 = @ac_ct_F77@ +ac_ct_RANLIB = @ac_ct_RANLIB@ +ac_ct_STRIP = @ac_ct_STRIP@ +am__fastdepCC_FALSE = @am__fastdepCC_FALSE@ +am__fastdepCC_TRUE = @am__fastdepCC_TRUE@ +am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@ 
+am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +datadir = @datadir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ + +# Add .lo files in $(SUBDIRS) to construct libhadoop.so +HADOOP_OBJS = $(foreach path,$(addprefix ../,$(SUBDIRS)),$(wildcard $(path)/*.lo)) +AM_LDFLAGS = @JNI_LDFLAGS@ -m$(JVM_DATA_MODEL) +lib_LTLIBRARIES = libhadoop.la +libhadoop_la_SOURCES = +libhadoop_la_LDFLAGS = -version-info 1:0:0 +libhadoop_la_LIBADD = $(HADOOP_OBJS) -ldl -ljvm +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu lib/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --gnu lib/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +install-libLTLIBRARIES: $(lib_LTLIBRARIES) + @$(NORMAL_INSTALL) + test -z "$(libdir)" || $(mkdir_p) "$(DESTDIR)$(libdir)" + @list='$(lib_LTLIBRARIES)'; for p in $$list; do \ + if test -f $$p; then \ + f=$(am__strip_dir) \ + echo " $(LIBTOOL) --mode=install $(libLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(libdir)/$$f'"; \ + $(LIBTOOL) --mode=install $(libLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(libdir)/$$f"; \ + else :; fi; \ + done + +uninstall-libLTLIBRARIES: + @$(NORMAL_UNINSTALL) + @set -x; list='$(lib_LTLIBRARIES)'; for p in $$list; do \ + p=$(am__strip_dir) \ + echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$p'"; \ + $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$p"; \ + done + +clean-libLTLIBRARIES: + -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) + @list='$(lib_LTLIBRARIES)'; for p in $$list; do \ + dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ + test "$$dir" != "$$p" || dir=.; \ + echo "rm -f \"$${dir}/so_locations\""; \ + rm -f "$${dir}/so_locations"; \ + done +libhadoop.la: $(libhadoop_la_OBJECTS) $(libhadoop_la_DEPENDENCIES) + $(LINK) -rpath $(libdir) 
$(libhadoop_la_LDFLAGS) $(libhadoop_la_OBJECTS) $(libhadoop_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +distclean-libtool: + -rm -f libtool +uninstall-info-am: +tags: TAGS +TAGS: + +ctags: CTAGS +CTAGS: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \ + list='$(DISTFILES)'; for file in $$list; do \ + case $$file in \ + $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \ + $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \ + esac; \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test "$$dir" != "$$file" && test "$$dir" != "."; then \ + dir="/$$dir"; \ + $(mkdir_p) "$(distdir)$$dir"; \ + else \ + dir=''; \ + fi; \ + if test -d $$d/$$file; then \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ + fi; \ + cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ + else \ + test -f $(distdir)/$$file \ + || cp -p $$d/$$file $(distdir)/$$file \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(LTLIBRARIES) +installdirs: + for dir in "$(DESTDIR)$(libdir)"; do \ + test -z "$$dir" || $(mkdir_p) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ + mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-libtool + +dvi: dvi-am + +dvi-am: + +html: html-am + +info: info-am + +info-am: + +install-data-am: + +install-exec-am: install-libLTLIBRARIES + +install-info: install-info-am + +install-man: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-info-am uninstall-libLTLIBRARIES + +.PHONY: all all-am check check-am clean clean-generic \ + clean-libLTLIBRARIES clean-libtool distclean distclean-compile \ + distclean-generic distclean-libtool distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-exec install-exec-am install-info \ + install-info-am install-libLTLIBRARIES install-man \ + install-strip installcheck installcheck-am installdirs \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ + pdf pdf-am ps ps-am uninstall uninstall-am uninstall-info-am \ + uninstall-libLTLIBRARIES + + +# +#vim: sw=4: ts=4: noet +# +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/src/native/packageNativeHadoop.sh b/src/native/packageNativeHadoop.sh new file mode 100755 index 0000000..6088944 --- /dev/null +++ b/src/native/packageNativeHadoop.sh @@ -0,0 +1,67 @@ +#!/bin/sh + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# packageNativeHadoop.sh - A simple script to help package native-hadoop libraries + +# +# Note: +# This script relies on the following environment variables to function correctly: +# * BASE_NATIVE_LIB_DIR +# * BUILD_NATIVE_DIR +# * DIST_LIB_DIR +# All these are setup by build.xml. +# + +TAR='tar cf -' +UNTAR='tar xfBp -' + +# Copy the pre-built libraries in $BASE_NATIVE_LIB_DIR +if [ -d $BASE_NATIVE_LIB_DIR ] +then + for platform in `ls $BASE_NATIVE_LIB_DIR` + do + if [ ! -d $DIST_LIB_DIR/$platform ] + then + mkdir -p $DIST_LIB_DIR/$platform + echo "Created $DIST_LIB_DIR/$platform" + fi + echo "Copying libraries in $BASE_NATIVE_LIB_DIR/$platform to $DIST_LIB_DIR/$platform/" + cd $BASE_NATIVE_LIB_DIR/$platform/ + $TAR *hadoop* | (cd $DIST_LIB_DIR/$platform/; $UNTAR) + done +fi + +# Copy the custom-built libraries in $BUILD_DIR +if [ -d $BUILD_NATIVE_DIR ] +then + for platform in `ls $BUILD_NATIVE_DIR` + do + if [ ! 
-d $DIST_LIB_DIR/$platform ] + then + mkdir -p $DIST_LIB_DIR/$platform + echo "Created $DIST_LIB_DIR/$platform" + fi + echo "Copying libraries in $BUILD_NATIVE_DIR/$platform/lib to $DIST_LIB_DIR/$platform/" + cd $BUILD_NATIVE_DIR/$platform/lib + $TAR *hadoop* | (cd $DIST_LIB_DIR/$platform/; $UNTAR) + $TAR *liblzma* | (cd $DIST_LIB_DIR/$platform/; $UNTAR) + mv $DIST_LIB_DIR/$platform/liblzma.so $DIST_LIB_DIR/$platform/liblzma.so.0 + done +fi + +#vim: ts=2: sw=2: et diff --git a/src/native/src/org/apache/hadoop/io/compress/lzma/LzmaCompressor.c b/src/native/src/org/apache/hadoop/io/compress/lzma/LzmaCompressor.c new file mode 100644 index 0000000..7fb9faa --- /dev/null +++ b/src/native/src/org/apache/hadoop/io/compress/lzma/LzmaCompressor.c @@ -0,0 +1,238 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined HAVE_CONFIG_H + #include <config.h> +#endif + +#if defined HAVE_STDIO_H + #include <stdio.h> +#else + #error 'stdio.h not found' +#endif + +#if defined HAVE_STDLIB_H + #include <stdlib.h> +#else + #error 'stdlib.h not found' +#endif + +#include "org_apache_hadoop_io_compress_lzma.h" + +// The lzma library-handle +static void *liblzma = NULL; + +static jfieldID LzmaCompressor_clazz; +static jfieldID LzmaCompressor_stream; +static jfieldID LzmaCompressor_uncompressedDirectBuf; +static jfieldID LzmaCompressor_uncompressedDirectBufOff; +static jfieldID LzmaCompressor_uncompressedDirectBufLen; +static jfieldID LzmaCompressor_compressedDirectBuf; +static jfieldID LzmaCompressor_directBufferSize; +static jfieldID LzmaCompressor_finish; +static jfieldID LzmaCompressor_finished; + +static int (*dlsym_lzma_easy_encoder)(lzma_stream *strm, lzma_easy_level level); +static int (*dlsym_lzma_code)(lzma_stream *strm, lzma_action action); +static int (*dlsym_lzma_end)(lzma_stream *strm); +static int (*dlsym_lzma_auto_decoder)(lzma_stream *strm, uint64_t memlimit, uint32_t flags); + +JNIEXPORT void JNICALL +Java_org_apache_hadoop_io_compress_lzma_LzmaCompressor_initIDs( + JNIEnv *env, jclass class + ) { + // Load liblzma.so + liblzma = dlopen(HADOOP_LZMA_LIBRARY, RTLD_LAZY | RTLD_GLOBAL); + if (!liblzma) { + THROW(env, "java/lang/UnsatisfiedLinkError", "Cannot load liblzma.so!"); + return; + } + + // Locate the requisite symbols from liblzma.so + dlerror(); // Clear any existing error + LOAD_DYNAMIC_SYMBOL(dlsym_lzma_easy_encoder, env, liblzma, "lzma_easy_encoder"); + LOAD_DYNAMIC_SYMBOL(dlsym_lzma_code, env, liblzma, "lzma_code"); + LOAD_DYNAMIC_SYMBOL(dlsym_lzma_end, env, liblzma, "lzma_end"); + LOAD_DYNAMIC_SYMBOL(dlsym_lzma_auto_decoder, env, liblzma, "lzma_auto_decoder"); + + // Initialize the requisite fieldIds + LzmaCompressor_clazz = (*env)->GetStaticFieldID(env, class, "clazz", + "Ljava/lang/Class;"); + LzmaCompressor_stream = (*env)->GetFieldID(env, class,
"stream", "J"); + LzmaCompressor_finish = (*env)->GetFieldID(env, class, "finish", "Z"); + LzmaCompressor_finished = (*env)->GetFieldID(env, class, "finished", "Z"); + LzmaCompressor_uncompressedDirectBuf = (*env)->GetFieldID(env, class, + "uncompressedDirectBuf", + "Ljava/nio/Buffer;"); + LzmaCompressor_uncompressedDirectBufOff = (*env)->GetFieldID(env, class, + "uncompressedDirectBufOff", "I"); + LzmaCompressor_uncompressedDirectBufLen = (*env)->GetFieldID(env, class, + "uncompressedDirectBufLen", "I"); + LzmaCompressor_compressedDirectBuf = (*env)->GetFieldID(env, class, + "compressedDirectBuf", + "Ljava/nio/Buffer;"); + LzmaCompressor_directBufferSize = (*env)->GetFieldID(env, class, + "directBufferSize", "I"); + +} + +JNIEXPORT jlong JNICALL +Java_org_apache_hadoop_io_compress_lzma_LzmaCompressor_init( + JNIEnv *env, jclass class, jint level + ) { + //Create a lzma_stream + lzma_stream *stream = NULL; + + stream = malloc(sizeof(lzma_stream)); + if (!stream) { + THROW(env, "java/lang/OutOfMemoryError", NULL); + return (jlong)0; + } + + // Initialize stream + lzma_stream tmp = (lzma_stream)LZMA_STREAM_INIT; + *stream = tmp; + + lzma_ret ret = (*dlsym_lzma_easy_encoder)(stream, level); + + if (ret != LZMA_OK) { + // Contingency - Report error by throwing appropriate exceptions + free(stream); + stream = NULL; + THROW(env, "java/lang/InternalError", NULL); + } + + return (jlong)(stream); +} + +JNIEXPORT jint JNICALL +Java_org_apache_hadoop_io_compress_lzma_LzmaCompressor_compressBytesDirect( + JNIEnv *env, jobject this + ) { + // Get members of LzmaCompressor + lzma_stream *stream = (lzma_stream*)( + (*env)->GetLongField(env, this, + LzmaCompressor_stream) + ); + if (!stream) { + THROW(env, "java/lang/NullPointerException", NULL); + return (jint)0; + } + + // Get members of LzmaCompressor + jobject clazz = (*env)->GetStaticObjectField(env, this, + LzmaCompressor_clazz); + jobject uncompressed_direct_buf = (*env)->GetObjectField(env, this, + LzmaCompressor_uncompressedDirectBuf); + jint uncompressed_direct_buf_off = (*env)->GetIntField(env, this, + LzmaCompressor_uncompressedDirectBufOff); + jint uncompressed_direct_buf_len = (*env)->GetIntField(env, this, + LzmaCompressor_uncompressedDirectBufLen); + + jobject compressed_direct_buf = (*env)->GetObjectField(env, this, + LzmaCompressor_compressedDirectBuf); + jint compressed_direct_buf_len = (*env)->GetIntField(env, this, + LzmaCompressor_directBufferSize); + + jboolean finish = (*env)->GetBooleanField(env, this, LzmaCompressor_finish); + + // Get the input direct buffer + LOCK_CLASS(env, clazz, "LzmaCompressor"); + uint8_t* uncompressed_bytes = (*env)->GetDirectBufferAddress(env, + uncompressed_direct_buf); + UNLOCK_CLASS(env, clazz, "LzmaCompressor"); + + if (uncompressed_bytes == 0) { + return (jint)0; + } + + // Get the output direct buffer + LOCK_CLASS(env, clazz, "LzmaCompressor"); + uint8_t* compressed_bytes = (*env)->GetDirectBufferAddress(env, + compressed_direct_buf); + UNLOCK_CLASS(env, clazz, "LzmaCompressor"); + + if (compressed_bytes == 0) { + return (jint)0; + } + + // Re-calibrate the lzma_stream + stream->next_in = uncompressed_bytes + uncompressed_direct_buf_off; + stream->next_out = compressed_bytes; + stream->avail_in = uncompressed_direct_buf_len; + stream->avail_out = compressed_direct_buf_len; + + // Compress + lzma_ret ret = dlsym_lzma_code(stream, finish ? LZMA_FINISH : LZMA_RUN); + + jint no_compressed_bytes = 0; + + switch (ret) { + // Contingency? 
- Report error by throwing appropriate exceptions + case LZMA_STREAM_END: + { + (*env)->SetBooleanField(env, this, LzmaCompressor_finished, JNI_TRUE); + } // cascade + case LZMA_OK: + { + uncompressed_direct_buf_off += uncompressed_direct_buf_len - stream->avail_in; + (*env)->SetIntField(env, this, + LzmaCompressor_uncompressedDirectBufOff, uncompressed_direct_buf_off); + (*env)->SetIntField(env, this, + LzmaCompressor_uncompressedDirectBufLen, stream->avail_in); + no_compressed_bytes = compressed_direct_buf_len - stream->avail_out; + } + break; + default: + { + fprintf(stderr, "java/lang/InternalError would throw: %d\n", (int)ret); + THROW(env, "java/lang/InternalError", NULL); + } + break; + } + + return no_compressed_bytes; +} + +JNIEXPORT jlong JNICALL +Java_org_apache_hadoop_io_compress_lzma_LzmaCompressor_getBytesRead( + JNIEnv *env, jclass class, jlong stream + ) { + return ((lzma_stream*)(stream))->total_in; +} + +JNIEXPORT jlong JNICALL +Java_org_apache_hadoop_io_compress_lzma_LzmaCompressor_getBytesWritten( + JNIEnv *env, jclass class, jlong stream + ) { + return ((lzma_stream*)(stream))->total_out; +} + +JNIEXPORT void JNICALL +Java_org_apache_hadoop_io_compress_lzma_LzmaCompressor_end( + JNIEnv *env, jclass class, jlong stream + ) { + dlsym_lzma_end((lzma_stream*)(stream)); + free((lzma_stream*)(stream)); + //(lzma_stream*)(stream) = NULL; +} + +/** + * vim: sw=2: ts=2: et: + */ + diff --git a/src/native/src/org/apache/hadoop/io/compress/lzma/LzmaDecompressor.c b/src/native/src/org/apache/hadoop/io/compress/lzma/LzmaDecompressor.c new file mode 100644 index 0000000..eb1ab8a --- /dev/null +++ b/src/native/src/org/apache/hadoop/io/compress/lzma/LzmaDecompressor.c @@ -0,0 +1,244 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#if defined HAVE_CONFIG_H + #include <config.h> +#endif + +#if defined HAVE_STDIO_H + #include <stdio.h> +#else + #error 'stdio.h not found' +#endif + +#if defined HAVE_STDLIB_H + #include <stdlib.h> +#else + #error 'stdlib.h not found' +#endif + +#if defined HAVE_STRING_H + #include <string.h> +#else + #error 'string.h not found' +#endif + +#if defined HAVE_DLFCN_H + #include <dlfcn.h> +#else + #error 'dlfcn.h not found' +#endif + +#include "org_apache_hadoop_io_compress_lzma.h" + +// The lzma library-handle +static void *liblzma = NULL; + +static jfieldID LzmaDecompressor_clazz; +static jfieldID LzmaDecompressor_stream; +static jfieldID LzmaDecompressor_compressedDirectBuf; +static jfieldID LzmaDecompressor_compressedDirectBufOff; +static jfieldID LzmaDecompressor_compressedDirectBufLen; +static jfieldID LzmaDecompressor_uncompressedDirectBuf; +static jfieldID LzmaDecompressor_directBufferSize; +static jfieldID LzmaDecompressor_finished; + +static int (*dlsym_lzma_easy_encoder)(lzma_stream *strm, lzma_easy_level level); +static int (*dlsym_lzma_code)(lzma_stream *strm, lzma_action action); +static int (*dlsym_lzma_end)(lzma_stream *strm); +static int (*dlsym_lzma_auto_decoder)(lzma_stream *strm, uint64_t memlimit, uint32_t flags); + +JNIEXPORT void JNICALL +Java_org_apache_hadoop_io_compress_lzma_LzmaDecompressor_initIDs( + JNIEnv *env, jclass class + ) { + // Load liblzma.so + liblzma = dlopen(HADOOP_LZMA_LIBRARY, RTLD_LAZY | RTLD_GLOBAL); + if (!liblzma) { + THROW(env, "java/lang/UnsatisfiedLinkError", "Cannot load liblzma.so"); + return; + } + + // Locate the requisite symbols from liblzma.so + dlerror(); // Clear any existing error + LOAD_DYNAMIC_SYMBOL(dlsym_lzma_easy_encoder, env, liblzma, "lzma_easy_encoder"); + LOAD_DYNAMIC_SYMBOL(dlsym_lzma_code, env, liblzma, "lzma_code"); + LOAD_DYNAMIC_SYMBOL(dlsym_lzma_end, env, liblzma, "lzma_end"); + LOAD_DYNAMIC_SYMBOL(dlsym_lzma_auto_decoder, env, liblzma, "lzma_auto_decoder"); + + // Initialize the requisite fieldIds + LzmaDecompressor_clazz = (*env)->GetStaticFieldID(env, class, "clazz", + "Ljava/lang/Class;"); + LzmaDecompressor_stream = (*env)->GetFieldID(env, class, "stream", "J"); + + LzmaDecompressor_finished = (*env)->GetFieldID(env, class, "finished", "Z"); + + LzmaDecompressor_compressedDirectBuf = (*env)->GetFieldID(env, class, + "compressedDirectBuf", + "Ljava/nio/Buffer;"); + LzmaDecompressor_compressedDirectBufOff = (*env)->GetFieldID(env, class, + "compressedDirectBufOff", "I"); + LzmaDecompressor_compressedDirectBufLen = (*env)->GetFieldID(env, class, + "compressedDirectBufLen", "I"); + LzmaDecompressor_uncompressedDirectBuf = (*env)->GetFieldID(env, class, + "uncompressedDirectBuf", + "Ljava/nio/Buffer;"); + LzmaDecompressor_directBufferSize = (*env)->GetFieldID(env, class, + "directBufferSize", "I"); +} + +JNIEXPORT jlong JNICALL +Java_org_apache_hadoop_io_compress_lzma_LzmaDecompressor_init( + JNIEnv *env, jclass class + ) { + //Create a lzma_stream + lzma_stream *stream = NULL; + + stream = malloc(sizeof(lzma_stream)); + if (!stream) { + THROW(env, "java/lang/OutOfMemoryError", NULL); + return (jlong)0; + } + + // Initialize stream + lzma_stream tmp = LZMA_STREAM_INIT; + *stream = tmp; + + lzma_ret ret = (*dlsym_lzma_auto_decoder)(stream, 100<<20, 0); + + if (ret != LZMA_OK) { + // Contingency - Report error by throwing appropriate exceptions + free(stream); + stream = NULL; + THROW(env, "java/lang/InternalError", NULL); + } + return (jlong)(stream); +} + +JNIEXPORT jint JNICALL +Java_org_apache_hadoop_io_compress_lzma_LzmaDecompressor_decompressBytesDirect( +
JNIEnv *env, jobject this + ) { + // Get members of LzmaCompressor + lzma_stream *stream = (lzma_stream*)( + (*env)->GetLongField(env, this, + LzmaDecompressor_stream) + ); + if (!stream) { + THROW(env, "java/lang/NullPointerException", NULL); + return (jint)0; + } + + // Get members of ZlibDecompressor + jobject clazz = (*env)->GetStaticObjectField(env, this, + LzmaDecompressor_clazz); + jarray compressed_direct_buf = (jarray)(*env)->GetObjectField(env, this, + LzmaDecompressor_compressedDirectBuf); + jint compressed_direct_buf_off = (*env)->GetIntField(env, this, + LzmaDecompressor_compressedDirectBufOff); + jint compressed_direct_buf_len = (*env)->GetIntField(env, this, + LzmaDecompressor_compressedDirectBufLen); + + jarray uncompressed_direct_buf = (jarray)(*env)->GetObjectField(env, this, + LzmaDecompressor_uncompressedDirectBuf); + jint uncompressed_direct_buf_len = (*env)->GetIntField(env, this, + LzmaDecompressor_directBufferSize); + + // Get the input direct buffer + LOCK_CLASS(env, clazz, "LzmaDecompressor"); + uint8_t *compressed_bytes = (*env)->GetDirectBufferAddress(env, + compressed_direct_buf); + UNLOCK_CLASS(env, clazz, "LzmaDecompressor"); + + if (!compressed_bytes) { + return (jint)0; + } + + // Get the output direct buffer + LOCK_CLASS(env, clazz, "LzmaDecompressor"); + uint8_t *uncompressed_bytes = (*env)->GetDirectBufferAddress(env, + uncompressed_direct_buf); + UNLOCK_CLASS(env, clazz, "LzmaDecompressor"); + + if (!uncompressed_bytes) { + return (jint)0; + } + + // Re-calibrate the lzma_stream + stream->next_in = compressed_bytes + compressed_direct_buf_off; + stream->next_out = uncompressed_bytes; + stream->avail_in = compressed_direct_buf_len; + stream->avail_out = uncompressed_direct_buf_len; + + // Decompress + lzma_ret ret = dlsym_lzma_code(stream, LZMA_RUN); + + jint no_decompressed_bytes = 0; + + // Contingency? 
- Report error by throwing appropriate exceptions + switch (ret) { + case LZMA_STREAM_END: + { + (*env)->SetBooleanField(env, this, LzmaDecompressor_finished, JNI_TRUE); + } // cascade down + case LZMA_OK: + { + compressed_direct_buf_off += compressed_direct_buf_len - stream->avail_in; + (*env)->SetIntField(env, this, LzmaDecompressor_compressedDirectBufOff, + compressed_direct_buf_off); + (*env)->SetIntField(env, this, LzmaDecompressor_compressedDirectBufLen, + stream->avail_in); + no_decompressed_bytes = uncompressed_direct_buf_len - stream->avail_out; + } + break; + default: + { + fprintf(stderr, "java/lang/InternalError would throw: %d\n", (int)ret); + THROW(env, "java/lang/InternalError", NULL); + } + break; + } + return no_decompressed_bytes; +} + +JNIEXPORT jlong JNICALL +Java_org_apache_hadoop_io_compress_lzma_LzmaDecompressor_getBytesRead( + JNIEnv *env, jclass class, jlong stream + ) { + return ((lzma_stream*)(stream))->total_in; +} + +JNIEXPORT jlong JNICALL +Java_org_apache_hadoop_io_compress_lzma_LzmaDecompressor_getBytesWritten( + JNIEnv *env, jclass class, jlong stream + ) { + return ((lzma_stream*)(stream))->total_out; +} + +JNIEXPORT void JNICALL +Java_org_apache_hadoop_io_compress_lzma_LzmaDecompressor_end( + JNIEnv *env, jclass class, jlong stream + ) { + dlsym_lzma_end((lzma_stream*)(stream)); + free((lzma_stream*)(stream)); +} + +/** + * vim: sw=2: ts=2: et: + */ + diff --git a/src/native/src/org/apache/hadoop/io/compress/lzma/Makefile.am b/src/native/src/org/apache/hadoop/io/compress/lzma/Makefile.am new file mode 100644 index 0000000..7127a7d --- /dev/null +++ b/src/native/src/org/apache/hadoop/io/compress/lzma/Makefile.am @@ -0,0 +1,50 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +# Makefile template for building native 'lzma' for hadoop. +# + +# +# Notes: +# 1. This makefile is designed to do the actual builds in $(HADOOP_HOME)/build/native/${os.name}-${os.arch}/$(subdir) . +# 2. This makefile depends on the following environment variables to function correctly: +# * HADOOP_NATIVE_SRCDIR +# * JAVA_HOME +# * JVM_DATA_MODEL +# * OS_ARCH +# * PLATFORM +# All these are setup by build.xml and/or the top-level makefile. +# 3. The creation of requisite jni headers/stubs are also done by build.xml and they are +# assumed to be in $(HADOOP_HOME)/build/native/src/org/apache/hadoop/io/compress/lzma. 
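As a concrete illustration of note 3 above, the JNI stub header consumed by these sources could be generated roughly as follows; this is a sketch of the step build.xml performs, and the classpath location is an assumption:

# Sketch only -- build.xml drives the real header/stub generation.
cd $HADOOP_HOME
javah -jni -classpath build/classes \
  -d build/native/src/org/apache/hadoop/io/compress/lzma \
  org.apache.hadoop.io.compress.lzma.LzmaCompressor \
  org.apache.hadoop.io.compress.lzma.LzmaDecompressor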
+# + +# The 'vpath directive' to locate the actual source files +vpath %.c $(HADOOP_NATIVE_SRCDIR)/$(subdir) + +AM_CPPFLAGS = @JNI_CPPFLAGS@ -I$(HADOOP_NATIVE_SRCDIR)/src +AM_LDFLAGS = @JNI_LDFLAGS@ +AM_CFLAGS = -g -Wall -fPIC -O2 -m$(JVM_DATA_MODEL) + +noinst_LTLIBRARIES = libnativelzma.la +libnativelzma_la_SOURCES = LzmaCompressor.c LzmaDecompressor.c +libnativelzma_la_LIBADD = -ldl -ljvm + +# +#vim: sw=4: ts=4: noet +# diff --git a/src/native/src/org/apache/hadoop/io/compress/lzma/Makefile.in b/src/native/src/org/apache/hadoop/io/compress/lzma/Makefile.in new file mode 100644 index 0000000..b83d1e6 --- /dev/null +++ b/src/native/src/org/apache/hadoop/io/compress/lzma/Makefile.in @@ -0,0 +1,470 @@ +# Makefile.in generated by automake 1.9.6 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005 Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +# Makefile template for building native 'lzma' for hadoop. +# + +# +# Notes: +# 1. This makefile is designed to do the actual builds in $(HADOOP_HOME)/build/native/${os.name}-${os.arch}/$(subdir) . +# 2. This makefile depends on the following environment variables to function correctly: +# * HADOOP_NATIVE_SRCDIR +# * JAVA_HOME +# * JVM_DATA_MODEL +# * OS_ARCH +# * PLATFORM +# All these are setup by build.xml and/or the top-level makefile. +# 3. The creation of requisite jni headers/stubs are also done by build.xml and they are +# assumed to be in $(HADOOP_HOME)/build/native/src/org/apache/hadoop/io/compress/lzma. +# + +srcdir = @srcdir@ +top_srcdir = @top_srcdir@ +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +top_builddir = ../../../../../../.. 
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +INSTALL = @INSTALL@ +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = src/org/apache/hadoop/io/compress/lzma +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/acinclude.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +LTLIBRARIES = $(noinst_LTLIBRARIES) +libnativelzma_la_DEPENDENCIES = +am_libnativelzma_la_OBJECTS = LzmaCompressor.lo LzmaDecompressor.lo +libnativelzma_la_OBJECTS = $(am_libnativelzma_la_OBJECTS) +DEFAULT_INCLUDES = -I. -I$(srcdir) -I$(top_builddir) +depcomp = $(SHELL) $(top_srcdir)/config/depcomp +am__depfiles_maybe = depfiles +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) \ + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CFLAGS) $(CFLAGS) +CCLD = $(CC) +LINK = $(LIBTOOL) --tag=CC --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ + $(AM_LDFLAGS) $(LDFLAGS) -o $@ +SOURCES = $(libnativelzma_la_SOURCES) +DIST_SOURCES = $(libnativelzma_la_SOURCES) +ETAGS = etags +CTAGS = ctags +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMDEP_FALSE = @AMDEP_FALSE@ +AMDEP_TRUE = @AMDEP_TRUE@ +AMTAR = @AMTAR@ +AR = @AR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO = @ECHO@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +F77 = @F77@ +FFLAGS = @FFLAGS@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +JNI_CPPFLAGS = @JNI_CPPFLAGS@ +JNI_LDFLAGS = @JNI_LDFLAGS@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAKEINFO = @MAKEINFO@ +OBJEXT = @OBJEXT@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +VERSION = @VERSION@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_F77 = @ac_ct_F77@ +ac_ct_RANLIB = @ac_ct_RANLIB@ +ac_ct_STRIP = @ac_ct_STRIP@ +am__fastdepCC_FALSE = @am__fastdepCC_FALSE@ +am__fastdepCC_TRUE = @am__fastdepCC_TRUE@ +am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@ +am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias 
= @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +datadir = @datadir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +AM_CPPFLAGS = @JNI_CPPFLAGS@ -I$(HADOOP_NATIVE_SRCDIR)/src +AM_LDFLAGS = @JNI_LDFLAGS@ +AM_CFLAGS = -g -Wall -fPIC -O2 -m$(JVM_DATA_MODEL) +noinst_LTLIBRARIES = libnativelzma.la +libnativelzma_la_SOURCES = LzmaCompressor.c LzmaDecompressor.c +libnativelzma_la_LIBADD = -ldl -ljvm +all: all-am + +.SUFFIXES: +.SUFFIXES: .c .lo .o .obj +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu src/org/apache/hadoop/io/compress/lzma/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --gnu src/org/apache/hadoop/io/compress/lzma/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +clean-noinstLTLIBRARIES: + -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) + @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ + dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ + test "$$dir" != "$$p" || dir=.; \ + echo "rm -f \"$${dir}/so_locations\""; \ + rm -f "$${dir}/so_locations"; \ + done +libnativelzma.la: $(libnativelzma_la_OBJECTS) $(libnativelzma_la_DEPENDENCIES) + $(LINK) $(libnativelzma_la_LDFLAGS) $(libnativelzma_la_OBJECTS) $(libnativelzma_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/LzmaCompressor.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/LzmaDecompressor.Plo@am__quote@ + +.c.o: +@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ $<; \ +@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c $< + +.c.obj: +@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ `$(CYGPATH_W) '$<'`; \ +@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; 
else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + +.c.lo: +@am__fastdepCC_TRUE@ if $(LTCOMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ $<; \ +@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Plo"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +distclean-libtool: + -rm -f libtool +uninstall-info-am: + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$tags $$unique; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + test -z "$(CTAGS_ARGS)$$tags$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$tags $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && cd $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) $$here + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \ + list='$(DISTFILES)'; for file in $$list; do \ + case $$file in \ + $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \ + $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \ + esac; \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test "$$dir" != "$$file" && test "$$dir" != "."; then \ + dir="/$$dir"; \ + $(mkdir_p) "$(distdir)$$dir"; \ + else \ + dir=''; \ + fi; \ + if test -d $$d/$$file; then \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ + fi; \ + cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ + else \ + test -f $(distdir)/$$file \ + || cp -p $$d/$$file $(distdir)/$$file \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(LTLIBRARIES) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) 
$(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." +clean: clean-am + +clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ + mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-libtool distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +info: info-am + +info-am: + +install-data-am: + +install-exec-am: + +install-info: install-info-am + +install-man: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-info-am + +.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ + clean-libtool clean-noinstLTLIBRARIES ctags distclean \ + distclean-compile distclean-generic distclean-libtool \ + distclean-tags distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-exec \ + install-exec-am install-info install-info-am install-man \ + install-strip installcheck installcheck-am installdirs \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ + pdf pdf-am ps ps-am tags uninstall uninstall-am \ + uninstall-info-am + + +# The 'vpath directive' to locate the actual source files +vpath %.c $(HADOOP_NATIVE_SRCDIR)/$(subdir) + +# +#vim: sw=4: ts=4: noet +# +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/src/native/src/org/apache/hadoop/io/compress/lzma/org_apache_hadoop_io_compress_lzma.h b/src/native/src/org/apache/hadoop/io/compress/lzma/org_apache_hadoop_io_compress_lzma.h new file mode 100644 index 0000000..e7cd979 --- /dev/null +++ b/src/native/src/org/apache/hadoop/io/compress/lzma/org_apache_hadoop_io_compress_lzma.h @@ -0,0 +1,78 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#if !defined ORG_APACHE_HADOOP_IO_COMPRESS_LZMA_LZMA_H +#define ORG_APACHE_HADOOP_IO_COMPRESS_LZMA_LZMA_H + +#if defined HAVE_CONFIG_H + #include +#endif + +#if defined HAVE_STDDEF_H + #include +#else + #error 'stddef.h not found' +#endif + +#if defined HAVE_DLFCN_H + #include +#else + #error "dlfcn.h not found" +#endif + +#if defined HAVE_JNI_H + #include +#else + #error 'jni.h not found' +#endif + +//#if defined HAVE_LZMA_H + #include +//#else +// #error 'lzma.h not found' +//#endif + +#include "org_apache_hadoop.h" + +/* + * No compression; the data is just wrapped into .lzma + * container. + */ +#define LZMA_EASY_COPY 0 +#define LZMA_EASY_LZMA2_1 1 +#define LZMA_EASY_LZMA_2 2 +#define LZMA_EASY_LZMA_3 3 +#define LZMA_EASY_LZMA_4 4 +#define LZMA_EASY_LZMA_5 5 +#define LZMA_EASY_LZMA_6 6 +#define LZMA_EASY_LZMA_7 7 +#define LZMA_EASY_LZMA_8 8 +#define LZMA_EASY_LZMA_9 9 + +#define COMPRESS_LEVEL_BEST LZMA_EASY_LZMA_9 +#define COMPRESS_LEVEL_DEFAULT LZMA_EASY_LZMA_7 + +#define kBufferSize (1 << 15) + +/* A helper macro to convert the java 'function-pointer' to a void*. */ +#define FUNC_PTR(func_ptr) ((void*)((ptrdiff_t)(func_ptr))) + +/* A helper macro to convert the void* to the java 'function-pointer'. */ +#define JLONG(func_ptr) ((jlong)((ptrdiff_t)(func_ptr))) + +#endif //ORG_APACHE_HADOOP_IO_COMPRESS_LZMA_LZMA_H diff --git a/src/native/src/org/apache/hadoop/io/compress/zlib/Makefile.am b/src/native/src/org/apache/hadoop/io/compress/zlib/Makefile.am new file mode 100644 index 0000000..a2cb5fe --- /dev/null +++ b/src/native/src/org/apache/hadoop/io/compress/zlib/Makefile.am @@ -0,0 +1,50 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +# Makefile template for building native 'zlib' for hadoop. +# + +# +# Notes: +# 1. This makefile is designed to do the actual builds in $(HADOOP_HOME)/build/native/${os.name}-${os.arch}/$(subdir) . +# 2. This makefile depends on the following environment variables to function correctly: +# * HADOOP_NATIVE_SRCDIR +# * JAVA_HOME +# * JVM_DATA_MODEL +# * OS_ARCH +# * PLATFORM +# All these are setup by build.xml and/or the top-level makefile. +# 3. The creation of requisite jni headers/stubs are also done by build.xml and they are +# assumed to be in $(HADOOP_HOME)/build/native/src/org/apache/hadoop/io/compress/zlib. 
+# + +# The 'vpath directive' to locate the actual source files +vpath %.c $(HADOOP_NATIVE_SRCDIR)/$(subdir) + +AM_CPPFLAGS = @JNI_CPPFLAGS@ -I$(HADOOP_NATIVE_SRCDIR)/src +AM_LDFLAGS = @JNI_LDFLAGS@ +AM_CFLAGS = -g -Wall -fPIC -O2 -m$(JVM_DATA_MODEL) + +noinst_LTLIBRARIES = libnativezlib.la +libnativezlib_la_SOURCES = ZlibCompressor.c ZlibDecompressor.c +libnativezlib_la_LIBADD = -ldl -ljvm + +# +#vim: sw=4: ts=4: noet +# diff --git a/src/native/src/org/apache/hadoop/io/compress/zlib/Makefile.in b/src/native/src/org/apache/hadoop/io/compress/zlib/Makefile.in new file mode 100644 index 0000000..f95bdd9 --- /dev/null +++ b/src/native/src/org/apache/hadoop/io/compress/zlib/Makefile.in @@ -0,0 +1,470 @@ +# Makefile.in generated by automake 1.9.6 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005 Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +# Makefile template for building native 'zlib' for hadoop. +# + +# +# Notes: +# 1. This makefile is designed to do the actual builds in $(HADOOP_HOME)/build/native/${os.name}-${os.arch}/$(subdir) . +# 2. This makefile depends on the following environment variables to function correctly: +# * HADOOP_NATIVE_SRCDIR +# * JAVA_HOME +# * JVM_DATA_MODEL +# * OS_ARCH +# * PLATFORM +# All these are setup by build.xml and/or the top-level makefile. +# 3. The creation of requisite jni headers/stubs are also done by build.xml and they are +# assumed to be in $(HADOOP_HOME)/build/native/src/org/apache/hadoop/io/compress/zlib. +# + +srcdir = @srcdir@ +top_srcdir = @top_srcdir@ +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +top_builddir = ../../../../../../.. 
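The ZlibCompressor.c glue later in this patch is, at bottom, a JNI wrapper around the usual deflateInit2/deflate/deflateEnd sequence with the same consumed/produced bookkeeping as the lzma path. For reference, a direct, statically linked version of that sequence looks roughly as follows; memLevel 8 matches the constant visible in the wrapper, while windowBits 15 and the buffer sizes are illustrative defaults (the wrapper receives level, strategy and windowBits from the Java side and resolves the zlib symbols through dlsym rather than linking -lz).

    /*
     * Hedged sketch of the zlib calling sequence the JNI wrapper drives:
     * deflateInit2() once, then deflate() with direct-buffer style
     * bookkeeping, then deflateEnd(). Linked against -lz for simplicity.
     */
    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>

    int main(void) {
      const char *msg = "hello, hadoop native zlib";
      unsigned char out[256];

      z_stream strm;
      memset(&strm, 0, sizeof(strm));
      /* (level, method, windowBits, memLevel, strategy); memLevel 8 as in the wrapper */
      if (deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 15, 8,
                       Z_DEFAULT_STRATEGY) != Z_OK) {
        fprintf(stderr, "deflateInit2 failed\n");
        return 1;
      }

      strm.next_in   = (unsigned char *)msg;   /* uncompressedDirectBuf + off */
      strm.avail_in  = (uInt)strlen(msg);      /* uncompressedDirectBufLen    */
      strm.next_out  = out;                    /* compressedDirectBuf         */
      strm.avail_out = sizeof(out);            /* directBufferSize            */

      int rv = deflate(&strm, Z_FINISH);       /* 'finish' flag set on the Java side */
      if (rv != Z_STREAM_END && rv != Z_OK) {
        fprintf(stderr, "deflate failed: %d\n", rv);
        deflateEnd(&strm);
        return 1;
      }

      unsigned produced = (unsigned)(sizeof(out) - strm.avail_out);
      printf("compressed %zu bytes into %u bytes (total_in=%lu total_out=%lu)\n",
             strlen(msg), produced, strm.total_in, strm.total_out);

      deflateEnd(&strm);
      return 0;
    }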
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +INSTALL = @INSTALL@ +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = src/org/apache/hadoop/io/compress/zlib +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/acinclude.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +LTLIBRARIES = $(noinst_LTLIBRARIES) +libnativezlib_la_DEPENDENCIES = +am_libnativezlib_la_OBJECTS = ZlibCompressor.lo ZlibDecompressor.lo +libnativezlib_la_OBJECTS = $(am_libnativezlib_la_OBJECTS) +DEFAULT_INCLUDES = -I. -I$(srcdir) -I$(top_builddir) +depcomp = $(SHELL) $(top_srcdir)/config/depcomp +am__depfiles_maybe = depfiles +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) \ + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CFLAGS) $(CFLAGS) +CCLD = $(CC) +LINK = $(LIBTOOL) --tag=CC --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ + $(AM_LDFLAGS) $(LDFLAGS) -o $@ +SOURCES = $(libnativezlib_la_SOURCES) +DIST_SOURCES = $(libnativezlib_la_SOURCES) +ETAGS = etags +CTAGS = ctags +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMDEP_FALSE = @AMDEP_FALSE@ +AMDEP_TRUE = @AMDEP_TRUE@ +AMTAR = @AMTAR@ +AR = @AR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO = @ECHO@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +F77 = @F77@ +FFLAGS = @FFLAGS@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +JNI_CPPFLAGS = @JNI_CPPFLAGS@ +JNI_LDFLAGS = @JNI_LDFLAGS@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAKEINFO = @MAKEINFO@ +OBJEXT = @OBJEXT@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +VERSION = @VERSION@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_F77 = @ac_ct_F77@ +ac_ct_RANLIB = @ac_ct_RANLIB@ +ac_ct_STRIP = @ac_ct_STRIP@ +am__fastdepCC_FALSE = @am__fastdepCC_FALSE@ +am__fastdepCC_TRUE = @am__fastdepCC_TRUE@ +am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@ +am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias 
= @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +datadir = @datadir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +AM_CPPFLAGS = @JNI_CPPFLAGS@ -I$(HADOOP_NATIVE_SRCDIR)/src +AM_LDFLAGS = @JNI_LDFLAGS@ +AM_CFLAGS = -g -Wall -fPIC -O2 -m$(JVM_DATA_MODEL) +noinst_LTLIBRARIES = libnativezlib.la +libnativezlib_la_SOURCES = ZlibCompressor.c ZlibDecompressor.c +libnativezlib_la_LIBADD = -ldl -ljvm +all: all-am + +.SUFFIXES: +.SUFFIXES: .c .lo .o .obj +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu src/org/apache/hadoop/io/compress/zlib/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --gnu src/org/apache/hadoop/io/compress/zlib/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +clean-noinstLTLIBRARIES: + -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) + @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ + dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ + test "$$dir" != "$$p" || dir=.; \ + echo "rm -f \"$${dir}/so_locations\""; \ + rm -f "$${dir}/so_locations"; \ + done +libnativezlib.la: $(libnativezlib_la_OBJECTS) $(libnativezlib_la_DEPENDENCIES) + $(LINK) $(libnativezlib_la_LDFLAGS) $(libnativezlib_la_OBJECTS) $(libnativezlib_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ZlibCompressor.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ZlibDecompressor.Plo@am__quote@ + +.c.o: +@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ $<; \ +@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c $< + +.c.obj: +@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ `$(CYGPATH_W) '$<'`; \ +@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; 
else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + +.c.lo: +@am__fastdepCC_TRUE@ if $(LTCOMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ $<; \ +@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Plo"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +distclean-libtool: + -rm -f libtool +uninstall-info-am: + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$tags $$unique; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + test -z "$(CTAGS_ARGS)$$tags$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$tags $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && cd $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) $$here + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \ + list='$(DISTFILES)'; for file in $$list; do \ + case $$file in \ + $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \ + $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \ + esac; \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test "$$dir" != "$$file" && test "$$dir" != "."; then \ + dir="/$$dir"; \ + $(mkdir_p) "$(distdir)$$dir"; \ + else \ + dir=''; \ + fi; \ + if test -d $$d/$$file; then \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ + fi; \ + cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ + else \ + test -f $(distdir)/$$file \ + || cp -p $$d/$$file $(distdir)/$$file \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(LTLIBRARIES) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) 
$(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." +clean: clean-am + +clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ + mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-libtool distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +info: info-am + +info-am: + +install-data-am: + +install-exec-am: + +install-info: install-info-am + +install-man: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-info-am + +.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ + clean-libtool clean-noinstLTLIBRARIES ctags distclean \ + distclean-compile distclean-generic distclean-libtool \ + distclean-tags distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-exec \ + install-exec-am install-info install-info-am install-man \ + install-strip installcheck installcheck-am installdirs \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ + pdf pdf-am ps ps-am tags uninstall uninstall-am \ + uninstall-info-am + + +# The 'vpath directive' to locate the actual source files +vpath %.c $(HADOOP_NATIVE_SRCDIR)/$(subdir) + +# +#vim: sw=4: ts=4: noet +# +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/src/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c b/src/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c new file mode 100644 index 0000000..a8cb76a --- /dev/null +++ b/src/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c @@ -0,0 +1,304 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#if defined HAVE_CONFIG_H + #include +#endif + +#if defined HAVE_STDIO_H + #include +#else + #error 'stdio.h not found' +#endif + +#if defined HAVE_STDLIB_H + #include +#else + #error 'stdlib.h not found' +#endif + +#if defined HAVE_STRING_H + #include +#else + #error 'string.h not found' +#endif + +#if defined HAVE_DLFCN_H + #include +#else + #error 'dlfcn.h not found' +#endif + +#include "org_apache_hadoop_io_compress_zlib.h" +#include "org_apache_hadoop_io_compress_zlib_ZlibCompressor.h" + +static jfieldID ZlibCompressor_clazz; +static jfieldID ZlibCompressor_stream; +static jfieldID ZlibCompressor_uncompressedDirectBuf; +static jfieldID ZlibCompressor_uncompressedDirectBufOff; +static jfieldID ZlibCompressor_uncompressedDirectBufLen; +static jfieldID ZlibCompressor_compressedDirectBuf; +static jfieldID ZlibCompressor_directBufferSize; +static jfieldID ZlibCompressor_finish; +static jfieldID ZlibCompressor_finished; + +static int (*dlsym_deflateInit2_)(z_streamp, int, int, int, int, int, const char *, int); +static int (*dlsym_deflate)(z_streamp, int); +static int (*dlsym_deflateSetDictionary)(z_streamp, const Bytef *, uInt); +static int (*dlsym_deflateReset)(z_streamp); +static int (*dlsym_deflateEnd)(z_streamp); + +JNIEXPORT void JNICALL +Java_org_apache_hadoop_io_compress_zlib_ZlibCompressor_initIDs( + JNIEnv *env, jclass class + ) { + // Load libz.so + void *libz = dlopen(HADOOP_ZLIB_LIBRARY, RTLD_LAZY | RTLD_GLOBAL); + if (!libz) { + THROW(env, "java/lang/UnsatisfiedLinkError", "Cannot load libz.so"); + return; + } + + // Locate the requisite symbols from libz.so + dlerror(); // Clear any existing error + LOAD_DYNAMIC_SYMBOL(dlsym_deflateInit2_, env, libz, "deflateInit2_"); + LOAD_DYNAMIC_SYMBOL(dlsym_deflate, env, libz, "deflate"); + LOAD_DYNAMIC_SYMBOL(dlsym_deflateSetDictionary, env, libz, "deflateSetDictionary"); + LOAD_DYNAMIC_SYMBOL(dlsym_deflateReset, env, libz, "deflateReset"); + LOAD_DYNAMIC_SYMBOL(dlsym_deflateEnd, env, libz, "deflateEnd"); + + // Initialize the requisite fieldIds + ZlibCompressor_clazz = (*env)->GetStaticFieldID(env, class, "clazz", + "Ljava/lang/Class;"); + ZlibCompressor_stream = (*env)->GetFieldID(env, class, "stream", "J"); + ZlibCompressor_finish = (*env)->GetFieldID(env, class, "finish", "Z"); + ZlibCompressor_finished = (*env)->GetFieldID(env, class, "finished", "Z"); + ZlibCompressor_uncompressedDirectBuf = (*env)->GetFieldID(env, class, + "uncompressedDirectBuf", + "Ljava/nio/Buffer;"); + ZlibCompressor_uncompressedDirectBufOff = (*env)->GetFieldID(env, class, + "uncompressedDirectBufOff", "I"); + ZlibCompressor_uncompressedDirectBufLen = (*env)->GetFieldID(env, class, + "uncompressedDirectBufLen", "I"); + ZlibCompressor_compressedDirectBuf = (*env)->GetFieldID(env, class, + "compressedDirectBuf", + "Ljava/nio/Buffer;"); + ZlibCompressor_directBufferSize = (*env)->GetFieldID(env, class, + "directBufferSize", "I"); +} + +JNIEXPORT jlong JNICALL +Java_org_apache_hadoop_io_compress_zlib_ZlibCompressor_init( + JNIEnv *env, jclass class, jint level, jint strategy, jint windowBits + ) { + // Create a z_stream + z_stream *stream = malloc(sizeof(z_stream)); + if (!stream) { + THROW(env, "java/lang/OutOfMemoryError", NULL); + return (jlong)0; + } + memset((void*)stream, 0, sizeof(z_stream)); + + // Initialize stream + static const int memLevel = 8; // See zconf.h + int rv = (*dlsym_deflateInit2_)(stream, level, Z_DEFLATED, windowBits, + memLevel, strategy, ZLIB_VERSION, sizeof(z_stream)); + + if (rv != Z_OK) { + // Contingency - Report error by 
throwing appropriate exceptions + free(stream); + stream = NULL; + + switch (rv) { + case Z_MEM_ERROR: + { + THROW(env, "java/lang/OutOfMemoryError", NULL); + } + break; + case Z_STREAM_ERROR: + { + THROW(env, "java/lang/IllegalArgumentException", NULL); + } + break; + default: + { + THROW(env, "java/lang/InternalError", NULL); + } + break; + } + } + + return JLONG(stream); +} + +JNIEXPORT void JNICALL +Java_org_apache_hadoop_io_compress_ZlibCompressor_setDictionary( + JNIEnv *env, jclass class, jlong stream, + jarray b, jint off, jint len + ) { + Bytef *buf = (*env)->GetPrimitiveArrayCritical(env, b, 0); + if (!buf) { + return; + } + int rv = dlsym_deflateSetDictionary(ZSTREAM(stream), buf + off, len); + (*env)->ReleasePrimitiveArrayCritical(env, b, buf, 0); + + if (rv != Z_OK) { + // Contingency - Report error by throwing appropriate exceptions + switch (rv) { + case Z_STREAM_ERROR: + { + THROW(env, "java/lang/IllegalArgumentException", NULL); + } + break; + default: + { + THROW(env, "java/lang/InternalError", (ZSTREAM(stream))->msg); + } + break; + } + } +} + +JNIEXPORT jint JNICALL +Java_org_apache_hadoop_io_compress_zlib_ZlibCompressor_deflateBytesDirect( + JNIEnv *env, jobject this + ) { + // Get members of ZlibCompressor + z_stream *stream = ZSTREAM( + (*env)->GetLongField(env, this, + ZlibCompressor_stream) + ); + if (!stream) { + THROW(env, "java/lang/NullPointerException", NULL); + return (jint)0; + } + + // Get members of ZlibCompressor + jobject clazz = (*env)->GetStaticObjectField(env, this, + ZlibCompressor_clazz); + jobject uncompressed_direct_buf = (*env)->GetObjectField(env, this, + ZlibCompressor_uncompressedDirectBuf); + jint uncompressed_direct_buf_off = (*env)->GetIntField(env, this, + ZlibCompressor_uncompressedDirectBufOff); + jint uncompressed_direct_buf_len = (*env)->GetIntField(env, this, + ZlibCompressor_uncompressedDirectBufLen); + + jobject compressed_direct_buf = (*env)->GetObjectField(env, this, + ZlibCompressor_compressedDirectBuf); + jint compressed_direct_buf_len = (*env)->GetIntField(env, this, + ZlibCompressor_directBufferSize); + + jboolean finish = (*env)->GetBooleanField(env, this, ZlibCompressor_finish); + + // Get the input direct buffer + LOCK_CLASS(env, clazz, "ZlibCompressor"); + Bytef* uncompressed_bytes = (*env)->GetDirectBufferAddress(env, + uncompressed_direct_buf); + UNLOCK_CLASS(env, clazz, "ZlibCompressor"); + + if (uncompressed_bytes == 0) { + return (jint)0; + } + + // Get the output direct buffer + LOCK_CLASS(env, clazz, "ZlibCompressor"); + Bytef* compressed_bytes = (*env)->GetDirectBufferAddress(env, + compressed_direct_buf); + UNLOCK_CLASS(env, clazz, "ZlibCompressor"); + + if (compressed_bytes == 0) { + return (jint)0; + } + + // Re-calibrate the z_stream + stream->next_in = uncompressed_bytes + uncompressed_direct_buf_off; + stream->next_out = compressed_bytes; + stream->avail_in = uncompressed_direct_buf_len; + stream->avail_out = compressed_direct_buf_len; + + // Compress + int rv = dlsym_deflate(stream, finish ? Z_FINISH : Z_NO_FLUSH); + + jint no_compressed_bytes = 0; + switch (rv) { + // Contingency? 
- Report error by throwing appropriate exceptions + case Z_STREAM_END: + { + (*env)->SetBooleanField(env, this, ZlibCompressor_finished, JNI_TRUE); + } // cascade + case Z_OK: + { + uncompressed_direct_buf_off += uncompressed_direct_buf_len - stream->avail_in; + (*env)->SetIntField(env, this, + ZlibCompressor_uncompressedDirectBufOff, uncompressed_direct_buf_off); + (*env)->SetIntField(env, this, + ZlibCompressor_uncompressedDirectBufLen, stream->avail_in); + no_compressed_bytes = compressed_direct_buf_len - stream->avail_out; + } + break; + case Z_BUF_ERROR: + break; + default: + { + THROW(env, "java/lang/InternalError", stream->msg); + } + break; + } + + return no_compressed_bytes; +} + +JNIEXPORT jlong JNICALL +Java_org_apache_hadoop_io_compress_zlib_ZlibCompressor_getBytesRead( + JNIEnv *env, jclass class, jlong stream + ) { + return (ZSTREAM(stream))->total_in; +} + +JNIEXPORT jlong JNICALL +Java_org_apache_hadoop_io_compress_zlib_ZlibCompressor_getBytesWritten( + JNIEnv *env, jclass class, jlong stream + ) { + return (ZSTREAM(stream))->total_out; +} + +JNIEXPORT void JNICALL +Java_org_apache_hadoop_io_compress_zlib_ZlibCompressor_reset( + JNIEnv *env, jclass class, jlong stream + ) { + if (dlsym_deflateReset(ZSTREAM(stream)) != Z_OK) { + THROW(env, "java/lang/InternalError", NULL); + } +} + +JNIEXPORT void JNICALL +Java_org_apache_hadoop_io_compress_zlib_ZlibCompressor_end( + JNIEnv *env, jclass class, jlong stream + ) { + if (dlsym_deflateEnd(ZSTREAM(stream)) == Z_STREAM_ERROR) { + THROW(env, "java/lang/InternalError", NULL); + } else { + free(ZSTREAM(stream)); + } +} + +/** + * vim: sw=2: ts=2: et: + */ + diff --git a/src/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c b/src/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c new file mode 100644 index 0000000..e9e3515 --- /dev/null +++ b/src/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c @@ -0,0 +1,317 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#if defined HAVE_CONFIG_H + #include +#endif + +#if defined HAVE_STDIO_H + #include +#else + #error 'stdio.h not found' +#endif + +#if defined HAVE_STDLIB_H + #include +#else + #error 'stdlib.h not found' +#endif + +#if defined HAVE_STRING_H + #include +#else + #error 'string.h not found' +#endif + +#if defined HAVE_DLFCN_H + #include +#else + #error 'dlfcn.h not found' +#endif + +#include "org_apache_hadoop_io_compress_zlib.h" +#include "org_apache_hadoop_io_compress_zlib_ZlibDecompressor.h" + +static jfieldID ZlibDecompressor_clazz; +static jfieldID ZlibDecompressor_stream; +static jfieldID ZlibDecompressor_compressedDirectBuf; +static jfieldID ZlibDecompressor_compressedDirectBufOff; +static jfieldID ZlibDecompressor_compressedDirectBufLen; +static jfieldID ZlibDecompressor_uncompressedDirectBuf; +static jfieldID ZlibDecompressor_directBufferSize; +static jfieldID ZlibDecompressor_needDict; +static jfieldID ZlibDecompressor_finished; + +static int (*dlsym_inflateInit2_)(z_streamp, int, const char *, int); +static int (*dlsym_inflate)(z_streamp, int); +static int (*dlsym_inflateSetDictionary)(z_streamp, const Bytef *, uInt); +static int (*dlsym_inflateReset)(z_streamp); +static int (*dlsym_inflateEnd)(z_streamp); + +JNIEXPORT void JNICALL +Java_org_apache_hadoop_io_compress_zlib_ZlibDecompressor_initIDs( + JNIEnv *env, jclass class + ) { + // Load libz.so + void *libz = dlopen(HADOOP_ZLIB_LIBRARY, RTLD_LAZY | RTLD_GLOBAL); + if (!libz) { + THROW(env, "java/lang/UnsatisfiedLinkError", "Cannot load libz.so"); + return; + } + + // Locate the requisite symbols from libz.so + dlerror(); // Clear any existing error + LOAD_DYNAMIC_SYMBOL(dlsym_inflateInit2_, env, libz, "inflateInit2_"); + LOAD_DYNAMIC_SYMBOL(dlsym_inflate, env, libz, "inflate"); + LOAD_DYNAMIC_SYMBOL(dlsym_inflateSetDictionary, env, libz, "inflateSetDictionary"); + LOAD_DYNAMIC_SYMBOL(dlsym_inflateReset, env, libz, "inflateReset"); + LOAD_DYNAMIC_SYMBOL(dlsym_inflateEnd, env, libz, "inflateEnd"); + + // Initialize the requisite fieldIds + ZlibDecompressor_clazz = (*env)->GetStaticFieldID(env, class, "clazz", + "Ljava/lang/Class;"); + ZlibDecompressor_stream = (*env)->GetFieldID(env, class, "stream", "J"); + ZlibDecompressor_needDict = (*env)->GetFieldID(env, class, "needDict", "Z"); + ZlibDecompressor_finished = (*env)->GetFieldID(env, class, "finished", "Z"); + ZlibDecompressor_compressedDirectBuf = (*env)->GetFieldID(env, class, + "compressedDirectBuf", + "Ljava/nio/Buffer;"); + ZlibDecompressor_compressedDirectBufOff = (*env)->GetFieldID(env, class, + "compressedDirectBufOff", "I"); + ZlibDecompressor_compressedDirectBufLen = (*env)->GetFieldID(env, class, + "compressedDirectBufLen", "I"); + ZlibDecompressor_uncompressedDirectBuf = (*env)->GetFieldID(env, class, + "uncompressedDirectBuf", + "Ljava/nio/Buffer;"); + ZlibDecompressor_directBufferSize = (*env)->GetFieldID(env, class, + "directBufferSize", "I"); +} + +JNIEXPORT jlong JNICALL +Java_org_apache_hadoop_io_compress_zlib_ZlibDecompressor_init( + JNIEnv *env, jclass cls, jint windowBits + ) { + z_stream *stream = malloc(sizeof(z_stream)); + memset((void*)stream, 0, sizeof(z_stream)); + + if (stream == 0) { + THROW(env, "java/lang/OutOfMemoryError", NULL); + return (jlong)0; + } + + int rv = dlsym_inflateInit2_(stream, windowBits, ZLIB_VERSION, sizeof(z_stream)); + + if (rv != Z_OK) { + // Contingency - Report error by throwing appropriate exceptions + free(stream); + stream = NULL; + + switch (rv) { + case Z_MEM_ERROR: + { + THROW(env, 
"java/lang/OutOfMemoryError", NULL); + } + break; + default: + { + THROW(env, "java/lang/InternalError", NULL); + } + break; + } + } + + return JLONG(stream); +} + +JNIEXPORT void JNICALL +Java_org_apache_hadoop_io_compress_zlib_ZlibDecompressor_setDictionary( + JNIEnv *env, jclass cls, jlong stream, + jarray b, jint off, jint len + ) { + Bytef *buf = (*env)->GetPrimitiveArrayCritical(env, b, 0); + if (!buf) { + THROW(env, "java/lang/InternalError", NULL); + return; + } + int rv = dlsym_inflateSetDictionary(ZSTREAM(stream), buf + off, len); + (*env)->ReleasePrimitiveArrayCritical(env, b, buf, 0); + + if (rv != Z_OK) { + // Contingency - Report error by throwing appropriate exceptions + switch (rv) { + case Z_STREAM_ERROR: + case Z_DATA_ERROR: + { + THROW(env, "java/lang/IllegalArgumentException", + (ZSTREAM(stream))->msg); + } + break; + default: + { + THROW(env, "java/lang/InternalError", (ZSTREAM(stream))->msg); + } + break; + } + } +} + +JNIEXPORT jint JNICALL +Java_org_apache_hadoop_io_compress_zlib_ZlibDecompressor_inflateBytesDirect( + JNIEnv *env, jobject this + ) { + // Get members of ZlibDecompressor + z_stream *stream = ZSTREAM( + (*env)->GetLongField(env, this, + ZlibDecompressor_stream) + ); + if (!stream) { + THROW(env, "java/lang/NullPointerException", NULL); + return (jint)0; + } + + // Get members of ZlibDecompressor + jobject clazz = (*env)->GetStaticObjectField(env, this, + ZlibDecompressor_clazz); + jarray compressed_direct_buf = (jarray)(*env)->GetObjectField(env, this, + ZlibDecompressor_compressedDirectBuf); + jint compressed_direct_buf_off = (*env)->GetIntField(env, this, + ZlibDecompressor_compressedDirectBufOff); + jint compressed_direct_buf_len = (*env)->GetIntField(env, this, + ZlibDecompressor_compressedDirectBufLen); + + jarray uncompressed_direct_buf = (jarray)(*env)->GetObjectField(env, this, + ZlibDecompressor_uncompressedDirectBuf); + jint uncompressed_direct_buf_len = (*env)->GetIntField(env, this, + ZlibDecompressor_directBufferSize); + + // Get the input direct buffer + LOCK_CLASS(env, clazz, "ZlibDecompressor"); + Bytef *compressed_bytes = (*env)->GetDirectBufferAddress(env, + compressed_direct_buf); + UNLOCK_CLASS(env, clazz, "ZlibDecompressor"); + + if (!compressed_bytes) { + return (jint)0; + } + + // Get the output direct buffer + LOCK_CLASS(env, clazz, "ZlibDecompressor"); + Bytef *uncompressed_bytes = (*env)->GetDirectBufferAddress(env, + uncompressed_direct_buf); + UNLOCK_CLASS(env, clazz, "ZlibDecompressor"); + + if (!uncompressed_bytes) { + return (jint)0; + } + + // Re-calibrate the z_stream + stream->next_in = compressed_bytes + compressed_direct_buf_off; + stream->next_out = uncompressed_bytes; + stream->avail_in = compressed_direct_buf_len; + stream->avail_out = uncompressed_direct_buf_len; + + // Decompress + int rv = dlsym_inflate(stream, Z_PARTIAL_FLUSH); + + // Contingency? 
- Report error by throwing appropriate exceptions + int no_decompressed_bytes = 0; + switch (rv) { + case Z_STREAM_END: + { + (*env)->SetBooleanField(env, this, ZlibDecompressor_finished, JNI_TRUE); + } // cascade down + case Z_OK: + { + compressed_direct_buf_off += compressed_direct_buf_len - stream->avail_in; + (*env)->SetIntField(env, this, ZlibDecompressor_compressedDirectBufOff, + compressed_direct_buf_off); + (*env)->SetIntField(env, this, ZlibDecompressor_compressedDirectBufLen, + stream->avail_in); + no_decompressed_bytes = uncompressed_direct_buf_len - stream->avail_out; + } + break; + case Z_NEED_DICT: + { + (*env)->SetBooleanField(env, this, ZlibDecompressor_needDict, JNI_TRUE); + compressed_direct_buf_off += compressed_direct_buf_len - stream->avail_in; + (*env)->SetIntField(env, this, ZlibDecompressor_compressedDirectBufOff, + compressed_direct_buf_off); + (*env)->SetIntField(env, this, ZlibDecompressor_compressedDirectBufLen, + stream->avail_in); + } + break; + case Z_BUF_ERROR: + break; + case Z_DATA_ERROR: + { + THROW(env, "java/io/IOException", stream->msg); + } + break; + case Z_MEM_ERROR: + { + THROW(env, "java/lang/OutOfMemoryError", NULL); + } + break; + default: + { + THROW(env, "java/lang/InternalError", stream->msg); + } + break; + } + + return no_decompressed_bytes; +} + +JNIEXPORT jlong JNICALL +Java_org_apache_hadoop_io_compress_zlib_ZlibDecompressor_getBytesRead( + JNIEnv *env, jclass cls, jlong stream + ) { + return (ZSTREAM(stream))->total_in; +} + +JNIEXPORT jlong JNICALL +Java_org_apache_hadoop_io_compress_zlib_ZlibDecompressor_getBytesWritten( + JNIEnv *env, jclass cls, jlong stream + ) { + return (ZSTREAM(stream))->total_out; +} + +JNIEXPORT void JNICALL +Java_org_apache_hadoop_io_compress_zlib_ZlibDecompressor_reset( + JNIEnv *env, jclass cls, jlong stream + ) { + if (dlsym_inflateReset(ZSTREAM(stream)) != Z_OK) { + THROW(env, "java/lang/InternalError", 0); + } +} + +JNIEXPORT void JNICALL +Java_org_apache_hadoop_io_compress_zlib_ZlibDecompressor_end( + JNIEnv *env, jclass cls, jlong stream + ) { + if (dlsym_inflateEnd(ZSTREAM(stream)) == Z_STREAM_ERROR) { + THROW(env, "java/lang/InternalError", 0); + } else { + free(ZSTREAM(stream)); + } +} + +/** + * vim: sw=2: ts=2: et: + */ + diff --git a/src/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h b/src/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h new file mode 100644 index 0000000..16b607b --- /dev/null +++ b/src/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h @@ -0,0 +1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#if !defined ORG_APACHE_HADOOP_IO_COMPRESS_ZLIB_ZLIB_H +#define ORG_APACHE_HADOOP_IO_COMPRESS_ZLIB_ZLIB_H + +#if defined HAVE_CONFIG_H + #include +#endif + +#if defined HAVE_STDDEF_H + #include +#else + #error 'stddef.h not found' +#endif + +#if defined HAVE_ZLIB_H + #include +#else + #error 'Please install zlib-development packages for your platform.' +#endif + +#if defined HAVE_ZCONF_H + #include +#else + #error 'Please install zlib-development packages for your platform.' +#endif + +#if defined HAVE_DLFCN_H + #include +#else + #error "dlfcn.h not found" +#endif + +#if defined HAVE_JNI_H + #include +#else + #error 'jni.h not found' +#endif + +#include "org_apache_hadoop.h" + +/* A helper macro to convert the java 'stream-handle' to a z_stream pointer. */ +#define ZSTREAM(stream) ((z_stream*)((ptrdiff_t)(stream))) + +/* A helper macro to convert the z_stream pointer to the java 'stream-handle'. */ +#define JLONG(stream) ((jlong)((ptrdiff_t)(stream))) + +#endif //ORG_APACHE_HADOOP_IO_COMPRESS_ZLIB_ZLIB_H diff --git a/src/native/src/org_apache_hadoop.h b/src/native/src/org_apache_hadoop.h new file mode 100644 index 0000000..325dcd6 --- /dev/null +++ b/src/native/src/org_apache_hadoop.h @@ -0,0 +1,98 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This file includes some common utilities + * for all native code used in hadoop. + */ + +#if !defined ORG_APACHE_HADOOP_H +#define ORG_APACHE_HADOOP_H + +#if defined HAVE_CONFIG_H + #include +#endif + +#if defined HAVE_DLFCN_H + #include +#else + #error "dlfcn.h not found" +#endif + +#if defined HAVE_JNI_H + #include +#else + #error 'jni.h not found' +#endif + +/* A helper macro to 'throw' a java exception. */ +#define THROW(env, exception_name, message) \ + { \ + jclass ecls = (*env)->FindClass(env, exception_name); \ + if (ecls) { \ + (*env)->ThrowNew(env, ecls, message); \ + (*env)->DeleteLocalRef(env, ecls); \ + } \ + } + +/** + * A helper function to dlsym a 'symbol' from a given library-handle. + * + * @param env jni handle to report contingencies. + * @param handle handle to the dlopen'ed library. + * @param symbol symbol to load. + * @return returns the address where the symbol is loaded in memory, + * NULL on error. + */ +static void *do_dlsym(JNIEnv *env, void *handle, const char *symbol) { + if (!env || !handle || !symbol) { + THROW(env, "java/lang/InternalError", NULL); + return NULL; + } + char *error = NULL; + void *func_ptr = dlsym(handle, symbol); + if ((error = dlerror()) != NULL) { + THROW(env, "java/lang/UnsatisfiedLinkError", symbol); + return NULL; + } + return func_ptr; +} + +/* A helper macro to dlsym the requisite dynamic symbol and bail-out on error. 
+
+/* A helper macro to dlsym the requisite dynamic symbol and bail-out on error.
+ */
+#define LOAD_DYNAMIC_SYMBOL(func_ptr, env, handle, symbol) \
+  if ((func_ptr = do_dlsym(env, handle, symbol)) == NULL) { \
+    return; \
+  }
+
+#define LOCK_CLASS(env, clazz, classname) \
+  if ((*env)->MonitorEnter(env, clazz) != 0) { \
+    char exception_msg[128]; \
+    snprintf(exception_msg, 128, "Failed to lock %s", classname); \
+    THROW(env, "java/lang/InternalError", exception_msg); \
+  }
+
+#define UNLOCK_CLASS(env, clazz, classname) \
+  if ((*env)->MonitorExit(env, clazz) != 0) { \
+    char exception_msg[128]; \
+    snprintf(exception_msg, 128, "Failed to unlock %s", classname); \
+    THROW(env, "java/lang/InternalError", exception_msg); \
+  }
+
+#endif
+
+//vim: sw=2: ts=2: et
diff --git a/src/saveVersion.sh b/src/saveVersion.sh
new file mode 100755
index 0000000..dfc15b7
--- /dev/null
+++ b/src/saveVersion.sh
@@ -0,0 +1,50 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# This file is used to generate the package-info.java file that
+# records the user, url, revision and timestamp.
+unset LANG
+unset LC_CTYPE
+version=$1
+user=`whoami`
+date=`date`
+
+# if this is true, we use the svn revision number instead of the git revision number
+use_svn="true"
+
+if [ $use_svn != "true" -a -d .git ]; then
+  revision=`git log -1 --pretty=format:"%H"`
+  hostname=`hostname`
+  branch=`git branch | sed -n -e 's/^* //p'`
+  url="git://$hostname/`pwd` on branch $branch"
+else
+  revision=`svn info | sed -n -e 's/Last Changed Rev: \(.*\)/\1/p'`
+  url=`svn info | sed -n -e 's/URL: \(.*\)/\1/p'`
+fi
+mkdir -p build/src/org/apache/hadoop
+cat << EOF | \
+  sed -e "s/VERSION/$version/" -e "s/USER/$user/" -e "s/DATE/$date/" \
+      -e "s|URL|$url|" -e "s/REV/$revision/" \
+  > build/src/org/apache/hadoop/package-info.java
+/*
+ * Generated by src/saveVersion.sh
+ */
+@HadoopVersionAnnotation(version="VERSION", revision="REV",
+                         user="USER", date="DATE", url="URL")
+package org.apache.hadoop;
+EOF
diff --git a/src/test/bin/test-patch.sh b/src/test/bin/test-patch.sh
new file mode 100755
index 0000000..7f36408
--- /dev/null
+++ b/src/test/bin/test-patch.sh
@@ -0,0 +1,694 @@
+#!/usr/bin/env bash
+
+#set -x
+ulimit -n 1024
+
+### Setup some variables.
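The saveVersion.sh script above stamps the build by generating package-info.java with a @HadoopVersionAnnotation whose version, revision, user, date and url elements are filled in by sed. A hedged sketch of reading that stamp back at runtime, assuming the annotation is retained at runtime on the org.apache.hadoop package as the generated source suggests; the PrintBuildStamp class name is illustrative, and this mirrors what a VersionInfo-style utility would do rather than quoting one:

  // Sketch: read the build stamp written by saveVersion.sh via reflection.
  import org.apache.hadoop.HadoopVersionAnnotation;

  public class PrintBuildStamp {
    public static void main(String[] args) {
      // The package must already be loaded for getPackage() to see it;
      // touching any org.apache.hadoop class first guarantees that.
      Package p = Package.getPackage("org.apache.hadoop");
      HadoopVersionAnnotation v =
          (p == null) ? null : p.getAnnotation(HadoopVersionAnnotation.class);
      if (v == null) {
        System.out.println("No build stamp found");
        return;
      }
      // Element names match the generated package-info.java above.
      System.out.println("version  = " + v.version());
      System.out.println("revision = " + v.revision());
      System.out.println("built by = " + v.user() + " on " + v.date());
      System.out.println("from     = " + v.url());
    }
  }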
+### JOB_NAME, SVN_REVISION, and BUILD_NUMBER are set by Hudson if it is run by patch process + +############################################################################### +parseArgs() { + case "$1" in + HUDSON) + ### Set HUDSON to true to indicate that this script is being run by Hudson + HUDSON=true + if [[ $# != 17 ]] ; then + echo "ERROR: usage $0 HUDSON " + cleanupAndExit 0 + fi + PATCH_DIR=$2 + SUPPORT_DIR=$3 + PS=$4 + WGET=$5 + JIRACLI=$6 + SVN=$7 + GREP=$8 + PATCH=$9 + FINDBUGS_HOME=${10} + FORREST_HOME=${11} + ECLIPSE_HOME=${12} + PYTHON_HOME=${13} + BASEDIR=${14} + TRIGGER_BUILD_URL=${15} + JIRA_PASSWD=${16} + JAVA5_HOME=${17} + ### Retrieve the defect number + if [ ! -e $PATCH_DIR/defectNum ] ; then + echo "Could not determine the patch to test. Exiting." + cleanupAndExit 0 + fi + defect=`cat $PATCH_DIR/defectNum` + if [ -z "$defect" ] ; then + echo "Could not determine the patch to test. Exiting." + cleanupAndExit 0 + fi + ECLIPSE_PROPERTY="-Declipse.home=$ECLIPSE_HOME" + PYTHON_PROPERTY="-Dpython.home=$PYTHON_HOME" + ;; + DEVELOPER) + ### Set HUDSON to false to indicate that this script is being run by a developer + HUDSON=false + if [[ $# != 10 ]] ; then + echo "ERROR: usage $0 DEVELOPER " + cleanupAndExit 0 + fi + ### PATCH_FILE contains the location of the patchfile + PATCH_FILE=$2 + if [[ ! -e "$PATCH_FILE" ]] ; then + echo "Unable to locate the patch file $PATCH_FILE" + cleanupAndExit 0 + fi + PATCH_DIR=$3 + ### Check if $PATCH_DIR exists. If it does not exist, create a new directory + if [[ ! -e "$PATCH_DIR" ]] ; then + mkdir "$PATCH_DIR" + if [[ $? == 0 ]] ; then + echo "$PATCH_DIR has been created" + else + echo "Unable to create $PATCH_DIR" + cleanupAndExit 0 + fi + fi + SVN=$4 + GREP=$5 + PATCH=$6 + FINDBUGS_HOME=$7 + FORREST_HOME=$8 + BASEDIR=$9 + JAVA5_HOME=${10} + ### Obtain the patch filename to append it to the version number + defect=`basename $PATCH_FILE` + ;; + *) + echo "ERROR: usage $0 HUDSON [args] | DEVELOPER [args]" + cleanupAndExit 0 + ;; + esac +} + +############################################################################### +checkout () { + echo "" + echo "" + echo "======================================================================" + echo "======================================================================" + echo " Testing patch for ${defect}." + echo "======================================================================" + echo "======================================================================" + echo "" + echo "" + ### When run by a developer, if the workspace contains modifications, do not continue + status=`$SVN stat` + if [[ $HUDSON == "false" ]] ; then + if [[ "$status" != "" ]] ; then + echo "ERROR: can't run in a workspace that contains the following modifications" + echo "$status" + cleanupAndExit 1 + fi + else + cd $BASEDIR + $SVN revert -R . + rm -rf `$SVN status` + $SVN update + fi + return $? +} + +############################################################################### +setup () { + ### Download latest patch file (ignoring .htm and .html) when run from patch process + if [[ $HUDSON == "true" ]] ; then + $WGET -q -O $PATCH_DIR/jira http://issues.apache.org/jira/browse/$defect + if [[ `$GREP -c 'Patch Available' $PATCH_DIR/jira` == 0 ]] ; then + echo "$defect is not \"Patch Available\". Exiting." 
+ cleanupAndExit 0 + fi + relativePatchURL=`$GREP -o '"/jira/secure/attachment/[0-9]*/[^"]*' $PATCH_DIR/jira | $GREP -v -e 'htm[l]*$' | sort | tail -1 | $GREP -o '/jira/secure/attachment/[0-9]*/[^"]*'` + patchURL="http://issues.apache.org${relativePatchURL}" + patchNum=`echo $patchURL | $GREP -o '[0-9]*/' | $GREP -o '[0-9]*'` + echo "$defect patch is being downloaded at `date` from" + echo "$patchURL" + $WGET -q -O $PATCH_DIR/patch $patchURL + VERSION=${SVN_REVISION}_${defect}_PATCH-${patchNum} + JIRA_COMMENT="Here are the results of testing the latest attachment + $patchURL + against trunk revision ${SVN_REVISION}." + + ### Copy in any supporting files needed by this process + cp -r $SUPPORT_DIR/lib/* ./lib + #PENDING: cp -f $SUPPORT_DIR/etc/checkstyle* ./src/test + ### Copy the patch file to $PATCH_DIR + else + VERSION=PATCH-${defect} + cp $PATCH_FILE $PATCH_DIR/patch + if [[ $? == 0 ]] ; then + echo "Patch file $PATCH_FILE copied to $PATCH_DIR" + else + echo "Could not copy $PATCH_FILE to $PATCH_DIR" + cleanupAndExit 0 + fi + fi + echo "" + echo "" + echo "======================================================================" + echo "======================================================================" + echo " Pre-building trunk to determine trunk number" + echo " of release audit, javac, and Findbugs warnings." + echo "======================================================================" + echo "======================================================================" + echo "" + echo "" + ### DISABLE RELEASE AUDIT UNTIL HADOOP-4074 IS FIXED + ### Do not call releaseaudit when run by a developer + ### if [[ $HUDSON == "true" ]] ; then + ### echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= releaseaudit > $PATCH_DIR/trunkReleaseAuditWarnings.txt 2>&1" + ### $ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= releaseaudit > $PATCH_DIR/trunkReleaseAuditWarnings.txt 2>&1 + ### fi + echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -Djavac.args="-Xlint -Xmaxwarns 1000" $ECLIPSE_PROPERTY -Djava5.home=${JAVA5_HOME} -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= clean tar > $PATCH_DIR/trunkJavacWarnings.txt 2>&1" + $ANT_HOME/bin/ant -Dversion="${VERSION}" -Djavac.args="-Xlint -Xmaxwarns 1000" $ECLIPSE_PROPERTY -Djava5.home=${JAVA5_HOME} -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= clean tar > $PATCH_DIR/trunkJavacWarnings.txt 2>&1 + if [[ $? != 0 ]] ; then + echo "Trunk compilation is broken?" + cleanupAndExit 1 + fi + echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -Dfindbugs.home=$FINDBUGS_HOME -Djava5.home=${JAVA5_HOME} -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= findbugs > /dev/null 2>&1" + $ANT_HOME/bin/ant -Dversion="${VERSION}" -Dfindbugs.home=$FINDBUGS_HOME -Djava5.home=${JAVA5_HOME} -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= findbugs > /dev/null 2>&1 + if [[ $? != 0 ]] ; then + echo "Trunk findbugs is broken?" + cleanupAndExit 1 + fi + cp $BASEDIR/build/test/findbugs/*.xml $PATCH_DIR/trunkFindbugsWarnings.xml +} + +############################################################################### +### Check for @author tags in the patch +checkAuthor () { + echo "" + echo "" + echo "======================================================================" + echo "======================================================================" + echo " Checking there are no @author tags in the patch." 
+ echo "======================================================================" + echo "======================================================================" + echo "" + echo "" + authorTags=`$GREP -c -i '@author' $PATCH_DIR/patch` + echo "There appear to be $authorTags @author tags in the patch." + if [[ $authorTags != 0 ]] ; then + JIRA_COMMENT="$JIRA_COMMENT + + -1 @author. The patch appears to contain $authorTags @author tags which the Hadoop community has agreed to not allow in code contributions." + return 1 + fi + JIRA_COMMENT="$JIRA_COMMENT + + +1 @author. The patch does not contain any @author tags." + return 0 +} + +############################################################################### +### Check for tests in the patch +checkTests () { + echo "" + echo "" + echo "======================================================================" + echo "======================================================================" + echo " Checking there are new or changed tests in the patch." + echo "======================================================================" + echo "======================================================================" + echo "" + echo "" + testReferences=`$GREP -c -i '/test' $PATCH_DIR/patch` + echo "There appear to be $testReferences test files referenced in the patch." + if [[ $testReferences == 0 ]] ; then + if [[ $HUDSON == "true" ]] ; then + patchIsDoc=`$GREP -c -i 'title="documentation' $PATCH_DIR/jira` + if [[ $patchIsDoc != 0 ]] ; then + echo "The patch appears to be a documentation patch that doesn't require tests." + JIRA_COMMENT="$JIRA_COMMENT + + +0 tests included. The patch appears to be a documentation patch that doesn't require tests." + return 0 + fi + fi + JIRA_COMMENT="$JIRA_COMMENT + + -1 tests included. The patch doesn't appear to include any new or modified tests. + Please justify why no tests are needed for this patch." + return 1 + fi + JIRA_COMMENT="$JIRA_COMMENT + + +1 tests included. The patch appears to include $testReferences new or modified tests." + return 0 +} + +############################################################################### +### Attempt to apply the patch +applyPatch () { + echo "" + echo "" + echo "======================================================================" + echo "======================================================================" + echo " Applying patch." + echo "======================================================================" + echo "======================================================================" + echo "" + echo "" + $PATCH -E -p0 < $PATCH_DIR/patch + if [[ $? != 0 ]] ; then + echo "PATCH APPLICATION FAILED" + JIRA_COMMENT="$JIRA_COMMENT + + -1 patch. The patch command could not apply the patch." + return 1 + fi + return 0 +} + +############################################################################### +### Check there are no javadoc warnings +checkJavadocWarnings () { + echo "" + echo "" + echo "======================================================================" + echo "======================================================================" + echo " Determining number of patched javadoc warnings." 
+ echo "======================================================================" + echo "======================================================================" + echo "" + echo "" + echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= clean javadoc | tee $PATCH_DIR/patchJavadocWarnings.txt" + $ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= clean javadoc | tee $PATCH_DIR/patchJavadocWarnings.txt + javadocWarnings=`$GREP -c '\[javadoc\] [0-9]* warning' $PATCH_DIR/patchJavadocWarnings.txt` + echo "" + echo "" + echo "There appear to be $javadocWarnings javadoc warnings generated by the patched build." + if [[ $javadocWarnings != 0 ]] ; then + JIRA_COMMENT="$JIRA_COMMENT + + -1 javadoc. The javadoc tool appears to have generated $javadocWarnings warning messages." + return 1 + fi + JIRA_COMMENT="$JIRA_COMMENT + + +1 javadoc. The javadoc tool did not generate any warning messages." +return 0 +} + +############################################################################### +### Check there are no changes in the number of Javac warnings +checkJavacWarnings () { + echo "" + echo "" + echo "======================================================================" + echo "======================================================================" + echo " Determining number of patched javac warnings." + echo "======================================================================" + echo "======================================================================" + echo "" + echo "" + echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -Djavac.args="-Xlint -Xmaxwarns 1000" $ECLIPSE_PROPERTY -Djava5.home=${JAVA5_HOME} -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= tar > $PATCH_DIR/patchJavacWarnings.txt 2>&1" + $ANT_HOME/bin/ant -Dversion="${VERSION}" -Djavac.args="-Xlint -Xmaxwarns 1000" $ECLIPSE_PROPERTY -Djava5.home=${JAVA5_HOME} -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= tar > $PATCH_DIR/patchJavacWarnings.txt 2>&1 + + ### Compare trunk and patch javac warning numbers + if [[ -f $PATCH_DIR/patchJavacWarnings.txt ]] ; then + trunkJavacWarnings=`$GREP -o '\[javac\] [0-9]* warning' $PATCH_DIR/trunkJavacWarnings.txt | awk '{total += $2} END {print total}'` + patchJavacWarnings=`$GREP -o '\[javac\] [0-9]* warning' $PATCH_DIR/patchJavacWarnings.txt | awk '{total += $2} END {print total}'` + echo "There appear to be $trunkJavacWarnings javac compiler warnings before the patch and $patchJavacWarnings javac compiler warnings after applying the patch." + if [[ $patchJavacWarnings != "" && $trunkJavacWarnings != "" ]] ; then + if [[ $patchJavacWarnings > $trunkJavacWarnings ]] ; then + JIRA_COMMENT="$JIRA_COMMENT + + -1 javac. The applied patch generated $patchJavacWarnings javac compiler warnings (more than the trunk's current $trunkJavacWarnings warnings)." + return 1 + fi + fi + fi + JIRA_COMMENT="$JIRA_COMMENT + + +1 javac. The applied patch does not increase the total number of javac compiler warnings." + return 0 +} + +############################################################################### +### Check there are no changes in the number of release audit (RAT) warnings +checkReleaseAuditWarnings () { + echo "" + echo "" + echo "======================================================================" + echo "======================================================================" + echo " Determining number of patched release audit warnings." 
+ echo "======================================================================" + echo "======================================================================" + echo "" + echo "" + echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -Djava5.home=${JAVA5_HOME} -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= releaseaudit > $PATCH_DIR/patchReleaseAuditWarnings.txt 2>&1" + $ANT_HOME/bin/ant -Dversion="${VERSION}" -Djava5.home=${JAVA5_HOME} -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= releaseaudit > $PATCH_DIR/patchReleaseAuditWarnings.txt 2>&1 + + ### Compare trunk and patch release audit warning numbers + if [[ -f $PATCH_DIR/patchReleaseAuditWarnings.txt ]] ; then + trunkReleaseAuditWarnings=`$GREP -c '\!?????' $PATCH_DIR/trunkReleaseAuditWarnings.txt` + patchReleaseAuditWarnings=`$GREP -c '\!?????' $PATCH_DIR/patchReleaseAuditWarnings.txt` + echo "" + echo "" + echo "There appear to be $trunkReleaseAuditWarnings release audit warnings before the patch and $patchReleaseAuditWarnings release audit warnings after applying the patch." + if [[ $patchReleaseAuditWarnings != "" && $trunkReleaseAuditWarnings != "" ]] ; then + if [[ $patchReleaseAuditWarnings > $trunkReleaseAuditWarnings ]] ; then + JIRA_COMMENT="$JIRA_COMMENT + + -1 release audit. The applied patch generated $patchReleaseAuditWarnings release audit warnings (more than the trunk's current $trunkReleaseAuditWarnings warnings)." + $GREP '\!?????' $PATCH_DIR/patchReleaseAuditWarnings.txt > $PATCH_DIR/patchReleaseAuditProblems.txt + $GREP '\!?????' $PATCH_DIR/trunkReleaseAuditWarnings.txt > $PATCH_DIR/trunkReleaseAuditProblems.txt + echo "A diff of patched release audit warnings with trunk release audit warnings." > $PATCH_DIR/releaseAuditDiffWarnings.txt + echo "Lines that start with ????? in the release audit report indicate files that do not have an Apache license header." > $PATCH_DIR/releaseAuditDiffWarnings.txt + echo "" > $PATCH_DIR/releaseAuditDiffWarnings.txt + diff $PATCH_DIR/patchReleaseAuditProblems.txt $PATCH_DIR/trunkReleaseAuditProblems.txt >> $PATCH_DIR/releaseAuditDiffWarnings.txt + JIRA_COMMENT_FOOTER="Release audit warnings: http://hudson.zones.apache.org/hudson/job/$JOB_NAME/$BUILD_NUMBER/artifact/trunk/current/releaseAuditDiffWarnings.txt +$JIRA_COMMENT_FOOTER" + return 1 + fi + fi + fi + JIRA_COMMENT="$JIRA_COMMENT + + +1 release audit. The applied patch does not increase the total number of release audit warnings." + return 0 +} + +############################################################################### +### Check there are no changes in the number of Checkstyle warnings +checkStyle () { + echo "" + echo "" + echo "======================================================================" + echo "======================================================================" + echo " Determining number of patched checkstyle warnings." 
+ echo "======================================================================" + echo "======================================================================" + echo "" + echo "" + echo "THIS IS NOT IMPLEMENTED YET" + echo "" + echo "" + echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= checkstyle" + $ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= checkstyle + JIRA_COMMENT_FOOTER="Checkstyle results: http://hudson.zones.apache.org/hudson/job/$JOB_NAME/$BUILD_NUMBER/artifact/trunk/build/test/checkstyle-errors.html +$JIRA_COMMENT_FOOTER" + ### TODO: calculate actual patchStyleErrors +# patchStyleErrors=0 +# if [[ $patchStyleErrors != 0 ]] ; then +# JIRA_COMMENT="$JIRA_COMMENT +# +# -1 checkstyle. The patch generated $patchStyleErrors code style errors." +# return 1 +# fi +# JIRA_COMMENT="$JIRA_COMMENT +# +# +1 checkstyle. The patch generated 0 code style errors." + return 0 +} + +############################################################################### +### Check there are no changes in the number of Findbugs warnings +checkFindbugsWarnings () { + echo "" + echo "" + echo "======================================================================" + echo "======================================================================" + echo " Determining number of patched Findbugs warnings." + echo "======================================================================" + echo "======================================================================" + echo "" + echo "" + echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -Dfindbugs.home=$FINDBUGS_HOME -Djava5.home=${JAVA5_HOME} -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= findbugs" + $ANT_HOME/bin/ant -Dversion="${VERSION}" -Dfindbugs.home=$FINDBUGS_HOME -Djava5.home=${JAVA5_HOME} -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= findbugs + if [ $? != 0 ] ; then + JIRA_COMMENT="$JIRA_COMMENT + + -1 findbugs. The patch appears to cause Findbugs to fail." + return 1 + fi +JIRA_COMMENT_FOOTER="Findbugs warnings: http://hudson.zones.apache.org/hudson/job/$JOB_NAME/$BUILD_NUMBER/artifact/trunk/build/test/findbugs/newPatchFindbugsWarnings.html +$JIRA_COMMENT_FOOTER" + cp $BASEDIR/build/test/findbugs/*.xml $PATCH_DIR/patchFindbugsWarnings.xml +$FINDBUGS_HOME/bin/setBugDatabaseInfo -timestamp "01/01/1999" \ + $PATCH_DIR/trunkFindbugsWarnings.xml \ + $PATCH_DIR/trunkFindbugsWarnings.xml + $FINDBUGS_HOME/bin/setBugDatabaseInfo -timestamp "01/01/2000" \ + $PATCH_DIR/patchFindbugsWarnings.xml \ + $PATCH_DIR/patchFindbugsWarnings.xml + $FINDBUGS_HOME/bin/computeBugHistory -output $PATCH_DIR/findbugsMerge.xml \ + $PATCH_DIR/trunkFindbugsWarnings.xml \ + $PATCH_DIR/patchFindbugsWarnings.xml + findbugsWarnings=`$FINDBUGS_HOME/bin/filterBugs -first "01/01/2000" $PATCH_DIR/findbugsMerge.xml \ + $BASEDIR/build/test/findbugs/newPatchFindbugsWarnings.xml | /usr/bin/awk '{print $1}'` + $FINDBUGS_HOME/bin/convertXmlToText -html \ + $BASEDIR/build/test/findbugs/newPatchFindbugsWarnings.xml \ + $BASEDIR/build/test/findbugs/newPatchFindbugsWarnings.html + cp $BASEDIR/build/test/findbugs/newPatchFindbugsWarnings.html $PATCH_DIR/newPatchFindbugsWarnings.html + cp $BASEDIR/build/test/findbugs/newPatchFindbugsWarnings.xml $PATCH_DIR/newPatchFindbugsWarnings.xml + if [[ $findbugsWarnings != 0 ]] ; then + JIRA_COMMENT="$JIRA_COMMENT + + -1 findbugs. The patch appears to introduce $findbugsWarnings new Findbugs warnings." + return 1 + fi + JIRA_COMMENT="$JIRA_COMMENT + + +1 findbugs. The patch does not introduce any new Findbugs warnings." 
+ return 0 +} + +############################################################################### +### Run the test-core target +runCoreTests () { + echo "" + echo "" + echo "======================================================================" + echo "======================================================================" + echo " Running core tests." + echo "======================================================================" + echo "======================================================================" + echo "" + echo "" + + ### Kill any rogue build processes from the last attempt + $PS -auxwww | $GREP HadoopPatchProcess | /usr/bin/nawk '{print $2}' | /usr/bin/xargs -t -I {} /usr/bin/kill -9 {} > /dev/null + + echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=yes -Dcompile.c++=yes -Dforrest.home=$FORREST_HOME -Djava5.home=$JAVA5_HOME create-c++-configure docs tar test-core" + $ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=yes -Dcompile.c++=yes -Dforrest.home=$FORREST_HOME -Djava5.home=$JAVA5_HOME create-c++-configure docs tar test-core + if [[ $? != 0 ]] ; then + JIRA_COMMENT="$JIRA_COMMENT + + -1 core tests. The patch failed core unit tests." + return 1 + fi + JIRA_COMMENT="$JIRA_COMMENT + + +1 core tests. The patch passed core unit tests." + return 0 +} + +############################################################################### +### Tests parts of contrib specific to the eclipse files +checkJarFilesDeclaredInEclipse () { + export DECLARED_JARS=$(sed -n 's@.*kind="lib".*path="\(.*jar\)".*@\1@p' < .eclipse.templates/.classpath) + export PRESENT_JARS=$(find build/ivy/lib/Hadoop/common/ lib/ src/test/lib/ -name '*.jar' |sort) + # When run by Hudson, consider libs from ${SUPPORT_DIR} declared + if [[ ${HUDSON} == "true" ]]; then + DECLARED_JARS="${DECLARED_JARS} $(cd "${SUPPORT_DIR}"; find lib -name '*.jar')" + fi + DECLARED_JARS=$(sed 'y/ /\n/' <<< ${DECLARED_JARS} | sort) + export ECLIPSE_DECLARED_SRC=$(sed -n 's@.*kind="src".*path="\(.*\)".*@\1@p' < .eclipse.templates/.classpath |sort) + + if [ "${DECLARED_JARS}" != "${PRESENT_JARS}" ]; then + echo " +FAILED. Some jars are not declared in the Eclipse project. + Declared jars: ${DECLARED_JARS} + Present jars: ${PRESENT_JARS}" + return 1 + fi + for dir in $ECLIPSE_DECLARED_SRC; do + [ '!' -d $dir ] && echo " +FAILED: $dir is referenced in the Eclipse project although it doesn't exists anymore." && return 1 + done + return 0 +} + +checkEclipse () { + echo "" + echo "" + echo "======================================================================" + echo "======================================================================" + echo " Running Eclipse classpath verification." + echo "======================================================================" + echo "======================================================================" + echo "" + echo "" + + checkJarFilesDeclaredInEclipse + if [[ $? != 0 ]] ; then + JIRA_COMMENT="$JIRA_COMMENT + + -1 Eclipse classpath. The patch causes the Eclipse classpath to differ from the contents of the lib directories." + return 1 + fi + JIRA_COMMENT="$JIRA_COMMENT + + +1 Eclipse classpath. The patch retains Eclipse classpath integrity." 
+ return 0 +} +############################################################################### +### Run the test-contrib target +runContribTests () { + echo "" + echo "" + echo "======================================================================" + echo "======================================================================" + echo " Running contrib tests." + echo "======================================================================" + echo "======================================================================" + echo "" + echo "" + + ### Kill any rogue build processes from the last attempt + $PS -auxwww | $GREP HadoopPatchProcess | /usr/bin/nawk '{print $2}' | /usr/bin/xargs -t -I {} /usr/bin/kill -9 {} > /dev/null + + echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" $ECLIPSE_PROPERTY $PYTHON_PROPERTY -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=yes test-contrib" + $ANT_HOME/bin/ant -Dversion="${VERSION}" $ECLIPSE_PROPERTY $PYTHON_PROPERTY -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=yes test-contrib + if [[ $? != 0 ]] ; then + JIRA_COMMENT="$JIRA_COMMENT + + -1 contrib tests. The patch failed contrib unit tests." + return 1 + fi + JIRA_COMMENT="$JIRA_COMMENT + + +1 contrib tests. The patch passed contrib unit tests." + return 0 +} + +############################################################################### +### Submit a comment to the defect's Jira +submitJiraComment () { + local result=$1 + ### Do not output the value of JIRA_COMMENT_FOOTER when run by a developer + if [[ $HUDSON == "false" ]] ; then + JIRA_COMMENT_FOOTER="" + fi + if [[ $result == 0 ]] ; then + comment="+1 overall. $JIRA_COMMENT + +$JIRA_COMMENT_FOOTER" + else + comment="-1 overall. $JIRA_COMMENT + +$JIRA_COMMENT_FOOTER" + fi + ### Output the test result to the console + echo " + + + +$comment" + + if [[ $HUDSON == "true" ]] ; then + echo "" + echo "" + echo "======================================================================" + echo "======================================================================" + echo " Adding comment to Jira." + echo "======================================================================" + echo "======================================================================" + echo "" + echo "" + + ### Update Jira with a comment + export USER=hudson + $JIRACLI -s issues.apache.org/jira login hadoopqa $JIRA_PASSWD + $JIRACLI -s issues.apache.org/jira comment $defect "$comment" + $JIRACLI -s issues.apache.org/jira logout + fi +} + +############################################################################### +### Cleanup files +cleanupAndExit () { + local result=$1 + if [[ $HUDSON == "true" ]] ; then + if [ -e "$PATCH_DIR" ] ; then + mv $PATCH_DIR $BASEDIR + fi + fi + echo "" + echo "" + echo "======================================================================" + echo "======================================================================" + echo " Finished build." 
+ echo "======================================================================" + echo "======================================================================" + echo "" + echo "" + exit $result +} + +############################################################################### +############################################################################### +############################################################################### + +JIRA_COMMENT="" +JIRA_COMMENT_FOOTER="Console output: http://hudson.zones.apache.org/hudson/job/$JOB_NAME/$BUILD_NUMBER/console + +This message is automatically generated." + +### Check if arguments to the script have been specified properly or not +parseArgs $@ +cd $BASEDIR + +checkout +RESULT=$? +if [[ $HUDSON == "true" ]] ; then + if [[ $RESULT != 0 ]] ; then + ### Resubmit build. + $WGET -q -O $PATCH_DIR/build $TRIGGER_BUILD_URL + exit 100 + fi +fi +setup +checkAuthor +RESULT=$? + +checkTests +(( RESULT = RESULT + $? )) +applyPatch +if [[ $? != 0 ]] ; then + submitJiraComment 1 + cleanupAndExit 1 +fi +checkJavadocWarnings +(( RESULT = RESULT + $? )) +checkJavacWarnings +(( RESULT = RESULT + $? )) +checkStyle +(( RESULT = RESULT + $? )) +checkFindbugsWarnings +(( RESULT = RESULT + $? )) +checkEclipse +(( RESULT = RESULT + $? )) +### Do not call these when run by a developer +if [[ $HUDSON == "true" ]] ; then + ### DISABLE RELEASE AUDIT UNTIL HADOOP-4074 IS FIXED + ### checkReleaseAuditWarnings + ### (( RESULT = RESULT + $? )) + runCoreTests + (( RESULT = RESULT + $? )) + runContribTests + (( RESULT = RESULT + $? )) +fi +JIRA_COMMENT_FOOTER="Test results: http://hudson.zones.apache.org/hudson/job/$JOB_NAME/$BUILD_NUMBER/testReport/ +$JIRA_COMMENT_FOOTER" + +submitJiraComment $RESULT +cleanupAndExit $RESULT + + diff --git a/src/test/checkstyle-noframes-sorted.xsl b/src/test/checkstyle-noframes-sorted.xsl new file mode 100644 index 0000000..5f9e93b --- /dev/null +++ b/src/test/checkstyle-noframes-sorted.xsl @@ -0,0 +1,178 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +

CheckStyle Audit

Designed for use with CheckStyle and Ant.
+
+ + + +
+ + + +
+ + + + +
+ + + + +
+ + + + +

Files

+ + + + + + + + + + + + + + +
NameErrors
+
+ + + + +

File

+ + + + + + + + + + + + + + +
Error DescriptionLine
+ Back to top +
+ + + +

Summary

+ + + + + + + + + + + + +
FilesErrors
+
+ + + + a + b + + +
+ + diff --git a/src/test/checkstyle.xml b/src/test/checkstyle.xml new file mode 100644 index 0000000..5e3b894 --- /dev/null +++ b/src/test/checkstyle.xml @@ -0,0 +1,170 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/test/core-site.xml b/src/test/core-site.xml new file mode 100644 index 0000000..d33d4d2 --- /dev/null +++ b/src/test/core-site.xml @@ -0,0 +1,50 @@ + + + + + + + + + + + + hadoop.tmp.dir + build/test + A base for other temporary directories. + true + + + + test.fs.s3.name + s3:/// + The name of the s3 file system for testing. + + + + fs.s3.block.size + 128 + Size of a block in bytes. + + + + fs.ftp.user.localhost + user + The username for connecting to FTP server running on localhost. + This is required by FTPFileSystem + + + + fs.ftp.password.localhost + password + The password for connecting to FTP server running on localhost. + This is required by FTPFileSystem + + + + test.fs.s3n.name + s3n:/// + The name of the s3n file system for testing. + + + diff --git a/src/test/ddl/buffer.jr b/src/test/ddl/buffer.jr new file mode 100644 index 0000000..797aa67 --- /dev/null +++ b/src/test/ddl/buffer.jr @@ -0,0 +1,6 @@ +module org.apache.hadoop.record { + class RecBuffer { + buffer data; + } +} + diff --git a/src/test/ddl/int.jr b/src/test/ddl/int.jr new file mode 100644 index 0000000..61eae7f --- /dev/null +++ b/src/test/ddl/int.jr @@ -0,0 +1,6 @@ +module org.apache.hadoop.record { + class RecInt { + int data; + } +} + diff --git a/src/test/ddl/string.jr b/src/test/ddl/string.jr new file mode 100644 index 0000000..cdd3e70 --- /dev/null +++ b/src/test/ddl/string.jr @@ -0,0 +1,6 @@ +module org.apache.hadoop.record { + class RecString { + ustring data; + } +} + diff --git a/src/test/ddl/test.jr b/src/test/ddl/test.jr new file mode 100644 index 0000000..46c181d --- /dev/null +++ b/src/test/ddl/test.jr @@ -0,0 +1,46 @@ +module org.apache.hadoop.record { + class RecRecord0 { + ustring stringVal; + } + + class RecRecord1 { + boolean boolVal; + byte byteVal; + int intVal; + long longVal; + float floatVal; // testing inline comment + double doubleVal; /* testing comment */ + ustring stringVal; /* testing multi-line + * comment */ + buffer bufferVal; // testing another // inline comment + vector vectorVal; + map mapVal; + RecRecord0 recordVal; + } + + class RecRecordOld { + ustring name; + vector ivec; + vector> svec; + RecRecord0 inner; + vector>> strvec; + float i1; + map map1; + vector> mvec1; + vector> mvec2; + } + + /* RecRecordNew is a lot like RecRecordOld. Helps test for versioning. 
*/ + class RecRecordNew { + ustring name2; + RecRecord0 inner; + vector ivec; + vector> svec; + vector>> strvec; + int i1; + map map1; + vector> mvec2; + } + +} + diff --git a/src/test/findbugsExcludeFile.xml b/src/test/findbugsExcludeFile.xml new file mode 100644 index 0000000..35f1665 --- /dev/null +++ b/src/test/findbugsExcludeFile.xml @@ -0,0 +1,83 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/test/hadoop-policy.xml b/src/test/hadoop-policy.xml new file mode 100644 index 0000000..f57800f --- /dev/null +++ b/src/test/hadoop-policy.xml @@ -0,0 +1,97 @@ + + + + + + + + security.client.protocol.acl + * + ACL for ClientProtocol, which is used by user code + via the DistributedFileSystem. + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. + + + + security.client.datanode.protocol.acl + * + ACL for ClientDatanodeProtocol, the client-to-datanode protocol + for block recovery. + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. + + + + security.datanode.protocol.acl + * + ACL for DatanodeProtocol, which is used by datanodes to + communicate with the namenode. + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. + + + + security.inter.datanode.protocol.acl + * + ACL for InterDatanodeProtocol, the inter-datanode protocol + for updating generation timestamp. + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. + + + + security.namenode.protocol.acl + * + ACL for NamenodeProtocol, the protocol used by the secondary + namenode to communicate with the namenode. + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. + + + + security.inter.tracker.protocol.acl + * + ACL for InterTrackerProtocol, used by the tasktrackers to + communicate with the jobtracker. + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. + + + + security.job.submission.protocol.acl + * + ACL for JobSubmissionProtocol, used by job clients to + communciate with the jobtracker for job submission, querying job status etc. + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. + + + + security.task.umbilical.protocol.acl + * + ACL for TaskUmbilicalProtocol, used by the map and reduce + tasks to communicate with the parent tasktracker. + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. 
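Every ACL value above follows the convention repeated in the descriptions: a comma-separated user list and a comma-separated group list separated by a single blank, with "*" meaning everyone. A self-contained sketch of that parsing rule, purely illustrative — the real enforcement lives in Hadoop's service-level authorization code, and the SimpleAcl class below is not part of this patch:

  // Sketch: parse "user1,user2 group1,group2" ACL strings as described above.
  import java.util.Arrays;
  import java.util.Collections;
  import java.util.HashSet;
  import java.util.Set;

  public class SimpleAcl {
    private final boolean allowAll;
    private final Set<String> users = new HashSet<String>();
    private final Set<String> groups = new HashSet<String>();

    public SimpleAcl(String aclString) {
      String acl = aclString.trim();
      allowAll = acl.equals("*");
      if (!allowAll) {
        // First token: users; second token (if present): groups.
        String[] parts = acl.split(" ", 2);
        if (parts.length > 0 && parts[0].length() > 0) {
          users.addAll(Arrays.asList(parts[0].split(",")));
        }
        if (parts.length > 1 && parts[1].trim().length() > 0) {
          groups.addAll(Arrays.asList(parts[1].trim().split(",")));
        }
      }
    }

    public boolean isAllowed(String user, Set<String> userGroups) {
      if (allowAll || users.contains(user)) {
        return true;
      }
      return !Collections.disjoint(groups, userGroups);
    }
  }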
+ + + + security.refresh.policy.protocol.acl + ${user.name} + ACL for RefreshAuthorizationPolicyProtocol, used by the + dfsadmin and mradmin commands to refresh the security policy in-effect. + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. + + + diff --git a/src/test/hadoop-site.xml b/src/test/hadoop-site.xml new file mode 100644 index 0000000..352c4ff --- /dev/null +++ b/src/test/hadoop-site.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff --git a/src/test/hdfs-site.xml b/src/test/hdfs-site.xml new file mode 100644 index 0000000..cbd6ab6 --- /dev/null +++ b/src/test/hdfs-site.xml @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/src/test/lib/ftplet-api-1.0.0-SNAPSHOT.jar b/src/test/lib/ftplet-api-1.0.0-SNAPSHOT.jar new file mode 100644 index 0000000000000000000000000000000000000000..bc806e401477bf1e220f19c9eddda760972f5dd1 GIT binary patch literal 19002 zcmb7s1ymi&wk__#-QC^Y-QAtw?(QBuxCSS7E8t^G6rYb}yB`?mP`13X>pd!F+acu() z)_|`A0VnEz-X@~ zHF0ruGNm`MF>-c}QC_tBDu~E8GRp~}>|_lsM@YAwBIxC589`k{DVd;nAJ2{)*KZS{ zVnSk4@>tSd$Pxc$rv0X%Yz#YGREoN9YHTdpd@PIalmFAx1Na)1esAuYBkCPx&LC7i zVIJl6TSikOYG#o6qy$l+89OnhWW~gx;_?}!QxY4sN^yno?>m?S{hXwiM3g_r^XoP4TqD;2^rSqUFMkA^+XYa zfV<3$rCx>Lee_K{brD8A!9>%;-jA^|&diPcTwXoT+mC$I8QfYEBN+>vik5Nfgp?+7 z`gK}I)JkAFp96ieiNZ$d1IH@diel)C9~2UpLn*K=t7AhhBsi#$3%1VF)VCpb+I2;|q_*55?3P8ebJREU+;P_< zK@(~=#C=9HFhJMO-AA*>*NF9nZER$t`H75i7=d~x~>SAH!q(d0#`2lVvi=X06u*jKl!a&QZaPXltH}bnAEqO{aC@vy5Fj;`>vyTz< zLq?G-qbsKT!DfcC6uVtxH1oLHiA}&Oq6_cBn4jKolbJ}?dL}oXWYDVjp+`xitv#2? zOgv}j<&Zi|$7!S8xqK}+JXb_ZrxVGBvEWw1zSrt=k;hE^o*vN2CNgJ}DZ^kgh095Q zgQ6%in|4+;t>T3_tSe3eH*RaE7^n0JX)d&?<*6Fh?xvG%Rq}&S3D)l6lTStJeH^QF z)MNKf=ys2II?ViG)*TNanz}%((fTcQ_tJS}8qoEvA_Z)g8QQb>tij#JA*k9O6h-QE zmbmB1F!-SW{fB_8i7Rkyim+yISG7Tdn3j&Rx!v&O;ZG zyDBI4Ui3ZELuwYuHr`ct-(APeYm}T&a#DSO8Mszy4!CvIfoV(;{G`Nzbz!GJI!hL~SnK1kGD zKsPYy+U_fmX+m8yRd@{~;I>eHw?gL+Kp-P0e3QL+ICjXv7`J-_PbkD7&LbW^!2*7e zKADj$6GJX5zL{JLNd#LOOM6{KxsbCEMq^H~Gd08@K-Pec(I!fndApSnK zUuWEIbckBNmi{LuC7P(3N>v%3c*^dll#%M4`gis{q-h)<=$XYN2np)~2O zOrrCTJ8JW;C%qOPViph}l^DMZ1U+vkb{!xHU4TFs|GxzC*QPLy8SXe_^C zc#~^y$j9k%T&t}Z!l&9gK&Dq>W1h0Guu<=M3hYD41lrVy2_@COenWO~^>_xohCYA= z&y0&KZ(=w%N-7&<5KTivk4#^kv(x(8q-|(Xv)K_l_|%xyhZOF36}GhCs%pqVlv4|f z_a4o!wu@ki-S_EUwU#F4+n3~m^{9{XzBn-H^U_e}?zoI3e`LE^b0hVHYo3`KbJ@(apQM8;wa^ zi`A|tgx|$=Un>jJ1Q3%rKwK<;5Z4d=q2lT6VrnaAWM^b<>hx2M$n*SAoxUh+XBwIj z<`JRvU%qHl!Q?-FjAkMtH>!WCGFu1cP5OYOWYZgHv=V%izWC?_&L}u7@>wK%LOr8s z-4XL*BMWDxQkWlk=ZSPSFg{&X2OTGIy8@vC7PTHpMkaju-P@C1qC9?XsxjF#@|N(h z?I@S(+4}T}yTKCX1mFWdQiQh?_tl%G#0@Z+5vIry3*5%0P*3AfiEYnL&|r{#is_2N z3+Hc4`&^H_(0ShT|7~sk_2ww^5^0hIhyVhhM(O?_1~q`i^t%9*b9m~%BOWg3n1r1^37JpzOALRCc2&Dy#;+x>Tk9Zq#%~bXjFI?cfEhGcmW%L zf~z@STQj0~9onu$%;o=aOQg%e${o^YG&>-?eR?_)?$N9OhpfHFmKL{jzXW%d!FTDd$*)3S> z8&I+Fz1kzrYpL(0&&$)(%Pb$CS1}Ag0>JI#x+|4DqPi9oU0QSMD&D||po{}?NXv9d>{K}}V3uD1{)Rya$5 z+*patyE6=3h$?>wq3px-39vQUL?$0oGFpeAd>DSt3a`b3@TOqB8Zu1xxg_GRv1v6Y%y&nu<_ z!b4N;%G=nkT3w}rJFCr`9@WZ^+KBoRcf@ZzWGoI4NIz7P-iYOhFCAd^#lgwHeiIH` z`B{MJLsll;wujKxa0jbg4d~R3()AINQSzA=r{`o2^tM zwQ`m|X#}y!ZUd~WkdAdxatx}9R%2oG%)m__Fr=S8N+(Ha%)41V3_A;=q6s_oRHwU!55}>K;AUq$ zsm51tAJmH*H!1X89OgH5po%l32{(CLocc+phu_43Ep=>S=KziIB>%>TAF6)Xcg&Fc zV!=6%dpWK7^O>mu(}y%3&0eDEw-W{!bXAM9%fom|9YzjhvtK>73UZ?F>X3~hjs_Jx zG&UCCw5QsV!=8f!lRs(l=v~@eAVLC#qg10UIn00i79|wNeP561>b(jZ@~CDrTQxJo zYi|hZh=R<6$?7bDa&8ZCrK`0{Q;1dN5jfDZ)o>!vhi14bHOYUOn-t5OHvBmb^9z(c z4U7LgL$-&#w1$lAry)2E>uKC+~U39vaH0Ohx;8LPbNP`br>|Nu!hCm}sAuswd 
zvTeOMQi|Us8Z9>Zp38z}Oxwi9?1IY_^A%Y_6qFBjE_g5)F*38o_}BFe`qi~98}un` zs!XqBKehRJR(7;&$3o~d&uQ#p;$qtro>0cIAKO)ueqOYuB(R2O?|JQfRABuTy8J_J zqxP}=wFbZB@CgEYb7H0UP>P?948*qX$l9Podk9-0K|iT}7KU@Z+L1ncpRlLbHr|@RJwtMIADc|HA7efIF;1FKDKY4wmD%}@E-fdf(bfyA?X@9IA;x?$l)^GiW2P=2xeP`8tW|82+1 zndJJ4fHc;`4R%{M_4|x*ry)qbJy~EiD)RdhMX~bG-Y`qlTPLCgXWHmy!cdzOL(8=L z0cNlfWSbmASTG&ZdE~AX=qBMBwV+BQn`}cIutm~&^sXl8CXt%u4!BwTGU)l|7dE$F zHp62LV2;fI#)7~f=J+UX=3!3%?&W2UW<8{m^%FU^RKDTPFR&k5kl?nnC{Si zGeccOEO}N|8HoeRQ9KnA@G3rr!|){BbT4+{_{*}^<*Umn9Q;m+(>J}a1fA|ot-$M} zZbV?(^sQXGqvL?t0-7^Kf zLl!DK;~Gv~4LoW`pYP8u9`1MTOG3JNwrQ5hUT0SF6E&mZN_A&d`rHxW;}4$-;(9HW zehuu=>~hdA`JCmZCb~B-#97lT$ywD9dlf;KH8`P~GxNo)LT}%C8|rhcm$2vg-lK*@ z>}Z`xn*G;zy^%G%>j)LTc+~zAOzL$#7KUSO^-}Lwak(6N)}qc5WlbAK1|t_ku0sln z-%8&L;d0lNjLI3Tct_2JDfPn*ldH<5QM5juXb`cUWFGBLHlmgfo;`zxh@mQOJp^cA z@ow>B$>5r|)c2=z?k<|=8bod(4IrE6c5SGQI;!DEDRymw4bMpd>#1Yg<~@FcOJ58@ zA8$YrwyltcF7^s48Crj!PSjjjtY9;%R4|&YiLd)cWgX78P}KNwlN=gCx)3_JIf)}VZ(xA;Hf%*QhNWQI9w|9v_r@bvnX4%O^OP}0 z5i$vqL#V3foMQ*NI18K;)yxcR#MXF z9g)^2!0L%r-kF!7fX#SzX&1~xKJ^fYTYxTv3ih2Xqd=$;`xX0P9nxV2n>9v#|K3Q_ z!&|(9Wt06W)&toKluQg6*|Be4x;raewThhi9A{%fVU)fdwcUGW&@h84S^><@d_vpM zIA@jqOGU6m0g|hQatA~HJH~aU>(|&UoUP(t3#M7VhO$+;`|;u>I__dOi?AFOzklRa zY#}ZBB&ela*-&8NQqn@a-|p<0UoS%6_NAU{xeM*%2y@#_STASxT6B}Anw;?$2upg< z84_YGg&0Vek-gz2*3610_bRf?_Q@#z6f8zfkjKTUiJe>!L67MTX4O|KL5mR9HO4jY zO#S}!j!bUhTnmnCz8u(c-)$RdE!=_O>Z#Yz^`~hP`pr)0N zg_H>*C){ygt`YMa{3XRnRXB)ZHP(WBEVR#M7V2Wo)EQB0 zW?;>j5V%H}t*!Jn0}W-?arn$N18{9o>az*0)N`IK^xYjR{tu7GSBnU6N?LW5eC=xJ zaFK^H?ISVF)#xz~N}f4+)?F1D=fEP2e6?eU+6_(nZvI~?-cVc(r_@~NKO26bMr5M^ zPP*@lr%i_s&S^Z%@3L-HMDCBRNEp>KDt}K~SW9ayV-J&0F2#aGK1C<(G{s)t=h=r_ z!6_!@yb({7d`zkD3jR9smPat>YaUvtkC~QJ!W^Dgw}aOPs`+~$=i8`v|V(hk{FQlfhs^$#?mm-YWzG7?j-u`J{_`xjkBq9_GBuT{5 zcb5CLy2GPbH%KZKH3F{j`J5vRCEi(a&*dUutulv4 z@A+XrWw+{6CTL$0s2VKWW ziMDo#?*R-3Vs9kwxWh*HY@-eIrl**P}$eRb&3&Y>~SY5%JBDq55KW2 zN%GmZ!pM{WvnF*3w#uYaFT^%h`4DzcMT-5^zeY5JcKL{wAtDrIgniac-bl)%+nfvg zLxsKQl!=lv#&Sta9cb@YuHjy#y!}$8j^z#Iy7jlmZm*v}epkr4U{-+mA`nmvV26{RW51n*(N97_S}6{f3SqD8CyRY;xBu0~I3 zp{hT0GhPJeI{EbF{GP(pWDwVYjN17EW6^L$#>p$y+|b?K4uGK*U~T#a@H{Medtn<48oQdn*0E|+3x(7|F+1E9Xb;@1FiQbuftf9FNjjlU z_prZ<-N*|UbsZq?PXN&~{6Xvhr35g10ij42Q;(naZ)jW>!2U%Hd+w43M~XNjA{6CV zlH|W?*?|a1Ry8};3vaV)9d?2~V2Dsa(CecM6Z zih#HL-{}DhwEF)Nw*Qy3#Oz#cf4T|)s{vUFl`jqpGfh4(-?kFX`td!eq(lr4Hi`%l zQLG}k)VE?R2kCHG;1bYCfxc`W4n2#R84F{VgE;1u`%dk0SdN>Vd)G1F)b;x~1p}a^ zemXqvD$?AwvM_7zDigVqH3)NAb}e=;cF$KF;AZ%#nLL@>!;chhDxD3GLUT@G^5)}> zZawRPIMK@i2`6FdVjrFS8w^m>w!5@_OQx3UMp&_TKb+-$B#TeVniVhAFHe?HRq4SH z(GXMxH^$Llg*{Okt1#2qk`6Xkm0r+at&F)|w3~4G%5OS#ox6EUM{&xTLq6*;&O?ZN zb2oSfJ(}q@j>VI5a`{QUZP}T}i0NKc5Uask_Dmz;8V)js@$|c1ptISv=m6;m9uA)W z$3o-tST7Ss7;d}1O*G|>>YA}0h$&4b#&8x8>t}bqSKy!rH6E80fmoOjW(%YXg@#b0 z*((omGu4n5CIbX5S9<)SR|NKX2*PR6{HB(G2%6#C)INg6jk#m zh4shrgtk%y2+!58#}eg`IclU(OiAB62j+)LaiEh?keC%2;O|OVcDL|tk8X7cc}Dgv zd5fMKz1p1^+Jr5p(UD{X+?c?&rI57w%(siUi@^Up0R7F%l$`| zL%Xz<^fehR5{ByS{*vD5cc3nz*dBsZVuJX%x>iX^n_3hzakoqFDBCUw( zVUG#^UdKMln&dY5ddlC*)svgVi&oIS974mc^JjkiaxJU*hKqB0&Zg~yu6v?)gd0WI zbg?_ufEZcA!F;pHz&)beJo^~s@;6#@*E8FJ<6%2RG;g-akmmj5>XWxD0u+d5nB-7} zuL7aEH$8SYU!9SHt!rJwMe+PD2Df?jh+4q*!-xB`vJy2lGjg?Y`AcX0^w^SgR8W{u z2NbmTq%F619)q^Y>5}Q8W+wAD1k)Xhhh=G6?H7T%*tOi7BmAg+o%$gGhWqfDjazd= z_k4UfIaliQNfznnr+-=Q6~au=j+#t{vjHU#6-bgc%ww$@U>gfdz=3(TlZ9aY)X~#7 z*>2g~1QlMee2Hk2HQJCVJw7}x@SX#sWbxHdyMqlj<&LtSMaJDqou%>oXY+GfL=YN=sNc&mu_vp{%mOXlb44rEQf5k^xVl&JRm@KT?jbH#1 z2OD6O#~G}Ew<#0X!yqck624+SX{c>jhrk9Ib^&FFDg;|KvLpHS8L^c(7Sp($+8JV7 
zZNCIQrzhEM6AU|aS=6PKX9-4(q0%!YcxrNx{-QYgc#+PCZM-0muD~RXhW{eR12b#E{HHiyCQEVG)p_=9K37m3@sfP{Ypx#O5H6I*l?auhC99i5`WjPK zB99o)TkW@OlaAOfsY5q2H?%MmUrLjMj3f~UsRYPDHrvN+v>pbR%HIuk3>~gz_Lk$s z&-l`Mz}11F>@2obVVDtyts?o1+$y!iu&~D#1g1o?ba{I0L&l$ci0ER&MVaDyMZ#Ge z1SZ){Ip!f2-%o4MN_4TF&qxy59Arp&sWE2+H{hs?u+AIUF=R*KneGT}Q(OW&bz}@< z7ovwcc@En**zBL=%xByC(YAMu{Cf~PvW46rNlQUzW(UX#{SvIFjzu5CHl3tCd4&Gj zUHSbf(172$2>5wEx~Fzv)-17rG9iOy0t?JmE4rt6U~7d~jv66G!=N+F78>1oD~P9B zRBNuF{xq?GI$^bS0tVWk7@d=P;Dc;bmI|T395Dk`yn#i+1~s;V#WA<{?;*2VA=sLq?|g2(`F{NXvIf@%lnta8 z*pswWIkqp2^;SMJxl?MMz$#H(Dt(YjEUv2IHTvrZ1`>?r_tD4wEc66WtktP+rAu6l zKyhJRT2{B0FsYZcN1C$8#Xj0i!G_nrP7uYqP&$mJo)UT%%I*6;EV%Yi^6`1#fYSRN zDoVD2q|}!(M{y9HJa-uK;gqj_Ye)*n2Q3L(+q_}^8_BLoaR7JdJ6vmehYt<7ebewG zm+Wo%O0vNx8z`umHHCsPU?ua`&x0K~(Cth-^UH=A^NWVplV1(tC(L?{?yA=J@O~HY z+QM0*1wgo>fSACaQWrmxvZ|gAf0w{mEBtWIrg!V?fJbByYe9?&I?quFdNs>>$zPEM zkv?)Zm}u#`ag3l}LEc(F1ifdijr6;bz&e40#q11`Gdngp$?`r0vAu^~UTh3U%r&azqIxLhF9SfOeyK&=yx<|C{ z3Wxm0!&V~A9@O687P31vhYsWd9N~6HLV?#b0)s<(n^}Z5H;qVdVHE;d*X)=;2u3k0o&-Ip(Ef&i| zD~uFCcukrR#^8rdI#<-$MnN4A*?SNCJApNKJEsy(c2jbL*N!=DRngnUHtidiVXh>V1L>^s#4mDT}Ng z*#q{=3w@+L$Dir55P?l8CLSoBMSvUS8GOK@{zWaqRl zV)UAj#Uv*Auf~|}s&tk(%GyPg!s_GA4kQM*@S7u8+7z!KcqvASV`}r`zblj_D49qr zz)N?J$iOnmp@E(~aA1a;J3uGGE^!DRu( zoq7_|2R5*Z&LlyVQ792khOg0wBqkk-M=Kkb7eor4*>xo z0^#Ncq3#C3E)G$(_Ar&drP@52r78|#G%)oxdZzX5=vMFB(FfkPirzX=afr%?e3!@G zpx&)$)yIanK^rsqbu+|>G!%`Z1Zt2Z!9bF6e)7ipAKU$v)vcVMC*Z{)$RVH%UmHTa zm4%~yk;HE547PUyOd&16srWy*U@9(vn6xt>H0}ITt-t;J!vz~KM-n=VvKNcwK>0B5 zu_H=IikTm|23q0LOJy^l_@5$i|(jWj8_Hadbzcu3i#-GT~M@5Rp^nb2XdNJow!s2-^DX=p8hVe@~xY!?b_1vQh!Oy!oR2QuME<&P@%FI64*&|3@`@!u`FW-Wjv~sZjFgXMqbK{{Iq;z zYX%^75Y9Y?%1{dzW;eF=JSMO^<@xO43+fQ5@=5|b7MhjAiVxC9xJI@72KpR+n-BkE zyNHAOL2N~>sYl8Qrb?~SyJV=#FHv%!$IJp#4#EyajPxFA@-rO1@`_4m9MEMj%SnPL zWAur>G?jRbjU<8e74FfVGv~V>?ycOPy-nrN5qP-=mQ*t{k+o;G(}t@)>vai5keW zxYA8`;alvBdFbIo4`;Xi2DHldd3zyBy zZ@{~OFQ8yy9Y#$HIdMO{aI7vzNxHlMnj;x-vIEW^<+?wMan1g&u?1ZHD46|Odg%49 z$@|U4`jZZk`k_M}@<+>lsF2p&neM5X4dkDiB=~>VB$@o;5Ww;Bk6FE~F22u&6*F7N zxxUeGvT%r6n@?Gzn~f{Lm<16peu8hyeKfyMS6=K~+jbtTL|4s)rF#^yK>+ zuBLcR^8LI^bYYmlcnB*mjH$2an3eHgm>%yXLk06DWO%~$G|j@AwM`z)sVW`)4r1>? zy#$bGOGLSx9Qff7!=giE)p;R#xg6rEbloQB>*QCI2OG-h2$G!GY%|=z38W{?H4~xv zO1#$^KV?bQe8RQfA~>BL-{2b<-wIc%v0Oa*7`=Nkda^WPtQ>iYn^hV|HLEtCHvd^2 zrZwA40_)Ds)ukwsgbP!t!+qTdHIhM_VPq0P79o zKL;)Qs1ww=g!-a{$bLOU~SJc0JoTBchd%USa zZXOykG;>>t#&0j7J=t=}bSNZP14#=Ij;~NmEb&2>r&!yviW`0lgyupxFSmE-oRrIA z7G%dz+#!wcKJYjVF-%%2NsjR7bi$<@ClxlAx=ZJ>yzaIP#O$kXM-LsqB%IGhA22pq zc7!}oM1?XUCq3T>ko_Gz}Q>4ypb0Ep` zxMg@_$`_tU?K!jF(aKsjn8|24Go1!v2!64;R@ij94^I0|Djqb0F#0xu9dvdo6^x9A zfuV=*1nsNME6}BeWa!-y+?MP=z3@jG8aBdg0n1Du5RpLqPoc{K)|$ME*s`{c^I|`m z-%a(rQ?ZIAL~%B&CC+YS?N(T&h^1(Y9eH9nSq)(`M9m;Q;hV=RzSOqB!q@KK)#ZxW zqz)JAch9p=vie}pS~|oSMzWDlTuZr2^m#9~1d?38x@;WW9SA?jxfL`(o?RWxt@5^5 zaKJrgYE)m}Kk81gJDW6DW3LRic?H6rRG;{1NA-P^Zf(=jqdQ6EC~x#|cc$R4sD^cP z_w{ynzFMdA)0_$-@`mWcSo<&2mAJ0o|M^N1{!}!_+AS zX{tlgVa0fs&*zm6q`W*gR-H&OO`2N|`<&q5VyL&EJK zcVD6xvv1|ou=y=K#*$3RKdx&sm?)sq$obcJeZmP@a36+Ighv0iPPoN+7(M%3{Mef1 zOcR!k3ug1N5gFZ0$7^trNJY@rl%z`PG7{M&9Fp`XIi5@kS~~!1O^czw6I5TQ64(9} z1YU(D3{fQv0k-nT&gr#g6OFFAsfNn^R?_F(`2(W?Cy$;M4|Cf6$+#vCM5dv#$@MM? 
z76wys3C9g#gLCD2jV7QQty5&`Jf0_aA_x*`X!(KNR@A3RT0=1!o))z|-le&i?J*kl zh)4O!v#$GwYSM)k(`)Pk=t5nKiIAmDw~rE1ja6M4ZFOXC+HbF~P8ckCKzEFKAcZ#9 zAZob311-HyhD8?I5_dHWQ4Xe|m}22e9Pwx0l-2Fjen`#M+tCkyo*+l~h^>@@=IP@B z{0*sL79I__P3H&~oVO2)0=Q+HbmmKq4m1_i{w%(Pw!Km0+y?qfm5djBF-X^gYYF1A$>Rj#gK>UE4PMAo)1Mz zSUyWOPvChDWgidWAMiey*iZ^sS!>_cE)Bsqz(a#5O>+4thXv%@V?WoTE7y2R>N4@= zraHRFN@nVt2xBRa5jevp0GS6=Q$UVQ!peW|+&fZ)-?b~Jrt5RqyD;%*y`F}qJY1KV zN^^iBp(_eF?q)Iuso8(1)z$)z=apO@ltVce=qUo3LjYvZKO>?iasQG+|>oD6%Gi|ei8Wn)Jt!cLR)H9_nlG$fjO-Muz}Jo zXM@Q#j8TIveR80{(b?I3K#*2Gd$M~PX>KP4OA;N(%#X4rq1MRSyFz}-`u>0#2P!|C zc*)RPd)Io!n9+73*;1s@N&Ou@Vakq^FEm{|D{PJRwZ zv?+xQEt0rAVe&nNbe?NcY2w@}ane1u|Ch}HE<_dfMF#8%3f=cbN(iXzXV85P+v$?= zujkntVEp{0?}%mI04uF_evu2X<*)4X? ziSTwS=~j@`Rq1OEAncvZ+76@f*H9?J#I3Zs#F4&Bw3P{hvEKmyc zI^@Khs&|ZT0n4(?d22Ar=0Iwv*C9F%g>(F82M;YB@OP|;pCDC;*iOV7Pjc=?cbgz! zfn1gbTYd&T(d}fOMMyK8aDodY4h)LL$(NbD&|*0O)`K+Sxyy~%1`mB7$L<=#i|{7E z3drxyA6O|GZ=MMWl2_@vBHoZZ3r?<+ALi3=yM4_vD;{TdMfCi(8Vycq*UE#98)pqM z^iU;^V}0pDtgvWCzpg;2m7q<;ED2Mo zPIc{Mi@g{5p&jzj3+3BkXzNyDETW~(494uHALvrqdIXd<*Hz3i(Ni|AsX#k%4->QXv!5T&kFMa}o;VVE zn|!_Fi+3`=-&#l(>P(mAs-?Z7K|#}2faXs^yGo01uen}b%wL}@V!jXbS z(;(;1IG^Sj6_$0f*p$9eM>I{<k5Jr z?j!R#wgY-ipRHDq1A~D3eSU0KF}_iOvdO&+ay{%@&Q(s_`azqtJuKm|Kq_}Pl8~?)3@cfKZniKRM>}+^SMqI1zMbjq&rsZVm^X zg@x?1Wvk*x_Hi}2Jg3kpE>2Xae$yLQtX81v5GzXEz-=_f(}9U!6Yp#Wi`O^|UX3LI zHO;)+-H_(7%08WHLMI6H57L8Yg3d3?1m&oi@id7_HVkN${4A3Zgf?Wvf1{^-1>)M5 z*X0>+JMKs zUVq$>U-J9#sZGnkxp8~38^Ru4@EMhYEWE4kenx)7_IfQ&5AY|p;;#qf@9_MB@@>BBZzLgdbw#|HQ|2hxwj#~+ zIz(>CBlg&lLu>`_T>E^~g+l`w80;7U_UM7QYuan{u>zk}@9meTt5<_S_93TE!5HIM zr^~vu(iPAap#iNQE{M^-(Nggr$A46qzPhbDT{vj!6duWD;sq~~+TE(lHmmAfv=Z|G zQIx;Y0 zgDj>uxgHC@Nw;%*qU|?}OzGK)s#6%l{@L`&mIgW^CBnJMH&odY*3-7>?{GoMMKoeR zQzIxvO}CuwtPP2CPXN=!+h7kp@JIDWvy1C-22^WkXljhNK}d8k*x9cqYzwUB!dhwr zMR=W7Gi+5KuiF<6^RJL~tv+JVQ$ARuPFo7+&O+4n3*+#Il!_;C-)WZ@JAuG}k@5sT zLD*BFiS8Vx8ki&9S}?DdFB5^G~N|3BPR@Oeqk$J)sA8J(wxx&c#54 z7CbA!Z|4~tc5~y1YT1Af*o`|QPRj672XX*GXNPBae`_f~@rqJ@+-XTLiO6KTDayi( z;Em%4?dA8E!!XP75OTQmS%FThAH?Buj4IPse7>g|D9SV?CtT#Q9@5QNU9bs4a~zO$6t zI5@u4y}I@8m3JT!d0FM90rT^C#05|;NwRBO>=tUHu@)|8@LmhyaKM z{YS%Jp}c>B_(7Qe1tI*uLHvvc{zCfi1p0qt6$bkJ0qF&u{w45BcJwdcTR_D5zmD?v zfaSlr(l3Eua*%%k9{?uqpMYO7l3yafWVikYGQ>Y4{{z?cWhXBgqrWDH zDE}Gw_dWmLb@$7J`Y-gGKcEAa_RIA8%SV5i?)?Sc1gIJMXPNwuB7W!tuG+jiZuZQrtO+qP}nxPSF@znSU&-`6wiowfHlXRRHv zGcq$GGAJhr1PlrAx6go27u$dQ`NtQ;_ph|D53@>{|A&D32ZX+zzM+}%e*>)i zmsg?uGoXo+oujdXi?PGMLBahi6f1K_Cu5s`i%axB;#!;A=>MC+G5+cWj*eFU1`YeK z(7q>ZW$g5CF!BEi)7-|%*ug~K(Ae?cU=#cmw)6KgTkG5CoBrG7!u(5gC-eV4-HLzN z)xVyuxz#t-|At_kzZ&sxn`>_K-+As|SqcBIP)*FOj2+#-dBghuWne>F>+gj!`hP*S zHg4m5^V`i_o` zs+y`;%NU!YVv(^!tWs9v(u$c(O?9d6r%t5DNU~Du< zd)Ow-yi<2_y|-E}+MEa`G+7?zUVEQkpBSn>vtx>Y1B+OXX`E!T-)lcNci)Tq{&~*% zg5E*)N$Dj@Ww0X&C{MlO2V^|kH-5CI4mfus3BYP8b6ls*bkL!NsSr*Q z2B)mJTn1i3^$qa#>FpeF@YYPQ^6EbNJP-|C%|}lIU;XZ=pr^}zI0@--t>U3ttzNm5 zsHyU-;gIu;okJ=ZN~Wf=J;GtDOjR)Wt>bV?xv{lNCa4fLlJd^tKDF{FcvP*RMyDUA?$XNIY&~NR&Kf!zd>M( z`<9Ouh7XxxL9Z(sXJ(bB^{T>XkrFDfc|*TxKVr$cT&;0K;&#mcn!5bV0$ia<8BXma zWKCkmISRERyo1X6bVFrS>wLjk)BLK8K`B1v@Z1?AkR!d~a?zq-E>L+s(rSna6;`lNC5hlxcp^<^ajxe9MJ$J!9Yj&59-TFj8g7%so zc)o24bG@nZQ%2(LfrI%N8Hl+>LgF2qhW^mv{^;wWmHvRdu9erO#kRK?C?85M72#e9 zE;%obR>jWPu=60!+vkX+z#C3}{ejdyv~J|BwB{31z3(Rd0oskSvhBpg-cD}KaHMFx zYwk~D=XUQEiPct2*gq84v;PWnsZyr#V|Bx(4%WF(cGF?kjY|ruJ-aBp?$TTtF3VzK zzT8U>H`lh6KWSfl2k%d@Wc@t>XVas?rbpdmV@o5B;&@m7ou`9g7ACP7bKPxGnmos$ zhuZUEAp5F0Br!I|+?+aK|0~$g27`hppfp2u)r2)E@_l^QwLQ+nHMr_I6|=JDhC7p z>tpkYd(5&8Cs&buWVbo)4_kGoT-7M*FC+1i(Lz;N+A40ZY5I|AuAJO5NLa2UwvYRM?{kL785Y2=U7bawvel$w9dCh 
zItAqE-zCzItOG=f7R7`p9>oUAaVYlj=T|VU60~A&{#aSLVFxE)Qz8RP7sKx42z10ect1>7J=w#8-KN3tycytmTYGUK zyw?y}IgIsUG63QcdYYdQ83jXH(jl2~oE37=g>$pT098yQaMk~))ViQUW8dhH&oq<^ zLtU2LtB*TZEiGn>nRBjlou0vba>=#Bdw;9(h3V8zUDYb2oiPY|qP4m4WP0*sx+{0d zce6;?zE78bptM0;V%n~l+voQS6FKq*%pcWqaD#y(p-Uetr99pll?8S(k8{hP=xE-gsJ+B?F(q51Eb$x+aB2C?) zcO_s$?3Af_5A0!Ra*&9YK%kTQUZ@fuKN9VLpgsYq_5b z=iyUJJ$>>`8mT-LmvlfYn;u*Xsw;}1Eh65OhMT`n30tTFjjd!dxiVXIPL)dY)~L1l zM{0Ah$imB_#&$}&1r#kSjY}58R^{Sd4gTR2z~ipSXfy{11hQb@EJ+D2PZ*ZD>^MSDHa9*~~N~AY7Kp&P$v|H(k4|{jh+sO?Rb+g(7k4{e=NbJc~dkC_YHrxpd z<4BgS*eT_E-Ihcp7Xr*zDvQ9j63K}Nmho|>0v352>Q}psXovMga4CG2*=Lb`2s#Ir zVGMNRk6?3OPf@`aOh4Xc^F=V`uHx@C<0qb3`tlU-Sqrt7rh3>6EyXBCx`OfMC#!_y9WxnOoT8Mlejw$5=-p zAvQme`tVDLC#(%H=k=H0cASP!e}Vs!xrkA}6^dN`GvuXZq?Yl1)$n3Ut18{({QCn4 zw89c}?NbnVW-)wj(1nP=P-ASbLawTqa?OP|bN&H^h{f25U8ndp)#3!bOU$hM9nC=a zP(vP+#;jXKNnnIVoE|k&Ty(k**j(r5q}j28s_r~OWDkk=*&W$GOC?|T7NhwO0D$;! z$t3wJ8}wg|&&qgd**-q_U{i#j{$Auhn;mC^!HmNqgb)xAB$#lx$qb3{F3m2@p}vy= zyghiv8`O*Oef)XSELqQ|7ag4QpD$jXV82+j(CsXGF#Pg{xx{x)%chBz3kaM|%w&wo z=`{3=JfsYm3SlAZDFr$GP*v@Ia<03+WXFyu&Bz8!J#zbvUC#B&d1OKWO+hSxK;|Z* z5G1h?%zQ2W@Yv9~lnh^*BMgvxm13}d)iTqTcDCIk@|%r@Jzw=3E;+ry-2&_H(>nF_PX*2`#+=)nxU(ex^fI6n12}^aa zTXZysHKiagTBXo0z8?>YG>9t!d2{a)r>^1%Kx6CtvNr`prTp6 z6w}@S27?QnONiWvkIV(k<)*pMR5U<*VI$$20p|OQS0b}Oo}VIpeBJ$iYkZ=kv%3S3 zt=BI~8_N^0@mFLqkzc-!Z)QnPQt3m^NLH#+LVBD-W3-lv7U?Z9Gy{or>Hv@*4OVqQ z+{Ey~@qqw=-IJyQI+g;`X@W}%J9|S}l#<9!!?S7MNP!N2-RjH+^qVW-<9VyBIc}-- z1(ezvacPePtu06HQ;XDTt=<)gRn}>uL6v2=(~}e+3@Y`A*3uD42-jXD*BnQTarAP= zSeg?f(w*_h%|8ftN0i%G!0wz6-nBiIkE`$3Ut3(oQS2gHw=AuZ@ZwZU1uEL5Dxw<$jCP%=gXmS7P;_TP0{~V`FUS zWNvFCYw-7|$KN;2II0%bpATNh6al2vDzf``RB>ZrSP}$C*eAaVm%!D{)y_WE2LSJw z+dyBRxrwQ+%kiTR*N=-h5;3F+h_FM?=_#b-b4MbupBkl>@^0l$-YZ(*rN+w`m#MKR zMa?KVN?3hoW(`MUl==5_wKyv_!(f3~wT-b^6*g2=UGEID6vyfzyMLl_6Zcb19CWg4<;r@$1|K`u~_#U@BeDfv?H~;|C|2S!W14kzZeM6^z=F_A^ zOIaj-s+fseAQb(of>3)1YrIe4FZeC+jvaG*Cy*?4Xs)WFMyv6Qr|`eW~DM-!9h;di9@8>tp?5A zDhTp-!-9*UA$m87u1v;_Q=oe3l6mQ3r22QWpJ`w>bQgB>Q2)Vk8a{8%puU1 z7Mhax?VAmfTj*SnIm+PI>#)bi;dLx*Qrwy?ji=~Ro3X!&?9~pg!0OEujo5T4)1InM zH5@i!MLN7A+H@7rnyGi~LvZVacUw(H$D4taSoFg-R+maRm|ex~uC>_W*AkR8l02_J zq6)ayB&0Jdh)Of1#1sT}KjNecj@ob}6r7A?ixx)bxM*aS8+7a9g*5U1L|_|BH3UVJ zLY)|W+EbKra0fryKzB}iYdJA)p7bq`E9SvEx6D1ESim$7Pt%IQn0k&cuon>EiSUj7U|k3^Tm11d=-ngL$e zFC93~3;*gCX@*ow+w}q&)&GzJSY+NI9CR(0t1cxkWUIbIQ8hbZhr_z|1Uh-A&Gd<~ zD_bl=&&%d2g3XO0Ygj}*CFde$Qd}H*LMyTnvqd!Ne@(YEX2Cf^wpvT&mB7czFTf8k z2;q>yq4X=zAAh_+{S=TeMgTQ?P#Yxli5;M>3|M?t28918W5yrjPM)PTcZ2t`3~n`MraV{xfbef4nfZT|qW_3eN`Hqc z|5=PGHK5$ERh_?L8s;iQ3^i#fldWG7x*zJ6tJ%UX+2V|TCfCPDEw98Okzrg$7uc(` z8<;M+Ad(&Q5=pPk1tZhJfM|r0&m8d+3xk8VEc{HIV}+4B0g^b4=*p!IddN&$U2P^g zF7L9v-e`T!cKq_ZVn5Di%<1}=m-U71f%PbcL`p&aNDimnYqpg08=vba+i_J=+K=z> z67Nk>UTC51rkqqQcOxS93jh1%7dIH@SqBUYl@rV$72wjsWR*Dz^)`)kmECp` z4@(q{;yqlgvcg9vYijG?QFB%bX7vJ^N|6c~6AhFL{3XzMtw}TRGDSlIDqc>{X>x4r zEG7j{2>u>S7E)`Y$oe6S)v{-^_C!qjqz|!Kb4W`@48|c9Mh`muu)>+5&7yq=;kXfn z(Bp&^usO5SXSu-Dm^(w7r&D)*n3UGim|>|1hL>4{R5;B0O>t+OwnFpK50fs0Q)=e}yn(VXvuBkee6N(k{Jr-(=mJv*AmJ9Xo&OS~08sKhT zn;UcRfjtuFwQElc-`?r1g8$^T&F?J$v)aN71A=**wr%C093cH zPwD<#Zt2olpkPm7;8mkNWqRwSQIz-fE*MX+~MuPUyyHh z1DTsYOEK77OZUQto_^2$)HNEp>_Wq#0+I1{q4M#ou^afP99;eTt>YQt{M6}&peC&A zAL>MXD}z(+RB$E(H8_$^#SqJ=5TT@Gr14=hd~=4q}Vwp z4HzoPj$T(sTht?}+@+c_4>c6XjTKop2g@m8KLte#arZmdpp5(r8l`1Qd_5$+d~{C{fJ~zrXI1ExXJ?(O(xh&S0PVjzty`bQR`j#tT8LkIQk+=jN9dWJQwh$9KQD9#TipN&GRH zKK?C=G3*Vv3%+e@PliJ3lw@@BLtjQ;^(V$_nXhdLKc@k-H504BGPt@4R} z6fS}_uSAT!PrY0PwQGg3f1cTiHD+hlR-ElDiP8qQK`pE3rPR#$NsqijG;GyRsy43s z{fUU`#u`(vj5P;_#;ukqRVU94(8YQ85{+# 
z=1EUPG&0saWtO?EkMzT1oJ-!lhi=G+xt@vd%uK8}MWnjShy!g!rqbUbn^=n97o{cKOF^OWfgI9louF4&jcBn;2~ zX6XPg0wV+K$Uo*9Rzo9FQyh3oZRXzhx&|TvXF8j0@LRQ|O>5c}JSBChXTWG9GTlXp zV$y=$=G2N>wI%T<$2wYPYEXErSPSdDvVNm!*D=nzli7#5$>y4h&(bPT{drVIMdDQh zH%+k%$jNHZgnK_>axEG3<0H<&N{d*LL4{y2!nmP*o##mlr?Z0WL9VsQgExfzomw?J z`&4oQi}lY|Hi`{-D@G~3Bmy%V%32*{WFyK8Rz-tgOEhw7j54nZf=nMEJSx-|k$vBE z1Sztw?H9C#!HltYD+X6>y@A9SkaCASgRfx$GAa&NqU9DJH7!MS3#e{7hsR@(jWegd zq}&0iXnWM*fT)PqaD3x!x3y@J>DuashF(8AH~hI}PoopP!0BeEnvn`q zFXuP*&4w7PpYl=XF<5J`w0xx7hG&rX08<(4APM80`AT-<(RTDnT*upSSmU}Mt4Idb z$G98&axDvBE{Os#}+0ks<*hj%O2H$?!1Z z?gGMOb$Z(vP%g1P=7wb`WOS(-KarR60Z9Q+U=$`Css+X*jz^7??>Dp0HIK8#m5xV2 zO70IZ7u+3Y!TUaC{`~mT9&z`L_0^&nTlU4>x5!u*eWebQxcb4#VZx6SRpN$`G!O1% zr?^NTbGEBaJ`e8D9Nd1!WWjk>YKgf91-m}nq8(;o^Gx&$NG&{o>NlRH9rz_%Wbr%nLwuRqP^8`Q1**)QNae z>*nsvMb#afnCycv*IPk#5L`W~(IP&Ox&ubY0ewiJmBhG-+_U>a2q&!}SqzKT(436> z)Ngyp^Oog7+O1CrjI9c;*y{7XF9}Fr}dW%#lvWDWr&>FP) zLBq??k~zMXrnNSh=fyLz7>99|?m{k@sr|u_EuLE=u4?d(D~XIZge>DfCiNwar^+C8 z7FGP*8}5BLLF*~bFVgu{qd|}5rzxApcBq<+$pr3KcqXcQ)h|Z!SJ-2;`|npKm6j(B zWVbdR?_2zaZ6~lc&c!l&yCTqchre>&H<&t{VZp%bTik7bwKtEjwB-aXA%ai*HiaB# z4CtKB8^YikA<7iFb8iCPxPpGG7qGG3Wj4TQ%))U*w^OE%>F;-VYpidvhSgU!|Ip+O z;rFpG4GEl&!@!=@7|2&UW^+PmcVKL{r^Gs>aoNFj*`VXRGIv=)Za+7)%9C|?_i6Gw zUUB3ZguOv=@w={f{s6uvgprje+LETcVY2S^G(+2xe81)42#jC~G(92a;Ujv1FT6qU zh%MhPZXy4J&m%Lvixk~>DS|Se-AB8FCO?X8)34~p>dCOKGNj)^<`-~7Y~45iLbBEC z>BiPWbbVO!$g>^RHDH+Zfo+#%R@nZ^8fVHl+#)z1?c$$;69G9zur~pkVfP`Nnc6=J zAS1&MBb)0{bDo5jy36u(Z}Vj2{zQLoqY}sU&NlM!dbTlY59pN)Nm&9*TMH!Z2=0NA zu_Dr38;09MDfNm`)yFykaZ@^_O(rIoSbGa05+ZmZ#2YjE|GA`577a= zUE>?UqF4E}aXuPFgb#TxO+Ybgf>^v1V1}R=_km(={7S%rC1u6@bAV!qoZ;OmjE$th zW&Ez-rb9Qxt>hV^~IiJ;iuN=9K zt}al(xlX-z|NLiB(chCp4`h1nyx*ZC0T2KHk^i03`yG^4{ErGDIR{%OTSHqbL0cnZ zLy_-@lC6XL-{WVMO4i@OBzT`DThVEx8uG3@Ik`1L?sEEBy-0+$fdc;dB~q^r-1VoC z$EH?I@GnYfV&?Q0pbz<>)%K=PiJ+5^_N=tcTYV&rw{Tu;JMHE)! 
z)*4;;ANq6z?J_+#cgF?Rt8gv8d_T3xVmFA0D)I^6AL)3?_>Jo*lc|ILL_^MUYS}~~ zmmK<>_oH@Z-%P0HO$GEIY>9krP=FxB6TG{+P?UI?-FFd_&B#SUQhEOA?%7vAajv~y z@7N=^u15l)yix~C_LCf;=4pDV5uzHVyK7h)L|B4Lh7ev^Pcu}#q>Pu)k=fH=M*jg93 zk7o&4xA(YgPp*cR3RD=W(OcX?2j!j`H)h=gNu< zVe4WeOkSZv5LBv+zHPA*_{4 zNyJt2MmDWBZ?SN4!0iDb3JK0UG61_>%M$0r`M7KsF94@8^DqOXO$}5oioyvlN?HPc zpair`&~1vJhn?6WUzDNuDvp(BeaF#RDHiL(O;?cYr@huQCM#U z1~bu*ZWRUzb?B%vk9qW3UVLa7wanLPdU3tnFj27CgHKw>KjvZIsW+r4VnwG`T!XDj zI5`FyU3weus;pCmgC!RVi*2%#)*SRYRw)RdIg;S-n#^b}|M*FBlf>CNhf*21dPDRBOsW@AF-{5#OB$jWQ63kj&K()h8eI2B6Egg;Hj9MLE#Q}{+*6V7a1hZ0)nGxo(gpgh2fNPKy-*DwUvrp&yoAQh4xqT9Uje<8 zzJ>(m6A8?`NAO%2bLc#vy>U0kDeB@YROhf<22_Jk>iC0KaTVVwpEQSD0_*~@Ch`fX zw}A(yPQ3IE?4qR`G1Ckv38D0dE5t4C<-LVAD>TT5AdfF#XFT&<4+mOwV|Cgi4?z`z zLq%mNpDWDsA%CIH zfm$dHK%vr(NGk@4AauwHAw=Y%#O6)0eL?7J`kS+mVm+99fjJlH_`Cq{B<`;w`{gqH z@}0`w%sh8}TK{)rMx$U5UJk*!^+qa zv@WqROP`zH^Be8h6craW1>HmP{3+P_wW=nQnKQDb!35KC)^&gaZj^)j^sOq>z>L|E z7NY@!!<0?O!J7wfS;eizs@lzIP8~7&va>-@^^z5ix%Ef{x>Lm({iJ11b=TA z6png2iuhT)w6)86a!fBw)bECtjN zy3N8sd(v>Ed;nDDx8krikYY>mewUv)*9^Vk4na7m$?Zswy%V}yBs-AojgshS595R) z7;UntsLi<=T}|jQXtJCM`k!j7Ub}rde3|MsKP4Tq=Gyf>zg)xT3UJh)m^%!a432;N zI}D#Ov=w6Bm0OeLjG}W*bIj#X*Y88A<8N^3jvc_zx8?T^oT!9v1@+#`$1WfFH`Y#J zKP&H|TJv#)?-_j2kUHJ;w^euWOB=$EL7_j7_#;K(WI@_QuLV{4UDm8f(+DqDJJ<02 zRfO1Vmq*xbS>TwP$pFH?^Ml>a?{AZx)_?qibUZ?1W(_B2D#`gZC{Km(;>pZSXUEK> zVZ1@!g(RS4;-ttzKG~$_%+PnB+v535<47nEzjFkI=96&CvkbY$jT{=?{P`rzabrbw zYo-O~GrGo#&JCTIqeWf$ognM#ZAjMy@%;_c`C!X*Vt7xniL8_VSjY#!t-^ENk!8USsDI|G|F!rCfzKVg-OA*wzYG(^VA@ghxVZx`7(!)>SI(zn%p9!L?Q|e z)$X@T`H*LfeOu+L8Yywp5e=6YZjv3!GGeak`eYKH2^woG6Jc32stVKLT@d4_tv|Fg z3(qF|l+Ui?CSsq@*|#|3GM9!Lg=UxPv2|keC88U!Ht+SdVoX-E%*)CTs}|iL9;2f* z`dpeeE~{`6-Kojqi{cb#SVE9JLu3^NkQk~Je^Tt}%!$Ycn>p;?DF z(fHVk^b&t=DmkF2X~x5ps6ZU|BTjk#RPVbVZnUBwI1|+pOS@X0DijgxpTd|;D==Gy zq*fhLoM^;H(^68f(N_QUi3Fk%HqBplak?0KM^ki}CqA;I9@|C9kM-Gqf>=};b7mfo zpEYvkc24h_kSvBKy_qdmFiL=Ma~EAVz55hXH>As9mfBQaRv^NGWsfY4+gLP$u}^3R z5LG8wh&cEbnU{uIp#Tyg;n@{{K(Tfc9nPjf}IECh%SQOEz1$C#a=TQz4Y(E%FvwEscVwk9R1ObY(}mxvSbn z$XiMn zCTm6|10<%!l2neJhqbJP%~u^B;HDzX3tNuf^E;dfUE%g8P5DdYHm<1cuLXAewqyeC zUW|(Del1eJ!1q9o7hkaW7}vEBrkXFGE=EX}l=-gmQHT31}q!EJk_9 zU}~=*S}53ZMDZI}aw3x<4Q;i>hpjs1`6f_j&x**anBd@zhnKjhOl!npb^*&QTWD|B zPb>FW$7utS&n$JY^oWP@ouj;N4$0Flm`kJOdByl8$`ee>4F9>MFCvPr;$lcH0T+U! zxJy0~#83mE%^#9lwi>GdY_1a$iZPl?`x^&bK#SJAzYP%YZC)akr`}XT#GSplNQ2tE z>#=p+83VVe2j_ps?jMKTdKSJ6*G%@?7USHvuW6QGIVn#p6~ATTJG@=A`-nuAtyhh%hS-Nv@G@op=%f3p zK$b<)jjo2-pO(D!tex(E_INQt(|v|ItIpr@YzcT1XWCFQ5!_MPB^gxffRzf4fWJn9nAc0-A+tf3ePokmq9{e^$mr#^sHFc*+#w zY^RtG>#ceCf};PCh%B39%>I>2_mzz7+mH#X>Ca*Hv$%h~pr5m#U$>Bh>nLaO;(*c? z7_>$53oGuA%z0RI`(b{bL+37IkaV$5}E zggvxhvU-EHQfE92iV}v|3rIjvyMuQ_LQMPxy4P_$I*_&!tE8OB$@o0U`JCxFjrHea z+I1UCpRs1DNB=#~_f(G5!S?A973S-B;zkCQ@6d3Td(3kkhM zLUqjqA4DeUIwRw#KaVWHAb%Hio0qk@=7TO)M_HwMwDP>jbZw3n0^3H+SMhCvx-EJY z9(|EBqUv;YP-2HOd$L&p4QCpH1`64ceh0Ru6YJg;Bwq570s#Z9^sZ^eR4Ht=ZeJbPISj7c6cH zp}X=cMY86$X_! 
zuiX-M#AspqsT&}a_Q=7$6_w4r%;k#gfKNTOnHs@$q$ES58-CPl)u(C=PLyIzNa`8_ z3Eg_sr!VuankVjfw)FCK-AX4}%}R6{Ws(C{{kxlj0xhJ}(+1jp(>OgI_6GfXQF7@* zMi)sn+gT#swSB1Rq6pA_3#pw0FCHCGKRP{KI6ctxt~-z;TY|`Hk8?$VeSeSi27?`B zH9EQ8T95ezCV811YBi=C%rVFj>|5zB{YWY8x)3c8I6Bizp5=0@oF*FjwDaYt9ak~w zi~YP4djwHdeM_x}t_D~9^NS<>z%=*Ec5%TuXW6kcd_@3Jr0DAd!aanJDAYC|a*;3J zwjnVjV@%wK#5H@-Qmj#?DHqsc1VZL8Fe7b)s{x|ouP#r};jr?nY~xCvt@>5{Lda*z zOsic>;v!R-=9xd_6mfIwDoj?npBlg!#<^&Z9;jqD@ySVp;vExr0+Uo!Hz3cC_ZkL% z2ePU1&I{I1rBI{dCZwbZ+l16~n|(c|=yH}Zz<&q@0yqO@^nT3>U@E&v#!p~Et)}_~ zsJA@GYA0lwj@p+^5mcL}5U!DYn;Z?22$u>%88lGlx^s+sT1-)>47XKYc~Cgk-N?Og z@!|exW90-3$^zwaMr+|3BSW=3m*`rgePcpOj0DQL6WdUWq4_>two16|xOIy5Gqit1 zS>Q}$O>x2lKHUIJC+CkWgt#7LC)Gptr{035-jb$Xql|ot>%V?i@vh!=ujF;-jHs_S z=q@Lreu4b6pQq9lt`GTL2io_2)Bcso`8Rc;==dEJwJ|X_b#~DITXo^z^@Mk9r%XQ` za!`z<*$ZXj4^k7cWQ~a|IVCj;dN(77e(Sw42g3EzzH<5-050S{h)^-%o!%w(E}rLm z?>}zdKz0%L=*9F?=%gU-1j*{gT#ebg6yhmZtx43Z2d$3jaLtav`NA2Bm&(Rkkw5#X zPRm=9i+1`njl*BNf28DY_l+bK9ZM~esLztiM(+}e=lGa<|1i;ryLFAC#jpX?GlrtE zOiLM9?s73|HhNbP)E}lO8#;2TW(soKb^5JVo>yD9pX1z#KF3?{f=4F>)-k*od`DMx zzU7d}s`85V#TQ($u&4P4+H#_YPM|q2R3A=$<~^1lOAT4-&*+jyFr2G-!1F^UHd9&I?jtAe=WJ}!B>lm;Y%z0rY>A?p@DQ2 z0AO{N)D-(^O+Ih_o);QLl{dm4&v;PT?Z>+HzLrMUZ`=noU(9oppX$@Df`YV;Y=R!6 z+AiwZHNIN!@p|?O1wdwR4gYS>5ySW{H=xd)Nr@kD@6jUH!$kvY@FV5nt`4{AB)2St z_D*eLU((Nyb4;}ghte-!()CjMy$hveC7f{k?eJpb!@2RX6KACg5)JrClww1Pz%ZiA zR1}biwEyn1r!jed21@Jdtor~&$4j_};_`9ojN?cnKA=z;Yd{RdH3tqFX-696V-Pwfaa^dv1@Bt$xLb#@Y2R zMO!99<$G?F0r89CIoqtg1i6AtlaoQsC20{H+x{n{{B*O{-HKAT+>J^TcZaRDwcn+) zG$&ry!t#ni1vwYegL9(60ezfiij3n$Tw!T=lGodk$vR(@x0d7?7Yr)tnj+dL%Sz~; z<=Opul{TJ!Jzi+^hn*JKMQKx3<75Uon+!}9ajr3v^bIdExZ~RA)%xp@#rhG@m+6S9 zmqlOF!suEyOoF1?k506B8}nuL9P$Rp%zpj0ZOMp&=*euq5W;sAL*ccmNBmGK~-V@wATh;$! z%&gynn4g|lW*ZzcG%7AKb(u-&cafO0MYP4-k+3v`)EW>aMlyMVh^7~u<{7kygn!#X z4DU`|9yU!vZuO-rPkV3z`^3OrQD?eDb35sC(09l1zcrrz(MkJ%SBgoh5MIcO z7=OAif>*6b3Bbg`pC?U_>Oy}I^CJt+@WC1p!7DcQo5WMmwA-7ag*L5~RW??N(rQw5 zUhpq!%*G3v^IMl!J~cFdUrkRn&$*vnX`@$0cc7=V-4ovpHruzp+walMvRxlH;sB;$ zN&HrW+>lo=dx|a9;2;JU_+x`uw$ z_ZL|FUP!WFVY{0AURc}uOV@W7p8Q^TygP@CT%Ibua6m8e^gfX}gPDYiOrpFtBH*s6QShyaz@K=H8av1klJYSBi0F z2yc}Fm#02)()DX#9eHy}z=&q0II^*w?j9nc6Qb`%503`)C~&5sFUdO7oR&s}aDwlE zCF=w9N=qk@u*1V)h-!(l#tIBdfguJF?YYxdGbf=h7pl+D)NHclv6c)3J&f=ftxisX zJU9dd;+C)}`BlL4jam8XC&OAy)Ot7@3q-P4%LYeoLVFDEdqN1sxeYB=uT0F-gwB$+ z&f+i9riAK98Wz(fOdbRqSf$#iz)Tr$g$@WO(FqCdQH1vc#Hkac6LeW*k5||}RkVik zXJOYR#z{pg$QnB#I=h*9xC)B%SKUO4?^T~hZCrT2oGz?Obqwm*9-xvk$=Q@VoHJVO@m&~k&pUr&k%a2dVLMs1o2H~qCOTEGtn^hJv11BRS5 z7Lj>E@Sxef%XHf5(C5t;6<@H2t5~>L!V!N~Zg)ue!7gii9`r6~)`iunOqnTS zX*=iYmu6i~DjeL1$L(VaM-!f6g(h5E7V}xk!kU(*7CKw=nnN_8M;_a+?QBJRj3Fn3jsx{RB1M3 zna&s$#acXXrt#==4ZR+xJmd^nzA)TEo@j=+Gp;giw}d^z0fW60SbZT@`=Q(o&U?U0 zUXpo$QVxQ0t2~_Dpo-!s>=kfaYD#3!ig-$-E3HWogL}35`S@N00ZOS>j*3HjK}sU+ zHWf{|HpCkbDdHzpah`#uyNF+Gu}!l~tq_&5F0JCAL#@CdjuWZbilj8h_1jhF`vW>k zE{AO66?jeuJ7naO&G2MpszxmBLV|~eq|jw!K<&`y&}8RuIJq-5A~#Z&axF8za(ki7 z6Gv6@^N;e{wO*seKZT}pz2pRKZS8ACEhA;rLUYw)77+Q~bmDKC%!1Eu23lhhYimt2 zHJ*;DG0Jgk2_hnrLt;g}^&67y>h`3I90i4}#q*f%BeKSINuV}0O4s+Di>n=P8m+K2 z$+*iE@Ry~MX1_hu&?O6Au42{??bfMK4WEI zIR>w0%scEf)R##z4#l=hN!}Gz+}_F8K&IlfW-zodQ0HS_D&bq;4@-8k;oyTX+t?NE znTKgSFPesNp1ua3Jc_wRE^CKzOB*xxZT>&T-Z4nCZRr*+ySi-Kwr#u1wr$(CZL^DA zwr$&0T}I#Pcb|CAJ}2(J-}ht1vm%~fD`(D}nPX;-;o0@|Qky~KtmIfED~Db~e_UJa zTXDy%E6nUMIHOj&i12I<<`g_k(3eb}=4Xric>{_3s5*fj${4}#1|(7l5keIqmah?s z=%|2Mlg1Iza1%(5CjvUE{1{`nL|%bQZ~bkCEehWoYjn&a$pH4K08*+BmB5N0WRiJ3 z3^gkfx(+mOpj^X@K8QUU=*_M;npu$O8YLu_$d1}nFGS+P2?FgX2x-L@+LrJN=;+(> zy#L)z0OT=&<`kcj5rH6_-rFEZa=$)I(;E1 
z;uEk2T_|EAa6~8I(S!*=IDBo^xGLnZGcbp0W^iCGlVuAk{_u}OeP+s z7v*J3y6-!R^VlU-jBKWbde&h-%~4$Ub8cJ%H1dL*@PIag9$A#D9d&dul0J;FIMhzK zdL9ywX5HP4*s=Lry=!4LKPMBuHyZ2t_OS1-Z*Ca0%h`@xRN_J&1TSM;NGGr|AmS-pI*Z5>8Gvg*$$qShyf<)%6W@>TJ z+rh4xs&H$xT75m$j{XKB>0_lHK$31SmUp{~V>O7Cd10c+k&7{TO4izuFUJ)qnU0PE$zJ0%JFt`dLDlF!SzOS#|utQ7>j&{^}t0enQ zqED5t(EA_tmlYpEHvM8uc4Ld#-#AgA-a9N@aYA1_`gD)^xG!XT9h_B*$vneSy)}-~ zZ$$40B?W0uTiS$u5ndBT!8X8oHvk4INDr_>vKVwvqnjEQtOp$*P;HNNsX|(xA#It* zapFhIW*2U<9F*O__c#&rq`N}){?QR85AKEcO)1RYGXrLYsBK=lLPK)qX}-b_Q9n&% z8GN(@yX1YVBl?ps2u{kOr!V=7S-MeHC8`rYe1WjKz#Mj@O@e@ z<$%J;YLo8~`-abLRbb6OEaKWQgZD6FF$bm%Sl=Pd88%M99J-K46x{#k4I1#rBJc=q z2c_>^(vLUae^=apF04y7W|JAdy1ux++G_ss?(7ecA!KE4Y~v*0>}2*Agb}S|X^W@; z&x3}VGOUO@NElYjOD=yIAsOY*OcE9j=?|w2b2}fPxniQ8VZ7_Ap7FhK_E#g{82eMZ zLRhpuTl(k*$MM9(<|-avj~BSSAO{5G8^M7>TzrxOj6N{<1~(KwX2F4B3_A?|R2L?H zz#AE(B#dI^tlMDSa@LBF3fD3ikv5O>RB1cUvb8fO%VN9vNs3Y?MpK-SO-AO1*~g#p#TXSK#_VD+nrzD6N+*cx z7#Pfkb%roBlj*BP2b1^0d7SjxcabDE{Tb1fmd&2R4?@|a6Sya}uv=!oZ13WrDOV1p z;*@WYr_o)O;8o&=96_J#wpkPgkOzK9;1AW+uaO7Z>d8`9WKW=_gqS@||MD05NKUOS zwy~HuKP;(jtkB?~omI4%5tSM?-vZ?4y?O|KNre+y%JJ1>D|52Y+U#7@z^iCwWvi&rLdZuXm;$ex{GW7^KH z?D=%RCG!JqQeyY7r09$$K^>>i>K}v8u4#OZ8>D<&`o1AvF!4$nsY$)0?lrJy_^n#} znK;-U412g#LRsCms6L!Vblc=MJqQP~1Mv~WpxvYlDE&<6FkW#ErBG$fnR?QU23j1p zab6*+gkP~l_H_h0PMxVnJcmN>Qv(#los_xx8{IM{&6ecQT}48ClAZ%Wf#E#RNL* zo2~KY=~dbGFXi=er*=*iIn8eMhmsgqlyTB*6U4^}>+Oj3O(-}TN+61kH*G(B_-338 z&PB!SJ1T>iV`%+}VhWRLxm5^tF0u2H^$720jtsgDA|0H0xl*VOlWB{xb4)n5iwBIO z9~n_fZ8U{pSyAo>#0y4VF`yXgomBfgsATFJz1kR0H^Bf#Ug2+6UL^aL-@E|=$_oh$ zF`0mr0+lvS%srHvjhF?rg+r7WX$=MXx}#8+d8YBv>hR;<;1-~xa8Oq&_==k0*^x5D z$+Vqg4m@ENb%fjom7|^CL;QDG5#D5Op*mVaq<(-~0K)X4rjukF`U|8w zuZVV3%;pPdA$EM2dsukCe~KCLC+M&}LVsHjO!YG|YIq72^M=YDtvbdTeEkOQ;jZXXo4E2Z0p1mX8t_~<|Lgqp$s1&VYbL%INL569oT5U2$~Wd!C($Psy8P< z0UjX6RiM_l)1QNyfjI2>IMf~rYJUgX|HKJsf$suCUm&}|SFHHQme2p~L;eq_UBu1M z*zVt$dv^S`B$fd3P!EG|3v9hb1?!qGC6qZK?UG^{f^Aa($*+R?AJkFjMoc8Du#Jst z!#5>xdn!L8dHHKS_l2IJ1|3-=X&4mAp|?34k0!3YnBMN64&eYYzf<7Hno>`z>(q5R z)g9=m>v1ZIj@wA3i--0e_j?203g|h|!4OgbuP1DNKdhPFr|YSi>V`gWb;`S};V-$X zAc@-4L*I(qIq#*W@^{-5m{J6}LBb(}RJyUi&g5_I z*q?iNy19El&vM!~2N)fGEd!nf1cD>u%~=3}8c~oT6~&e{h$zTUsDQ(xNsJ?#`xDkl zPFkjwRIyw2!AL?T(Z&INi_eF%t7v`zBo#$qaj4J1PYJQ62xwHFAo&h{? zt#SY2)y&FolJuHUWks^^qQ6YTlB<~BOdpesW86iw^vFZ~NRq&Iz$9n;Up?!QEmJvq zU)c-tSGyhKKiwDn3v~Q9!^2-*@>d#@oUr|;KYh4oeM>t(>jRKRPe1{l0{oFU!wg6O zGC8Q|EHDH-XCuX$!=a1Fl_vi8C>jzL65c!TyQdfH6fRkc<8|*B2;j3FTK{_MI3m7?9$ztM41gv^_@jF4)r;7mP;d=VCBx-M)nA5j4X{ zwjm!a5L@GcCs8TeyH^Ns*ciiKhvXT2>+D@%&g8WFvGU4huRD1FG{TC^?4gir-L+@V z2@Ljjp~~{9HIuT3!OUwbnuWkt600s1Ewf{?+{P&OxFY*;kmp$_Av;ep#uoYVS?Jx; zeUePP`GcXl0<2O7l!}as;Ni#J^}GHI0(gv9$6(U2dDbnoO+l17lDl*|z5TebKuyG4 z68&J^3m=?pBYt|zcvsFxi?^sJOr@ltV#FJB3R0eZP)yWC=ny-RW`q|(Ok_Qsh)={> zxpxB-vUeUgQIC~3Xrtr{0%HoRAghK&3PXTR^6E4~)HFh&U<+8_1vT@;e0poRt`G{e z5!z_rRBV_sgILaItVNA%;g&k1;X4Ni*RT zl`5-*hp6wjYxkVv zb`IhNuj$}^H^p8AZW1%G!J7}KmEYKENYK=Zb}(||&;9|XH&p@XIY|BYePhrD&hjT7 z3Pbi7WJaCD0<6yS%z^@t_gI4QF;FyBFI*21E>u=$@!1|leGnQRMcVi0CL-A=HE=A$ zFR79v^g^GWPEvw8{Vvv}st60!5hj@fLg|F66S>Wp6Z;)sf>6 zo8wV=`ZTF}$y%lFNAI2P=l86Vc1s~^PLLe1qVl?EmllgeG#ZwEN$(+Xy3ke&gdrYWIf1h*2iymhi5V&{`{K!pq}S*CRBY$GhnC{ihjF^v6?g0{fXYSp46D_AIYhK>L! 
z*AcvmyGxmg6H}5>1e0Y?GPwbDLp%ngSyAW}B3jWW3+!(q+&ik}GRnD%K%d~sz*6lR zExDm~907(zDW@s@Cf{XO#y+aiC4MQ+Y;|fq%9deX`Yubu|1)Ximl#nz$fxS3>Yb^$ z_)};BP$yMh`V#46mA2fgYRO}qD?(TG>WU1-D!p2oPaq@h{2ZZb1)c(`$< z$IA&cNfqkg^1b~sWrbi1lw5w;MhcNFsWgR3Q1dc&5Oyka~Jz@*v zEJb@vz9jnbCFJHP{y`g}iJMk%^d)gbNrLBy2&TShovLJd&!!Pd9@+#L^^so^-|v1K zwMO}%nA*z@`9<7+aaE`>lo^m%sCa$LO|=&o6xp4%4Cn;2984K1DVD602~U#Qo>5fO zq1kw?k6Kpg^q-`pg%n4-Es4n8DP{a>g2V8k-AAMJ3D+%uW~nKAhTTfIZHd_4bL~71 zzgHgOqV$P%HSQ#%eiwkbt_*`<@&&m+c*Lqm_tGPB$;8 z)I3;;y+6sxNA=)|NLSC>mN92vkll`E6ia)*VC@Ppb?INW zIx0nru?`vZUA17hgm=A1#RETh~aA!&&@HqHmCVX2SNb_v9s6xq2obR?#{bA6Y( z1eQIh61g3ubVNpyb=8Qf42F1){05z}q)~0#TBt&ORhE5F%ln2kVqBs{)HC!DHVb?ws!fcwH5Ch$eDLBY(`7DMs zdwA!NKxkRHwSfuogo&~OBsw0_vpV#_(%%q4cb(^mrq_ThjLy1rst=^+9QL2(&r9qb zF~rY6q}L&%Np#!ATgjKM-nRAAhn&b;1&c+>{0<8pXPZ%;){xGoZoJGNm- zq+wvHwP~lixXUCw5G!l(LcOH4?vzx3K?Bnb$8=%o7}M7Rh-HhruIxW4WmT+m4AElV zzfvOeHDVRx8D>q#vms^$hpWI8p%0*|N;~RuTxGbz${|#fh8xfNXyFyjkgnI{j-o_!4MRG+MRdc?h1#;t$ zwl30fh4Lxx07*uaQIF(~k;^q-2n;B-DZvSrhz)HO4iOHfrz^gIPQj#K{b(I7#iVp0 z+B$>hXCmsw>-(}?5mD$)azg@DF3KjJ4LnzdWw-6ztC3WW+#El~6b{@LgycKbe_ZJ# zB_8lXr8X)?69nSAHF9$hUyr63EN_MNUQXV%W8*qZ(K&c*c(MSUi-=sTnb}1oz%fC`(z@E*z-Mk~LSz72@_4AIY%y@6Va1qmaXVh2yDb+zQIjskUknPE|6| z%L%eC1F?q@iK&7B)Bb3iT@f9ni|ZTO!{@MQ<hQHoHAivSW9M}P8Lk$4q{S+fG!9fzV%l~C;;_5P$-e_v< z`}TQAg022a(0@{jVMNi(j{t$G{gnSn2H6SAfa)cXv*7g}k zK(+bdPr-rb|Skvc0JB(EEoxP~pr}DM~MYqfrjaX3hHnj%X zQes+CF#>`S1E!v1sVyt;w8JZby9Yh*^lx`*k&Jh<#~*h-(h8RCLbXJ=nsZypJ?PB@ z!zXECG5*$+^v+fWQlT+SoXTdLM|l)&{dT{bY=73>oYPLPhc5KXznALHL0`Rq^q?It zhQn;X>rRr-9VZsUQHKY0@>V0x`O_zoC85xpqYZ+tSu15LAMJp3AR@oz*oCVaJV7Sia|)oK$(j`aR5~#vSUFkz)W_d(uwP$D{U%6qM@wO zC&|)|Vj*FwJXEPlIlnD2G70V^oyatlPKTC}&|?{>(q|B!#e-_K!#(>OC*Yr=LwgY{5&eQt{<%p0kCNlRpp)cyF4-^U%8=iqNP=#OH$H3x_|!ppkU+$^K*+NIr)Z7D zqC+tkuy!DlHT=DRWrPZL02)*NZGZ+#3=jf}GZParzh92FAI690nfyqki{XdMz8P{x zkuk&%5a$o4=8#+@{PG%imVIo6bmBkRaKO=p6iW`$PP}dHczar(I|nAHH{nRpAQN_U zBoVY4=Sd8@DmWbJUpp-S6*E{qncZDr$*4-7HLcH7LYX4>ow{9T6<$SOwaC7v!fIF2 zW2i+wV3~7H*~<8MC`m;d86sWlR!|GFA60ghT`}h{3EIQ+*BsrsiZ3r>h))c*8uyci zCPWEgMc+tCF60YorAC6Hn9U^9VNEd|+fv`}f*jdGU~`;TrxAG6&wG#{EhP@M7UiLu z3LhzkX!PH@e)It#30dpU$O2qX5*<4$quIt&0~|TEfw;gDXv4@~_pXDDCr6Z<@cH0f zfJwISKuhZ7&EfA}IPu#-?m6lZ5UU}S#3vPaz`YJWVJ1~4wo6crs+x5T=rHO+1eUQZ zvnk!IY0VgQC72a_-Q8sfHKBp0@pAtemzercaPYpCyY~M@18MS?+=Ym>os;`t;pjhl zRFhLQ|Ik1V|8At|qE>-FC@64<*Pv8H01X<$F{t+sMMM<$N=qsJ>c*OMkvf=!D^f5d z3@3iQ3T4c}U@f*m)50?`J(|ecewj{}dOLr5!UGtpFA^M7MxLicR_KvO7>atH5RF$s z7*x*Ur7KK}G{_-HStqxmv7xSIL}Ul?H(a$8-SM_;>ZFSlNZiOA?0j098Xyi*OwTHja_*=7mJ z2JRYzU#v`^A?Sd)Z8$>4)?^!p8CCv_!@G~_opdqt;V zNTAqgf@9Q+6A$KMjA$>tywBLRaty30Vl(B0(e^QLgxTbM%tF^w{>s5x2Q<7lmp?X_ z2LTET65bMwF)-Qa2rgccTm}Ig=erCR4`QByRNn!q4Ez+rl`RJ=M9*`dHmfS9lAhSZ zjXIZPD+miZfLX-Y7&*iWIZ|Yl_CUL=97;_RH=3@CtQsRY9HNY+LOj1O2B^_(a311+Pk(Wy47 zb1z6Hoy7m-D3(eY3(v%u-@MTCv+02fQKf7ZhVWorJE_iFOeoIwKvHI^(3~sP(=E&2 z0rMUV1cI5DN)QHbMm$cOp+JCPn>_>))%HXDlz)m5A2$muReZV-g?k7xO|d{AaRD>P3^3?~l6Xo+bV?td0+p9@dX@<&l7pIux>iDRt*sOoA?@j2oU84Ojf@DcNvxPh-B7hEbiJP36v)m z@;50BL^xndFC_L*iK5T+{w@Fx?D!^TTu|j_*1Eo?PFUYV`iD`WMqqL;YIDPpwkCYn%PoZ0^ip5Qh;ays*a3__u3e=jFQEY`&uu$HB-+dJEGB_-Z&hFB5F_rq z2N;F1?-x{id#8G^p@- z;SYda#WK~<3!}!mRc(_mkVi$MQlxsWqI%Jze2ucY8np}rxVjYzqGe^xA{eU0jHh(j zaV=+l{!`)derjS%P-Ccl(=oyIt4HGc-m}NH`^6=uyY)8)0NSoLorzvYL}phTXlqI# zgs!}OCR*_dZSXo(9k6cI&r~}x(5nG$gYK$5DwG+!)`(hFJ@P9B``U>pe*OkD|2e z@Kq9%)-)c2Zn`512G7A?;j3gv<_sSOkC7bsN8koNg z$jYo@lX-l&S5_N4wlQFVAskZvYw2OZ(ZX82bw{wTCY!m^L4BX7JVPyakMi^dU*m%L zdI~n@RvqX|L;}8^eJ~;cjnvm3($^=9?@pL(6O$pQt($LX8ma1zCyPv6$dDvQ@+vZy 
z7N8{eg@8f@p@gEA4W3>4H4Dq3uu?XmxH{FD1QcA!B;hsi&8(lxdeBj_JQ%Vo498+b zklEcdEBxfRkwd4YBPR;^?rj2qBnqKl60?x8*P_DBnob+wvaWc z)PWE?&fNjIiY(zW-X6Ygh}`x#MV&5c(2VQCK#DYysS{$4LRY~)av(wkEqEJ)oQoNK zvkI#)rn;;#wPYJyPcf?HlNXith>qq&aSas$Kd4tdn=>Qxbq|`HNeEc!tU`^w2%N6k zVb%Q2aQh@hT=gwZI(jiMTw@joB3-s|nWAJ(gY~FMeNcJhM|~Q?XoO{I^-??qN+ltI z1G~5A!BWY475zc^=yUzJY9$$_IUI=*c;cp9uQFSu;3I9}un&HylZhHd*&D#VM3#?T zn}2j!IgeqjE@R!5(1pO3n`+%y24FqN(O3pID3a;cquJaAJ!*&%9}e5|5ueJtF6ftJ(!Es8Ts$DYn zl06jIFYy!R&WKz`Z`g(p%jaNbm0TWROkM(EB5?Ou3U*UH*<%u+T|a#bH<=AMyp^h5 z=l2ARstk_CF_)h2u^@fYIgRs63l?s`!zx)bH&mVFyTnHp-$R=hGVEK3u~`E=+-fkS zU6|0+11PO)RWt>|Ybf0rD=Ih|+9lyKLvT;38Kd>W{Gv>GzWf# zouV|N^kxM$S8wZG4mn@t2t)>G9;$no7LVo3$}jX3(Rhl2oyc&vU+_%OZY0TMerr(4Qw3{=4pKP@T|14HhPq zcO3>Vjhy3KM#;$|tV`C|Le&Lhq2__SskLM=j=s=w-fSR+k*7U1#|`-bey%!`nNCC? zBcu|TJ-wN0tT)&`gzL3icaJJ_b*G8IeBY1C3^fBF-_{9$*rZO?G13uTHIas{dYc$$ zU(liZ94zWf?j`2JG}oWsw|U9C_&~K(WYjW$%={1gR z(6Q|BxTk=6`R$g~BLPO{Y4Jh-f5H22PkK5Ju8&J#ofHhrgulpAvGl(Eb3B? z%w5bcclvPlH&C?!Yc1eNnhvR^mVu4jSXheVjwYi&NXkJaBP2x3%K2bahL+~Jp{8X& zyL^juMjDp47L)EIwEQferFF(jW#W{V zo$OfZ{6a{WNAe<-;^w;gf09V zd%Ya|19JU#!OwZn-k1ZpjwzPMYx0|?q+enNzr$C2NzZh{KiHt&Jq79tU;1lYqd(}N zyx@2#bE_!!7hLA0dvS@l?6jEC_gt~7>wbJQQhd*ap~BYElz z)a}3Ych9h}#->YB)u$qOa@4EAd$QCo!+UBA)FFNSH^R~)d+{*v4SpTpg{+z@!i zVprb6z5Q0EkwH4xd{!5bra5F4@z8m?IggQ??T@hX?%;E-j ztxlMh{DKCCdaA7r4HsLM&~B3O+uW2Wb%FvEZ{cscOAMvMC04BN>~OsR@FcUWt&#|p z2l^Se>6v=ImhgWxEe7)g;{$=g4gGU6rb|iXMj*008-pv7q?Iv8f1a>MlQv3=DIQXK z5x$t+1vrVUQP#vc?+)~!omhHIQEKE*`S8E==-&z>q@xU3I}yifcFKAHUOO4`xk-@}mxp{fy;T~72b5<&R zArJ)gyZ&kso z(5)O#kM3iyD~{G4AKy1XJ={}zWMZKcFGFmMqx##v zrtIn`P#tHd`MkXZr6ow(I9hg(Y-GtP`nKe9vzE!xo-DR%Ma4>sU`9Dyc9l#`XB!f> z7k|}C_RNezV`)TWVb+FkzvhgHkcz*ZJ*qb149LMku5NHViP!Vk7sc5ePsw4GOjpau zg>$_~ePBfmayi!x+rZ&QxDQ8UjJcnQj)zBQ)U2dtDN>UO_OF{vQr=Y2#uYASd8jHY z&BU%l*%WB@()^+b%hMdn2ym9BIuW4jA8EeV`+j5Px>Rn{p<4KTa!&Xc=&GG6(SX4M zL!py&qvEl`lVUPON1g|%VW9pgpXun#xq%vUVUky~uQ&nBht5aWjFvq;SThH>TKnbq zb_Gf8n~Nlx`|ny1uJn$-$3pF2QBYpBNuGt*NAJRUJ8U!j-eTH5;nUK-RRcG@?N9=! 
z@Bk|nD?Ka!rg%4qU#LCd6xkvAFx^J*T1S)2B6z1}bz5V>e+CR{$=;AXy@K9TV1una4GXC0H|(oYV9=SM2%uG%A8-3{>N| zB~!=@q_JneVH>ebp@B`F_ibKZR^zPhTpNGV09yFK$_PHiER&pj7m~0!klH;!+9Oc> z5}&F_!Wkf(S)imWR!smXcb$KUTL5>4e@y_9dD0~gcCLuaFASO(YS>&_P2l|!XVfi# z*z6S-=e9M_NaDT|IIOKVs*|b4B*i9Ko}pcgsx5Tpzcv^GcpN?aSLkZ}dj7HG{+lqX zlGE1!_Lpv{q2qrkyu@uwe$8SJ$(d+3SFSIFKZ89C<+bXJ4)LcUV|(msFXb#vq$Lq& zK3nMg;nzxQ(MO&be7W^Lrh9q$cm?ybObfC@?4$7iPB|VqT`EbQg0vz=_)8gB;aR13k)2#G)mE+5gu0N*oQ zPv3*?_^AYu_a)RVUC6bV7yUqt>8MKRLT+Z(IM_-SC3~CTq%opej278lkI|l%O&V&X zD%z8kzBlqae7r&~HJ<#EqA68$KgYclCmryeD?!n}_jKx3u6hsPhoFxT?1R}&qsQa^ zx%aoW(bHC2SzZg*e;5f!^PJf{|(FyCaKn!1aIaGs# zJV#h4n;+KsF5+G^q#8UD+if@PX0y?$&6?n^#m*Z*FY^5vy#*Fw+OI~6dtavPskh;C zEI+9%Q1)+jh&xI7iQ=xA<}^g5j4~bHNxs5{Z&rFxq?WK;!Z2;?9Ch*T;0&NpCIpgs zwfI3-(`uh^!mbArzd4x+R?cfemb?ZgST`ifo45`l|;R9qvSe1fcO>~dvA~(MGOz-sr zwgH=$-|5rAIWfWqjQ4$Z%_5GzmJrC&$&*Y{Bq<&r;2|0(9lVMZKiU@tW|&mHlcuk{ zRG9k*ea#~{Lyn#tuW?P8shDNfz41n=U|_hKB$>+u((|LV{j5O!TRqD>G=(yj{R5qQeHp1#HaELM-db)c5gHgG{4xOH?q_^ZHPt+(S@zo^nM5US$?l5BAE3` z-}EsvecS8p<>3=>2eXtR#t;VL7$dYp1FP~lWtelCGP@p)2ctVGwS8$UqRgZ>ox6kN zSCMbumK549sr$;+s%nUJ-pIKIeCnuNJ=9Qzm|D_Bfd00qd&=dLEk$ZC@{d@1!IRp} zu;a*s?afR#fys!mVs1uNt*j5hoR!*zN|UfWs8kB=vtP-Jgh@Htld|dDb($whU2Et> zFiT)1_avqMaZQ56jo1t1Nxr&gh!8l;WDyWtVZ;O%1sxF#k44?v$|R4%r~CXmK|(*c z*kAuEDI_d7ZQ=Z~D(u%L_{W~xzghPGcC-1{MzD^X{<9H=O2hKX-jSlZ0kNAHZjZw8 zNi+G0)%SbX>$s=~*GQ}hcr(HAeugrB8z z!`oGZxAaiSWmeRnBHJmOhXq+kIv3HYHq)~!pqP4o`v!;lfgyqiw1(X%(9bNm8EGgL z<5&i0qRRlgj8^fm->l1eJb!69NyF43JIE z43Qdo#L{EQqs@mHARM?-@~u%RdVFH22k1g0tT}VISNg;%6(qd8-$OaF`>8yL9#4p3 z$)T3q4)eU^Rqo;Sy%wE=POwIavzZ+sUHCPS*5BEQ{`h(*(qqr&R|IbUT9tpi!u#8b zh<>ST|K};!9Mk((ND|OPi5t<>GqB=zBq4@}3>QZ1N;XD~wCp%V4XZus1;5FUqS-|M zMw*k>@0D%z>2v?^aSOTgZ4)FLBsD08Ro3`AR>bBY;%1|V&4tE@U>*%Mol3HWt3}X! zm^G0i)!-naKHX8BvJl+Y_DyX{HcEM_@i0V*ElrCKV)FzuqbsIGlGmtbGN=@3G_V6l zWqvuip=Z8-UEgAIa;XFxF-QkyXxfI&fee!EcaC!sfmU$RNU=_&n?NFe8G&-4EV~fS zjg_($A;(SUo=BctlUHs2$KR-&{us+Z4Lf-4%UHf&#-jQ9{l8-Ie>2vXjjxb!E6HU9}^?|Xs*1mkFT2EdG-;XJzq1{NYUsgR3#!Y^K17j`^iiAcVv-9ih1VtyI6md2xJm z=n{MPXeCXTYquSGWlqJi37cmq$6-N!TiVh0y=yut`>j^ip120rx6buMd$4~U*C;7%?+c#oFI_25FCs-||S!rFq*1lNWvbJVXoF`CJ z+_uzyt?>lPcUjs}u)Vx?XxZbq<#~TJx#5|(3*N(Yz2!RfJLfXT^J;wmcegJdz{DWj z*p8Q$3gG$;3~6P0m3~9Ys9Ji~s1Z&gE~zO4B$N6yaUb!RuqmS9+_({NB70(Ex|LCa z-rO+pDDhxX?kI=!oHWcAB@s-bj5I6LYM&8RqGNj2@PQX88vW|ffu%I9(Va6X8pDsY zQ4(od<2zW=ZKFFH(o}{Q*2M92t=F)6ZPw5t>7v4mwbS;#hmE)tPsbDwYfbRr+ zHO3Alc(u3t%@Jjw&bDspJApp2kBHmZi#rd-*#wx-t7TwANJ+mpXVJQ zcC7(BH)HvrccwA+V;sIm_UMH>`@ECDUBT>V1HLEYUDJl*ZYX?1cyGpYeYHp9+d}&3 z3Cu444Q(feF>K$4Z%Yjf%RUzG+O)F^1_f5H4#+pK$GBlSwudUz(asl?*AMRXmZ0TI zpEQ=BFZRF_tkR#$?k316YOAK^pZ5d+Mb-sc(fD9 z*3Q@43A15pOnk<%tQw8SJ=cj78lONgS(ucoS{lTpM5Rld6U(n9MIibbTXL7Tn!YZb zpVr*G22FvL67CL+*>O%8nl>zKz_Y<0As1#WSERF8Ml6&PZY`IJ?dvPaS&95)fygjc z6l)-fXJExwt`F?HEt)k`^r)RqD_Tm!WZ-NRZ!#*B#V7J`PE0f3<>irx!I9~X2T^KV zZAfD>N@M)tjyOap(p-iMZHoBVQgnH-xZ~ta<=#$NX*m={hAGG?l^nd+HA?>mt?#k> z2D--yeY`#ssdN;g$DGer%yE@OoYiAMFmc@Aw z=N!kyTD+983+t81MV6+%_r)0tGSpLD&#N~|tz^yJwDMTkGKpn1mC;#EK!;hlJJJNv zW1(Sq)}-@>u$7(+etE=A7?02&o0hSKl5C%LN@64V+pxi{xaP_VyP5yP4Z@n4CZqYU z(m9h%n~*|%rYL$(`4_LNmzG2a2B$3LQ|SzkxqYZ>>HOSmSB~G<6tW)1V|!p|;AezY zD>b{XRx>ejirTa0v9PgNRdV&KGIzzw1O+aeiV$|RZNmb_83}rcSLNeOkyNcs3w1KG z>N4jaketcf_+)6HgrTrShzg~8;0doL$Msov#Pg-D8cXq&stJ`B6jWAC)zR-tW!x7S zW};iGOlw>)Lzv50sLSGo_>iWQU00I=QfTv3Y}IWG)%1*_JvmWpqUKX0^`+h%ss9zf@ajEqdfzqhJ%wJ8j4_TIk6Ciy;I0Ft7SBQQ8@96<+B){c z!;U%yJ8y;2Yncxj@U^Est6&D7o2PdY;2Bu&()^(HgO=P9HkX;HK=s~|C@L{?;>O4y zld(2PyQ7wL7sO$wfgi_e7<_>Nbiw2!VurDC5cq7s>^B@DrFt^O(>Ss;=gEo6ytO0r 
zsZnxIFC{`PBx7~JC`=vZc5ItCI$ucKey(d`1agX6acT=*tGB)7F>#}+7j1%ZEtB{xVv<}o zB0(Q;*omNz(z$YXiyiLcY{J4m_1u29il8t4I8vvU&LSl4S6cA{C)U80<)Zr&$KE(= z(Z&FqsJJuH@(LTtUQ=!6uO^IAAucOtKP5|(>ShEOo`ifPdrP4pbWCoops*|g)OFNz zX=a#q?huXCbB}f|i3r3}TYk`O)e<2UZujOUsfrEPWTx=$9SM`!G(ab|Ze{rk;wT~?{ELWwSuZ{alQ-3-G}%a{&@qoV4D?|6-81zEx4uHIwiKJ!J?S|(>i zIjUD_LYMm|7aU1jGJ%!5;Fz>OP04pRW0=H=YtI*M)V*Z;k8gtoqM53MqFpR*OYyV; zi``tX7~IQff%FTMxtEhC$!jPI1&NI(nzehoKS4atL=H1-h*&D><_ki^&5gZRPF%ivJ_2**(EtyPrK8Y)-i4u5sCIM-w>a%MRJZXd*K|fXPS>RN(El`yYO}A z?`A%ygxKzLy#$N}3iP|)^6+)n?gConH#V#&iTAru=CAmVvuFt%LU5%A?a-(omxou! zDgnA0Fl)yfhp6MVOks!0b?>5Vx7;T%VU3OoJ&-whNhJ$}k++xiYNQIiq1*l+#@;bl zmTqer-OIMUmu=g&ZQHhO+qSWnZQHi(eb<;9=3KbISrngl~RgT)@h5U z^Kqqr$@!o=-&VhE1bZd{e{oGxn6C@wl$hxwbX`{UVU`Xk-(12ibS8Ix;Y;~H>;dwvf-2MDc7vkLKdb=sf}!;R zx2-bj2p>pt9l&Dho@2N!ykh0tabWr`FxnK|wQ?P>VCo)Y(Ec299|$vbZ!*Try+Y2Y zmEO5>9Wa`IYR|ZVM3?U_JOI~7En_nqF4N+SKmN&aD`zD1ppK3y)h*h&U-$g=;s;lb zV7Pq|2HUz#6><@7!GgB=i?={mq-~C$ExFD6QL_(R^wJ+O_ZLFb@8NYTSYX~QU{=kE zodShYB_-2#C7g3=0@=B7J#Oyj@<->;XiI1D6@oA|0ysUV*{ks9 za)p~;|XMgDV;~5F1hnnOb1c~(G&C=Dg=cpfg1`3nHGV=Rl*O4gVRbP;L77; z(-3r$3+H9Fa4C*NrhD*3+@?u{LKMK_Ng|xa(2BAFmJ=KideI2G2^0^+7r^?a5^$Bz z>lfq!eWl`a6<6elLM$PDDFl@&*QXH;0*Xo?f1rvhY3bmB{W{uJa8hi5HnOAc2OEjT%TuQ1#+oQvxA#hLy9R-8`O(1yPIvNqI8xgaX&ZzT$G}&Up&2#S1p)TTAVrwiTqGBS5w#=O7prpGjcKgl>e_(H zUqT+Q5%3mqe?OVXLY*I+sUKAf67xtM zT;9Eqw~Y3l$5Q+@3|xi9i@!-KRih<_>Ox-4g&@aBodr%7brDfw~Rk6$Q+O`Oc1rvRvk6 zd3j4R^=;d2=Pk?D5cqgPSrtNK*DODq@`}`1Btp2g-vTO(JV-LF$K%#ZZ2brQQo$)kf zO~#t1U#cvWUGLcX@PUlxWI$+#WxW@zUus8~Zp4*do08VG))c0wPk}31iw&WIK5PQq zoWgo&p?*msvArh4+9p{2^wMA6s=PD=rDW5k>{0uS-aRRDspj2LMx>4DqbB*i3n+C0 zfh8QART4p^teemZwVUwOCY8Xc0=^{-+*KSwfFh`xpr0Et=$A{oe7bp8{$K@kW+Cwm^zT$4-mWM{rD+CT zxJaj4nvZh2$BnY`%gD(F=dZ`zqFFilC-&Hft|ho%GL}ou5NY?vY5=`E5I0CUifLJqVV z5lhu>594BbpfmepPkY!yk~RI7z{s}o>d~!;!34c_=T7ICU`>De-LcHXJ>$n8(oNNM zr4hpq+wu$iK`yKwycm4~l~OLc-o$>x00)zvg}R06$Kas34eaAkh_jFp=!*8AOXqqJ zQ1Jb;WPv}+$N1kYovopzvC}`8M{C)MAD?abENXoJdx|4K-&x$={Yi*?l?o+@A}@b? 
zR(#?J!j0LJ2x>`NPuU`v4KZF0s6qFpZNwF z5MGNzPrkMdYda>5{&iV<3}t4c{Cx2A1ekzgOc0{rrI3;h5S5u*b$|T*_PzbcaGwC- zkYnBa3Iqg-Fl~W{3X3%=o0ry?E0ispTmIsH_t^UMY+c%*27LYH?VbGG?qq#9-eR2i z{>$}6g%EuNB-0U}+-EM+V($S;-Xduzb`VD*h*`>%KHpX}mQa$~*M*aHRYY}tpw}As z!O@l*_`&Js7YN4T7KbFp4vU*LCg@hyH-OEh6(R~DT+>IG#gvsC7>n9zczB>j8hd!a z@a8r>aVVdaI|iafCLP~bh@(Zt2R*`8?&laBTp!>W5I8c1c)<83l=C|-v54H^RtPMY ziisGOhd7b+t{ha><NvX!d)rW7C5+BNQL~pnQzdlwY@cPe6T}Nb z2(82I&^>^)_WrIUO!s78DyKQl3;io3sWp5Rj8`#}s<>k_6WbC;NmE3+DP22dKz_)6 zU60SF`w&WB2&nBTa*0ki-zrwbVqSR+lL+ty*NlX0{SJ~DSxrokmp^Eqg-ykSP^Y$9 z*LkOpE#PlxKI=^mb&|Zr8`d61@4fjB5#8R;@naD-A6?^Zq*(55b|S zQ!H)(I3qPK=XC6j`Op3qb(rnZ#kNA|aV+Pu0X`9kGIsZJ`rt1sD$TBbTF+$5WcsH@ z6&VE;mo_Yuzn#}%4Q7}u5^5e5Dwh42^m1q_(JdjsRp@0-Gc`mY zXK6WKq&T3o&Smd6(x2)3pn=MOd=Bc$(WK=Od45%-E`+y;)8;uwn+r@1Bxs4k%rJwR*EDd&~%wMgp>5^ySFaqyQ;@~qTh5V-{^O}*XLe) zB)|xxYgjc;OzzSsbU3N=q6%daO9Jp(A-+7Hl%eczfYMWRd><0Az7mey4UVh~eizNB z*(T4~*9*b|0vUMJLNpzr*T$G%2Nb}8D}P=ww|5INPf2My60aTRrko-pZ4!@F%xzLl z5}91Z8q;m`F>@WzPP;W?$E&PLgwYlN7Qi7Sy5m}DR4Xbs>v-gCexr|BLxQNjpLaSt zM2w_G7>1efr%{593X@F2h%U$js&fUaV}GMVG9rDVLpEkyrW<%M1drUTDD4d-L|<|E zu&ofTb)QtrM9mF~`sbW#J#ygaVGR#+$2vL41E)$U< zBURI@OdLi#+=+DKavWWxzdKO^$t5s-^!C3zx}xp$vSNBAM*8-%VtGYI+8%8{cW(`4 zx;6H>w!O-9PYk(6hQCP5Z~u718ueEs+>6j3;h1+i%xyId3ESRk(=81iWe9AO$dc9g z^{$;#cdYDE({zWzw!P|Xk2@(P5Ftjw>XQ@~G3#uZq3w*k6oQLs4K_`8%7gOm?t__Q zE!@%oC^3J0H3oF^4Pr$k25X)=5~oA}fcP)e~ zlj0$#dw0q%#xDKkAKCg~XuueYM=|{GW962PZFh@h;w?VW`h>Q1zH9XAiR2Wn@>WzLZ_~z2 zr!XCAvZYJ$$#olBb0T zu0N$FKOZ_5PoI#HIf!!~54bP0$s9y0Ks4e(`hq(PL8VSGVm08n`m3hh?pAy0c<-yN z*Vm17;&{Ov5XqCL%LpUQ#pe}~fFyMp|25A?SJy$micKS3vCqIYhoNP^9}vECV}1K7 zDF`!_@VNhqojF)qG+^hbs7Qy*f3MZTp-MwyvfacYDkh_y(*CO4#(Hz`MdK5#%bVb) z+(8vZS@_1JjG^tLcdxkW6pIqdm}h}hR9m8LmGqV`T4^ThAv7TqvIltzwFo9}d!84F z$Rj+236}nc3t5YJZ^tWnj`)1A^P2VXI!=CzKlEM!$9uBNk}l}EAukdUn&968}g|620)ce=MQ9+z2HZ@c$tH&`7_eddmb=1(gk=mpi!8Yf^ zXQ#OVZ7|LZxL@}HEI`eDi*4(_^2|H9_}0vK&*v|*)oC}VCCX! z=?me|LwnLni1-~qW0P}I92PMgowEV>QzMJU3u{x;s^^sZv8kzAAu=qIAxP-BYqbvG z=tvVC(4}0&&LIxV?$sfhr;_w$Z=ObhHa6Lr7Yu6c?i}BxiO`vS>_1Oe!AF`9u1lh= zvepjZQdxlm%4E3>M5gVs0xs3cxb(WiNh69>_b(uf4$CVg88XXsX%k#i^Jq{9pV3PB z!~qytsiS=%DA@Lyr~+o18fxBzOkX^KVD8iOYPGK+l0~Gv_j#;e)M+OKAI2v-u=1Tl zOAv}qMF<=cG;98#leb@&q#=mS&T9wVZ3KEb9w9?T@W1>^OO*IE;1QRo)jZB%enB6&lM3w zoZ+iNsLDnZeO6e;9oIO$UXtnJ9EvXRvx3?U1AJHeMF?r9=@&(oR|{e%0`NuUM+tdq z3&2q52Y@*%19Spp;Rtu0hYeBv+s*`k zrNeNld@lH54&Nmrx&VMFZ2P;STbTHfMY=e5LES0N;uMD?bGz8XBNp>a^>z~Ol;E9N zgHCyuhgM18yCL*L&p77h*A^YQwt5<`hWL@V~Q_ep|4SdNtP2D#saD9R|# zv-VO*m%_EP@KLd;TDvs#O-3R|=o(DHX_S|FpGuBjR~r&msDmaD4SM-rO^W)a;e_^E z&`^woLmlCc>@hN8e?m|NpWT}P6|@4k#T(Bw%!J=nM)f(1RzP2QJQ96EzBPyl{Y%ZO zw}eItzCam1D6JlC5A$yL-(=4v{}yPyv!cxUngRYuHzUT`qfHtUEmnmi*)yk)befVp z6$$^CgS-?fbA>J6{_2PWoA-#SJ#tzfb?FF%u>jd}ndJuQV41xF-I~bT0PA4&|6rWO z0p1ctk1WX~LwX~H%J9K8?WAKlm#jPk@j&=QjT8Mqe$K)A#n&uQ!j2J$pHZ{it8J^jrN!?uXL@E4lm(A}Kk5S1{frJ`3u! zqS*n9IaMY(Z=IM|Dp6hXBhew=D8kP)W1F;)>V9g4OV5q z+_7=LH{LG<#{#+6tK2UWe;l%BE)R$53+5D1@W$SvdWvn&b0xM^GV(_j5pE=2qnrt% z00$Xwo&QRJn@I%6`6iL-WxAdavqTXi8$$m?p2uMjaa7=)dG}L6s2RqO)*q!d{4M(@ z4MQBZCcw&Mo-PNZS;a6ElOz6wzxYHBgEpVBJ%wUTkkK6@JNgb$hYwdr3_u51motP0h-EHqh8q)8}uTZ37Q+4g#yi%-<_%wwuwXdhf&4>jTIzKsWpP zv+Tl6J*x&f1_oDPDygYad``S(=hRmmmw{we=pu~q;-%?{XdRO4XGtx%Sfm=c;{B+b4bbe=mO*tybC*&l4L!zLzR|0dBc14?LtO{SN z%g05`5q3{q&v5eMn9EiFqP?Ytb^~HNdm? 
zV_*xLMF4!3Xd9sbUPVP8z|5FNn^pJ1*V-p&8yq@3k<|ZAokQ(Tz}>GjL#r(;b?3&} zwLEq8qV^tGp?`?J+|OhSwLRYRf{H!bc!l1ce0YU@zUS`Dx;Zl09#6dk@}h1vct+sE z75*qX_94AFcClHwk~Dj{cX2m44E$vK3(D35Hhkje3z1dp|6scvt=2@EG9Gg0yk0)v z>y7gTC}Rv%3u!$2CocAL_(?KJz(q$!v1tOMQ}gF4@@U;=$Q-8c4$5-!7!Dedo`RP> zzppna%5vfC#Z6~kmuS~{I61R)nRDUsRH}zt{m7yyLW+d8!EYjJo5Ds(=tiA&p_%J^ z$GXFIgNxfoSd9X`T*;#Yr;C9^!x{SB;pLMCmWt#v3>+oWoo#NVSQJbN>;Puwi01H3 z_89IaY{hO^4%vN9o*7+m(z&2p*sa#Y9?K4dg~qBg3QG<>=?zD0^BK9iz>Gn5)h2ne zk(>@1D7+FHsF5n4Q01H8gNv>@#XnQDZqFC1cMO#7MOd!CY`3Mk9pe!^!<{#6ED0RBpzD_uIci#JZ0E#`< zIM?lv^lxa6FZQXEz_L3Zf6N=4!%SECg|7sbyIM2Ko*%9s?2!xmrS((u-JjQl)fW(# z2Q1VVG}d>|@Rbr5!j;YcFQiQmP|x4pg;!6SqaH_;;l?*OttsEb;!pAqel$RBs8gC> zTN<<4KpoL}e=yow{cVs=aSwgXO4q{>O)0f51m9sCPDjfO^ZLUT?<8Wpg^6~?NN zOC3qRQg{N^RP$55)HpSqn>-GiQLRaABVq>G3t~dz)A3PSHDXK-!1gOf0M@I6N_X&i zGd@U{;|UK{MK5v`47L<`K7H-F2e#x0Lfi9&I)2{oP_fuIVTc{jWy6RzGsD=*}^r-57w;sIbfN5U|W@m@Ce=uz}ty;>-CiGWp=BKED8k2*;s9>{)(%c{z0#kt>3kGRN~ z=2#;AA1}gdlY9ND_jfuA&HBvn>WwKP%AqME;4j=4qlE$z0FugYe@eBQ-_va!Y$i-7 zAj0b=6ovUqDrUhKKI|?1YDqRZJMROz4pw0ilpAJCj*AKjHstFJfPYjTEy~UEP5~!T z2BUu4(~x?Hs&iMxRN|^+E(ON7l?@O$6{#`V(~Q7RXasQ@P*a%@TUZ%cPJ?@h(y^c6+0zn2)I@}0SwSn=!z@GYq)~l&<8w9L0z;mA8oRR^x7%lG?+VAtum$Ds5 z&-MG|wA^BokrfcG@?W06gtw4`PK~UWbVvX@HuDm1x$qXwP-klB376aEhcP2M=;S~4 z1b%YKu>|u)nBU%HUd`=H@VNDfm%s+m?>JDlo`FNkXA~&>BdwYkUm@t zoN#rr)=^(oH}^rw(5luv)!C6*aT0*bj1RpDb-pQ)F`Ie}2n>TinDYgg!V#k6?q2g- z_OSQG4a#VY8@U7C#XE$C$sIdsh^5&hzFg??`Y7k~Eo|*PHp~F8T?4LNlcI4ZeUf*b z<7*BMDf7&W{UFobicQ;;FkBL3(y=a7Tyo)1I|t7Im5DGGNJZYWSr1F2F6RTOspa z>JbC;MDU_O>*(7Y3s&C6rPeh*Qn}J5hqG&cZk?02eHeaDofC2g?Hp%X;p*n^k(yfe zvRb-*@~{YYsoHZa@`T@77Tfgl;|qIBy3mt#n{j%I<2I`L$n15kfFoq1|UiGYi6@BENA{ukzQzClr-uo5t9bwUe zFvs_-yzg3X@xtKt;d(n$c9Ow$1tu#$(*>OK(Ygw>-?lnKam7S(7K#uIT*xDNw-Ax= z*VKag7D7!%6fZ9H15oWWB$zCywpuEa3%c8POQnJadszgFboG6sA$CXLU@p5WIARv( zPjHN&>oqC|R#Pt$vjHLI6`iu{kv3M~s&_i+94^E=%h8Dc&P5yeM8$+utJJ zv(i02Dm~ECosOytX~|RG$G}g}spI18zi9uYfHnrCi6-HG{Q~}RDf*AoZU0UINg3(e z{Vx)zSPjBWYeo4xmWk1p$s?sW-dIw{n6ssMLK9omK54w+h&bU=tOa>ZX0tS%)I4c! 
z(g<};W&u4T2vuUj1XD^v3v!Sg*=}AfqJl&QYS77Fu4NY045leK#R)({9qHTi#GDB; zmg?!C@nq|3)A9T7rsK@g$@cpp<*$}8cf5VKfnMFWAB|i4PiSKVns4=Zk9=+q^Q|&% z_i-Q^EA(eJ+>qNqWY5b!p3>8Wj@Lk>t;b!g#Wk<{9ksI0MlbJ29ky-7uq(9JO61L( zT<}fhbC26y?a(XhXK$qM%YNw4E8J(P|KI!l9_1%r=+9cX%-fWV_ZGMh;E*u5W(7H_ z;QjlE4ft4D=sSf1Wc!TQ#nLu4DRl#}a5&Dgd@5t|;&Vllu$|B?w?egX{h}dnVyMvaz<%{lIw->Lh}(sA60ha zsITjs8y=pruC2~jXvjEgp07>U@NOG_WLt4Kmj^7~#=H_Ds&>`L=B=aUBg!w3vzul} zK(L-Z8>bK=y9TEEE`eSaI5udxki-o{MHwIJ)=2E1F zL9mV!3qk|~%ZsG(Lr@Xnpdys3rO)z>l8rS)$#12YaEX3*&?aNo6OPICN`w?-ovtB8 z1SM1lHI}rlKieF5_X~ov-RMEH-0_BXh(&VzqzJMl9!nJ&m7z1>D(>k9S zGCAd~>;3jb4HlwoEt@Lc$jQW4aZHVa^|qQWe%nfp?f%3a(^!>Zsa%Ti`wd|y+ytNM zpyGUtT0h-nG6SEATit$C&<75o4 zAkSH^`-GH}H|2fl9>c-Ovo&Nh;(ui3iD_%;DR{YS3Y9fao>ok@4-bHnCa(b_cAiifu zYBIdgm@&dcTirN3^P|%fQ2#{c?ny-P&Tk7rVa`B(8fUiXH~k%wTdm$jtcO4Pyv;bz znk@l`-C~i=I!UpE=e?DvM+G@f zzgIDXrUVu4Pr?#}u*6*#y(gJIn8Kahy_VuNgjS}jN)!(Y_)t|{_(<~8lDjF$zKh&g zLq7zPY?(XrgV3p<`5zM`R0BdV=}xTJJGPm|FkE;@qic#1A}KSrB1g>T@3?Thf!GD= z#dkuF$+&@%P0H*^-h{cdJPF5wuv(Y2S`r`x&Q&9^RPwEe+^wNgIvik9OD z>=K4(Sn|mYqS9OvrU@TO4w-1i^(+k|aXN}IWA@uh7RfC<-T@Y9I1A8cjKz3pNxdJ^ zS*ki;VG@gAQtbGoAe1&Sr1^GoLp|d_E(~Ka7Tcegqe9LU{qYuxGGjm`Twzd*@Y7v3 zFznXjoI?!8a)2?cG7=x%uK{oh5<{h<1D}zj^>l% z-C`s>(KAMC<=Mnn=1YR|#->6ATmVp8Y{+x^^b8=CiF4;|(l{g5)K$R*2IIQdZ@$~b zC0tUF#5M3ej(0C68kJO16vr{DaAy#JX>Cy_z<tbav#=sd5ZmdyM>8x9Fqi7N)70fha1UP(34C>9e#M!`Rlq+5PvJ|TXegXv{oPAQCR|D}y#xSm> z#3dw()7d90H}gwU_V(S3OuESrBm|7F7azZBVBayEv#Dl$H!>Vl&_nU!UDNN&Iy31> z;oCKFpoW{Wp$ys5#D*~@8lR8YC%wHn=5j_WE)-nPK222!ap1GK=uWmYCM8GP9mw=u zb2<-})5SPS6y%JhRRp|eC8CIwhieqGkKBuL=j!9#9;HO&W54Vo&`^RG&AygtU)c%*>X94$V={tsr<)_)wP~mH zqu21&rNJJkTU(4lQ#HwWJliXPMM9PPPQ46lzo;f3u&cFSololxI<>^j4+<2tPGaO` zgN&HAjt&VQZUja}s)SUPxF4V_lQP=~*7w_I?xRjBRVtJL z(^0PTxYGz;ho&{zzQUBN*PL|05~U`PR`^xz+~j@!fKNVQNDI#^Z28yuFuM1<7^!g! 
zjWv7x0uiZl(^Sab6KwWEcNajgn0P+V5WITBo^(g9t{LOZaN(^I1=#lwZOpHoHPao z-E(7j@$18#varAgQ9(5(AXUx{rG}&DgMw@3!kBl=76Nifw`8v$cA4{5QI54;C+y{* zR2`CsFCRpr%qlRGdDmeplCH*ZjHwgoyFmQ4M!L{V57ET5%Qw8o2=K>A^{d!Ip+jT1 zf>isdQ6r~@#;{pH`J$xWE6)d!vZf&6sRH2dc)xg6N5oQA{d$iHq$SjB?ZpLyb83$V zW#TQ&T`DpT(}5sR*;4%WSo7d#fWBU~c!E;ux{Fh)#1$NB+=xj97=gTjV+uv_o(vQ^R=mkl?d_eiRV6bKk)C85KUn@rcg0T~PR`<^ApFGy2 z;=KCg@S=IL+`T}x?baZ7Y+->T+w|C$F!+EORC~jb1i1WJn9o{rUf??6x&0- zpu(?C*G#OknYy5vE|;1*D!AeoRqjEuQuVrQ))a;SS~E@sJV%%A73a6k4PG&wx=?gR zk~}MH9WY4(G$#IbgSZ$07sVG{F%MK$JDYjc;q_7c&;d~WLgkre_|W=f@fo@67hN^A zJ4}C1kS<6*%Ou(glP1%UpHq|SQqc?DWKZ1o>wG& zu;};KU!2P=dYKs^a%+*N-|UMgCHAgzoXLk&jLxi)-%@&ARRl_P7gA-sZl$C5fWR&3od^=<>_Dh1X9AmmeIe z;BSkBF9@v)lArPA)%7IweMgv>*hqaVk<8vW1A2?&$Xe}K(rKmcQA|cZ^G-%Q6Jv}K zPlr#Wd@|_R6Cp8r3&5S*F_7B8AzX!E4~Adc^---8s++|ch^P>V$tL$4CP3Y z8P0h?@SwVS0~5noO2|t8p8qS9fBW~SRX0taIKzZ^xFG3hppo^$>NyWTb%`v2naPY= zWt?M^GINenb9SU#Q6Uv#mD2s=6E`p_5`T~96F@N9A@l)bbcQny?amX&Hw~o@?)GJ^ zGdwCjaa2rkTj<#FvHx*jC6rxO`%4+UQM_FCY22mFSTx1flVRN5iKL3<+e=4y+!19h zSv8*aP_>9lH6E41VTCJ#>zeFd)E%XKRJn+`mcTblH$Fm3_*o(T*&W%vU;gl^Gs4XQ zSh#?i#sK|$Zi`2S7n{2#S+cKqB=*dBWDj1iKd zft7((b%q7GTRvqQYD8BVcmcS8Qr+^0ICf(4lucIl(!A-pbbj$}Fx+ftLXJ9Ga$6TU z&|h9hlj(HZG+tNM)?eEP)+of2%5sC9)YS~zN%eBW1$^GWd#*)R^x?-E4b7AMR?*Um z-i=?&qOITaZCik|j*=Dq_hpByvY(_=?wJIvCiJ*IO^7s4}|opsbn* zw;m4VTt3D*SqIe4D2uy-ynhR>9efH+(DA6Q>v&YrAfaT+LSKZZXPX%)G8ch825U~3j>2I4}{c5$0(lVy%#v+L4))&kk zM~__H&|5qviMnpDzBRp_KR~2b;P7JqoE}=50Au9C>XpveS9t$RXk5LLmWKficUtKk z1EXzm6W3MC=v|ncv~p_B5|jS%w^E9p>2vV&zH0mLX|hodno%j9FhVtJS-CN8;n@P zaLC0Zlh&)P_f+0m$OCxRoNa6zR6*M$MLu!P=qHh_TGNkUYj46XEqCsMK4jX~SgNUQ z?nu_&TTcFTUfaT}UfL}#Dk;9c>(CgU{mhq&xqtl3xQ1p2EH?MpFzsd8X~~~-o6!vx zuM~fXbQ^?axF`l=++6hE5ySAp2HtjTxm*~igEr?}EB0se(F8}f2@zD3lS>gGj5t72 zsu{bz@fueg9^Qm%Mk@ptnY^md0#)2J}as>zD76eNAJfl6N}3(;`IXv zR>bF$dQ}6Dh3CQTCk2lUxfSm>#OIQ6BM6d3;KAuPJ#}RXnnB>f>qk4ykK}hrV(XxN z?3OZ1aTB?aJLXg~aoVK><*oFie3Y4ogpAa~R2`FW*0P6LF*k1W&(uz*s_xTadH@N` z^#G(7-i8Aa_3Sg3at_RmceRtFV%$1;WsgW#g+#)str5_Zu7Jc~C}O^a(_o)xh*``~ z3EM-paYaGvhzQSXIXr)my*@V;{I5 zlckBD@a_NT5&7@+_Qz$+(AmM^Cx~0h*3{hQpCwm8+V&?s#@mt;0!Btt103HRpIkSb zAHD52j}iomas;>>e;iSiXeROFIwbc9^S=$M> z22RM*Eu4@gh`*iAfGUheu0cwE`-yariP9kge>ScjF|w*(yY$q~gX7)OWk3b4@B7ji zD7)Bp4|r1Z-=`B~1OhQ|pAFa_-vHo5`v1mZO9gWw&&!U$da>JcNq^rPsYsC6*gMDNoS4oS%seef_cz!eSX7t ztq;qj(Mr-*N>Z0QrW%Ka`+m@Qq(E-uJ)WEOWnM)CD89xVBg2dw^vxhCR148{LmVEV za@yw6c-)oHV*>(9K@_!JP{P*9+Xpv}vGc^<3O==*>@ zxtNP9kl9`dy6(g$G-@*gM!Vk?6|tKFZ@J$!1+CM_%kgI|-%!{(7g)ZtALsbUqT`;D z%XY<~M}E?k3&aw^chMQmksr-a56ecoPocY!K~NYAsiK8qpXwe(hxi9+S^q84 z{(Wbc`1ydm>pUPU zpg(M!i?P1@zPI@#EhIeX-cOAlqKvj&zklq_u`xD@_W|ZMD5Q{CE zI|uZ*?p+~f7PVx#u3Rq!Y#uy9M2XYji~O^4D#8>r@_$C-%6v&7exlJ^bT7 z1US6!wXg3`hc*&C`4z>eKeF^#!L@cYP16p&m!6A1{7*BA6T`{Q!H=wjINJ*rsqac? 
z0h@Jrydejg-Yc`fP1-y=45Ln^+@s2RvdDcUW5WY+8*&rRc|o% ziMi|A`O2nzMYqU;2eNQJv0%84&&H(uN?_^V%YYtB3_2hV4}wbVL##qnYQbX@XI6Fl zE3j%7gCW zsVNHk*)lq!*zV)MG2Oc;^PhNOgcJ1pUIhEo`$F)g4a5||Uv&Tf24)nBUuj}L_GbsX=hG9GpQf4&fxjihWOUzP zXSfz3C~AV*U_$r>iA~xlV-csoHD~vY+qL72BJ|ojF7K{O<{xQKLGcl56^wm2oE7;> z=s7R_#6GvnE@bEf=Mj{PtuGvW-P`Pz*&&_4R@QbWCu|+eilbNP){7jg_09n-YW+51WGxro{Xu_`uQ^)AEDt65#>vDTBeTF<*T5*Xz3@_4F%q$M)>wt% z7LOU~>ZwQC_Vu%anrMm<49Q`bTS0*!OD9n!$?LbVpCi&4fnwb{naVPW)fF>FRcyx% zN+U#Yr_(}fUQXU|A`Yr*QTR^CS0g>!Vm&-c67yM#*$Pz(_-66*KnR;=6o@*_jFv>` z_C~CYnhNnMp!cz5n+u(3-mRHnv27>!w!J>V-vR}dH_0w|e-D}=|53zZ=L zmVRjvB)T#Wcm$viFNc4?IYKvbEXJdQrB{51JrI8buYZ!bLn;sr!y{1n?mazw$tceE z=XBa8c6s?}!kE+vyeLoVGoNY`DMh|Z9RMmz%WVmtSF<0#dpWcPO#tLlD(xIJB7#E< zGUdK}rQGo)t?L>Nz0=k6OfLQQeP;bizWYhACa?+;_Q^D(&QA1;?$-0r+-J^wC_md8F$ChZwdq zvNn&HOa6Q1x6&0W&Dfvp)qUH#>-JS`p)G0nF`_C$o}%Azhj_d`RC+CJm_;1t**}zg%H2bv z1MVeVV%SNH05TzWD9Jna;h!VoJMj(0iDfAofW9rf1N z6LT(*tZ{av+cC@m?4v=YDhb75)EH)o5r`x+2m#xLvnvv~N4zcwUIDbnwNCF>6(SG| zf?{)omE50Vh^XMZQC(!BqVVv;Ai;frrhW5|(CY^vQA7lmT4(PL+vO@2D1wOGv@3s z-j@B>ZQ(We_Y{TJ5OUlo>9ObWenVfmxk!9&ez#7&rG4 z2expOHkwU2BXVe6Fjc+FVt**~G0t3qp)mf|Vz(KQxKn%gIKysWdx*H4?HQYf2Cbgoc`Ca>8`hNgfu^uc<`R?v$yNoOb(-9%ld%un`` zPb8mFP|afd{5duv_=r7ZH$#x(P0WO!corepK+#wFP{mq65>r_FrHy58Sp97Ggbzj? z=D6kaELxdUClNDehOX>bHCI!LF>A>F$Gx03oBsyBg}#rXE3IYSF^* zaLnOFpSp-ToXbsg{&**=ZIS(V5wYab5y@#$r=m#7MA?81cOzU6hlg2Hi+DXgqD_Vk znUr0D#409meW2=nF@6c?y*$bF$+#U&NAwe92c9Ld^VKe3gyn^IGB4R|Soz>625W@@ zF?XEpN8}eRl;kY*D+cu;E0v>A;)9S}!~rkyFx@iSb~K0m*MIvrmoCe=7X5_8|05-y z=RY}<6n@z|nErCMGfq)uNFt{F5 zC~$#9L=E^ScI9iXZtci zJRvL6L={hdzWw(I^3gL%40)F&mzU@p3c5UMMVZ80Gf?g<^Wol-dufz;hb3Iz`To)& zuQr~O$%c+c@_3O-YO1Bq^?oE!?V$&jdWU;&7TRPjvU@<3d>SRRsY624oWq?FdLCt8 z>iAm;UG1uSM%HYhe6dqUl=`?=2~6$c3k~~9lqjqFgBAMPq*u(Si=EY>Cwg7V^E#bf z{4*z3-bLxtrHf-xwf!$A<~k=wX~4X(i$5&-G&{ScHIETl>`-jl9!WfG=(lKm`gk=) z#t9bnD>IufK7!(0xLK~9mC=Wl(CyPY04=ZL$K+yy z>>?vPbQ?FF;Vf}nmwto;9Q&ns9{NN<2q-D2j2^?LWMK?9IwCcSc)& zWOk-LxwNoiU##i`i}?(_GC77hRw$yf2<50^?=J|HBO{M3hYB~eBSzY@1wtlGWMtQn z8<{!BAhK%}?m;MLY>b3sTy4%&#=3(9rDZNuitr#*YuOIVWSEK_05Z4I*xLFKKx7!# z!~pM6z0z)g8qtc;v{K_dt%Yn9@g{H}+mCb#z??gSa3L!$!Rw}i3kAGLVQ49JW=tqz z{}tY79~Bt4k#lazScnnJ8V3iu5_4!mgdMl-;Y0FA*C-@=CcchQ7;9eN`c}t|)`ou~2 z>&?b@T`YG$#b1V{cwQNg%Y?C5TwEE!~@{q-Ev5dNMtO8mh@Em={CwD z(V1GOl?n8+96ebUt49{FU~FfpW|< zZD@HgPK{y{Y#-a4B;$xm6zxP6xsJcf$uLk5*=R;cOEw!Peq|8)36Im44Z-^Jr=HeZ zq(MO8vJhi5&x4E#jpt(uZDy8i7}uv1zS)wUj?XHbM4Fp}GCC_F^4Fje z6IVf#Xjtl=#+W}l@fJ1$i*f~nZB*s1&7$^~7!6YFfZ21X1JQ<^mZpu=st$*I{gNfW z530$#_nFE;m2+?A^O-ZZ4`h4U7_kO(9hKE+a3i@ z$3U3D!WC-()-hX)3W#aOMrwG!mu)F*3yI~^!X>!qB}FEMw1kUSDDK=FR=?`O<*|Nt zZR!B5H^EX-Z76<^tz4~O7kH#|FvXS=j<*7N(Uf6+)tv-K96DulO#_rB|4tE9oY%$Q&zz3SqI> zNCKC^dlE*Ha|w}!%f(k>cVlh`2%-xiTLwo!od8YwT{ZSxR@)>Z=ho%gm|+O@7C1y8 zt>S?gXy4b@ndB{2DYZC|-U8Ng=h$nJsD9-+{W0g2lNyEa=)l2V#g(Lcx&Bwgxj%C$31fyZZ5UsG!3U-#_kMN&x1zOXURkBY1RY>Gl1C zq|IJ80F1l^=OO@H|Xoa;OOXW66m2V*s>0vD6y+PJb_^dRlr2m|RCfC94Q(P`m9b}DSs7^sc*HjkN7qdJJ1ui9f z%GWmNa_>bjcG1~Kg#J4@WsWt^#Uw{Rw9+IIe)s{Ph^Gguv8w@OMsT zbFJk{pgPgogvL(fpn(=}U@Ssftc>NCv=sL8jBu>ly1LF{JSw-#RAY;IrjrstsgFTJ zDlI3Yh9*eKquahOW~w~!P-|;gQ|)bxT4#|+sN(9;X>#HQGGaag@|@VIev%r1^CUYy zr`Bs>(g?%T9r;c#>(wY8ZjnB(Nh#OK;pg`-dVIBXd}+Q`(!w8qEF)uq>Zww_pmKGK znXP13-Ukg`>rmardnv5tkBC(Ru|@#!GBf=88$S<512D!TVf^2rJ_X}nibludA$G#< z$a=ro>v+p+Ikpi=sEL_@VN)}d;NYw5Xt(=GVfy$F*fLDk)hMSg;}C(YIv+}XPHu5_ zpeqM#v-ZmF4F!9Sqiu3NdFR;{Z2kCcw46py?(z-sNo~IKD8@2u<+SE2V2Q_?H+OPy z7!To>SnAQ19I*T50M{&xQ&$7N3R_)WU`+u7!v>7u6#4Ew9xuxMy+4 z;MAv)HvKpBbKG3P1x~S{C0Wn#*(T*X(GZtRUm4_`k}3Ep`$Jgl`T>`pQ-R*CW1mDP 
zAo*Est~14w&6ME&h>2i%93DrnqP#Kv5tMN&%n^3Fd#`QYDR~*nk!x}EQ?u78Sb6t- zq8rxPI=u4+=%XV8*Aj7>1Ph;vq&Pu1!Xpu1Cpc>H%myyTB~i~fAUH}Z*!&Dmd|>4v zSIYg!g04be)XLu+GFMl%}68+=CjCFL_`D$_lJzQ2r=-k;j8+f;U@+zHJ$xU)~uCj1^7387<>sxZc0!@#A zn?($UB1>duQpU#5D(q&nd!{pWGe9_eY&uxM0I5x;QZf#zF7E`npnX`wEM4GmNoN+s zW4CCQtd;jy!`nLrE60A)oW(oV3Rd~VpH|@N4k&6SbUa&ImM>1rz^bQl_{=R{bYPwr z77;-pyu>tUgoBoUXJ&B=$gX%CZS!cCjh^$^>!IajUdjG^oGx09R%qzlRIT#EI9E$C zY`}rAAU()8iMuUdp#vh&7Eo}@gFxTIULJZsF+C9Pvw!+C;BJ~=6TiPV{(>^ZuDlZ- z?@-wSVr?1v?|k%ECa%%71!-^cp6Q-PDEz{;7xYIiKNk}V}4sIevzzqi`3iAy>%tW^x3(PwP|}%FMq8#mMM4A4_&2slgxs_ zJrgcuBJ+8qr&a>$Ab6xts6OAA!*{%6Z-KITe&bbpglk`E)JOUBPr&Jq9;P+$2tJd- zw9EJ(hA|H~Y`KiXP3xr#5eaTIeK2!sJa>0;u!gO9`AhEA*HIWrR=3sK~oNp3I6zVJ5k0W24Y-Q_J1FO}n3Qc>{ z_(NOIF**jm5t{)PlaL;=?hTSuT++B@>K=oBLM!|0BC|OC5*X(WUf;((bO#nMOTjls z6_Pk&Owy_c8Vb-oxN%2M+?f<`rL77uJ& zrd&B~4=|VdI3R}rf;qX4Sv8T*AgGabfzJ~vuW(}WJDBzBV!$~!)Li3)DBprDOR`U- z>6XL`tB={SkM^>r1v7<6MI)VcBk>NNhs0_v%?z226}dWO<4rHi+BHoW<*6#xy3mOc zkFOEBx>W7xKw^a&Rf#f&Yl#uU4aJ1_Xb0;PClsFe{beaEaNT1?I(0?BxiTU>0p%Cx z`#T}3BhlvYIe}IUnI~_umRqF!#$*^hF<(8wO^*_xBcR`k@P4S)tjS~tXZX8WbSMNV zP?Vf^?J@Fn_a|PF_MqiYUx-_ig1gMSU4w0u0?%z47XU+SgK>Vg53*K*)?Zl_uR&0A z&u4CExtvew&^(+}q7@jWD9{OGV&t(+D$_ljwD`*@fN|g5(gJt6DhfFwm2)F9&Dkvg%^u z@mKPtgP)qwx*}TuH*6uR9=QaPdBe__CQ|h;%ckOl^IauG_;nRIhfB?}VF?L4+-W;r zakx-_kWcnSZ@WU5Eg3_*0y{l=-_sNQ(>yAf}^Hpk#k zO4mJ=q_2=w@JRl$doB^5ySiVOf4$z&Pv>J!pTXbMZ%;r;7W|Yy5L-XFwd7uWxIe+Q zu+;tQzAHE~tM{u@zC%)&kNU7&yP*U+mjj%dm?$Sej#>fCZsvWYbwe+rt_Q=`T8$o99b&4=U0XltFbk~V z-EqYxn2kyA)MN25a~0mI0V)+%t`!tkrlizfjNqa#)$#v$Bzf)3OD|3GuvfNUsKq=& z201HNog4#p9!X5vTznh|a0qhU6y9ygCb_fA9`QoxrYU3U3qHJ1ZH%igv2BO#`q3I) zxS$+ipEC7k)E;Y}n)Rk!?`~T1?<7QaaMcRq6Hgrxv|bKtO4H)LK*A5_qG#9bVMs~NQGSWF>KI`Zko3k3x|AP8=^w%r6NcQCi{Vn_%mWuu-;NL$WKRZKPQw2jOCpY_FCjT!)zeH)% z?uQ=2YfbK$MJH)F^|=b&9HCrK1xUZ8Lg+3fU9u=4;j_*WbwJfXY_9AJO;RYu^G{by zS7T0!76ocOGc)s(Y)lW2iwKHaI8`LE0q9Qr@V#5_}=W!wPt$|%f}4-QPOZ|QRr+(TdV4>IN3YlT9RX3l*^~ETekN+ zH=rYik;csn<~<8ghqNmgXdP>v3$YW#Rsl{Ur@uZLwO^zYmpQ@CB7<#hk!?(+#-n}n zH=po;509ARMv+l_ZsLo1u#er#A4#9lIcDG7`wNLU6Xm~LcEve7_nnnLs`5`(Yel9B z_0UXtC?%`VtPTTfv=2ITZ|F9s{t?YvJ-V0Jiq3cNe=jZOCR=QepVEr`nG*d+gN1+2 z;s1?il%hEGllY6myHpx8T50RkhZZP=zp6N&LKMDRNl||4sa19+k-2;}9HL*64?*_< zz%SLE1cvH-K=hpJ#pyJgG5ybgL)+?TgH>TTC%pS~eA4D!qlJ;gC{wVqBA^KcyX|o9 z0bsNWnmsFNQXq32M)S z$Lou%mskCex)@J(z$yHjuN! z=sUa{w_Xt`o>48h3}?Lm+k}8m>-GfmhoAWY2LM3-pKKCBhEAsc*eL!r;8W9fLsmuk zBFiAL<)XDM(1}zPp|DhnK04%oM`!A-)|$gTr5?V@EGTN&*t>HvA=o9@wwma!@mY}M@$+uiG@HFc8e zDHV`CGY$>h$X$91#*;sE?uha$G1Eu@07DnKTUm1r#vt)vl;&(H&O(yNij(0bo|QIR>@IJ z+B4c-V8R)DOy(2vNwdQq<1in7N)Qipa_dkS)*ILuyD>8qgE`69UNcP~tD6kH%z$-C zc-qGS`f8 zp86XX_58!j^dAe9A-BoZlRW8S{owmuKokZ+7A? 
z1A0U2(n>tSV&lxkOEG`scj|KnG3B3$Vn`!*Jyoga=J5NF_i7m~!%IfeyUC2uCPg7@ zi_r(H)qPIb3z2H^qEOgE(fQ$26(ty0#bFkk0X5hfB0aILP`JxQ2Z2(rdi9~M_WV5r zIBE^8ay@ab8hI&5*6y%pR&W0(LhtgQDZbSU1U|I|RYy=FK!_C{7MH9Vg~mfW-o)ss zR)$*loKM}jWg4NDko=6%?J;>r*CmDNCHD0K(+wxCduYF)-D;An;`hwH8?VX?DK>T@ zpG<2Q&QuK^K5OgLYbu_#d+MAAuebMl-oBGw92gHQ=-lmhAiVVpF}#kCt{n;XmZPwp zoLoC`T$VActt9?rOg3ppt%FVUb^wxU=^pZ4+*?^eBwBdo5`@_2PZUv1V^(b*$B|aD z;&pH&TCy>6Wj&^J11fbx%!^jU9%^?#uGT8|_j&cU5Fr)EcE`e+$(@Q0Q{)2;|?N5X)Ze_VlgB_*@GydY5AW_Cc|D>{&;e z!yPsvUJ-b|gO1RS?jyUL5c+9f><)#&|BMhfMIw!(n_>f-#cVo>LW&v7v;gp2*5Z}T z?9vbn@Wkg!13_+`Lk{c192S$uv)L1T84(aSW9kBo$KoKkBEu7yjtI=U1FcKGxdTvA zgBdNDc>@v%xj|0H9s`Pec0!B?lgG_*cyYRgr>Y>oV9h;_U{G?9zD!Zi+{usarnb44PLxLfZ1_=1aY=)An2W24Vd9l6p-u&Z`@+FMhnQLtqoq4D|iE>QK>tCA=sO2BFpY z(0V?gh(qiJaGQ{@E>2NvVGj7d>)FzuI&nijJIZt|uAJ8wTslKHNQahejlx)X%9bYT zkJukn6?Sv{_1<2@-&eA=Jx8k#Bx}y<*l+)hp*&lc zbQ<^5D;WH6qwxQyQ%1etH9GVPWBTdlZC*adBlnqR4PG!K-m)QFDj% zgf>ze>2$VDNhPrt@-L8`TTvCW55O;qu`Wr`;Qn%--#uP`w|JkcUcU}5*a05gt@?vk zLcO_58I29loVaXSc2*ee1;%)BpJ7dwd-D$fg+14wwj4qTQ<~T?n+vxPVRA^;kpgFTh}iNYB)5^iF1L>!%EP5x#nFuTBEJK*5IYK;c8U z7;TOcJ4{qXEznEUU8!O7!fGF(N_czcuBe`={)(M;>6IAhRwTO;xrLtjAs8~aiDa-> z8a7M@37C2crV<{MPr2w7RclZ1@tBFLjM9Xwe5n8z0^ZXpVuP6eeGtBWMr*^GzYgtMww zn4++ib;~;MYGsQGAi>Mh>?OpGkwIygrMGuF&uq!?0K(2_5WOasu%v8J4M`9^v}`3g z)(54rr50RxFT(Wn6)br&YWV$erGi+Yw4)zVx;C0i3Z*w>;F@Diz*0!{<)J`^oYR67 zUOEDs3+%6Pw_;fJ)>N+P?QgP3SNndrS@Jeio+`}YLp)!DDg_HzmE&4(W)3-y8{kV^ z2&B_mLgWay)0#>5$9r7N6~upwD9Pp*AkO9!c+E{cqn9YT{dLUT{G``B>nr4`c(Jx! zbO7?U`Ekj(5i1uTL30qEt&}j2APGp!z#t`W_M?J3Wr?Mf5!;7u0048~jr7gM2ttV$ zj3#VUAuz@mmGzYW6TOQD%DMr=-P+}S?~`aJ@LNSCd`te{ zdtZwxixndj0DyZM00650sg_-wEp6z9ZGJ9Nsy}Z||J5wj{3Gi0he2V^#O%h@As_?6 zK)^s_#3UJ=F9s1FAV87`V#2DhK-eJ(M#g+F9UP8bxJzect$MDdQmCR`Zz}{7PQ6^R z`n2*~X=|Hjo4305=Ivv>^31=}(?p7#`uB|9sn2Wnb(V8yy>F^YpZ9?VQTF6{t9T*p z{vjpHb$;!|FcIIiDaMJVwQGGqE`~Km7we>xGOpLT0b70+8R^f+(4lGjN3=sy)`PBh zVidHA=IMTdkO~QoxFyfVi1F4Bd~?sNcHn52Nw7F}KcR8wt|{xF%qpLZU-u#NOBq@o z?GnW!hpGy3@EV0({y>?P&ec&8@4a0GBn48|121nBm5xt0cj-@asJVrAXw;nZohFt# z)UHj8d1pmrFG`~3U@cXg`2)@?r}#k!5TAPgiU&*5i6xKz0Tw&$Fj!|GIz&{+y)t#( zNrwmG+QhlHCM}vshE45S`+%&pqmbPpsZhO&1A4oA#^{u8_uvTipXNwtRz1@26}n!A zv{lEXQR^*twBtm5z17aUVdC+mljB~?9Q-JvtlvB<39A=YtR*`1WJM-ad&BJKhD_JhlnE%UHvQ zV_S7A{xNeUCTh)t#9WvaZ6Rv@!`9(Y9+O51{~|N|APwSERtXx&;SL!*x!p03!gyKo?|gX82Ol8RTI`Rj3NXZ&2UGxC-V)t40R^N4CRodoNgx21xL+Y zhjW}JNKcegM2{37YJj)|1uyn0Bw(_j*Fo>s$Aw$RdDQpiT#^hzG5Q60(5Ya}ab~K# z=nV~%I5^Oq5WU?prsAVwMrR*Oa_?~{AbR|AEX6?!-yHIBH%lQgi*3SN@Tv_{&Mpq? zx5Y3NZRvy7W*iY$%KTb&p(&@dqrIRR|S8hX&OH>)6RhDI3; zrDGn(A1DOXXJBZmMweQOCR0M5Pi;l(DwQA`VjIK{)NUpfLCRa;;3A>lXW6u6K#=j< zRg?LsOmP)BQR3TUp&b8>D~lG^iBzk)#@tjY*NJCD8mNFHN==gu`fM4{YGz_~VH{8H z`z97YmWUv=2AVda^(PL*lv~>9Mxg1$24!uNooda(eVKX*|0>$DRKs=4Y>LbmqO`;8 zFN*}h3?(06Pzp?B=Xsy;J5F9Df}JuprJM|UHbmdSqrP3WlMm6_7OiEAian#&XU;%- zB<_nvkCGsKFk1mAN#=IN*h+ngBP_6e^8HLi&NW;MLzKVnedO6CvIRj#nM370Fe~($ znYE`LGDctm*2jQavqF*SQRCZVCJUozBU3XY$~6=%)Y$$R%2srr8ku~mm_9Z`;Npqx zs42v`1V*Mxjs`D2gE?l}NQ{E?G}Bk}0n;>=2J0$jgPB5U;=zgl8y&oxSXRu2zjGC0 z#ri!A0ymx|q7FOueuUt88cdjBy>^MsP7cRX4ZNgr? zh~zSlQa2_rY)L%<>Fg-6RiBGRrg2u%dIf!uIW&^|ETlU$WUt9z;pG^=l!TU(q35)n zdsFO(;^Tde7d^n8Lsr`^yuo)W9@vkbw7s)t-R|04db4g2FMGId4sT8$L4NpFd^tBK z;OS(zPDVyt>y0`*FbAX((?Kd8tiB2-VK0U`ebh>Bcf?ryl@G?=Wt=i2MqD??b~*Sb zKW*>W`CWQ9iqLwEs)DLsv^MKke4t2-21ok@Lr4*e2?}4%^X+p#0jSPlTnqJZ(nqo! 
zf7vtZSMMx80`}FB4`zcomdZ)lYZw==_}s+@@fTyJZ=|Ly2L_96Qn(DLnyju?%q;Im z_-xOh4}bTRk9XGS-09XFxHI4Q+_`sn-qIQI8|+f_K}W9aOgp(1Oo`qux{EJl8K63j zfz%|cSw~oyk)vNZX2Yu02-Z@DX2{AY@ze@c(Oa2EVLz4mwuIKliL9I`z;$cQ`nwm{fr|q@w@egVW62wmC2Ai(?=`8R>kol zi>y_Up&ySh1kt8NlD8DH5-QH1z;O^+1aItL{rV&HlpYq{jJ}q# zED+)DZYpAi>n8&?zK;bMJ7jDj$aFqRLyQ(6&DIw^yCF#`@?g;LIw21}5=7n@9p&-1 z7EEIx18501{xZ$b(SGpR5%sqIR(%LxFG}=@ijOd8Zg2? z%fmY%VI~_3DyHIuattC;q+}d^_;|$VUK4~+2OHK+0t&YE32O=LILBAz4q2rSC6B=uGqiry^zzuLogK)odnpqaNjXW0abH5cr`qR27X%_K}IpdS5M?DfY;r zFQ&3j7s;noU2o@dnZ1OJVq^++{9{G1bs?aNx#QYKK_jb|S3{kJR_A;*Rs4YfC;V`e z`gmgXPfCS(?+QR;%0=R`DYGq9{LRZetsc4+Bc1l5Rca>}9)?x2|Bh(Pcwx(%taT z*ndLw*>eM%m%txwtDTd=uS>mdr$=a=%0_mPCIL|ud+Wk%O5Q2U7S#1x^DZg%h6Dvw zB6OK8L1vUbypQqT;Qo{)PnNx(0g@8u;SEoMS;gQLhBc@c`>Z9d8cI@2Nm26yX-X9n zlm0?cAhkl@?!2tKRBsVF^l9E+`_P1BX zErNS;8FHKqF&$HcEtwB}3|F!orze6delw2Pfdx=AyX|h!Uy+u14@8C&;5C3tqkARZ zl?9}3+7db~u&inoS5D+)J*oonQ$^E)t|YWhZOx#Sb4F7_a3TwK7i~08Zv%>51mj%< z`J%dJo*{ZO3LOaCH3F1XI7BE&%d-N4U!*7w1FglMFz+TO8@^bZhtwEr(&THxx9;yn zDr@q{24xYjrU33<7K>IqgxG@UlMfqp2w;kMor8mRj&Rot@QN+rlG4Tc1zd zro1YRYi;ZmFEGI%3tW>!gwPZ4#;x-zRwYVS!l@wTk!NE1GOEGB)lcRtZSgSfyPQ6eck{k`T}lBeEKEpBM5W)YsI49&wbu2nhUbX*h2f{RQMpai?RG!c`Svo+n1%ch=FC%G;RD_{ zJ-1n8X6gI6(ZSB?uZqs1sfyoKid`P?b`UFRY10RMoWKRGl~K#h)z=BF;e_{`hI4&b)DLG%}f}m_! zFUe*MMv2Qo4y3t0Sr2}vj(it3=&>OGE97i=07e$`f5Lm62NVa(7DvQz$ZI1l3mNaR*S)u(^`AyZk7X`mQuKkT~@`0L5 z6&;>P3`z@jlLc3xoO8}7Sfmm7WFhLz9)k#kYN1UHsi78ZDMXcitcN)s3D(}?jlttp z>j>($#pHa#^FMe8@1PC(3O^~1e0Vb7JZNxtE1ct}&JgB`PdxZ~`ePoE*cS|A7ZIC1swaXY>sdcp~Eg6*D>0{_5W zNu8QjXU{tV;f_iLl@gypds^o%gC*}7-HLi})$(dnX6%XRk^70$Lx)V2(5l`J zEUi|vmliaW)JeMbR|T3@?oQ$v>EZ~fIPsF?1#CL(OG~DAF0ED>{02t)I(h8PxV^QA zwZdYqF{pjkyyG5CgV+gOY!nIh)!C4j7IYo()e6nmpCw`4if)9o0(c{~`Koh9`h*m{ zm}3dy6HoR5?E4h;SgNXQhVJzdP(90dl&?nw{KfSNCF^eGBi61X+B5^U=<{0NU((6+W9d{d^^UQK zget1FB(LDecS}{JMXQQ3xeRSOnTgAiQsG>kXdt|qgTaBSt@7{=Ja)3c>$*1gOD|Po zib6(?qO$_iaVVkUu7z*0`kY9OBF4-7hRfonby!)K$^*mZMDh8NZ260h_&^;I-SK1E z%L3c+AaZw}r+X34*q%osdBxNo95r0&gB6Pdfs2juj|-9I+Weit4Xnaexsg`rD?7YT zW-A%GLVk78j%dX&+Pd*`ciPks#2-Jj^VP%*h zD>Y@08EPoqz%L#2wX&p2fvib`QHTLmt&&_R{o6M2`WmOW+2qx4JY$^SOkTPC|x?h11r5eB7#yzhQZO`8nj$FfD5!4GXDy|g;F8SpAMM9 z%5?{{1T@R*kk5c^#=OLk>lamys9g7DVFaFd!jcFoXM&qD!-&L`bD`XByZfRB6v>NllJZ)aA3wt%Nc z)9;gN)Rl>DD3|T7?5_-=df?|?OcibL#dI_LHDR^(nbuz6XsO51HwD-Vxh5#oxb}S_ zvTN*8PkVTsKVUmk#~+?}ZLa%Nk@g*{A31s9sKNc8sMfZ)yuBc2*zeTu{=7xDO$zN2 zTeZe|@4(j-cP_29jN~cv1&7+=q*og%G3?4iO_sc>Qz5>v4G%ioeWx?6+EQ+JygzXX z2JmYN@bvu}^$1seslF2@3*_D5d?#FQiF4-M^vB+{CktEs$Q=pN9+c|$p#C&Rrrm++ z``=L>sBI5cu@1=k1J*BSJU53?FLZii@IYN*^aGHq2QXmxhh}`kv>$+N`z%)+b7Eh~ zz#o|Q^&OlR*dv6SBygKlFG|{^W428kR#`KQn<3p3ts(4H$-U@D4=f~&EI#CshNM2B zrC}L}6LdA(1B}hW$eXcwP+HD5BWn4tx(0EeFc_W zHcbJaZj`#MKsw%EK)#$x2uMZ1YV_rehE28&cCp~oWY&_O1QXBsTPVx3%LKRhw>y46 z`CKv1;JHc!J-1)Bu%=;-BO(~->eykQ6HEcFexH)FNfV*=XuTJXx!8M8Sr#NaGDRo$&^`x1R-4Ga*$G!)4zAIPs8M`OLkVh$+gY3$- zzJdQ8sce@CkJk7>-4lNZARPZ8Qb{6h>LF}z>tOiH(AoZ9L*5kSDZ3?w9}S@6{wfEF zkD^`)3X~7YvMr^JmZ%V7N}-r(!s6$TgSV~{cVkYaH*jAfY*wHK!KybfHo`4txG}et z1TqK>kCXZFbVk_C=i|dCIDoagi2=wn%+^?7tSB%g&6x&UK5^YaKrAY5s(|N|>t#Mg zypGNW`n=S20MfXcjrdh-ob}{pQVSXH0csmv*85_UaM#6hpztR8_+_~|>2o_NFJ^nL zzoskBQA+0GGwKjF>YBAiBk4uhq$gst_fZbToE5n1Ql!9~$NkB>1RLn1r%A)~pwW>; zu0$1OPN*s@&!(jfoM-CfbVH6mk2O%CdCd~h)RsNvoyBIY8%MmzwN0%&>89d#j&sfPv74;KHhi!KuJSn zC>R%nQ*!xOk-dmwKBCG7R^z4+ZM+LTbY6BHd+oVYl6h0<-)VY`W8#cJUr2n}B&_IS zmWz$$pe|Ogw8PkVNaG5n#3$xDr7Xq(jTmKUdzqaRbrEuk@+UiliZlv5t7fImL9=$f z4q!FNq-F`mGwFY&K)!`uQ*gbF54lHgiK&!L7EpSWMMx`@(~l*Md19*C#qgnBvLNKw z9EtbyMrCu6I>>EoN;iMQEfDfGWq*gO1ngig<`gXbi48az#@rgTL78J1li;dy0pI7Jq%|%H0 
zmvc>4vEzOw2?kQHrG;hEhAiy9u!NuvLN<^DifVIPOc#lEN9+1;RPZl|4?wye0zvA4 zVMG#N6r)v0+GhXV-Mh=n%Pe;L zJ+i$sJ^o?9_N0nTIL_$mm{4v-umcKL6AC;sZ9j(hd!PP9uJrdF^^+;fCzAE?Fh}79 z4{2}DrUYa-N6@$?7C44Y)%u#{{S5|t7{D;?XsPpt25>_k{FGIIm1wDZjP2hApu$)ACE}m5 zd&+AudDsiGALBD^Pr+7Uq67oO56R#~!pgV@rbw8-9{tI`ZsDEu%WSEa-WK#0(w2ng z<&MY8TE|yUACW*K0oHPOX{Wsox8};bTz%0)*6MY5S}Bd7naz`3>|=4&pOrG!1|V}ln&{;iu|Q=Ho4s}bq*qQ#GxvTQA!8Q+vpXzqfG!1>g91OYnKbzk_%*Q+@pRIpG z0{}pg007Yaf6M6~jF$hSd;Qm_M%}{zWd-wF&$X$E8xu%Ggdm^LKX|5rC{;*+#lIo| zp+38yl}nP$(6kBJ)tpniX2Y|2+qOq5PAf&NAip&pD@DuJw&t&zwwK0L%_^Rnm$l!{ z^reZbn_1Jq7xS3c&bv?F&GYvA_H(ZH#ejIYMQu9el2T_LKD@*uX$H`vX@=HmyiAy7 zd|xloLJ$({1d3r31TRWy*z0Gwj3F1%lKR;KgHOy(T*K=DlTS`B;R3SZ8=1Cu6n0+n z+2*54dhT)F&G@74n_hbF96x{jS=b|SL@wk~)5kXszc0;#yh-N3DeYso41YHo=0CJCiL9A_@g|^Pv1)rXF!*mR8=}WiF;qT`jAejT8ZC z)vj$=4buGC1=Xm_Gk6V^944zWj)7w7}6#;{(rX*4+7NM$;Zh!IX}x@Y1vh*;z|FX*iSU z7BMO1cSFLwh9+%n1_C9mLO-ZXmguYh#n(GUXBIZw!m*tfwr$%^ z$96jW^PO|X*!$lXdyKkVSL<0%t*TkG=45BpV_d1v-R?VEj4AHOom(P}u}uicyo~NA z&ArrNwO~0diUrtc&P5vj*j^knnQh1$Sly+}#b{PC?TPjad7RA&+qOtHlfhRj$oxyN zMrGYzww-pX%stnD=P>=}uFtSTyNO8(;8+E09Y_Riy7R4{9J+*Z%)nej*;*t(0i|AG zxj1NSP>!7eXj@GI>Ei4F8$8BXoh;ZB zqn^tVggWH&COtQLvq#YGT5x5hz_?eS*kKq>TOTuADoQY zmY4kJXW~gy8D@S2xN$Qc#NLo6X<^@q&t~Th(jW=SIKy5}m9+G;E+AV@J6rwD{@7U)tPjMyK zaulsaqlj&gKhWb}jw&Yss1DnoYxe7B`89sL>MbJePk9(u^^%7LZ@)7y%t4Eo>{>%M zkERP~ljG!$48zW{w(9cUS(uGIKLEhIsv!CP9>(@(6Nr4dTC4m!eXFB7a+K1Qy!pKJu%2E}%>$|9b&4F^cvZAOWRE?AhmK^dCanaoRqra476qE`B!Xt78p3t(F5F&<>o6DMvC{e68 z84*oE(3=u7`i9HX;0W}2T1Y_zQwX+?bbgN7-?}3pyCl(K;*kY+p~a{bT*}ecUh#Z5 zI}J<3i;6Z1;e6QB!QYsr5qGZjd6|hbvZ`aDOR_5F?#Ozyk<_6zMU_aw-v^^-yv}i9 z9lRh9BO=!BWoMb54LS-@3iRUYv7n0K>I&BbH^qg1Y) z=|cud|CmAvmOs)fBCIs&4?O*|`{J?vWigtg()(pbePX?JO&m|XwiWB_vUvuqF-{DH zHGC8u-UnOHF77}Cy*z{!Y*r%qv%N&l`vOzJL`|gF^12P$7MLGx|8eHeG#)%uH6+51 z9JK}|<+on&G%YcwV1+f=VWm)=5LR7tBcfXGc#U+Ti#=1L;P0Ic)p{^pTWhtF72oV- zj+?4*Z5eGd5R)f7T@_?@^Qfk8ulGXhhyPO3W!PTMe|`w%aH!mK9hGi=tAKIwL}y{( zGWn7nrPL8G>%Rn5XRg(#^wAog{w_7%$ps0seaN^!;=iiqzoOyY{O3RXZw%R$7F)Yx z!JF8`(ni$;mXzJq7gIaR1*23&cT%{!MO`ZvMmkBCL)ZLHbk5Z}zgRTHRCLmkYsX@* zHb&bDy~l}tza50`;TGL9O#(5gZI)FlvNVQcUKeIzd?}7$n7Kf0$tdVRw-GqC@3)=E z{M(>vYJ44#33?_JDjf~Zuilk8tr8d9pi`478Y-Z8QI_-=O*YYA@_`TU^8qN!wHRZAc*^y`mTdte_zHE|e*#MrDW!6SC9~9EaOs8S0$RQTQN6>Zu7;DPd zffVK7p3%qg!`Q$i85vus2%|WCC4%ulP%3(09Jj12Q@-UCH9=50LOx|~vECxW?w!q7 zG^1`qHnI(>l$U*3uW>X%h{f~^oM9Tw3C`dgMYCTcb*Q2`#o1%5bHw{cNlS%-rG?Q$sR-l&hwSU6t_LiK=&oYZEgUGql&lb znNE%myg~~G7>?1|DhWi(AAnL4_$yh!5vm`2u7qytwW0#<`JBOf!t)a#Iifl+U<;Jj zWOGhpU>m`yh(gH^5=1|c>@om~Y+)XT)vA|l?yoV3UeHM~(kb(!{;!=%!inV@$vm*3 z7O-<+h4yx%KKqHUW5a2Gf>8d@RGc~Y!b2`w8o<=1uvVz)LVd9oQF53>rQ#E?On&(= z;Egudqi^?Qt`no%Advc{izF1H4!{SKp$Xw$Q$G!? 
zo|{lc5 zKqtb}?Y{FEj0X?tr|N@3r(o2|#}}2;8ttA{v43D#*)lCZ-6*4SWvDOh4?&t6iZUw{ zd7?+<)pF?Hg8Jeb-}XZ2P<1s=Ot_TG+VH zI6Z~lYv2;P`?t>0d7$KmkEj^;y4Y#;!S&VleRhXgYJDiALJwM!j$ zg{yNxvsT5q^Wykrw%;9l=$_(;m~|>;X<0%UKF7_L>wTh9QBr)Svw<;l?^14ClTI0z zmoRu(C`t+;OB|_ghz!Qh92b|JpwQNCjE+DeTI?@O(>}E=D$|ce4V~J6<<2>BQoke2 z--(szQeRdn z6UX+*r7LXVKG|}_=fYoo>OD+WBlPEDujQSQGXQYGv>oR zpQlD|X!G7vn^bu#mS<=|*zMlc;p#Y4#Oz$8#5JJZ14K&u31xLsx2x5zIErA~arZ;c z-7w$4lTS-R-m65Jy-YXqlWP&*Ck9wh176=Q%45!MhD`7hhA`T6uN|7rZAn&XUmptXdTSrB#{0|LlQNh(f@9Zun)W_(} z?8g{a$?o#K>yVv{6{#II2`!l?#IZA zgd7Oyhs_1#;B3KQi9!)cQE)}t z?8YyFJzfAA*R;&H z{CEC*0L&d~?3YZ4!1AeGe!#Jb@v{?Fn$KFX6S%xl>j-`<8-YfHhRL6pj*&*w>{u~g zi9_nxlL3h5L2zD$Lu&#^&z=!j)2jJ4^^s6)hTl&0BQmC4;)gb7y42yci?7^7sWx50 zBPrOmzwhv{SI0GLoq}Uxu_4WD7uvK&&|@_w+r$r>9pMdX@8Fqh7CpOTa9DRJ56Kb3 zdAl;kcUQ2kBOVFt?QAYiFL<^HuFruQAD&O=EoF!^aqZ3~OAg1CEN84`$XM1bnRzhf z{=$`TtxU9BM>J2_IBg7e%koFBZPOJG^dMo?o?jFjaII=G-k|~hCs-ziQH02~*ez+`?tAxGjkmTVHV}m41VCa0) z9yf$)1IpAcvMrHz`LuU~Q!nG&oM_?oeydT{WT)RBQ*EeXmBQ$S3amKrq~ zc+?$l)MUhSWy&%nkRg|Ni#WH#fzh7 zOhhEy9uEkQlgZS3<~csE<~g~g3Y9jhaMmklaDK{9)+uj!26OIE&_;*blL43XS`!Re7?~$??c&KE8eN430r>qo(1&-+s500uQL|O(_hSx}kMu;Z3`=DjSGDO%S73Q?DN+Zh`jieIF1Kh-& zUUVytywxt*4llQO+!UTugHkyv1xyZ^aZ%N@r4yeu51bNpDZ|QX9l0--An|;NyX7w0 z><>rl_4P(MEMirb-l`|GH*;JR_|;9q0D|y1HUfp>Ms9*56K;YNRL_V?PAY|xhWU_i z3fG?K`D2UqAWeFBQ;JY5MOi|-xMFx-4#HWo`rYl}0V7Z~OW0kifq&ARL0t|ru@uVl z`(2NEi!$Nzrtm|b`22)DLaH_})PvD+OY>^IvD9nw_Wd>+_})`VVKz)Z;-j4EMT=rX-#9u~Z!yi3 zmQQZKnm#0ti^m2=b-i^K0z=q;YkaOe+C*I{9MezK2Dp{AEs2HRbHFnCCbFxm)3<`X ze^Kc@+0qcU!jgD^2}^VPmfaS221pF?hfD-asHQRIU2!@J4?U}he+9W|7zcf-!Y|47{z;WPf@j$s%ILnkM4ta+ z{{G2H5KECcmQ0y0RSc+DT9nBxc(yS$;-_8gmKa(N^@vt!d<9nJ!z0*+oAfAU1|Pxo z@{p+%_J15*j_$4TM_jzmuqnp#lRF>Upx;{UcKSSLyE@O}RBW}-?)A#!43u-UnN2=f zSrl!mGdw$To#LC02k0$md;f~;`{NO9N6((EH^r2e_2#0c)<<>D&f=vup=1*F31D81 zGPrpa?t#mss{IV3Ssm%7>=$wvx;m>X+=rQIVzw>GZbOnGuN;N379L2{2mx7OPb)DVf}- z_gY#psAr~@`YlvbHUPT&@qj#0j}%fa?305>pe8$V>q=byP)J-c6AiJXm%BSrh%b<$iZ1770)tD63<>$!_pXIUy2htiPdfMLd_^}w5-DRsguYG@UZDp#AA z&xlz1F_c;pO$JjgyTvy1`PFz=<(P-T8(Q83<&>qDZ@NQKl;a{JOaOw+1&ZfJs4J5 zS;EnmyDh6{7dIrl^QPSBC|~f4)GR*x>&JjuOa}#<&Tb!bznQ(E5wpD$@)gXK=Nl9o zsBEx361}-mYcxs#_ZOQJj8uWVzn*e1x|mq_N@_oMfp?%BuIzDD7{avUkdNctJ4#l% z8RWG$+cPb=#PQOD$oS;&`qLFr9nx4gH>y%?1CB>aA0bcOR&dq?A_u!-G{qaOOp=Ab;|{xXtA{B*@^Wcc`S}&BIQnJSkj3P>MhJwrgb|_40x6HG+~fJ$dcNd0lH%%qy;>LT=hb4 z2B5ZlliPmFt$k%F_F#PDi6l=M`)Bf7d7TnfRR*ruY%QM}FiWlSO3NtCuJ*{v5EP%7 zKBLgZvdS$qNOja)0Q`#Pdv&d*!)%AOw4rwfW53j*T7kMebn~FGW|V#wc}-%!yved* z>AyHKZ6I(yg|fCy8(TTvvd}swwhG0rly*-I?6O+O7hHd#v*r{w%buKi7w3iQ`d8C< zV(0m%%`8T{1|K~lvzixJ2=RrBegMEvy>!F=gdG~>ag$zPTRP2w2X%jf- z6mZFX!ABh;gy}ZNef1)tBl%%a_#wWGzx;ZJbcO#!nvEi$fpuVfX|mJIUkF{k9Grm2 zh}1iJZIxX=3esbkD>7SUnM*9PK9W4V`1@>A8vGSIk*XDYVu`Bq7f=oJCc`Co)&^Pmo|Z2Ns4s}PsugTl6Nc}`!ah38?Z zO$-V<1KG76$;}?m;pgF2Is;6312}ZU%jrVUQyrK~KZLn=*h|njK?2&q9ygd%bFY{? zJ|I5^!Ck~TM4(3zpif4@UV=OUpg97tQwhou0_Q+B+qy9LmAd{B+d4?wy4nrH0IQ54h$q$h|kt?jXv70^BHA8m7LIbLNFSO&tYA#S$emVXnj`{8#miZ zJm@oO;HOfbKMdcI$&Y!m#2v2)$9-d2lmd}gZusI1;f{q>%q-7dsqHHjj{iiMfcYQ? 
z?Q2Z5lzi{)?teiC{~0gJiqG=6Kmh?=BmEz7-TwtTP;qs(vbXqeYhcal0gkH6XkRTqI&a(W?i^s~NAc_#FFWl&Y6;g| zyB;&`K3@X8ypV=u`Y-zkqHaUJr{QP8u%NVNx~Z>MM6ZyA%G_%C@Uf}X{MNN zX*>LIlVwCq`8%0}xT}jBHo?v57$x&49T-7}*sgfl3 zN)V6jdpww>XW8EWd|k|T?y+!UayD7Wa!w@$ET}BXJa9BrBkfYl@}!M0scz;~o=Hr$ zLn!8pfqdC))k=%j*!4-}CDkn{vZJjkQ#ERe<{5)jB<9?WFzm~2*G%HECCQ0#C#tJE z6I1s%rI^y}EXKn{wZrNx<^BkiMbo22dlTYlv|U22cBY1XO>H963*m;zpZreE9aBB#IwTCC_G!ynDYA4n&Bs}9a&|V2hF)D-i^y;SEk_A zcLDs7pXMfQZ4B2!F4;FqBy2cU19vX9jRl7NwqK*+4_tNLE!LAK3u!!gDG)L(wzAD2IzP6jCJfbn@r!^AQV`d8`8U z(}u)pM-qtfrncgLB}+4F;uVXs zqgP=2g~mc9-ufjg;x4oX2C(Pu&1ifQ7YZXu?6Zxs$9XmySzGJu_E|Wzlb9Gh(W*I$ zG*VOP3{12%0d?4B+eoF=4NwG~PZ*c^(`(E|8L2-rtxP#HgS7dmjpPpE?uTCJf4GY#UzeAdh&qzNj|>mRI|M-n9OCUCl?OWx#UFyCqjc!vJX9K+?5`33pi|O!}_OATVsMT z*)MorVPH}<@hZfCxAwamTFh%KHwVlZ#kNM|%MzYzV6>(hm|xjZ@ThktvSoZk|3ek} z@XC(}+JC2y%Y0RPT^?9eoKbW%g5JgF^5RLozw!VmMw@*=D1{%jPh;dMqn`RB*5uv- zwiXpTw~&|d<_DgD-C(p8L|wXnWsW!s!dt4`$_nkSuMzG5SLfoq5ug8Ejvu}UbU8c0 zkd?yPFBOE`khvOB9XqQEVO-~d#=07z4YVP`uMH(SHZD7bHa;=W*w-w}tag?wDMb4> znShhHFRplRP4CK4)71>}C9`6!QByD;0cmt?d5v__>-PLOw)U(fTmXc)@#qBz!;snX znCpHk3V!yaqXWqnHfh0H@1HBu&pAq7q5ks92@k*4%`!*dS))%2f7+opb&%R-;)uF4Q2b;EHtYOwfX_HRJ_Qrwly@h{Bc?Omw}QJ z!DCl67d`8R&?jgzM04I(0MXx25F&f2F`z+^o|!s#mJbOjNdo9d z!-OfAD9Y56dD;Z9R6&az@rUQFEs6)UB4?|3f9teQYpwG(E~vYqKWQ%qQdn+VNXlUux?pv)a zx`4e?$|sI-XrZCLLi-Cc2SgaSJnDR45;UyvA54uFy{LTP{z)Aiog?PRuVe=@J$ToMf98R@vk!Lf#daSfJu1Mk!hdY$Q3*H}+uCl* zEg$SXHCQN!8&Mh#WyzUQlCfNLz-Nu2bv)$H?2UV5zp*j7;~i2g96SCGpJ2LPArnwaO0cJi7C zi_ofM-S>g|r0-hTavv;jGvJFLMP z2DlV=_x3c`m*@VglK6hX7R4;<7wrGPc9Duj3fKKnZ=(NU+p+$yV-G1?TQdtITSW(F zS20f$GspjO_DD&Pkp>k)3j2=o8pD+H%?gY{CZgtyk|7}ECqxmAFh43m-ISU%C4YPb zLp(t43aku-Mcm6DI%h&sSz&#wXTLi5m3 z-i=6|&B^dyGp{n_+zp>@IxCB)(?M)LiORL~D0(;a?h-bz=3t-ju<2po+cyZ`$QEce zo4r+s4EbmMsq`3n5#8s4BVO6ZQ}+eXsx(jlD@7jZpkqjWqWcuvu2=EmOs_M_tb6FK zhDu4iQFJWmZleiKgtEaryn%o+Gfgw=4jbv3HAG|NKV4>)Z}9&*t^W+E zrhpW1vwk>{zkakcerz26mrC>haU#V$9seJ3H&#W)7Do-hH~VmT*Jgj1Dod4++?KFZ zZw1#hRiXxrMcv-^i(QF^4d#3;gA-MEGln8R5QzY-KP(yWR0291SSmkTeVmKVn}94jamZ za>T4OE;-H!xZD1{=THq?7?tbS7=P>e+dKG_s?kbrYa?2x_DtNrTphX_Jbr3Imu4!7 zywW}OocZ=7u~D4XXf*E@fHhgC^U|}fnMcmTA153T9$2>8?B)x1tC4Z}_ouMKX&!y| z84sFu5_zv0V;_Y-6+S*{Dc^PXG4_WES83+gjunT0%Qq;bN=wO||8+Tsl*oVt1r4Be z?xRXXZKDJ!b#tC480$8w=|b?p!IaqTy6ddy$<4 zR$?SWYL* zwzd#kh@qj#^O3#cFSZ(r2e>NH@{o>dK1)%Q(w=2NUvorm&y+N3hpmvc-O+_WbtGRZ z>ybtUa<>p-AnPuTFF~n-n`O`qLJ2)_RWplUsmK(xjk;)~>vWX#q-HgNz6Kl9`Vje| zW*`GycW6a91L4B?$H>^jb_7wiWu=RLBOJE}+ua(Tb?EdMG0Q;7M-@%qf}BcxQDTr~@P2CFn6ANcq^D1>EJq_CD<$VxyO3RE|b$3N#J$lQhPCeBub)lP! 
z)s?>g@O7RHL~dXtF2pV14+z4+Pld=BH~3R9a>g;~zjMkXwO<~?HXSPs*--A&$?oe- z9q@Szh~oX(6Uz<_SJ*#v=?Y=j??=%eX4W57@C3!UBd6cT(=}iDGu09I;si>N7aSil zG}={La@t?87Q-;)-ZTW%0}y%DE~+CHca-Oixf!zeWuX0_eBSR`i~l2r-y31!NR(b_ zB|pVkqcp4jMKF4caZMg|`!~Q4=XzgI;eh&@EJ}IyrY(M~FK{d`{=F!(WjTZN{DdAk z@2br}j&>d>@on+95P`=B{D0l?|8$cpnn#wVKN(r%CnNLxuXp_aWn?LPGgl!~Q)e?5 zm;aHNa}vk@Z(?4cMJXI9a{_LS*aJZzJ`jSBfE5kGV54Bjk>Y8Q^%ho$eYttGE2ygu8!S$An%z!_shdRcA>wfQK)|N^&(P{Galt|67^_>;RATraZn7vcsf97^lpF; zz3hO&xBUzHcihsmOo-Uy0|8J7mTBT)_K6Mw#B0JsU;S7P-OLbI=QEtQG?@r4Z1yiM z@hOw^%7aoU63Sx5=Uvbx&9kQV<|m8CPf}Gcb1E`bel@|=hz!#KvFPywT*Ao-&@LUhpP%=36tS$4*yjfVwB0?>e zd^|>|rXX^EsSi3VHFLDyl$s~AyfMBsOkoKFSD`5EcOl$7HNG6&N`Jzwj!Uw1oC0Ya z*+m@Xik}-B9ZzyDVUOrT`H*D;F2tRI=wyvjL@7%$yFI5wQ-Koa+Sf}8I2MQLMTks47EoCJfF$Cu;|`BnX7$TO zvh6AKaeiY?s%lCUV{g#?ken>CDld_weN(qhCfcMp(;D(ncnZ0Z7ckSm-&x4z#*c?B z`ZS9Ej2dP5j;tmrSK0nUNE8yG^Awk6q>h>+kd6dL+zwpKamMLG1(HpsD=4@v*_>wV z2kI=?^gbEM$2M5D*|nzhzV!ByAxOKEDW*zs$5C-13}dgvJ*0$`xxaXiItflfLnzp7a=!MZ@nd$Br5YbM1ccu@7gCt?fJdzAggT1} zv2E8RSg!8B-jO|Cb|naDvBLVTSS^TJ$8`3qSc{oAnZ+>YEdX4~~r7uCwgYO0xY z`x^mB&;}zKq9Z-jhw}BYokIwulV_--^!qlU(%X`8cU|BA@sc@T@Bxaz2EGx z6Bnl4SZ-u7>A`X5rCq(f!nTFBa7rq)&_1uVn<~S}ovqrs;0I4MsbMpozIe_%K7C^Q$PQ#X&A2GkK(HJZ zza$!}i&uo24taU6lYLbuh(qp~4*xMbdv`?L_jqTd@a;t8K*7J=hZyj9bqu)$CP;-M zc-*HnvKINx5ykyH(phmxkr@Pwmy_x8zhO^`*d#Dn)IMqNGRKbmI8_g zYv3kkUw3?4IgmYRpiflW8Bh(NFNgeE6DSSRQ33VE4|uBts77kb1bE95XqDWaJP-$V zZwA?eH;@L>aTfW-6R4Fd@Eyph5qK*V=z4G{r>$N*9b1z`p{gPgbs3ur%TNIUiQl_n!X^Q~* zMP}c>M&D>vm8>IXMa-4fCU|6{75p9W_ zdNtYRRc{}`p-OgKkE##ugRnw%L-;*w+=;})JFnwbNa1b2BXz*S&ktCEpV@O~GIkz0e%vQU0jXvS5L=NpJLi z!KXp^l0fzEH2x9gRBr$Q_;ictMql`6R8;MRd}Ws?wzWh~Cv1O6BL&?WdXeG|y#3fM zi}6OI8GloNw<`jvFG8U|itbCWc?mPdY%C;H8p~>Mtc>hXAbKr5w>pz=2La-M7O41o3P;c|f(izv=z;Va% z?C_D(9U2fq`oRJie42y!paG0#=|AntpzYwF=VkdtUH^+y zFUm?w%0$XgD>&K{-Nbw_5%F15qn zm~#~45w8@z{mOLK;4mqs0nI6aXfwBszpn!@w5Ay8+;+@4s)NJO~aiLsL}6|8c5}$}2wV zWl7pzza*W0Ihx6Fx#s&0nE5=JB2~!Yb;a!Sl~PuW)iLvx`33oI>Z|h$qjkJZPYC}y z!KoC4;98jxVmI+l4{KK???ByYm&SY-V{rSQW~2|Gst*yc75yG^<Ty)sQzNqc-08`(g^tsIW#YR z!c$fV{24lLH1r6X=|gl2AGwiY__CDQ|9DU(Xnq!r77&v=!T|U{>759DDx`d6%T2ti zLYy7<)PMCSLGczEaWMOk4{9(nJFF-PtF}?+kWN?Z<*TDnOksRfTn-a#BWZ9c59u=V z*5pu^9HXMyCGFQiJ)0v1EL|8?Gu1chMK^2%GE7r%o{ku4UZbZo2VNFqzek~1NS2>2legV08E%T}aAY-N#bKhWQyC~=j!wrf zcV0_NEv=&O#GEY*CzEyGp0M2r^?0)q&fd>kN@m2(Mla>!!lN%@-U5pwg*!g)$wIT? 
z_+*jCD4FZG4MsnmGe-Hal208EjlJ4yAGy>!40rgwXVa=7lg{l7MiDyAw2dj=&)lw9 zhsT?D4fopA-MtCVc|;(y7i;wSJ4C$_Hf35e7$z8Wp(}+@LKtsl!AYy8ZpE(MIXGW3 zd$pXTEcW2VvQs4r27Dk#tM(Tv zX6cGcTFaDor*?v1=rgOz)C7jV23b&{y5$|}EVY~HUjNEIDP0y>TZ_-a>qKC!AW-RW z>5b;CTQfSQT34zTB5BWT3Gs}NVcpO2WYRy>>yvRbs)=SL!O57!y4SK@kmfbQ{L}ygjc4D4|^!hT)1C#qN zoYFsjChO(lt3BNPZxM6>ebLnV1Cz$Dg;Cg5%6Al9#D}xgUqQVv50%vVsFmu6ZdeXw zb7BRkdiX9_0ZDfrSl|bQ&HxcW-YPxZ= zbpbJD=xE>lN@yGhG}JvrVIFq|XdDsi@CEo(OC2QuJ$x~{z@sxtHsaz`a_Mq>I!jSw zK8)n3rQM{vo~ZBrvY78m&R^PC1ZA9&Dz=q{7h!C(F!TT)A*!smTSVv-a33*1x|2oD zvxy;PVC42vPfp0cD$CBNHe?O~(H6z0%V?U-!$ie_xfSpjdVGnDcN3fl@n*(%8#ApI z9-r`P9D7=-O04NCSewq+H_5g|)27s$)W;2oBt`kW9M^6$o6y#fwD|%oW9!nPXcZOWb<$oxn#bss_K&L9PU-y1mDv8 zo|*G&GrQ#h5t(K)tin+Zc&dbMx*-xYL`U7GzWXZiR43C^ByoX;#Ny?pslSh&c7~<+ z=j&A}$=ey+HrJ6j*fb|+aOP`TxHOZVl8Xf^Ud)LDhQuS!Hch8I%@BM;qKwf7$ID1L z`I=2wGvv-QHRDe3Hx-^oE`hoM0!5(Np0`eMUM2N#Zn$bTz$*u+UFiAv@XyE5iGxpq zk*YTVfLuPz!vL_{H~q5U+a_jXdB5Q=)hQ~Z0Fdm-&f&39FUld5-`dv3vh@df$a2ST z##Mvf1pp(^OuhLo^vqS^2biWU+a~OtYT^y+f@-6s4!9XrhrVraDST7Ic`4$wq zzOsI}rf93ixs5YqOT0PTCYm;bD(&;yT1nyc=q~+imI$WmU`3sGrjV|=k++5>2+T6|@HxkCWTywR3G?lP#3V5E z>akA*Mk`X;^OGWcf`w@T2=V)WisaMrwFTtfj~;c!%V?AtscU&8g)H4@tynX(myz*- zODtJ`aEb1)=Zn+!)cjfkU~*#^$v#nuKq`FrV{vVVf=_ero|(DU7$BF(a$I0k9ANVe zOUC!1Z!F8+;M`5Dw;E(a+Hn98!eAJesFt_ak3>*WuH*v*Q_frZJYz&gwadHG#%Zmr zF?xUiGe;!LKc1}LPi&6P=x77@umaR7w&>}e9xTUvk6Y0oTN1mU2L_0jD(_C{2#%k@ zTl3*woMTT?NoU07N`Y3Ah8QGr!(gr^9EDRPt@sTu={7$s2z?jEKOpT>OPIa-r>I`K z<0%CwEHd~Lc2ioHXiXwAwKxpOk3j~B4e|Rp$WYR zzCpWL=2sWeXWqze#u!bNs5Nm6oN^(l2!1jLw|b5Ao(Zohytz*4}mKZb;ASEzn_sK(FouuX1UP z-gE}>JUh*syNz9^F6z?#O7mVBQ1oaAQ=D7i4N(|2Fz(Wx11aHST845? zUkVr+X>?X~1j8>i{PLG2r#FmCG&e2Ir8Ej!V~vu3x{b&T{+x1Ch@pp_fIc7Fft!bG zFq$Xe@f^}u=wxY>nN;4gRA;#Yf93Q610mM%CW?KV62T#y2l6Cedi6Sr?QAFe;RBjo zc@soC<4@=44v6;;EaY4OhM2S9QGKPA{d+;5MEoUlOV}A?y`?zOD%Y zqELR?`dsNapQ(oHPo)3C%>ILm!xBc}LI2?56hH0-|NHWWn7xU!m!s={D+FUz)6bZphKfi#$2GR(D=kqWo2ODCHm`KaFBf%j9;x)P{kNPXsz9=U- zvcN?_hVi}4JA8J0win;uXXF0>T@za5fe%86JIZ7znbDfrVP-3&TP>&4a2;xi4P0*GZRW(Ts`jPd zu4L^uzbtjcI#uiL7hGgQ@A6w&mj0$y=8AkkxJj=K3$MEzt;8ui9Y8d?v$MG_6Zbe) zOl>!+humy}Go7G^Fl3lc-EbEWZ#zc@{oug|OY_R{Q;#76jTB-l50`ucVNX?4d~=M7 zkS+9*!E0)IJT;!p*|pp{>%qo|Ps{@13)V{b^G6tsynln)>Dtbzw7=~h%Y@nIqmtvK zP*u{|6U8Qm;LPQwjPyr?A}!K6oWSw8(SJm!+aVm`G$Zey8?qnHSn6W{3=o+RGaS&^ z+Ho8A#wI$Pv^5Vg7)x`iNY3SdWjc$<4`aLN8;O zc-g~QXt^_WT2AJ8z}wKU$97C)N9XKxid?B8%fm}+cP|??OYQiGvmse++4p0uit!aW zlsv{E-V*J_6)r}~@0JSmCuIyIg#ca4-B8mNv&gH2U!4=Hpk(QA%4zjEjh!=rDC#CU z)0WI6Rf`#Vn;7Fiknn}jFih565#W`&cLnDiPoNMg;ddrDX%{MPp7Dk8;%4bZk|9e~ ziPR-GyDX%7!E#c&4BcmtWm}ty?$Rc?5y^fg+Q%0f#VkaPQD9-0Vp1U;!;k71s`6(S z9lwG*qH5|F)Q4QKkdexjnIM)*r6e0Yr9Q01)FFA2LHRpNB{rxyw>|YgBslt~EEY$8 z#!O`}KtNRgYg;91m>ij$ z6fWygmV3Ob*gpE6%C5LV6HQtp_iB}TKMs~K@+yhMNe zG2jZdc(!M}Sv+VUGHkMzZ%hF)uuEo=7Gw|32jflyMZH#NSR3RA^Gw5;x90>7iFV2? 
z%shaS{WTL;V+Rb4=Cg@-7VPb(8vn8iIi*Uv)0?}9Lwpy}t`t=5Zq*9rqf)S%z*xzL z!okg4pOARIAfn{m^en0w?|xAp;W@)~bu36qXC&oAylhZQ@pK!gv}-}WC~iF`xoj~d z+0>)O5vzw=?&2(;7{TK0=qeNJ~NJ?*?f^itY@vg-1E5zO0w(UbOB_J_Fh~C+^eXebS16_ z8+g|mt>w9Nxq0Ntur;4*iFZNb!71B%$!vBzMZFXm?I}}!UDEMsj*vnwwPm2Rkl56S zgvu_$5$D8AoM@+7uvUi!1>xn#TCyM?GF64H#g*jwKZJ3fUq_~x_K*3yv7JuwIG_=2 zPEg^Ykua!9$Q~rWiegCYixS)iMa6?odMWOYHeBzb=paH{{(&QG9-B^JBSx;vZZ#DZ%^rL!VTwV!h9O`Xqx#rYQcBoS3!__XFoM^uv2GD2Ft2)N)*OCiE7aV2YmapNFYb6C^yJ_D^7YzyCz2n_4W-R zt_$6?XTaP3tmME8(xu)9;HU2Yx-_UPh9_hjJ;!!q|vr5+GX3eZQHhO+wQV$+qSE^ zY}>ZYu6niidHWu`x9`0XD>5VUS4PH{bIvi>K(p84gS^u;XRQzNwYNBlX^pG)K-H3C zvbK?SVClHSF?NNIYyG^6`aYr*ZQj}mMI&if&1MQr;_(IdMU@hdEkdE^(mwi91jf+i z1PB5s#jm;m!>S;tN_qdKvSG~gcHD`20JuGbbs6)1)26v=ekRvKkdJy40_&gVe1d!B zj_AuB4a+?-@x~134y1!F%C;>Ew>^RzrB63p569;4tJQuM(FacsTbSc~_u!Oy`Cga- z(?506yX%UA3-fjB*8`?m3RSubOU=H(v5ApxEu#@0z?E2n2OFY3cxbv0k+Gn*)^GQ~ zIoj*{(4iVbN-n9ZssLUo0f2d5d&Kq-LdPD6HrU!i_Q80Vv+;yFbB^xmee@G$%?35v znD|IT(+KpTO5_o8r5`sm7#qGNtluHv$N1wl;&n}li$~x~67l5{A!1{f=o~QdQx23b zz=gSnix8f2j?R!Xac5G-o}e>()Hx$s?f^G?BH9GiR_-8raerFfVfYyL)fmx~-2@RN zPuj{~IYwlNUZ`6|wFfyPwb10$XW|3NLPi%xIJqOE-P4HDJ7-Q-NB$l&9B@x?Z3-w4 z@1RbMBT`EVQH#_Pw{J$)jRBe=y)R28y`B|wEh`InDS?JLNdNkotZ7-MIOs+vfxFL7%@1Jw48j*5?7Dk`|>eIUEdLf->4hK1qUc z2EMH*A1Z-iM^%Q_X{@dKbB#E7fMQS+adi`sUZM|*fuVuX5r8T!OiwkOMX$X{Xn%}s z9ZE3CVsJc!tJDWbjbH+mn0l83EhgwbS_l1-7F6MW(iC*Az*Q-=n#M@&Q_x0V$fdL% zB_Ss?K5c&7bd0{=ep%&u5)}~ClXP{6sHrOD%W%dSlFT)qo`jDA9LXZ3 zP5)}b&t7T9{p=$E-ZL@3C7f`~p$Qnc@m#f@%q+uGnm!;~)0U@oy{afW(-~sJz|1`2 zO|Vchms+OPM{*|Lmppq6^y=beQF`gI#c1nE5(>#wiL_jOQVB=f%p@+L@t{W1)T>?+W?nrrmzh#7y)Hslm`gvHzKXec zr|XX?f}PE{Ngi>USa)$`nHXD|moZ1Z)S%-cswPuVagB}R18A=h(7`I|Pe z&vy9k1as$#I>6}!zS=E1F8T9xN%;hl=5Ijenrl(tZhKN~c1rLK* zZ8y|vm0tFr|HOrHGN)9Y0810sa)4yVUTorYEkA?21`=H`7WgO~E#D~~4k=olN%cte zAG_~jAur#1Y>nT;hLNp+DsSJlyq-d-CTCs8DB;tLypZp&e=$0NZh;mJ-Ns`ftpgTz z>iQn>ctv_G!|8fiqVHhBiAqBD0iQt@r@VGZm<+$Ii13&dR441yf0vjx6`sh@42)+S z6ar)*)bXfKloo9SV(rlxk@@Mp2zXZzgnNazM9M+LAdY0zn27f}BKG^P*PU$o+pd@4 z{Uob6Di1+3Xk$KvvAdEKQVo%(p;%f2z9%@r=x%JgV*kb%wQL zHLnJWUKZB|Hp;OWv=a`YG$KBFkW>5(ukI`` zD;Q%-baLQP!JRPjdC}op0o%xnv&|Q~Ht|lU0M=-*a-Wz9&$w}$Jhz*{ltZA_FX(z! zZO9?avy37mTWo?-VN|Dn8=YlarhKriln1q4qhHr=`r(NHuWYYyyszS=ZaRL)!!hTv zLea-N%dmN$9*3|M!BlPZi#a`*UCsmi;QN2X_VtQo0Kok44Mcw2&HgR;{V^?2ayD?b zaB{XVa+0vI|9_nen$@AavwlcYJ9egw>0>L#q%*R5kMZ-&>C7@(=B*Ga8S{wJ3-e9E z%Ps~9(gXaANTf*g9h8x@RObzy1yJM#^7Ua2ip?;|Dw1;n;541*4d;Lpyz(ncDy|;Y zmDk^!Hw?A4HtMT-zP(O(J3J@fd?!B|glz7wjanZd{?ZCaBcdP`l7O_8z3;3^HMEiv6IlM8X`zJ2t{E)~K6(uVi-k;g>r%!$S*&Z?E(( z0eC)Wq8Plfen0rTC~9|A&l#0HygP4{F9Pr-afCieJA-NZ@Cs9H4e#T9$Yz7#Zuv<__dZpYpjw)|K%HCe1bNx}5j?@E?8kMmnYxrSN-=w07cY z+GMdP7b}UQgl|m-Hr^C*h!>;-gAqtz z{+M+O9aDm&oIFM97>Q`Mf^sY?*{JnudS%MdNhy`?+zXA_$9E+J>08S9!ZyhTFaEUOzSs2D~_NC3TdT3Q<+*#csP|Drf&eCDL1 z%)FIMD+@=4c&vS6L;0il%7tc$nPf1@gIJTD2qs+d{#m160wc)e+U=#|U+X}aq9DXk1 za}$X$u)3QroNB-GMtzQnj+Km%CU$xQ*7ky&S~#h{t;)}y5*gd{Fx3)SqgI`aH@3yk zoo}X@m`Lo%v2v9xQMWJv5ebca+dB1jW*mIGw6vLrxRG8##ljNrrYe`wQ7$vcY~Y=C zEmhJuwjkwNN|Z@ht6M3>8LOXU5y`zjyOK{EkmKQo8gs7sG|Db4RoYCC=4b>doqk)6 zV^d@1o0*FjVrNN7&mw>C!mmB<#!u0!o8WMqZY?fdwR}dugw@y~=~QCTEK@mguubcC z%tJxVT`M5Y&Ww5nFsJ54Q976&JM|?p)5y01#nr5^fc2u7#>1T^B_nOx$zN>aK@Q28 zC`uu7k}clg3T#Zox|xEOPhOjM(R5s<=u8M-a*4-o*bORXDf}pb56&cdS)#a*;=pAg zeJqb>Or%@iuW*4ydH&>Ur_51BG^^CMP<0zgyA~}zj3!%nTVQQ8R2dblTA7!fF*haD zFc-IfT+`1&n$8h}y!Z8D+~~|sp4K1}TaEac?IpIGO*s>#t}-1R^Vczt3(fr80M$)? 
zhV-lWE6Z*c%BPmT3|b;Ps*O*eLRBnDb`XEHTiu+F02NfPh3q+4<~b{!k*mh?9r!Wo zr2(Myu}gW!#_59Lh7a>#Y#1a6=#14MC~50qRM5OvNO}hXPZ{l?*7Q@PdKKZ`FdM>_ z$TsysSOg$K*N9aT!`3h<`|@y}wj`F(Ldvw&$kxnPxKG9~y>xekZ;SojTg0tJfOcw| zm0EL+a{4<)Mu9HFeHDccAUxJP3xwRhDbj0Xht#mYZ9{`*4pQ*7>92O5oMGP#cUYgx zgEwITL5d}eNwIE13f1VN&^kw|&`NJAe+IV<&mWL00O``(Qbkqd^S}Zt6lcLeeB}T2 zlD%(>QqTaYBo~Z4owHWy|CaT#$!P;wIrC~e%OooTC$m{-mR8z$~nIqoF4uu{Q0B|8(BZpogQUoKOrL7 zy-&p8@nme6v3t`YDcpTdYH_DQ%{#m~Yd>T=TI4gn4FQs;ERdi$v^N%}oVUZI zdyYmb#`fdVwQ}ou}8RWW4r?B!{b3w{7^ zmobKJ<)B@CnwN^?2;MBmKbB{=V%efh)df5RT<;?A_9k6)7oJknClnr8fr5~jkh(xJ zh{7->%o2fxkdUx|&{9h%RGzFL2NcV^?hZZ!){<}_=`S=^T@v3-DnSsca$&Obf%ORQnD&tzD$~sJ|deANQPyoG&U$uo77Z`Q4pa4M zv3NHC#s@&g0n|J;mD(GW-zepd zDtVHHipMqNj;KksO$JnII%|bJ#+5iu%C+?>ezF|zi^#6*DX#eLlzU7~$Jsn6^)AY_ zwJJUx z=+S#)8UX|(jUWsan*T8}??33G>ltu-0iM``h~AR?h;%Qg9l%qF6t~R}JMiqv}EAT}ajm!oRGNn>&3#8QDaQmY3;fDDo*Hn2u@=B>xCql1vGCoR3%HG&b#J+XENBXnLbS{&+U*t4Xe|dQyeF}Usqqb98rpz`K_C<*;%1`+ zc&}j;N$^!LjU_0=VFU<)!>7>?=^P?q8d57slAM5vDe*CDR6U!kZ5GUF*C>~So7T$I z7;?fmK-BKB(Hvr;%j_#7ox-CRh+UpxdF`1u>;51ZTTqM&0xuB?FCj8l90SIYq7{vS zv`+yr>{AO7Sk@w5l(gwf+NC_2HscJhX<4NXT_tWhVzu8IMSEAp>k50uzvg@yIqn~T zkZp7Bq^`r&Jbit-aokSOg}+&y@P>JKeJ+?MGvu%1ihipK1ftG+b8&J;OgpO6%rUdf__h&4K2d}Gwkpwp7eq#K#DB^mNn*3wf= zA)CCVKrB&0G$pzDFmk>^zd1Cr?Y}%$0ZZoRhao5m!KgzZm4%F4N)D@Js8Y7DRz6rQ zs2Tn}PFVa3sFK*Gs}q^Ih-|BRGkkT@-)H1!<2*@g7|e#A(xxij>X^pV-_CL3EAKqg@Z4vQC6o; z78rX%)QzkCbbQ6u4cE#{8=d9K(kiF%oO+&XI_-wQH^6H<;r1?H0E{R=Y2%a8q1xdi zAxC$RqqD+jjL0aff-`ojTwSk!pO)DQ$bNPB+X>t*}VP{NY~Tm$y1 zJzcyK_M{JEXW@aryh$8Mb`xPqH?Q|!0X|AK5pD^NUqD9TPF*j=K zj0Is>iyhz#o0u@e)3Wd6nFY%`gm@sKx)n0Cp~ke4OQ=a_1m>hPpD_bP?eTFT!L*`% z)33mvgri>hqgr@Kd|LY6pt0g}^u@SQBOa@sx1F5>^`#8!Jfl}_x89eJTbNdCc3Rqf ztR7m9VQ?UqN~0?~U4ZVk&8d-FXS!;tw}y0P;Uj|ftxoHgB3;Q3SwuReIgp|kS!Y{E zqHLI+gJd?6xuS1jiBJ=6fEnnvu?j#@y`bv0FIhqR9T;+C;14oC_6^@*J~*d*=g8Y1 zD%>WfIgWXQ%PGz!Y6yh#m`pJpnEc7Vq(z%E12~arjE>0}JKhBmG!m97iJjlEA7f89Y2{>sNnHBS5;z$5C8{kl${p>DhLD zg9o$5f_s4#LkjX%9-UIO3zN7Kx?=FDO1Ub5tDiW8I5MXsTzHT?w|c#}d0z527DA(S zz6Dyj(-paXz2ZAo3o>u#^c-_R(_!*55;nsqh>>dHF4gR=?$VbIOGQ1$!g385Av?bK zvaE%>z;i69oq(oH%z^QKDaxId48sO5yZo+n3I>4yVS^rv$yrYGlcur z{`qR4?E8s2vo&j#u4qsUZ5rouII|Qt0uoE(M#PlQUjrPjIwdv9G&NcG9lLOmk12I- zV@-11Lk0moJ)(BOrjeM_C!`(J*V>evV-ot zA<(0siQ$JChRouS=$MyX=Z*|9i{b-GfTa#>QQgJ^GD1`l(YoX*^OiZ~m>NfwoEW%0 zZJDEs3Vw?YN@Wqq?c6dlPiL_)^i6zl-Ylf*yl}48%C10I0$%RE^9h6GMkhvl& zHl5@OM4e;_B>2{A5plJniU5Rqc66iL7ts%C9hqd11?A$Jl1dE4!}{%w9W)&M{hYlN za+8>z9y9_Om9sYJ)CB;-FjLS^b=Qy?zQ_$CU`AKPYZFsF{pDCRx zAE5>>U6;ncUCmJf3$6j-)cQm#xFQaMFmRZA00@4bn(=GMFrnF6^0>ab&gbk&kN(n>Pa9V1HZ30ye|xlWF`mUba*6-a&P0d}`FR~uH2xXz<7tP?0#qXC?MT-g z#6C2nLgw5R;~)R^i^CCOgU3#>^Ca2)rOWFwCpBm5fwIvn1<-XSGgp1#hU^%fYmLN&!0>@AO9VM)pqx&Sh=~NdkCP6I88GC{$oWbvR25HDly?Bid%^_l6cv#8j!q0$w}xo3dUY zs!#_(pWQ)&u0~!r^h(PMu{ut$X8#ai>%u>vvXWvD%;3`c(Yu@e&43029PB0)w$K)r zqh@&tSHACiU>=)J^wBDFj9HN?>~K~NyvqlG%m;$j`4X`=~(SL=;LJDD<8yw+6(a*=nL zq(f}UR7{|%#nlLcz{?@>xBbo?N!-Feq}yqR&7Z(c`ynS@^lBaHcF)E=zwn;*X&sn$ zj>o(Sc=!9Xk4$mzjeEk@UDHy|`doL&1IXJ*tJJt>LsUcdMT0YUdtuhBp zd2h^SXN}z-8heZz7c*(3XH`k3S2>baCM2p*B$7-UOE@+dJ1bJBTjBL06kFqoK;RBI zbIa!X1>D^eTizjK-ZCp4F-66gdE9W1EIx2|-;0QDNVNgbDTUYW;&x?7$2yfyR;Iy< zgoh$YQs)OsdQ*y)84-ovOK4TEFzTuh%5bt$qBbiAPjM6owx;V!F(^6AOAkpf>4*Wb zyI+N4Sw%#von+hTmGavW51s52A0`18MP(6DlqAg%J9<{Iy>Bu0ob5$VNB`BB=N-X5 z`fV){P4k^5f<3oQc(b8lI8hE*)3>^{H8f$>MBC{X2P=G*%eg+ zNlL$tb}Lfr%rrq$4EHvkD?*}34zZk?_Qg|2xurfdNI4cjGL)w86CAzB9HP^`2g_d~+`YUzzhNuU+`hVl>ZN!qNq0ckZk*nDP^NWntPF?T zzx3~p8klvxD1W-jic=wr|4Fe_)FWJBFpd9WhaMT{Tg62=+f{c4%{tMWqJ*mbKJ}o$1>NNI_bje9d5&FJvLP?vL2x}8<$_#W1T 
zjklw6q*gF!3P@B)1XRccWfb6do|j|_I$i5oE09>XW3#9ksji_tk`Z~v=s`ueK^iC} zsVY4QJsZ3gk{Z3?J^^mJdsG4conbKu>5R;|FBTU@I~)0yTW%S1RVi=JbHBJ}%sijp z3+$*~$D~cYsOT1qmTwuo5FDeJ9BVjmq=0U~$wCS;AP>@2ETPC3`tVq$TZZ8*jI&~P!A`>BYQoEr!C0aqk^%5_8hqH-Kg{a23y%M)sIDA6xpZ`#1>-b zQ8JKE*|M3YS8T%3UAM7zxXEc&A(JRLRju|My0><)gG$yPdB#AEs8M68E>YRqJ5J0y zbFz!|I!5Zmoo&Hnj1LFE zeD~Yt-lca%?lM0F$D`U{8G&@=3COx~BBSp=aDS2FxD6{xqKa9H6L{%Xw8h+fsm9zn zRI$Cevw?9#-zWK^HXjnR&~?JTmJrLDSCVl%DK+3q(Qv_nSz%b&Oc{U7cXg$2k@mG& z8FP*A=zL021Crp-b+fvlF?Dw6IGyZ{^a7o-doxJ$D@W<<fyH0NK+T!H7Czj}Bt* zrc03snJW}otdB{E=_FT}v7t_$puoX;&Cni=B=};sJ|{oGRdHhK&C;tpRAVg1^P6|T zL%~^a=lAeQt+@%FFm8!|LY=(yw|Wifd;NAXVs}C}A)7N4nnpLJjSch1a%_)bnqxeO7hQuYQXC4xF%E?*oTtZspl4To7Y}Paq&88o6kD0G` zirNocJpjJo2?41Xwr7-8oVhJ{z(ZWDVreJ{B?V^zH^*x{*(1RO6D@!8nYSqZc$vq!`znk?3VGr)&j zNf|Zs9V{-&;45@WLQ9(LQ}!{-D5)qR{DR;W?oOB!{u#AbM93;*JzR?}c{(=AD?2qk z^&PWE4-0`I4Al0cFWg7AMaV~qAeM`PqIbYjGvEEG-^r%Baa+PkzNiCf1Q7_6F0zEz zfxjS)T~v?D`NKRgbp0Kz5H*Joc?n>Gsbia6=oUH!U**oee;m}3#gs~Zg@x(sxqw$H zs-IPO6e&GazIIh-S)mEAOg|m+(H8F{Odr&2R^psjT#C#kt`tq9E$RqHD$KSzNJRfn zscH0&wbuAgfTqh&&0zU=-Oc(R91B}BI_aNx0Y@_zo1aD}Z{YZoAn_lqE?Gs+9?1mx z+h@#_O`;VZ$U;KPrYvu)#6}q543C0@Mm{6NP%0le-ljtWwWZU*EpRAM%&d!jU?}|@ zz*N{P9s|crF_e?@pf8YVJL~RrsgaFt|HD;D9@CUQ^rgG&ru*gZ_J{AyM((%oEzfVE zGO@YHczV$Rccv&Kg{Zx@pgVHmthhKhBf?qlL;;c$;&J<#H?ofU?m7qr8VNOHSYbzb=LmYh*(6|KT&pT`$QH=f zV#L#u$yAP7Don#5l&S;^sYSgE7gBt z@p;PGBn{K$#-4qFF0GgOV1upsCfRNq^j442)>3rI0XLtZ-;7vIiq4cNc%=;VA{g#8 zJ_{UCbxFsea9Kf2KIamJlZcj)zS81)f=Ne(Eob8fZMZ5;n{d99>K3g9S@&i(S10>u zp>a`Nu07l_IzJgXBJG%Zzez~U^3i=*D@KA`b$mln+X4#4&29xWN~#HUA+ZriMie6@ zW5ukA_J9ip=#EY>#Q_ZV5o=`r&E1$%NWO})5!TzGro=;ra8>%OP4%JXb2YL{Wd>!r z>3Gwoaw8vwR^=khlB|UlioXcX4%xr=dI9+T5|Vi|P(NO6`8o|I*5-rk1j+Tfu1Yq>#BvvxL)RX$ps=*fRQAgnEoO#> zL*+JVy`-`g%apdksU3%$99fTRVGZUq67eI|lQ?-wMFr3GyT~C#ppu?zh6>r}V#Riy zl?_`-^y-c0C7;%_6Z~q-qw>X{ltlh3bEj(VP=k$gnjDfwVR6Qp#pz;hIs7wT;LFmL zdbCfpB8&Vvf;F*kM{iei7>Z1Ny+p)6k;Ec1tHD6TCOZINcZk&pT~SSV*yw~-X(c!M z-11>yX8VE)ZxFeZFhM1)__UIlaUmczLx$QgHUwQ|gGBwL1p|hP)_FEKXm6BoQe*N! zR~M_v&?Lp6t~#rK#1$jNFju=_9S-(tB`8+;safsOwl?=Telm>{1Qii6!#=)jVg>Ph z=O$72E^-7DvYsc6joLwUxZbm1;`aIyvw(oDg?6>kCv<)>cn`m^+1wQZ4chNOwN?`40={-hEm#{ zU=pjp;Xm&oB2vCZ=76@xKCS2vJz)N4!Z32E*zwIzaa{e;;xqjpi=%{+^3N<~B5LPo z^CKVmU#0OsD8jZ0GWr@M@&W+Ob1mq^X{y!D5q7``E$#KdT{oeCLP!|y!|e1J-1*UCOB|O)1M*Eq|kjM z$UH+gHEweJ!b&@uvtd2Jeu|HZy$g@xlK0>oG0l-70Hp~rgoXlC{O0FnqsC_h{9yd? zucH+N%*Dch2cJ@uZc<}&d1>HhUt^(wA5#aB$z!O^To?E7kZl}U z;t1rP^03lnAyv^j@z{-Oi|inZo7Q?3)TAkYh}7&jO~Ibhox5zx=c=)#EqZ9LzpJt} z*O<^%)tTxM??mmc=eKKk*2Kif$UHVFbdp-{@~WrmEvhNA5Gsx`s*wq&kSi)%)$!CQCai=Z*g7asSw?z>auB2nTO_5 z&5wdVBQvGDj3d@6?Z3Ev=IC%ZpVI8o{U9n&M=S|+X7 zWvn4r51KZm24!rp`tV1ta>WAdKn;yGTdWbn@4mEw-r3lFHhZ4R(wUu(7Q836Ln`Cc*x zyqogzkpbz@O*j0wFVm#7AYHn(cVuns{0SFbi1%JvneI-`3_9GNo9e`opn6xR} zJt@x7-fQI6Eb1V0VwnMcTBW_Ag1;Xwe36;LRY)z*geA5jJt`B}piKQF&xxVYY`KuT z(7qt)DZ75Pk$^S@rv*E&{PGSHfLofxV+mkVyGc69!dJ6mRLeA+!KLu@5xuiptP)&X zigxBx@d^N4oPY^(>0iXFyC{Cvon+Zi#pC?i8OE<#gva>OU_F)XwN-kf{>bNFC^+Ao zU+iCg*p}|0&~j^*A=CLaa!N=b|MQa`$aIMu$iB0M+6NpvH<@FT;AQJIP-GOJ(VWs` zYlpg@2R{P~{6WYftb7GPerN3l(NQb?JG4*VzpM9MJA;s+E9TDb(%^*?4_Q>M%&j2c zd{X&AHwersuO{$UwUoMUV4-enz#}<3#)oV95^nj^UldLdvtwgfN1gE?=hY?ZW zI~0rT5#tJTS4$K3hH-{g?AFt6(bf}N#|rfcm=0PC@rbkW#~(^DQ&|AY-}Vh=j@9Cd zLsMcQu$;cL%PtraR%LehpapWBtsSRS$Dbn2%HC)+WqZ%Ob*7AtL0N+1=m=g zEI|);fHoi4R$}x7UD2L$ml=?lqJgA$QgeAEnr|rbO}el@&@0azie;~A~brolwy&Ed*GPdSEd9Ht? 
zAgD-HW-SSp306)tq>yBI3%h~3-J|lx2?88vKi{l^UYSw;r+-R*FVT8nTxBU)dMDZ{I-?8s#c~Lov%JM`f^cY#U zPapkGoK8ItyCuX^LaQL>7>g$WoLhSWDIN}~i`X-U<#&$$2>GW?)MoA!1}j+NEkds&~G10ELi(lpcx#=Jy`@K~Bg)-()gD~)sG zf|6$@9tM}>kwmK1#24y{zzLn-Hm<6{Hr(9YM_m-!V;Qv(*cAjPS)_Tl@EU5uBZ&Ir zbYIs@ovzAsJiE}n4zxyrOAJ1L4kVPY{2-%AYAF7C?JtLCR~M* zu5*m}gCdrqQ%P*k+Yo0HbzgxDjQEVvNm~W!R*nKc9IL{ zU(cPYg_DKhPuu=S>;A7#o}!H020y%y5toBKwjdDjI|Op^VPQZ2l2V0kTmf#u^DJfP zChIg{lVz8g&gH=ZH6)FmzYaX3k58Ih*-92wE5c+7Du=ncO zLa=a=b$4Z9BkqO=<Ek3Vxc+U!G6Tufy7ZJ&Xh!S@(u-} zp}3`E5|){9eubix9#Zrd(YSrG5Q%f61hKh(1z(O@HJA=8```U;|5(6HQPSHF50;C~`J(7=1L+3~HX*iri<*D?r^ zl|N-+LLHNM9do;nU-=Nei39RCcI8YNk89jrJYFJ*941+Q!Vl2V$?wkXfm5|s2f2yd z5`q{}*)0RWFa$!GTxtp-GOF?DOQ;{HL7CHv$L6b($~niT)0pR5njmoPhbb7RH6OU5 zsFa<}{{~s?QaK)T2t}f=FJn+Ye8RQsP#eUW=zf!`btYEcT{%*!$y}(~{twy*RG$&- z?N15X{K2^Y$1zSu`u~57ld)I&S&Z2xGRHnHh5SO%lzf{@XsH!`L%75XZVFC$6iZ+& z0oh~mkkIpZ+v`pxY%{r9?#qs0qRuivZYm?Ea@p%oX1d;Nce#kU>%37Aj)FENec622 za+>Bj*}U0)Stzu_1$4(@BV_+wPqP}UMsJJWzo8|0wS#|24C(S{v@ zJ1mLr;lF$rKBfd!K3Obn>q|&eXT#B4QhNa*RhxupvBFAKOyGZeDa^{k!bLFKlvR{y zq%OM60-1inGJs%?%Su(kMLCOWx4Yd*4lk_=6~eDIUWMNHbRS`0_J|TGPOUO^v0PxA zsptm{x}}n6fK*XX1O(umAEa+l~es02VGPEM?|SB5*wlWcuS8NYSt6uF(9w- z%P4q0YoR(!5xtU9x~jYlP>RrmwMm;MZRmoMQXQ(Wq5w4VD*aBrLkHwQgcwHD7%4=V z9dT`+07PjSu?mBgDEOG-bgy4kA$5SWrkp*jZS!0QWyL`T|J{1}F%^Rm$xrSaqxKEx zI}+=G}K|Hes(`@mMa`XYr%r9#<;sGRE_eIKT+XHfb{bE*s(xJeZB+vk*rP86@D z*SO7BNj%2l{GvmJ?Kpz5_$lM(;9Q~lYc<@yvBlLcr6x0)}4>& zG1J)!(O{|MpfQo79)hNX$#z}3)$(DorRIFvLx@fsHZ8_Tm8wf)u%YD)Q!cQA+NRmE zv(YoXzGVa}rID-Wv&$2?^_*yRgzfehu&%%_oO{b#g6p`fLhFRqAjUsWypK+cWk+Fb z3fHHdaC2^GGJ1rw-$Y40zOuY8o1pA1M1%FAa2LEy^!ivrwq|KU+ZkxTAi6-^nc5L@ zA$@^hKQ@?k_FATXlk@P?yW!=h|1nqFK?CRhRL_cQxVY7~SEFC<^1r3ITrnI6Z82{C zszpF}g`~+r#aaKI(MZM`Wv_@eA`t=;U?t2>}^IfgE7#D}yT!h~x)&G!5 z`_Wu>U5n5$aLJ}l$`6v!Uq&^2~*iSI!lG=VM(LFD}*`Zk+A=!HHY zlhywrz7M478^rXQl)K)U)(<3o;FUEnlh#)o^-uUaeE?TM z@z*=zphStFUS=sU=(>hQxK-Wax3+{uyF+q5|BIvgKjt|6U$FJpKLzv<`u}2%6Si

43)!N~2#M9Aq@)&zs@-4QplZl};3qQOdvUhvj^KtF<^3wUz z!!>Z@_xBz9j{~~`F@22zVFdheA^UF$3YoDzZ)h}oh7fkzA$yEmF$WM1i~HcD5|&Bf zdNJFM01X&D(YfGCM-SQ%t2Ww~DXTVuTqw+YOjMRlWO=dbilZoqphTbtz;R=nHRRRQ z#){*|b<4h{T8v;wmaTTpxw1*D7Lt=5MF-WYkU#P@rm2mhTjVuqYY9qrY0o+f(?1^@ zsZ6XR9ra_`K#@5f0t++G0`Jz=yn~u>m5C^FUe!oO`$sX}nf%wiFtm0k8@MzvBO3 zp|n$6tnW;@s=`1iM_%1>a55UzMiXytbvu_a-ONv-u3c#g^Kq_eF-4Ps6S<+{F%h?& zb%4JCzqIoqaK;wZ98r7K9I>Ic$nyaZ!I_|ENpeoAFX)?h0F&a3*+atF$?HE}W}SIZ z9iVJ$9q1+CRIMNUY}s;IqRza~dchV}*}qtjUr-on<+P4Fp3|>SvG=e^5FqNt>QW1! zntiep=ZOX6sTr>@p_{ShP-8iFY3z^&8$%Uob*enKx6BDCmuczp{0qR9G$^0s?k)nf za{dcTe$tZJqtj0492D>cYVAWuL7%y8LsfX=V{=^%xT)dH9j(A=$b9luLOvl8(K{)! zF+|9IF}itFY`27t4_dMB17_!jwG9qn zY+@L+XT&r-@%xR+-R+H8_$Vu^Sop6|du(C25cC!5Co=NvLGtv&vfhYB+FGf>qK85j zp=`=I77SYEf1X$9V&*|5l=n1;_VG`ggB6F-qj$2C1goC(0AW~UCM!<|Qy3%!7 zq!0!XaWhE!B3Xn(6ux#d1|1Tw@Y8Kc#CzU}_BjNcXNc2G;-%XCZ>ExFf0Lf?wK3Ab zWu0{L|6*v~>l~K8F}O4+$3&hvA47Nw#y!0NcL%IpD&`R6z>y`jpM{1=Joy4d#2c90 zBEW&q4dGu*7cPgw#;kC3`lA!!l4bF37EB>l+cJHHNLfS)8x}dB4mjr&n+AEPZhouk zCUE^foV`<&F42~)n=4n^wr$(CZQHhOTPtnbD{b4hZQR^db?&L!x9V1%)7l>||64?x zF=LEANB{bj0z$P@45z@yWV=w`g3npVeS_~m#c+!|GAyS3sE12G7wvziBL06BMC)jJ zm;gQ`p<}{&i=vjep=jnMB_fn8J!0ITNX*+F=EL zaw;(S)zLw=KK)E*1@rmLy@__G&mM;Q7o*kF#%3=rX02ADDGWvoYiZ5X4e?DCaQ$Rg zRq~Z(FihK1%!&GYYk`kcoAX|2fX{b6HD6GkOn85|o=AZ{s65#;SysM>e{$^7-xr3& z{oGvd&qe(|y2*dnYgtFtK>*Uj37Jmc=d-+_QUvs_ZAsMDHfZ*H9D(VK48F5L^7h0c zAW~UrrDYG-a@IWWgX;y>1t_4wQ?Q755mlnRvn*tAXI7j@K4b^!2#}DmbKt~YBv~yRUX#Yj}O8ce<`guN=fb%M@*X1ME`u>-#OVDOQU>;bt zu%73YT>EFUwtcy&@iL+@?}Q?s^}|O$mBGxqfC{G%9%ZL!U=1c@ zw(MOm-$M-=mEo5gkKvK+QY+P_V^+c!QGm8bF9ur5={iJL!7~o2ObBAJx#|3vnKaf` zz$RR{el#6Aa7!Hi0x9YZ+Ii!g?!XXJn`Fy=<54%E%3tA+K;;*RVErhe$+xHjG>b_L z!bkJsOn-saP^2gEYk>6;QDz6kZ1d@h=G7 zfNYH7zbzW{=%2UP8K1KpO?^JzJRo*hSRBO+$s>_cl-1<*nFE`kmVhfibBTeADM7#T zuA4tIj8U6Lhq9zrA# zlkt+KJufq&bYGjgU6h_Rm3d(nd;m1W8FRJT5C)-_uU{UEot_hAREI(tTr~Mv@f$St zeXv+PtkFrBvMfi8jwi~L>yuT3yVx=fi;ZG2nARUUK6w_8R%?KTdYUHX3x3gzB;zkL ze(JPsmZB~j`!YV(L~BOy#&WUTjp$LuRzk3A?OA?4QvoLWT3;=|7-h$)*H@?=9T&Y; zkw6JgkI6xBCEdPGfkPc1flpLjqq$DlmLaN%qB%CF@tVhg z4+%5;uDH-7eCBXxp^KJA(o_5!C7#3Kts->`)UIF?RGow?s5%xe{s3VD;-h8aOnK-P z@@P+%zO;iaoqer~CI>lTU?bGv-u=2Ya#YdEVrDZRKB~opi<0zm3OX@Wk#G0Rt*~^< zDTwv?WsU=Dek}Ie4Qm&in~OL(6xhl#o%r+D{g0aGv9Nc8A0Y&}cHAenJ>_UmYRJ(9qByghCPi|4+>>F8f0|)Bka}*!-02=C(E>`u|e7|C`t42Pd7l?C!&> znPT(`Y8VI&Vh9Bk7u>t6fEN;n_PcihO-ryRjs)y1 zvR-p#t}zhF2uxFa38YzPj#@p{THE!sUUS{K#YK8u>&@r*+qsUa%}Dn0=QQi%*z34s zJBH`|sNoM_twi51tbrm8tR7?RoWfZ03OFUrue1y)unqYq+ek1qz?)w71=z5KZ+Ma*-wtr@8bcS4m)c)F@Uwn3c zY{%)$t6=qZi=+YvJb}mmRdy%dq|5cos-MzBp&(P|&x?}q_70xU--QH7@Ry<~+7#Jh zU^=pSah3+NUqGO~V~;iG+kM z76p15Ycko5id~~ZLp;gwl`MR_vvvn*o6by(XIQR-Zdn`{F)OFlm+7;4dz?ahks*w` zc}7ArGrnA23>(R{jM^XF%Pb`k{aI5q9L!`8bh~Cu#wGIsM}Shep*W_0@N#C2BA_Jx z#vznAJdK!{iU0ob}&f0NcbC&6X0&g{9qf6cvkfiXSVIogZh^0#mc{((XpxY87 z1|+i*xac~J_>;13N!JNcHK@aZcY8;7aT{Q}D!gnNCEYDN4hPjELEmGp|y0cxf942IlN}JXHL&eDu@J!2uBp3J|GB@IEX1 zw%n6O2_Re}*%fz8Y!ZPjq>Gey8%K5wWJ+pkjH$NZX)EEl8pVahK#RqNa!0RJ~wwZi>lT! 
z991@yc2xV0&?8qi*=#^rjm?92r0h|`QCK!PLLiR3MS6g=SCBK4#Iun^S`Jnd;-HH5 zXovCRh;r5miN`PcDZ8k;MRDVh+w{SQdy2|v%O47PTa)ElX<)(;yjr!nwiKxkkVsXn zcP&;d4m0j%chr$19&?85DTs;=R>3_V5l#bQx;Du+P#E#`ugl~(WUK4gqR@>6$p$*a zbO~A z+|#&%-K<$+M2wfBhT&$OY_mUAkbM>1rHzE8DyoKf@CbyY3dJrExMb=ERjc}Jv*?}M z`YFw&MvEai=bDcZ)qHg+VfFsHeEut*&r*W&*vUmecL+n7nt{={8Wm0xx*a5vbR$p` zqUEd$4YfM8rIXsYO0FjAb*)i$Ws6Z@rQR>fOjP9nO`v-T^&Gzz%vgj- z^F+N_qk=hpbzhk$64@*;()?bSV5|GlsuZh~buWhwV($7u(k`VyE98kk48{~QERxOR z*gUKHb;N7tqnx)%#GmbJMHRnRtw(Xtk*M18XnOkg*$Sk(#uMg^gGHmD&D zC!XsWE2I;@Gl#*9)XKsnn6el4Yo~N#$|^_vff(&0b=7o;#WFD_ce}qL90ngaA&VTP zw^z4n!$Ocu(6Nuo(4M~@Vt{qp{uuB3seLaKVgp zdBTw-zFt0IEorOK9nN>T`3!*b(BM~)m*LzB;uhs>xBcO ztsP@a5me-Q&UBa|wt3}4F)7n+yH8R9H3Rwi6`}&9ii9SvJ0$4r# zU@*OXTHYc6rE{#I>8}@lU$<3yCOjY$6R45tuV(a&+tem8nQaofv`3t~efRkNb@5JD zf)R!py1c;_U%z{2*oc(8;g$2&B1@N&MME*LJM*R@Z-P^FgT6a^vvymNny_4Z0;9Dy zBTieP8D|68R{~y7`Ux%u7#<9xLvj#px5_99FCnVZ_1pJp3d$$C38rkp}-~}DQ zOc`~csr>G5bhYUD6y2c*i=YDn3wI@7XicXv0}=88;FtGQPB1TZCM}TyJ=J4osq+Jw zMdOJ|2DP*3k>k8z_-z3(2 zHA(Glkw~i|W*52dce7}H4FZ8|+yWXIIneU>rc(SEj!sZmg|nvDMIHq=TA!$-mOYZr z=|zRU#SMWEHlMyGttgK2xh^@B1}snN>HJfI4CkDxl+GY_k=m@$qrUN5Z>H6E^Lkjh zxX_t^KZWi%EK2#W?dZopLtW`X|!@Iw7IVwX98L4S<@NA_N=j}U?3`5!%TsBn|feXa%KjarAH1E zU10=sYpS(wZdk0$G@}M(fW~!|^e2@S^(P|)PUzzmb|~I?lqXm@$MU)Mm8D^3hYMj3 zFzFWvM)r;)1s<*sUceP0k2!s@hL3I9F}Cv3B5};w`)Z!aYfz_#yw_UfjSe5cXAkga z->LMle+4|VM8c+99@+Z+-srGT30DG1PkjE4_PkQJuiMU7`J~d^6H&XO`1)=4GrB?i z85+JJ$QiPH30m0c1iFc0+5s#xHijQEMyDBdz@Rwxb2W@x@=c3r#L+dR$X$oGNwFWK zXsSC0pPY;NV^R-a9K#&enUW8=PzDyqX3teu-Whib`rS*)eyp2MVhVd4+` zq`{}-%FHEqB*^_{`XIGQplf^Zg{>EYN@OZ3YqqeW&HWOoyd?{(<1tGwvy|;E$2gUZ z?QPEZ`xA%Mz8kU+SEz?YKK!3N9$meRb{Tcv(Dhq)k|J8V9vvgFze3YK>1D@~BB-2L zSNA@D7&PD;-q=B$lGV#V%EQ0e2v3g5?YRTr1RJwJ?({zhe8S`N-fj`T;58J?M(1s6 zUF=o7n9uNli@QwjDMP@=f7zwOeKwU$9F~_z()`tUx~tixA7+u7Vm_+eh42VTKVVjU zV|z|B%7te!6g={LTe}JItib9okE^RaD`q zBuUsUCOam4CyoNNqp396;(mv1vb2mgx@`>qfupkbt`@yq2W~ruxZUSd*2k?74YmK^ zbGyqwG()HmIy}%fT!eFM1d?NX(T!J#Pwgh2aZ}H61itx7SqnhT z@DbqHNrgKzCN>dAMH#24jAxzzKA)w;q{G`3*^sX<;T}Yk3R???IudlH!)*_IoxwI5 z!=lF?#6v8h5K}FW!f1HIUNWckZl4Bfj`i;f9(@$Qs^rg|ns?}k)An{@$#+=H6QB5CLa_MgO5owvbCd^iAr@t<5I*8fUp{Ap-S%uSsg^#3u} z@t<=X)WF@8mYsP-8Rsx3xqvXjg#m~C)PoB3t)N!Gpi+X2fD(Gd{D|pZ=sglZrjOhS z;bc13?X5`dv)Hx}hU>D0%rQtuB8}3AiZio|#IqWu*PSHi<4CmUn6>9yzuT@S%}kiq z&b-3juYN|4e5bkBxu1PLQ){|pgj8>iyO^XzvnJc~J&4N-P2@S$>dLg?yHPa5;+^jk9P z93MVv72Ja0GTT?hE;(cmJ>EgPxfMu3c$5XxqP|wgnIww26vfKin3y-axm66Qx}O4Q z3-zl?Hb`fHtS-BpUf$d=-))jl((JY`^+)5_+}SlqtyS4a4pm|3mfYe*uNysf)h~PV z*c`(|a~kh{3b5ziK4Rb|*`&sH7~jOj8hEK?5MpfS+Q+6Q)KWZqkPnJ7^Bfh;-*w6F z?@LubeJ<0#uJ4cf=OUkOdr_?5;G)qbuBTJv7aODhKH{H`fF%|*Ni-iwOQ|Rm=>vn# z=>1aaV@NESG?D}9ZLkCT%Nd2XlJN8~__-Ul>bxY8K8xi6>_%u(&Wx;I-2e{RVvQ08`5DP0LQXh_eP1Tk#5a}5i-axe$R_A~7ZSNNG^zXk?V z)EBQ88t-RTq`m8(7LOs3A;sw^>bk;9C}M@Tk}MHo_h$6+R|dku#lTX#&j9){V@39E-vPRAqp*QS zf@O3=c%!fd1OUp7n0?2(Pf^F8zw7S{PIMmZq60mp4Uujh#~d?$)qt=}9YNBioe*xg!$dXIV;rtFEtJCQYck zD)wBwy4Z1~2MkT1?g_7_)`Bm16|a^MMSd_;ix<#kF;%`@(-*8E1Z78I`uet4i82l< z1lP^OFzOTn+RO%B3I((nG2rHFKzQL~7P#QOB>fD=3+|F5^{LQlWrC}G&dm4zMtF}Q>lxf36 zq+w#gNtx_4yVVWiN!*+9B-7#P0S$o%?%uC{A5%Pq=`54_vTa-j4N+WDs>>6rp7;Hm z*orF4Z+$8RHtFiDp6u6l1MEpx?Rv@;>g_zSt!R0mGTWeDo8qkK$%R`1B~-m0u4vL78mcn zIGl>fq-NN!9p0&KIoNq&iXQGAN^asO3{@Fr?W<&xXAf{)AfXwq)`hp=*|r7%8xq&S zpCl~}!?hrF#MxIx?@k^GIRS*6#%%M4z^D!(_l`oEOz^WWg;V%f^QXif>Lt&!)^0cE_7P=I6MCfeGvn%(lo8SxGF8KSY^ppcZXUS>8^Il6!=l)%1|RCc8v)s<^g;`Tmpa{a7RU z?Bkbx3bFgqI+=KdN#GRhO<;Z?sBl!M(-&T-^PBV=bvAF%$T94Fzg%Pc&1m%?WZm(p zi&`nk{41eX$yBV^z5G|23~nx{lTQ+FjNB&LfI2)@l<4kq>}b&ND|q&wgOWlZL2OqR z3|s*u)x(-U%px4zVz6{oe+4Nqwgu5oZd*hC68rqNSS>! 
zj%DeADl?uw7GBZ82W0OX=8O8sLow#p*cahBc!#GgKhm8*Mz{~1zo6>3SX?f>3gA!>N}7;y@fW)X0m3se6zxyz03 za8Y?$^}RzS~P);#XhIYTTfpRn{Ab=&d?N2!$ zEPK^35Y?UGC@RzmSV@i(Te`lU0BCG=9irnYew;^bDy}Jcx8%RxN^JaA?Vfu2I?b)I zVi8U<;Av+&umXPs>4u<>!W)eXtGyILDs2BDQI))@LaNwo9pVqxT9q zgxR9u-4D=?n)sX(y8+~Er6H9{3^dD>jOj?WW!bCcn}MS*=xsmR!Io=9Lb{GUS3aiZ zql2_nmKP}bWn`wxxSd)~K7@vvz_x@bCawpl-aO;2(Vv9_lX7Ab&K_7-=^Cp7s79wJ z*zuxzn6vGSrH@qPpp#gjf64X-L#Tdk86}j1p1CQX1rsJ|!(<})c z{Ho^WscZBSg_83sylu48{l*<+3dFuX2#q9?B*NGPo3FIpqvF z>Yzj9Mgr;9!l`eZXCUpfd`lIsaOPq2A}Muc{)%};njf5hIt^w4Gpd>OCR-IIzOCpb z+Ty}r`iM#8S=$-RMjXr!Iu@5Widh^IR&7CJCu|W%AsxleJj zkwVe3KRLO--iWgEQON)+HT_TuAdyP8^MJa<#XZo%n)tkuEKUd?z*O4#h#%zN&^+Xb zPjE>Ux`t2e#U6m(Y`la?w4U?q)s z7s%?_(3?<&9_56vfM92UTunZZdw>+IScE<5vw{PKV)u<^jz zm!vZ>^a4S9s(h!nRH$gK@J0EyUvs34~%g#L_C=V>=Mr>RcCcKV{lB>-tvV;Xa6R1%)G^0DL)%RDJZpeI2V1Ow7yYPdKO0iUrQKmU<>ww>|8M7O1=r6(%ab1AF zpkPII_p}BN@a;r(G}g^pu8`G~wP;QSgAHT~9GxkYx~tK%}5_=M`rU3;yX z%+choiCe7+l}K5!$Yo}z-kC1* z>kA^=MZX_FN78)7L{B|OBDPD&KFRYRrtNM%*tK?O^}2C;9(aQie1b|l!$#iev-U_O zZZrM2{5sumMC|5wf@*)D*Mqr{yXxGZy#MOOl9(k4KmlPa8i z4d(8MElx}=f2ZzBWXY-d#=`KXlP zNbm7hl;QR)YZsKjj!(uzZ%~3>K%%&i^ELQ`?(oMT1-uZy^aaIoB~Fq|v3n2~RE1vo z#*tqFUYbhNUloeP zdzH`_sW3yYDPHtKui9sB^k0_$T~i`bT@*(awvVOjvQ6Jwqex<_)HvqvE&RguY^Th12spnCv>%=MKVV72GDj>@I<(tZhP# zj{5Ne`_y=HXW*Ee$BR3vvZJ90gq~4lV_%&D`qyd#eM0Xugxf5!MSSLc^tQ%K3 zd(uZr&zRd#c~(71!C7{fFwj(4mUJpz!7ubcQAUZKms5q637h9W{DOSJEMZJ@^inDl zHF~CSecwnv0F|G7cgi@_F+SKmGnE2SJ&o36a_l|0N6>0D$GcQXT#c9Qhwl z)c;kA_}2sVY?XCeBxj^Ala5VI(Do`he*(FKIU8$K3uFjj>Y|e~g&7r~P8nCE~?Q zsl%4{{=ck$0c%mOiDsft*;sbd$z+IU_N;QE_nZLXO&vo*8@!&eV5jeI0AXqCMB-9&XC{hIEv=7jZ<1XwT;KUo1620^esjiOeQ{4NF61v7?}I1hUl)|#t(7%B9q0%iRX~^%X7gsz0El{gl+MHGN(c`4yNdb^ zgNt5C&}T)JJ$gs2=8($=<+#eP_nuuk@HJQ)Q8&5km9P2K6a z8({>iv6j)fT_iRq6tSDd2xo! zz5Hou@v`k@(#K&o$+AoqfjCm9hwB!GZNE3Gd|gVaO9{GN+JkCg5VKajK#Rq@bJLfO zPK%|O1$TMOMhFJURrX{=^K4X_$JcV$ia5<5*@%e4<`H+{qOz(b9-00VVae<+Kb*J4 ztB{~cFv5XgY2Fun8XC`Hi6+x>whYdZn6p1aNS8A6wjDzEPYcjuWigwh=U_!Pd506w zK{W-*DexL1HH0pcM<*q9iJg9(L8qyIp)uNP{XkEITz-OU`7?+M>xLg4f;xlDWD46k zHNMgbah;okybB0o+%t>D>zvfxntsORzkv0?VB5VyM?GAimBao>%N$|bf3GlmKE6W?@Lr1>Z%rL%8?s+;;hmL| znWH_{$jvg8K6H9H^b)VlbWNTQ9PT{XBFI$95EUq>3hm}QHgY{7gzCi9So1>f04Z-h z7PN9ss;)5jB-^COFHw<_27I=DrcED%BtjzeS)}Ys zF>UwF*i9cKEC->!h^d?L1MKaz+~1B@dGfM-qoa+6-k*Xoze>iepNZp)pvCfE%+<2J3-9Xg!jnd8fmP-wa~3+vVDuB_4}{T=?q z9X1`uz%V3`HK9c;#<{8n^=8Xe4#!)~i*ExPE}aa~5vNue)S5oe(Gi}*Hjop@m{l4} zRvF2hD$Qps>ni8ni(}Pl$_@Li6*l+ni%#A8nxP57ng!<&nmavUfBb5?sO?^urw3)4 zESgSWm#A8n8+p?S7%+a~d+qW^7fQ46TW06k1Ny~*mE7%;!ae$_U9n8l_}9&xG>|P9 zQ|?8cqnxykmh6F2ov{Y+u|wfWTMIlF?SX;GtIz1@Y6!!VVwjGA;Rc2|BFMyD)6JwD zuk52EiVdNcOO1d`zF@}ycQ@}W*Ilb3I&_JchaDO^;(_9nfF?}l%df%EM&Y$CWrYaZ zK5(~gbYRmP&N-KsEtM1EsKm|?xSpnSn=@l3^<$zfECKipNA{?Y zR64_Nc;tqW`hNdJ!ohnK6|g`3DqNAz$nYmOV%xBPN*m;Q1pb=BJ9c(YP%gl73F#VT zzw7t8|M^|GrnHr$ImXPtnjiK(aZcYQwJbc)-f&~Hz~4lbbP;%53-C6@Me-1=e9!0X zI1&a%^Vg~!U*cKA&TB_e%bOAJ69dMxO8mZn0#CZ~3_6v{kG!ptGV1G}zA-XVz>&4< z4-4u@8>0-Agg1YtEvIL$hI>)8CoVp^^G_?^DBgwdt1*cL{Yt*_mr(FY&AN!WN6!ON zVH+SeQ#*{%B1uzi10ts3OP8zO1*+DEKX3Je=J~*Ev{CShSl{Tuu-TRLimAB)?k+my zOnb@c#+84>YZpMXC?xBT`n7hClU@Gdi+|aGQwRGFP#$##U$G0iM~g~7Rr%XM)=iuC zjgQorWKkxbTPVC{ z@H)f7goK&PEGAjBgufBgiyzL2XoY$vpt{*M<9PNH<=0+&oTONA?51_f{sq)R^MQKi zX&N#LT31q<`Tn`}?7nr}b^ZA`eeDZm2l9&dMn`DK62^L~0_oLCmLbW2zz;-6NaBdE zCBhKot_80D%Qn;wnS7RNObsFrdVI1KrO&yOki^{HLpW*+d8op`NpMIS#Mi<-Cy!E@ z&cR-BTE=QvF<2`GQxU3)gw>@jp6r0yXW0O)1-49>xnXc$z2d}bn0bw*-XbLPwoqA0 zb$y1I+GLdK?t^68NUsIWXkGzYasqH(L#M$6T!5&3438KhGb@j~=jw>5@FFPk&r-N^*Z(MAa2zWj7^45VPb!bs{gLOgI2yM{)=96_hjd<)SS 
z&C@lh^e~}s>1wAmP#cJ1NMgeN#0gm9A(LNWmu_R0G$MT&EEG`!u{XC6f8zYNLB&kBNQY00=+0qA&Imo_Q3K&WDSXw z%OP2ef>#Z3byYipClEvQg?ge)ro#6OF%)d1FD^<>I$JA8=n)F8#-rmdA>D7R*-DH_ z_RnI*2@~oDXfaB}G6Jg7judO97+E}(xlj>yildh@V-ar`HdX4mSnJA<2qpcZ7BHj` z?J`o~D60=l8YQ7+ZL$$tQqlH{isu}y66oDU8Imk|pqD&gBy$-@!{*H(yp#Thb*6fC zHZR?QzOV{W8O1BGV3pi~ouNmhPrx;Dm?DTx`v+!e5+7vkw;bAB{;KB6_SJphYrkdI z?9g-?utl^&Tgsm0x3@=j#eS=zmTO7Fx0tUhue1#~6N>~;SJ-1bs$I#`%JbrSMjSC{ zpeYa<1EG(B(No2wKXZC(WJ)`kl1k!_3%% zYvt#XjBH|c{c#6u^CS991T=}-5HV*Bfv*Qc%$3io&ztU;2~6un+XH%T#N;}9c<5)3x z>an{gOd~|jHeU)3{~hmBJ)xehg>v}v8HZl}kL1MV6v*tJcJjbBi@)ee>2FtmO9&Cl zpO8T`Lc{;jDskjT>KOb`NP55WD}9)2?)VFHt2S_X8{nGs;8?Nm34Ql7>&EBy?+*8M zIFhR{!gN84PS(5f9+Uf?)Lt~m(B$?Ky6@`D3m`JWu-g(fk$-zRe52}Lg3Ou<3gMm# zq+cxxbRMOBCU3U?+p+Qw5M(N9=jq;03FP!+c<27FHbj#j;DMw2k44j(R^}%~&*fhT z2#WvmhqS(pzUlw6Ze}ZQ{v+h{dlRl+U!A`%m0F^p3PHfmQZf>#Xb>PDnm6A+-CcjqwiFDeF`)TV7#Y>ir7$@L^*!8*RxC7uNIYs_l5+^(XDRV_Ns) zp4Ly2tc)!f99SJWv2`v4tXhJeco6mS1z{edx-5*4bTBGF9r-dX%uuF!xhTzc7(q(-d*Dm+EF5 zydLralO*ptZ-++GxzRZ$nJKVsR}oX|8eCR>sUEhqdU2qnCstX7C(Y*gQa-@^E zXZ4*}-WhtlP^3(9DMtIwO`pwMNRdV&34@0(LH`3AvqDe2ADhb34fi*O%zjh|f0lf0 z{x0T5Q8TLoej+k+r9oI8uhec6c0oOj`5BFxO#mmm}hEHD{2+cgqL94lfm#{lAy5j#N@xdo1~>mVC!=+{aN`R=H^wTZb4ni#czOS%%? zv1;THysbF0i7FH!zre|7k%F6bv-9NJ&b$Ld0p~es9O_uUfoptMf`++|#e*C#wL3!e z``;s}j|d#REN|j1kMRIME&XP^Nt*_^kFfpW(&E z9?8RHRNA4cPwQtmlF#RTrELx8M!Eq-;g%Q*%Yyk3>E;kV9j;kukYoxM;>#`;eF@U& z86g#T`V;?=)hIvbG|PB(`1j-AQDpyN_WroQX8`;$C%yk{AZ-6fn}@7{#gC1Ii1A-5 ztYk&)AO0~C_dJa@IVBZ=LN9Dk`6VEYPp7^Zl%9ej9=xE~Lg=Z>8qDey3*$V9Z%Vk> zULbD|u5o9Bfp8+vgTBcXH{uS`JwX2P#PTi z*55d($hP!UOR}$ip$yR5cZKOLy29}@Z%t8yD1yCCg*3JdToond*mpRw90XSBIVcu* zN-Oz>Z;34Vm^gNXpnXHwEHc7IRu7WtVhxis)ls-SRO1F60-_R6QsvDeshZp-b}n

cLo<8?l=;V!pn9|esy`B91bU*ABoRFuxmscH!6jnBcSh0>_V zu*yI<5ovdJ(1|!)mF{YMqk|lq8!y>(R9e<~DGzT)BV@fDP>#MX7NPxj=q$t1eRP=c zd9SIRO74o-<*vq|g_`A235ycy7>{JNQh$1iWaIC>DPj01$&KMLaH{7IWfPkuy9Fz7 zE3*TEF^@-p&rwHwr(~vPwEVouNpD8GlZD^Y2Bq;Dju@lVSIwKE)(_}230wUTz4)&v zNxtf(31zLWcWYtO3~(7$uAg5em)4oE1W-;76i{42T^TQMt_**&jW1E^KVE?U_ebd; z>&mEbfcE7F%47ett~mZj>&n_#*xp&+%JHAkJlP7;w(~z|9?TB=^9&+r^B0Q3!;0qL zbrkqC!det@6u}N=i-MkOX;lr$hM=B^;eYykPk_Tw!hiGn;RdHQNoq>SjNDyJTx7c2 znm!-bT=N20;nmW|{)D8|=1+QK!p_vyMKD|AXt){e)&-M7MS|mocRgUM?#AV<@*XMT z3~+@9XeW;ntr?Yua-pt^(iV_~pGxkSKJSn00)kV7t9T9jUze_dIq>4NMs^ z!L8!p4hS?9;>SL_lQ?_jFfGa*LD9aOqX2tN^yh)aEU=-jDjD_0M+twCs~R9(#O<)+ zl5$OLDXQvc=$~rEO8tgjc@V2CJ4d(hy{22~I3n+ z`why9wW8Kod({(c+Uq(q$Ot+;Yv^{%JBE&tI`7HLhJufs=dOH&o3(qE$D!JSRw+id znUZ(t@nh_^=N2WGH(QBGU_#V%YJnPOypo%)(h#WmC6Nx)gTYktZWboBEvG z=2vjZtJ5euY>2m^$Da2%R~&Ppxvk-(7b7bxiyF{rL$CfV(FT1|Jo2!z0spnzp#Ir>v=y z)>v!0Q1@4J8m^yWcr81{WHQ~8d4p}3%s5>N1``_qv>a2MrJfd*Iq+Cj)pPfvEiyBX z5DxC)dsx)}h8>W=XFWf`;+L32EFdVuhaNM~hnuYkr1dYNdn*Y@}TTXRK05Lc;8g@e7dOzr0+%+yJvMxkUDnTU4@~7As@;tmH(;k3n6)a+_lU6|uSiV)m)_!+Z+`Zlcl3CpAeq%n*Brh1^FC5vk;d}mCx7>c zq*@=THOaOX3sha8`W$jU%A{%o=;}I1a~H<`ZX<3xctA8EP@Z0$SSV8pI`768q<1i- z+KS)EJ+XBW-+n)V<1db#oFw#*2qqGMCuzeWd?6!Gz(mzKeA|?h;GT(Ob`h-N{Ha+q z9%^snb(V-{q~+8T8)NL)JENlK(LznBX)OuxfeWML*hG+HFFJB5&|OOX!-L;g*Tlp_ zZ1gdYQ-=GBm3O?6%Yo8E`^o#oPXn>c9z*9R=}8k8P+S$sb5_r~n~`T10UO zyrpvQ=S00p>biI-9FtkZ40BcN|FgKyRYx-R{ZmAI{ofX+4F5@ST1lYM1Do`Z;uK+< zPP$evpo|H=|J{kxI@HQFGu=FpcU*|*wikCNmS97@4k6)RO4F>ie=SYhJ{~c9G_5A{ z{g^qqY5N8tIjd0RhuR}kX>{mY{~ylYF*x&nT^gNCCbn(cww+9D+qNdUW81cEPi)(^ zt&`{7dsV$_*Qv91ovM7lKjf<4zq_x#x{1D_=@oqiv+PUAH#F_OF35a*$`d#HkRLQi z&eii!NN&b(!7M!mc=hCGfpeMU1Yts;xD3nq&WYal6WQ#GFSnSSDG3jXb|;52vjA+2 zU$5=eQP|=2%fOa|L!4G6>R7RL|I1z zL2v5Efh$M1F8ar~DlTbm<^mi|iIvS24xpLjGb~2atCuyx!AxV((NjrR2P{|diiJ>& zGoYoKkveKC=2rv^B#dT1sHN$w+e_R^2(N$t%?Rd7dDzzP?vLPbmwhnY@y(r?LW|zn&sJuZ#>U?y&o9A zK8Z0x{oos#vaJ3WG=1m4Bs2M}pZJERo_5P%zZlPdqX<<9H>4$m&!M=+x{g1w z4ERyP5Xm$vEu<8=hi2G@J$mWC9OnyU>eVbY1%g+!?H_Db@4M2Ardg+{Y$#+T6wZld z@vT>j#Ad5q=iVfxuHJYBO*gAZ2#f^%XU;wAZB0H?*=#q@(Xu=ZG3Us*WiLcj2P!YSD164}Sn1v4$qhc7@R(tM1rU8#RekPw8%^ga(K) z(wPa93*aWhqI6GaveqSJ>~z-+qem|2PLFA)#RCNjHbJIStkD5-_88sV(6)qBg5P8U zrYD=Z6$dSJ<>!6uM#a#cB~0=3Pxbx^>5nnX5r% z!ix=Dk7U-tG@+OK0*awlPS)0&$qY2A-O@{%E9-Xpqcf`;Srf+CBz@OK5OXz|msW%v zG0aGTCqjVE0cOOdG!;(swGnC}NG99hM5l2B6B?$ay8I&tV2FWTKV#0dNMtd;%$wqR z&s4PP?(IX=AZ6E5DkBel2oxA~v#FLHCf&)&si)kRP5s97Cvee~XK~mMixGEA& zdR8sp8B(+>lAj79-5idWIvw5tfQ6 zl9@oEyLMaUhKj>$#Sdt0{~3WSbczua-0{}389h~)bz^bMcC`tNb-uQH3){tw4`an? zNyD^gc=_DGP{dZPi8j7*Ry3@>ZV8)}I>`!;ad~UNc+&eNp4p0B$Yv1rDsyaw-Ta}2 zzPN&9V$>wFF^*Wncd~BYY>tV!roIA%7uY48Wf*i8(2q5?bI-A|ajxF2?z-LVHd*8#tHi24X<6%nXaD<+8xe?ZjL7`36*E>i>^aC=nPY}&n*Yq>2I zD4dAm@xm#|xtX$`_)FrA>1=_B#1i!3l3Om7;hEd!(CGZe@sl>03XzR8U=kkgwUxTL z7Sh+-RH&Vqv{k#Q+04Y-)bz!}gffBzHN5KFLRLx4nK>OyYaz4FjnR5`u!fb#Sr#nM zULY1WP-#2 zdK)AhdO+|5G+5h7o{edB7yYgK!v&fLj#C{H1Q}16rz;&fgxpI;<*sPq*qA(DGsf)c zT2ZTJ7|+v=WvesA$<|_;OLrnRYBqSl6n;Uxd3uKkqLQN398}@j_V0ElhqLP<<98PlP1R`18xBjX9N16|wx^gvpVH&=6*Ba}EAnE;@p`7+ z*u?1Fonx3Wtiz_O+&%60)_iJdxX^%%c5iCh+Dp5!X9(}YtMYEhD`@@vbR?OJkLY@A zqp#d%d&RJ0c52GHmf`pNcj=;-i0(2Q=-0_$sCwb}Zaz5nBYK*_vnWD^Iq&XGSs+?? 
zwlMjk8{$H3QT+6kf{E_16GsvG={HrySHX`^8UHIOTnj37tp)1EGPOG0>x)$z#TT(; zNX5D>niSUhLthUqM|2mxG`2-0S9CWcx^-Q`t%`&CXzyrunGE3+@xA=$C0ftLD~m5T z2QLWssdOZv>N%8qF}1|VgJ$jwz7KU=gbuY%rWetf#Y2sfW4oviv5y;CFNOska0`B^ z|I6RI6CYd-XP`j{Jg_+H_n$!UqFXzQG%@*l6Q2PDrJl;ce#Yuyzefzr6&x~?mbNb-WiguUJ6+ar$86=w2{MR4}HXH?`G2E$(o%={3Z zKMFUakxxoT&Q`cmpJElRuBgvGx<>N&iswH^>TcieZUZtJZ>vHN9_+_2!Xphf(GRzR zr6{k#DZPg}t$F@Fh(^-#=L%s$x$dYb}3gJ)^V?nsZP+8K$jD}T4(V@ zQF?*R-S8B;6j7a)R_c9*UmKt-CvO*K`86}>7HE2v?>*@Q**nnaOF_XAVETfGFRRJL zLn=-;d;DrsJ9^n&xPv^T^BcS`=6=Hva{uXg4Ml6yeW>uH$-D>mLdgi`?1q7+ebY^% z8>e~g&>`r)Qn4)ouFDbnHPi7WHTm>)#r+2M_9gdtw>sfa-m^Fyk;kkyZds zJF2qb2ivsZC#6Op+MGd0&{4BO0OSCu3tKPQ4ogR5BKDYub2jL6GibGPa8y8SH>J(s z*|i6(az^tVOh=Lb)qzU~f~|t;K>7{p;d)M(LSfu_EDdi&&|XX3)E(=)mCIaY$ zQa0|8vUeYJHG03JM;a}TNO?@+p4tN1-o4faGk^u;>WE%&n$gS&Fh^^oTl0n%?#UYWQkH|Bq$S%^U|m&i+ho|< zeRwH>8F79h#uNP(6|g=Qnb$1u*dw6jAj)YL+Ppyc`Kgtf&3UqR|kk6o&IM#RIT^x;~3sh zX~&H%VLC(VA08FCYhWt2+3c z`Mt3ru+On_qmeGh4qq{1SsKL#Dj;J&Kp(t463?>3viWwyEo1xE14>aCwbvKWYg9g;`n)$l`Ry%=JEQD+H~l)9SR6n zyTUl)vaXopds?CMH*R7cLyVpJ<@ZTP7?H`Cwv--~ZrS|Kf@{(4H=$PGqMg}E%Suw| z?Ir?4+d|RIj+Ys45UWe}lb;ym|I%2bt&C0nPLYn#fALLaP%<4*gz%+bAg&~ z=BN=AP)?yW2LM93OT(Eh`SZc(M-y%J4z8xVW+nR(JCAa|k8=cfdlOKq3^7hgr#T7& zn~gH*sg8!NrrE_F$9@@-=vNea>eA0!fixpkg>o^^K~{%)vK@30SsG{4sru;K1PpQ< zlJyQqZNKx`;Xc{%0Uh1^F~O{JOL41;t9<3{Gki6<`9-oWjhN3Q?sbFq?*r;!g#oJe zJ)A?|-BRCK20v^aOzHIP^bO67=}esLzN__Ij2-9MLiLFeA^LBT_^RK zCyu78ki5^U7+Y_}&!=veHX1?@ASM~(>lvPp8CyKrPwU;EFMD!7lJy3rgQOs7hpvID z8VCZ*8MOL??IZR)sAd_Y^%NXzh7U}z@pcSB6f2*Jqo9lSQUp|J>m6EA`mqmDyhf+v zG}Da5_pzHR+*M9Si;^5ijW1>Cg~?>rkl7?iSy~3+C)q_Nr(>C{yVS~&TiWx`s4uCo z+8T@6i`JJ?^=tJ#pg(wPbQU6EsuG14_Ab3IEdYw8$1d`kDLUVQ*2rBGk!kpic#0D! z(wcV71z78Ib$Oge`>jW*Q9Sn8CmmDc?0?r$gU#TL?FkB#UtC!(Lm^HaLTs6wj#wYw z+DuMd)YDg3-IRv|Mvn7n-E>+Qe|$;6_cx5))}y5i)4MPxi!Xp%8Mk;je2H(sXz8&N zr+{S|o6)4A>dsR!I>&J8qXjaY;ZT6p1qRtl+&>4VK85!g^ks09RRlNbZEO|zBMOjz8LFqC%wuoZ} zIMSVLGNSo2E>LA&ID5$yImo#{luLH}m2$9Zp zQ}j7k*A)UVN{MD?@mKFG3{4<5?On-B_62$jT2d^?dm{=xH~;F@C)wD15G0CAIJFLm z=p5IvfJLcFLEAH0jnXVpt`}{b%GCvz2YAY3gv|Y$fu$lkND5lsHnad;%={N|1M36xvuC$%K1ayOO0(`)*P6f@- zXyo91_Mu44BWL*NL+AnaN#rc>Lz-@D!92mP;{G|ypvbR4aDZ$=#tk`UH?G_o?FL?vwO68~s2C+`j7=f#K&O2I#FOJ`!$72qkz?FVGXch1Xj89&X?{To zfUX%Sk>~MsY$*G{H5ILRS-ifg!84|tEaptxVE^mOoBr@EkjH@*k?&8wWD4#c2@1aLtxrY)LhKll-7Er#l)fY!^ z5o?gh$P4x3;djNbW5pC>6nmZp7AR{CA&%rV>XWJtoK@OKFE^IW&C(SVa*#E0WS|;? 
zcwxrrrE}ON=8_&2O?2kRHjIl6wtWNxr$m6|x|dg?r7Y;inoc7Hy( z`u?f`vn`tT%kvjgKp&VP{c9)P`(30ipAe5k;RO*rdN>Bz6(moJjWvD%HV7Il z3j2TxCW_9;Tc|T970=B?U_t9D>qusZSw&wVDOv}ONs8BySw`!{(3}JloE&(OF@l8P zite(dCi>;zff&nfrUq!quCTsBHx7APF_!he0}Q}@TjVd1AME4b*2qr|OD@qJc)QWj z;sIkdJX+=>RVvQq=UB37$h`ehS@=z+YyB9|;Z|wWQtK1YVH%UPc1tW)3hE|NG+@e+;jpgqr;K`aRfb7XqknCnX__vv2N|?Z0Q2|~P&}lgYT+1aej6vAr%EOMX5WeYz!rdFgt;TpXN1|NAQ~cLh z5D}F4uj^P!Neh@KdQ(*zJY-klu0)3FOepQ`sm83zfQ=Edm*#-ta{jJ4Hxdax8#1OU z%2sdvul(OxKD@b}Mb|j;Oc3mv!NH%lmM`=54xOA3CfIZ08v#T;U z*eR;#tgtI7w7&t`T{*iV9R<5!&jkIiG01s;sG}Mo(H#2IQufm>Let2YL-{69V3*1v zX-l3Fd@8n$QL49EZwvRXr0QRIec=rlPP_IbeYz16E&f^{81w5PH8=( zlxV!GAjF#2A=G~_1smZ9sIZ0Wtv7juFCfwltXV=a?<3%*ZmpUw7+G@&8JIIR8kpd4(lBP}i4PsI}oit&L(QatCDPlu6WGsh3{_uEd$Qdllkc!lLX$ z^p8{TzY;O$=DgsudI#%#pEE5iEiE=CU&F%%KYKZUG{|6Rw+~i&T%AV=O3e4VtVtd# zG>mzz=UeidzQI`2$Z|e5bnw5xL%@wWz29L16p(laacbX4dTM zv+tTeWCnpYv}I~|Kc1x64ggzvTGCsr>U!qj;hq`ScR*>p9t3_JbgCPkh83>uG6MVH za5O;_^ry~_6#0U8?mA!z_0Y0UP_P<-y7bv47?fzY20tLK4>qYL5tjz%Ud@8PyhDB3 ztezXaGq(<)5iEFJpY<8PJmqE(FOU16# zdYp<^=nZfv|FNc&peX~ZTw!HS*hNKg8(mIYnDze1MWyS6zL?%mm_xfKZd?3MU8xA` zP}LfOd{YGQO4&V%Qz@PBpe^dv!YM-g4jLS~4velRvxi#A+u`lgAv&=VgIkHm_f7{7eL`B?B;dm931DtJ zjP@|Uhq!s%CcrFIgL|Ep^W*~f8vF<#J}`d=g4??z4*Ja*t;aFC1BJN2{7UFfco+>E zONTj(hJT9g?vm2sW5f{NBt`kkqB|f9>GI~e>@_17?9B^&r#(f#+unBle;FNdtdi6}?zhh-_ZEoU>4c;wBIMN$%(<)>|Cryx1>*7n z;0;HS?Gnun=QTRX*I3UiT9CUv81_e3_$C*SVR7)3`XfwS^>-7RZx=&HbU&Kp$NKsz zscrTq2`^3T;XsJin`>-#fP`K{?+biJW5|;p(c@sg`02GQq~y*4{aL@O-KI!nx-PiZ zH})hkUzAekgBkVIY~L*O?hssIgEp4(`M=8&t%mSmTV3_8TsY}Yp_9;|$mV}}>Sn;k zWTBQ#;UFx_@L0TPBmA0uI{h-(h)jUVPB7#F79dkKGyG8KfDp+!D>9-A>8GEcOhy&R zWXJ8Fd~qah37gYPnTZ(*#HgDZib6M+#IL!Y?gK4t^C`ieNiZSBn7dp>;5+1?CN$J% z3MOP^rgqfNO%TFJ#4cMlARouU7J8^=o$~NgS)YkiGf`v6wLV#^oUE2E?csRHWXQy| z9fU7&Bl(%zo|Znbni2-n=(I-~weDIikwYs#tFtYdbMA$hl_nA$*5+ANv&iiVWd zY`yFlZUKw0=4KM&5MIJH>k0$1TaBpLVx0+dj2H4*OclGJn8OB1-=C`a5vX=BOC!6Y z`%i~*|BmW_{bO~(5*%&n5(}EAJWQ5IyViz@3dEx+pJ>8D>&{~W*`-)If zbU@|GEMZCVpy6^BkC=ZsGYo(NCRI8m_uAn+G`8p$A=X)eYrmR%9?1m%ZPAjZ*n0|B zirD5LAV6l{T;Z^1PzcOe30q0tkX)Oc1@OA%?GdZ8J~yFcs>YhDjnQ_InQbmhlscd3+>#Gyr_*aJxhD$_Yr z1(MN|Z!Haed|+KoBnr2LS~jwD&NA!r>qT9TyDaTMK!u!4e6f~%Nhs}1GD@Z z%yE(}vLvzx>{gV#fb*^iUh!`&%-vWLE@ zh3pCZB%zkvkMm~~YEkeWjwxfek-Dom+`y^aZO~Y!Vf=e2y@q@jIC98}PDoPRivgf4 znoxar$!xdu*x2=}%hkkcvy^OY$3R#s8nPp5QQKOGw60`$CUnFv&|$^v z-@QbuWgK0WcBRAg_((c2GYSl041gQQF`?eNFuCi}Fl13=B_%Ha0}54%f)6AJNVKw7 zlrkWe*W6$@(YbDY7@W+|Y7S%S_i76P_7lC~YAPv?NyncU|Fkfcit^>2B7Zt1uTUEG zM?WOp(r3)Al4sN{`H0DTkK9WLRW{*LB0NtM&Q*uTq55-<))PM+&sMH6kDfn2S9kO3 z2ke;E6oW~mo;nPwbakJeJ`)@YezmRd<&iqAWcM3g4PfTHP9D;#GQEW7n{PVJHo0Z6 zSPY+!_>9RHRdtOg60B}JcZ;CDmE50KShr7hWoQa4MZ*upUA3*F@~GUQ>8V#skZy@87GP1#I#5-eQA-d4sthQQ z-hT%7nS@WU(jxa(ZfFjL!I`)Df#(SoR8Q-Q+CdG5TXGS)552aXO}!AV*&e9&d^RD8Kf8~1bpH9ci0klPM?3^ zpKd@=V~V?fVxop#BbJdHqD1RWS0rYrAdrpYdv6Ixz*BStL05)IB^PT&?p^%x1ZKOI zg--;vG5D!p*Bs;;OR^TQEF5Je!XRk9b|P@<)`ErPhpT9LWOl@Qj(+vo!}}!6uEhX zRa?AH-&HZp^)Hmas|W}kc^LU7AUR<)B7iPxuYE6qZt_|6nb^majRHc(oXHTE^~kIF zPTOb8)y4PiX(;=L)c_pfVjo=|B99GspibARV?G|R4R=JR@iH0V9|2ihM0#q$hQa`w z^C9ppz`ENnuyr@&XbASIJrkV&ZDS4~S9voqx!PxkHPI zxg$+~aZ~MZV541k1bW*0w}q5LxC!)yyGitou}cV`$(9AG+DZ19LsajwKveI+0NF~q zLD@pNiS@O!_fIQ9JCJk)KVh66g=d4^`32Uq6K!V%!rdV7eHcPyZ*%uMjII6Z9gvIu zgUVZxf)g7e+lI(HVEU6*{26 z*T^yh#FjnP*)>mp8wgBh>uqe}89Ix!Hf(z4W0rm?N_g4JJ22Bh(Pdh3tAnrzWO|(5 zsMokPMN_dkstAfBd9wgkr%0ojXeMLZkIu#SRkVa!>&G$su;MhUO-Px#T7ADyhIu#B zGq!1Jb=_(Ig~F9xxEVlh#%LnFfr^Xomo9DW3Q^s&#CbN3vLE{qUv^)CQxfiiGOn(p zO&zEx9ECp9mbgtdXsfL2sPqYBwz3lAWge!A3Bk@HY;%EzEguMDQrWPT?xwaxepD@CVlJM 
zn=}amp=qwKzjV02-em-pB6VmaPj1K#G38wvRB+h~DEvqyQW+P1=df@Jefq;JDV0PX z$~uB1T4y6JNs}-onVXn0^$)ph4o~KoiaMxQ5#wF69h5=MTp%=wG1(vz6aLni|C=*f zEUv6$-d>VCvbgIN#^Y?|PH7I;pVnGGg2LFkZ~6udJi*m$)i^Ql<_TF;6Zb|OAFoeY zq>ra$V!@i-U(GMa25pG4I33RO#nJPOA~yxoa5ux3|1}$_sHaNZl$=Ey-U0!0pI`-K zB~0|8h!A?YJsK4WzfG#(ZHO6FWS&6Zz|JGP%26yoN)M9^%q3+hIh9*fYpV7@S5<;YC*W4nmlQu}GfXZD3$q+dp5KSLRB20dlAd~Mu1LL!UNh$VR6}H2f z#43~9R=BR1B|5>;Lc8%CPnSuPH*|ndSx-#BC}s=GD~-D~CBY zrxr_Qa&sxYS5>Yyg0bKp(IyDJwjsqPxMIA=G9Rt9s~SxesjR4G9VF>aReQ@Rn$6k6 zW0x0nyBd~gmfyD?+&&Y9vn(=2`#clb=>|`rAu=5xaX*``iYGe_woI{`Q4CizhW?ih zeNY#C7w^3IXJidxAiB3_Ehl!(X;Q(e^e!>0V(gfN<4?_FV7dK=(fPRQCUyAj@{9eT zevOMB*;`eEg?)3Tm$o>LV6;e*n~yNJ7Jr9e*Rwx5&uwa>}qvW!Efe6^uM9n3L+A+|DxKO|C}&>0(4ME zDHZ-gJpjOynw)0H;xUotrVAv1A(Y*+XoWE9HY)4u)jps;F?GxS@rk$FexmNBYVVR* z50Ki_9gTB1PQFai+#S3XPyN8v=L%L|AKhX7R&HHUpB&T%yooB*-SqS`3Eq@m75EIr zQP|+nXCJo3>8TqE=v?x3#7fXQ_F_r5Ahj-W>_{TLs-(}5Fc>Que=az?Ww(>{aV&oP zE@8^WW;=dp@!dbaVs^FsX58Rxv-^)#?3)6w8YPC8VbkWvK1^oe^a*~tZ)j&O6%}9w z*vAmKPPc)qi*LKi#DdyPB%1m^`+sNB(l|IHaRY|tj>Io1B?9yXg+!4|hA*JCW|k88 z&GZBIUEnEHnF}z6Gftcz60+^F9Ea82%)V_0kunKp-+3;AE21EptYvoSDsZJ zn=4mkec?gaM2+z|3OeI(b4rU0Bz0O7R1=~;CC>Af289(5a%0zrZC5)kDZCQBpD1!} zQSx=eEdk4rfsm%NAlUnALJF}&E}<6BW8G?R>8ma>KR?ZfC@o((vh>cti!vUSW>HrRtMOT<(=S%Ve7?c(54ZL_8F;69u&s|1ceh=}p)vi#Yh3d{cP;@Wj(y z6Rxjyz)22S4F#e#^keTwp^i(mhb1>lbIaxaE1e1xOPSnE8UCCkS06nL9n15M!ibly z63Q9a9f{(xDE+K}xXazaaZrmagldN}wr?%FWQRksp5vHyX*1i6C`SAKKd=s~M>}Bp zn|xP(2bKIsYV=z`QRTnKP({=ApIjo@HKVu_JM&&aN&}$Y1VOM`!XgxkLO<~weo3m$ zQzwTsSqalVqgg24(&$h4r09jlKc^bJuiNe2JzqPwT=4`&);|*tgeK4H>urzLk{v(a zkCwK8I)ZJT?+Nk^kqs?g)M%31ZXjq~4Y5_*82rjP<5K2hl(#G`TC0eH<&au(iK5db z50U_-#IUx>RN6(nBsf8H<*Ir~c` zNWe{kJIT0-P}I;}wcM&$yX(BpEqbW~Mw^tgYQ4lRh)h&#j+H=U0|Av`#&nX71SLv^ z-GttAk4eci&1RdpbmPDxUYaI2%FPnWas80Kz!dkE&+9#C$86UmU5wW(hs<7v zJ$)UZYy1cb{r-N4s4(m~KE7}JSZaO1#*G#FD}SJ(t@GB+OVHiGxA;+hosZkZS#86^ zLS>9IGZD- zC#zHDZii#;p zWagWgT3h>*`>NMiTp!2=0m5KWc9Fs3j&9-Q@@PHs9XObnIWw?@)Q33&WLLqtWXIdp zOmhQ`mPibKGJioBYmr?sL)|DJ!S25e0z8Lg-&vOtk63X}ifOCIax<9{1&C96MsY$y zrc?LZSDQSg`%LHF{@(S`_Jl}Z7(AVwrO%|WPK3w^d=Hlhv400e-TFAj4Sk&gsS5ru zINh+6?J<63_CRQfrA5mjvDpDB4)4O++;(yVt6QMDsmkW}xgNQu^ggxV;eH_iu?x*d zDQfQ7LSJnY1GW9>4M`@+Djt@aR-xN03?`<*Iru835OX6uMrIM#BrKRTNQtE!C7#ZV zu+M}$y^CnhUb=s4!0s1M34`_Gw}tRp9$vkf2F=0i={0rRL%X*MEPbJKK5Es0o_m?; z{&&6T#6A>P6zs)O5rp&msqcR+)Inq3@xUN2VuR2{n-?P~^f7x+u!tCP9j{Ocm7bE?{DIJ)j&lo~o zBsk4Lvp46nFPP$oz>f=Z!%n1={$S@rvjb1POn_|98lpZPI>d3qPPfdvH)I;{jE##N z9&F^r8v-u-b_2q|OO30GY=Q@2K!=-nqXs_l9EYciirP)MSMLAWf~OmQBMeW&PP3@{ zw1>smg(2KNeNTh21#8fq!b`DNir>F`&&Ak<^%n?X{fXG^RY8n|`Tk(SYJ5}%Hzn%f zpk96AJPAIm`1Q>bG%DiZDI=z)&{l(|d@!sjD;!6$vec)MKZ{>u3bRa`nFWGu(E-~* zf@5KLaJH!wYsF?LPUAO|qK4Hhty-G93X@@*zU5&-+DwHSHy=|zCelW5>87hVo6{Tp zMnqGT`1Rl&60?(Iwv8F^K&t%&w!)BO+MciIL}W=3?V@qsQr0XDglC!Nc#Da_$^LKu zBqM9tF=Xp79^A?kgKBN@!?`r$6Zd=<{(V`lziwH7IXrP7CLPjfTEH5aL?-ImMTQZ1 zR+|XJLt7(MYJ#*O1j0ER3K()HVi_oMWu=YAoTd9dv7iF;FqVWMT-V){klD0{NWPO) z8VTxBXywi1)m0SxHZ}UNf8A^K-W+?Z9A+rXV zAbI`UjkwFVIk@TeKtSkr#TdW$y<~2oFq$CDh-n)}%%+qtHciFS)=lc=HN!1A+k`qv zH(~y&3xPhDXgph$qi>oDAf-l_sajsVE(huA>WBTuH}!psm$S4P(cUg~1*G zR^ole*UXpW+Jrz&%Q1GZs!YXFzrTRJeD?l z5X-sKfa2uSyF-q)>;|!6*R5Wmt0Oa8aww+QrTwP*k>n~*72%+QIDDB`)B`5t~kS`RV!*`fGz9+LPmO^wmx z36x5TQ!n31Qzs$knk=>uM#e|ffdv!guBDy08s$u7XU}E3GMjIq>jvBJJaTi*qpay` z%v3gH`B zr=~K+$(I_TEA!6{N$LwQR67imMo9wuim7I`mM$v2AFI0wa^m);wReexo^^TkPs+{) zFrnfp$};^Rzchyx$yXPMmSZ^2cb|p@d{*Xj1=e}k+eNS!W2P^D!}a!aR5N&ech0p- z#ItX%H~lEJhB~e4x#R4(gS{Jifp++{x-A0c!LF#~vL3)LCXsQvpHtR`ZsZ^o$Av0S zmpg`a)0Dh@rWJTDp!y-g?EJE-6VOZme?vofyaPfquZ&mP{p;W-O_;>-PgX^fIhFN> 
zkQ%W3yM#???XHk3%12?ZokK)S*wMB9wxe2#-8-vzsb#{=q#9*tTOz?1>x+v_ob(%6 zp?oJ084@o&Cm)E|c&L^_eNPCkUr$kpyvroIeUsoUUdJ@dt}jHN<=|rP7dQ>AV`ET@ zEXuRpSeQgzbyeYJCy?xZ=TW~c|=;2Vn`%8~I2%hzq zJgd*U1J);O5qG=M2Kp-Qjc$1Qk-1WCm&a$|Ghp`1pY|ojanwBvWJ9*%ewW;9-?oo| zSbg}ZCS4_(5Fb*5wkZXt7%q!%(JM`Hb;oJ)+pu0W||$Bs;iO;zK%d(tu2bhJ6I194wqSqYb-4RATB!qJDWOs zxZkQ%g?JTMh7J1L|1eGCx1Fp$ervI#aQ_Q_BA~1!{;yB5rYhDF$|o$iXb3~o!5^tT z;+7~JpusXlGG}~y86!J!MJ{(FlU1p&;$5&NmJF!pQAoz3PRV;@y+t=a9?IxtX34-hXJ;BdXuw@eNUcpzuoW(SP zcf1(J#}Y(Wfxi>r-T{6mYhhCbIQrGpvOg6oN;0xsIoo0b zqx{LbG!d!6(FT5A4x&|K#l(3uC2WqwLg^w@cJp47UW#^HjVHPiERt5wC^B>}F?-EI zYO)_LD~TA1Ze7L$S2FDcz5S=>5++@}f63Vkcx!$p`UA78aBq!0XNQZkEBuO~C2r!5 zvnv|S>HP=a{0-5&TECoAM@X_$M}#VKN7)`rM;|uycRR0hM=lY-&}bk!Skj?&B@203 zzL*Gv)2rgv742ljCqNEo|kPJyzBea1=6Mx$R^)4Q^sd4lJnL5Wkk?VnN3@zIMFJV zBkMNvVf1g-s*7QOBN(?SXH(gb%!{N+<~xUT?Ddpi=FnioPVgR4u^3`)Xe!c3?akkl z8)2(JU1cI#0FS|>gQcf5C9ks&u`&vjR`Q}VrQfCuTB2zVVSmUFW${CmgECq^Yx>wz z=_KN7l=z-{VuOG90W~7?c)i+oR=RwLiTFNGIz16WXl$$@o6&2P>r2!&Q6Coo`J*4^xx`+>v&(J-udi7D;Iw-7mEa#)e11zmPCxwCf z93fn+ct4#JwXPwD(h+sJX|ava$$iN|N#@v^cQ@GX(<+P zp~+~6)iuV!F~KBMSMy^f|NRC3oHDcTpB|F3v}<$T1v%j9@E;p+3bRs&Eoj*{`fxkE z8t{@{&@h$rALYInG{EI%C~m-+A0NPP&EdYmEcPh9#tkyMyG~{(7e9R~$Gdcyq{OtZ zxi=Znk6{D3l4>8Eu}{w6Tr2d_Z6l-m*QgfJYMGa3Hyo^M3a=o4?yR+Gs?4lJz#(MG z2cS4<6Ae#N4o_mn$W{1ZuYwmKe|LQK8S#+PM}s6OcjBD)ly_KY9jbMzvePb1_0sZQ z<$vf~^#Zpb1++kksZC7n2w;-{4PAe~<__3Q35T)Y_NK22+`=ovYxp5YH*C7?U$JCv z@{A#T{fTic{t{KpLCT+>-hpJ-t{HuNFU7@foI$Jx%zHjws5HVSMNTQRiDfp8WkjaN zyovWeh>*LO|AMD`oe+qwj~{Ou%!{Rt(Kl9Rc&5CtmW~;=g1IffxgC}NKa8DYbR^ue zuxBPtI%dbV?M!T&GqE$VF|lpiwr$(i#J0b@=iYnnIcI%q-S=<*>eZ`y?_E_-Jw;3g z&g*m98Fp^yZu=~_#w+OK1Qp;h`1AKpx=-?&!&g@){Hm~w(8Drvryt)_D_>Dq+(3~} z58>ka4A-Pz+qXT|y_MHy!$OyKYEjksFY4~!VBfH_AZZhfGBTBxO$Xs9=0OMjIkwXNAC4v2jZ*p73e)cjR=M}2~%7{Q&1?S z>AsKupdRuREMPM~vsgByzj1ZSNd1$TRy(mnUi>7cCGSU0Xny)a5t>pW#SF1FAnhm+ za*5;cDadd%GHaFjs`;)lo8Mg2%~_~fShGS!A7=k2LnS9q&JAWOj}AqDnmF$V`ty+ z0gjC%_1-YbY=LE8hw3@Y2W6lb>6mpVUe@@j3Ov^MDH6B~_n1N%8(0iZBj}*ssPI;xo)5k6D~Q1;{rS}5*tq${oMDn@glt_ z)m&5YM{w^vzTCtUDH*JYyuRyz)rB%*s7<9ynlxpEUSE`X*JJJO z!fMH$MpSo*R@Ag!Cvyt1JolTDE$K8ejHeO6Rod}k16ELsOxJP)9}6cM!`RTf_pNi@ zmYSnMj41P{tut%Ptkfu4p{Hc`!=(q*FRDjxNd76VeR$MYNIsQIIM1G{>B4(*g3=SQ zjMbekM7I!8z#uJ$qtF(e-ZH5w43cNbmhoe=p9@%L7Z?;e>R$=;r8!t<_b*iLFdhGt zMH#TpULPOHChBc%Fx9G|s(h7fhP4BELihRTGJR-1t=)!VwDPnjAnS7+kzk&ls^Dct z{Ym_G#EF!trin|>^lcf!rUF_>+*icvKOqgASdrvSA?DBt*52P}Ibj5yw52U*-jA)6 zVN9r|aFg;E;#wBf|VDwG}DJv`wAHKRUciGtivyyaAou4Vxd(=^uBe3U~A6mQ>hLp)@A24 zUoNiy_I&hmp$`mX-UgQwgZS^AQZRd_C%`uZ75c$HcO+?I+ zA&k>FE`^b*P_Fd!3@HM^1?N^zWmR^IT|d&(G+?|iF%tf|BTqI4MTvUajJb5@(22|B zz|G_lJLmH9ohYrGIhR3SZgFj^{v|&oYc4MVa^Bv_>E%z2NFvfNlhsSHB@w=T*Fz@* z1j`){!P4>^npUPelN78ztbjjXlAIZ*A0f|T&NG@gD}5ZSgnC&(EAW?^!v+LZr|rRJ ze5kMrZE&u9h<)oc0|%!>x%y*8_;B-+5$|kvNXNYqh@YdO9ox(i37$8x;RAfV{M>QZ zx3^xR(5I1p0odMwPEIs}9CIV@^Y_7}YK=j65MU2Ttn}3O4h|P4{ml(3j~Ck1~PFlO6C+B> z``f>>XCTn}5_DtCaZQUW3AwJ^02`kvAQ2*sDRN z1QHn8N)o`7I(?%3# z1YmyboRJO~i49a|_++Ki)m?3%R_Y@j1yC$U%+Lc;Yx9NW=)wo9Sll8&RZTGgikOVa z^n&mGOUOjhP(rM@QRe~G*Fush-g{D&Y>|%)bU(jxV4Z+j;ETpGLa^eZ$%*9urZfZO z+Z7JTeUwf*(#?dpwIm+{MoH_41Pj}5n8r5~yhrC`w zP$<(Gp$PfDdc4q~ThzSN!Z)vrbMD;y+-lMo{yCLYr!)~MYdCXS`zwz5j)&$$q-K^2 zDdRoXfE*-4iUN@4;cME@sgmQ3^yi3&eiJ*XnXvAUxSbed%lNRL(I~DvEm)rOPzgNkSU^j1>qo zrz(e&Ta;%soS*Bw@BHot{v*y7e#Xv)BQo%IISAK;~^QCKR0nZ58QlH zba!~2phfY+Hyj8|dz(nSxPP%ai0t%ogZhb*&)&jNvaNuP_p=NL+qC8b&w$%2gsn;s z6vb4Xx1ouk(2Cq9f}O>*>YEOk#xR!*1#T zHJNO!nyK5E1!t8c<_uVMEHJ z#)t5L8%j1+|7yb0j>`pq<0Py6p7fY%dQCNA*_0&&Bk7nGr{QPl1S0>&9_ei?p!#?V z*!D}pN!}0976f`@Ej_CtBIckPf2A3ioz#>|Lk)f_1T4UTt+{(T7OT^UND$YqD=Muq 
zo0NP!CFHlQ&y4_D?CP(1W&z#|TAUmh3J*Qbz=)wWT>}8adAvF5wwQ7ok3HCtx(vtf zn|kGOD4*UU;hoB;XUy}N_N@hOMFVQ(10&@gn2~@8wx}$798GIj4jepv8BzENlcAGG z(3?4iZ7=&TDK97(l>D+(mc^OHwFCQ}{w2DUufj0V|-aU=z7Z?f;D)f{yzF?Ya5 zS?z>n>D+<5LKE3bNyRUU{9#_4A6t?*@bs12*%6gpz}v2R)Z;_)9s`Vb*|J&TAFgY0 zOxx+%OhNs&;TE1=eh{K`f}9l(Nw$*of@r-@dmRGfOKT6a#jb)}m#)%XQvw5pKkE5a z8I>y$k2OohN8v1$&jue31+tM81{<4Gw)s91fxK&tWO$LMx>A80#k+6FW4H42yx&j- zZ=;Qi?WImDwK|$erJjCeqKg;FG z3AAvjLtudjX1v;X{ZN4}OKOA2ZqF$6$~oI}ZZPzG0>$>`nCJc#)V)W9+I`rH_=vE>7X#R#vtS^@(^0#=uQ z;7D6wt)S_RrV47BSm8#J60QbCin`^D^Sy`Z=$%S5oJ#Nts}8wU6~3V)lGS(rKydM1 zc$u^=$+d(jL`LLnn97*S$jwD;4d8T&4{*)VJY2~$fiSt_CI%2R09KB$3ya7OpE?AB zNTDWCo^8^J3!$#0!*mmf8Jj|>kN${rLCCz^X^w3}jO~(9gSs?JA9Bzt^lXxZ)KtY1 z2;c3tfJ_Gr2X#W+3`b)#j_v8w2 z`aE1fp#Mgqswl4UKke54^$rNuh(Y9w+-5B3qGo^-V@5WtoU?613&9bGd5@_R1yvSM>qYHtTciSbpLUGz@;V zD#1*3e;i^bUFHcJ(dTozJq)E;5|lL2Z+MKBwC8aGa5t+o6zi2M;?`4o&m63%Qk2U} zjKnF%%A}?xkT&;H4q{ev?e)Rd?VGh#SkB2g8d>#Gn|2QqI20K7CFUWGiHF6ukpv%twFz|P2Y9H5n|_BPB^bI#NFsGaRr|c`>Hl`Y-<5PH z*)?}35)_vAAdp1)a|u2=hrOgHy@N5BOfpb7h{J*ZE><-e|2uTFi%)yD?|eQ)CzDKhm~6{(y_h<=Q*NviV~oCCcGJRw=8i zz(|PIK)ZugPPe-O&iym5B(-GIBB7WXCOz%Im6Z(HGS0pl4@NU$v{aIFGpm-u!v_N5 z(jf2S)+8XI9TkKRI52BG@%@d#?%cXnK|`YLP>bBjUwJ{ra|w2{b#mXZOaiMQ)}R_8Dj0 zzTIi2UiKlF{pK$d!5~R^9+mUv+G2B#ZyG$=ilqzg+YKb{E;ZA!k)=#26}qtrGa-D2@9<1LK7X&9!5lge#LDZ zVu2wcT%zRQ`;QRo8WPm?ZgXid;f&l_RlpsorsB58xRL?Hf%~XX9m8u-sHl7-13}*~ z$j&{dO(R04@sfxr=`qM9Aw_=9OR3z?_9lr7A{RPX$-^ugzZvUr=UkKA!s*qatUz4J z_9XmM8i7q>aWuu-@fpRVuLVbh&EbsKMcZ&4g%8v4O`)0cU36-FnFh*v_Foxh%n96h zf?|k+ovi)ICH8evz~)1p-sUb87@l8qP8Gz#r-a_Qd+|;Epl*-@=))jUVnm_taCsgN z2OdUU?*2nd;OUQDd-92JqI}K@{NMbXtiu01z*WWVkVR46R}xrim_ZRrmo0jbT;c`C zkbq)g)%;mM^|5LN2XYgb?@dI;!Yl;uJqTXl#KmkzXq1&mz{!-Mn)8QPnr|R%#g!|F zD@An-Bx@MrZC(C2oVZnPUf-pjb3J&z0=sgX{tOzOCQfaOlEEqE03hCicI3VUBHnCM z@I_=vj@gF}PJeNo!upOE@$@;_G5E&r9g8F*&Q7x(hR1*UuwCYyi5z+Jl^eD{@GDda zybWH{lnkW4I1~jR>-QUd1VP?IJw}}0Ulj!cvbh%=eiL9Yl^ZNAAk+A6CnZbSmd1Ct zYBDi@Dr5xe-vAkoOcOw~L1tgP;R1-J`9i?qnz9ON`;Tg7hK!_Jc8YPMr9Dg)#*mbq zZNJ#+W|pz?Dzk#w5JY$Uw{fU2LO$LY zdmAWt^O~|=u)kNTGzcEAOCojOGZ;qDi_HL+au4wPG0$SvG|Etx7I5%{k7Ram-8)rR zHOI$fbiIx$y_I?Jm8Ov<#3aLaG*G|+@qSq4VZ`sKzXhJc2Ln`{sh04ts^U#W;ZGaa; z&Z|Z!0aMl~xWotR zihSb5&-V&28vOKTlPwleb95}LaW&WSky>MiBnTA0VGG}mls`0fVtk|Mx2la;;yb5rgeh(wu!A(f+%xDeW4th|ktPKybj7l|9YF4-UDUVo4Ei~KVM(JL@{L$N za)Er|-EWsf5NX`4e;!cMoc3Y2(En$Ji!XfX_Zfp&uH)c6CVF~E~WI2!gI*s^LQvFZRP`F!M z(ugYaY{t$pf_j1b-DTa;Wy7%SVv3xO5Ex#?-dvEbLs7$AqnAsyiv;)?i!hn4Fhh)@ zsG3F>Eukod&c5TW)T0m)3R$NAvE28avgFYz3$hYHyZGh?JglSd?a6Q}_9&=$x-lbS z1s7~Q=B9OnI3S@XRzCAIg3B~d8I>2l z1iyDJ%tvPD*kJ1fl&Z=9*j%cLdqa*}qIn#Ub9n`SJH-?IO|VmZ9VR&8yADH};uGy3 zh+(B`leeRpgm^)$w<6c4`M|@?N8o%-ngEst@%jsYDMub;P#e zN;6}UywH3?zQvo`W|M;2VRJLfjBqpYP~cn2&6KtQBHZ*LeaWREs_c_<8H#`WX||`I7<{w{4=!N zv}%G1pyenwIPawQr`bsnJ5Hzl$VFYTd3yxzSZ^10%M0)S!ZZAZ{S(H88{dA;lsSDG z;~D?2*)o1TJ9}GQeS5+Ga&G+>+7K@{DgnZeP$t9L+EPIZ{wss~={uvd3MiDb#%qLTx zThCJ_RKxASxLT^l*;qG_(-w1C{9NeTpyMa8R6X{gcDwJ!-fVZq&!v#UC3L9<*dUv! 
zmDq-p%YSKX{q-AKkovkHeU9_~{%mgjpLed9u+;yI^D3(=&dQ;@)8>J$ke9-o2kDKX zp&3$z%BhzqV^iXH_*+xxuH=!j1$B0NZFO#e^+WtfztT2-Wr{c`z>$;io>)rWNVPkE zns}Q?aQ}GQr~2)g*E3~J7xr=(wDmPdw{MD~pO(TgaN2hR(*9Ol+Y1x~L+MJcpC};3 zX0)M?SkQsFp(l$t4R%5ds#d708IFx z7=w@lCv8lQd;u7V{5% zr7l0TIIQedSoYaih3+BmypiiVa)oY)5WkTLoPr*QtL+tijIGQ_T)a;Kwrw<8!0CL% z4Qc>EjiK|b)xe~UCg|*K0rnEQO2e0N<4wBQ@5TA_7Iau^kGq{xqNXQG8`;A%a8&bs z6CIy}VjIxycAr$L*exAd=*j9D$_MALdIMtIQuUp3aPp1+#mIe_K@wM4fxj-j2I;+`sMJ6dp)GGQ@D>_J&Iwmu7b85QR)mDI zjz9np_Nnw+?2VK;k2vR9*uC5VSylU1llMZ(cc36qxkmRFX_JCkqr%W&#} zIKmob4nJI^W1*x4esT=L``#w;iyTc1vJm83;k3Mm36wuaG`E;cUyp{rRTG*%*#lL5 zzAsp#mhUO^YWF%Q#YXb3;YUKdgv2baqMYJzst$F8zdM1Toq7GsD9B%bt0ikO)Z5R$ z)#Bf&@hK`PDE~KVrDFb9jn5N~n3_0Lj$EaVvM!%AyDFp}Ou>YJ#F8G3M9ZQ*!vb!Y zF?GGkts(!%9>gR3D~RX0lyvdL=K^a~oc6uMqmmr7mFcN*K}uIcd`7~SDgZb(V2$MzXCc$u79a)S zq35(w^`I*v!5ZXQTa3~-nWS6SKOAkqYph@&kG&*cDfgVKMh>tjetqPl>4b}>Jb7(|kf(CN4QH$)2a>ZXR$ws`1M7Am`ohENa z2W=CqC(%?m88L_IN%}qMA2q&-2I1sYI~|T9Xi-{=7PoQd3j2KPF?NY+-k-)skK$Q{ z-pY$#-#!Y(mLPcSsE2Isb(hefviVbdoaC8ta9dz3GAzVPv&wmh((Wj|zF}v|5=Zvv z9`aD#@pZvR`aE2z?7G7Ajy_=h5uR4wSAtpqhWST5eQY zG=iv{_rr{=?y~{%>1t4z6xut9Kj3D7Qu2$V_}T{LD;ug(X1oaT2>5~Z-u{G}5|pTn zYl$#f6me@AHt~p+VcG^Bino7}yvwzOIO8L_rdDzfUBK`Fff9=95nefhb@;@a1sX5( zf|!R;PrL)*Vk{#U5OLgs;R5g&=s7tZ&#+%gW%aoZ%W}=%fNvW?h+Q;8>qeb(hI$iP95X`IR)QVyDHu)71E)bVHCfbll z^|6Xrvq(*|Zsal};tL_64&SrxFo3o5>PP z2iKV&ACq2uAMax#&Kle7xhKh|xo>9r<~ zz!JtKis@Ge%SE&pNb(USUdgp}ruLzE)I*8fg$lkQ0sEnHnfKkKQy266t9MMxv;Mbk zR`c)ZMW^&q?Npg3i)XnSUI|epa8%`nk){m<4`4s1a2T<@X>bzK#m#y(T*YMJT6!a_ zA`cxIE%$9xIHFEZc-CeKvkr4zIWJ;Pn_K#!0JHOF{50iU!;VhuhIhvi`f5g_W>s!H zKU0ObidsRzRMpqxmw29gUDj3`mkdWYi}*CEoFB9ZlH&j%{nvzQwKYQGa# z{j#}tnNU(2wU*|eaamoEZq$dPQ1>TMNldtrI@V5{ACszCtcPXly~*N6H&J^E5lqQ_ zu#wGS9IqRzPKz%@+R6Lg+}0XTe_`9BFh@`rKJhhPJg$dxEVqX-reTbqX!Za~zP1bq z<3ts4LE@cmenTlf+tCVouGJbUZiS9hBg9Q}+la!JDdV;Q$5|n=vQDLQwn+ts@4ek5 zqA?(}cWI)5=U5z^yOrisi7;ore^$3)HMB+^tJ|sQswNE5K;aN|-PO6kP$fo3uys2<$`6=CVLUotze_$xy0ek(P8QU|00&_s{ zI!3IY&YHiQPWctOQ{@Uv3#BL{(Xn0SIeU3e@k;Ym=I7*~6gz6?Al}~RS_^COZbMWi zXTko|$K0+zsG|v8_l7naFF1<}Gghgc=xP_mdTPT;4;Jq9ysD6s97W;g(xBdIB2EXw zpUNbEAC>-=t7i!w)P(l>OrRl>5hs55nC^pg0jqHtW<^~=* zz01|SR!KjY+l~x)5I0^rxyfNh`&UX8%(7)*2&kS16k`X-dCajCI@=xWlI$Ncr)SUI zNjZAw+Oq1#_H4vwGn{T3XfrG97i@2>YI#zf0!%hqNRC#Bcc`_;N7Qb!9aWgtcxP*a z(v2Y>j0e?HkI%~wsOnBc(0oM_oW&Qu8kvwTb41A`E1JOMq&F0(&({MlK?&N4PDo##WMpx90#r1eA7{UMMfE)c0yf3 z=T5}O*%>^-SK9&;@?9uQt%UN$G*$h{tz)u z)Ru2qs0+H?rtKe>_ghu9sAcW-GGEBYbVnzzLs_JRa&+2BX|W}>nPyBk&~7$3$A}x( zm3{kl^+Mucs!d6<`_T~ZJjBX)0jJ(@1xdrv^qOA|CM6xPZo$^>h~#O;-T_nD3ApM{ za}L8}zIz>u>HOknQOth6MLVo+O}p+0wUgu27OOu5!g`G%=K*@|Xk*kG+!`djvxP>k zCK_`dQ?PXp1O=_g%4@FE#q7pmB1biWBN4dBYRJ|dym#37p-f-{9QiDU>~^Cg5NRxf z3mIi4Y(4e|lw1oPGEzd8xA~~m_Vl-iV~hJC5f`TnJc}#2guaL5Tg&PZ15EGao%;R zdkB$oYTg5P3`JjP*A%tf^)hSZBuYuDKFl(i-mf1*7^o(IQ{ATe?KZZ~5U2L1lqGUw zc{&-FT9S%SS(G;>2gE(-y^FIa#t&iSL629TU!;Q|MFLbMHjs@mzlhvE)|0l5YuS4~ zvIjdsxkXl)J0o1|GV}V7N=(zS>S?Wo zLCHskH6aV`-~((xjr%BiSbAuZc5Kvkur02bv2AcGoDq%tHJz1SQ6qy(`(7Zfc)Mah zjq=DJe?+IfE|u^DjK8s9z|ZOVq=ZAjS)cDLhDeR8A`n#qc!i}Yhl-=0&-qD8Ky**@ zHwds%y$a81q~>7sKwoUwEMGcIa%kXk0C@5%vUaH1eRwmh5QzC}izeB=aujFgbrU?~ zi4;0wUd^0gj_OEF&j3sGOBISWNvcFpPLttQL?Pj<`1@0X<-_X{@!x3ekxH32?z1<1 z|9DU;=Rys3N}qhN_si-*>4a|F{wbR2GGj zq9WM9xyEqypV9h-CoF@%4l9pfcyOOVo;L>H<4bB;bIS3-@-(isxHiX5$CfrdJ)fYr zc+qMx`qHV-3BA4|tD{&?lE(%}86xn_68R>HObZP600ci2k0js12$ZSI8S~7$TYh3# zN&ILF;rv`;?6OKESpjTX)7+Q9e49#I&}1|qX9lT1x}`Kx2uVdRJ>gTSC-!Tqt+hzG-h|1=E}VMSi~5qKR#0*yY7ZrX6n z`n9zs?E@qW;6%Y?^pA@E=HwbK ze>9vul27nmi7=f94ZcYi=EvW%l@8M6&)&?km?AQ^N~FBdpnX8r%x$1mymN_(ergO9 
zwz$l}6(jBQK>e$<=^*=Kemvw@hpuRz2A2W{iPrj#bO8G9iIVL*gE;bt;hkq^v!rN0 z5;h3oZ%P?u@siY)bI`@@f|z!alOuoP?d+0G9z9^g zA*0=eszp*4U9run(8O<>GGR62Mit@y@*lE$O-|a>i_dJg?eAo}O6qd|Y&vVJVhLk- zf+MjKYxas(=cr`oTMGQ1Z%~lA_6R7Y!Oh&GxX3M>GTXkpCfDDdLpyk^#am)!IyG#=aaP=u6ce^u_cQ$V`3H@SW>z3}8 z5TiK~Ri?(< zjpE}#NZJ0H&!xwx!%U^VHe{T7VqEJr3h1~6LHr#^wIX_l6jdUS0dw@%4cS^%vm&c) z1I@_Vg!8_uGP|wIA__~fNfeA{EMo86GBMb3S3#E)9Swtb;KVlXo_H@PKop4-EFfzD!T8*W)v- z^^W+qz|1{Y9RUmb<9Mm>Gt=ER-(Wk-t9xNQD z#k)qa+zg?!qvX^WDW|RB$i&Oyc0m9Y$2roryUMTj@%n?dD3-?Mequ6xY*<|SfF8F= zTo=H1N4>u}Ha(+7V+Eu5?i5?Hf0;$4HMk^~6yH+jMv2VRrh;6;LW+DAls1zG8YcFT z_4_%(=_>{xH>aiqH`A3swmZh` zO|HRQF+9Jgkz%qafkYq4nUU8;Zh+?$&+Q5EdEy0CdmO^1tv(_)8u#w<5!_Pkq)tw= zO>K64N0x{M2M_F}v+lt98ujJ?=`O-dyMtzQB@%jzqQ|^}KQkLNCBbJjq=&0o{aqn} z`r;=Xkb8j8Obr@a6aU)oR#g`z5kbYOPD`nF6a!{$qZ@T4mnunH@Y{M_tFSk)FjgTC zHhLcPjnwy2`VHoPFT1}gWS-QFK`ft_9rfpBNBw^l?`7qb6n`iyiLn27wyUIR{+GC? zpxFXhzplLeL9)HBnI$wwE(j6{0O~_gkSLmLL3R{3Zk3i^e#Q1^?q0m(JGs;9CCXi^ zX+{!m+2x;}Z1>0g!2O`-_3`cw>-S)hk?yt%TBD6Inp164py8(`_p+-;FyPkiNc_!* zK%U%+iTLEb;%C!bOolTKT#JC^b%kWgQ^JL{Q!16|9+9EK#qjU!scYrUKZ56{P_2c5 z-f|EE%MpAS*1s05epxtfSXr_z{AKQeBAfGjWB*FRL!%VQMOcO9x^&A$2$^>=KcT-K z9OsZ3N5u2>zf`#*Z7{-F=A^&=QspWY<_gP^MGRgVxPEe%4B^3s*kof-9xS$fa>(9Y zJwz~6dB>00wnBm_1ACF?I_n1z+=1!_)MbC?M3aFObW?R&5j0Ga9Z#h z#IkA-OzEtNrY1ia(|5~LJq1$Ai=1g6o*hN0j_%BNQ+AfGD5Npd=G;i6Jx~0RWmeWb zwhueg0eOSD0nZuj`t;#)ff937Va)C1f9ZPcgv~!)xsD=kCAYv+MSBnL1^YZL;fMt- zi{GI`voe(c+SvH74?~7A_xaZZMkD+6BiJ32WJ~D8@X;$zyEKtPw2MiZrJ`e{Qlyku-@ugnE!70{liQ6FU=37 z|MEr^peHPZD1cs|qg1oCc>hBiRaY%e)JMpja>Pg@*wE_Wh<5Vpa`Dk;03tK|;vMJH z8s(FukR6bIJUUXzUMLQixfWWG%M=E@CE!}Ab zF1P>=v?15~&#rc!*4Y)SHjibKU-KB$3`jCXP%k#$BwRI$aGivHvfWkfI|(6(ZI){H zwIEZR@KT7ne)Ey3m>PJlcSaBSz9_Y!f)J=%BqJJ{C4(KQtN*XjXBmdgFemh>XvMOS*G^V(%JUX0!_7ku z1`C-@t!FOcnV)!Q(rjy%c@QGrSBJm5a+?)hEVqKs0vw8jU`La((fi$@=$TaOjKph9 zjq3^}GrM_(7ZXj=gX5@%rc**;t$}uRvsAvN28LZcw#hjj#qNGW&21{lgAeX89{k6U zN9%kWf1>j0B^plD`b75XQHUbjQ)aoZ>xwqjs1yp#%gXM<`|tNq-n-k3h&?h+rfzQGN`HJmFsxOBh=toOSL;&6b5FE-2>lH{5FdeSy$0yMj z8zN#1@W+m}Q5q^V;HKGQV$49j<^WE@Jo$v_3v`h0k`P2nI;%grk^H9DiN8$oLk9F~ zGHcg1GK@hhJ29cKVtSuG$`dHrlye*t}dh(1dE$ z5Q|7OLLrz#Y?M z^q4eTxOgH1n{Gf13dKW73ul*f!0{@kW4NL)`?%SmqzSWpLniA~jdF+Blfbx2i{tuClK0khk2h$iX=_m8V!%)5hIiQ z5has>dMGpe@~zp&?$Jk=LAs~TH(kRA;?2u}pZ`hWbN-`afDNfb`4VSB@>5vi_WHem z@am^3qg9ay|AU%A@_SHd&wC+r8$?*((e`te#-yapnoX)Mb zAV&@|4K&3|&wiI2W>eD0$0!6Rl1mJCU2ibZ!FOnX5*aSRG7=})aCH5~`&;;2I=IwwEpK23=Q_f+^_A0=L= zW>HUSUP!n7knXoU$RpV$g7zGwkmqLusz_Y~=ohuqRA(>NNV6ERwde;t$*h}+u+L<0 zu-GF@*1 zbcqe#o1mQJE{u?!&D*Eu)HBobOsz#PZFh>ANZEvP0L~Y?3&KA@rv8abXXI#364m^3 zLJ24>ED8*kE}}<4^k`L+CT(UH3l$7+PR%qGuYY27v~|6!W73raPrET#$*G=lQfL8B zKQ_sc!1uCy4brTo&E##>z(|nR`PuUwC2=3OL#)o@rl9^riw!k9p$+v1?OLehlqKQ0 z|9M|XP<+Um(J{%^nIn<^&lO<;(bXftFqS*29abufWeu0N1{J!&msK2dhR zfl@&^YQ!8$rdI5Lh0k(FuykNKnRJ(kT0C`Ml@7BJ?-!k^A?c`Ayam=d;ncUlYwW>e zE9!pN=Xip5=&n98L>{(ja;cbTY7Ga})0o_j%#41xCZyD(;wDh;-<&akyCnf?Mq2i- zP>lS!EV<-GIK9*XEj=Rwn5YriLGp_feb5TdmwpBk-GN-Ta-g`MBBOo=7yr{Ilgum8Q>WiN?WKKEmr( zHIMjwEN?;Vx$L=vc1^M(cX67+MAih5-wP%Wyr#zf=%H(RuZJy_A=8{9+*f@rgEOv5 zOc%sz^>HlB;an@ZqLgT}B)J%KS(SN*NoxcTp@)HP{VtzE-bm?~vcs%MVr59Q1(rcngOzZn18)+XvYp`CqI*N_`kD(PEJAgf2v6;ns%!~ zDDSM%xpQcB;>bSD@jB!a%BGlE^^z-d#o}Fa1QTFA`4(~Er4iK4r4Hhmzzht}FOl4L z)ry?Rknn8{vreASOxz4<@d7s0^UQ$zFvo{OuEvWr?Xz4yuP4+lpq6AVJdtUb9<{HH z)NPN9K9X^r0cCh?(n4*c>s5nJTWmK++J&$V3=B}Ks}(r5l@lyUY6NtpZR#jB!f3JU zEd_`$ETI4bM=R(mSWmD7Xm4O6iQo3Swl^pfD|9ZPU>(+Bfiejo>a{MuT#Nqg`Iy;J zjed3MSvm*r1?^2r>a_GpdYbIg!e*O)IH{UCkZeP_2uoIpUgfzqzE^&#E@yOi$HJ^%)`yBPsh9 z*b5EM=kw;3vS+7j91hAO+o4q)RvT>djK$i)(pUf7_Hh519R@G0RWH}w<}d8t*A;@6 
zAjeW|p$jI&tyxzU5Gn1hr;C%B@;KIW@aVJs*m2fJf3GxlDpEweuM)Nh5G86^>(VLW zmWeaj47@`#G4_Hg=U)>?gjQD8dU!c9rlJ)rAe}2KV>#(5od0gsq!`boX->HqYRS^J zN>!H?g9zq%C`|Br-)Yf&@5$}FN>_Iit!UR9;h^M?iY}renE(v*c)LIs3vWtt8?3=!$ zZJfkiBu`mu#muRm&l_cT@+>y%q2wq?;!Xe*iP5{e$G_iP>N__RWCYY#3)Ipy7MMqG zRp!%hn#MdFedLJNDNxUwCF^nzJ7BmvvfS$V7wl=Y{@!I5*i?8s`8GR<$pc|xI6Gt_ z75)n9NaxBUHkl#j3{5vR8}b#SCzP4d;0h^QELr&{5>gXii*Lo1wms8cc-hq#-`w0? zeUH-zPv5t&IXKsabHi|)Bp2VP_9vx4gmc)Y zJdL3TKu4RT?XLlPFHV}w~4v;oQf| z3~dos*A9Utwmz>iJLcDpA>^J!IUO*CSEQ!vE$E6t<6Po@(8V!UA#Cl$g*z)0wdLI6ic@P%RU0sqGTjl9Am*+o1<#%N~@ z>zi4c4s^mWDZ|Xb}UU(H2Vw}EoHt=wj7J|oqC8&Omm~&wG z7!BZa-1VP32qiovQExvTkiAqS(?NVyg}K7iLBE{5*8LbKx7me#)!Og*Tjm8JTdO5f{5_@;iQdj}Eu1?Lq+^j!w>1=p1*!CO}_ zJlJhCj8{*l@~d$F52%QM0c5C+0JS!jq@X%%8<{GCg%z_tbOh)K*4zRI>RnKy3WB}) z{vPn`rtUDz(b%(b5%>5FYzB*+C|vd2Picz`$W0k56MvE!(*oD6Tq>ePH7AmYdpaU~ zfZo}eD>Js1{OVj^jFNskqJ)N&qx4VfZ%k&uD>NPhVxQ3Rh-bCfXtAW~L!aoMY|C9O z7$A$9f^wIXdTKuHGwgJx#ART5w({h_$VCx-`TJlXi5w5V`QoCn_w{YwyhsuY^{U1I zLrwy4hDJUmnHZ>>tb3%pJ_FU=T?`hyo_Avds%nf#52E)2_LqcLa;O87o*#NSJP^ke zbp&f-dhw_`2-gzUs24`@mI^kMzEAg-BeYO>+JGw-huA7uNH1Mv={^k7fDb7R$CYpz z7#c_-_bkM!Zo{-P;wMvW+q^LWj(U8$2zypd96N3LN^>szvON4cE7^F~7WV^^DRAD# z0grN&a{eVv&(AQDo`kF&)Py=lgihv@#w;IpG!nj~H%RX2;2o>}6$0$1(12o3xAPA! z2=9vA2}!sQQ~rO9ol|(EUAMJ6P9+tyV%v5)w(X8>vpcrgv2Ay3+jctW*ytpG-o5{C z<7j^;bzaxC=CjtEW6XPqh>C^;AT?Nu^s%hH+R$1^RkB7;>E^9Sw1w0SHj2_D!!}5y zddcEQAENKp&K?mb4y%-L1aOU$6_sWHnFV9wI)aK=&o?sjRh-1YD45WEUI1nXrwA`W z5wU82qm8+2MDGxq=w(;2`W>mw{Jp4Q-YcJ~u->mtLKg)qq)D}cC}DY17SLseL_$2W zjkTq>Y<3HFRV&G5r?iOF{_6EpK81#`bBPU>;Jk-_M?w4mcdK}k?eDHrA3VZ&4}7!H zmhSt`F3&wDNgOKfsac^U@L;X_wOm}DMs>6(K7%bfVx&NcIk{A|JU*2V@1t3)v#dht zXKqwHeQKv^yJ&$EPnaTqxp2v>CQlmEl$&>wNM3emvF5REe>p?o@8{} zDDX{vVtVQ>Ln*n~nk^7Ks9vbtaWRt3vUE&hN`AP`%iYFG)F?q)9Jn}^F zTIkio2tjoa*6-I+tf(nC8b*{qZE-0hyVq)z=<#cmvP8E4jgkd}s9}^5OO&IE3NotN zT`Zl%xjYE6k$(Q>y#;>RguWzbrCr*6L7R!XP~L;A~{yncC(gP4h7stYsrFh zmm6cAuJ}K;4o(PQ9?__-SZPut=aIyzjh>R=^~0MXxJu9Z(!n|(5C5RZJmJrVHN zvLIbF5U(L)fvO>NG14GXmrqEo$$Y}U@xOXKVXHgFwU6#>B&2|(u1~{kG$#qqbOnYi zNQ^43hCUAAyuV%I`bJg>&J;rsb59R(Bxc2LgE$Afo>zP4SA1en>R(tbISbnG_v$RJ z(DSr18@XH$)^Irwqzq3{Zhd}HU4!nrWy&mJ4SqU7%Doug{qFKhD4)o0a&+$%C%k5w57KZAYyZCv}@S}$qz>A+*rh~@F8NEr`u z@_iLH3JL94LP`+OI0+ugkK6NpV=Ew;1epd6*O;gW$`s$1Qk6u$gujw(;!GWWg?M#3ViP8!)Xa^n`L(@XPz7RH=vIKG*x2a=1zbOPx@bQZc z2J?(wHxXkS1%1QBjHEGE(u!;2(91@g2j`X&yE7Ciyoy6&A=XC2mTL0_8)6mraKM9F zy~OQ{*u7kY&@Un#Yf13e3$TyN^5WwmYGP>$ZmPlCqOVxntCN2o?Gb+{>}$QGAFN4q z5h}&r!qyQqRLaEGRQ>zwSlNG7p_+4X=tXcp;1{kIM{mHV=VoThO^rLiZsoNvJwYdv zPo7m!JTvE(n0>976*X|a`^!OS^luaE3(U;oZzVOlji+a5rx~X>sf-4y!!6YG=;7p1 zL&tlPEVDyV;3Cb*JVIx-v?@-+*}l8eXJJxIMcYW)D^Buqc zb->y2P+rj#ltj7Xf#=DKUAW^ya7g*>8^H||q4|}xOy0h1rfCB|u7MHIOw9qH8fExi z!u$Jh@>E=4R||dNK*Lq2>LMgjQDukLg8;3fyxcr_j&e)b3%r`4Re^fC9fGF7lGlT( zT*>$Jwsfbc1nYU8I4+X2g!!uP8@7NelC*DmOCfwwM&55*iEQKShK*k!c%SFZE7ac@ z^~I(^c2T=X8lHx~tYyTHJGX9xG+8!u@(z3(osmn|PVK zio-Y&SEB(LyZ5Vq%$f7x(b}0t-|yTF>yLHX;8m7hBHWCuqRn|b+L>&t6z(x#3-gEe zonQGj)2egD*JHlQq1@K^06n+9a+=Ys#M}rdCfAhWHsB#YuJuJ)@$soo(>_>wGnI{% zxEi`)xDt?WLdQ1G-=PH7JG}c< zDPu4I?GAX}_IWMM@WZdF1Z|4u>sGu5*Iw|xR%@?af6MJMbHjQgS)wp+e}j30dcyZSo* z3hI3&dH;sejbFrW+Bx<#mc0u0OnoHWhRu5BNlJ6#`;sIeLe7`AZ?Th9+57WhBO>zr znWn>RnqQTU^*O1Gg?4ew&f9OcPSH#v(D+xhc?rK>w3+_0Q&Xw&6s6O>W`Lt+gp@@O z-rYBbhx)?AI%GMZ*5F}oDD^}ALQ|L%cWYWV^OqGV3^bj7ju zv>%2JT>#a&;|}DTJ=?gB7`AW$4cNU6{`C}rVIqizl}qkfCOr% zS82+z!^z@js#C%(PGH%p)%^^In0!SxLk(jbXX*s~&SQ4P+x>Tpdr*h29i<(?sY$*b zTF=NeRIk_N{kAuUB`^@J3)9|32hX6W@;CAEwhyZH^z zL49WmNZw^A^k*L=50ShG?LpJk2o(Jsc|jUSSEtSbGFqOiOWF^9kVTcIP_NyfBJ88I 
zsUs{AFif5wi27x%I;#P<$f0PLR3)T_)>n1)cqvuACZ?zfnlPMlDu_aqOU_XOUt5eI zY9fve+fa^Jv5;IHW%YW{2ODFjdV-%=q*u_+Tf>ZtlHO*cJ`u}H(YTUrzeY}#ebjIe zYZ&?u=OR2@p{h+P@MBc2*_UXL2!>f=8Y<#rt)Kw3<^6AIo&gosy^d50@y)&qq zAL)~pY(6nmUS(&Fb|=FP8N5TLw`$O>_a0zmYA@pLl-Y5`tU|nM9`knG5?uQBv|k!? z2-LS1)pgQCLO$U%Kai zW`lAtauTDj&E5C~eU-Yc-w8qwf<%cY5o_$5k>23ADBc{a>-W;@v)@^EpbmpDAIRzs zOlzX0vJwvajA}fanBufQZz0U-{?Zz228LOU;egMH>Co^)hYGaY99$pF3;ZQ9m-@mi z(pFzL8D|O89cZ`j5Bgk3nUY<|?8j z_mT?oe2{YfF2@}EhKHfIr^NsYqr z?(~(3V}MV0PYfIDXoedn(i1VG5tt|ZV?-&DLO%-@CPzeRzPczrLde`iFZ05O)omf+ zr>AFA-XsJAZ7s!rbMtR9H3nBf7g}-Rz8nY~qehgfvEbKg<$d^yqzt3w-iY^O1mr0y z`T2i3dzP;6Dt{Y<+cDZ@e{y!#XQf}ftLD?AS3ffw*OJb3$mM*IXel#o9*}}6{s{*a zh=sv4Db!BnMR%*9Ak+PjSL5yGK%Nw zq~cOghvp+n1?^ckkrrAtiTeT!Qs>XmSR_U+{ysE^vCZ$##IjmN5Y|GgJxHDZ?$I==RSBb!lm{OArQ1T z%LtcBBk;prz<)cJtAVzZD*;b7ul)Sy$w~HwbFuf`dXC@++z!1@wh(3mXsX)k`T_Pg zQ%)~?hw(S!KyB)G`)D1yJ} zs=cKDJo|ED%Q~+PORKd#d)erc&#;}{ihsqjo~baui-^AryWmRmS;{$I(h|fXoM|mB z#Q3$ln@x>jRplb5G0VVY z%D@OR*^B0u+z&WyLFEY{KIMkA88I@^;-9&NXiX9i)wZ5&Rmuy#N7iL+?E~>kB~{P< zsK{!4R*u>l{>4-`)@xhMV!?lG=e3dT^S<+pJL)R%L{Mow;H}P~$*EzJ5+3t}QMd6h zLxF3=D&TGgrh)vy+!w})v)X`&Q6LHKs1_sMF(iU{{#&HGnI5=h*KT~S`n^+B+@(d8 znSpe!NM;BOG{SEa1EHljt)_dV3qT*6Jq~rKp`1?t4BM8~xQ@)cw>Muj>nG7jUhM_B zjX|DBO$nx2LnE}D^w!X6$l_RMLvXAdosYnnzw2g}w5nhr>S1gD1{BogmhuV(sMGHw zKdgun>9jCw>s^}B*iPW3L{BM{Fe&*I+g8f00LQFvug@{k(;D-?F=lR=})n6y3o1eC0cs9C8g6V#yf_=#XVPVh)7&2>yCBI$m)woGXD@vy18u zD1+CS)*r@2fqHZV*|R|Kj~kjnh>@x0z}|wDRQrGKNMv>ki-sB2q;=0v_Pf(OLKB2q%Ps-%Dhs? z6Aa(+Y9i;fz9OGTbH}Sn<&DYnJ#a5TsqB+NnOd{``f&1mb$HJEW!Qn=f08TnD^d)Z zA?apF0pKoQLFT2&ga`~K5Dx$ZDFV2Ngqtu4X7tzrzEbn{B-9f7_L zFm&#{9ZdFel^?#!O&E3^DCFdtTg@rv~m=`P-9^R-WG@rO`F zgd&cw-bPKNqN&S>8Fq#twq}lwsH?FzP~^{J{4tALV4jfh63@hpYL@vot&o9*d|Imr zK7d@*Oc(Y-5v@~p^#pX*oxk^Au$Gw^L^Ke5ftP@_A#h)n2Z70Pst=q(#y`Jnyvz%KZ41)Xa!MHKCmb_Y%}>VUEw}aKl>R z&#pX|OOgaZEu0q-@rUEjb4w-*8I}hZ@%VlDmD(L~5>EV|v5J8O?Za8QHN|3TpkSpa zrr>+(kiO3SJ*P$&a#2IyTfK_v-0k?!ri8ogbCt9OXL_Wfl6_v=k6aV8i)@F*Y3cIp z=>k-kE@v5MCs(!909CSaVc%)Tk}Ul{h$62wENwX(Pz;n|Mk~)Q4>}p5adcUVFaW7F zHHQZEf-BF`8gkx>JbV+U1X_9S>(TjAp^$TEq2ZHPP&Vng{{jG8moZd39;fbLvAE!M zp;qQZ%f%Q?Z{@`jd_IRfntF|!jWt~-rVH8|1K&Qcjve(OH&L7xIu~BY(rhKpr(uhn?VhpV=Pq}fm-p){&q_&B?^doUpsGcGHePD7GpIRHJoOqH_ijPo#~(se zmW`2gH7G3jk($)BYDV8ENU;bb=^EZ@pQfCZB*1!+gmy*XpHFvsw6!NgV<4yO&M9OSQr?mwv;Emz>LfDZRTZibv%vlaY>Ek9l0 z<~nn1ToV_F7Qnoa#Jlib;Qn_x`c72-?*H@{^?y1&DE=>to1%v3zaDw(%4kB!{15>6 zQ5q+uUfNt?$sdP%P=I{w!U!5Tx`L|?tq`UKGtQ|g=qG5wp3AGx?P|du_wU2?=84Ls zkdo!HYG!U;3XdoAn+<~Bzkh&zgRVi7+z|ptx|U=tkcuKs9^jhG8X;rym$J=f`36ce zJJv+cR+IsrmtE6`XijbK0yqEk?c*Aj^&EP)?<5qtZ@SyKkEaOdSP!MGp@H>}v@u4e z=rw2Y+ntv?^_T|Bj9<%;udml_+0~eUfPN!cit)=Os>U5x=&9|^{Sek_$;*HFkid7n zSIuUgN78ObJxZspb1744qy2^jL--W6s#ajj*?=llMPk+R$+3pb_jsdv>hs^YN{H?O zD&1~@5j!wIG^|(H)T3DNK^dK(7$<}xT@+<^r|s&M6l-rW+M2?vh&!I^-!r?WVr}3M ztk7noNsl5Y4*21DiRoNLZ?a^DIBBfc*u~+L9#%U)!#(*pYrm01is6uh<9Zkx0T|^v z`dIpBOwoh`7=q}qz{zbT4$I9$x+H_Is+uOdf2oi&QXf!T7wX_!tm($8GU#sdlDkiliO@-u8VngHhb9k%Ecri}db(eb@uoXOn1&{W~a zI|4Tw)78FI=Y_hp?HrRPxxE7e3@2Pn%FsdSU-L$Bz0Q z=omG1{vDl@s_=KLVLD4j?-#T9Azjg5st7s-3kq+o1V!=0-5KaYf(YM`&kuN`bXw&c zp{eS4TV)(C#ZcEA2t^Lq@f>6Rg8g?H{L}2TBP4-of8I#j|4sswvb^el#s99lf37rs z=xHrQ6#^X5g=tb$wTSBRCHzC=I6`(bzwND+9nyUj!IoLR37U;c1Fgyv8B)&-T58P| zQ&Z~z0C961=$Wvvd`^gr4v3U^*=qt1gvTd&NZfB1_?PjduXuM3srFsI?KxeoPqQUo z>;_6uY`)e6oTL1O^fiQxi_RzHRXvOPhUF&VAV`2b9;oye-Fx69kLqF15UMT0pUP<@ zw3nGGCE$rq73o=L8#&nD7yK?vbAl?OR&|9&T8R%%g031);mj1H--u9&38&ZUu6%{@ z0V3e0j*4Jd2qRRLkNAfIT$uuZK3n~YHKgrg%NyHhvsp#**YBO*zGZgpiaYYOBBhhH zz?v+FglJw+0KQu`E8RH<-IfXiqC<%RA=59isgvLGo6G~g;MjK|z*NI3lgiY)4P*rb 
[... base85-encoded binary payload omitted ...]

literal 0
HcmV?d00001

diff --git a/src/test/lib/ftpserver-server-1.0.0-SNAPSHOT.jar b/src/test/lib/ftpserver-server-1.0.0-SNAPSHOT.jar
new file mode 100644
index 0000000000000000000000000000000000000000..d23e583208c36bfef70965e22afeda3645f44f69
GIT binary patch
literal 17084
[... base85-encoded binary payload omitted ...]

literal 0
HcmV?d00001

diff --git a/src/test/lib/mina-core-2.0.0-M2-20080407.124109-12.jar b/src/test/lib/mina-core-2.0.0-M2-20080407.124109-12.jar
new file mode 100644
index 0000000000000000000000000000000000000000..89c81327a6ebe40d8a768a31e87791b1ac387e22
GIT binary patch
literal 540100
[... base85-encoded binary payload omitted ...]
zMs3mguV|XN(wMDh)2LTH>aj>BkWWn1f0gG(G1L*v5b9ySQ2f zgW=EP`GKhV?l-5DF*hSw)BygnB_s!GG%bh)ELTDyRFqV9w?k|pKkf4pS-^Cq-Sw0+ zMXGD-GbmPO18;ImkdPiRC{}X8aEgvASd-6BN&-@uHo-2=n>M$s@1Lw$X479!2~MYu ze#6gp+&IhRuQWGgA!-Qeig(^_8n+W>mC9~tJ4~g!KsG2Y0wW-e?q0=YUuMAZN|>)R zdnAyU)&FH}!5sYROVfHJE^qmx+?tACCd+sXYofh5N0iasgbsC`cl)G+9qepRD+4tq zCnC4UF8YnBXj3cAGx9|p*J$qSm1$XiRXTz!Z2MjKOG7@daK*-ZO+Ct$E_b(g zgi~w;!f80KV*g5Y)irKOkx(j!f+>fVDu>kLFhbU<@#kw#lZ`;|f;jl8y|dLJ*n2kK z9BoTn*%qL;%+KS@P-*SQ9n#gU6Sqr|)eePQ%kYbDvIgvEunl`CY|g6G9ji3GWIZ7K zBh2x&*53Xc#y&y6mid~iJ&Lh+0j=F(gT0IEf-%6$aXIAd@{ap$Gd#y6JSH;*BtrruMmt z>VbK{kUYl@fDFz4a}|iGWB0e#ExeoLlLP3@(aWx>(SnNRXi_4gGHfs@!_*GK$>JUn zwYUW&mq#2LzaehoEjj<_ZNS4zH+czj_is09)!}x}U$fX_zyM^Vwcy@^*1y?4+hKjj z`(C0H(z7!)I<{#`-zIQ7;pqyg#CFQl!;9*ytZ`~>x8yl(#xCJd913nuR7nHcK#I`# zB1X0oQtQt=Wq&#}dWfa z(BqB`f=ftBj%r~K*%;y%8(M^ncmf%vfn}vZAd_=vE>>-8=jR!=_MXMSJHc=Z?%peR z@`{$ey{j5JHQhEu^(N3bp7CyBA6hpkmO(7-nkI?mPCA(RO=YtOTV0;SNCt9VP53O& zUmCqtbyQYwz&_%*CS2YoTp@2VdBg=nll-(+42FRUhc>JN<+RXDU3JLFA=+z zgaOLIHyY)ZQ5LuhDhDXITEfUY10>8ShAQ6vS2juB;WzLO%7@_NKFliy={eM*t2C!3 z!USV3AJQXY*spB-WIQ9~7>CW*OfTz)H6T;fv|hgPKkLeqX>F6-Vl$*`P`qNDE{Rbh zY2|v8$#dZL=?uNtT~)5HGBCEZEiDS)ona_* zOS)-)^^E(@Wmxf^R3?(L3Kv&44rAi*WVcvSJXW&5srH0jBnb3muixJ%=`93 z_;Fp)A}{$hsrj60@Gg4fj5Og^VdvRoJU?4uP6z&H7g43RkWA+jXLm#M0y^vHL0tSW zw+i^iwG+yb+YSAp8N2IzO~Hgx=gOHRCQjFo@4dJh`_iJd17+7e)&Uf-SqkRZmqoAS za;xfWb6ea|u-45iTDaU`Cq1O}C_JL^p;fM@rFG2^t!Uk!aXoVv36F=Up-$9`M>|&R z#}84o!+LJvy3f-0ZSKc%Ex37`80asI%2?VB=O!WLjDac2VYnnl#q!pWmpO7~8QJZs zg%mTTj^&DV)mloqG82P1N|6zzeLPCh8cKxcie78^DBIf1aA5NuSn^Ppat-h@r6lb+ zD7*Eo@Kwb|k5Q02P74vNifqBhXXEnjf~I{&D23b+HIasJ>1}_!y?ljgpg9=zKZOOO zcFZ0#8es=pUns|fL!2{Wms}+^P*%>Q%*fEBc`7o;8T{yRy&>DgY7joyVQq4FL*tB% zi~-BgbMxuEIQibIij6gpD4A0M8DZko?RCF~Ib5w$c^ zxm&1dcd>=^F%ut!{FR4y^#kC!3h=+ynwURTJ}aCDWz^??YgB#yY0dwY%KzqMY;0st zME^IHAE`8DgDHgcK3ErZwy)VMUn~MQ^7iJKW$J1SJIv+YN zCMt1X-}7&Ud1ZVuG3Hh?Hvcq>PNSEy_t#4x`p}~Wzxl^x|O-46abL$gp-wbi4};i z6z31Q_{mJ7TJm28V`dNdKh%o9+74Y;%@{7GD2_H&+%?YGeFNm*;6Bwlb85@NUHK(9 z9_*MR13@%i7T$p2FQ^y(Q-FjB1E{8~pzxN3#nf`u+A+r{WCzR@`O#s--)iZ`G3Cn1+n*NH-^&jV=~C zFd`W`*_My5f3xlu0W7);YQvW#nD$@}T4iXYWtBl<^Y>vtNJ$3&uw5l{>n7C7paKCA zJYE8zV+)XvFpnOp`D|dZOiumxiyv665yE4TG#8@Otcla}QH}wtjcXI5VaSc9>2#uY zb(_5&U8#h7DU2d)L27Q;bGLD!l0mC{%5h$ZOA;zBapiL%Gr&g!vsNKH43}#uup_>*VbR+e&H(OS#|`ZlD$#!-*1nGNlUIu?QOuvF6G4Z`F~3?OZI$u5!}(y zYqyD%(tR1MSc28Yh`T%&Nan>S#f9EJ$L;0Lv+W8HhW2SkFx4J2XQfYED7?U<$30x1 z?XmQL8fmcfV4FY+9?6c4gNsdyCR?Coqw*e*d(ud#6@SNaa*7h%qBZlvlf)6RqasKs zG9ihhhKr{{4^_sKARGxJBabsluq=ft`EcBJKIz33NGt?ROuDKCdP?A9KJOXlInr4Z zR9Dh`!$3Eh^pF-(ai#A7F|c~BvjKIr*9`EGbj{M0=NTw}MFe?LNvIQTMkny*qVR2d zEuUwgVEeibAmB>;219>9-u|slSmjme1nMV0ts9cR_kUZ!`-9pkBMHXWpR4}0pY;CU zP@CcJ)K(a?p5yymE*3M`NkI_TnMQ&9a>9lT+7BBbNDxB>qKbrEP)TxLfNtnc!Mnxz zh(tt&0L9~z&F=?N4)2T=&Pssc>T&SA;Sg0-wdw8o0^7}O$x0vWz=E+D`U4?x(|GB- zN#)5o!#tRq4nqc7sUQ#8POKzusGMMIU@ojsJ*B+Sq*AqYIlWI0M4Vuuzo=nqfvAo0 z%%l?o=L44#`;6MTHZOPt42yp8*&TW_PV_Mve3;?4FzctajTs?~QTNei)#uFHQH3kX zX^AzdUQrVY_Ga3f(2XYBksc#U_G;U4e#A3uB6J=_3cg363#Vdn{jeM$T`@zvNm2AzM-=ZUYK+yXBZ6rMeFoJ_#H=P zy*W~=udTiq)ZO=PRXy#j#T^9-FtBGZqktbiy8+n#CFVG!FqkN|rTQ*@9Mq$i+P-&- ztWn*#A6wNhoF5@5ABxv5b8BHkIYLy4Yp_W*@@{t3;vvZtMU;vB`*lW&i`rPZ@4>?+ zORo~-wOmqfG_wS;$OJoV3XA1>ndeT=U{iToi*q4KsgI!lJTeeKNR}{u?m!%Y|L@?# z_&4|@Dp@IDs33Wv=&9IZ&Im@%Fflbi&{EVinqfv>!63-@Hx`E;r;phWFjpt5p$r`~ z-QwPcXPV(Dn5132LA=ptX1VhdeZd}ue%Ro4&vMAV$#6e^f4j)`2225dFhHWzSB24q zwg6PGun_&SB_OYg12q;8An$_F{ZrIpX*vdiPox$%UH@GedNXXh4FEI-*;)gm5yVZT zj}c}H85#$DfV|FBk;2S+FEwA6%F`4J{VGonb;sh676qyJ+*c{JwFocnKpNjV0p`YM zKqASiUrL+8j 
z#Me3U5Pd(c(K4wd$$os9!Ct5qx~Rnr$r*YGVM;1rr63mo@;1SWQAF6lF!WB`);tU0 z1}h$cP^*NZS=>PzRNDl3u4a^m)XXM`kosd{4WBU`;8>Kbi%M{iOzh zL2UtX0@`u;ae4PNy7-cDtcvJ4%mL12dk`u|=MWr+weGher-)xE{m~p=y;B8;=%M|_ zwTVf#OLKOpdf1@$7?RkobTBU*kZiK>90JT@4nrkLTHk4)ne|MrRLYd)8V}E`(y^1t z4U*tSb`eIKq*X<+e8KRsQD6ryv+-CA6o;_Fq<<{`BM$zUgd#RnjE^n_U23eG>N@QUbuEeY{aMSl2pvsG-lanp9|c0 zVP}Cu10?M$8Pk4fYuC#oIYCh?7E?l;#S`2trhm?I_2Zx60GOB;F+9%yAhMn%e#Ng? z-1bdA&}F*E5O+La=9)R?c=ke7UCWarmMMlp`ieA1=yCP65TNDxi?iXE6ZSNre(xhO z!L{(kEv<}4WZxDjL<~?;bMQqv#i2`$)NoxFFfaW_a2_XgFDDRz6O6)zF9-8O%~d=-wa-Wp z%+!n_2u&|c!bcZ949of{Nol9db=NaL82#0Dkc&t&4}yMjU#G>pxm=Q0%3+m1*1qU{z5f84SDLTU|W;ZN>m<3OT8f4GE zNFK|1_|h6>;Cm3aGYF>B$C=V@~HBTQ&>5uD@BQSc4V1nqtNkC|!qXsnLI=Pu~X zXAJt^3|~zD7`|5J=VXzzf|CLYdz35fzuHt3P$MW_Dt`gdD`Z5@0DRffz<@o|OUpHJyW>j~^b4VXS8w zu>Xw;S3ht%L*YtvfXg3YJ}_HNcR2Bf3S&i~(a)A+@ShA;$859Ao62w9b*DK*Kh#s?OI0m{4T>a`8>((9SL1MImvd+HHtJ zE=QQ))yPF(F(%9pjy;NI2^od_;PKhx7?s)QBg}pn^#>%gGQ%NJlAAzW-KQ3V zThHi>*;|!i2ytvyG2C#Vc(h(JcrybuQYxHt;1Rl10fyUAl$DT%ub<0CS%%)7w$S^S z=vTw00#*iF?c+qqezQJ>Y@*~egNY=CiBrR5UMk0@YR6PUl5!Sv1w&#Rg%HMhmd>Ud z@NK~_X!uh3bAm|z|LrUR2e*T}yGvt@H}3B4?hXfccN!XZcXw}~@y40{ zW^V2z^Jiv~N-DeZCVT%_sqB*VtRPnSPy#X$Dh zNT;C=21~ofW>s^w*T!J7>-WX=#b+JU7w_L~FRyzGn4$uI(!bjtyFI7*{xeLN1#d4P zX)q(WfNf-CIYo@$l{JMX)Yz>WHAM%U+&FIb=B;YVk|7{Yf{Al`87*slIZVNvN=s1D zGP1onvYVa_-EaK~Z*JFq52%LpXk?9C(P1gTMM@e+n&OfV2ew|t6=PMN;xMm-%;6DGGx$3IS1QmO;vLjtc|5WX1Ve(PKw&VZ?U|(WFip{OsSP-34*%1~Esj($D zY^SlMG)$~PRC1sMxGp-d2V8>=3<1~02Ob(*%EOQvEvinD(SsTSGQ;8;M4$s>fWNY* zkl3&)fW(bH0e|9<`4~Rqu0-BzqV&BVdAUp>zw7`Xkncu6+nIWbd<<=J=?<7FG{JTH zD_Ld7nwqgSC&wO;l^@_rU*D0cUHO_V|B@o_wO#sCv~Z`Sti8grbhX{ek$8CrbYK9m zKLNYC(%=^vz9O(D2e!1+(g$dL9)!Ce3t9&hM59(bCyMif#X-bl7q%t9-AHS^Wm=P} zUloGhmyx?uXOWIwxGzoAp#46(BtYG0Q`&E3MwsWL!-1%m}ijcI`+X!X|i1E zOr+~NJ7c_{-uR*%e{ptn+)5DAN*`DDsKf3mvuy9t>Sde#y zk!Jy1oNWx)hJY^opWQJ(I{zJGT>?o)L2Y7dRvUs1U%;!W4X*{^y>*q^L z3VUDuIe38~6|NIi%+OyjsjgO!Bc6 zSrT)lJ=D(85s96T`jAwS>ml)#7m^GwWzFIVXU1r+Gcr!mHRU-B@pcO&R)(iP9Wvopw1)6UAXx`wpUUocNGQN3pS>vr-GbXT?Orni5|3-K7;>{Npw1E2KNOt6VJRC zU2>jX`iIw3maI8m{M!qQg)3@InJ61;DCLJP$B*drG$bkNhRT^fJNqgzk0{ax!rRUu zg)mX>R6Bi4f-KVd9u9TsONYE&Rv&#T6Y+*97}f3A|uE0a8I#m&aju6 z!tT7Pc+`^8Nd4i7aGE=l$c&nk6Kz)nHua;tiyRDVoIXlA^L5oGKk^@;JZUN4m(eLz z$uH(5X(F<3xgf~pG_<~pQ%cXzR^)TSV4y6;bf=7L{7X8Z@~sBX#KLtwG>4Z;VBK!Y_Tmz zC(0LC(J2a8wD4H`TvS|3wn`ZNY@6}LADHe+sNedz76b>^Iq}%)?d@u+heLy#Fsp?! 
z_35<+)gH~UiG4fI$Gts9ihd9N*0oT-S%zkWx)77@TA0$&VfqyRGcx zO6mrB#g=SNuOC=*9R@+WX4nUkW5{FjCX|JqIJ^^DNBWUO71*MNU=zx8q5)bRIivl_EkqkuO)v$9 z1zTiqfmcysBR(B7s93=yhn+1u%d-bJvE}GuLtL4~Et=YA9+nwDb?Au)3TkazLZSyY zD+PrkG9>?8k}>2q7;)SYp%3%9>#B03G?$o$L1m+xVfk_^piCNTQJ#Vc(rH?mVD9O{P zj**<0sr@T79BJw2y5fHJh8t(;I8eFRG7^pflK0}$+1jMVpYUhwh>BZlru8{nS@uoN z)bT*48BF|oha@+Mm2$~9+bX|I+x=DTyc#)v-ANRy$PHwnfF`Dt*P7d1Kj;@(W*IOz zkZzF7l5PGCp)KhlQb#T@V7gyNIY_iuc8!t~uyPg}m?B~FbY~|wC zxD7@m!`jN)>f%T)kc<4euFV@oCCYuvn3fbr81c)1(rCb#9%>MC_2_YhNSNBx_+xFg z72Da7cWPoKCOlcJ@-228odwuQ)4UJCZ$ip9g6-VA(U}~9vQ#oHqd>&g7NBwYzA0@6 z&k1LD+6dW?Hq<_~OCDF4zvxk*(b)EozcM7_bl2(i6kBR?_ID0Cy)Gk4=J1V{?oJ1I zR6uP0esz~07I-9*`sL_^_#8FOnBG+K6qm{M7S{_4M$XmWYueIAAL;4E_K!%20R|b= zPKy)NVrt^W)QrNkYWdFUKLwAFYz7pk&*Esfxwtr|;kdaYv_cOX{1j{(;t78(<1Brg zQLViK`SKb0;kk?au+4g$I_O~kiE0IV+@GiF9hXpxF*CCNN!ive{+sgaF-(T%MhnV7 zsYR)43*52Xl_$6MxNKP)yw+oa`LgV7qt-n2X_1E>hD|oWqN6i)`6wE>b}k&bV!=jb z)G{CkF=B&SXlj|_%o*hPEh0FjjDvtbiyj0ZKc3 zNk{-WCb}A+$iQtQHNF(WoE{OQW`PY=bp^SMGCq8ztm-CJH+s_WG^&q`;LJG0GcDq{ zQHUgc_SHv+CH>I2=e(7)Cw_k!2GwVc4!AKWw`K|B$9KX|UyWD~x17dRYiU4S=g@d3 zpm1q|d)n$oaXjgmyPkUF>52SZbXX46Cs$AuZ`ped-j*oDkj*73EzGXS*jZTkM`vLr zFYGEQFB8LHt0M2oH)$o!irxVEnL9@%Gbi@3L7WCB5-q7=-L{#%YJqo7!&770`(+ZX z$=^w~Kpg%Xm`gcHAwQi-LctmqCz_1M7fFts%^Q=GEZe$>f*EFV2zlh4qy6chVSO{V z4b+^IsKn&j5e327!EPeDcr(sXrn3$CTk)H&PI)y0Z77#m-vd+s_FPQe z?YU@6&LuQm-W3w>nY%}({gd2{lU(df_+qVtQ?r?3NH5m8s2)m=- z6>VA8zXdND-Peb$gKrBF9VXUjIGHTXvdQ5y6FFWpTR zMFH2Q_iSuH4=Ool)}Ge$SeY3oa#ap+Ue#^e#T?h9j#I3oOh}iz`4!kTuizb#2I>Kn z3lI`om%3b6$Z#H~OU4*T7K>vwwUE~A5l>tK0i`G2)l*tiO(lQs6bgRguq}@x5AS38JfhOJm<_fV$B%U zC%xy$m_eq2L8h$rQUj3q9g@#$Dw97Hoa~>w=W(9~y?0>Ra=zvnHxSSICPv`hB)&_1 zT8i*p4fR*71IP36KbDJRDFsOw)G@uzq-XYP@HG8N5vXCsuHxWd759KIfY71d;} z`w&i&jGJ`1tg@`mxVBUtX7`Wrbj97fg?~&xDjH3@_p{x*d-gjDDt}TLUnIfIpCb>i z=WScsvLfQ-{1~8a_r~?2KuT%3n7pJQQelJ@IicP7H$rQg42(JlOM^!C@-h&8xiI`A ziT>^5kHo`1w-o}|Dv~GCr%q`ndRdYu&X-jqh@{Lzt7RK;2D}Vuh!~Z4`s7>-P@5(7 zZqt|AEpeE_W;hbQrNf=#bliTGE|d;a1lm}7csP;n^1)5*rM@dsO)3y#st0Sym^wwk zMEfpMZ~f)LFTVfJ)H4TGuuCx3FYE$j|EI%YtYa%phy)J|2;B7cq%~IT3u4jIVF`@)bDJ~ zsnGHT?jj{w3Y{W^RFY+)PNWR&M>w5$()PSpScI@#u-5pgPioQx9iL+1*2rFh*YhFK zw3V4*>+pxlVl*o>l9`G)W|N>s#1*fm$@G$#W;8}i?8NvWwQd*3BkOYt$tTuVNQ7`) zP_FP`fjKK{y_bri%glsIGjSEqRqM2swW?urgz#v=U%HgkahQPq!S5P#b%tWICAl@Z z1BH%;X5I?@#Ys|I)^qDCE0lN?=|lVtF5A$;6#=JxtETH>%x?Uf#y1&)?+NeNA-^v6 z>ytoVQJkihw6{aiofTw`9P73)g8Z>ai?HlX$5`pwp}MTAg@)4W#T_H(CEg&4k!{l< z=CeG)u{v9r{6ihjDO<}dF{j0quv7Jg^=+>sTk|=C1J2?>6fR&iB0S{Py7jwp zEXxkK#OMzP0msPi{y+X)&GF`bL!GT@^-O27x1%Csjed}^F)lLwcoyl>HIo#jmvJ;A zC6$r#6JNYgA-0wql|;_DorQ~Pj$Q`2IWsip&iK(#1*bDziw=FAqu;h68$|p;QndEvo>o6&y#3Tf?5PVrSt~$-kox#Mei|q7| z$Vz)8ixoNGq5KxF9#p+$a0VXP+rx9Ua%!(6KMp;z#7zIz80}BdmxK$ku$0vC%_O#` zAlVcw`RIyaI0(-rAaSdx%!$ZGDjWXG?G(WjA7QhEU!|qKy780!^=1WTujBAz^mT@; zvu%AMdsuQ%#*pBO`*0S|uR?E=5ok9*-&$kaMBSz;Py5D380~GR4u{=Dw=b$+G>&0? za5UC=*0@x7Xr{qEt6CgpkwafcmB7-1K8nnHJ1W_dJd&uD{kLnW%F&M44xm^h&uWFo zRrCur)(0wshkcXLW=Bq;`@Gj&(+1NnMq;OUmSS2f`rP$z4XBs;XPtS`M+GZ?mCu9m z{=m#`@D)#b3-LD^zSkiQ3jPJ*lR`|*ER}d7HH%|u^~Hwt_(7=Hf8dLrie@>81xkXX zGeqKXn;h%;AbE|0Yj|c>hxgQR^|5zwSJC;zbDrp}eu#VB$)ht$e3tlV54sfNU1ux! 
zdNg}ovu9WK4JJOZagw{0O@;l)M}i4+T_RYTg~O37cKfr!O;wD)%#9N!Agf)G@)%m= zM+o`&Nd=9CiYskC$F%OYwn?9v?X9UbtrBD}K}@$y6J(*Kof=xsxj*b=DQ!B9>9XB5 z&l;A>ssb*@9Lhp(jh_cuEct?KU^q}C2o=bmF@M6^{{Ff3E4=!kSc3%+4n;SZv$o|% z*%1}^g+v5GFT;w!15m+GaRk7sUM5Jvn9c&g9(;yN#2YRlnHhfsA{>&#mx%$ULYY|+ zn~Z=^MkxLTli?-_M3|g}BjGUx^bgon?5c!8V+;^!CQ%dKzS=jueNhu1g&4qGU5x=4 zy$2pR%?&hNxNRf!XCpR>MyR`evmh027YIZUxGmL+3Zx~eUJ)DCF1)0619B|Tk+{WT zUh<6{eBAUYDUsb0QQ-5L>h#(1(o?fE&K$g)l#q5GOO{6$t;3rj@K5K->HdL1xl1#2trs0+k2 zf>j8wo@1{IM_7ZV9kj0q7PQ=39nf7v)Z6oB7wM1q_e-c;VR{%w{1Ap+gJeTw9JU4p zM5k7Pl3$|&;&X=b$0_z_D{VEJIl|`v%pHqLeM{9~43au}yUEgYIuVf`!78@s6CiBU5*DGk171cT{2lfPThK=k%;}<9sq?4qlmw%fpDm zT|pFgfgh0Rw&3y>&7m#W8>q>B6X(`CC7&Dnc6J<`b7;=ggGij{5!#gFt942v_ziM=lFN3k_z42agZD9Lus)n<*IpTJR zABu=iwY|qTnxDiGzLjfZw`~Qjk;#nQDPoQ5(IpF0{@n>7xe#Dgvn z$M*$a+#^kgeu81HiS(D|9;7f2mz+S7MOcnhB_dBS!(J<+*b?9chD@8@m*Q?l`6bK1 zo2Fr#bEZeK8)OPQLS_#Neww5a`R=hIi4D$8zJlMV^ymoAeJa;W<>IT>Qlv?q5GVv|K`Zvoyxh*|jdEk1EWSlU9ST7$r+k5Q(3YqAV68CzY_ ze1=Vh$y{-{#|VSH>mT~t=m)nN+teV^)ZoH5^4IVnG$To%`XBSk?<{R9d{0dhaJYQv zBqX@|O^4)Lzj@#iq(bG<_SqtDbR;@B!3Scf242wl17+7D631Hx7|sJ8h$@f7R1BGc z@TZx!l;n#P#!g$IWB7rrOG7c_@{F!hxTPR*D*4fac-C~c$Oo5B@=gz6pN!>m5+#_{bRFD zP5efMhHhxU1uWg$0?Qku+`GW?2eQooNdwuEaFo`%N1V;nHeE1?8%DMg;m{QB{NtV2 zSw4haW1DtAFRZ86(ppVAIJ}mPyaG}pnfn8wvt{sc5#Q}K5`7$RdVtQCKdsY_TpP#E zO_G{NCW2QE`I*KYD5cG41V6XgJ?OWgPhZ|QMskdr3)Bie8dko`VHb2M9r1!I1ZyNg zbvEjH7k+##)k>AkVn-d&g{%Zfrp`cjf$IVJj5G+T4 zL5Tn)ja4+@sRQK9IQ98XVkD#@rtyNyBU5440nkPL{tuBQKkYjV%VhoqVKMcZ0$j}Q zE@L|4t(c-`iG=6bb=2|ShN?cbD`LOXvg%R4yTFI85JjE$&?~ zO_!g2G!9AUW*Q5YKo z&g?B+EIZans1n@ZgHJ}&wEQA-fA<_ZC6@+URWOBAdVW*6rpxw)4A!tc3;Z zHg=L!Jus6{{+Ou5nYn}k8;E0}t6@Q5C)z(?cR2yIP~k|?e#5h{Hsda%5@W7A$8|<2 zP|D-HVPwcyR30eLi_M-^3UpdaiI3#zLP(V*(r&Cij4|z1Ct7w|_d?#p4|osmCOWh^ zjNcu0r)`VtwNt^DHxt~Dg4~cY728~+)bT_vOSvs=oQu-??NrhjW8lHKIc66k+30C6 zQp*czXKG4oYnp&IV(9MYYr*V}a9GnZ^)qN2Zx@)pb%hsTxOMxsra0OV_i5+o%$SBt znU+y#r=L9sd_u(8>9bcNi&M`XGjJlZx-@MkS_Iy0?k(v^)QfJeMYT0nW41MZk30mU zxpf+DT%wQf`S-;+cBYg~?pUX$b`4WWAbRegvdaVm;VOVZI_})H3=-2(s5`ePD9UhfF~T7y2x(RkltCOXxr;r9G<3wTWFmyVUP;c(WGxHP^LUz-M^zS0#+`i z)_SO@2WNv#m=*#O#95AI$29YU3-4g*)bfK=Cz>1Nyx<8^GtTS3`lXIFdGx%HnN>ca zHd9w?dn`-l7P1%^^E$<9>afPbyc-!5`3o3^N$B#>G#k5zXHM5UBswuy?Yj2#k0hAj z-{#8RzRjrj0Y>Qy-Re&M&RYo7_J(hCwqFQUHQtX-GqqO3c!sC#aLjRh#0HtCWo#E^LO< z!~3_g^a=%n;Y`%-i>d@oYMacRCo>ULo%)Fh;=kpkIp@XsjSsz5nlhnOEO$)&MDHs5 zdsj&u>K8V2>f>83d4CW*S_rN^`EJx-JjWW6J{M*;=PB5#BUh(|TfOGZ zC&Z;svJ2-Gd|DjyD7LUxW-?gqgUn`8z0s>FPOziNZ%G%9Rnry{1V`h9QmG$iTen}e z4$Uh7AKghP=f$qDPm%LcuDDz3xfNX@S^S2JLw-%64+ z3W)iav1@^P$d^s-P}VH10%*zmPVwBA1o>~Nos3#A5waI1!Kj+RFgr3h_tQ4FkYrbP zZ0K*l5jn`DHjvH%u+o#Q{VV7RS*@}S)o)VtaZhWet?E+bjR9Sz%}>n9cBoqn=C7E; z4)UFs2i{b(5!Gxs#&rn$bwt!pT!oQ899|BS8u#R66|0GA^@WC`h3s;2Td3mDk0pAD zOc%Ha*=%Ed8c&(2ABUIlviF3}knNvvEk)zCSxPy0-DIU?E_Q@9)dFDG!BdYK3}ahl zBty|a9>`s8sh#GjzZ{f*wx|C7izD#%CbY#_eo)^qlBaGuV2pq~{OB8g6DOFJHh?Q4 z!4eA)LgW3HnWYAG_+pef5z%KWgavqE)tQM9XCgI5tb9z?#a(J(yOc$+W1PxwDryE4 zyMVKJmLSi#5Ahi$lkw%~S;j6m2@gSeQyu04*Dj#k#2{2^#bBTS=vt?IqKa~G6GInP zn*?=~Tzo9|?~3tm7%T~xlUBZ$Vu*S7&`w}e0v4|WYaA;G7CdU$>uS9>TXl}}7NhJz z+C%W&v4S?}ER?B2RM2F!j>jzh+T z#;tJ;8#rN_*(`#7dBWSR_VPxP{URmTn+HR61dWK4?p%@E2t)SN8(#oI_d%x1Xf%G8 z45rAnN8Ghza`&=R?-h)_R-SlY!^X8e65NMMJWS~f7(W8R7)}24{6bR5S|gHIhXA5$ zuHYGE>Deu)Z7&YzI%pW@dj-=D-k4JGVKqqChrU_(>(U}ve?!N5{t<$MV!$(AzpJo6 zIbLb#`7y@gBz0Mav}yLWGzm;(grVD{Xgz_~{ivfWrHL<%$xnE;omf1FAo6!x!jV#( z|FmK#2l&JO;Pz0bfPTFe`PY5W&@)Ap6S*w6%Nq@b^$B)P8`K$*v3%pjWFS+{%`WoZ zQ&pC>J?Q=p<$Wj**((dP0 zydT_%=a9|dlT(N8T0_rLyI^+M4jD}qKdY{RBB5g3gHdqr_maKFqeTRW-lT3B#Q70e(-X7 zE@c;@__;PyWy@Ip@-_G&x$-^(UDurV>jN*Md_gOq%x( 
z{d$jCw%H84e}_*Hgi>vaJCAUex>y2G-FZ&?;5m?rO+;rlm&%P`PiTQQd0@TKh+d~+(D@k+=bWSIro2FMHKw@D*TJrP)?= zC8kxx=u~!Prd5_VIJ?p3)H0hQE}`iaX*H@Zvm#b=)hHQ|FEt{6Jm~CQ5^IPN)9AHQ zvxfFeo~8d0s=o5C1|kzkp4hR5+XYwzh}r-ozJIX#pmGqRPO)#wgi%JvxBgv+tvK6W zue=nO;@*3=MSqhSDiEp(_G5haCCOo8_OWt=B1vxcL)cvVZUubCU^Xh)BVvA{52pI_ zn#GzsTe@FjOjzf>4s7|g`=T&;-T)HIK~95FS@yjsGjJ_BH%#>kFw-<-wC|ZmK*c@j z>a3C#foVdyc=UIu0uvI|8BVUpDFG0w%3*va5q@i>R^ zI!l~Y2r+ULG0yNYa%H$Jbhi>Qa#Ai5G0uoF7Z&^EZNwGWiq<}y?hqoWvOOV_Mk?AN zRlq=byHK-MeD-CWn<|_6niX+S7kKnChjC38JWWN;e927kw~KX^%KQWiH}ItjT(J;u zbWUTxJa2qkt#%N-T?E52af5ab!nJyRIBbKz#5Qt5wa*^(BcCU7-WLCb&@-Cf(&>f& zbGJ95Z|x4k)rFmcHfj^rqY{qF9}BMwKik4rexQz>Y4MV{ZPP1RPMOdUV2QFu%`cNl zE1~K5!n8)qFPE!Ixk2lZzD>z5+fJ*%__T5s!KDi+rK&U&CUaz7oFF1cF?}Ry>dvTb zUCP=Te-!w{%>iQ?HzgsOqQf*5I;g`LnGouvV8jucW$8$f1DKMCu-W5x0a`{(hGlhR zqTnKM$VRiLu&nJWau~N$LQQ}Rv3R1Pu%WF&A)pNIXH|%zYTcQ*84(J!AhILECZ7}r zCZrlc$5M#FJ)~<9PczvNGZBal}RN5a9kP_ z=xy(oyk_L$hT&9?2^Bu|qFt}FOFi6V@_w-1m461v9$8rBKEOt*Ne3hzO!=g^Mfk^;(RYMi-2B+1!IW(uPr4gU%lpo1yZ#xMMzUEw9Kps+AGp)C%v9I)eZzY+`Kt4QQ7XfKb%{Vh1ytq|?Zn0}1DjV0r=39fzWVm`ONrTw^ zpfwZNv@Xk9;c=f-ElY0g@j#d^iGCS!V0A0mRuT4qpf0*V6>>Oj3+N^mh?Ku7)mObC z^`D2Y|d-U(k6dHT>xTC7U3mAWK6EZ%s; z3RVW+I1Qhj>q-&+3$dGDjFRPX8>NNZG@GyX1oD|hxeXc12L_gmxPav#Lq)c}!E8%urSHI?_Gqy? zCbO(3Ri%(l^N>Dh;;CzG!$6d};Z1XyvEdB{9OJxZwv101@gs~l!F=b}BF}i`KQQ_rBgo=7!P<72?&aMG0?HYkSLoCoS*F<43kt0Of;5t8Cv< zJ_*S-!=?1SXfzE+o)K@-R@7WOkv`Ogy z8VRGa{1rlU1wK~%9nC5Nq~e1X-9m{MKpJjxg`Qa~2t#X=ARhOG_1Y%fcf7%Y_q`y@ zVB_Ag=LzC+(J)xAReR6<%FR>M2hnh4X9Vw!`L$KJ_`46P>DAj!{uBAT@*Bfzi*ja} z-$Zij>|DV|oZE7*ng3ORZQ&CaVd+PN;zF-6N9&(8h^qkGq$fy3E8(WJ4Q1Qvl^OTn zu?FP~moNtAc3m?hOUO?R`9rIxkz#E z(gtN7?D?fl*w`qel5sS})DhC8YyrqIEZ+C03Z(;5sz_F(_tp$hQr)#v__rUr!C7s+^lm54-Gdve%z^<$I? zna@3$Y@#CDk8H^&POq|>ek@QZg7+C?qxxWyPDZlX$I^&`kbx+vUD@ZhQ&U-i&7{!P zFDuSP#Nea!3_zDm+ZAYQrab{=O|tGA%zWnw*gBBHl4RL1-pit~=Bqfhjd%KWW`W0P z#+{e40ahRU>(i3>bAiM(6b1LC3X*%|-GA8ZOR~ZZH$-n2=H=BLujo7Y;3F}s(l3OO z->YQZ9s{m^+%Ei?SJ)e?AKjvG`nJB6Ptt5RvxSkIbc5>~Zh!Z4td3Vby@zD& zb7u=s9Fwmu~7;`q|K`I{{s=Haj zMy9v|bmj9eqU9De`MvKmF}7uJqH@RK1a44Y5?HfQLj; zxtzjg8)u_K*rQ}eQF->8GO91XO$kY%-_M>7lZ)`0SM6gh?e1rT@V3UK>I)jG07j}t z0MFv2oP?*}CQsdJ{{FUtfgs3!?dfdoH9GKIXA~U*m&#>Da@#_LIbRX*Z(z5p`eSgL17b07?6I z!2rnzM~CXyK=M70K6m1heg_{xp)00M7{r)nmnq}&d<m>4s+xFF}h#K&KJLc@-yu>)Bjq=GmW^&4zuY4 zHzjp?fea&qg5AS8;pmHsor38WqE$Z?2mw$v@C$Nd#0i}E*HxU^0=U~Ck3NqcQ@u8f z8I#;p+4yH5)O)1KC;Y;L`&m3E^v(L}-rTMVsBaE5#%dJ0A3iU|9?PLtI3*KSDFvgz z!EPMC->Su&Wr_HAa+4byXB>M0u%*HbB~JUC5^5A_Rv>vdYy`6+U==t34zl>{UO@6X z)3V5J4MMLpi-hWiohXrR(TFAa=Sz?QLL?eLh!r^(!V0|+_}Ljz?})n=*H4sL3&m7C;nXiim*@!mVm_`^r~uzjXd`g1vb zV4(j^c;-lZhZ%ZQ7TF5BEf?6=##|Lfcj7;s<-Fx4WB!g$c4{jXLz#Qes{Q`7Rd=&X z>_eW?lw~A(;t{bIU=owjyMJIO4GTEfX>p?PjCH=jU$h(Ayb7Z4r#q-D?W5>bA!Ape zXp+5Ncwlg)P+JR%te+b?vZkeRK}+e39QP%3xeU0X8OZgApyhXYh0mp!!Vl%arZ$`T z3p{zZ{DH!3{@tUf1y5Bsm@e{nmk~MRz4)|qAu*|Gnoxm|yoV!i9cZ&o4*e@wS zG1W|*8n1H~YaJpZx?mbBR-`p{OpBP<%6R8AaOPMi^EmSky>(tr7VFfxh>8ow#x%_d zBjyEe`5h7Fh*~-k+CgZ@^|a*4E&Hk-cj$S%g--Yzpo~vcNwVDy{@is@IbMl#RT2oX zSlhY4v@DZwn|iLA?<_E*n;zJAKZbCq*mb3isgoYrhlnxVdiXE!MhpUCTE5(gX8W-k zJ~uJ4FE8q3Hmi1g&B)e<84I14RrpC_-%=r<0r?Spv zDXmLtV9ju1+9jDh=vN7I`z+@@8uA1pN^=RuvS4!jslRt>=lSV<0#>)DQ!@AX?+w_O z^>A)0%c$<0^ybc-HdAh@AJyyEz;yj}MfKH$r8b&<>-TBfR!D^W{QG}2E0^c!=y1_Ul#}0PMtmwKHek^+0X!CEV`mPy^G?e7l zN&0oURNF?K?27M~DiKi=u1?IEP={$*Yi002wB{*%i+OWcwx;$D7TFKgrerP`6&#l| z1p1iCW5d}Sqki%e`pNSNH6ZQP8g!5C8<&Aj_SQ@Mdp6B!@R`}D>jf$WE=jQKAsiM# z!WtCMR*?md3A^$h=UB-IjmRr-cOerRVp1;Zn8fS1r$hrgn zXMqT3fdprP7-vD_a6URtHYKAVAvqdN87_QiC-Ocz#D3%mlSC*kaY*gC6WUCyv5jlC z*})sd!5hZG8~efAg+t$p`D4mdN(^rYr)S;DV*@X 
z-Bih8Fu}SlKVXegS%iQY-6~$sqnCGnQ>nkz0MC>!Y+#|t;SnZ}5%iEO*d+T>*3wK4G z(3r>D_vYVB_R=EW`21Cj2_Ia`#dC8b5Hr8#vi2gr$))l5sHR%?KQxHJD!T5Q;AVbZ zCC#E9-K*c0rq zW^s93xkhZP+wMD1DH#I-8+tK0awv@^hP-EiJ?)hVzqtF4`RD6jY-}TV7>W=5u$Td< zw=?eO>XvP#)ArXpou^waf3~uA1A6^o8-iO^sTwk+FywKr^>o^dI7(a>A5TeG88uT^ z8(zRYvIp$DlQ-k7+tXUOt$Uw3iYKc|O0eR!cKG)5yB3yb@h%6*`oaXm&19FM#oG?# zZrLqYL)#~9nC=ubQU~>Xwp^w?juZ%jkc`GsqlOt;JN_WvF`_1V*oRU57}k>}veX{O zKqXL|z9}hxYN|rmEuVXw5E4FekC_bb*?d?cECw(R-tnU&aojQ@4Wgj>R-N8ug(96# zo4WNvZzR~}6#gVPgqQA!D4MzPWNqbd^{eWk+=6fM29}G};zCw3pH^4Z=Q5OT zW2RWZ#xz+b-2$1j03zvVx)nPstJ2dM-yb$<@IP!5f&yfcV znq~@!eh^}Wn|xJTLFC3y-}KJmgVVPdXwKMxxG&ri_*!-rN!uOFyWsVAoOj{*_Iipa z+4O=?1UUXg4GfUnxl&V~TcU;>JD1hEs>g-S(s?bK)tW?piga~6L)UnQf|MV)LfO2_ z>0q7%k$N~lc_Tc|tQo_S3M#3_o=VCC&s{;J;zqsAc;IndQS$qId72972MFs@N1TG% zPE7GnE{s=hTW3W*_{u)6C>wem#PfAwwm;_l5v|JkV#+PQXeUQgKF)G3Q7jG+KJoQ# z7*Dzo@)K5{tCi#6w~&+ms=)an(oqn`;~`Sd|M~BD4>+*@hzeWCE z=$JH(E2aO4yg9XQllkAZC=HE1ge#nH{mNLfGL*6@uh>X5pE=Cp{AS(a?(IX*G(zD~ zmNyZ%?WiW4y&<?ly4jag57+;!%AKb>A9weMv|#cbKlZUs9@qrSmS_YxB zUfZ;jo{C;uy9c19ukMl5UI<9MOTaNagHS>R98wIek(N%Gd|ag<`dy}_hK9{7E*mJr zd#9eTdp>>>B;#4`k<2l;WyFRQmVc)Ch}v))jg@`=d&0gNXY?rGg@NcdYx zue?UK{rCmlQrnP8G4YiMTaAO+v+Gk%g#<5R$-mNp?#C3@3HF9G#}d zSVEu_t2HI!rW6Xd-PV8|ribm;Fv9W=G!h_I-5!c682+ul4=-i{>`wviT60`Hfn*fPE0X>yUs;r z&(m4iAg7-PHffq9DUKZ0YK zIBGa3VdmbVH|O?7XdcFy+vc{6-@4KNIV z?9+N*j?fZlfP30)qh5=L&07(^-d_ibM3bUa+%^BjvB=czyRdf-MbT8B&V%R* z-tJ0@ohWwpm53_SRE!^abk$ugV(Upbsp?v(vl+B*S_YD{QIstj+qP}nwr$(C^Tk%hPQ?}5wr$%^DoMR`zwUA0z2p7p`|FHx#@T28S!?fw zx#o_)z3yl7VNkoI74Lt4YMQUWP!n|o2BjsKfZqmTPI?AS47gK6Y zW@E#XoyV;yF$Q8r#lhFvpmK_+M9x^nC2xi6_!W<|a}-?DO2Pwh~2o8x~G*J}yRU8p#T+!R_Ng{fa0Z zQwxaF$t=BJY+eulLRx_~2n*Jh*5GM?P7x12i?{z zC18xrQ*_w2xL*}=hp{2&ctmnlW&1wb=@&``bD9&P-CcDkJ>|M*+0^1{wf-&L=O;It zE!~u2i|7cI=adGBuCNq$7{5@JQis0*NKrnT*GvPOsTTV*&wddRE| zCk&pYIl)1aT}j|>w=JsqG*&DD>mPS|+-m7zoBMlvD-3WMJer6MT>g6c38Vx1ZaK;H zh7qM}tWoRLWTaI-aL4VkXncZ1&4Lj zV4F(e#1(R&$6Ypd>uq(Q_HtW=Z#`dwA z<6cs83V*7n0p$CuZ}7tLV>%?ISgh*_BH4r!94+sZ|eDc9S6oNH^rT9|?t9Euf3K48)g02}6Rn8ef=%nV*cc!1)VlEA2D)5+ za0y@V%5W;LiQA34x4-$*vQ@g|nyxPMG^N7BjzD@SzJTF-(s2XdnOte;o*^j!kEsW4 zUV{=8IV;Hv>c%4M6S&N$h>I+Yk$YT)oR_Zxe=1l7j?u9;@YFTp@e}uK|Chn$oEAsDbM737*v&d;-Krl){^0&qeNf3fx-FR%%CiYkWAy2 zyN~o?RXu28wD^e3e_Q;#d|SeR;dC{2c;;&Ntow4&9!9la);c%sPrg+>ioA86X!YBm zOb6A5=W-N24=lK;H$RhD#5F-sZSpceq-97?jRPC{deq33Pm0T#^+q=`lU*% z@&yJ$ToC}gbiq(v%6-CLVuyWP8f?{5#%cTywSeWPYlYKziiy61} zF&HVN5)p%VBYXv=C$TF0HkV7p=r;x@ppv&e)6x z>|J4;&d833Jl_(-+^~2J;I803`!Ifk$iI5I>XyBOyN$tHHlV9-?w$B=DasWh<|C{f z*Mo10Q%{IiBq{36iO>i}SD@{x26DqK&vum~0#C%dcgWQhvzMBJE|Z3-FX?_8sv`7t zFP%L5YHc}4zIPZQ+`=mES)}`@>-RZy@7l0$4J7DZJ8h*8Uc1w?$tAU^^_>}teeHVx zwM_7(+X`k4qgA@~U&;D=-geY&5YBYfOaF5Df6x^P<0h(}{zUy4KdJ5iC{$84ck!?` zHUIC#Hcid;KNH(U44O$3T{Sf|F)AII^U#O_aRLk_srez4r%N|pYs^jk#!fcod&NvD z6hSP(zs0ehWwOF(It}Zcu9i3auT$&1kC(5ngg`E9SE7PtY-dv8%r(T3O>EfK4GTvl zne$9B+}0<9UU(xSSTS(j2Geeb+5oe8k4o(CU|jYj>t7hNSRTd3x|z)lm)u)-Emz4- zS!Kizsm*3OWvx?8xn~0QE(yeHb+jSGDs1%k7X``~GYpKY))sYExLrM?_^Kb@W>U$f zK%qV6+5^-}?UyiF((d}~d88f6t;f~adFw}cWxtPt%ELlzk@3Gv*FY8Kn*_4kuhPXWsM- zQwgIpT&sEM;gw!`{P&JIg7YZ??NZpi0oOZh2{$e+v^q`cVKK*EBw!n@CqcPm$E^bl zqOVAK9mehQc>z~E)&kO+Edq!?C_U!`CjAr8XZZlqRz`9TA&j#+eqmF+58)teK-thA@8^=)3$1weLC3Xe8Mt;;<_nZahp8K`@0hsx6W~2!+8E zC9dw?6n>?dR?OQb>+Z`G?Aw$L6eqbMXYl45_0~*QC~8G{%+{PkNt}-MsYKfaPk6tN zeWQAgaR$?!48~hE`9KY*hVS=-;`Rw*9(KZWCbTm!{)DtUM$2C`LiH+tH{>kfzy1O<}-jM+8> zO(fA|ymRM%%F*&W>;2>L5fccpPE`cC^{U}~pkD*qeXC^>AJP;54eppa;qniKMUnV~ z(lx;&H6o?Nh_Pl_*{~8DY39l;E0}(11aaVWaT3%h%f4~Rh-q4w;@^sSJ{J6CR392r zVu%pT18?RBwLN 
z&IY*}ahhXf@+&UBie3B@WoI4}?ci2k)|u3mEUnaa{)Y!$E|`D1SiOun3m=k22?PQT z0f?Qilk!U)AQ=)-9`cpXuHa-Jg+Udux|U>cWPu0kQ=PcP*^pKs(o4oz_uJ9fBoA&7 z^UD-jIbzCM{7R#Y*nRqAH#aPs4H!XQtX8MF6u3z>*KB<8a-Yq8ipQT{f}v3@~H7Lf;wU)L1v(jD#`9Ta5DWEW7~GD>OL36E^JD70gqj zE%9BC4A+`p)+*R7djA^ad=y2+c%&nwYCQmcmGu%HjP26_Gwk= zpKaRVXCynw`Va@8_XhR61qlchMP*Q;3Z~vqsld=C{#uKeY5$ycoXHvg{P=pO4T6@t zm~$4F1lELY16(e26dFqnIuU^KxM0m2*AOuy#D>9L$%9-!LuXgZ)QDQt{FEH|Xod*YwJdL<9Vh(4R+yde3H z{u@5E>K@aQTw)(+DXR+k~{fB2ZR~?G}qcLKMFl5)Y0AIMqf7}XyBv+`I1NH=c z5IpFJ>Bk!d2NZxL>Et54oeO7KvdScU^>Rx%{(N6Yg6U(=EsO%C24^0>VUe?%;&C|< zVcD3$Bb)Ng28v+<$Y>Sk;Ylw-B!P5rUhHRs5t~uy_w|uHi6};5RUK$uf|>c8aA2+m zmFgJH>%4T2bGZnnkJYsVbT_my53!s2OVT^oF+STfzuK|r#Kd?Pi+{*sn#+VWr7AR~ zA@P{NQm=1jaM@Iz(UGbI;RosnOAU)=a`&gu3;jmPGyXb%OU46F0g@v@M?+9igIkEU zOeUriv#~=W52>rE;QTjuJEr(G(7R(=eL6vmQq+6o%oi5^1nLu2mAu1y9ml?mEl@^e zPXDpRHIe!R>VoNz{Ko1V=>KlgNRTC$7C(yePD(&P4F4A<{a=KLy3l^A>MMe;6!r^G z?xpmpAn?A@)aEHP=EypgHKj!K=_HZF(wlQ^71~-k%F5LwH_@==<;5!QW@vTdW@s78 zmTqoaBmRW$c(4AMjuvk-?YGCr&&TBSkN%$l-G8}X182Eke`@NmivLdgBY=%?dfDM$ z8ue4R>_Xu@V#2p=Kv_HEbR1gicN=@)iIENg^ZCl-11#r2Kk zw=<5Qze2{9GiudZFHRMd7(di_fu!{fiB{Y&;`8kr$ZHVh{!4oQiJF#Iw!q5qV);@X z$3OZlvV2PI9sJfKmCC66Y##EiS=l49tO>nttML}w*X3aBsI{KWp(V68v96oiB|o@R zc%(10QyhG&y9UX&yt}X-aaHX0!u+>0+&>us{)#ZwFDv8#;2Mv|ojf97`lA(riSa`2 z{5LxJ5A{1B*)i(aQ^V_5TaUf@uI@fE?cd;`Pf~J0<$trOpCaRe71x2)SB54aK#P=O zuZ|ZVh~$z?`$EvgVX7uYLvqqE@ZT7yc{1v<7xx*2jB85MGSWEY9H~f(zr~8kK%F1t zF2)+}?eauRx7Y;2PL+S^CE#i}GNs09G))itY_dsd2?ZZgG7`!va?>6%5~?hKst-eWmze;$AgT;k+H-XR*GZmJUWK%jn z4W2)Hqh*6%pKWde*_s=B90H>Qr#kO^xLf0=s)zSWZG97%I$r8He@fOLW@^sfefRj``NATO{8Igw$Bu@DR(_ZN_dae438Y8-x z@0BBhZBFFsD`aPlY;a6R4^_}yEB7wr9I z&qXpB4mv>aN|Hva8&&D)E2>Jdz?tFT7ale7B^&vX+p9vHFDAZNnYa6j0;Rmi($Mj+jw{FiolC^qE*&vYxAUQAUr+4R9lH7>=)JhI}8g zaD0k%t)1UI@tm++Xa4x+2EB=&@E#kCPf-`=3AZ)8Jr}H5dld=t!iyUUer|}6WrIL{!ZLSu(!_~v%fwYAp zrd2EgdMDsuly5PXg~34VH0_;p_^b+q{$RDCB6D$JWtDYy`IKm~fX`7p9(fp4*^Y zx2VNYEOw?`w-FMKvUT(Wb=u)}cEBpV-dQhJ!W^9c3oIs`@Z80;uiC!BM;-fjG5au? 
z!h( z+Bi@w*Gl@ba+lm+Z4Tr`q9yJd-Aqv> zs+6bHc$vC{AB*^v34J{?lla=C?jDkV+tF<=ySSiOKx9n9y?>1?;z7YY6mFO^#Jqix zaUk22I4A**u;U8iESCuWjgfkVlhoAg#Fg{Jnv*CPrkv=Pm!seM6nc<9R1Ev(ubw~V zQO83<6b6TuP5_(U0&ABy(%c_AhyF(5YQ}_$ak-ZNn86H>FS1rAsB{r^)=Cy&!4Vo= zqljXy5Gl3=u6NM`y z{JPc&*&P&xqk$}vb5T#+L-s=B%-#u>zGLGHwS3<*;3D@TFi6}JmoX?hGWP;*{VFir zBlm(wkh$X_ckE+D>4J7N{uI>emc9cI#z)r`6CFC$#X*{{?bw%EgGVtO$k2HkM=>11 zCEppsrT1$eQrw?zgJ$Op$e&$2d7_7E5kJ!@c@SbOf7QA0l{M&N*EXbRssSAkl27j!H>7x~0BcC= zjOi(R2k9jeA~=vwcC%jvC0qrDSP-KRChWZRAw5AbeSoPk zF4Ht=$bH4YBGLI^W%dfV!g5*9eIcXZ6+aLNn_9>u>(58V=aU_Gy~B(`uPemT!tNTE z>Pq=b&qs96kZ!+kDa9uxItqXCE`C6qPJhpoLdQpP|Lhk!#Q7lqRXu#>FKH5P*ES^4 zpo~H*5mi6`t8dT=%d_&qd*sID!@A#=-|cs^qm`thF=DH#yqr9Ub2MZn^_gOdUkNvi z1d1Rk^1b1`q3oUJ1J7=sE{b0a^o2Xx1=G2zzy_&q&_NL5o+FB%)VO4-pWOH^^?O)y zpwa_1MKuUzsHat)C$)D%@+-42numyU`p_)J{?h!9)+joxpyWaJdpn9Bq@(tmH_Gln z7WrG&U9Ms&W5tqu=^v|@Q^r@%H&EP9j}FPTV_L*uJCn(IG&I^&-a#x36HGrvtbl?e zNx3x<4p9?Yt(YMN67^gKa!gm*>by$$R4(kBDA>1f6y3peSvz)Kc@x_83XC3g6Wn)k z$-}1Ki#wYm1!{`K)rm!^rEZ9${ogHC1)`LL8nC@u2N`t^QS=mjvTLzJ-}1aQb=a`I zY6ltjljIr$6xwg-D80#sUh6AEV1x6bz(m; zcstAHLjEMZHGlF%8P0kZXmV0CE_!0jvdm&5D^mHn451Pi6J`M6bw~+*2y@HS-Q}Y6 z2JcecXTLGP_G%xr)?K4?X4wW}|x8ChM2 ztmB`V<1oa_+^^;SNQY2xyhv$v-*zb3drbM4M!s5r-`e~MNXE@?-cqU8QWV|UWiq3n zG&l$`ZRgfp@Z1y9htBrN!{eRr;mcY~TAhX2qGGlW_VWgn@9A4xB|A1~M;vE}6qbXN zTNIzGv{v9R)lJUf$NWOLg@|Gf?$YL2^*;xJoEdQ&F=R|c zr$551t`1HsG{`c2#y*2-q(^lfPeV$vo@qEtgQ}}!516-b3gYCVoWd?8+1U&Vxhh3 zQv@!vcye*m5Fw>FeT0+`-C#4=a@)#UIs%X#GR$Kz)EIRzYS{V>FYjz4G>$i)& zrNrVAlH+SVbS3ZrS$df4m9!69%BxCFUsM1Wd-m_jUj}hQWS=c z#q`+Q6r)4H|I0WYzOO>V6e;#b8AZC6gL2TT&O9;>3LtAOudnxz9?YWIvXzgiTwS7b z)aikHU409)z(YTgCitMDwni7p=S02E75m-z(OD(d7e`PsWE$91=9Vov7%&C|@j9r6 zrI4WsG*-HbGR58PABlHsMiK}{fd1vhh{0_TBp%4<~u6+pXj(Cx;I0&Skx!1`8Q+Qd#}pFpR`Y0@_!oRy{Zql=y-xu z?+FthKW8zsBQWbH$UiarPehLs(`m526)uFJac8J0dM&B*a5Xwz9fY7}%!N9uK@%TF zDXpo#q6$ow`d{J0R|aEI=&q*2#Pucw7?1T@bFiYO!^X@O;qglbSn3!^Bv-E>IMEcTw%+{!G$0&POH!()bGS zDfNYj69|2&gJZop8a&OIW!yKIfffEt+sYfTtOP-s(<%7_rF=l6PXlxj?U*z^M1#zk zG?C>4g;OU;sQ58@jgudHZT?K1h%|qV5N76KUGc|Kp8LDD-u<0E4GJRzjeR)KOntzk zyWtuNA)<|#<${)(;8zClocf{n0OouB7>rGWJW#=i);z|rIp7(_){uJ8oc-!^psRZj z9SCuU5^5*%bl@J$xSs&?fonR<8>8^L>^K1LI#^pw+5rER*&9AD{7rb^;gyH>@f{(T zNU!z?N+tv<`Gy^y3la_6GLH1Yr$E}(I}2sSbbpJnCQYwX+ClL$W91^_Xxlz z5u|=EkQ%k$BGFlm>w5uJB2=-OFAO9_!28QHd}Lz1Ak%ojHqvxqQmrkHiOgk{jS_8& zjiwlhXP|{AMpV|&c8aMy5m z$ENAlsA2axz=U7UvX=4-$$LW=J)ldcM{VRUKUcHcR2<^%>%x4TN*_)z_AucuT!$WA ztgUk1L#VQ^xD7|0_n9a^4d3Prxk0 z)9~0AE|<)|CtH(2958{HUel`wEi>Y{M7Gl6dncmn*ZnK3HDN>vmzx7gc^f(sI%ftr z$s^6@c*1+vV%mkp*y2a{1@2DrXV82q#GB+#s85n*zsy#5P@qE6ZP2z8CY`2O6M=-u z2gHxO&+-F9d;nrX!|ObvUl5+4SYCb~8Xt30wQ^!~Iz{8$}b0SM)=8o$;KHQ)NYh@imn)Vpr&vhFbWJN1zg~qgOk@zB zIX+G^l2;j;?DPJ@*t61Qt!6_Q{%lMmg_Lx(o#liBu*hLT+8?VjCFYnvJ0ir)f*)ZZ zcp@=UyK2RqIO%}6 z7C7bIX_P1DQh(wQ42HGxB5ri~tn;uJ*wdVut~e zPvO-|&`L_|cY#%U+B=EN|HdWiyu29~$lGgRD4gHvQs5;OsU_>@Q2=geH0&FeQ%F&cOxO+0WTFR_0-WSyWZj zo2FM`_08N%f~|Bd8FPZ)e28NseT+d(&cfrkY#HrHBu6I2<}%b@yo^`&yUQV9Q|su^ zHpwWII$;frJO?kY2;QJbHyPLb@9;Nad+Yq2!mdZNWf%>B zKbXRJh}1gZ^j$eVQ4vF1=g!gkUcJb=LMCz${`5a;{jkD8=rfv7!&lyi+@qu~+(*A5 zb;Xv#skHmp<5qU;h)+?)VsAiNUUc7v0wToXF8ncRGSFPYMdvraE44jkjS&x(J+uyW z>?>`X_wUI&S5V>Z;aK|~e-Y=QF2k!A&ix*LN^^F)Lm0_pxfAoQ=0Hv{9A)m$#`GPT z%UvdiQJP&KL>_sqNm2XIo**VA{?hs6{R_+`IVf4_m?|-<0eXTtd(gfhSql0*YrA=!p|h zfD2FB-epCW0rrS!wIgUk_KGUuBKY&+Bv6tbs&4I+TRlqbG~+hp=?zBEY$xQT?$N`$ z+D-Z-=2kKM3PUR40iH}<83;}|0m54E-0e3m*%4nDE7r)g(v+$ZUaL*ev}W+ME)DA-Z=DxdxUJ-w$V`kAI;jcqZ@MtI z(MSWXKaeM5E=bFdz6Ej^;pGN%i}?u(IHVrY^C6hj@RE|=%0xS$+RF7Ws}}G_M$EWV ze30Pjkf>e)a{J&;aD*_f?@Z+<;YzbH1#t->2nKM%4iT*HR$Lhg!7*=IZ6OJvwr^r_ 
z!v<9o$2#H+YC7Q#IXgg_rf~Eze>udrrdh7+mRfBwwWj!uIH+HQ>>ccNTen)Y?Qxs@ zoglTmsE_Wcm4+q@!xST21VB&jv~kins^+rQ(`uzH+8E;YJJY7*2uTTA<8@iD#yA^xjHaJ(KEn0K4H$Hl(>)jqcX9=y?9rQy{9=`Hx0E_Ksx`-_kbwM%Vq$YqWZ_ zvZsFbj!TwX07w9F?2+z#nw@GsR8lU_xtO^MO$`bpcAC_h3Eqyap|EH6piUA<-}f%12TMa6*C^V7hwL;#q}0)`vJ;hBkEZ9vCVP-oO_1#hQ*-*vPpGYE*vdm z`Fu0|rq-;yn%!cgvMx`{rjP59|A3wEFAtYd(+C6A7YWtt80w#2(iJ9)>M~6?2OfCKwBKF4eEtDJU7;Cs6wjF z41lWL$PYrKD+@GkD?87EA5m+OGoLhTS=aD@@6<5Xh~nXZHf1o_mOYjI_h%lVhPd%k zO|&QP0LQE;^crs{U1g0*SH{@XoU`0f&U6?|NYDkT*Kr-O@ikg}NGr7nvQ7CdIC~H1 z#8kT2!i z@V_8o`l|n-AiViQ@PAiv&Dihs@S6(k^6P$y4!O0rhA)Eu-6|_X(&A&c?!W8iW|Nxp6r3=L`i8!75ou~ z4!k9#2#_3aOlPIFR99l0N!)$%QM+X%buz^dP-Evpf-81k#r(5b1Ay9n07%lJKKJeK z5FqNKdt$0S%*A2#87=u4hN&k=CKaTK@Iz%|2%gLo*6GyF(ou7v0# zi#zOD#+U={_q4+FRJ7oAM)MMf__HsEm7hbT>{`uv(acM^zXV>e8@|+-m{5M*>Jp)w z`N~v)XSk6GY``sQma&+5Y?saQW_f=_2JWSgew96!Ig{aO_e2-oNT|%&|9N)Zg5OZw zh2JP7ZP8zD9Pd@&`h%u4Yxs_U-Vxv`IX1Z0*M~I%&o>AoTD!Z+a)(!iGXb$u=U@a{ z1z}*cC?wu53$AX*GNnhq-Dkg5sPHT(4QCo?KryYH-zvSjgw`-w>f`WoGdOH@w&hGU zw!!iItv}2C?pryfu)DeNOf~MgVNB&`?Tp=|H(EJVb|O2EMr?2Tk}DT@BsEvB+Mbm) z?&>dIddo+(P7Zkghtz5Aps&VQe(`8@?^0)$VYK*{MFeqxs#{6=!wNu^MKnty>j)0< zVAv;+lG4XM;MS!zu)sJAa*iuxN1oXOC7C!?i#8Y>w7yZTyEdn>pBImbgwjP(=^*x9*H*1Kjembe>oSSDwzTK54Q>I zcv^_S|3h|^k+nW}=EvwQA^tCo{=XqzYP&x;NTlx;v>5|MT_K#$U=R&z`(q&SU=ST# zsVp>R>i*MfcWcByt<&4+ZwY=B$ZP^w`CrAcjrQbV71x8;Gt)DEkJDFE-ydIJkU`{V zBy&j`vJ#SL+W52e1^r9)k>t&EwqAvcKQ|gJj)Aq;G?L(n!o!?LyK1@XDmud8;R3SY zNtvze;>DSt>!^p=D$ytt%96cc`&c4_^dE}{iq?w(JEUmqaS725i=G2Wg4U2CI@sOT z(yA^FyH$i1ESZY_?GL9+K_e(kGj*mRnHJieRvh&G>&Ix(tor1pHrGCW-7n8Hj(I0T`Dr zB$*aMD4MGgqN`S_JcD%lS(FPMY?Oa$zQW7}m)jonk{>O)o9M#&8tm0LT7wjO2vpfxNNGcwi3C_M)O!Ip%Nxo z0?K^Y0woS@r88TReRQ&yDNql~m68GN@BKbDRHZr=7rld$U`uh4JL~!@{+bI%vJ(tC zsKWElpf@^48Pd-suG?u`!$XGp!L2s#l@~|LL$8;4e9Ei}+ws=rl!+F2gkV3rf zFx{!5PjM!(3Zv3GqUAMFrL3n$=U3b!=YOzxNY$Y{t?`3>=rx#x8w$-rr1uDxP(`}u z!|OYl4$m2(^oc&P=eHfP#tnpcZF#MStlD5RiJS=OGV{g`x*tD)R70^V3{zcg?ms4 zQ+u%rI0#5(@-oR2Z{dCq!j@pekb*&k6V8fJ;q|Ixp^Td;)9br8->*kctG9%Lz_Ewg zxNK$vR-^juIB^A_>r5j|4Ab3A?-V7sJ#2_2nW+{c!=mU3YJ3((f8i=ZuCL6VqaCiI z?be%~s*N)yThg|j5umB(rx>wis?Saff)4_=@L9rzo@}50<_X0}P!bfW*DzV9Pxj}!aV>-+`(6eAf zy{2n<09s7P-e(S2J7;;{2CGik%<%eNMTMx!@U2d|!>IskzGbFRZFp%iHaH`tK_mcs zEKz^m6QLrMsG3GH{rQT*i@CER7J(?q+3)^7)P`#dV0iQ-h7RL79szC%!*?l?v&>@Q zB{O^n0%co|!V7hL=5T?I+>QP-0TrZ_1CaUh)a=?8jR((tnDwfsCNW;FFORZ?2%`!| zC6!FkJH{iGIr$dRo)hk-`_r?%$2N%`2Omk|w)kuzdvaB(vkcOwesw~=Umq}e0uoaF zE+@P?0wfXy9$_nSY9CNEx-)*Yl$V7UeTGB>kCFG{Pj}=`47&<%NBNS<@D z(4HS?Ji3qTF6gi8ZZQ5Ybo{c& zBmutmFapc-DkgW&tUBTsYmo`&4{U$ywgl_y7L-mY20o|3`^;30N|4{ugKlTQX{44- z7T7Sb#WUEy9=C%YApiG}3d?7MYJ~V_u2$=R41)Vl5Be|n;Dz>5TV6ih$sCunFwhVY zfl92iBoFhCKoSa()ozCWT9JmWS{_C>4J=1=g_qD^z_dIw@031Xk8$iQ1`ZLk6Tj7-! 
zy;|;-8_iknwFq5tMbGP-jdQ!U-l^bJ|5|<@K=U`~E7ay+QzK$-;`mO{h@{6$J?ckA zoMO(E>I?R=!0!WCnx2vaTAH4+14A`G(cxBx7snSu%9Wl*V(&ZB@8a59>$~2jK18+8 zNN~uU5p47v7+7=-%+`Cf%60v%e$|Q-H)l-E^7NB~bH2eDR(!rm=tFM~_ZQcGzC6>@ zFMXzPf5kXG-dWcHw-*9#i~A$93H$0vXc5a*nMT zNv<_8N;;Jc?DK=-H!EI7j{y^Rhp5~P9%F3Z*_p|lxuMDHt^sJ;H!RFQN8la;E6~$j z6EmaSZ(fu+GmY1NA-Rff1DSrws4I8l9sZ790%$;%wuPHrt?14UhzZIpDA_qt-Y9G!Dt=_# zbjv*$mN`&jW<}MJDko+sBVi+JVJq4qqw|%V>r?)v8YWLL9_v=TVWV$eeX>yE898#l zd3-+LtLZ5WqlcZGg1y522)hlddjvGY5*(~59qO=@-`4H%;(zj7wD0sg7Thx8`(=dQ zmR+!iU1%Z%kg_|tJY>JqD+JK?#zk-9e^lg~}L`+I}c9K=mUObQI2i;JfhH7h@vp{l-+xzE8nSe*lDanAg ze{eedSbIm~Htu$sKx=P}%l$?pKRw%tnOBpzGUt}dCB zLYXOXurD2+&>!$%HKAxyD;WO`6T|9}y=1H%GN`6(DmI)1*&qvq2gt+6CekpC7&1b~ zYRzNIQl}{HhtND)t~f8;zL)Lq*~JbbB)z>$xEf2PGL{C`t7qawLx;ize1s-nU8UnQ zR!*cl@2!B1B7m@+%MC{Vg_+aR$6y{up=NtM*M&0H4TnfdbgWGp6*>xpjarGc=zJ!& z3)@C80|H8({|xHXKQhJmd~?J5S#fajoQKHGut zmuXyUKIamaqc1nH;#$e3PFJ)>OYLL3vvRf*2AqVcqe~2T$WF`iKA?}nMS!hgjj$gN z?Ex?ImtArZfGVZTG0LM*t--B~;NF+|l8hHX8P3kBai+^d5|VE=l*{z%>`s|(94c`` zoEQ6vF+KsYB=n?79MMJPj?cm4&^v+~xidT>fHIrQE3CWBgJm#t=-Z!)qV zFj!1+Pfqb8SFnp(Yc~{h1FhNGa^O;Erc4p*Rlj)EFNNh%BPzXG?OE*o7Yl9TxI1_P zqnS~yorov+0WK?n;*Y|RH(Tu2R53ImuPx?m!WmdR^<^Sbev6dO+#&&#@6n7>Gs-pg zG|gP_y6Pnef1EWHlAN+r#5I=uysnrTZuV12xXJH0mNeB6JLyoBrQsz%DF0oESq#@C z$|$}5wY{6CaLqPwLAPS>aH=F7Q8SQC~=XIy@sp!sp-2AQ{Ng#1o%L_T(E zBUd*BHr94X$gH|*3*&TD58p51ZJIKM2&e^D3|3E`u+GmQJ~ZUEBB!qnHYLL8!Aq8rTm9oOF}tL_H0=MTw*aQLk(n! zc%zB1c%54~kEj)(#%7^-EM3kc$EqUtn!tUkl;bFoWV=NUOzO?>{^uD z?!)&XhscUNMeqpkW%?jC2=h@dd~f?hbDE1^<1m%tuHSMXUKJ_LkU3t(-C(azv?k)Q zH>@TQi4lvMINYogbLPm#OL#O2FUA??TUE*`45DP~Tm#i@TTjiRHH5**89hB;tKQfZ zpl?!XG8jjXayInKt?I*DS#HIISrs=ft-Pj91MUG?j%5JS5x8CpL=Hex% zv?Q4<1X~7K`H)$gh+8Zo0u#66I{t6cPGL7=6UP%t1#;}e*5n3=s$q3LiKd{1)r{9C zU^3j8xD3OGY5XDIR#6^o-RX9uTF zD&ul^C&9?X6K|f6lbA^!4`EoyY;@Qjx-o#Hql# zwFzu->$1rg!@s{_&ec8wn^*PGEyCXKlbYk4guW(r=Lj|SAGTnQQ1HLpE_}Wa<^@Mz zyB7VV3;Nm@B870^=^e31w7GRt$G6q35>2w++QjkgFPoN2xGC(H-#p!a5m30mGWsfv z7h=-8h3;e?YZG5t7dgE~HXV&OuVX0$Iz)W`csd)(kn?+7JJV4&b&qp49c9LHMS-0y z-kapq6jzR=L`xb0^EWN(VU9|7pR&3ipG2}WEkGqb197e!i_^7&yH*VL+|v&)Z9ofO zuW(W_9t8P@-=(>O$Dqr87ER+Qi-75ZyfxjEuB-R95vlE@5fb|}02c_Kz4{ag-$gW} zz9aVwr%LWYQQNb9(^LUcVS||rVZ8FoA9!sHhM5dgL`xSI=fSumMHf^;Eq`G~ckIjd z2oTywyn{sdh_zSs!o)4=JG~1Z=&T>{eG?9Ueb}Y8L&L4CL!p7O-a`r;n<3PABdyLK za4_NAd^Zj!+?Pbx=z~~)XAUPE{d$yqH}&{#8^|{jy)(i(wyBvtwRZhG7Uy*$srW6P zTNz`xwGR!GZ*T;U{tmiNl8}D`q%(@78~CRV+pcUN-%OUdI`0lB-mD%Yw#Gk3fir(v z_{t84R`pHQu4~|59|iSiLYtx&x`FDN%Sz(E^gWl<4o-Rx+Xn!8k&gz^s4cGx^GI`BDDG#f=$%MRW1ZKJ%!mij3@-Oj%#(l7l5$G#F~U(irQx%IFUjIq`|(UPtjBqL(^U>Yqx?Uly;F>6QMWZ%wr$(C zPT96?+qP|+r)=BCDciQu_4WNbeeYAJbCaEwwO?nl=bB@$0j3asF)s|XGREcp7TF0} z7fB_W2gLRYwf9h7f3#WuA!D=5F5 zt#ow`2U%N?cd>kvZGtFYrQf7i+w$6SC;UGC%THSC+t&K5UYnB}icg(yyh@U%>f;-w zyMTe2!yoxo&2PDo=6iU|?99h?a^C{t2|mT$yxW2Sep6g1E3>gtxjE*0xr%4ZAMO79 z^@3aa@*64Z0lzURAF<&VsAbnO=q0R_lrZ*M(KvN3hSGPkyK$xPQi-KnZ zniq2XevZCjzx&9X!_ps6Dk@Hzh=vm_9TWqC`9|>6R(Y*!4HZ^_p|}NxgQ}_PgQ{Vb zTjP&G10zXea8+r})awoosWj>ba`oek^ha1%m6V-Q(G^AxAyd)!?rO}BY3kiWG5Ro~ z!8Br(9@YdpNtNo2H5#r`eIrT1BN`sfHV_t9ry)+n$jj6nE7;@e;N}#W;gYDml-S6QlYZ|4vtJepY zi`3HGKSOpCjIcb`={hd^=#-tAE|5AiJuo2DK7jEyi1|!P6w{HFnP>_4C~74K+b8j5 zkSC^Hb4Fi#Er}H=bM=CKUUa#$P{{26Rmh3D#7xESxEVZ>}JG6aQDaEii;D_}&%3z}-NSmB)kw`us+Xvhdpr$Vr;ok z3BNlY?mR6bCeIHB%b(*mi8MJ1A(^(e-l!eZ*=WOyd-ato0~;__Vit^VMq>1*)Xs0b zoAqo;TNmbE&Jg7T$P~%<552Z2&G-?y)5)y<^0>q8p>A|Tq0ha21lLBB^_zFHna(B# z5Y4Bbf>DR-)sLkShK}qH{vOjP-`h{CMKDgDv@603jSCYy3;D|>Gl3;tOsE+S^;aS6 z0ZVRWz4M3G2b3QpJ@ITVZ++LamB*BO# z#ZBDDS2A7XgGK)~IuED}+jKj_hmkirkCd9uh_bvgbG4?7z$C_-*PuwN_kr>QMIHa( 
zJt%NAPc|`IwGVW9rtXkwmS@Pmqp?u^n5@JRl)TX$QSDjz-<*&6sEdk!b*`5RR*bbj z`);L+(+d+}N| zajtq|X^&C+KS+A@DR@FCX5DRo%2sz|*2|p*HZ1_hZ-Zj4rbHBJ=)}h?`>Ic$0eLlL zDq3CzewUp02E}`cq95+<0DMl4_6EGF`TGTR=-ZzK#k&H8J_-_srF?)oeAVLsHw7T5 z8{ioW0HgFNVt~oW15O%1Rr_OcfTIawtQ#@UR@y zT!j|aa56j=dNrpg4voEW!N{TpEld8Z0$S8(6E@5YU^bU!o8N*S#JmLSbb(i=^@7Eq zpEsCVF{fH$SQeLl3%3}uJ+uJX`wIfmdWLXp1rfC>A5o`B)Pz{-1GYQq(#HmwJs4`S z6rI!u2QqUVnlLh4yVTFk^15do9Lx_(Q@#QK4CPvoN&pwtj4gVF+H8wq6SF@lssW%K zbtEeuc0q`4i_qqz;vSJ@Nvt3-U(={y-K1@T*kYl|^><%D1MOz8kfh_Eb1~4a=#URG zSBRFg`yy-CS+)8gw!s!EM;O5@uJQO~9On*k2Z zMTjGh5EP$Q9~`HA_}rI4{3}{vDVikZstD&1{7=Z@>YWYcWk0&p@s(WE?OwUteK7&q>We#CS|y>E`gl$QZz#RUTs^{i0Q zPVjrUe&>4x=JWb=%1-KdgBgpJ$b+|)!K07({*XE()jh|mUFo}Kh{RFC008j%OA!U$ z;LvY~86FtYHhoZ>5D9yn$~^~^FJ$v~xj^i1f)*8$mUW_56#!@r&Z@qe)IJurP!%Gf zk%{RlBNg>YgM(G~>CZNK81zIQXg7{5p`{Ow;W_5luSVH0?M%4#RS+10;NM`6WPrVF z_u$#Rmp2dy@_Y9y+EfiWiY8fOZdc7-SRP`xQZj8_Xnt9F9*fSOT9eV>0zEJ zWsGcUW6vTqdIOmBMO)p0?{Dl|_d!SZY)!2FJTWGg@b%G)8_^`E8pfOGj!5rBPFZ-sayr7 ze1&NjLRUUB7M`e`sa-f$GsxqrqDs84TYF^}+t?A#o(v8pYCr#mF$mOfrXw!QK~&bL z&YVuEWYFpG^0L6Z2-PIU>XeIG8)ryqR zZO?}E%mGtQP{jd)ohkKT!@GMC;%Y$u5ie0S4~`e)l30U14*}rBMXki(Z_WOy11ATmLG>oOH zZ2+l7#abgtRnyI~5yxe<)a1bx(rm?Oi)poV&#Iz;3lZCb%`iZ}WKk)!s0?nEnAxE{ zh*EWrD=O>)sq7mPYF;8Nyh*rN%5s*i7+Ls-8e^Vz9+Wf2M_p-N;fkf&L3nc$d|#et zzZ98Bw}dd(+NX5W)6!xM5F*R|j%DjuiM6<5{aEgocs%2drA97ua0Y3?GaUX{jXc+; zDa0yPsHlwPWl%&KJEV%sCNfxhT9=bxKblpoFJ8O$UhL*grbUccE3i>$pF)Rd)V5_E zHF&ugBISdG)1N&vgOpxzJdz3ePkmAosopS()HWm*xvnt9V;=~?;3CAy14nNV5_pbF zI(B{4Gih*!Q+V{xZL9R;N?y=f z3)!#}a?h4+7;U9)I(e0;u<2_GWZ7nHY{R9TiA6aFQo)8*ETl}#HI1?|W}R28goQyd z+a{D6?ipKYc+D8fI|K1mS7~>;v~MG(??Wb_H67@N0mv-_C~tCx8L(x`4&a&QE7NMjgO~#h+KW7t#`UvPa>uG< z8~4?%xhD;}OUFK>ZwXWlYL%`{a_Ty;N(#DDUtT%IY)=7V^#Q|GuMf5xf_PQxR9-g} z>6vQEw6@>znaf(=u5b8~c6DYuv}%pJE~*>4wf<#~t`zT!EIjB6Z`$S30DX`=P{nIi z?k?V>Kj`1|CDKIdoxV41CyJQO*hfufPfS{uoCwj?mi@G9(Tq>|fnOzAS6;mhS*rk8932Hkvva>J)nB2K&Top%I%pmAmDoPm z5TVY!wE5v-#B`r9+i$O_V837*ZSFK?id~$@HdM)WSTcuBx|fVjO`$M!7ZXPNamW`h}d zv_`8WV~Qm!%sDaOYg2+ZiIVCvKoMniRO?X3+pC+(p}{NY0pFTQqc4( zJ&ls96;Uv5zx9vW^$*_S$H4{tbOC_h2q+GP@`KTdMnn5y__RlxGq56<*HWarhw~`I zkHjx!VLGYmM-$XL`YMLU{E&kzR=le?FkdEH_vbNY3)y(^ZW2FM|7dR&*ba_Kk$jo* ztZf^%)t(gf_eUk1B8?rCh}&aEwj&Mmpiz0CN*^8QzmZJGKga{unz2LpR4unC zP{*cAFD=<{!WPz6V%Q}L+4mF4#0V)b}iE0CG#laP)PCfX^;ONb42kK3PJF3wP_&z6zzSL;*nK_a^~z1p3J2ZO>)s+3()=TVqg zg)<5^mQb-XWVfTNzeW+cGuNCSbEUM_HcODe*0lLlZlG&_2g`L8S)9Pe1HDPt zk~?@kXN;W0v14Pu%j3_%jYKkoFQM&b8TrT@VU?PR`iWLFe2fWdd4LDz20^Z@G6(I& zc^UA%GzTV9=HA1M1LF>Xf+tG823hnk?L`eTUj)^}@= zB&`AsX%iM4wgRnplw^6r@I!jE&;z>8~y1***5C^dJHFdZu@tc`p+V+n3x`Fi{uW!KK3VBm|pW%2qP6gG~p8> z`JqUD7+=WB7pLY;0%lh353L_o>Mei2<2QOvG8YS_i@Bm_yvw+bE}a`Ex1{rTw?ZNK zCN?S8&GzB)No_eFeSSc1G~+QUMxWkr_C6~{|IQL3n-;Nk3kgk_OC#Qu7_2Jd{krnN zyOL?+q9*k!;t)wrQPJM3j?Ko>7VG5mWB!>}|56a5jtjn5qR!ad}J7Hyog*+N}Id|}o`p8Xm z=37dI>9eKp5P;v?i*0`(^lM$B;F|Z}mR~ z?HUBhtruH0fb{E_f}p6=)l+jt!5a{+xrE?Zf`&Iz_3w_uPy~(wVw1w6NOc}-Ha>T5 z6Oe*+TtjqK#`wn=GiScN02~&he4NSkd9V(9-DIvi4=ffv2YSp2>ZQE7#q^#>hcqjc zsKvchjPjz1Tpv(m4lKUT_-TxZy9nLbR2qHTvuWpQ4a4-f(JMAmDF|InEU^(=%^ygKG{j(lU=JiZ^^ol`#A(<2H zU*8O!cA4#LoSm5XSCZv*VJD5Gzq?gya^NHr*l%vPylF+?TIwCwv8vTgThi*yT2e9B zW1bI*gs;Q`hUZK1Art9G280)b>1PJu-wX|5mZt$dVd_x8N>>@tt4yGZ)QWP0MHj2M z`KGGiuYfN^_0k~T>2Kpi)QuQ^Y^sR)zh)4yKtF3J9HgAPy>({I=&Vl&cJ#PDqH3F4 zIUsRe<-A4R<-DVcLuk2j{$uW_Zi2#oGQS%_cR^u#5&`3`9d5!x^k(Ks0!I8kPk>dv zJ%4BQIoj@G(p9rajxaDkh+La%@L3c79bi8nfMd~2`taicp<5!$Whr=h3@gjj-usJy zG)BF(+C4SDA{8)9`G5p>8k9v;WG{`&ebiU(xr9ETlYXaL_RT>s`JZkI$q;P0zS#x(62AG~V&pYr*cm2z(I)sdJO>gs$}tj>VXKr@rI_R|7AYbkpv@ 
z;&zhnlrZfWd-5C}oNTvK@8IZrCeiGa2R^G_qNOnGUwpi)2cc9cYX2<{!Gq3RYO1xN0$TWq>;p z5)pa^sCi1zVHYUTlvnV^D?Enh9%%?Us12eLCN74Jsnl4&Qk)CZ+5YWo2uB7p1@Ia^)AwlDkHXtQPEwn*xKu!=AjE zN)MBTC1VswlGO!gjUTP6{TlQSQ-TryjV=iWe8QctST9gQ!#c4 z3?!3QYe@z2h$xYoxkxPEisrd16`n&KQGY*kd{~o|7#*G2(1e$HxOv1*ZyhZzH*wqk zD)axQXw2s_3Nc281VyvSGnhbfuM`jz^dFJt@|Kn?W;Z{1a7z+E+s1ePD>4O>U9m2~ z66`BmVQd^{KG-b`#&J47m7!+`-h?R-6Qs6_EJ?9ua_vf&&6{I&Wn)Q_ce>16 z-hnqlF*1lgFvciT=bB!?OeM2t3f~F71DpwQK$8o;Lyez>_jpBQ$J{P?_{QnM62%K) zfXSu4-%PXD9&iO0lJ~Dr%niPVz?FD878E69cneQZY$zQUQGHO2VET2Q zWW;Iqg>%Md593&uA#Y4;cu%now$m$CrjYdalUkvDE)fZWBc;z71vTt#GUlT`#==$C z#T)v?wQ4?M7zf)wH;DP=Tz7aGUE+u3WHq&w$dk_ob}dujh8vm95`xQpW3LD3`(a^W zxD3{)v+n8lJJ8!ZQ=4)r*2pEg^WZDyb$y^fL2}bGkkAEDUX# z0VeK}1zMnSe%L;>JfeBVTaPlw2Ue2!EPOo`raM+oKB|6W=N~n`edlK=>0j}j+d%v~ zo(=CDG0K@`a(tKJbb^s9c$s1Jn&gYT<>4zICkmJ~R~cdiSm|Q7#EatD>#DyDW&$BC zL_rXj)leC8;>FH-Tm_txW&3fOo^XTn3wxMR+O!`sUi_zj-2B6#o$=)7RcMGE=vQ4q zmXOd8jZFb1F*M6{%Gh5RE;)~W;7@#5>~n1iBQbv+kYlng87;O)Gqcp!rxw0wc>5Hc znw$V>;qri21#Hmw)}vw`Rk|?Y5#z$@!9OUC;}C*?UzzZRSxSh-AloG?+C-_R9(s__ zqsXO*IRM9RPVPm1Z(53mc|?C9C6gyMs#D8{KPTrQlvas}?0$nKrC!qjnvE|q_qE~V zkWP~h55A2uQhVnKmQPfu7bhV%pmua=yiP9>i8z7*^trOEYRwCpw*c-7eTYEhI!9D zPkR-1e_RTD z760!IZGo@kX|KfY4{kvZ+2an$EkE^KS~(W|)~oLXT3!!TEdJIe82KHc(;ly4mJ{wG z6?eUgDc4)NE>flyO2&u(luuh|d()hD(%2@s@%1y-G1I_0#;vHQf;x7iYw^n>>~1px zHMHG>PFhI3&7an_a@9ZPR?nMzV%3#rREdB$4{mYm{Bxk6#LM~o?HH|V(Hm28WTxW$ zVj$7*?wEd5#?t&oG)KDY({w;Er~d4Zeq=elZT=wE1b`1&F}T{u$w#OU)-SA_$Y0De-YXUsqNE3*5IHTzvvBGYSTMSzyBMzl>K*-z;WxO?fsn~ zxBvkFME}1h33)qfYYSVm|C4y}E8DR)adMKd`(tEeV*lR)i%Mc-EC&T(Lh|lUhG3V( zP75dF`_U;_O9N;mp*j2AnO?*0D5j*n+UPv}@hXK0(IJEdk>GjHK0bbaJxvXB2q)u_ z?}7b5joi8$gJqV7P?{w2F12qqGP_&Jo-T7@9A;A(pOZPP!M|!C=lNQLxc=7rPjdXu}6|NCp& z4FK&nAe!;e$QaEe+DJka&<@f`CH+W2TS#F^)%aJ)6WnStrk-doRIP=4jBBusT1v84 z8mxu1f=M*<*LHS_S(`RPm9>mb*(UAeyrbZlrBRnXX7j$~O1<*fI=wm5RynHFz9(0#x`a73hOC4OthD?1p({dvl%^OAjdd3Ljn^-R$yPB|Cl?o;e9B(SKT|x??+yx|Y%a)-yOi$dnIEt{PLJfA=LbGX6JhB61 zzamF-ak~UA>ff{v7N!*&RpsDhYDZ&{6}yU8oH=#J7K05IY}pWzMyuL7Z;I+a;Lv$l z^!Bhl!yTWTO}SGdtK!wq?@Yt;8{=;V^nQXw2(KH z>BC)tUTySy0f8@n!d%YOuXP4(*|18FZYMm0v?}#GzQygEh*(&y{h=}~KrIXu2ekWG z2E*i@vGl`VxN?ac>Q2rK;XKb_Li7=wuPez8r)Z`Y6PEJa6`e_GOFR#eUEtMBXms-(?}cYkhHAv#1jK(b|ff8+2#E-4S37fFzrw9$ZnS9Ale*Q@PTqY(w8k6`iCFVvK%ZaWh z{WcTCFSPqF;%MKUgQwn()j~Y2rBSq})k3}&m)Hv5nxc+5#vD<9$hi2py^72STI*3GZ@au^-} zKIopSgS9zE0a^Io;s`+Y(0=PLT+QtCP@0~BC*AYb{uX3_f=pjD7`#gOV@gFALZKa*DPa z55Nob2sKCUPmeBFpr;QEgp+DN6!C|Lwr`$hwKuv`_Sfo;l!sh67BU}#fy93&!k}Ch zXEZ!m0WvcfIr#Fn+!WOd;t4mDb6-_3(ofk1oK?r+BSwF2ZVOgObyfLuPe_y?|BP5B z^l#O7o#`pyazmGB&`xdWK&ZxGhb5~H^gqN*?FuUDGjd}#%t0yxjj+-xorK+Z%lG60 z0H;(p3}4c@+Vsr)gDL5H#Aky(2DwVIKhF)8jg2^Lm$+!93 zBFN2rdNH*%rGE`qltVG+F*}4=9BEkls+J6ZV38cZ5Eqr7>g1psEG)@6mLvrgMj=h4 z9l%k1hOU+eD=kC?yGoCm82ZsPDC<8fN$L;Vak^#Z!+?o?(V=}Jz(9Ml9P{$3+WOlr z#}%T#+gf^uU|{2Hc)D-13bL8ncQ`e*@TRf#FdL?pJWZV>Bp~LWQzRHnLLU0TU{d1f z@My;DL>C7C8b?8-?UXyb z?X+KTCG{#lBKDeIx3@E^Jpx+rj_gf!z^pv+ii1unJ=PXHiPV$m0IG+qxuA$2+{J0u z5hhzl@$UksioNin-m*gO3F0K<9J_VEGCn>}87&9Ps_ycA@4j4W8t)`(pxgxY=2jUo zQ|WJn>z=aUiEJD>txgiQkZZM^y^ym9XbwX?wG3I;W&(1Jd5Z>0o1`HwDbfm%E&WM8 z(gz7i^$pqzxS}0N6b51qp@C@}LsD#`#6r8;sWid|NCGxF6>s2u3EpCC2&+<(aj_!C znN%7gw<|9qYhgo-3A8$hM5AV4!EKukYmkJl^7KZV4sh@H%}&T`SOba8&hr(#0?URJ zH*C*dxNSUlVQ6;;s;V=8Sk2UvJU$v7s(~7E7a1uN(?oN}NJ=tLkHR7cX=ZWGy2k^V za!BuwhN^&yP~pIp@bq%E=`<83njK?%NpXn`Y$Q#5s7fl^ND@=uwK}LzC1>f|@^yT< zi0&p9q?mNgn{Z69 z9mIzci>=%d{ZOtlED8b`q z78>w(DRTadB-0)kro%eKYy={Wm;?+)DDw0h4fDie@3)-nnMKtsBJMwNPEk#i(|+N_ zQZN;bap4FxlH2R(zhSzS5%RXvjZAZ@cKQOn1tR=d&_oQ3UtobYQ$}vKW%FobpU~*I zL2hF_)Dtgj@DI|!HaSgscZ1X 
zs8A~)3};FhyUOsL=D4^s951dz;vlJuqFjI(RzERjjCurNm)1KVT?dzjAcHP1c&ipT zdqozxCEBT~armM;tl`z}a(ih{e_E;ce^xzngkWw85MJ-PZ5m~YC2CybeWMVMKc>r! z%3ky2v6@rcd19v~LRVFwWL4lCDFec{D~PF>7vq|j<=oEO;*@lAhrShUVQJ@U zY8P{9mjJhkKDEihrWcw{$y3|PL)$X_p#3KIa)om*jrTn6!k-WGo)7q(o920%Lq0XA z@irxRyMi_U+2`SjqPpCTwmM`#lD#zav%BB4y)?Mwy01t7aDV2T9ATI#bf4ig2d-2Y zPEDQmN1Ht^Oc&N}?0a^y`DHQ{1+Y$;xV!QUhRg!7 z6=aV#R~LlikD5$X93gseehPde=?Q#e|Ic%!6X%rt4I2Q!;}<3f|F6%L{~B|LH6gu| zmz()dOig=?kAVdV1YiW!hZr>e{DBD%20+RLAu)hdbx4;^4EWWji3mv5SZQ^xO+&4; zSVGr`42KZtM=h&X*VZ;y)#x;&muyZv*T<^5=P z%k}vf`xjvz3xz(1*CQS2CgsqMq9$Iy7PT))38ZM9Ktv%D79CoAnrxbevZdRR)Cmjc zRT1Bc)io_8f|J0>3ntdBJf&5F$lUf%D#^Vnt#-fdk)&8$GJ(mxGp$q~CI(mWlI0bP zUT^Xy!7#1=hV`|>+cz)G^zMntH%Piy!IE9Vp|QW!y&`hw=HQnlUE*4Wg3t6isGB*d z<@1U7T^`3j()#v#@CixDM|r__bRakR$?M(OpErKO-PIKDhTTP;_t4#x?gs7ZHt}lq z@J9FMJF`1`@_-Jqy9Ksmbla_Kd06xIDe~qk5!tPH`LH0lO-tQ#c~E=nqxey|^elDd zONn=@fX;t<0Q?!xg8zI^{8`=){?INL>oB%E+U3kCW_(+h*6|j;>!X|*Cw0e4*+VzX z$T?yI#&2-TUi?)Ym$z@{dB*jf%95A5?m(HFu)o3QoBAWv@){fcUFQG&H?DVdR^sYI zk%vObm!OD0`8W<_JFeszIpd{k$@lyq?+zO5M_b@qFj8-#P*36|ifNBjw&?*Ei|M{5G%4IV^lb z3+!iYx0eAiX$>PLbRRZBfCy2N!GmpRe?!orv;vk91|(@uodrpf=BW zKH5NZui=RL{2yzxjc#XGL3e@0n@+mAxrw}Xeo_RhQ#>H8Lh!vJmjXJ2h zl~8|9zc@ut{5Wa^Xe%Qss|~C+Zg~Vqp$=DvhO;RvX5J=&+E^o-h+iFAJ)q!Ie66o-z|o#k@{BbN$b2m6y<+|D3VN#Ja`T z?pl?DjauF`qjJz+Z< z#PCNY13@baUl$GxEWzCOy$S>RoD5u$=NB?TL~#*)DG&e;WMk(}+M+ZVa32{OB*1yF zNJlVW8iP^1s#%1@Io&apD*+tn7)iE3P}r@4-If;nID%fNF?uMm!SP{f)T2QvXx9rE zH`?rOyom*b3kXQ&!FhS9N;4^nZBxYJa!NE9;T7MZGzqeAn>)AzUq`jWC#ET+4v*#Mo2owk#kc{=gcuAAkW>y*S z*3c!M&1@3_6g9iZ6J}=CYw0Lo^B>8kxa!79GpHA^A_g!MP8C0w)}e2gn~Q*H9QU#Vejf} z45HmBFrf5eG@Vji+J|i{DjShA{h*4`CVkC-`cfc7xYN0upnOVImIwGhT{+j*#+)5E zS+1f9+=~jX*r=rcWeaL7=JG&KJ3*a>kM}KcBu>+!Q)y379@_Y`@*zsHi5AGst;mZR zDF=h!EBau0uaR)+L-@GR?%bCbXqI9gm4)l#l$~xP#k%T0H{HXa+6m$`GANyB2^BWx z+S*X1E4!1$&)U+6nw=M=rwsJ_H(P3_3}j?rZ3j?*^C2zFvzTu%3eC+fhL#v2a79r$ z-ib$V5D*>0I2d2Q_ml9To-P7f7ZjA2ksq~44qM>vAjrCoDvp!kuAWGPHB^w3gTHULT)SI$Vi*?~748H|L?-x*i7^4DXf^mW84lQanmzAN=( zOOkI`HJ?qmbBiH~wKC_ROViAj?sE0?+lspRt;#z_uIf9oVXWlGYV5B%me?rs==1Xc zO3&Bj3Tnj*gyr%|nz2|k=^RzDm~Y{pm&XZ{fHwq^Q+v9)X@YR$GIvTsq<}XLJInr6 zS@*1MrpXGQMf)>Lo`69{+PoRFJfc8GCNy8dog~N~>V@6OYgCg{d4%!-8+0ORyNtZb zh)~_`BcRj?;SbjhBSluC`Oijb)13G38G$3vC1*7!k|ncGu(U=-u5e8-Pu%a!96-C z@$<6GY+*}Bf>uq6ix8>9-PnN{p@S6-DtxGF@bHdm?CsogLxn_~VxC0US-8Z@q7Rvm zNTkvX*=eeln*}0GbuRj z%Z!-%==Nc1p!lfR2IFe!A!eNL;95u(=p)(8NdH8`5@|ZLLtmP)n6PN-HBod*ctV#h7*dyi?iCJvRLRL}K%txYb@H2?`0 zMY>XGaGkd_P}b>|VEX~%wP#zLE}FF%hGnMQLxQJ%1?cQv>`AL?b^1LV9itVlQPYMzly~aqMew}774or{NH2(9sNqT@qMyo znkY@_X3ZMLrIJ$3Dk#F&K@!Y5#@_zFeMw|Keh80rIu=0(k>aEtp#*u znen{q>}O_=f%$$R6>L@1GN=fPbs~}Z>R)0! z_?S7<(7V@y)`^kbZMM^ArpNU+1X+!&WUTelHa*D_@98LK)i{GH`=v-{VV_=DK@@ei zBuXWYH0->BQNE`LC&}ohIM1Q*SjPpU_phW|a*7*bNgLL_=EzhaV2(w(2((px3QG3U+ma9amO;kq^pcwRuQoY?1qt;PHFLe}8Du=6Kjn?KOX_9Vl1Wz5vj z$+~H!qenz~0SYo$6O0Hcd*b5X)cuIk<@qK+Cl~SFwt;SY_V~4Un^^9IlODA(PZk`! 
z?XkEp3}Zb*2WA-f-9OE^{@tc3b~3kxgFhjJ?CoXV8e^_ES(EA}0oK9DlU~1G24d!= z1VRS`5i~%ha?Rd#*mQjk@0dKgQJb;9&K3kp)i~V9yuon9+z$+KwQ$G8!K{P{z&ZEX zSup+tTpUIq$QfON@KP`W6cl8|LS6*yK$Z?kMob_jsITg5&Kk#){TfGQ?z8kuSggA_ zHr^amYsN$@w<>))+vT-#BH-Y?>1ePFP|r$Nrvbdu&<$Vd2Enp;vWmT@03p@xQz>(X#$;f^>cIQG&izNYaqzcH$~URzyAP3Nx00#P z7todVqB;DQgfLkSDLQ{S>bOYyw{OHV(jwmyst@K>?VOLS7ytSTyd{79yf4DluZn%q z7sD@Z^%lQ`&pT}*B@>TZS}fZ`?Cyob2lffeasJ;KhG?$9h2q2@M8aO}t`N&R;Tt8n z4{(W^8+&|M>U>hUOkL@`(xc2*kX%rYh#c^kgO7O2Xznj3(QgB2RxS}6Asp{kO&PC{ z@w-TQ)i-Eok(>V`|4@2BW+L+Hs|ZbQt4Hf}z;k}tD8&Qh~7wi%~H<4WX26}Rx-$c4`8 z*xl;5oQu6QwT-cL$mNo77pDkBM`qE2*=6y``8P?^xYcej!gW#S5wxkcOzyNrq?toT zv>ocA%A7`J5hdP>ZMxv$jHd&`gjq+x#+fjmMJZkrpbD0|W>oiT)dtw6Eap*nskUs{ z8MvlDHf>?L35~nSVDxFV5x3J8V31vqVx5rcW@S?Y^4xB8r|}WL?-1!?tuH902_J!3hqqOJ*T*%W`oF8lH!DuRB znZMu!m+_NwoxccO-pO>Gn16JP{xnTP)Hfsn{j7wBxOZgC?z9!|5!t+#=l90alXUVo z1>_#9JJ;m!1;ZogJKp55m*Jl5i*a%+Z_YF7J36ym8C^8kw*;YPEuQI`_yg@zR2&At(}G0@ls2}Az9Pf2zF^68f`k22i%s94Xj?sXMOp56R@2D8(=> zOWZd6-WP!IkE+kaaGxu2Okr<7SBC}Xbbjw5zdc=cAL3%oquje)c9TEQBmW~kf+ybh zPuBKGk03h#u@-}@rKfE(WwF{he(uFvG>SFDgZjMtj8U2h?{0k94~GQV(|u@>?z**X zSSI39Gk~c9j}H>cD++}@H-$YoMIAOun^IoK)cP=|L9^3<(NtyOVI3Y zXuU@eZ_9kmK)QIJ>8@sLmMCsX$dyPG+`PyrRjibigV+3UnE5zkc6Cqpk;If{$7t>{ zL1R;muIkegpurDg=Uj`ENQ4x&oZGSz>9rBT6R0yHE#SnxPVCRU4rr5U|GEtbl$3rR z8`qGAw2{0ooa_06z$+eAs8lwT83*)ec_4Kcz_T^v<&0um`;omSU@**`k8AsiS&9Ra?sf# zS2eq=B7aC_-{agLzi&iM<#B$UOUN`0BdkJ2Awq$C@MBCZEzVBjo6#>K%7AvA!6K)8 z!&6}pHgaxTegC}3)j=8W_qi&E1K4W4fplJ^|)iTxD%G)w`i#lsYLupm~nN7Ny}6{99+iq&X6(q8NRm2>6}<}{ zUyz6)GE99h!njf-1}Cmn#;j2V+EaJLI;|OG3s$*_{O?S{CB1`eQy^0_bx z!|#bLWr{I1b<@>me!Jcv;RJ>VDMFtl3!FTeVIJ0?aAaH}+2HCoYogN8T!#^;k0LOF zszaak76_#~)HI~ZI@~=})OC2a17rY71IU)kQg41~Q+X`h&I+tTBrfS3m<mY!&`+as2c+I&o$ ze5h@~7@XjY(^A*#Vwp|QNyKB^m&Z>Y|$(9-tT#9&oC;;&%8D=We%_;yq-*JaFv z5Rl?iiUq&b!$mFOQD3W?>f{Y_pn27x6*{z5^s(*BTSLLNMx;=%P^t*4`zr6s$7W=-@aGd{mq_u6V8D?ni+J4O;U2`KACvC z?s~&>*48ph#u%+=8UkhYiX*Cm%)F|gV0<@EYnyw4qOA?OnYt-@xhfb(r2#hA?kx}; z3#Md;$iPcjj|ta?mV}B`0vxfeq`p;4K1@S-oS%N^9^T$Mq1kMY

^k- zj!4OK7WmMd$ouekz92Ara-hA_XABzTNq1!)-SF+KbppVgq(HAj|M`Ohy5s4OKLMfF zrS^USC?7<6lZf-KA5fbE@pILT2f0VXC-(fp?=O^_!^5XaUpV-?6en2z!8zjcU-*7e zImZsOuvqVG0wNSQpQHkn70VgMX78rfCz%SE{P1ghoIzG+jey^kzRtKYsQEkeN4S5AX^RRym)u9xZVz6dUxF zgOlg(d>1s+a=}R>+CYnYg7zNM zJnSg>1NUS^giczoFC}(W;oD$$H}#SHaD@MnzI25pKhsrC8qdbhB;A3V9N&VZqa)H5 z4I>|QMxaG>L*C!xO-w%<`T2<^y$-X+=7#fqQR(#w1oQa=Z=D^Z9T@a6eMA*}kQnCX z>3uM74%W_eesK2BKv{%y`%6=bq{S(oUErm;~aHH);BX=Go(K=EGRHD z6Y=+&yqFn2!`yghW8PHi4%gl)FQl1tL~b7}eo)Yzye9_6ME{WkuaN-Yt7fKshG@PI096G$5X%1ncif%-)4zx zsY{fsLD0)1S@lxPLpoUSl6Xn`<#ZCB;9x`heDnt}S1A`(6Pkt%0vS!Z`Ng>%sHX!i zb_dx_5sNT}2pm%#JK-(zv;DEY%F{qU-xYvI9D%TX>hqukWmATN?Bv)$yUKbsd=XTv-X}V1K`}-SZI5?M_(_ z1EL1c!vpyXKVyP3!>ylt$jKYDPgK0QWBSbU03q5{=mH6nwHZHSYD8rbPzF?>-T^>{ zF7)A;J(f%)=kUci1v?Q*+kf{ApzeEyeHn8-(5Sye>&nOje8c`{v?!QJAYA{06p?<0 z5IO&Uqs9O4*kMYn5CX6el9)deoQSbJ5ylb@2NH2T5(ZU0S@Je9`E} z(6{o6kA>;VJ|p-My3GBOrR+asK6{x&IxY~tBv@04g)U1jXb=7PK6;?dNf%3cS)!T~`wrmte1QGt6bz?SgKffu6$Xtl+LrB9Z)L$Qj+*~tl0M!Uyswqx8qr?zjeRb)JrwB0&(3C z=vscAdgeTS@A7_{#U$_p>rv0T%|}>HL&Ccwj_Bv#2w0Z6fq>qk?9D71Lzc2aiIGIQ zH9>1DvQIXY?QKDXh?91#!g{JbfhH`7pkgeJA{WvP$|RF~Nf50FQPuv3ih}oSghGKP z`+94>LEB4*(jivUsTQQ;+XC;9#Is$qgAZe`aU&0`V7(!^)C2{1(u%A%r>hYgdOS)u zvYtey!DyH`x=Jvm&KFaLtg>8S9PU$hCo33MQ7=epZ-<+gyu?@`(0zS+GXj1luu;XN zykwE}CBny@MHN|-Oipa{RTqs$uz?o2!_L=%kg0{N9$?}l&I^-MQMib-9xal8okAa0 zJl8b-btv?w9YFFWO_ zkh7drn#8j~$fV*+kQ^?bWubz{NtHqk%C8z%!NpO~95xHdmN|U&?l7-4j}t$fji~F% z@aT&=88ww};XkpJEse2sPPcEoX!IDe!}o-bE_sf}Gy7>B-2jqYh>_Y>c0;O#BsA=C zr@85A4$>e8RFWJc>iz{tr}3)Qy#AJ&Qyw37WO1O#ahQ>8IJ-4n|~HRiLV& zuc>qC>>}4B*+a9{go7&UIwMU%^2RTsKQv_K+B^bKV=a&pgMv@A)^H4ua^L5pz8cV@ zz8d1A-im%M_h5S}^XN9c9`vQ=4|qrSQ5*~}S_*gSjAD{vnO++bZaK%3vQs|s65Cq(>35b z^(V-of|J~iFGLHy^st0$qS6CvTlvNgeY=;G8skPUut`lGB(UcdeEOFrnqC^aoCQ6g z=7S$xTfHq3Jx|PZe#zd>O6xHFF!`2KWH?7C9Mi6;X6@Iol$G0`!y_rrx-Lf(i%&iB zPqV*viL1>x$D!uROJ{CYjTVpUmoo;LOY^_$QgbNw;8+s*>OIZ|3i|P1xf&Y*FuX7R z*o=SUWj})qD5tid?UQRkA$=g@U z1wX-C7vSDb_M3ra^t)sI${M!)QuT%}u+pFa{e3WD&E~I{8wLY53ELxmzkdBd7}KU_nY#rgxZ4x4{;=O6D;k8^nWa9l3kKe zTDapjrc;!;38C*svkPsh{ z9$Pvv$&TPeo=np&F)RH7iZL6HwHlB8tWbw6F!<{u#_Zi0V{QLrfFXU70lByKW&3+M z=A$|0lS=&C`^)tkl;wrg*&|WwBU0g+LX%42mhB7E2jAZ;*}q@wBU_7CL}ps~ovVgl z;Y^_F1T%fNI|TO|!|(2VL<4Dg7mxI#)N+8OAa>P#pKCVyc#v^9PLZOIkNbIGgpfi& z%qI=W?fgO52m!*aAed`YrUq4S;*nj|5Yw%}Z!`I_Z?VY5!)}abonqdW-ibm#)Sg|V z=JJAV)s6lYGqlN+<~y$X^4FDBxY@V0BozdCe$O$H7Wgp2#kwh992{<{rafOy%LNYmDs!AfWOt`shNWZ>Uo@0Or z1kQnhVI*|2>?p<)-j1nGRygrYj5BGb;ERpx)5iIY=l7fM!gcd;njtj8%IGy>K*L4b z1TAif=+%7)j=Tt3jtN_wI6Vp?1Q&X!9Q>{;ppJ8$w^f9oX;(;BOjiunwIxvOtPj{2 ztRvmj$QS+0|9?|^{)Yg;(*NJ&#?Lgb04)#@`+woT;J?jUb!tL;=q|eQOEmCW9gS+! 
zj-oX6;%eel>oXyI~d4JF)YcPp(662wC5TffmQc)(2gP1UY5}pyT~hj|^DfmVrKT zKigTq^Jm>z-=>Y1Gi)ds|m}FFn>L~=PAI$%B zUcmm#Z6v~Uj${R%abg_dPC`nt?jRFfZ;>q-H!Bh5RE6-Qvcb_9b!n_CPcZ*mwJ2RH zk71v<`y0Dv#-n(M1G0?pcu$8_zxdYrm+>5=?7ZaDKwo)sg7Mupt^xr^qAYo&DX(#n zveUyd{7}+<^{yx3>&}1CmAiqo2^E*zywl<_<$3?v`IyQ$CjM5-O*roQVVx)66<);g#G{ zj414+;u8?X@k1;}n*J+z(UI2Uy2{UCs4H8nKelZ+8RBiOP!b$-n|jngJcHRF>Yb0Act5N6>LCWa>hX6=Lc%X>aV1kqxX zPHBC#@n@7kiy+#!NL)VA{r%h6Y=x)vB~x!71ngkNyOexR_q`_`;p6_vneME3LC%i@ z!*KOd3#?COza(N0>Rji>dOgVSc+lj<&1@CC=24Y29rs-c1$EgD#rstm?Xfl@g}^tQ zoR_tBz?^||r6z*x4FGMuuJ1ll*|FdffEfkaHQr@%N!eE4+Z;xs2YNbE{dq}#P}x~` z$Zn9jZbvQf1EXC|A5t!D&CqJkY-y!PIus7lzov7H^VRJNxCt?$V)@6E?6xo^IyJqC zRFCup<7Z@F_GbrmlBNT0jpOHTxv3|icqfLTHeUps&MtF~45izy=u`vxqpPwxUf5uG zf|8bN>4Cav0>K0JdK}ivMy-y0!?Cg;*lQUa|DG-7e1l;(1-0qv4}r#=>0Q>aCvg?9 zeqC+6`83;#)CilHeO>NE^YH&#UCtT*?apE~GAz^*JgT_I>fC9puXI*<)15?FK8`%Hflxb~Qi$V7 zIvt?xLfvvFf~=__pE%+xUIl1R8;MnK%PyGOp*JtAw34}9Q|_d6Usg-(`KGnGP<2!X zj~RMJof=qV%5ig8KMHsvg`J!uMisj+AQ-`K1To95iAwxx5)*o))2X(8)X^6zD!>A$ zc*-Y}Mj}eDn`!wuXME8|>xs7KnirVd(0o{u4@e@Na=9z+nqdH)XE2w4J#}RBiHlC^ z{E(P4&It4B!qHxf7DQ+CZaNLm^zTg)U3Z3@^DOs$+WBhxjpk-M)3*&0GTOIJm&pa+ zlr;6(Q+RuKpoi+{06)zSqttQsX7;N;y}r?N?n2#UU=2BNLVt0oPPp}ZlCt}keMHRm z>$`pP!`;yRA|v+U2U$iVW^q$8FOE7aY>=6_`2-8O@$=eUaY}-@V-xb{m2hTH`K=IU zJGtfN(_FlSGNAB9of07@6kV8+S#&Wtk>ZD4aE{q5PlD%kXy9v>?p`I5@exTIk7zNy zv*>eHhhytk32pvmzW0|$cJ77@HJ|z5)-Ip=dBqPs%JOt6+zSP2mAy09uLOE5h|;@9 zDR3zU%L@o2oJOzv(O3eAG7?{p@*TGQFY4NDkSFW#># zVvP$knSI8l11VQKW*6N15qioWgKbEF={`T}#YE&!*>or!%XMfS(>)|l>AXIHFZHC< z-m+(3@3_VCxLg&*TX*Ok(*y0{`;^KHhzV3XHBa$?uUtmUD*s+0$acTA&-U(3d2caD z3U`;g!c{8G%^P;v>UTfo9r1SEqR?Jr==y~A57KnF$elf zT2(dI?7wW&)%xt^1XUr3h5&m;Kw z*h5d1UFjN_To{RYNV$}mMC?4g*j@1I{T6xwef4YlzZ=~k$y4<(0aOad^}-39)A)`erX#nK9X3*zG+8DCLOia4idWA3iK>0qM} zcFDq9{b;)H1g&!=q1J|r?1@|eI5Pzr|F@F{mqh4O=lj#p?11(-}^5gMhyc$ z#uE&<6j)<~7eI1ClkRw?uuC7Cva_y99E}B^h-YaBm3+`a(?_>yOKC@?72>9tD7h-t zA{5hRuo5!7QA{7$r!W(2 z6D+&uitLRoO4_aXLJwNnWJ)+e2}^zrYwAxJlhPnixliH=ypdU#I@iP7$v;81$zLkSCm5R0+OFbc|-PS;6b zU>~G3q8Fj?HSOusM#EFZFAS#(->v#*&*80OJ8v0FN3pty+yUPscZ< zn9i52puZ9r6F%t#*&M+*xS$N5SiTEVi~?~?*76To*-v8%#^7R(aVhaehx-p6aVfb! 
z0d0$TGz!KK4G+nv4OOi5c=k@Xydey8ZxmEm86{p5tRoZmarHKN%1o`8(8kcpqU0!o z@Oz3e`8u|?D-3580t-XNCtu#`f7^x$lg7?3v|yrq%B7W+M_grt&$=XJ zT#L=$sZpm+A7WdJB?B(|x6IO@9?!_vMi#EB@AV7hVpMAXY?;fxW{9Am>d?=sGiJ5j zO&(oXX;Vf#1v?fr^gKW<_9@aBRxC2g@WIW)r8wlr?_#`i;$5MhCF==#6Q<0~w;1^V zGMD<8a+OXG()aa;cieJ&rDP~&1tfAq3z~PXomtIFQKbYh;*XR0~Z`%*^G9oeVUs4vZ^???W{!nn)O z7h>R~$3Yhtsf~xa33HH}?Ys%#JhH5xWZH(B-v0)FOEd$UR=-)=9rLEWEY$Q06%LNA zP4bU&Xl{7l8j4Txk#tj9<0JB*eaW|cWmgr9c+up+w~@?} zzQoyEL`1Fcw|h8!mr)S-w1*cVeI|`ZPmYl$SVnK?3=NZJm@1=pdsyyn&U?IL5l)R2EaI229YlpUrfO9M#<;;c=!c1D$U5@bq40Xh zwei=N5{W$fMTwfezZD5+AwXwAns%C#V`U$(u=Pw_HC_>~C{w1!&}nylFQOuI`S8H= z#LpmaR}Im+r4R4e*r^RdXQ!gnGou9R(FOU{nXz1sbsJ&PZrn;JNjoqtlp1=#qGFrfnA{SoTn*)HEy&(kaDy^^$C|GY9Mcg~9YKsC?Ec#2(# zdE$z0BOEZ!V))P8qKiPjgy^?M)cd4>RmLn!zg&dXzHoINaEJCquX zVp1QtD~v)&rno)tAUSF0Ao+V}FM-lznTVixO6N|vMz?-sS`+({%Q3DFN%Q_OM}l-> z>r#fYDfYyL!8AvXhNL-Kf+fBCKh+;FWTV*c7`CRT#?bRq<*n}gFd$5Dp#~qg_~6G7KfaaNynzuxuu3;zjj2vW z^O8JXoR|bXB9X|;^s*zc@w{EU2G5WIc)?&&LCJrvUU6ddQl?PE??tMD*&pMwX?*LC zz`Y&hCXKw7IOMSCJDuS}Im5w*gY}Kt^>Kgc$O} zmZZe*ABytt!gh!j&?tA^v#Xjt@IIAP@J@69v*sD(%h965ADq(mf8Z1eiUy^9&>z7X z3=ojie`y>3msP`mk0JaY<_tRSsA_0m@-=eYe~?7#g60eZbuCMZ!55Z{rEn87go*1_ z)0&e;(-CCN&rHP!F~37<`QHz;+fv02fVV!C&iMaYt^GQka}gq#%4(SI^6|cQm_DfO z&gFw-jIM?<)vN7@* zL0UT!SJA*mSq%Tcr9a=W9CgPWTyTW39p(!y1w$jvM~VK|DPqt6Hvs*{Q9 zX|EHF6M1S{`kP^t&NF7-I$KXyvw~Y9FV#q0ay)?6A{>n~sAufRJX}Xubu4K%YR?Q7 zWu63sU#Z-A&3PGSz3AC*ms;-Vo1~M$>D?DFlDeA=8ThfO(e1VnH%X)>X^QT;mG8$? z6_+!$5TGFh0g}(rau-2t`;$KBLaBbn*fh-J{jwwx-=_PBsFQ&(yel-JkxC!v1QVJ8h)385 zvcCF4ZA;;L_}efe&u`f|GKG zohu(2#Wug(9xe-{SnQ|Eg6GXoMa`6J3&eoKCcu|TTn$H355QAk-8}PdUd&0&uAv)K&2w~UYH5nBQx>$0R0}U9(5$6O3qPf z$$#sd<6|u&OGFLSN2}bwq!=UusIcbJLE3nLx0W~c$XfiQiDG4v1%c9}xTR1u1m@=z zo*pPpmgE>@qz?Z)Cu*0V7FsYq^Z0Z>35JFIv2zKOtOALym zWxvYcYk&SdPS+{N!~$kkM0mIaMQI@=UU-#n>rFYoFhSY+Brxc4r69@A>!?66un#SN z<<206(916p;2Q__4uX0iBg^auBJI8M!%783*Rx% za#eZdk2U|g9BJnwN<$FEx7D=V+-n1j&triiSwUHfnU=I`$3+{)Uo$JJvX zloqT33|=(DI*tgaIiZIgBjC=I8^6PCh}m#ok~M}rgC1aDNN_w9Mxvck5D0KQgk1|x zk_9F3;UEvNco+}bh~e815IOZQFjFu%*cp)*E^!dqSQr~9EZ9BHe=tMn=|5T8T*4_l z)|?5P`PIw+B#gD?$79awo%$Dyv2A=dm`xDjxN54+HvDKZ*(MSIDgR_urdv`sN6+-a ztXkTk)(go`IN(sIm6z(|%TC#3eGUA4B%2`t`Lr_X586LYd}XU)H{eF0A>;OlI!{iU zzsjtd%u4@d?4QsoY}6t$QhhT*_#Pc> z*o#~pPGvgSCw!=h&gShA*rHeH1y=V3R+7Y{!sMtrp`cRxMV6{&_0_6o4b4;g#cr2* zh?i~exl{Wwz*^i?MKyTH_Y$gpAWv6%03Soqq;dLy`zzm)d{ym{%Tjx24bpg|)ZGdg zltwKNoSmmDJL!{rz2=ApfzO*&Uio0h?>hS8Hq4wi9x{12OCIWZgzQJvscwDZC zX!7Tp<{riUOOHjRF686;r+fHJA;3QiNT-2V)b~={cw`%_VQ?Yq7MO+Tw!5!Mh%IiF zX8lf}@HyRhoX; z1>d0m0(s}9PcWB(EBfJvJC$>r#j}f!s3B6e!+{*%j6(mFyEWfuQM~Kc<*wsU!1J8` zp!H#w#t!EMS}ZrbI02U{sPa%^@1UB3x)A*@Ih7FmkX;*7@2-+ORJq+!X!#{c^VHOp zk79C}lt(zMVcqd}YQGkp58=y&d-Crm3Cv9+;V6qZ8}utlt6~?_Y`~&}j`1c>=H$08 zCgw(68Uta+nTmEQWXe7>5#21rSlyJ87AoxA4Z$>9xXXJ;!tC>L5yLBc+r9mkc_D@0 z!bcRhRpk!DKfty;I3dbaw;WAYO<{XGT~hhUw>5002YdX@g)>K4lX?j#_L$PIeh?Yl zzvt#PF-~>$rzXj4enEFMTRFeN)NUXsSXruJ0*S7~HZngkrL~FI*JiHPY5lR$J7C`O zIx25TlPB|xVf<0gsHrvbtsxdIu2J3k)xg{qUv+{MTox6KQifl3L|Ame_3l6ZVbe0$ z)?i;%@WdBQFAzwn>)wy@@DIZDp{Ya3uxNU6>fR2r?h(L|(_n|wSd?~8KTR6C-m6vJ z7HEyzylXl#z?O7h=fb}7Y&&?sY(uH>2S0{?4z^J(yexz%C{0VTd?K?GoVK% z0`WjW`t1+{dPFSQb@{x(D;bCSx4F6o8+h-a`NO3U#}!AK`|dROd;*8-Wi-S670K1^ z;Wd(|e?|b{?Rv}+o$Y2IqC7Z-?1G%KHIyfw=NV%e3G%DLpZ|!J)~)s@P2?7^5abkT z@Mi6$Ea>v-AH#q;eWEO5zE>9lk)R2~(uvT}Nkm=s0Bc8@0C3{xabjk*WoNi~MU1kF zxe4U2htL-HdF=+k+hFI>ddb&f#sT2|$Bpq+Q4Hn4%jM7pDlEfz$Fis|6phMUjn4M? 
zXeHTMo9q>(msuu|jf}a5AE?s50ZefmQ(C`Xw&iH7s!VRzz^=Ilxl6T<;qDoj1x8grmp|jl;*r%gy&1JH#d^LD|_rN^s{y*^J*&yi=7BUc!0Sgci$A9JY`|tcw z%hM2T+4FnL+TEfNhZqT}9wVF+ZcbVlB|ISI53(?b5mLCRQ6Rpz7W`a>nZ!kJ0(<#+ zMp2F3?^%EwN_dX7GDoN=k%{p>X&|?K(vjsHt6kDBIlXqdxnEOa);!(UlNT4#8R#@x zhw<5-%kCZ9?j6^Ye!G*Ux#R_2`JUy$`s2g<-7mM>A1}ZHek1XOxBLDtx7{6A!yhlu z0{X|p3Z8owFrOPTEGL+}cQW>R&vR~XDZ<~!#j6it!VX^LU=Bk1)Q8PCGyDv%A)sg^ z@r3eXEggMyXnr1O3AIqQ5@;S)k%~fOFrKq;erUHQB_!sa<%wH6@=OBe-h+5b%0x#| z%2!kURX0?77z}Qj>O@D;;2)Z05n}qROUjf6q-)F2U3ryjBh(1$*JLjH@*~)yG%F(@gvcZ>Bg7juZOE|VZJdM}v|S%W zBI^h?EAi#5Z$pTl`T5*5qoIF`Kg<{j4f0&U62LBr2hD!1=|NuipNZcb$7MULE8EZ42y2d{?w);~Z7bWeK>-2HRpgF|KDsdLOg(oa(A7YE$wP51=aS zGeuzaOdRr?nB3|7pyqtoVi_p-L46z&{^dP28)k=p?9)x2U4FB=m^IBx1(75%&v{Z} zM_bK`+O0#=B_Gs{xDpaf79jynAZWn{9SYp6WFImkNBvSMkv7P&^_Sg9ir|00K|tKI z3_bV#?Uw*doDRr^OIlwXRMAn^2Jz_25Sy|VBZk|`3PhT)Rp4)9#5_5jRn#yJNHmJt zVN!$B&CV@j&U1RMtHHtkV~}7EMrtTPQdMEf;isXK-wZ`JWSkA^6Is#r$zvnW8xRMo zb!H*8_o$acwGx7)V3b%pVaIdk+AD^;=;FsD4y)IcU8Sxh8w zUMAv9VN+~~+osfWC29x>PPpMwA~atd+6XUBeE*|G!!Vs999e3-B=C2ICdNwQA;kPj ze5S+peV~z|&o!&@A1A-PzobD@5a>G|BWeYTLYQ!3EsBh%{yM9tR?Y+#Uy>4?cO)y$ zv-xO_%FC41)1Ja-x0mtN`}nFE?0%#nZ33R`W*)5a-#Qf@dIj}`Dg-<93`WKvdh0{k zS|{&LJ)TJU1g-N7Zj&NFA3Y6A`vgv*H&F@Uu{Ss&LK&DbXFXq-`(Tf0oUi$_+@Iy+ z^-;E8S{~))mK@aSU3#Mf@aW=xY7ola^o+0=CE~pRUt=fTWH2)yS{=P`*2O=M16eyI z!{IsmqIwyN8RZRUlFnM=x=jk9GVBf9@(ZP_9I_1WaAeDiW`^}SJUXQzp?bY(#_fTt zP2KQKg>AL6VXGjvc=_r&cUxLgjEZc9iiBI~JUG~&b*A~x>KxG!X)mzk8kMi(` zS^V)@iKY+und(wWFDGW@_2vst%kq42uVWhvzWvkYr%^^ah8^u3+v&Xb-bu|e3PZx$ z-s#hyVylfhqd}@T`H-q-`SdAQ1@2{l_$yT_0er<%{k{wRoE155Rj)Ama+j2v@&{Ty zrBkY_EfGjZjqw6Kjh7Ka(Q(EHd%gHFG?eq3yzPS}FF#u(R_BWx&p}YiM%(MWYISD` zPA|>#q}|GhfV}ms62zUZw51{z?8u9yex(EU^S-e+_KJt~nAPB_UE55Z7Lp9rdL`D% zJ@$I38M;-5gCOb#OWT@@8J49~0Ho1;*IkG{L6dZBN|kanHG8yr%2b_x=?Ne8Cp{lo z88z%Z-CDjOb0U_SQJG7|R%sMnfgJK>#R_#3Re{K0uV=e7VIE?Iarg>VnJW&pxO$5U za(hn}fzqITnA4}KGa<`*VN_><4Ep9Cg4#obFSTz7e?=6};4iU<3S)sk$dDda^kK)) zZ!OY@4F~OmJgQEJe9U7}>e+=6-fGBbkZQ;%zGbFW6*H>D^Ngo_sSS&7V~?iBSKbDbm(;~E#FB$by%sY{t-R2P;%#;g#ktq?x3us&G1Oc z+SO%K&SPQqD~Ge7AXSA2vNu>|Jmk>pQx#UKz1F44AAh)UVnCPoD~g06@>c#5%)dNl zEG@Hp7H>ZW)PTSNS*Q_}tMSquGtKV=NyQwEU5~;^OQ3HXXr5y}+sKxco~8GRRr=q? 
z7P}m!ojZLA!kWA&0rinILbWmdxuhb;8bTZBXAYCEYM^>IrY~AZA_6(dR%Yu>l5-hy zwJW3*IG0lMV}7*}nY8^_zjsNuv~CG@=OU@<0bc&!W2RJz@S6u?awU9pCBDZ(HT$Yk zU>MZVC0`e;k5=@2QcZj@@0y}LwYFAj;b1Hl04KF;#9!_#GH~AlEh>DBm>F6g@}+=D z?edUqqPpaCGb*NW)gT=w=>Y!fDO#Cqt-=KFXAem-_DqCls>iEk^#tl`e*(q2 zipBq90px#Syrw$loHvFLru8YL_r(jh%p>XRnNV~<_i>mT8Zvy4b8$URw z>oN-&t9h}Z|1)o}*C9y%EtYXDQ8*(Y8yY{a{(uQLTM}?irG4P6S9C|LjqmuXs{v9o z4k-HZyP9Sb9@n8)@v$)~(?%m2XF5Z)n?^`ysMWuGpCq48qmWv9nwc|GyW)z6tCmjIC}3LW06|!Wlws z=u4&tLc;ms$-c1TVa8zZpB!D<$$WtV0d~-9f*6*z;+v+9XqWSS)g3}&;8L9_(qr0T zs#GI)tX-^pY)Q7kfWPpSI0q%Y1%=`(NLH>#Gr2RlIL7z}!M$n|x|qNg`oZG{z6t7J zS$??jjy;sY2$tEYVtB(Fko34xfUit^p}FH7tkOF{*& z>U;!%JHjSzjq(Qf`qv5V*JrBYx@toB?t=$1MestuXFqJSA*sI zbFbKg1qik63k3=V!jBdu-$WiTC83@(8|+6IwFz!?jp90Of^}KDj^H+^&Mmi(TS4>%|Wf&Y2JWwSN_zt7{t(5_jcN*tCnt3G1K|A3xc2}lS1vX5yzX2Wi)#bk8&oI0~PASie{E436U?S=rx^H7W+ z)<$wvca~*+vzh3TBy@4$Gk7BNNZY;80&cDj)jz*sf7phrh8B13(G>&oWS3>b>`Cnf ziw53jZiA~ZssQ^@pnJgc!COm)m(V?E8Q#Wh=&%4bfzmv*bEs2T6}ZDuc{Kw2J*oU# zSp_nSHwSuv-=_CiQBmOO9b+Cbs~-E8yg>6UXhX~khoKs!L-0(Hw5>?MSjhyZ_Rc(L)3m|2SAZ2EFf{hr0 zGIRmjbOAZVH0D~5wiQ)l>zb~S8DM;19C>2drM?IM4xf9k)zHXl1NZ&&$~?h+48&gVaay{fXkT%JpCLy|1(H)&VLO%zIQKt} zhpfTby?&0k1NDC9=s&Z$;fkCN*b+*v)gV*q=26Av!>2wk8>!y z{ezyJcG;wkN8`3Nj(;ohy&tZ4x&p;0Z`gEs21IxM(1ZTq6h5E{ZzYFa#r8@i9N%}ch#;re6Gi$q>T>k*X9{EB zRUJAA^iz`c^rCoZz_)2g?k15El9RD7;}7UboB@kI$*rGiq^gDrcB*~$!P-q-V^{&x z%O`{vFCIUp9@EQq97(zB=3Rjv*ocM4y7B|tMgISY)qj(IVHC_0*@v{>9-;X!`eijd z9SnV;+kC@35hlNXV}`tFiklfG4`NHDKL1eSa*qEGf za695iT3y}t_agwYG%OCtq&!aOujbW)h1hCRV%mvAcDJKlYnh&L`$QBX8Pf6DR`WWz z^Q9k}%LYSs56zHOJ+Oz{6Ln5_P{V?lEvtUhhQ=P2Zs#u-6!Yst9sRRr$8w+#8wHcd z7Uojj)o;kQ_y^&bTZpWEepK-6*#{}HbU`U7oJSWD;@i38Kq5@qBA(EZ;b3;;oZ6py zC^HZ)oUS^>Z&2}OQ5CioJ3VBuVO@ubR<#v9QgP6wi9D$uXHBzdm)j$vA{d08t1SO& z2bq9{HcM}%7Zvph%{A)kb$^rgOd%aSD#w-8pa_hi?z1|$YkdB$H$I2u6hlGfsi1zaFDc_M5Et`+$jWP~h&w5rEyQtYzkC~lebZA? zM;&sRrnhyurWa;BdA|}qo4S1J(MQ?X7C2pW$%g_wl+L z1|Fu_y2MB`jfFD#DBS?iukJRSIpUCMOhZq~e>lUVbZcEt`aQn7!39tm-?Gsj^q53; z>s+>4{ep04eTc@mW8Gr3HMQaKNyDYbwCjiSvDO^P&gN3Pr@gZOy`2yOs;+huN9Rm| zWp$?}!P$$`e?S#f(JChlxsG7>%ZS^qH#kQjn?&nxM6LD}PzQfVb%egfV^vaB0-)&h z(P!N6Y6Sb}D^53cgPVQ~l(*ajJf4-;T$3jg44=iL;74nZIKipRJvV3i#~6!;pGjol zH=mF+%j(7qJ5DwJI*et;hyS>dCbl2NZVGjln{^xw723Uv*zk#8iN1BuiU`P_63G3mtLYO|+hu+b4Z&~dl*Gnl@UFL@1>uV1 zf?N7~Aj*)ZQhBoDxV!QxfWfu}FvWl2J-;mJC40;dfU@1lVcsRNZa zFjtylF$AP-2@<$OYpj@%Kmz#S*`LTX5?}5G9cx0;BXvN$k~5+0^>h!_s`6``(jjC? 
zO0FgmQv&d-9c>7IH0*D^5Je|Q)WOy5|6JGG20CmFKYVuTM+Ps| zi2m(su(DC-oq)uIA|#quhf9!o6O##*A-FaO_9sMiPPuheHxZWO+5Wn7ybw{RvFTAs zht680vZ-sYy;xYY!h}0--{zy#jk{W91{>1GEaW@ODT|8lcuS5QI-l+w@ILIh={)7$ zb^p@ku>Em-Z2+MdYy0DQ?MD&_ZzNjJ6mwor6@vi?tH-vJZDeT)?8ShKv4Lx1 ze&G(r#kP$W6roU|g3yC=fmkO!`T3a<=|xDrFg2t;f$tBRB*G8+rg@9#e+dH#SJQ@TfJix=r3(x6!$^;nAC( zRo)U`XN9liv2t*^NWN-oQjs}w4Kwqo1XCA!kRb^LFLrdZrXHnJBtg*0MKq)F|sW$O69cN5FJ6(STY%ikU`d)tlG)^`;IYPN6^R8QyOV%mQOzCmHeiQrVeOGqKlAWQAz#uC{}6aa`5s@xhdx$Fs3;Hun!4^vww4$j{$GTUCkqp)|d zWA9@u(tV!-#UMUHf=aJKq*t1vEApDWReg}URk`;>PaXwPLMe@|wkcyHLs znKdwXr<)5RSR4-B!cjHRt;@NJ3W3-sqpfj4GW=Y6SxcLcPvBTMw7gfX6Hia>T*rS@|XS;0;9t`g5 z;}mO%RO|I`0@a_uxNj)(^^(h3dSlPWeHsjI_aw{TTsPWyn@vfLno@Q0itm6EcSWwH zq<2x=1ZK1JD;$Y|#*@|--eD96X>Hm2vUf(NEKJ`e$@Y)26JflNeu6f z6nb*=Xmr~B8*r)7P)*zQlA2jh@zvsVbnku_o5+~>;7Rki)y{7lz;E-zraDd!G~wyX zcr{FuSw~oRSkP8LrR`BAVWwp%Jj?<})a2lV zk=l%V!%d$Qs(CR8tUXHUqz7lrXD%qGCTcNx^DY7RjcWeRF9cNg09FKkqYGRGOnS_Z zB99*&yhP47{5-kUCz6+HEpVfg%UlaR^J^zUr~t?2XMb-^5}weexB-(IFDUrF80EXH z|7xhrXh^HG14xw}oS-s^pME4#6AJihXmGR)Sqm&Pu{G!6=u}|KJEAIL>w_@ft%EfF z`BTaCw>f~LM?#(huZz$-p;0q=;n5vHieT~Brcb31Z?~S|_uJao()bUylEe>f+}r{3 ztf6E9WYw@$rGiz1oY>;9aNv^n*5H?*vS<6ql?k?G_-V&qqp0xlVD#cReN67ajt7xy zM7n(puM8Fkcq-$CW^u!I*JjJ%Tqi{h$0fK4am2rIFj&^8GWrZoWc~aWZ%8d<-aRP-HA2rv1btQ_9Q#RT5Bg20h_yr(yxtf)gX{?PlTtqZ9ZusP z&k%2b<;j+~002Y;jr9ZJ}Ac zs8X#>-F@A5J(-zq1O)i`{p#hgt^EV%XMf(>1^ae?uqVhUZg)Jzi4jM5E))~1R*bkW zK1t}xR=7ym10ttM+9M#RNg>Zo*!#^3voGoGl^)A`S-R_0?&Kp9F<#u-H=O&Zh@gAA zU+Hin&l|m03DR?Hbb~=&gAA}NetGZy0n)WK%=%)^wM`aHD{)DJ>pLo&_@cnOJ<%d| zIj-O&Xu({Dl+d`K02u8(I!_?CMP$8ehK^M(4YiS`oD^i8;cH6$ghK&M9;40xDO zq6rzx^q1!_(N+dWyVRk0J-6@<&(-*#41Yxp=CRU?S?UNnwSX^!@L?$xi^of?9n#pE z&q5eaBsyu0j69+kjd~D`T*AwuD)%bjssho=B1<-z0U=QyGd!&>!;TJzb)SbBhdbJY z3nxMZ1F<(~^k5S{`jXC2s-LB*+FHX!#!%4ulbyM8d6AXgo_anm!;B~gT;%t8;I7O( zF-V<}4l-TK5zXE4AutZ1ey9dePs#@|b=26lh? 
z?qD_V+Rcfq*|e}=0_(JR!z-ptPDFV*E}HkD@e7EwEL8dqD;NK{UGw{8r;Kb86Qsw> z%oDQWIv$KnvvB0FESHN&W*9~+@Q`59L=$Tg z&1#+rbXa{73(Y>0NtXt9QAM$rY0A20Vx;S^p}<{b4YM{C#C3f)#3KDBX+P|YthUa1 zh3tBNcwLKTP|p7FwK8VJ4$tFIg8Tu25(s0__|t_)xx5UsNb(5Gh_uJ-%XK~tJ54o8 zsYZ4*9o(qkB5T)n#C1XVA@SiUk!l=tcvokl{3&jka)6_mur|ioq>|%cW5ovZb~4*U z>r$zjBRh#;#V1mfcsmkhtUz*uZL6UuZ^7GQa`KL`5`n2Nqg=5yQz&h?8d>sQsR_;I zhSdwPl{Z}0emcvryebvHgV4R`c2W$@rLFiC;vZ*neHRzKX+5?qhO7c2K_>hr2u75~ z3wNm{z+t26&xsTyBH4|*gk8X7dh$POWq&QU7WQkGoN}S1Qj!6SWdUGUy6{mX)PA_$ zYGwc(e=#5=-7NF*P?Gvu)8UcB((UakSO{)rx%bihY=7s!1Gm>ipZv0~J~S4%{mLd} z;Jp01Y49;Dw*5xr9UuXI4n zULZ#qHx3#cm}8+AD26q8h>!4^Aq#-&-?(IzNbV7NN2ovVT2Hw`U_)qg=|31a3>I03 z5LDeyABK)GT#zEO+H-buQdYHDy!v}vk248>)O@^pb~hWFy0)BzQfnDxAKXNElwBH~ zUkimapianCl%XjgGf@gsspuVrcI|PNhQxTv0J4Z(aTo%~o`rFEXSPt=Ai`jao{pU6 z3E=5Ks32UUyXnY1n+`Yo-EYo1yIN{Lw!$Q@ z65DrYvdI6aL~z&(IpahqtIK9{9_u2>PQh77O?oz?78ozH$cY5*`X`*{C$SW3C10QB zj^k9x3_fXHH%zoI+3%_>HcwwFyZpgFOS?O|tPcU@1x1gHiysG=8HA!%Ydl&LMBW}^ zwuNEADWGteCKa7SzH8@6*TscmWhn{?FJ;zGIm6K`dDod_&^oRXfjZgH07ChEZ&}Hp zi4v+0Ct^v4BFjWF$toIaF3gcqTc=p zwwng#!to7z@iTgMO0D6tV;Dwi40?4S4|g^e=8fl)*^SKGXhzX#ChvOA5cE-EyIjlq z1|AkIi?&Jqp}iMG;h-Qkr}!A!BO{0Y1jGZow}W;~%&b1=(lYC;Le403wE%fN?^pdL zq5_wAEWI=iR%spkA7c*uCU#d^o92{}dOwtNb<9;W(h?~#Nlxz@p`*99g)>ER)ZkcuRO5~K!b zX3e-)KyKR^IC(q#mkJpLQp^X6zo4_lJ&PgzCYp3r_eP-v2gH&6zd>A*NoX5QUKYsV zqbnE|uskMrb|1Rxw=4!WyUv;a94M3liW)JJEXP`M5+bnE zj3l`8FmKC2EeHr^m!ROIV^yJ}aVgAvvo^2`*M72mS)f z%8(Y!&R2O=%FaI%))p~h-+WoH5M(Z6=4o1zi>?@zJ72&EgoRrF`Gp+)f#!2n?gwKp zm`%>2AaVV^AR=h3VERJ?hu@w6XcErO2F^=wVncztO5oZkokFOwVpftf=3NDjfn)hmN&W9-OHSS3;{cO|_boNkw53>kTQ=V;+nolE#`nDhp zI=$GC4&z*BLBCMCKpfJp#Pq^&ONW9eI6JnsXg=^EfAZ3p&r?Xlx`1pE-wXB1l;-zV zm0y!|S(C75ajS+qw+;Qs?i`(81A@>QmAZf6wotXSliax>w_HArc?Nan(b`lzJCY=H z8Y9+N>BRZNluG~ zLNc(JO^3K(a@MXK-8`mG72pz+eMeE3X+io-60~ClhTriolP$~2IFH~aK~5`wSfGtHy^E)MG7dRffNiSGth`)4Nu6>aJ& zFu!Gd$mXyZ4?jBC=IRCHMy>-XTTI~Fug7==N zJGCHK*HrbX#|!D-$0oF!#g8suzLOBA=PH<)vDb|*GE=o-hH-Ao!I3m%gLisIs^lIu z6-1S6*cfv46sF?~uv8`tGu(&(R~l+d9?Tr^e)|QB2Dw=_g9##NLZwl}lI{6G9Z-`a zCT#qMgB|#Bh8RX@77%Ecwm|ok=I)c{^`_iy02CsY%0yr!`C}irW9;BzXeuhoM6y@L zHAE~0;^3dqi6$738h5@J9@vXfBfZ3tUq`aC+Kde^nr!@c0vV$e8erL99(52wgo*Kw zbdJ=4y3Rtrfu6~OHU)~!lW@6vgM~iZtQr(b zC}Rx1`11j$u8&x4`{T*ihWjX$RAQgPd9}!a95{_pTVM_g9cR7vCTvoZiN6+ST1Pqm zmT9DAndFux#(LmwlL_a7NW%!>s^=aV&3cBd%MA7+V_8kxAZ0;>J$xqnThNG8nyPd#gEoh7~fDP;2-}g=)_z<|*-yy3} zeFtAH#RU&>I^|ZwF=%t{$BnlI+Dt$++PhT8$H+9vy^?1}ynR|Ki>xlN(?i3-23i{c z!!ba=?OW!=Oo0>akgRb9K0K1Qk`b{tMUTKu2=kQn9h@Zrq`re0>#R0Am-?y6qye2n zmx{%b22hz&!a_zjAkqmm#aIt7>R}#OOWsuL8(x@TdS;b9N?a;w<1UVAmq`B}DkmyHBxML08?g+S7adAz}KV zl5_9s)(H-VummTm@O0gY_iP)VKo~CX6OTLSslWYiDC9v6)FV(E`rNjtdH0u3Dvsk3 zcg&MN&n4+wWA20*Wt+6)+IIi;{6ka?ujI7(%l7g+O}r%D=V$&G%1CjRO~)z7n=Ri9 zZljK<X18l2Ya& zH6$s`xwH8-%X6CNEi6hwJrF7*s3uO~Kb5}w=nL2<9)6#Jb}g-pXRz`Xs#-b=s}P*Ye$t=j^#RCe5Xd}cX>JME*N09_XQ6{Wzd znxkZ{*ZJmh$>_*ny^rk?f%C9>L7wFffzZ;v0OR-OT++Z{;Y`B5)InpTu?)aZxsvQN z@m^xxVFh}+V9|wW(#61&1u|wEujXRYr=bs6Q06U}TQg$cu--Oas)fhT51~BVwgIvX z&(ssi*SM$F!z|xHPn_Os*K@hqPr5Tcv0JWdxOtz;JeP2%wq@*`YlObTBfOwkuco+Y z-)Hk*tlmk+hrVmQuju}m_oP;G=*UYrh~BpGj{xTVZ?TM#kJc>l4#ehLAMvjT?w9&Ox1{VYr~kT7UDL&nhVrgIY7HmZ-|!CQNy=a zwv+!U%JhPt%tHDNvgU~c#P9=qqwO{PmLgQN@VARYHzXwde%3=Hh?5Ysj0#*z^O-<{ zwQib;Sf4;s-@Phs+hmfOX-MDu$*gUH73=m~_=*$mu>QIUC;y6alEYYy2C7epcFEr^S2JG&U*{Tq0siiC!?b6o1|Rrvp_=b8LkK$DPzeOhAcaJdZKffG~7>H7Ay+pq)D8_feQW zhh36;y;;NF4@h|4>~S*^@T|$gvWVdsE}Hy9u9P!3tm^&S#&FVElN_nz8(_%0a3ldw zur-7qet3n+wLlRf|Jbg)7W9XSB1|kO5&apBXm*E{vx=H>!Do2p-pcW7)I!;~{A~>n zX!XJ1ssic&Luv@l+!;ebY&%mj;ksp+Z`=}Izb5BC4-Bk0ruicxGbR2gYgkF70kg|I 
zn>-c8RX)L~UGyrozSSGy0+D~!@eQ^)hANCVX%4It1$nONEO?Hzuoq;q9WwP^n-fQ_ z#A9dN(#>y;X=W!9+Cmf^erv5tb!FIrAKO1kM9Db7u5nVcB7qp0X8*@@j<%`U(YV6I z;ux8BX2x)qaF^a`UN5!ODakvRyP@!A+P`=@i_D?60yYJ}5D|5S{OeE_n-=Bx6 z>%xY#3hu!iQu7}`gh5ko-sIwzfi11|X^EfL^p@Pk&j~Lo~)K>=TM3sSZ#T{bOo`=72Q|ep@!-{Y& zvzPu0!?sJZ(Y^~7zc z)hR*P55%70sMRf(q$VT=JJiEm%P&Nk41Rl^#wa7RFh-73TA9$SN$M}G+h6d0c;s@0 z(Sr5Y@S)tR$GIgVhHx{*mUGYua#sn7|=x1S)RCa-9dn`M1!*IBPF%&k+*#(?WW|*yVbz2ZNJD?C;>`@Kf7tiPR3Yb-+r zkG|+!(ztZkC9TNOj4B8=f#K^Y5RneGJcr+|-bi2$BGHE+J!!ORygm3=tU0)0G zY*-xId_;F1q(MnlJz9uHor)scQzD^-OJ&iJKWhwF0dlnf5yuIF?JB)ark@qms+K9c z6ATAB3O@FR<>}+G-bC~(u#SK4RNl3&*0PU`hy+wU2Ous3#p4w1bsL+;&MWScRrwxN z3*Q|V*$3ID=h6&fk!`9;UOp=$_Of)^hm?~wvkXC;ed~0=9t#|xK{fzwdzR}{YpTaC z^I_53Ycf$xp}o5(SmQjwm{Hzy^Ui;0-f0fD?SUzvUj7@^0YFZB7OKiR!ab^MlFMLs zP6cMVlqefBlE#ByIvn|_HbPB1kz^j_0*yp*o|SmBwrwzIxYyYX{127az}MNKFdc=V zB`U{ep{a$Bzl%spkFIKDrYKloTU`mQawTN%3Cz}@E>dXWgnLqKi8n$Y#$QNAWQJ}R z_wSPJUupJ0KQ{+5AGstAyJ3!nKM-DDc*U+@=jDpa)bJ@Nc$LWmWt3l?GAQzlEr6?) zf)|Q_8s(8^O_(E^%ju0Oen*tmIdyUP*Qo5Df-?JGDJh!AQj}g-Kzc|>7nUw6c_Iej0rAk!IoLj$ltpMX!In`sFus@Q+T148Fz|N?-5(jFP9T<_;sl? z30V4o!LE*}zCCx!GA{L_hAe%7R>5$lVuj)P_9O|C+S#??mz)cVlm*z`23un-KQ(%1HF_&{Ed9 zwHe_<-91smd&~s759<|s zi>hl;$d5XZ9A3p7qqJLUBi@nDwniZ+h|>^uuYfhEE+tsxD_q3LsK78U(GXp<;2vlP zG~iA=o3T;SEEBdCw!kQYUA#Dm9J#RDYt!Av&hopT{nR^`V#BGFr3v`~K<60ISrA|q zTRJ}G%UP5f24nWE*X(D3rRS9kA4^9%+LKNVT7y3St*;u~mIz&BMoSDTr5rswufp13 zc_Q9%j^BiB{o@wY<&}KnDTMF5{#ONcT zreQtN;Mr8MUnZA})=3E(QX&xkURT#oKzkd%hbP$SM zZ`lGt80A!nV7*P#LL9}Fo$9Jyy%Qmze(6X)ZF)x;o$AV$VtQzgqoyS>BF#ISu?mQN zj`=Cw(ef=`Shu5Ga-@)^*vnac4d#Fj>|P)CsNZTr5e>$X4kJZhIYAYPO;x`eOvt}Y zm}Vy--c30;2E|nDZBs53$si+wj|4%K+`^97Px;G$dW!4!;XU1xUmL8vW{FU_`6kFFY+yJk60(YkD z62bbo_Tp*4fehnfWzDR0KhWcqTR>pK7C_5t$3bkUF-nOIu~hFXoP>wdZVxlFzuCd( zo&e#U$Swj*MFXiUk7pC*4!CK8g>e(rA<@e-4y4huv8-n@WAeJC@@F_@ciKt?S0$ro zIu-KUC5INQWZx*&^3R;KX2eh8+rbm8>c5s^UY6H7NAi<9K`YW}zM6i%njAIvquj&m zSVsCbc>@Gq@?ZUZSWGIbI{p*Uc)`59%9e`RMo#EC+0^{&CIHZX`}NlU7!Lfy5*OF) z%CD#U%ijO^hC|2pSFR~9crUNbnxJ6y^5aFG_l8;E(=Dh+2%6v2K{PDhImWswj|~(o z;Wh?S-Z4|2v{wpD*_=DTrsU97mEOg%3=}kq?Gd#)$}u-x*`TRm-C4^DZ`dIDA4t06 z8x_n@i}gtNar>CjN!Ve7pk@7^mk=E#p#$h}n04o33Gpw8ZF>B8eig=+2f<_gJCo(a zu9LKl4C6XeyU6vp2;+JA=N;#Vyd$%)X=frs^#oO1Q_x8ydrQ9tLQ2YooiYeV&y+!~ zu)(B$1X6*VvO2=Cby?S4c*kc!CYg~PCV0|7O%=OnhtRJa(cjdfh2J{uqP6+9dmWzg z5NHl?u!(2i0ROR^4V6cDB*-2TRb(3VbbSt>PzG>Ydz6+ zgBFb`h;R0v-cBD{S$X@2=qG7YCF~Ku-JzIcRBWBy*I&Cy#pBtXsx+Q84JBdhs>*xm zWv6yPf6k6ixAZibpbLGW7AQyw`m1*i7a(^%>+4S(lpi4ytR!U(IZRioJBMP@E{#en z(pTvwq{|nq$}6&N7GOHh=o+J&9b;Ugca72hs~GtkYTaHd@Yf046Am;HYP@M;Jx0(8`F_9NBKDXnL5>*C zhQzW^t=3-X4fay>3lRy7a>5{pC{&Io3WB37qnxMhZ|5pwJO!?BQFWtm%>wc{j{sXm{A&~Kt0N*%lH&`sUR zM{)Ja0p57`v~Qvq6uNO!2&TJ%;n3JGSFvRK@&WI4La-$s*MUIW z05tq|rdiAHYH9whGIeY(R5Zs2BG~qJWVWj{oKG85{8!(-2f6ve2thb%jn23~3dZp# zfruNR|CJlqpoWefmYS+29P>rNN5O_x!X;3Oj}%v&p`wUYIK^Tt!hY=1+x1ib7XdQH z7d((l$FE#R+6N{@B3GXc^dx59*^wCrwhY3AlMK@o!HSl1Zg{lDKu9V<45ud%333tY?3LaoqvtT6%EzzwL4VVASzN z*wg=`&*AK6k7N3;g&?SJXl88mPdV^gS=k!uJN+A;?yusy%{(7Gcjkb-0d!IRp#m8a zcwAAL3_LBFJOXeWK{4J!#WkiWrtM)>W%Q)%4I`dCBAgchZg842P<=sH8km*4>(fP6 z`u=KmZg_O+O{tC9R$Jwpo6pl>M(yVXLpr2pne3@V-I4DDMJ{GOdAWr}=yIx$xJPGp(9 zH}N9)NZWZvzoDB6kevg1`tBAYoMR9BeHp1bm$i13KA@eLBJ)*z*KbC%=%hQq&s1l> zEsPAOwNi#wd0G(M9LYg5dpO}b0gY*@61+Rg`%T#2&!q)g$&Qs#sbiHfyW2&3v|x^X z)>P`wX>u50kg3rsH92X)tVmlQ@u0jb7nwzPP7nD2B;$F^sP;|?iEFOxCux}O1>k?r z)LwqM1GHpDlWvfh8h#VzGNDua32}iLfvfVuAEK*%x2%{;*@TL8e5MfI5@p!bo!5v# zMA`R7X@GEq!7~5KVN-O1VpAZZl1&E{X&N{)PWA!%A9d9?W}yH7tt*Cq6~6sHbhXo3 zCPS$sS3m%cCoI8Vro7EM#j?6rMXCD&;R8x!v*!=DQwZN6!>6)%WddeVx{7SuiLkUBY1FjMf~g&vg4j{#^P`U1d!Q96mBL 
z_@v>%vqlPMktDagB6$tV`f}6vRQnKyE zFPwv@%@UyZ=)ooZmgn8Ervg7k8rxVSUTs#Pc8Mrs^jsmIC{pw@0wPwQQ&Ch^Qw0A~(SyaL$T=ZJ7NP;2!- zP9ket-OVAmn2TO7Zpi9w%YD@jsIvQ5_v%i1)iC8KW>rEFZxO`o`tTdGi|GX2st#~X zFjtx0J8z84z-$NwR*~p~9!+E9r^>&<{6}3W0)t=v`e`xPg8A{`_@C}q|9%(yr^Jd? zCG8ZAk-ol|tk)i%KKlsZ^93@3Kp_jjRn`>f)IBdMAHd^iXhoYe#vpnxDTG2~dU+=%a*|%S(*uU9HM+bd* zUXgl!nCu}E5AL(EfV3(5F0{@1oZB#W^@Y%`dZd!Bbq_LdE~y41P_$2O;$Ep>&o{?9 zORcG{4A&v<;dcyuk$vN3sIG=luKQJ`cQ&}&m(K$IL16T>P6Ok0;Lq-QwM4y<>ae&8 z(CG)8fN-#oN#|e&5p{+eat8?hYg^uTMOaG*D{3oU>v(cZSy;$swZinuaIVtn5~>bP zO7vnH1~HZ4Dat_Bf7|I%QuZC3v>;^5=-IIrNl6~mS~G1$toTYxPUTk@s1HP#OER#5 zG(YYGIXvdt6?-fP0r*l7VTx{QU49WNATa-+sMh@YT^%L}wK zv&YMcwd?EDG#zXygoz$ut|liUOG$`pu_UFD&m7oN--JMY1kbh)K?7~Y8)+$qtL3-o zh?GFKV$3!a$KwH1E{}v?DoOsLw;n^8)2_2ZyF>aL(CsXXx|r{bg@!Fnc3=_T%}gN2 zK|{)6P2G-0uqiJnU-gUEMQfS4-nt`)NtwEnx||4^z$WW`L-F<@@%fn2_5&CF8js{U zFNdjM3?;dml`~*1O+f%2AFhN5OCU8Uxpg}bjEE~Nl&H#2SAY@-2P@@g!n4kn8t1wF16Yt;IRehtxfP3l=$widR~toAt%f3x^4YnZ;x?^hl8n$T;1c-$&EX zPa%aDgv^Q^L-}`j%3`G%K~>Ii?E%8BG9yd4lC+fyw6j9~LF$|ib5dD4vAJnN^*wRh z0YvqxK)AIX(u@m6!4q?a9}}3yU7AFJB%wuGT0!Qe4+;gE=ICV=Q)Pvh$WMxnK$-`Taa1r zaL}yM4k)#VKPRS0Gh!Y_zb{*1Yh5k^#tC#%2~?^WXN(X-P2^KZw8e|m>qQCxY3AOT zKbPJ>nO{<_x*Ec~Qm&=Z3rMtNIa8Yq#|Q`t%To(F2RRZ4dxpu{{304|u4<@rgCEd! zz0&ln8moNZ;C%3!aj8j0MM}~(jGVp4&fxRax-+65?3swMq=i`mG$?0s$pUi819atr zbsanZOu6Y=ze>qQIc%g!tbkfc-8#*1zsvceBzJ^M0s}FYW$-^zS=4u>s&RB5zG&s% zzLt!yRXEo^b`(~}{Csk(eGCnx?$zZb*SvfftyfqV^5*%P#_}=;mEG2wdI^EMyYXe; zJC&S9-%gADUPsstq$d9$nKMy0_KWn7z{5LQsMl8iE=WqE2pXwVNv}WJv-Mcd0GBgS z_vLWIsphwknz}M)`WqSSq+tj2cyOdlD#!0FSg`lJ!y_JWw7)Q!a@2>e1Ue3^aBR*LG6#p9B|MI?eA3tAPNw1^q2lo>X?@T9*7!v{FCd#Ba3s#l_3f__2p7`#pzvO?%DM^NILP%kmurxG46vdkH zt__eZxmq(dSxX65HieG?DKLARulweAKf;-NTusz-UdT`!%*S4YavtY6Ub%1Hr@bEg z@_b_Paem1Mun?^p)DsV;C<0NJa^CoR9uLE<+=p7dB4|!QlJzS3 za82$H15w;S$C`pD)gQ|OD;di4uxB0LX8B=!G$KW}?y!5_A4vng+sXIbzo5_G0|n|J z-Tc-$x`o{LNFKHF2LVum@(vd;)Qrgrq=hP2ps;7FX=16f15)8k&G$ERi^o#BWN|>gc?Bl2l%nFQ`y3 z-itgds zelX6^WGQryMp$ve1~!N9o(#-ZEjDp7voL}LuGoacct4D({C7zogNG6WQu8$4-=jtW z1`SptRP5>@7mA|kuv`{RY zie0NhYX&2viVky0`}!n;^v7g~mlH*jTq5cCgA++bM`eQFy2qnI%FCfh-c7>`YV@^8 z<`Fn&*^Gx(tY2zNDA(qJ#mnA+s!NZ+bwcCh)$PFoJT*j!9tp*Nks?P&jTALCF+;(A zVOBQx5s+tCkB?fJGOV*wFeV_@=0dQ!d5W|-O)1y8wy;Na4zER>kQ0)?kyVMgwvJgF zD+oL7=pbmj@eK%Cof@r^!G@po;%R!YGjUW%Dj$-IU(%N-FxHzqO%ELem7icdK4Z;? 
znUk$4*-FUoUrdc*H53@8hHdX*H%~;mpA zG=SE%XZLNr%ILb4R6xzP9(m33sq?MLHMl za;Fw|aK+ztS0?~&29+P-s_^qO#+%;zbLIfAH6R-LTwE~dWE16+e~q`%q&r+MgQPJ8 z!Vv%`z%N0st-HVt;lnwy8ty`|Dr^b=g+9usHpGVB008WCGzi)x$V*t)<|0rSmZneF z6CH+<4E@KVxhrfAl0LeAjfthW&K==Uci}L;1-^|&6n2Tm4{C|J2UClGkPTv^-H#xE z5?!kaKz6DJFC83<@NgTS5O)~I3)NLGz$=po(U6JTm@9-7VOV_JU%NRT9Sm(jUpRrE z%0(Dh^u}(e9G)9}67>Uk15CYm5XE5$0YR6AVB3ZJVv_fxaeSA6<=osj^jlbsw!s2q z5vkBx-lP}vCiVTem+=Kq*46uUiqg#Nc4?YM{~gj|=p3ca5WNW}ihFFUcm!vTGCkj-Vrtr_P8?=TteIDqR6V z3{K9@`_>Vb4u;GIA!|GlS&RRX@i zb_`n#*QiA|aXCL6+RSe zDOb+G^Y16U@gywe`jh#rF6a~_)GB)Zc=Pe!KJFS)z_}QbN3>A~Z^|&wTA3rNn?vv2 zaEql|;Sh0jnB%KS7JHatxN46}o`Q(_L#R^%QwXg?;v0et>+~}ybc_Nm=?5V<#(-&} zAdIE9TZvaUw_zz5W3`-FQmhYYRJ|2`pSu(AI4)qAMv^@nW?)UyE(pKeJ-YrOUqDRn zmle>KP42+W@jr4{Y2cY`Ki@5Q#FlBz(4JYW+II)KvYjf7tqD|s;l*d2rH()P?QGzgYk^cQACR>^AjGP_1+qk&^ zHTGwDhdU#^^!XKg9Ww(Acu4EnrUKL{;;`-nNrRPg5WDr8=op-j16nV%bCF3XIkp8O zX0((irQ#OGu{&GcuMRn6g@$0HK`JCSWyvx{P2`M;H`#Ki{B%n81Wk=W^6$L2{;$e9 zhfmE|g)AzH)G2_w`j(HGSC`E&7%Czr_{*+#!z4{z7nO0EERLh9jCJrqk(2dWEfbFd z`K4QC42e9pUtaweK@HBcdQHaYAAbdd`N(d_@4(8Ez=@Lpn~~Yls|$vz#;wAwv*WQv zQYL6HqAuta`4aa(y(YH!kFdY~U3P}$XSU%7HC3vAX8Gmk{ojPJ1#PYEY#q&=jKyvL zIp_Zy%n}{N4f!+c!P{m`7{5yDn`_~rQnT>U#PCsa^t3}i;x0mr_IIhc{6c@LQdwsW zcMcndT)(#S0t|qAzwiJF*s%zfvQb--RQ;ZBETN?~CBeR!x$R0|6<5P)tk~o~7Htl0 zdsBmVDH#v8DyC3lDozndFQM1?;A5c&45FLQ=qO6@p614X%M+${rjbdFNG7)WCKj~r=LIw-+v>t|7++nwoc|I z?&db8Qs$0M#x}+d|GcthD{Csw^C5jTS3^VtLP`v-gDVtMr#8BUYEUa@O*!oVmc($e zA@(Ws6^TnLi1u&o3jO-@|u$AmL z79o?3kc}9K^imxKvHal^lreKhhho4PaT~#V?I=kdqc^II(Omu^hF>^T>#wz&utkR$ zS{j$mI&sY(Bbyc440$d=m6)adc{y48c=!u4n1DTC90BEw1ccg>)Iuj+!mf zJ5`o+l&rYI8WOOD#48W8y246z=rD8Vu5TF2QL>EI7klE6CJLO;DRAp0$=*@D^WPH% zD41>Z4V@S7eYo!s$|QEQ+j2D9)wjyFmP4JplxWS# zM37^oT|k)W%v7TF)j}c%3V2fOhewpUkr>Jk{)VK;DJ0tujz}ddR})Fszx}pF6WqJb zLN5^{kuF5vA4}_L@dcwQ{Q&(=!rwNmL_wZmbhwh+ObHR!{X9?{5qe

zEci_HH@*BxO>W~C&a-7-gg(`#r!yn7)J$OI%z-wjB+)=}!7y7ySgXEMM&2gmB>_?L z#RRKRy=a&Zgq4U{@(5^asJzA^pQ;^*c*62jR+OOBv-8G@p3jM%jBz)CL(~dBjrWk7 zC!u#=+>F6+931@&F(v_o6rvM`>!~cBSfX_rZ264M9!k8^Y3bSn#IA3tpe`Y;Ke4fC zOwW|8X@RwU1l8=Q-i>x2(XHs$P8I0605e_A3n zHN7r#Kglfu2mk<@|G^UZ*Md;dRK@&bv3t+{<2I4RQ5}22JiK#23Q$g0~6YOfp^|?fZrI)Q`1HuJ_Xf zuJ8B%;_MxxD|^4J(NtJT#kOtRC${ZWaAMoGZQDl0wrx~wCzYh${QBST?e6!E+xPYz z*FZ`` zeN~cw0B~1JIV6E2C@hE{tB9zGU_;Ag^b=y>fqzc1T6k`sVN`Mkn{y-1G8KLKR!R~g z)H#qsU9-YEsk>ldUr2|$P`Bx_;T%QQ+I9X$yldi01}hwE)`>gC1X~?hVmxjf5o6SJP?7=E+r) z7Nzc_GgJ!O`1dcF6vDHUPOoT)*Z#U>r8621H$Omjs3c~lzStN?pHYXa7@Pa*KpIbf z&p)R2RuJNMBbc-}UXlYnkQ$aF0WBF{YX1%ut0s5#n~7`oTacK@Vu7~VNz!3r1Lo`1 zgk;94579%+P#zhUK#r({dakXqe=Ql>a z5txsDJHF2&Z^qOccP>q;H4Nx>D}2DZEKGjO=66e#klHgzIHg1Cp(Ic%>MQG`IJsa; z_!JiKHvXn(Ff*(Ef;OIV`1Z^w7|`(u8|Wz^du1GE_;|$68e(e~oyIzFJzJ?9oXc1j zi=nhiO80YT4$dImFQEq75Fwfm+B|*XzeGxL22cf4@!!MnX?A6?lA?C{H~6B~o=b61YA^?^ zf1y-g8)YrS+8c`$I|eV`OMLvN%?$rspvT78D=uG5P2&q8sQ){3u(2^QcKWYeZA@-} z5fv8zG#{7qZ$KxFQ-T-Cifki{qgE9z>-Epf)UvJ^6qB3pueIL=d!ZbRJf*IGFo@n^ zKdAq>eSL!8#T7??qQljoWLl*|S`kHkm14%_`JFLOmnz^`OSd_jy7{PrNqwrO+EdwI zC&;qGkmD9JwTLe3?3UX3EtT%MyIM7t15_y5U7$lr35Ee(Hbk@ZT_hmsU>-#%qoI)L z5#`wT_Ksvs9OX{*Sp2bxR?XYk)fQc;-|F!H_V_l-+Y^yzP;Yt;t#7;hB&09jifGZpPpJ2M3REGCJWUG!g+YmA~bmcz*a8hMkDCiCOuEtJYdtKa1k!FXoaD8s2%dL@6y&TOO3j# zk#DQ#^i_W5^X=n+!jGv+bT%gZ`}&PahMACz*zU+ZV>;ETI}un(U|Rf)CQwg`h2mIG z@?I;$9f%&F0c#YVZ?DF}Y8I0(ki%S!mwbJHQKN95(YI8UK9d>JVPONau`FXu9&dWX zkipeAn=lVsuQqBH)I7w(?Y^#TnsN@b0p4vnzGdr9nPOHA=^^L33Av&>FQC&vlp2I% zh2-h##1gqU%EA&&x!gO#cDwD909MiEf@ich7;jIz3RfWC?>@6cx$rI);2q_U9J zDF;foC`E`5OhrZ=R>_ekGIwNZ7lpOTchU19b)zFZ%#yPo`4%% zHZOB=PGAiNC5===4n>XGlgKg=Ub2iL6vKE=bG__yDT%Wv<=VDO`Z87zjC9eQ^iG9~ z0>wN=U3di(PIr?8a=`n+ ztqScKFN|D%_6B0=;U>G!H=wuvx=qF=B=!O%z)hHqOs7L>^8SRB4Bul?ay-E?|In2+ zhtRFPA8Ee~ElMF>2uc~n^rk`h!cOyoQ4+26RRoKkPZArh0|Iu=yzPxxDp?f$(>i|H z-94X|tYqfT5qK;8`K)rGnMymT{adAD#@`fx`qn|&_855CsaAz?L?#R>Q<986s(k(U z;WBpugVRVFj=(3>e~r3pH~uF4uZ4xKU!7N`|M#e~{l7tHwVJl$f*9&2dpO-1C58h9 zok5MWRhY7tFg7SFr#*C9kd8DZDypYs(z+7$1oS(q-rH*T!?$%T-i67ehjNw=#q6Bq zpPDYe4Y=`QW7Il5yRSDI&1T+edwl=k^_JF=(u(%g(N0RCmex~iSt2i!dC~3V2f<;* z7KH8M7~)I2A?HKyNV!oPGe_9W&D_WgQ}k;m*HLYA8Q%5VF}>@g(HXV`&yik;*dtFM zcN@nou>3e}zu2S&*t&>sDJw&$?nu$uT-{_jXXk4l|G`qFx$!EoN|v3rBe}|K?XbrF zb7tzVPcVgjq$ahc>bSwHy6D7~%n~6AkS~VQrJxd;|Si3#liWml>)VJ~mzoafD~ zEy+So&|B5q%73!t#&YOx|Js`-CT$Tn8nvw{aS!7SR3%uDszJ_aZ6`pYdMx*d!4Oez zI#jkJ`tz{TeT5$f690sbFWNXF+mXVgMBcg z<`kV{iaFW}T~A45(Tl$_nYP9;(eN}3jc0VJz4|yu=Vmu{br#Yor0;Fgt6h6)TDm$w zhQbC}?x*O+yefsXy0%`h9fu%Q^q$=K`JaHXl!wxWg#|_cXeRcYA~~p@t9s%;cJ>hIN=A`D9fYEl=&sj5PdzR%^m|WX%V87QT#CNo2}peOwmHN|nX=E1--t*| zi>ET@UQ-q{QgM*^3_+EH{Qd`RA%-&ZJHZTLt`S-bZK7N((k(6PQI(7$RBSd-DRVoS zG&Rd;9+8^fs?`ku+CSl*b3!gDghyeVfI(L;aBqB}RPh_EF{ke}t2?MxLW1XSYXG>7 zk7w_`q%H%!$M@3B%M+x}p1gB$2|R)P!Y(AUvCxHsJsE{C+$>dB<+nuNf_soHoGv~z zT;EiQZqe(^kmQR9p#Yj64=-=exi`l!J8`RKnbNj z#>?WPAe8uKVXK@Nbg7Cr88XGv}9k#iin5DI$Gz3{~+bw>Aua6v@H8IY}Fte;LS2b}{L{;ifq9#&-(d zNsRd8fA!dOw-fG|BYL{6O88d3c-BQuXZ@3AU%qOnu`ZKY#q#pUXZ#-|^?cve=*QwY z^Na#=&>XU_B?=4yeO$AexMAKv3oR^?b;fr3l3ixj&ssg486Dn{kVs}+oJ@K>gUE3% zrTq$q02Qa@PoL7gf)MV3!G zkuDJhzmm^neQAusTdJ|M2s6+%^_eib7$(yg(%*T#2H6_VI`9-+6QZHK&`B<_HS>?) 
z&>#4tsY1KxeGUzMhh5lg_^muStyRHbT33(}T6|dGaj1$o<Lv5 z{&p|@KEWxhkUw|4d)dCK+%HZEd}N$+BJ{9~xyUi?j{jtc`A;500NNZ)^!14deH8-# zn<~e@e zD_8T*Y71nCFh{6R zD(G({F%%O()aMR)d~yhtr95Y4f-2i1I|WFDAnfbKLE4iNszJbLgS94=)xZ`Ip3z#D zR#3$Ch%$Ug?dlw)JxrB?w*P(!M&@is7dh+nan&W0KlB)taHq4GxG9$by)^67=|HLr zYDK5O)3+IW3c2enI&M9%;fQ&!U3zXHDCrm_St1gUs=NM}RnyL}ngRW|ADPqAT^f{g zQF)_vh~ue%jXoG-=lbrN(4uaAcXWr;qjug)elUtRL@Jr9;?9t@!pxK#@A?sz(A z%WX;yfmz(1bJbL@x0~oAHpN|3SP|pcNkTALyjAa4aQ^AWar)3nEd_On$eFF^xWVjD z$mzV2jPp34aNIU^TrXhKO4Is016Vq5r?S7w>J4rU^&RAcQciPG5F6^FvaTrm&bsXy z;uQ>FdS2mBGiZAi=FW&mdcYT}N(p;9E zc^^6sSX8H8%gelzcAmQC^aqP}JWha-`rfRX#x5$%qH0MF%lmY^Y2w3?H?#?_bvRHgzH@2ZKC`rAa~`{od{?lclT?ddK*In7;v@R?+kvo1Nt!RslWt&I%Qcd zdCEEIo@v@0FD_}{Q>IIQO;kpE|Kh*Xs0h?kr)o3`VuhRj^Pe#JpI}3oL>K?3;qdJX zm%l9dKx`e%7!2+H5k@mu18oc$jBTy0ZEYC-J=nh54L~a=69<5~A<*W3_Z*U=q~X5_ zA%^@l!VDR-x}?1bn$GGYsYm=lL0LUT!neiDf;-*+4Oc2@b-hgLBe_=THK+Cum|%cm z7^8?t82qOeuyL%KC`$ckQhmd!%g(sMWVl@$)yufw{IcoM`D#8J$m|QlL^$c4Ox;~K z9YyUpBK4=?LGI&D1_ku{K((Voz0OJr`5-}O!-Z0_0J<1Q&>F$K8}T@L_fkrTf?v!9 zi0)Xo45`3Fe%teZ67>G_>r=Q;nj^kM;-_B@gna+QUoUFo|m~ z{&V~RK#;fdCY*g0Z({s|Hzj)-oVRLP%Ivw-bNa06k8ZFJjJX> zejkh*n6${6k(?--m)dr{j{=GM%qI{VeVERjxK-Pq{eq=Uh7J(CRT~cv-Y2=fO z(|P9;bM;_*7Lm1)2&Z~7J>^ml3TvQYL)UAUHGh$B!mO$*jj7Y}@@=5H zF7>hN`i#dyahZ2lIi3{R}GPGJ>(&k&3N zn@S|HkK&tn`z^4v6>lw}FF?89HRcnn(vTsiead(;CpTKGi-wQ5!xrr77o6ayVLTy4`TV)J@;1P+DQ1JrW>HJ<6tFKJzJ=hz%K3}%rX=>U#!uiDb?HE#MZ99S^I+9f87LncLO z+tVi6&V#Ai(`g2O$xkkYOnPeAGhMG3yR0yJZcaiC${;#cLmXLj8C}-UFY9sKArWzu zX(Z-EJ{o)NleP1603#(fk~ZZA4*7=yGY-8ZR=dBftXEugh5$N+b7g$ngMS__V5EEx zUhq*q3GrNZcC~$P{?l00KOo0nGc)7(RpWq01_9yu{|j>eV&7Z;R6EoVKYy;JSRW1s z*)h3BNaJ#aqe$faRwDc+MF_V7*SC^zv5D2Z#pjX=POOyc?;#< z;!9y*Vvdn5MR**rQ0M-0>C<^!HIw_*L!$9x@*)`2+x!vVHnR?fj=g!K^2t?-gC~Lwj24BECK9=!^fU3vxcE$Kj@9_7@e@!NXB=;K8x&lfmzP&& zAS%pgi$jA^UGNd*belDO#DcsCoM65vkJ6*Wgn*_nb$C)znmI2wAxUSW<4YemY2>1D z@6%Y6PTzOBN=ThzD0&R|3DnRMAJkB`4G}?BYruTxWYumYgKY*2TT*x2-EYtgiP_sCBxw`S7mlaE;u( zi`#1OYS~yE1Jaj4iuNg5?=-ut-d{1p9mB>?yWsAlg^J&zo2(lP1!4gz^qgD2z7g& z{Y^pMZ*Ic!BD+IN!TJ8UxqZ)^?yft12aW$e1K-d(kAY2g$S1XYeS-vPrz&05S6dAT zfBO`CfdIYt?sE?EiS(#ELT7KzXB%C=!TzPS*a;~a`L>*A!GM^vf_#3W3AmOge!(0= zvif$Y2mbrJ$6@sIYsm>3cH#pP7B)XTS&7&)_DfSP(T9fg@C?K64dO5@&0X+(91OWO&dRB2SKTZ= z-V(gyjU+s?_1skjVft#VS}6iEdI3Lm9|=|WAnOKtJ69Di%gH|w9CF8`a_vPc8(Dhm z$s4CyhBosSpXk2+qwm};Pmjs;K5HxItI^npX(2tNxzY%x5>Y2KLB0v3id@&mwj24p_li$l9+Fe|gYx zV6?XI>sM?ZoV8oTd8{5#7q4O!M>kKUIkzyU-Yy0(KwmaE#;C~f97_zL9pjceB|r|2 zDKiB=@5X{NMp`!St8h|j?c-;^A9)jTASzNCkTns5N`X7!ap<(z{zyA}LSsDiT3FKD z&bp`LPCz?OY|6O5zfZbr!>H}=)i>UyfCLNZ`$H!+M;yIeoHmd>B}0Y5{_s%~ch4yz zAH97YEZVslC{!hU4`uIfWr`>EyOn^9_YvwpE4lx{^2xvUVvt{XaljXr^ZpO9T*Acg z=fCLtzregpeN7)l9rg1HkvR?)3CZ%HCa(e5+ksYvCS*ywiyW~;&rQlPX3iYg2%=S~ zR=w4fDP6gR?|lWhH6wUyto&|nYje(-9@WWs}qYI0n3!SK(6eTHTEZ)ZuDTZ)$%M! zAa6N07LNt)EHgU?)O7n$vqMU&-9d{GcSNf z79gfG6;)nBezaX6pEiofIl3eQA1)Mr#!Y0ZGO_<)qs~U&H2p1}fC#jX6+pn)Zs8bl z2~mK(x9~)}G}rJsRnRhB{+zrL{o8F51pEOspvK|`>*tI?$m5ShCRVqzP>Qx`6vuo2 zD8q2~*-UX1;^iktmzFBX`XQ?&7g`Q{p9wscCY3sWGjoPX18 z99PUPdnB5bAF=Tb^%gu7lAcqx*7|NX$g!go(ze@Jnd2gw2Q|G4>AJI zU(yVxK;27sA^8>S2y&@kWYTWAx)tnDT7P<#@6za~-mrDY-&TJWx(j#duf|^F`kwoT z{|NK;BEa|zl3@ujUA(xa;tUK!B-9Dy%gq9YXH-y=DQE{0+R+mk5Z;2y&Q!20)Zfx+I#*0;+m=xFq;iT1%q_|9zr zQ$U^JP{g!tiZbj5+QM>%w%A8A0)5+c<9g{3G=YNe%gVdG5HU zf}yRUAat#I$vnl*>jy;mco#UnbM8+30SZGKUqM1?u()C&3}b7#nXkB{w{o#RuY*9E zFZs(^Y;PH9kAQ5`WVD$x-}Y&=Y1offj_FFHcWbvpY)!y-&eO>*5Tc_Djtj_UJp*+; zM7{5Qbl>$r(1`k)6BL>+4$3B@I*Xu^LmV2TrgYg;O)bFi{(2HgC5JS0`t{_1E!7A? 
zk3kZNvSe2zzAC_o;Y~NfTZ9N2Moru?efv~b9`WK4DZ2lL0{hC?)8-u4;!D#a21xZkUH#u^qRrSXP5I zKx{@0HhBnk^gD~^9Ul>%`M2|Iv?q>jix`2RsP!1XO!U?z$+kNTCH@Qhd-zKnmm)ahH?@Ba-7{ZrB<*_`^lEniRu>;%x8b`-aWQ8nFw^b6iU45GW#vajZpg8pwir{ZmIa z4Q*?(&))_shTh6v%G8jw5hJawRkYBYdKqPqO<}U_D||Jec(M%@UI*f_M&|4>okNy) z+FczR>pCUNR83cRzGhOp@A;-*yRSFj?=Bq+L9Pa7cqvEJ5UA^xB4dp*F*fQ4qO(_v zO9&R~Hh#c)qOQ%(9{7(O-Bv2rVvvJcEFOyrfJ%{`4C#64%n9qk5>|;JkujC|;Hxf_ zg``2*lP>AYBvVtpjTrv4gcTUlvr~ohJtUgj_oo3Na*ehbWRwVHH^0#j_Gg?vyP-odIMCot zLiDJh%)$~R4o@~-AyvF1|CgW&aq32E{(8$HQcr-(HodldAdH|MA(Fhv=|+Op`xFxQ zNo=c@7wZCw98>w594+!}0zC|E#x`$-1=lVI)IruDtCw0H;a3CMJY;Hn%-xX^S=q4< ztZcl+-dLU%pY8ot1!DUU^4xRjc6jID#q@VxOHCV+{NM#in5MD+9oF}onhX>nAUm@6 zhrNHVr}eX*ajqf;bHBIq*2mnakMk>?wBv^u@vz|3*r?C~WLw~znGN%Ysa-BN(%-++ zP18ADzfw|kwUlb5HdrBZ930vwU|F>Oe9D*pV#AJNL5?M#OtUWT76>M;a}x__;y2JV zQp`?wK49~b0|9+Dd!#g;YoBQl!`C2Ea19NR8LN>XKI4!qVd|E#YCZ#Zq8&zt%U!vJ z;4R;a}D#Q;3r#8!2LFN#UMjR+w){~GD9DAZNtwv!Ssy*uc0T_M&eENyR#?ix1G!E(-U zwmVAI$@|G{nUz4J;3%F7$g}Fu*?T-?Ij|N>dyLYep-wfCuO(Foe>~dG0KrP?Ej1>>$3vYX^<0?8cT*M8#V8Nr7 zuVV&4OsF*SGO=8VR3|A)SHr}TPJ(k(PUH8mUWHtoCK{Smb70~`l_}|qEX91_L<{+& zDQ9cVAF4oG)O4fx@CTLMrv#Q|NtwWy0iFsRHs+4r0405q2PM?YnSyn(SS(;8R@))v zO1J28E=2}bj!}DT91tOZhGA8v}U)nla^8?ZL_usi6FENj%-ovoLa zCf_=iy4{mCMI$IMIzo)>9R&?i1dyT|?p>guCZqclB)alWeR*n~8842tS7N5cI>jC5 zRM%{pCwxd+#&CD-AqJujDtPWme>*GuY>z+d&63fSn7+gM8KyhHF2~C$MZ;2J`^VQl z2#64RAv@Z=KzRVA0kJ5+P;~ao3zhbFRm>;`s6*wnaL&G9K|+sv1ZAud)=0;cAN8VO z7n#MY5Cz?yKFWQV^%BXbA$)8vnW8s#JIQDwX6Uk_BXkb-{5@HSqTaQXZq>}=Z@qw( zRLj~wI>x1Kx$~t@dQw}`c%Bt6bC9pNPOrRPcMzL2`Xa$z(9&G`L$BD=EB2ppxGh#b zeot>FXCH0?4s|dP%arPC84rMH{9dVvR(4(q{O~2*PKj5Lzbia?F365y5HYDX;bRK! zNpp858BY<#9h0Umi>RCyFwAAZG=+n8LE1gJXO-Cd%$g(EUyDW3c15fhT7|?h5nl_O zQ+mIf{t%_b5-RU))EK?ZGMub*EkBYXwZ2++v*@`aiuurl=RA=R06c_QID{GTf>6VZFs=Hxe+8aCxNMc#I(?F5(zvm$dUzNdmGD;#;_V?W8W@wMv~% zxplaCn{oToWQ;?v*9=w73Jo?50&?mf;$!3e@w%E0zjZYN5gI^?m(!Y~vUSH>9D_ac z7&XTg!zC8D0Kwm2*qbZ#%H1m}wU=p#HP6mF3wpv*2Z#A*y97k9n$yQV>b$Zb+ zv*kv5T#x%_$ml%6;;L_qw76^RF=2 zJ!4H03}0IFpTcM1QZ>pvK9wY^V}f)upY5o+as)tt^#uy&BeT2Hmxumz)>Z z2a{w>8YP3=|Kha`r8gWUB~Y;4l`e7YOl8+t^{sjVI`InZsr^u6DILG=o2R4I&z5< zO6|0X6AtZFang~YHHT^#wGVOA)ez>IWOS8$zd}~VEi)rm+E1|&lJ7j+cXs!qZPw5C z7x+~AGI?1OX5yX@+E2+bd{+S@x9NA?n%!Bdqh^gWL)wQ!45=;%IGaRSJU@WMdbdrg zIiPn=F*+?{WR%&Rhg77gb&;LAUNxa5j+L1t4zV!BX}pha=Ge8V-!^oA0W_{2X05i# zQtx42y}AYIF*;sIgwF~c+9f=oU|x;72Zsn8Z;NfZWh8XzQ)4?{VNRPi0FkdK^Y@9u z4^pR}RIhf)nF4fEOu+W_soeNKW?S!E!1rGd9|4)Q@qe0ax~Hc+5C1&UcMC!KD(XFX zyohy|$JrifA3mxf>)VfMa={!883HIXfZH~L%^DMURDSH(p z@{R7f+W%AZJ1^7oM|M$U9aSB1=-Vm!G2R~_oLxiC^_#KhJ{&wJNC$P#}*k;e^ zk?q*WhQn)2=K1D(8Sm5gtC2y5?(_ZY_Lo04pZ=jWy96CC_85OhhXhQC$-Olxe~M%T zcAGDTF{9DK4-t2!I6Fh7^PYS8?sEZ#Ja=!><`*;s0~Sp3$UvczD}Xmy*@ zlwPjY?r~vKmX+q5UpqX1dTqs))%B079V56pg9*(*A2f>ZP02~C?_Xt09fL5P6XYCJ z$+aI$F{j;>i1GtGQ_FknMQ0ZM1oqX44=Iq! 
zwtmf0)uGn?$Z{6r%*q0+Na1)>1|%g|);_cK4K&|E)aGhzH=z?P!+@LWOVTA4T1$Z` z&7!kN4Q@0`qwK@Dxkj~4tD02M7n6R>+FZu&G@gs`9m0r$g>BiiHopTdZyVQ!Lxjga zuD+qvG7?GicmtUg6{m-xtq%iwv~wv9X9L@B4F`%O3bSA#ta(UQLsvUhGZgxuooSeP zyuA5i$_^F#;}T?dABI>(Kv*V>X_1lBp_18xYumRD7%D%dndH&_Q!|OUXu(Yy%EF1g zK{KIGhhX6l1u4u;)9lH2X>QGon9W(tw$_(|VJWmOn8Ue<@BnY2&^!P4usO7CddW#C z=Os-I)TAH%LOi0K#A2?q$58QbfO!%08XZJtm%%kUd`v?%;;QRw;0X9Rqc)pq&?XzJ zFE6>q-Mi2LRQB)1h(Rro8uot0Vp;%$J}oVu{K z8m1*P(Pqei_`+`}OVUfovol?0VgU)NV?IW3u+Ad?C~Pkhi;_s3mU}W!K7q ze9S+xR%M`S0~`AKi%1w4u{UqnLA|SPiAe~il;*{@^88_k8c+ltX#D-WwH=E2oLCnP zPh^RVy|!jbdQl`(_EF(uksUVzQPzY3P1L@?q~VdiFd=rK8Ghl8ZVuV`Zlx~e{5DfT zcZ~U&mY&=(nKOn~yW|g4(P{7j{}|EgXu65O>u(|g5;>V1b}}J{ecNs46I)E6GwQ}Q zc}-&-HXJ2Q+4u_xxuEdt9+)%IT#oUc>du+2Hm@#|<@sjfm9w*g!iKAM!O?aQ*l5I@ z^d5un_`%DC5`Sbz3Es${_;I9F!ofs2q+wJg)`k$|Ss%DX#zp?VB6o0Z|I=C$A zg@^aex00bF8Ym@Z&DoJINpys(o;}4_Qt_Flg!J$pszaxm<5h)^4jvc=s^Lgs5t!na zf!^Nd_o6TPmw}>`bqeRAWTgD#1ujD*KHR&yek-i3_xdRm!1NGqDv@dSQ~jl2BaHPN zg>?pdzW@xdRbK}U2wN%M_&K^ZzwnLW@)>9-FCMCSgr&R1lte&#$lXhg$%)`|dHecN z=kX}HpituVQvq!zuce})5~eA8;gDIEo&|Z=6uRwY!pfbhNv}jcu~qvsv#n94ZhiUy zudejR#mtAgp;<&b`!H6(?b%kANolY zY&V_9uYt=Ub)|HhD7VrUBCO>yq4pITipPpN`HoZ{n#ah~D_jT|9tcY?7!5TG8xJvZ zw)-?)^r-@4ztud@A3ceu86HJjTi!aHJ7S(r-cfw!hvZM+l|R$vI2Pz+4v?=J8lbqo z+z-D6>UMoCl|6HU)17eYk3-jxp@28l*^dIPdOdb<*DCFDIv_kHh35|%WZ~Wt;W&Gz z{o6m(vv~T=C#Ia=lVkQ({r%T8!%eO3J5ILmM55Ch%obhhA19A6{iISb?k?hSj~u<( zSF(rFodp6Xk9fU)WV-w#lfJyU(;xI9Mef;uM{6U{Ka1{jU)eDJiXQjeopbl} z`2x!0$5oE0bMh7roK}Zy&)!{qXyn-4#r=S9ATUP-g_rGVLGQyn!Lv-R^n=5`edq6tWs~4;n(ZZ9b z{|XF4wxje-z&}y5Hl0smR5mwdD~Yqjl?lt>xBFhGJbRdHQZPK79UI1Nxs{?E~9~xO=USR}F z8u(W&tbGXs?Xnz@!E(3)c}C_=)9X?{dAbtQecK;g&eB<3NEC!v+9mo{-#jJGVsZe& zLfP#5F&8VCxY7h|3rHF}y$UbB7}j`i$Qpof`5iKpS!lpQBitz`U8nk^6Ah4i0etd= zIp|caP>|@9M-ZoqfPbSL0k&MQTZf1y%IjD(e)z|=Lzl8^qz*p2vy32#3vf|jw_(7d zHIKGZC=BmpTL$aSrTG)GZ|HsXDnPVXe@t5$xxQ*{Dq%ZiBSagj-P;UblqAhk+2@*vV{zF`M4% z9`8o|cGW^A5*~n;gNuDwVvC=3joOz1ADKDC|2O1k9jZs;msN%J%V3yz@G1&^=kdAY2Wt!L#6rFPuQKO=JBx4 znf~|ut%^(|kaeoio;BCPzbPJl7T6T`z9?g;CE}WQEP=vsS;&A=P7$Abdo9;7kx(Jo zh4R~9_ths;wcCPMHhyjj9vv%1mecZP?IfEdim`3fGmPB)fl=1V+AZqN%@+`$MMa2G zokwiZ=SUNsryAjq%(V{5nb0lu?B{~ieyX<~JJ)~dH)P#8RISzIC?IQLjfl1-_ZzND z<+3aA4*Q8i;=cP?Zx8%>E-ttX4s}Hlx;m7_N^s+pHGbO8psa;SWntyyeD|cJzSpiW zc#nS*Vn7u=%&at8 zH7N=msdo$8;l@YH4}$&$FsIe|g>>}c1`k^X*%<8uL|H>AHc61UX@7NVu(dcak#x9% zNoGlYxJKDK5I*yAl8c|J8?mY}fvVu71qNtfm3UYh>38HBWQTOp3C}og9%1-cg4-RR zcR2|A7$Aeegr17acl;tdG9wkAWmWVL;S;@Q4t49<{>i7nv8rx1Ka2CXN;RWRRf9)h z-P^?HMnl^hp zF|5je#F#x+7Cn|Z6RKa&Sc{#*FB+>R*0iI|G)HM+NR=@z{M0pZlX$#C>a%dgsV>UEz z1m53zhn*kd&1U>wSK`!+(6I#CyiZzBymVwDYFdUJg7$m{5TmIG3r%0D9PWhEsQCj= zV2L`!9Lmk2G{w`Lq_~y&0U;_8a_3$hncXoA=dHcnmi>2$M5F{{1^LGO98>Xoa%U4$u!zl(T(R-Z7#3n0&Qv2kBxPv7+MzM|zz?^hUFi({Ek*z9onmuf zQ`a!(5GXbQhnfNweFyw*bK}V9jZN(hX>&unxr<@$3vYAtt9tq;hnsM_KX3ZY0epO+ z;NMAyFp6sE4t9tKRNKM9q_)AE8w#5n;zMemUhPP6jtctI6dfNH*5!fFxhm6?SPzp(?*1a3t6RKcm-Th^^TrE zy&o{`uTh}xxcG9WDo&+@3J>$;o2bq7X=`=B+EQ+%9$=AI zOr95_i3jBr1*JsmwX0gAZuwmhSxn_eL&MVq54>}^hi!Q`jgOf*t9ii-mMLK)=xYQ8 zoEV}0h@`#F0b@h@$dpP4@*dBLESF_CnuB$oSh*-uU?@a6ccg_b6!S*W9}{;ZM4l4S zt5)d2II56TZA$lq{QX+$lP0f_ekmY+!f}J9CU2jJTYo1;rR7t37eZcolCn;rndpw- z)q$;u+{&g`9Nt_Iu1S+kZO-EDE35MZ?xT5@QYx;uXbwwsD^W!utth?qSkG&bFM^=#W7qV?iveZR6a zcnz3F)#Iu(*`^?gJ2h|K7}AP`6(SpZ7`2Hq9F{w*QG^JntPhM~-mt4 z5wad+7eiS#q?~TJr31f;V(p!xJNe#j-;O)B)3I%*W9J*&wr$(C?R0G0wrwXJCnvxC z^89bkyZ1g9byF8LMy*lLv(~CL=lsN3lx3>)_gR?2G_Rs1Uq8)RNHiK)^$ztdy`fgs zmNtLoCqw)E;v4W+L}cR|h(fr1021=_;zOJs6BvLO9H__NOGB_@65{swN!Xoeae5(mV9+~5NMPs?zhR9W2e-Nxs+g@G;J76*xZzi}2x+)($_!zK%!fa) 
zTAUNDPFw`LE)>`TBgupS%e+n#>+Fm!Q+FM(v`JH1|xriwz$~ zz~6yjfi0et{B9IWsrDIB8c7#)k%Eur6FGEi6y1_hbAUb>-P{}5e28Z=N+$zM4^j1% z|5dMh`Qv*5HTCw>>7rLaa=<5*)4lATu_&c#)O}R)WDwLDtksoG=}zg#qgnZ3#jm!q z`rQQr!EWf}eVjHonU5Ht=<*Cc<+c}K$9jmiR@4uCD3q@7CEOwRj^J^qK2QBik$UiZfjXZQyfx(63s@>c`&W6FXJqTNGC@ulD?13lvTvz0vti2YV_9JR+NN)+tmg z#p$@L_1&Vlu?|6$q!ER11Qdfc)wGD;Jn3t{Y5K=Xr<4=cM34H#K;Q+G;Aag#NlkxC zX?q76@4C_QNN=*#k>N?!fiE$YjaFXql7xm zkJ{~Dn>`&{Z^Oux@NVqpJJG-tG4Y^bSez1-B@$J#`olwcw(QGSVQPfwj25)Gwe|E-YCPRh?GJ%uTVPH5zD$l6q3=o>p-c zH9EiP36DhO>i5`W>$)?(lVTAnC#h(hRYm@@{*MRJ;sTYNFl{?<_ao>0nGC~I+pGlb zss!!m0ByDJdq>*(%f%zDMZ=3biUGo9tx(goXh~df^I(6Lf@rqpVjkv1*^Z`UR;$(7 z|GHxx(R7OazAEU*eA$X$({6JuxX`bP6RDvto_spfZUen`y0gACf;WwAb7An+u@`RVnM;nsDiE5e9@~bdSy}) zrWJ+gbaeB=^YX-VQ$M&;!UnGM3moMAbaZl=aJPaxG{491@uVE+RODKx?}nCinV+Gh-kHqZ-7`aC=CgMPj|USZ;~)YN zf0_};oo1qmuc!jxvx%{i(okeF12o#K(ycR!*9*(CrDMl_Eyx@s%qdL!_$=rsESTJ0 zJS^OA;XS&bK5}rg@jPxk32Tjk%I2hmxlh&R{^kuRFcox3On6Cr@O}9JO71v2Qn)NH zQ;rgpvrVmDQ50#He89A2hkKgN z4=nHbaB$5L%gM*SgFgAGBL#nq{{G*@_y5G_p=3+_X!sUtqJ8_TIREzqn*vT|mWD?5 z|6wmmiUNcK^TUXAvzy0M>EiVUol~I5#cJ7#Gajxd`V%{1U%L{ z6Ksns3<&xY=pj~Y+=N*X6g&DwMHyiRxaDO`{lt%KA~@-D(2@j<;L1cE$7bYw$XN=e zc3)4CnqTc;;-$-;<(Je)bp7>HE@ykly|{<_FLBX7W5Kck@+;)qZS@ZG&1m^Q8;k!m zUH!X+RoP16JJQPgD&Ej#9u{Cg(S$E6wvGS{1hoP@2MiWLI4EDXuuMX&XIGEiFMmt- zO!W@QNu(!Yd?9M`F6=!MeQcRb9GIw^c{|?vw83-SI_=gzidWH;+ zGQN- zL|IH$JXv=39K+Id91+=&*zH_L+McSq7l5IdNGq>xdl}Co7svVRb+)Ri_^$@DjH}2V z>wHY#`gd&mE1i;?<18Aeue&-kY^t|bK*5dFc0)%XJLcv$@sRZVQ_t4`MHbz%`Q}5O z)&|m4vI(F_?PW8aE5ih^jY-)hhqYg_Rq2S=1UxKLg-d`(cx%0m6xtarbcTz*M~(6% zCI76ahx25^)BxTMr=?$b-g6zyW+$&!2={GzU0S+mf?$d>t+S-mo4{O`_Pax&#Naq4 zu~J8R7pKK=lbnvNUK?^D1B!+6!~3dPGB1V}=@=;t&_g4l7E#+MVrGF5WtnHfG_axj z%_x#Va;tB5vB zxbNd18fh;!s9$UfmhLFXgWd$*M13O}XRK>$JUXkEb4D<&smhfR;3H->tS!*{L9vxa z?aUHMM6!(8kbMx}}EQR&^13+d7HnCiT7O}3rqm|nUA7=oL@K^Ydu3SfC-z=3Cfk@L*FKsjRQ?ehu2XwMN zlsag8g5r-q8sq)o#k4~6IHz{ikHvnOgl)5Iy9F~FqwYm24q_fDoI?4_PYAXs*HI25 z-{tQ_h|(V!M8(5y7vSvA4IwtDuxPH8w~!z54UMb0p1qMpBNBWqtsD~Oj}lhTAZ1Y# z4gsOvDmUc(71peZdK5wR&X45vbMbmtD(6utO0reQaKTp+(y*G)3KiQ_VzHzd-Z)#7 zb>)iAUftrt`%tHGr^|cN!ua@2>h(au%NB?HK)9B(?7tmMx>+V6P?#B5X5Wi`s(y+x zNx1bpAdxs7L@B7Im}4?1=8l~EhCxU6grC;j9-qY(w9NkqRpkAYAS%gnObiNlE;Kn^ zZwU`B2)t!oGSc6TTL8aY*N;8rDy@?F98u^)LmKN*uT{>ZQzYr8%j65=V+#Y~M{Vxknp>kfq^IIy>sQx= zlrdx4A5f%Ud>Cj1RCzGo4>&5}&?4lypY{zxOfOd8nT)|B41wfG0ok40wh(lMT2)kQhhKPgTJ2^^v$>@nA zrkjIXqAcDZoLgk_Fb4W2!SvRDg+HZM47@_aDunTmr zc;5g`43p8DA$lkAyn?3Y!GS({N{bn8%F!G+?f9Ynl>Oa;ruqmfZ8qukDQz~vIT@E* zaRQYQJ5`FwJgu@)vR_^vFa4@i=zffq21~#zrCyNZq<7`rv}fp zItWtg-I5d7+9-(K%Z4+iUF-rn*hcq((LZ_z(Z_84BcgxCMrgjX!w=r3wa z!y6if`%qcp&UDG)^3*?iZ-^T`J_QgawGqA*tI%nd!h_gp7m`Fkv3Tm(FcwZ>O(HaH zTiY0Oi7kN784(|5^6n^cj}DffYE*9qZBiD0$pmhhwNF6qr-J2#u^A%(F)q z^9Ny_GPMfNi4NeK6)OWM`)TeYWQ0kiU^IsL@NzV+>_?p=LTqWx+ZL!hjXrDfyFo`4J z3}{?qrNNp9-oaSZ57h-VQ6R<{)-Me6mwMe8U^e@q=W9g7REj$908z$j>GT5Pcc#k> z&R}8^dwC#FfXcyNutY~}Pp_WOB1+6tGa_Vd3v&_mi9v23d-|YlTvv#hQdzE0R@|1O zRAQ+pH@#<^B&|EuG{TCY^B1*{y39eYis!%vxHl?5{{4sQL15$Mq6n)dd#C`@DD|2E zH4+qY7!7YL6u9Avn%9RL-`ky(+C#@WeajCF1UgX#v3rnuuHwPR(^O)_h>)s>=87b+ zQwL*8Cswj%*vrO<(+(*Qw1>wKzog*9(*x6;=UZNYj|texQVOBnKK}LaW^5l1ozvEgJj>-Kh<}{3ZTy#musz zdKS|3Ng=D32IAR`%DH$!3yYY3eVF-UKezcM662#OReXX5s`5rYqWohsVV08kA=Cq1 z_T0-K@lJ6<4dVTTf9|sI?*XU;+>XDz#`}SmGB+-E(X1b5P#sAcvKWyiJQN7A2Gff4 z%?iDa%Wb`wF_zIp3qG^-`Qf#MRhXgxdgYRV0u@BP^cd;ps3JN$z5HZ0w>Qdg9Icc6 zdeN>eS#-{fvMDe)bs+{1Wg+{SH#e+XR|mgvx5Q)IPud+LxUclF;skW#oOqqs-BLp6 zb|sBAaf`vea`Fj6yLR)wa(UgJGd$BmenUufW!Bi$#~XnUrkm??emkQnvTrhAcdK?s{{E4Hcg?}!DEGwOnp}22L4HL+)Uxsn8}OQp 
z6JqzBcI4&R**$;Oz~uy2m_L7DGF=l}?$VW_`5WSe$P@4q%ToNRT!1^kjK}vHkA#Vc zy;TmT{A^G~8F!<|a1M>zm2G2t7iGrD#?%?oJ&Dc~C0-}x9R9nT4o38rpdRpe<6=0u z^0{PrAA+=|SQxZ5ribN%RB}5La~hjqb9XbjXR(dS6NRwRN$>HD_6odfP!5{jnPk&1 zMcM4}?BV#McGji6rj#Uaa0dA;y&#TNr(+TC32a|>3seWeIzKGVpbLd0Cp_BtK|~>f z$Fn_CB*N1#6b)J?ti;)HtET~=E{ErSEyny?rxFzKyoacxpm;ybBA4EzB@Tu<^s!4i z+027sGw8|pWHxIGAT0+a+1ox7jiM z9r$>n-Q{;$`B;@s7d(e%35P`r4GNNf>72-bA$1O-O)*H9RSQ-3CM)lCwg!%tI-8O? zgV^yQ+gIE-aR?06B4pQsSHQ^P&WFp}BZC(|-oNKyQVqOo%J`s{A|!>WHNb~UpU}(m zE-2=5MnOZ<^swb$=7hTyal(hwSJ6DBgn*|RBg^zLn{PeJ{omeWd9Q>#V% z4PBXcZXL2dD1#Y+CxCqCj7F%**{%=o%xi`91d?}Fs?ID!l{q$GVMco`KoHa*!hoSQ z(h|mOQ4Vs*U#dK!EFilL2?6 z*5dKmfc{tCgib8k`uAd*7_nlOs~+bT$Usw8F<})28V46t>Xsk&Pwq z2O#~o6_&!j?t&EuB>c(DNxbAm*7*UHm20{;JqvG@L@A3shf_9=KJgZt1W!oFW3)`5 zE`5`fg;WEziZ+Dsf!+(J5~9o~o4FyGVx z0jm!m?bZwnMPu~n0Lvko5r`wv{t9E8=_zDavXKPq2OZr2LGkr#6mz!oEXjZ)g^^rW zCTr-;5CO2mvTmmE@*F0{zM=o|W2&0qna|VryY&x{kYpt~1zO6)b{1yfz=lw~LE7WW zW826UUbxZ@J0QXqnPUkb&qSr(o)Dj`Kl~yT{v-|X1h%NZp}iLFdT<*%Csh&jp3<|& zQ<{*#BRkd`0~oM}*WFgKU{n_(Dnh)dC}19-4Fz%91=L1b5B=P%$5DnstnS4^^l*p7 zaEuEvGg3o8WVp<;!+_Cq4-Q^Ra+#%9Je0=j3c)#q;BwzO4(PO94tRV~6o6IaN2mBp zlFAF>zbVnoA7#y%&?VYT6<3@=2q@t?krpiE2^LeqA_QDg&80HO-4V=rYh}Z~jUfN9 zi7J+5TBsc)j2TWe@u!$pRLCxCNa?Z}aX{}15hsG7>8vQM zQ!8x4KoZgm#hA>!L@T=kl!Z%`Q3A>+Pp#J=(ykaf#f`66Hvj&Pv|VF-a6vS-bd4eR z>lLY{_Q=NZ{39j4tRcP~2%!aDoRWZ3ER2)WwP?uGM=#5INP$?7Qh|q29;T{>`k-ed z?9C&#@k5!-e-ZHUXHKqIwdf{Vtp80g(l(nlFx3jQtRw>pTS752GZ{F&s%z6aOem&T zuH=zug2!~uyoYYqi`M`4VQr4+fCd1BnGf2PfrG#V(O0BTy^MeQqdi3Nw|IE$X@STO{yize( zXR0(2oCPw;3ynKlgBH)KO2K_$>I-d7B5{d#*e@1!KETs`lrpNH5H@2vwCE2vq@f zT<+OihUH$sstBE+$L?UlJ4)$NH3UQ*(#Lbtvfx`nkk741o&=z2kvPww`J+IcSy2y; zs>1tN$P-wc`o3xfx<+MjJ#}ft^DfSM$FA;oRG^$@x_mxhyW1c$K0v8YKo1~4K3IIc zL36ub+JP=vka_#^Y`0DUZn2)QT_GgcIsZwdqjF5$K%f z6N9ZO1?c25V-gj@VV`R*=#%hRlISrHwIgb9p2)z=T#}xYrSZ$ifK)9Xd?I%1 z3v1>Pi40~g(Oq@(g?CHDfw9dTQKt{Gs!utWA6lvp(n4R|l3sC;UO6MXw8!npw;Ze5 zl1Z-Ffw)Wz@bEBQaZqe@n)`g6#2E2y(>dZwIOBq!5cSv+=c3Pe(iYx7Gpmnluk{qr z%#@_gOf%*UOPw%C(k63P)toUZ7o{xSKA`YevG=ku?Du#?;_k&d3^6gcRd*y9E3b}^ zN6Qv1XHzx?)g=$-LEP}4-7?3M#io!^QcyW@77H0n;BW>#L$!^`9)o_s#00~sjjU>Koc6oE$<$`tvz z8TeWk5~V>BCn@&Uo6=4fgKUfbcJo;FIlfC$!39Un{t1C@XWCY7v+q5P zyJ)A=It@Pq;m~fhivNi@Eh)D89P51%_sonF zr==4uYgS1EWTdgG2SW0iVz8l?Jv(BcwEF&V)Z)u+CaW*p*NBv;L;~1Y>3!75hpd`X zYKjVNmd+iPcduVOp?ARRDXMWHr>#ilRC?Krhokd5C^I7*XYa%Y{h`lo0+lKS8Z{A9 zgSF?rMs{Ye`bKB>@xeNJt4hi$-jM=g@{RFu+1ZzWCZ}jeXl*xSDmwo9UtlUp*C4;$ zt2oPV;-(wvF^~jsR~*!G0!l0DTMLfyF)GW>Pg*;DxFrS}ldJAHe6nZ_$*O#USrzjN zvQ-ZM%#b(DjMNMt5$D}I3CxMItc*D4X0Yl_f+@jqJmC~WSJbti62f#TA2v)nqqf|2 zd-GmCadMj7Bi^YTx?XN&(wa}?ck<+nu0)Jj*2#6UX&iy5F`f~^!e(l4H?7`Wm228m z?{1$Pf|I={guT!pwDlNmZ-`gD@JEnZLU6j{cC3N%u&Oq}$w95@ zm!_pcyHZ1qiV%_F3g;1R`2*J{1>1*;IAAN;fyQAJq2d%cZ%NB-`L(QwH{Z~sdf^;SSK0IHUWb=Ey%D?cYOf0)sOvbU487~#@x-|exYRq^)n%up1@86<&8R?8nXt|q$kS$y@s*@A>dqD= zO*x+_#btk;i#(mfGgSRFhvE1L6fPxeTfX*uBt4AQ3ooB>Y#5>9Z3Mhy zPOLk!qYiUnOO)wy2S5Msax4+HJGTfP;XIephGf)?(N}0D`0_Ts%DE#i}7!W(-5qekY<2Z=THKLtwPTwUx7r!q4h8WsP*at3t8PkIJs3)+zb4;ftb^}&?SiIoudDad$|XH0`)mV zeS>;({{YO3W%6PezinkY67+esZI`QylZwo)_%4_Amv=Z$fNp<1@_ZE27-*{)eMfq& zD`hBq=OVr?Ys>!~?&`0H>5V~Rt&AzBk8vwQs@|GKu${noyw8&Eh%KriLfUasBIem< zR|u)P+I1|7nWy9(I&}2Vfi(7GTh8IM-Yi{d=2xWkiRAdLcQ(Zk_p6K$N=2MEd{WSS zlI%-7EfGiJ+9`HJglQ_Y`KEBn{G`R6>viTi@})#{Y(AN>YXP)KGEz!qZqvIy@J>U=Pa(K_!E2G8}e`!yua!!!fZl=TggTBTj5$ zyNjw4T5=-hFk7kKrUowyu(;Z15Pc^t~D1AgyL)Wz)5&}2UP;pO^P~PEs@M6Q` zR{XB;8q839}@T9bY~xpC-+VuKFB=~{;DyM952BzbQsU;`06(x)yh z1s*C^#Wm>_@u1*V z%qtvec(O|vifgo|fmh^YOG;+#>rR^8st)y|LWm1PW4a>QT}rmh=)nx53}%h{rlxmN 
zLZMJzvy$L_v!MJ!tH&(jT!^quNUG!|!VeAoFje=8zNp7#k=ncz_9hN5=Ee@DHG$WjHR9hA#26vZMuX7*_5@`ufaBqKvFB!)awspI+u0~y*jXsn#1vb zO~{!kwHD@h?;YM6gkX)97fhC@4nrm+f5}2iF|o{fOT-^S971s|tJxuRBfwF+7RS-_ zR%agO@_5O@T;=`5Z{?gEbMC8u1Yo4OLje{zr~O_gKb*jc2$e`f3nQl-g`SIR%oe;Q zC@)yYtOqEn$u`B0CP9sFdD67CWZ`S^M+)jOOHbX~TQ>|=DN2;)hxIEKavqZ}|2b@J z8s2yAMlGpq$dW{sGAg2?PQ~;l3t=Q+=hbP+@D!nLoCf?1hV_Fkv~2E>EqSGi2ozGr z*UPyP1O8Audx0rdGnWWkTxR>Wf7iroxse>Y3os!}+tW(3pRVjCY)(N2@hsB_)3@OA z8;N_&a9GP=%U^|$Q>asB52}W^LF`XUaLHqj8za^8eS*^?*d}FT;=e(}PeI<6vv&_; zN{d`(Fd}Dyj+bgr0e?;F-b5&@ma@-nb4X9H! z2~D2nC+^M6Q{zZsCX3T`G#)XpiPL9-xezBa2>ZP3uXW9FtrOyO7UOib7#Wzp(J^#^ zUB+@99GJc>?+adRvs`2glvXX1rMBDPFnQD{>I z7ewx0v(Y5)Y?|r=RntBtCa;Q4h^M^WU+aApLcROj&_p!^e064zfU5deYV@z>eKM7d zXE5!gAFoAWUqdx0OH%|^>v@XlVC0?4dKuymF69p*befOqwyFQ!{8TB^#n zE@@N7*Q7isDWy_RhY*o!Zl%I^D5d%Z1?0|1A0si&??7-S!gddK-0ky|>Nvq}2oJ!6 zu1|2z)P>b(VNpU_PpNNSH~@9P2pN92pqR3^UCO9j9&SUysC+{fwK$z@J;}94QOW!h z=CW?oveaCGMiv^-D7JWmn5ZMQO105Mla-~UcdDd88DX$C%Iwuz>j6HVoe&w$-$Z6E7Dln`Z4H2@JHdn>MySa&n2Z zr7gL&i$;KoBT`IJ)3DaDQ*q>kc8r2F6U-u;W`!hn$6mOWC#nEgf}^@lN(-URH$5~4 zR%7nSkW(bI@&6}+O95q_*8)iHr~bs8OIxni1`| zd1Q>rg~Fgm!fD+_e1z`N>q-^y_tfR1KM5jLGY$mlr$~d8OTyH6K0jaG_@M|g_ zA$;OPWU8C8*UIGYTIKF1Pmx-J`Kk(PrN z#V?KPTBO@|mj`#eFV)1l?dF@Bq-fvmk%<8?>!fijV-_kbp}&Q(=NH$dENVl&sN& zuOS?!cQvMW_DF9Uw;v4IkU4Y-5x##yj8Sk;JcARyP}KV%hIF5=xFH1p9uv_=<3j>kZp}R)kVLH}g+}uQet(BTj0m1@$vyt>np! zW8g=)Di8Ol3YW78dGQ>xa<)fYWd<9@m@FWEzO*P*M{%BEHb!{pE7ETWIZoIm9)UFI z+(ViD3{dm+54G?$WC;b8F}Z8z`OIh~V~et_y5=6cOY5e9@FE)DxS5$Y8Z9Y1muU=y zIrM?sWa@{XRr`n)-XvM0j>hZ_u6_^TeA2aZ`Sc@z064Ci*{S_mL{F={4V!-t?2fKg zsy@d3Ws>no{jyw8g&`2`5MFbvxO8<{y5ov1vn@cjHMQITO-0Wa=uj7yLPOFvDYsqv>fHQxxnGod}f@1D`td00F<;1DdVtjY%9AhEe(j=?7 z=7op~F=en7nBoV{U}uzQ)@61DQy>3pdJO6&=Y*yNRjmv)dl~wmISBjrnb94S+h2XF z+klupa$LVJNJZt^tVH$3AuyK+dWY_%Qqv5Vu0Qq>(3O>RHR;Gv%rOM6A| z3FWSRMztHwwXVuf2{KN%Y3fkgJN1_UZ<@WkeF3{g>?)Ok91o04Q`GSEc*x|r%gTkf zuodUE-y)LN{r&E^{Vmxxd_3958@>Q{>)uZHF8G4)o6zpgh%)++!|kKjxXBmW`nC5M9@$0;aCxtV-KWrT-T01~nANU3rf(yHduZPT6eFcS|KV?2+_XQVD z5k14-V(fe(&r4df_g>vR1w5RnJ{Bs7J~6EFyX&;QX79@@=O&&6&R7RsPfTEx_NXGm zy|=Qa9)Mq}PK{pYH=u70T5m`uc!RK{7fWPE&3Y-?o{`Y9{cRoy(SEgrVg>KOCPfU_ z7D7o5*G5K+sbdPbTeLPS4eQ}hO&J)O7~-%wvpBQyG|sBD)RwlDu04q_uqdHdSv;!f zEwXE??Bj`kuw&th$QkZ^5`J6Oin#9@G3{APLPfV$%oX#>hA@WBz3f1RtAe{e-pMat z*}DQEn+Eza&2P-ytpYbc8UCKvKMbRX?PKsb!(H#Z^0>-xzXEg3B)EoGn2hY2Y3?nf zAfmiqi&9)G&f1WL9a)*`XSHN7UKJi+&pP3cvEC|IhmVvb#@W$zI)$-qi*SIyEW5dE zk7u84ef%gG?0nN>^?0K7+I9Pjc65SZAf$~5dFIB(|HO(?}C$3l0sLga@-oY9&ZST}U(}$&`M#(Mw`Nmq{ApDuyM%>JNmawg3 zZO0}PCy8w`{m(T{p*04cfjIFX`x5J!$Od`TGxt)Id^&$hC25RX=!qN4(iulHgin^@ zcJ6Pw*fZheJsW+7F5#cufR^rl#!sY#$99gYDv}I18mTsG^K+sZ_zBpF^pS(rbD$#+ z?vLNgbko3*eQ&9U*4SNAetVstBPbGd`X7E{u*bKrVSO15Bi8VxzSqTw`OLvl3Q}~0 zO?IH;FRfjPEd1H&C}996D*;J=iyrO+7ldW^Wc)H{W<47>%+_ z=f#}ChMYV~nH{L#cDy4&k5M>@fQL=~NLzZN zQ5m-R(m<8`p{K?RLbp`_s<&$uu`p9TZCxXwyN_LyD6}U8?35Lo%Rff{>Bk-ItCw!e zqV$(}kt+_DtMXUPm-r)}wo)izS5GM#lO7UK9&fAIF9+?=ccF?FZ-7fY&xbP(lF#?a z8-nf|;{nv7VCjXnKJX`MPn2P(v2c{-y%`0u)t8OZInbQ8iGrb%YD4TSZk#t3Izyy> zX+iu^d0e+tJ+T7esDhJPnQA6qIew6`p(40aPy-_eUj7((W5m#%>Mh!|bH1M3AVjiJ z=1bk}C*I>jj1dw!QkU-~SY;83mW;0GKKq&|3wD6o`Vq+#usCRAx zic(?Vf{)NWl)b#TGQ2t8;4SbD(bvBOyu)l~D)fAB4jH}$h=TuvO`?L4gTpsWK}gS0 zPr=dN$>1NNuZW((H-N+KKa_|?QT!4h{0PE6BnCrLz*C5tEu%xE=R0Ezu|X}V$ca`) zjCmzgj!&Q+>Co^yc>{3Te1CpF^5TWN|1YTTl4>%!q+($CB7^GcU+JF}B7eEt8s?St zFBaJ{TL5|uW-xW%+mw!NyqAf<3^JRnp)Hr`c(_+(eSX)O8O-0cJfWEG(?e(UrEESg z9?hh_e>U9cc4zG(ksu#~jB(zkIy!I%N&AlQVqaN_NT0LJ(Cn#flj?h@-?(j(Y)Xmzl(a5*AXBT`U5E^xIuk@aY$s-Ts(k!NE;rKoQdT$r#)}qASELGPqy)(^cbPD~^G5&4FNHw<8jok#;OK 
z7D1kxss?~Yy_hJ==BdzHL=2srENPthVhzb+u+~tm8q`6V=kQ@EbIVe4GCw*ynqj_l zR*iE?B9bdQ&r+e{&xVGVvUAQA z4g#MphwdAu6pJg%FSjF6!9VxS&uCuc9-eeEe{T-I2ORdFVv~^pl$wb_qc#!)WK&d* zBQ?y`E_#?a{bscCNtTj#@5;~rIrF_PPx>a zw)QG0_TwYRk??mHkPFzHLf}_svbo+!aD1fk8F84PUMkb)g!sd4NG)@2Nd3v1?nfk> zV9AeQB0^JLRfdv&R`02UPOgyZsf

3DCMM%{WRAWgjs?r?B(r=fX=6e z@%2j${^rjAX|1m;bYwUo-tz2&DonaJq!~MN+t9BQGovbCD=f+WS5tJjMwFW7F1OVX zkP|l^movh;MDM^qJ#_%9)m|aJ%IrMM>IuA$nlo^us&?d{0LW=duo`a*q8{qVL|W6& zpmy`q=P}yw^s^H!AFx6b_lh|j-sowlv<$>U&m*{P{drTsKw4@Y_3e5zZN($Gw(Q-U zwqswJYvwoqD zXfQp#g0Oc1sdY(P1JAMl%}kx9str~T(O3x}Y0q%=4MiuhU%!}yz{nRPAL7U&HusZiU&*BJ+tk zmLv0^>jUKQg_juTS+(8JIIB7fWZ*1qtGdT7Gm!C0P9x+6EqFUnzN@;aWLl#fp zBAxlLW}Hs5YD5P;QIjf&2--4V*heL`z{Ndm%rz@WbD}$q1!399* z70805>d;d3Vo_Z5;s8|2xU(CIuvy7wGTGU3=0|YB_8Lmls=K--6b)REei_f0HhP45 z9A62~kiMHwK>GD~E-|`&^i`ie264WKM{ngzsGccCzHaCf6Am7u5LAbj z9B_ZN^Ie;kpawz z`x?mmCO(O`Q_eI_uzJY)=~MexJjdB-9@p>hkB?lwKpS#-da}YG;Ha9)aRW)5b(&Sf zESf3(upBU>yT$$;jyTHn`MQd|7$ED0rj(WU!`#On!D;Q+&^ia0XW>KTgFM@aFl_nT zA<#|r>HFB4I>@c@dbgR@DH2Ud8jdhB+z1(6ww>=kXBXhGlXs)&iR*cwVOSFWG9t2~ z!AMTL6p`#s+><0wPl9P29|60-w8BTRY^Coq;$$IFZLoEk-|>M5uv)0~r7^djL)~ev zNUDs`Tc8J>V>Pi1@@PNc=%heo?zWk5X_!1g2@cC9!@Bf4Q|P-m*I=ODb#RyMC9}a~ zy@6oJw^6hR()VZ4TDfX4fbb9&cGeO8bYlzlscaB`TuY#YHVXqQ476G)9;L??b(b1z zFj2^du}I1zSy!MHO3;r$_85vxdh^>^2`l-4DS#l>myKw;KG7eF72ZIG zSR$J)+ND=7)z04y=*nQm*<@=)RxEtToS6xY^cRP{G@YICqkz>)U9!bxg3)5ARuP1E zN8K<^{lfF=k6i^UcNRJ2DNq<0oIuSs!zDtL4_#DBnHwD3LTwS6XD(S1P|}%}c8^oP z_r&u0VvWU$T+{Xwyt0jV%x(xxyZAesVNu)~nvslSn45wzKY0t}KViqiGo{ItzW6Sz z_jE^zBpK^waOdp)tZHBc7PSMeLzmuD9C2ppiM2x4#AfG(_q95VA&{+>-iL{b1pUfO z%~f+R>%5M8^;ZurjYTi*AE*4?*jEzl`Rm~v=0)W5NC87zw4rk?qypMRu$tped<;5P z6TKTht4CjP6MMmcOxKd`O&OyxR<63uF1mxO)TNH#$+fgTud+UR7h)kRbiqYQ9-DFi zHp!y!6Y3N^fG0ND5l;@!=v6am)G>_$q&z~Bz?(cVA;0aJEMr}XQ63eO;(-9Y=N04& z2nT+Qm|xF&7h`i8EIm&A>tA?4d1f4iPu~k=q3`t||NmV@{#)TyC2oHg9>Q?>AL!9uzePUj* z5C$51k7io`UR{#Nd9Yoo@JNZ2vG>mc*P1*_T(LaKbmVMZ9Oi9&Gv)>vW84viU>C{) zW~S3)FfBmEh_I<%S@48sE7Z)SxSSAoxi#5&bKP+wkN7&Nd~Pzf+0Bu>FImbJqfA+n zWLJC{d*dd3R?U!+hSkk5@l%!52B7yDZQksNRyR!vAqe<1jK`+Cv}US=DPel1`z@4j zWg6$+GEOfl2RCy4mpw0gPpz9J=~T>8B4bxK5#=}g-nAPLI{oAy-XWFB&k9BA?two~ zc+Vhps<~z~2NU;Ly#QU_r^{-kt+7=b*HRCiLN&f;9WM)l~U{ ze3zWhuZ}1l7<$z6j( z9}@^6KLv#XO|Q=c#wnQuFsKg(8q1vfhLyAygQVy@4Fr758tsgX=c5!&TwrF2q%`9w zyXt;Nj4_Q%v~#;Ce~5`N0sp*RQ>Qoz9+#I8xRaymXH{D77j_iJ__B}+-dgT)N%!8R z;2U@GZJMs`9{HLHiJ!v$FU!|IdD6W*(V}kp)YRZyd zx1Q%i;Mpg3*4M-WPPC_hwZfH0-#vf@+AA^xl^0r+fArd?j-ndYcmaL^BHpnm1uY){ z!RrwvFt#!!5i61%U2DBaV{#aJ+dpo;{?TT-O1^I$730B5sapFTE+$?&T1FQ$&nFz1 z5kPdS?3xz}*ZE;rxfP~Io`W=D!jDoo4LN?978(U2;3gSGtY?QHbf?6MIp(Ufjfe$u z7;(cl;1}&fHh#)3hSB*^@&b(-Z75}{EfN>mED%Q=4pt`;I}Ba#yXGi=;k}rejpKu%5%q<>JA8#r{Oyp zI}Q!1DRnN~LAG&`JL+6DN~Q*R)wCuA z;QlAX$NwsjfA6vXfncsBQbU9!mhKx2w0E%#N6y_qnk#Ni^cD9dCIwQLwH%a-EmDjp za4e?#K;P}WgodA)ij`(kFxBCl$6GvA6Hh)I0B@eZHEmqudGp%l@c3K#_4>BF{R3tf z8&Z^>E)Ng7V|1{zl}uMo07Vk`3fdKaC(oA-%#$SVS4_*I0Z@3T9^@}oG=?#&dz=z{ zzQb$OEflx$$}|zhVTN@wxy-M>! 
z%Eg}^0$OroZ4N3*PzpvRmSx1^e|R{94e*i&@KKeH(Yz>x_aew3&fG?`<4e*f%`nsx zK%Ov#CUKVm8bv^92>yQ<9u{>cYSfRIu3&lxEEmhD2@^9b7H?%e;@iWtBIJcLESG04 zWR{F}E%eP7D@**6OYfv+ak^|kqVg4yl_+wXz+}}2HXosbjoHhMlFW?Ne5PiqQJ~Z& zofQL)rzSk&`vj|3;Mht87C^mMep#e&7MC))K!bNAU;mN4antQbr#*eVIx+7_W|1C}72?URM*jB$K5^ zDzMY^54mB&fg5G^hzE~P;;tiejYUT?o=p6zJi6Mf+chO&as0tqk&uvVB;U}si8whS zH647>Cg{94>nHNIdn=XUN8@st9%<4tHls6`PI3NfH4pg0nup!uPPxUjX2SW{sUA=5 zvBFG9jk6oKD1JUh$-&sCH-PbQNd}*?OCwmd^u(0z+-|75SG363s?SSzl~RH7Q~w#H zc2iEO1RhszC_OtaL_qPvt<;Dd5D;+9C5kr@zbg`?+yGLY-o|;Xnl9&PaUk?@x7v|R z;NL6PrW!LtgH>{jKke4G`6QPE2hu41iX{yF;oGF>4t>G$s7}vUbr79h%*7ps^61V( zRcHrp>w&jVtw(Q2e`4_Ar@VaTeFm$}${aDo^i4v@`eN}sS&$ftW&Gv*H!UA2T~}4&%ggE`BCne~nr2cc(U$!+G+qP}nwr$&XC8=b`wkvijuDD{`w(X?C zn?CS@IkO+SnNvF7A!PLsG2sHyn^6_#y1lhKy>Yx} z_LdVdyTYSIIOs}q+B0p^7W5t-ib-f=;!MRW#`Biq>bbrE-RMYK*BII-NP4>}A4s#T z%+h*>GOi50YrP>tUXgbn4z{U?H2V+jkAFMze;dL_>3@NF{#Tbb`~UUGS939TaCLHY zag#T9GdBAYo&Uo0VvP+~RCTmZd%^lFGgy18$Q@A_47sFPD3b)0NOGmJ6U#)H^H*oNMZBRBG&;1BB?Z1xWYj*IV%s6gEtU^i90Fu zIDkuor=j0N*1HT79C1lMyBz}?E zQ%W=7?I7Ray@6etVg3Twz-GLZt&+-;ZvLR$fgNYU!%0OUFJ(0&W!-?ub~@hmu1YLqmUva|UP|Qo_gkEP0M5|| zVyoX6+TN;Cnd6R&muknBCBpZK%~HZRh296Zc3W^$_I+-+OHDe=@P--#gE1Bh{OwRnXj_!Nr zN+o3V1=kainh@AJz(!7K$$MDm<)Jclu zdBRODQ@W^yS#0qPjKYcmW2Z*49BV7rh2YX8V#UY%Op+>2JfTR6_~9ev+2Ii7#Fb46 zV|*z5)LfTO)hPNIYp;;W*^**mo@o=vP#fI2-91CTI3wh7J4-p*ARV(8%s;cRV4XC0 zI4$dkMI$jYZtZ=v>f+?Hms@ziZ^M>ke{W3*AuJ&l9ntlK%uIbG-RF|)-$WS`z1St^ z1G{VX^s^4tY)T{DpQOOO%#}>Q>VPTjdthjZbXMuE-tRJ`Pf&%EOra ziqwhlmW)58odhX6CL|R(fNvXO60Qomj0DNn5cn7gh}nB!eLFMma`8MS7GDe66NWXR zs(_YRNt~>%Gg3anN*8@2LdzF@MvP8Ld`i?VSAP(;PezUaVK6ujAW|*5W5tE@f~RZJ zW99W(i=%3C3fiyZbfcj4a+)WI!hGOsS8j6l>%O=89ulTSegYif2fV0~Q+%d76hBsp z1Yy2LtV&jFqGfczkQ0oxUN;xRE*aLQ7(wYthXV51)UJP|UkIGQAe=xxdfqOIKp$OK zH(uAE`FbBZU$;765BrDBL#tq-i?3ghof^q5h*jkwozjo5Qu zEjbIvCNReA2@vUa5=ET_XA^C;IFj=lAerjNC++iwLMIx;Gcbl9fRZ2!z9o`w?vror z&!yi>Z{EjH{^8%+XYCr<*xX<18d06ncns?rvCL6F`uK|){H0}N)dTh^>gn0kJ?_&t~h^#w!rK6 zs=&dGiTE-07h*AZ>#B#NLsv43)lwMq%UliZI*ImLfO(o83rUnYN-twvQHs0~^~Wmt z()7oPypi>@Lbti+uvd%7@J_q<@-_X$)uFMxyvmZWSy|t2 zuFe{~_aa3P8sIOo1~hZCDu(MiJm^`QaZKge78j_V=@Eu@+3*|{d|MQ|KZU(el0|1Pj2}JMK1^`>y^up+Ua=dWVLANHoxE00w3)HiS+4>w<7=A7v(ZhI9!u zZ_dydX!B0k}}h^9!6MQQ?M#1R#f zUX7n2J$lqx$y%K7Pnc1D?5A2G&E+9jqCH>?GdW|0!79p<0S_I&bTqCcHWosJmh2v! 
zt<*8q-aGXMnv+L}rCT-*sOw{Y|oE|D4sA+b74 zBK1jVfyyjx62CjoTIAY+r@%CwQZ|)CB8-xOLMPn`y|T_j%ssKa%!A|CC?-j1&De#{ z81?omb7SJ@G&q?KNn{x$VXMp6{9@TILMeBOTmaKQfnCP=JEM@)n|B?t_CkO>$*)g# zB@&0&lTY{|SMubClE$je84#bRe_Iz;!;W|pddf<^*^Kwc*4A`z^KuWd%j&s?@G9!TV)d$^r{OulL+Y`` z2`m+5aS$cG!C#cdPf^N{@1tYNKi-4D;aC!q?mbEC{XxZdoT-~z^9IxiCd$AYFs@3~ zB>BmaPsEq%s)L6^f`_X*53Yl(yLO_&d9HU}-@?#Fya=wMH?sCWXlB!&J zA-mxr^Zkq|dd+XyeUp<@huBAZKe}U@HR3u12i=id7{Y%3#pU+rg^T((3$e{p&kK`* z5-}nm`Xu&=Rl}ZdKEzqxldkP$>mBr;rG#acr?RInu_+rC2#DohL$kkMm;9&NR;}@= ziYkfrEh_{sS9uynrO8D-gp7z-LsvJx0tyGNShOtm0&&tsyN@M}-{)89t>8o0BWBJF z_G|k$wV_{U2VAkR(D>hmEuIei1+HGR{NAoMAc4wyGodNo?7BCETP#H&F0-6KQs-}k z@W)QIn78wRh42?97gO}Kt$acmKlY8qr9`+#)L;!+H6&ipdVU% z!g{mU4+VgUl**ucI>^Ib1Vl&dHXd})+hi~7HLP8QX`9y^+6!(2JAVX`=K1K=!#`Sf z`*ADhP&x&_%1~rX=e{X)-)BUNSwxLM-1%SbnigoCN*KC|(ie?iwBR+l4ycQBl6hyf z2319{UviDw*2{Ene?SyY@PklVH)tOT=$H}&J(zgH(gwltEzaazokqppG_d%v9*gZu zdgX0XF!?ZD__pWFPukzx!m125tb>~=}>w|-hx1W%vP~&zQOzqd>WmU3HvGkVwxa7@Xx?qEoOB^ z`K3U4P(oU!+0gP2@YR~a2)B$SSDKjjvP^i+YrE!Nb;p=uxs$3}tRXAy<=8XV868uX zHG7=v79eo0Q130o^20B$jvPQy;s6Tk`#3yZRwT9cPObL*X>|J?o!{$Rp!vMzfo&wZQ~zr7{h{IBi%^3-t>(|TC}#{T%bon z7u(2K@I9==TuJY=K^-Wf79{(BhR+)V`izs3n290+O|RW&$^P&L(G_QQOt0-fs3l=; zn?Su0{BD3rl9k{tO(>*V<1BYb+s3^(Jm|~d>33#bus;{~qu&2%rF)ZgMSOQ|_ej70 zF`&sY=)`*%itTOO`%4Qvdn{aEn&R3+UN^IUZJxPm{m+($Q>F;5GREl0?uO!s2(}=1 zvYc&fM*JQ_mrHquUm17RR8yq-URw>IZNjA8614+Ubn{X@V7++MHIs7^Fh;6R87b^VJK<*4f_ z;!2`@40?1t)(h~%R1V5@&LdXbI%Sv6Dje;L)K0}u zMG0P}(=^_}TaAl|isPi-m6a!>A*06&w7j`pZ#=nOC&yntIe!{}wuFP@58af)6lg%b( z{ha>TY}Q+&)%IK>xD}BSq$HFg64g!66j6c3-!eTeH{~bL84K9aB~U<*0)~bj4B6w} zAroDi7Vg>cg^R%cEft(|7dwPmdB9FkwQO4>*Uh+m$t5O$&wI|+@D`-+ zF6v!I{-K{zUqn_T=JiVjF=0#dYJ|UvOoUZ`*VsVa#mTCrY4KQ0XZd2%*+l%FQZ&6i zNRu~3WPoR=Zz(ADgw*n!g=v9!5}an@rRSb zRAl)0J?f?sj_Gs?JwgT5@!3-zW)rrOapb|~?%WwZ37$}8Wct+1dAG#A*rk-QZ2x=Z7BpJ$l{1f zhq=XLGbY9bpO{U3<$OYsn@=om4HKI0ebBw-IiRpuZKpk_DO7&uUz~76zv^0Pj^XgI z(khuRYQ zwl4V-%=LC~=IK|!uT7Bu+R}f^b3m2caJ{~+qNG58fN1|0X5emb@BR1kz5k>HG0I~y zpv-9eC->sjaf>e%aWRpI)8(+5ktEe3#ckytc?zPqInri{Fagm4-)q3Y1-pMcQwMul zu7n~3_Fjg4e4 ziZWUI&N2lO8*0jgsORC5LTTF`*Ph+tUOt52N)6~MLI!C#R1<=E${QCr(enDjX;)@( zBv*_n>1@6#^;n5)MC!P-uX zsn*(Lte-=aB3+-A!_UMoK`y(Z0OebbrELR)6rrLEy}*L;P1Fl@Qmv~igR zDzyzLL}8_5aQ<|X%(~)6{x!k#biQ0`sr@Bf3uT{sHk*hnoYC+W4S7dRpTD8fA=OAw ziYiM6{z(@U1AbUO^U`5;EV+I)!2y>`W*w>A2Dv)3mOuZ4oinNEUGb+urs7+u>ekYz zMW&@X7X;&dqPQ>y;G6?FFsKhn?Pm0TtOy|&xAc1JB~8Ha zCOBOaO+vtzs2Rj1LwskR|C_$>jq()I@!t#^>wkt#U$&14jo;aQ z<8<0S;2Q~-Ghr?DX%b0vIa7M1++(i0b5vtQW6t)}tuPqj8}J9!P4lBL#CHfER~OgG ziB{{bZvwx8@Qf*=&=%>jv{>tmO_GT5ql*lA%$BdL9GADJ)(Ij2wCIk@tL{xUda?RX zO|*W7HtrI^uH|M5Lxo}I)vrzQR zfy7H1GG6pyuU$XJg%d$#hs;=5r}dH9zQV?_V4*vdn5q%yg2gSrhFo%)SbbBT`toQ`zXw8t)Z7H+X=J`Y=+gOX(9nfZFLs|n za4%fBOL3UK4;Dlxa_UrA3Dm^2C5h-aV%g)6MdQJW#m$}y!gTMyg^kTQkat{pTzt8} zJIDgm8*LY+)LEW`x+5-x_FGmRlbAT2ikva^E+Z-_T}veUxa9v=+>Rs+=D&Z%t?S>| zZfySlZa?vzM-Zh( zLgvSsRtGom#pY#_$QNr!l5wTVSWv<>tqMg}V|;h5;0-n|AWmr+-D{R|@a82ktH>86 zX!0VNXhY-~L;=NmgDns*=T%=mwmPT@R5*uYsNUC+C93sof6R!)H zReU;o%zuhPMf z5_&Y7R?=N`r1;b|40K0vl;C(F8UZvZFXjw~u=?3aw)O^?PZ>fu2~__lN$e>)+A8u; zoUBII!*S0p*9_KD!EgRw)l0yPyfd+3)MPhQ5ku7db@3tyQPO|APBK?V~GD%y_D|O_h&r%F4@i9Y>xMdDUj7Ab-4J?#0Qt2{-IMl+m)Lpj9_hGM| zeI5R+mZONQcAdeLS2vLwzv{I9IcE?xQBMIERqRPH8T*+M(mo-pK6{UuJW<)c?mjM{ zMj>YbXC&n)MZLa2^Ta2dS+230t5u|DDyf!UV$ES;bPWqfs%h3ct>I#pkv_zNY}Iq) zCV;Dq7D{2~xlq)s!qC3U_OdB~3&{UHCs^}I-6IF;L9Hg0UtLg-7>W>aHzt;(!&@(| zuPS1IGKIwxr6BHd(ca&(+R*M6QSt;x(@TnL(LPS0gc5Q&w%sSCM zalbU(Gmv67wqBG* zQE6c?OV^>yeQQ0pb+zDDU$=_4g!ZMtU%-*1l(UKAqT&`$o0t=_BD?>NO~org0+J_2 
zj7(KHQJsNyG4}qg4O%97Yd&;LelFWx5l1$?BxrV))^wPor#hMM!bR7y41JA>HqB?B zFND=o?K=h^b3bbdKZkC>Bt2XaTInz!PF^dXg!Lqa^?v=Pfhud_>Pc2FJ8PiSYSB#A z4|ab3J8U~2k4vyR#m2aN+V0t+UWB$2&*b2AR732~nvLA&OlP>Bse zSkuXq>vG`jO{->ZnM|-@>8D(yOc|!9`e59+UraAy+rESAi{x$n^LA>Wv2jsPH+z%YE|xIpv-6`}5D9B2b!< zM**&?^Zj;3u&r|lb56Hyy7abtNB*`C2#m!63uW3LE{s~LtxYv2Q5#ECtzKOhZH?Lj zBlVt4p>f8H&FP}<>-s$mRvqyTMVDnY>xR-j`cYNOy z6Fl$`^o`?<>GLdQC?(R}Gy7CKRwayT*@cL7s5Cnh1C6SVQm?Fibq3v7R+>WN+zNM> z+(^JxOK#t!A2GZ;jL~dIgIh!O_dclQmp$b30$!*E%haZ;1|Z zQX3?vHWp+I?qael(P2uu3cLxKraHbxy2O?4G>-E+_}sBe3m{!Uks*ke=%XF4vqd!@ zYU+!4d|X}bn^d>QPWo_Px}AL)exE;=sJgqGGE?bzy7Ds0{4AObu1@=2rUCbRe_#FH zSVf$1vToSER%C$3fo55$E+RWvm!`Rvo@u(CpJsEAm&U_Mcc8tcdOJ4q$w_nI)kw=S z?xZVH{}=|VASgJJL@H&cIh;10PsdcH-aP@_Ckn77myo4iI4Yx1B)LYc_G5yfXl;S~ zH?2Y|UA+AfplReJX?3|uqJ(Jex*+WjzAN;klB9=IO_6f|(|C|5e^&LZtqS8@ zk%&_84|kSvLU0Y*BUo*Wjhq;1eA9u1V_!lZccHMa~5lO_v@1Oo&iL(J+ozY02fzQj&j!34zN$=y^kyP|d z=YjqB2Grp<_q=eXgaYt$i!U-m$ASdRC4+o{YkzuZL5}_chI1i5LXd|7R}P>BwtZvn zrA3WH!WI-_3t51L42PBzS4sH&<1J)GZJ#;0+=|905CQnlaTWhA$SNsWv+B&tC6Eo4 zASpL`VJmC-GH{y+)+>52uZ}$6I(|~J768wujQA>U0e#Mz9+%Kh33s6ic*brL7;^U$ z-uPMe4#e#j(f*8M?uT0B^}XjPz8eXU@d9Nq0IOHXk}t8U!GA#|Bh`RN?9=jHIr?Tu zSkS>34>kPys}ZR!V6C7AQ#b3mCNu($;*4St^ba#dmt#Sw)$}e(loqH07$6+_(*^90 zC@RamP}e-f?T~3avWnIv!=B8wU+6*FEK|*{pOAKJA`$-^>_4N9Lc&}y##gJr209QB z^}m+D{{0Bo)OW(wME_X)Mq9n~>5#aZ-&mfs?1IB>jRK+|ioQn7k$Ryc6R9twC2Q-e zYQ40PhlDEg$GQ(G0G9rny$Eb52tAwiX3&T(WdLbQNnwL%i=hXEnZ^uhbxTUzCVlM~9{X0^u)Yo(Z;^+`291*^dc2O+TF2U`c zFqD9hBkzacaKf8{G=!qO9!GgT$DGsy`PI6iFFG;K{r=^lDbDUBwy?;tuRrhBHUH#d6` zNp(FvBl%(?-KjIGqt@gVl^$%yMFdJ0T0Hg@sp7i@#hIl|$uvf~@hQfn2}#5^ZOQpl zHm)FTW74{*7RR79WGC}eHrTrCkca5gXz8AJ6frP3LZ5q9bEg{V$C4{4t zw%Ih1cZf!byD=a2J&ar8E;4tr%wh!#e{3YaC7W4AW)QY65JGhVr@8HA-30RrC-fn{ z#P-n`cwvK|5^Nq0%i1O1T5WEDnUc3KQ`($D)KNzg`z6H0=Iy)_%`zp~K0OKq!R~m9 zC_{F+Bx#ojf8qq$Jjlpl6hAS#U`jk}vS=PW>z$J2u|uNq7o;&F&&s)pfF6j5Or*Q_ zk^^E1>RpJ}+@YFpH?MlaW$x&d`Z5jGqlO;=cUO$r?eVJUivHx~iz#)&W!;1!8rtumJb|w|sTdYgTV#%A7crZGJ4{rihc{zv&_}X=4_eCn>D1J}^75GlL z-7Zt2)#^yuiLd;GMjYKL&4sl56Vo@usp-AIonwxJtJ=UU+5Nr!OqL;<{PWZL6>TC^ z8<$PC?AAZvLl)iE5LG$sa+@s1nUC=e%k^z9FE3KBN;G>5l+?ytWMb69!SuGO-#7{B zu*00vqMhZYCpDEA+a6(n>_2L2Q~~B_V=D9--_!?@;gobBjmsJ$lj@cZD9-d#n8S&I zc{xXGwRE(J#IZ@een$QhgsnMy7#V{MUuOqb| zwkt~pwCLVdWySeEqq!fLrCIg>HnQW07w+%G8>9ewBArdpyzvTSW}e^H2vFp)-y#YN zRJ5ah^lEQk5y*^Y_GaB=sM?HL5IU%}aQc>^YY<7=Qwh;J_3bMR9?-g?HU$7m2-vRTSnX{gI^$HIfW6m&SqRU>p$8|xZU#;<5F{Y2bD;rl82 zXBk?6s+dUbspOJfQv#;5rQ!GffIqpTkHR*`3+R&KH(GKxr?>)j$Ta`VIHweMOB-&sUoNoDmNaGa z*|9TiFn#waKB2UnrzZoQ-s1zdl-f3aY@4F|_KadLeK2~_bcHna!Kc(Z^K|*Ci|!p5 zX-Ea`UN+dXD5s>1!UiIu4mL&7S|W2WnWbOlNe8-`(}S+BEF1H_`~#KCYN1kBC>%-4 z7$1IjC_Ea|(uBVTN!v*@Z(?aWf_2vqc)h*PT;um>mnBes8UB{Dqvrz*Q46T2lXa2q za}M|6L~5`OnA3`zQVy7Mikk`!>>+vk;=!f|MQ|?{y^}3mmpUO}LLFI)w1rb16W)RI zeFq~JhdQC-AePoqzb6ktZAsHW!}bGGWg{$IR@f6TA=&=kB8Vv{wqdE%VIXlq?g^W2 zkn?)0jj1yU?ZpHl5R#PZk|;7ovFf`e(FfC3qVtQ`0G$x7Z~nQ$x|w0r?=t3pveJl*C>tx_{(C>Q+59GudUf<-rI|({WU@m zzdbQ?9>nCj>7-xNT6Q~eY#$MHZy9uN9}aE>0WIWA6qfavq_VW#Iqh|u>= zfCK%H6cRhgyFRAPG{ro`^YB92$+Y`ygHyVhRt3XsYqUPNT*MZP@jT2dPF0(F6%XBR zA;Y5fw<_MqIvnUqYM?Vyf<@8imN0bEJS`eLZ}fI(1|7Jip!n%l2!;+A%)!2Au#iI} zfz~!y-3=tw@G@;Zm+yMZAp|syd83@9G;8&=()NXp74C{>o2Or;H7fvr_MfJoXw`^h zsW&yWNE%M@FTL$Qg}ORmj#We%9mEN%1uj$q!19W{8L=xRE|<4Sf8`*~kY_>GxvZsr*P z(V5c3fJvwx_oheOuKjj_fo6%G`$xmW9V^$ z)Hd^P&3?SO1Ns3)E=j}&T4hXFx~JJ_K4Y_}lJvi5-c$|Ttr8h&vv|=jWj7>6Nz2A4 zL`x`w-yNR@dcVD~PAE8(Ut3lv*tX@tgJT#T>2|1BLc)=T-je^`P3`NE zO~h?Rwa*byrr*M7SNX~MJZC0NYnC9w1B)sRC^nq6kAmHF3C{Ba{1L3_n_g1z)$2s_ zFp`^Nr957tF7_y`h_ZnZP0yb!5S*LaC0)ixXEuXI-EqR$2w9neJ%&D^zAs=TQ4FCB 
[GIT binary patch: base85-encoded data for the bundled binary files listed in the diffstat (ivy/ivy-2.0.0-rc2.jar, lib/commons-cli-2.0-SNAPSHOT.jar, lib/hsqldb-1.8.0.10.jar, lib/jsp-2.1/*.jar, lib/kfs-0.2.2.jar, lib/zookeeper-3.3.1.jar, nativelib/lzma/liblzma.so)]
z{;{<045HktXu~9i&;TI0z(u+5OX`NqsH4#f9982YGsMsNTmuLaZF4S*5k2an!Al_~ z%h~%yqRKhASK?Yew*Dal+n~v`w1JW^OjP=TjMZ5*0v{RamOps^DhibD?&(_I$PL@3w@)7^v34PdFuOTthpHp|UWK z$TJ9A#h`(iSYsxoQ|H7j%{#KvSvV`3N49UjrXY0PXeqx;$!psk@SSqi^7B|rJND;1 zcCN@<#_L7g3WVH)sEM9px2bv6%)PX^SP>guE}m^pvS&gN*(@x4fkaq@4ToMd;$}$c z{42fms7h5gp)uczF=VIi;mR?0=gx`hlLZ9{y7dzVT-0jRaet2VNfg(CWV(r}Ac66zmkl*!*}JLaAit1raQIf&T$K1h z3cX2kM#Yx3O^=c`E8H@jCJI)p(QRPZcVls17zT-xtb}zVO!!OL>!uyW^Sp^K*9jFy5h+MzJ?wi> zOW6E#)F1}Rw!Xk1)k>Kjx;oK7F`28xT+@C6WCND3BdSqFKXOuR3yu%r!vv)5a?^gl zP}E}OZ2-4c4uGctzsI~$?IRq$m%=U5R2iwh+#uU~#omuA#ak%nVwLx>ZH0}Xbwy~s z=rE2Ni5B3uHE;YzqiXc6liKgCY)Bm&{i-prlh`3FB>~NVVJ?^#E5vCU$!TueRBNLx(DAT5Ly4>A|E^DAlS8S$qg_2FdjE zzltj}^c}O=(Lg%2n(&}zRhXh!mX;zJn_?QDssWGW zsSS64@)JBeCPEE=a6ONcQ$?hG0_iVyYKnVpfqexC5v!stNdr}3Z?;t;NxoMt^equN zgtG?|!jbfn#ubMC$rZp5B{(;TfZjRo6|w~+>>4Vz&^7}~JJbhF?_5Jj82;KJ(UIf! zFF!ljS^6|5rw@ojOG*)^4}e3;wC>;Lc93H-%Y+JN#O>30hCvhTjPVzE&yE5YYHkvm z!q&xDH5r_0lHw$zWx_Jz`M92bHXHeJcpZ}@6I4+mIE`;Li^W-pRX<=xvt_<44R?id zH^_b{PWON%nB-c18f1ppI+FJ5Iq}RK(mTs=j*>3bnS!2=7m5{Bc+kose6d#khXD z?hAl>it9Ze?%cB$y!T!GC*{^&c>IGkHTpBB&j!E+PlACbxyfIcgl9>8Edan zTb~}GbhRJYHXbs*puUJhwqNJDX&^gv=DEqZBsY}CkXyMt8Y*i?65ObeAq; z>Erumiw^sf4?#K?dF55VYh4z*5a-~B5t*!$E(@NqK+c&>IXI{$)MEb{0_?d!WX;^B z9M0(-?OZ=o%!yxFR0DIwy$cLs+abA#BX7ZgGhRXd_XlMaQ>A|#z)xE-MI5$YMbuBG zPqzj{Gz|&+iB8Y~I*?h>Bopmt1&$AY0;a=wRKraihfOQj|Im^uZ6SyD%=jb$c9>(R z{LS(;oSd#yvH366KX{rLLa(`? zh+L?i1(Ut+MSjiP(27|k&DUr|6yRqese}I2wGkJTSV_V$zulKk0>mOOeKrCsrhpc~| zZ};h+ZTcV<15sJR5FGJ`$R}Y)_M|$_1^OcGBwAw*il^`g_10im@+_R<2BE`MQSB+W z?Dsu0^lZ}xF;H<^Y?cD;IbZm%ya$4gRW)>MV+V&RscTy%_K;As>stE0Hz?B?&l{oU zQW|JGGjeS{k)HB%4F}4M-pK{xKxAB5b6^hvl!8x?B_U!AK)qya1#sl{bwBWaY74)UpwqalEI06`0N|N z-Vze4WN$UdS;5-z5aTOLnplyY-sD1x+rOYc9YABmRFWSRb<2_tQ2 zP)UFSI}TCjVD8Em8ng{7DQ`$NL>5(ttDE+j4>(4jU!M`eu`HzqSfEE<8-kD%LkT^N zJr>7KQYer#aB0ANOHGxi>S>+qdpf4}P?A4fXWRfeCuMO zSClH(crqG#Rzg_L_}X8M93jpf`BC{Rp|U<+9^tg0IrL_q4L}{$o+CwWkv|PFUo&+b z$U;oEqM1V&U#%uSQVYJu5&=ac zhK{ZLfbz52KsmMpM>y<-{_J4haHu-I)jk%&_Fhy-`?J)*lL?9p3nQ8P0Xl9CgtSq# zOG1J0GNZIiXoFkH&`<01-Q%o~w`bk~ybdzGkW*nBg#qwpp;o3Kef+JV7VuC<-T;A- z4M=-ex3BJcDxC*A5)N(4AN2l!N`{NRz8&Ar=GzwRboiONjV`S``emOcM=DJSX>_Mk zH6g)Umt@R1!A)?pU7US;`v_^6_8*;=2y*t9s;1a7leq$%~1d!`S7WBwK|fkbP7sn{ad z1lcEJpm)SkOPg2=d<}w5-*j0~tL6B9cDo}2<@VZeub#CCj0)uI#JHLlHeoPLUR^VC{MKD9N6xYtO%{HYzeHQ}%aYjn4 zi{mRVD=h?#mTzyPi=14&6zrY9r-C&?E{I_xlWzzkkpVy{hwm3MDTtoW5}y_1$xL2z zp>DM8@a2ZgRPN5E3^!3?0Zl3pszyj}tgc_OP2x+fSIz5hFoa4r8F7+#t=N$V=C*`t zvLbieT(u~xRJu_G%&rH3lh=jpC1^shuJ@ZpdhQD&_}Lig(vIR0+LNq|P1kv5uLJfW zaTCROR=Gm~++~J!YYUaajhhti;*JTUbi)Lw-3-#L4@SEFy4gVUb({o+||Ap(%;mcHw)P%yz|DqGZLpsXF8$u`A?G`JnZwJ z-go24=C^_*_HQS>zhW6Izw6datqf`YikWw`G*qzWx05jXo7ntqsQbIX;YKBKi*L!9 zYdTAMFXB(Gqc~7*fQ-n63=kRh3nNi25;&`*2p=B-JkGklaoKGI)6e7($WTER3wS*c zJzhnKKtlpqf0Z6cMLxaZEGl&;i{}g$W;`hE%#*={fhkc7HN9gZ7b!ms2qU7AA<{`v3n@QWoau8H-eJGrgzfmhj6n8O zrz^pVuqx7}`orjrGUnbp2J`@NqEz8f&vGA&e3VQQ#*=7hQ=Z<%)+$m5sVFoXH5oMT z%M_<2Xuhm?d zF^_RR(vs6hgHjrF;x{hSSGIHun-^mT=j1sUK57bAqN-8l$Dx5lH)O8vN;FrVY;QOF zvX}kty6>k^`_q&W6z{#gKA${EQREB2S+Gw8!+IQ14tWCRqo%7}`RlUfEXm*ORtH=I`nWs>>8BqE3Yti_xmA&fzd{?3eLXt!kB2 zprF*rYFEeh!U@5H5J3u5B-tP|p_Z@cJJtc}a>rq6`q13neiLDHI~98{>owgBGQ;7j zTg;;Uo6MNRlY3QysABb9?Bz^f9VSxfUN|Aq@%L1u(A(yc?PQD&WiC7j?TT!b2en zrgmq31T#1jCfdBikQK|lob06c>f!U{(Ig+54TNye55r*-US<3_>}vH0U8>qgX_wLu zcqkrVzHuhm^&?qCvE_kU>4v!V+U9!uaIHHy{29J_7pWs^`UcdgbX5RhJ11bBbl+X= z!-wh}9E+#7ce-Q`ttAhsu_kjF-WZm^B_cl0KF)x-e+IrLl^_E#Qr2y4rP#egQ%V_X zy`YTWLBl5o0#5o*6!YrHqZTB>qHI-XWKF<>;`8x>9OA-Dvt8+_o!!llGLmOT#9kyYqfpJO|#Kv*C(bWR||g6&y9{H3>fsmhg=cR7hX8(overDJ1tHLKi98+DxKxp 
z{m+pn!=J}vo-1#-9ZKdr^uAS6tzI?+B>X91x{wGHwAa#3@-;z(c>E#(iSQP}7K~Zy z@D@FtrQb_E8Gc#T31Usv@Y;9pc57v-ftDPajv5r2H-7^xw^e3?upQH3M?Z|f8xuWw z-t<3VO?^Fa`)d@DW?PRe6&`^}#Yr80imMGC!bh65;oXR^fZ40Cr!DYdVnIGlGd{iBT%xn!QN2P642d^Pl&s}{HV`jRIFsBw9@Tyn zyW-Cqo%zTqXIlo#C46j+a-p^pBM!s{Qa?br)NHTW*D(z7miU#mYaG?|Wi-kcbA-xR4@e{_IeNunaG zi5LnBUk;twR(?%hi`0S}f-@>@aD*`~^on&U+UELxHv9ydqgWFy4KEE4=V% zF%vcgW(tSeF+N149sd>MRE`A zfKab&?AnY?`txopu=>cJ^lruajl7NHp)ILKR_!6Yo6$YAIQCtAtlX)%JkGIIwkWZR zQS02UJj6b_F;d~0Hbz35jI>pNUEr+Uk{imP&>Wx#tiF@{%Vhqj7(Q4gy@OZ0N z;s;`>k&O7yM?R=oQLfUiTryMy2c+Appc`k_Usypu2Tj*Fs8ZBN_g__@z^)y4gyZjB zL)5_$prU`StZ|)bMTe$$kgZrCMEuDz#opUKL-%GV7SRMm(qM~bXfU)vkWU%<9^!at z>Y=K4s0?&@1QJp#5+RYRtEU5UO}vzRlSot zvh^Ud`$f?u4xlmybCEEH{zw!_O#^9O_Gewf#;?z&Bw7s#VC70SHqPH8??rm`h2N9B zyaCY2&bzdOO}J&4L*O&*#lOp;pBrFZBj6md>Iz#B^;8=p_F+WIi0NO@QCbzX9L$ z6+j;iyOd2w8q_;r71WFLM7qZKMIYU?@RB(igWM6p*@EO-2DEbi zn5osAy$qYtLbXpFf#$D0w^(#tiA{&LNKG*Qm`QfVU^H)CE6~+)$=G$-gnKbtv%GcL zGzA$TTMRr0mLz#xbo)+ymgLb@;o51r1dAI!*xs+o6cN6owv3Z>*9s6_J3QB;yI)#^ z$s+6mM`IOz)h*5?gg6W-MnBkTWnwI9e)B?yBGW)H`03J-G)9`pu;~1w16O?t4&x3# z5t?Otn8m-gheQ3MJ^&8Nw(ABZy%v6o*xJ*lHw2W4h35r<9%N zBcy%9d#tnWSfqGDG}v8qbpWV>yiUka%Ze+0L##}m_~#x+;6rRE=#r#IbWtcdG5Y6x znIUT}luPA9K)b@P=IuWc=u%gh>+$g&g>LtKF-bb4th~Q3%b+z2y0`~AZ5Vg&yvPUl zlnw&2qRTKYw6tk_@Kc=h?w>EDTB^+ZtNTm0j~llr6Jl(5P}%({{VI{}Rj| zv>9V36+zVhI*OrBj#gNwhVE8?MQUGyeGk#?!=$cIJem3llt^uNFWWw|FC*8A4y^+ ztL}xtlyuVQQu*L(z{E0Pm&HU4PFM@SI)y7jTGfhm1aQ?;=|HD(efoc#s%n9b?93a+|7~ z==IEJCs%1X^o{~Oyaif75h7P@GXGcN#L?nECsolVlvV%!zL=%J|qENMrz6yUt;DdOt4=P110bh8)Nl+FxQFq zC`de!R@Br|#IRmJcoJ@3f6=jH=*Mq0I)A-wGI@Tvcmmn@RN~jIfXl$tjK=WH)jADB zuL{|Z^sFC=(IvREE0O#$DY*C@RfJsnC_!4xZ`#=Xo#Z3w&}Xy7;6!;K5rjm9k0Wkd zSuCZBfJliP8ZRZ7Kv-mw&YCK0rsDx-N<+L>8bGcn9IH0G-=&O8pWr|*voxPkyHGZ3 zrbS%fjeDr^1NBoyV?f;nrDok5{ zJI22`U{vjDOaWQJ@683KhR;|dEJ%Qi7{0xL?>=ZTfrWcCbN5Wj3g^lPOu@6lX0389 zSsF#bDg|<@MgD4$cUi@5Rdp%Bg0#HHn)>U>iS(B(7KZDW?CviB9nj`@^!_)92t9TP zAl#4?1Y4EMXkAfIz%5L3uqn#H>Gg;k| zxvFI?wx@PpYiCNbzr%N4P-f`-qtGnidMasZf#q~z3 zMl6)^ZAJ=Mrs_&J#ranA*QN`1F{+hDb;$By#6`>Cnv`lI#YiO`P$hq%A;u)m=0z)& z^erP?r_sFZ%c~6po?ZnckbduE92bA@sgB)m^7oKI>wj3?>~F-8GX4S8LZcVX;#s{^ z1qw+wY_;-UUdK1!gp#L~zW%4$Pek`QCk*Q+nI!eKVx^Hzd?jlY`HACFX`}iX-9C=d zk$0ENUDWbp`BE5_BM(PO*!}MvEv5fv7-g)#`ATXgsuWSX4L5w`g!78lEVESMO{9v7kq^_3$uU{ zv6<#v7*SU9<#rfyX2Sf>(A9xYD6!E8%*7v#GJbu{}pmSWvWpwlbk z!1HHOn}kO5w$D=FBoNsawWgamPR`?yRgFN_uGOY(Os&(8J2`4;)?4V$_rLn0EeDJl z@u~W|g+73{ayUmfAs-Mv<6pFM?~1eW@9a<9!|u@Y64=teMV8ndrCm@ieB zuSNYfX?K8J3y8@W5al9rI$+iBxCjI*fPl~%p5J<9FNhVB@N@no9lB;$RA-7X<Q{m*dRP5_dh|xPk6NrdgX~j`sR+*{HAoL%i)7y0ukLD4o z;$T*tzw3(j!@!JV+5rN!=J}|gqK~;+anx+&7%;In^;z&Ype_*>LkTohI-R&F|8kqO z&HP1?Lr5YHXf{>82G|cW+_=m!S$DbJGzMcf1gSM>mO!~3dzU2#)0ElNqu2ZAylAnG ze7$MaL1VmvEwblv64GB$LJ4t25))&7;w5k`OwxaUsks2JAG1uZ*M*1p4PUXtXx@IZ zqj{rr(SqmCbSbkZ%?-I0!Gz8mR%Xs31buw9?Dp)MK~Tziiw#_!Mt`7mz4iU=D{9`)YJYDg5jhe1E&2n{Ll8H&n7`PVjdgetJ~Zm-l(H zv1qK>6Hn2+UnpT}l}S|9y$*FY$Zk`!;%&XS(&NuGEW?1+#fYr27tRI3@7IV)3lQ9O z`8z=1AjLM=BqPHwQLs80ccc7w>+h_N2a0E`u?5DYsT&|qB4%iN$1@L3#s9n&^(e{n>_u>z&}#*6O1zda1NsYIwsOm-cVu?){g8O(}fdh z48v4mE45hJRsPAc!jGoXnn8qYkv_#jzKuz&zn@+W zVI~Wof~+lYrg6Vgva0X%i8VfDGi(xo6)H6gjo>JleQoYQJiYJ<;6X*`&=H+zBR;Gy zwEh{*u}>ba5DK&%2(0J21N2Nf&`6?3 zHZG3LRvYaZb^XvfwQQZ;{bNOdHJX_EHKm^j7tO{MK-p%op9yZ6%K`mpLf|-)yZ?i9 za!3Okf=$+=45=5+!9rokkXi8g4qHeS_eFfICKoOcEnzA#6PUoftjrF zh*#qPDadVEXvrhql;6F|A)o(N?M8)Azl(!YVXkGQvvka!&MEh^)~9z{JY1q|cx|Zj zv6+j7GF;9(VT5(k)J{EBl>N?zpa)_C?4}lbhuF&X1XM*h`n^1ji2u!{0=oID{FPE% zl6-yzqRgV1Rv09EixPA!WK8ZGIx4?<7T-y&0AAGb*N5kMT7)xB6s^ 
z@f0w&TIaHf$)`#wGc6iK-Fl(jACoKKzVAt=6RxDQP!@*d)H3x~JnAV8VlK5V{-MP{=&Wi9tzqHB1&0TTCbrFk55J&0a>Yd!!#@~z4N=YRZ9=@RBwYEaBWh^9QJA)^Z41{Qx4 zvS-cUlqrnyo{G;$q0J1X4tm6m;?wU-6;!LO#xj9Q?O@fiPB)>M(^>j${VVn9=F0NuK z1{2f6SUFQiX>RlpFvum*;UOR0N7YiVbtbva?~Rn#*gbtxYTln71}@|B?;5CGu^We5>QWO{l&+&5GS+%;37z5NVPNN$vvlA zOa3Az_?0Yi88;fqqgTjY1)Ekc_S|azA&Z&Ktf-wrwm(I(rWh1x&s($%c=FJT%MT0} zsaoFM$3^TNR4-dvOXKN67=JP(=sxnRm|B(zdd0xJkrgB%_5}cMBu1Q2DBTYgQ(m%2 zr$nlBc=lF4@KO4K`a)S4P`~~tiffpkxR#n1>0?-V`6Sn-sV1sYn0V)+4ZzGd*tND5 z6%m8HY5P7bI<}Y@%|pS=+I6gtzZErZq6%vNsmnJX;Ki}lZ;e_sQJ7)pAjJg4am|>I z?V$%rjBjCn>!PFVslA?<65dn;(aArt(HysSj?kDIgShu+SK7nDX|Iv$kBPEGU$WJL zHf=ajiH2c6s}&OtZZSC&uahRdl$qIxb>zy{LgLL%tjW42mLSNkrw`5A4Tz=1k>;Ft zj8Zpe)R?hFNOmBc&B<|^a|zy8QSFaKOR*`~mB&e*Ez!^KnXB3SC73F(LlkBx375LJ zh(!M@R8L^(1RcR@YmmL`qKE|_1WSvd2@+MpyWNUqqUX=5u2@PpNUR^qgZ!8hXtxT} z{MPtoy|h`^l9~1_$iaE^2jNg&v!D(}{He|gCcTn$7B2i~=}J3Lugy5}s4w?67BY>< zsoPaJksw80iK!$GqldEQ&~<#_M4y^~3|?F&t|Urc6lwXsJvc>+5%-vJQD#Oec7}LK zzutaJQ2D;h3vh)5w*aYX=*_jvFoGzz(ESL%$-c}pI!E7TobAB1I0xCOj*sY#v(0Em9SRjAATVLWgkbjXW;|g)cKPpY$(Q)_<6-0n|Xa-Q2EqK#HT z!a_|LxSVQbwkcKI(lCWmX9OI6WqtOwm6&&!Tj3O}qdT%B;#LJEp5Ll!;>=O`8T65; zM~fy;2d0%`hc*{3@m=9Mx?ZH(TJ_$k(6#OtMW3h)t{8Tv#E&=M2g;RXok(l%wkw@(d`W zH7cv3ELkcdV0eNQ5X`FR5{^M2HpcP|dx7r#&d<)6&e5I%33C$xe{TK-812mfDid1& zUL4bp4_&trl=6J+>JJat3h3_?tJ@}!PC#guK&XKSpls?IfE;P3OxULw zbYm|nP632WaO;PfSUgUMk=IZq9b>C0_X~3W2X|i9ul;&_jCfzTP&Dz{5^Fn>f!0*n zk}m(bVU->|t!F}vpTdnH69fx2cw9Ysbf1Q?v6>q<;MRe**{Df1nG2%fWU6C%7hO7K z8^7ApB{Hf@Zebj|3YVyu)7AxEGdm<3EjZcdTO^64?Qdc6Zj~qrLFSnWYZYfLU5)$Y zC-9<(GDV#-&w|{MfEE74^*mvF|MKCupyP=P$R^pC`913bd`OQj&o1l=ow=e3EfiM& z0|@~w+W|hY?@&HXDZo(qce`+uyKRDfhCAIT&?m@0J_Iu~rlb$QNtNw)yurWCG5X(= zQyDvJ2Wx$6i@&igL%aWU+-u4s3nO!NQgV#bgM!TJbIzmn3}RU>L1`dpt>#{li#|F%OiKKBj*EGq_b8!6eeQhYS;Z1pK=5(=k4FN?5(o9 zK5mDz|GLnFo(KJ?FVb%V3IW}ygDb?SA(7AFZxm5>9j(Anq^r?i2(3m}fKP2q>q~KF zPaj!sDp7jX2u+V}Ip~P2V9(z_3GJlYhN77=dt-}PZ@MAJ-<)`g1mn!Ov1lWm!v;Pn z1DTO}^Dehlk+S4Bk!-mYQimVFT2Cm^hx5j{8)W19S|Y3qlWW zF^zKaV*SOp7<^UAyWZGu59q&2B~8OZ6@y)r{CgcZm?Zx$qv-o#c%xGeEH3N}Fl}ST&^JX_xqCh z`DoYR7vWEF`fY{p1_!M2S(kSB=8$9^Y&e@Iv$pW6z8{WXw!^VR^P%>tQ1C07Pox3d z@i<`?!7ryZG7G==ToW89dr(j4FM4PrTe6M`hDn!-^w0r2>$MChQ{V~_+=33p+t9Np zBE#+@*Oa*P_PNC{f1cRLEbaE`2c;{dMnQ7V+ah=MoKjgaSW)C-gW%0`n$$++)iF-Y z*c6DSi`iYts!TSp=UYUTg5Eel`vr2mx;e2(Pq!^8A76=wuLOTen?gD)AS_kt$}25u z89U|KQthWnBpa|^qyWH`MOj#7N?RPjnw!%?Sm=xNQY&M#nI@5KPaW@>ttc#3C={q8 zv%Xs|U`aHN>FKcFmlb)BoHqkqlSx;)_j{DN!5moK&QyxL82z_a){e@J3JN!F%<%&1VGD!w( zMyLk1@A6Ot4)zb6NAo}os~;!)lY&@>aLZ=2ZNxyIUc*prVpsO;9du<_o*}~Ug7FvJ zNF_c)dzeyD{u7n%I^vcJpZc5A;)PCqz@wo9XQA`m45Sk=ZGIcY3Gy>e`xJ3|+;oiE zKxWkeqEoinkxmD%$$z z8%3ez!lbd#`<4y`qx%=13~ND^jquG`_}qI{!253&|d*@5{6dB z4krIFvi=8@HY#ZTCH`|QZ|w|F0`oyg;uZGFWH^rSK@bw+C9DJh^YOxn1(biy`x$Ndjd-?^JRhKHv!4Yc3zUG%@tvW&jCIXj@-Cb;?wi-`mw%UKazD*zh zn9`+-_EPa&iH-}Y<($&d2%y@l2(icKMzZCej)9n^4rw9PqYo~{raq*ttQmgtbL+^> zNorBCUN+Mzpsm9ea$GlMzX~#DaoAb0S}_Um?L?j;!DE2VTJ()NMvmBMB9DGZBiwemjbM664QNFY`l*=z*=e7o7=$pfNe1h ziQ}LNv58<=eP-@x*V}yU+4GoVctCd|jWq-SDfosLHd%6FH!L(-&?C!CS#lzuQrxVV z)*=sojfpa)C91CP-MgC)5-cG4l7n}%tJ7;co>{WXm<<`W6Zo7t42Nbc1$%qfi~L@2 zz>vq^tuEMpbsbur9fWOEcYV<2vBBdqPBgbA{X}c{i7lUxYdyhSb=5V($=`gzW0O(n z8SufMNC*^Y&Z0{Xe|C0mR?z2%@OuaUzlQDP%XD>R_qm zB$HXI@u4y?Xiy&pLtlNwT21c@$2}XLA|!%wxnj~%fzHzoJg1$Mt}=*I#P~fX0rHShf$D`qmj_J|k4sBXs;ZEdQRVDsS}G zG(RFZBORBOnWt_d8sh;8wKf#%8Hgobj~ej-e9CSQRHY74+$DpGzQwsn-+cnLh&icV z{oeRFfH~2GfR(o>P)c9=&KokjD6;2CAO;n6PUvE?E~MFO$hU#^&8qMimtu6a9qgxs zTQi@Tvum&tM1zpt6Jj4~d`bZ4mnq^4M4K*$DJ{r;sKJ+v!-NvBv{$^q5~*gqcU ztDg2i!oQ^ZZ<*k~CJ~5QIT#un{wpt6r{e$M?46=S>AGyeleTT!CvDrdZQHhO+qP}n 
zww-x0&&_XiRrO!DtE+GI81WGC5)V6KuQkWqbIm#ZPsHV47;Uub`Q{Z8>}k}YQX&+Wlh3!u`D%s+&q^^IA@NGTH+)AaZOy0_2q1g*bXES0@?uIpSp&$ zPEec^WxaUJKFesW&}I!qG^t?yF<3GE-dntowS_{X>GQiw>}s*o|P}7z0$%5eMykj_h%2tcyjeR-Km|!|Y5U zy3n;w@|pIabHj`N0CQb_H2kJ`+x;NQt+g5Qh2{6A=n_7xMSBR+nyf;BaS#xjSe8C` z=$HV27fB}iJzD8b;U!9r`YpVtv}eqUY&Oiigz$`Cm2k)gWJhM(I;v+znpLC+(rMKi zqyg%_6WJ?E*fVirwDEyQLu-B2RElh`=xt_f7+uxCn-xu*=rzH%xsR_!x4Ps$U<*QX z-r{BM07ev{G(Z%g;9q$41QUi3Z1wE{V2dJaJ;YfClw!*k1&h+kqQN4=c%~?#qkXf^ zgi$k`Qo`nl_+rlryAjbR8fapb`s+sxF7ElSbZ}qSNeh_WFgbNF<;A7f79Bw<6#eKw z|B~DF4}s37S?WXoL;m9bs3D~PM@{vA7eOdmn%mj^XT^ART(8U^J^VOc_4~~CO=qs` z_nVg|*xWz}7?>q3AlhZnM7+3^kCYweG%kNu(Q1a=ZH7?p?1*=^cG-7Wa$e?$#gbxr z{cAA!Lw-8qOous3oJGMweu`ltxx1lA`;5_4otlhe@+AXWIt7iIajIbAB?ar**^Cq^ zDTdIBlbD@hvRd?hw98`zpZAV+09tV)^$Lz5-p&_Fqi^MnG7O;cz&UTU2-_Ryxn}7a zf4J{f4dR&0aQy)5csg6^s}4V%@cOYD#SpwCwxuIb#T^?IHClxAOL1Kie#?m?nt1`rW5+CxnNvj-rBtCc;+8N}5O?y&hnWP*1TpT<@4Ln1LT1R6iNoTh7CW_Z1S=!qX z-CjSM6fh#g!%eKhtP?9Au>`&aS@z^P>+btZ`r`Y`*Q4q$A~AC_q1+Hnu_dCAqAU!D zm0Ee$JOjDF2p$wcXL{x(L$%?s2zeAkXlHk_)ZV`wd(c)n{LRAaFm0o9oX2f4^waRt z#Hkob81V#BivfoZqQ?iAuQ?i)APr5_;G2}xnmI(P%7utWIJx>Z{BG=vl631IDC9sGSKXQ)BR<;QXu^5)wVE~f3f=Ri$_^`-t@rGXw^|?Xv$L_7J_bN zz!(%}8npv+H8e|TWb;7hAfBOCvzbX^%CST_;gXd*m(3!n8w}J>({Ok<3Eu>mJ=>Z- zBdq+~dOgqVi6>kB2ss8OR-A>DZFSNW$>U6h4=FnEgl6>JkJUHhiLkb}&PHaC%3-{R zt`;>K4Y=khJ}4J?O!*rmR<5wE)a-m!}cGD{|q@)C){zhe|A*kkLRT5|7=J76gDek0e7eWO6%3F zWc`CW;C))UU-r0kiUK0YRZ*;)-HX<5sbiaHTtbMmQ*+-JxdK#J zqMzb2-<&mWKkXnS|6<>p zY$!#?%?iZ}ytmp#&^{LDU6R+M*Y?P)Yi1vdiox-|rn#_=QHKBx&Nv_g7R({pn}ofu4F1SR zZ64(oMy6H;MQt5O%Ty+F7!I4{4k`8h!lRcGb#2_-vfbEx);1WtJIgvZ8jI?S(RR8| z;wYK0yuU;7SNM9LZe#5#Zl_SHO2sg7Oh#~0202ohSZ$(a&aj`OQovdeTY#G9;;;N9 z#u!)H2fsq1fRI|SiUV#Yhf$Y`H+jjXkB%fRzs48DMd*V~cmhaI79{2|DB%0u?g4(C zXGb5D8Ks?)XPExWfuU%bm&GrtNir>Z(1<>C(&=Hs?^+r2v5f`1P3Dx)VFazn#biNr zt{3L8=!U+J=EI%;u0Z}?|FdHZym1l+tdDeN()6?79 zCqRvMhkazY!NEYVWbatAM+E`bkl0XLS%qe+O|Iunx#rjNXX)&(5c+H(#C2kaGeyPT z4A2CzK7|#%fR(rnSS)k~dJ8?NzAE1M=8R6JPeP|lzyf^u+=LwHTF9Y=bYXz&j@~0k z?DWJO&r*4s_I~b8rTkLs8P}_c0vxgCT_=nE2;!NcEW9L|P7_Y58s+31Ynn-kGtd-4 z`q7F|mopW2F{#Qjbg^13r5W)PQ=y~>We>QBL)FS2Q7m&t)DUlcco^syP}Z4Q2uN@b}oqDW=51>qiN{?;!T9uj_njKZrU0nt#V ztu*j7w(M!T;MmvozowMfTVq*Ud4|Z^EcJBz%iK%L8t_j4E@NoSsw9B+`)crJ7vjqj z%tG7iOB@{gz)WG`t~YVwhr>C7X0^4dq&ujJoL@g7P1~ded}WQG`=; zG{ck4t4>tP#bL*2c{DcK0#C(Q^BRdxA(iDa%1ugqT>crMpbgf576sA*ApMI}YY z!g8A(_ks8rdPd%V5sESYY#b(??i2eTE*|M;n27!VtMoe=yE$3?GZOg^#uYNQGPnLw z`u-d7o~^8@ilmDCEhCUd01McUFQ>Fb+5W3uqZ*~jfu@{*zXq_y1zhmA(^!yxh`=q} z7YfhIFdCjyb8wAG^$L%3^>^@hYKvzsG=Ka{K3rD!ERUz}=Zo>%VQsE2z#VE2Y#~@8 zWCNvxJ}~|AFOb(kv(csijFd*mlL527q6qgCC@Zf#1NmXZ%E_Nz8T#iTK0s8z>G#&C zzEJ*SeF6ULi1G{bKoG5BRXKSbbp^e8P32oqaj`+5v3T%wLzSK)9|jH8A@I}api6ikz!#P+` zYurrC391QjK%yX#lga5%+@baM1GK-GiT3V_Vu}uBk%`tQH41p1&m4974UWIr)${2a z7k{iA2%vNp`=xai`b<@ifjApyt>$Bg;lnoTO1rS5Bp(g>jTiG}x!Rw$E(`fxN|pS; zdbbQn1{Ut^4Gsk{8{_S#| zwVEBrIy7;Xqob?NC1NT#YwKGd9B9ZfmR=9iXEWsdu@TO25zo_q#*ZSZu@Kvj2ywNS z+}rqz9~A-s4cUal3rE?Ra+3lIrirN~gL1LLomzr-)zeVeKiCo=%Y3D$I;g#c%=VZ& zh-k)bBDOVaipg`zz=T&VI*9g`z}>R|FR1kk6Ome2O)w#ukI1}xVW})%Hbcj&Fh95X zq>z}v<@~roUIXQfJLcrCOq|CgIm#9x#e2aGG=hRQHBM=+%A~$nMvBVbeYV{-gyE@Q zkh}v|%sWm!rn$~O@}exoV^CGXP=$12;z(+!IYRyM{u|fKB3Mx0~ofwdp~lwh~{-{9+CI+g6?FZ*Fmyt!+3_;>2T9}Y+nSH-{d4vRmoT)kQ|cLjE5e*EGOes$W*Ef6F~Ejq$tNR2zhEoTE&X4o z7ybbNc3nb5AwR5H3grJm&h~%QF{Ph^_D?PQFN}DV*Butckh!zh zz1ehSKP~%ox!io6)dhgoQyC?Jz7QiSHG~Rb(vsPStho@|(}>s_#H?vF6%sW)!#G|; zryE_vJlzMS8+C8X-!BR23eg12aI(Hle*&RJ=IS-m@Kt{aV4D0XM~^ZTaXsVqo6tv* zc}HyvSpkfAq_NPsG?x)e=$J zL0oEc$mzF$Nu!deTg_y^dJa(yxuljWWr9@-8;+8){a#tVEGbDUs&&)fD$@O)ujSlW 
z5DYAZ|6rVIR3Ohq{zm^?PksH}o42OZe${TWlbMnnaoFDeeIn%QMR#Xa+f-#trC;2a z-#jnKU-<9}Rq^^a=7UUDjm}LFa)AaE>O$a%LPwn@XY9zmmNi+mLzo9RnjA9%otCFS z)-$q)W)!DKt@EH}Li~mnrWFR6USAIl_O327rn4AKXt2ni!U+8+#@~9aN5AODUVO_q z4QFj!?G;+;Fv%Yy3l-h!%NYKwT(_&d{dA!OO;1mIv=hIB-m3p`!{F0Z{El2YY*H-KW%PpYx)OhS*{B zR>aCT`&KB|>vUpVTrarelpH()g~nubippnr^U9&Ji*%^yMGT|7=GX(q!j0TP5oT)~ zVhs7+a>jWI#Td&MTC(pLEyU!|A7m=3gM8IipI!pZ%E zRXk#1-8+6T3jF_xsP+G`$Nx}Y-)Kn7^xUd@esAjHNqfDS$BAwc_Ic?u+Q zXitFV^o$t6z{Y?FR@7|2LM!gVa3(({TY8TEdVO)~{)(EPjWZsDc;~<_*q6ObyZat2 zZEDInwHgRHEL#zkMyKww&#&*%vac8*6^x~asaNu-l^(V*?N-seL=;^Hjg|m(091e* z+E4o>@ORQPhOsm=1WRE4TVCcE$dQ8 zf#<*YTKuyp#1$@*);|F5>!H)7ym5S|8?+JtgNZHsRI8E+vAVW z&y2iK6s#p>X;gQ`loS_2tVheRzrsh5FOEepiBBn6LrvE8?AjEYJ#$nHcP(X~#`6UB zB$|99QDg599rfujlf(UaUGu`(`EhysBO;6&c9nlJ>j zb(J1mNOnd<6=#zn_nNJPe(mYoHj3m4VFeCg|B32W@71EQzMSdkuudTDF|({_&*uQm zy_~==l}?`gp}$e$9#V=q*%hmmoI2d*G)TiccxiWJ3eNyp69>`sN&@kH?!AZJp`YOm zj*-dykLcrlTgvmH{camPuFl{@R;6FopbBQo4}I%3<*K3+GQ6rK;Y@~ z4d8qcQ3$-OlqfU-X zp`VBZ(G*B`1|?4&|ND$z?%MroTCoz-(9m6KS%hg@F9m2wLr~sPQF5LvL6}KUv!Y@1 z>iU9+;P$TVioCs`%c_9Eb?Oe)R9PZkIJn%7^?+tBYExfcVL5w~s@xmiLJNq+Wy&N+ zc`V}iS93s9YhZV{{?ho2qO`LfWbSg)2u1^Rb zxcv=y!oe0;Q>*>0+b8~QJ-RDOc41LIX`y^V-Xbob6R61162Qh$EBKf{5U@MMc)4JZ zWf*pzc1$&H0g;;E)35Aacb!nP?g0AfB4iFB#MB{rk0Ef-A#w%6=-6|e0`eoQTs}~^ zTI6vbtzefPsPW#yuho!2dMe=-2y6?#a{Jj|t~tQ5*$^sm>_FHb$G~m}Qg$y=c6Y>X z4^nqDUK(J#WR^9~YCRGpJT-)jfDFNLX)jz)?<&r4)C`H(Z!9AhJr72M_8wj(Tr*_} z%wn0NF^Av@hw2Vop=D{JT3i+q^`-F#rs%buU-jSEsBLV#d5SXK!mC(P4*8aCLYkWd z9g56Dki3R`HI;B)q!wv4H`gx)ZvXm_a~%8fCwle8VWdefi*>U;QBTjKSQ-aIwZ} z$IwRGh{o^=%?3TOLA0mU)~bfJW%b5Nhvuuuwd)R6h(AEU+a;dkt@mq==Z)*k`%Js5 zi;wdv_pcN3R;Xn**^Ec+xO8jghbmj7)7J}DDJQVzH%TY1gbl(OwB`xFS7{2Xe{ZBjcFqwM zb{gNj!fqW7xv-uzC>}hf<50B!I2RhJrHUX7?(SsbYJs0ip>bt2BmIZyeqt!rd{li+&)buT8uMSN$v+_>i5^G^`o;wDL_9 z)Efe{2&@h-gj;)F5XZZvP@iD~-Aav&9=yH?3Ds5Tka)Ja$>~IH)NPRV+>cO$X)|i3 zkJ@y0U;Je_R7WoXQeOp<*{afV#Dp=U#745oX%1SoVpy+7oCRZrFB&4+Lx>qJ3K%a( z=`)FR{}Oc0D>}u+uYYkPPp%e_)Fgs974Kfio{$N24;vgPBHDFcepQ0|nh@i+gG*MG z1|)lzi6D0BqX$e&^R)usWMZOM^&F>!jU|FK8XrT);sRqw39?KF+c&RF6(BH-{N;PeqZ+T?PaBcfITHX<^duf(=uZ$!f zgmIZAR%RYN#=%~enB}x54whr!C51*EDYW^Az#^)(*|C-vJ}`@K~tM*;J!_-;51Jzl+gTub<<7NH>HEQXmnBjX! z17mxL^X{dJx`SeNYwo72!rK>$`Asw}`ldEW_O|3d%j6Y9$Mi?_Cl3l478mu1(!ceajCzK@LdLS*g|>WK=9%m$?P348L; zlB|hg)7i4DVXoWA?S~Phr(cV$e=?C_d(=eGO4uO{KWAp_M9=fwui)g!@RW>HL$2;r z;I17spW`i4sIJ(R#^uH=7c#%X<+!*R#yPEm?I(=MWOXu@sm#?(q!je*rSv<#h@jp>-|meaz6H16+dM9t%CN6uAZ0(GLMOT&ZN%r-P@# zn@evsI_1A0>Xz{|i(B8{tK$3aK_=A2D-i`FFo|lg|gcxGoP| z?OV_enQp~m8ks`_u1}!owaH3g68XIU8XQ&WWX#}w`1bp0_VUJ_`aGt>arf|GCe^`A zcz2B8f}i_j=*M1&aHAt#;^~y~A|LE>5Iqb~_B^CGHGPuAmwK*751A_ntb7PUSlER= zKT8e}*=b3LVT)3E;J8ZlY6y-mPg!we&!OF@%!d_7&cu7Td;gH94}T+Cxp9uFt7)o* zAu8(8A(`t6<#MopdGdrs<-|h0ptkQv<7b7)4xzUT5gRUBjB_h-VBdu%J}4BFD6aEJYqKgS{-1 zP{PetP=~1`IDW7W4#dm`T3$vR-^aMnGIypOv{BqqmJfO~oX|l@BZ(K`~xtx+bPl{i&VQ9ijUGNc<@Qkj_^z+=axg&|! zEOR!LLW|uR452M+z(HRO{1Y+|U~^U9-St{2*W+cZUH3N8N#6pO5S!iw<5pbNPgb0{r_L@ym`F9NPMXsf(clnSr3`@6&u)LnJMJVXiwW5n|n?RPf2M^&;+?4gMu^8_>#{eoGAe6ydp>4p;7 z!}l=}dpjFvIQu63DxmsCZ4Z*Wa{EagxvM9cgs^lm_{?ShAEyVU-|k#D zJ6G$zoukU;WIZ#rk&FPz?pET|ZGkD;k(+bUnH}}hXVSB}f(}5e3>U16hOKj-tp#BT zigujcbr@iVP*4JBu)``TN7mFZuJLECjTzRaN;ihXUAf|~zYOo;&6|NP@4*+H$$TP2 z?QwAjhuyNsc*Ub-6&D$~-py~^QWj?A58P|I{95l2cMf^i;ok9H48JY6hyPYX)>44! 
zu&UWX#%G6q*(1ujII1$ps*lgt7kPt;(iV85k}1ycPs#3su_Z6(%-xOu z<*U1HLdS^pa@zljTAW(qfb&_{Mc=P>Y$q6~f#N+uTzsCGfphF|Eoj;(XH-t6UwX~p z$|1!wOYmWaJ4yUQ_ikkB>5wT9pQbYd@i@d#%mRTBg+b}b?h zB&2@?Be1a^3N8U-?^e8!b={3fV!cN^ws-oECm7Nl?c)RYRR3qxU(v2U9#SmcedTA0 z$ZyjwdEQg*N03HeG|Smkqdrx(XnmeYhHgIfdj560;)X+x@3)yoq6%;(hxU zx(Wt2_2YQkcQ-*4^5PMS&Ij^R%vuNK_>8g_g~VlI<$@COT1pkhWu@laj1-e&V269? zhj6l@xATF@A1EI-c2t_EmMeCj7$x55pp%jOxKNdqYI0nKOocYDl9C-IYi7n+{1iL>lur@KFtg&mcT^F9G~c>oMt=Uhl3A+G)W0Xm%#T0j~+f(rrQa5$K@ok2r{ zOhIyKt#0p3BLzYQ(lzbUK^hVcv{M!UE=oQ-f}=inOubeXn6}VBuymT;0fP^zyV$*6 z6+b4KkW?clW0zZ2G$t*VKbf{0B$+$g@Sr(vj>`cCi(?4dY)!wTo6Zmvp$;kAJ298(!tAwN4oXlat3RP?GNd zp4M{UicTHbCvkm6SV1|%vRne7OA4QC1!i_!a2O&i=0sF}xjo5G%HC}kroPhX5ylRd z(t*68gdaVApM5E&YBV<>rS~)eKnrdFyxgRL;Q3lm0(LSP(}q3Vg}yq}xOnpFO38GZ zf+C=eZa!}zlm1>08(y90rMLP_26il{C^_xD8CvH)?KW!NW-H}R)asrlM~fG()`^!r z92|9Ny$0)LLBxIE0JgjaxukP>0#K%{pE7m?h7>?-ipVHs8-TZvc(D?BV=E&6&P-BO*%{3*xy|-;ffjLf^{6k91x{k-dU?s_GaherZwZFHWU>NL?dBo@c%B zc+wHojE}&|!jb5)kVgV@ij$A6hr_~>-Sc!%auC0e0-+9}CjxWwoMO;E=KSJ#-9qhy zwwkKaw)ItHSR_$*O^}lg}nNujL0GbiOtav$Xn;ANN9IE z=&`cmPMeLJvGihTa+s@vtVcC=v$)x)H6llyVqeHXWHW2Eg$bc08dHT#ZBVOyPN*w! zSLCZu7u`OP7kOU2)BuUsPyCoyfUK|=>AvGrYK#C>YKzmBL>sjf^7-+Y~ z%mJL6>_BWA{s9>T?;)B+XXS;%F`|yr$~FSKKFfRa`x$bO$Du+H$dXW!ID0ildvvDP z^klcZm7yR|XjwDzt|)+vg&=oER>4Y-3+^B?J*)V$iSGExf%d{ka=-LNeN$CDRD zS0-GdUzQm8eVE_Munsthrb@mO!>IppOm;}lcERU46$&wgZTk%gUxD4xjWWW!J8Yyg zv#M753`&2CaYFp9jDm%K`F3QQM*e+=K@GCew1+cOin#|eYfq+M)TMZsK@+`8PMT*rin4M|DXQq%Gk%>f{s@h(t{xU{s< zq9Sg=*Wcc}vuMU_CHA<;6(4ZTvqdH`-9eVNcHzaTY3>Cth0G#T5LauD4t%@E()vUd z*SrLiKFq(j@U-~`u*qoB`?QKp0VCN@Tfv;>q{hPbXfJb#`Zq0lA7k{BDxb9jEVyZb z-mozSd7a>Tu|M9WDrR>knn-ag2UpD79-lsySjL>}q>sF#B7U^+asgyKZCKyj2?e9G@(cR9TPfZGU#P z`fPb0Vzc}?hSM4m@trhwd(df76!9ZU;==nHHy7l_6(Nc#z!;r_OurzJ@{io}kwP#` ze#55ILz<|EI9>~ynq$o5k&sD|o{PfKd;J@d0mXg@>&U1qr~aa4ydCMC(qN#hqWCqs zHf2>~u=s|3vCV2cCzsoQ0*xu{@r<0v#mHI?KxWP*95tiInL{&XtvK^Y`lwRp3)B`n zGgf2=WghOtf`uPiOm(LsC(z=w!PR1UH@7kZ-Quz_B^Y}R+42+WBkbu}x@|a{cka4n zHMy4}6FRzI?`d|yd1h}kH+9rIup$7Nv;_J{EjxIK&n^q<-(@n{uM&F*sX-N2+8PN7 z4-xK7_776;n^>m1>{k0mlhK1a%9<|V_8qwo2v&SXgE!cURL9muek_y#NUF>>z+<(?Bb)ochG5#?3M;O0;3H-nFUB(U$whpGoHpUJ=S)w+k zbi)5Ui~h&6h`H5&=rF2PEuEBpXfK(Q^l=aYJ3qgD7+3~23wrYYvih)3|b71m+OGU4ioYPz9gE<0T!rs;W5{s zEzCY^f~7S9O~5tGf$KIwP}h~c&Nb9NZ35UER0r8#yNTnT!CPno=&zAyFPKgT00g_K z`)~qfgMrL$;Bb<+2n4Z^>jXy(1e`!_TtVt>1Yvj*oswfn{9F{*u|bCv1hP=>q+zCk zZ}#w=!tc209<*VnKyLg5Su7ulfg+zHRrdBA%Z0CL5R9Gj{LXgMQYKMOEe15$nuTPa zl*TcQ&f3_sI1JnbIap$84BVRazbUMZqKvW@vpq;Hi(=KVmuqd$$N}Zbu2&OJ_-OgL zx6K(Xjldm}2h#S>X$4~)R}y7jD3+XSmcgFQXe~(#v^{?3T-Yk#ktuk4i)g4Qp7)rM z&U5-&H!Wr)E(@D1Otr`h8r>X2?hKc+z2YpIlSUu6f8!E)m+vIfQ}3tcv}ws^{+^Nb zXtb2F*)gG50ef1*B-*egeKxC2&oh@N%07vRXIk)Bk$MqMop{?ia7!N+hG%B!H8%Jq z2$_F85>nM&mFG-MQO}ld`uo7IUu@W=Udh*&660EIov<;aSaaK56+JyFU82K5&8#EJ znVl%n8fEv*)1kW?(5$0#!6h=7H2B8^h{+pa2+}_aMp}QQet(hoQZkNm3OAl#sLe@m z3VlpLl+@2VL20S%HJ->MEl@;@;6>SUV`2t)R{p6Vr&(mm#S*{Qmcd&R1DO;SZ)~c# z{CMOeAQ8nQHsDrmo+iZN20Wl#Dks(-IdK+E@;JfC0pGkT)i?l`CvtEs#hYY|_!RV6 zNT#93!kN>1-qZ+3pWdSUJA z3&c0krDur&WfNht}64 z$jaNs!zKUVI-98ed}{eFKv$VQug=0;%rF4DiouwN{Z3K|c;+= z#XBBY%Qt2oy`s>{=?JTgm!{JxA*~qOW*m4T9_hi*x9GUthbh_1ctWRykllS6m|nDG z-#)N{hge8{PVPc2z%1K;!Mp3&>`bv^l zDCh6CF|ASwx=y-DV@LCbsqnTHW6Any(foOZ*)|Xit4mUpB(i8LCt*sL=y+~2+|xoT z!5c}!WUN>tJl5!yFA`SoP%X$)-|!0E`wcaEMB^B=3Z)O&&Er!HtdyelU?0rr?k<+KIIxrPS8+x@8Hn5f4+`BpPwrD4t zD_))CyAQPW8;uV%wQw!Y3Lon-ehHf4P8gza{e!r6I)^-EBC|S55clBskoOSxP!LQj zAuojF+ZF`*BS2;9?Qk19Q5M*t^K=GNI!ib8E?lWkN@hh+dn7j&1~}4Qhhb)f_e}7U z&;?=egIrmnJCETViQIA%1A(wJ{$Egb2+W9;m{54WeJLYoa@{zq)0?sNkhKLioHG_y 
z4WhMqHh3dA3>-H3tvjEf7jXDp4Lg&;_fj2SX?b;UNU661P|NS{c+5c`*=6kVMr7`N z>3sGpgE_Tq4>uI_Fqu@KfZE`D3K{MgKxZ=O^WmwXorX$Q6eWe_}#Qsby*F;9dw623!PaX+-(8gbgL>4y^xjGqWkhS zVTBptY)v?hC+a*I61B+NiWeBZ-LCD%b4Ln+Cir3*CHxo}DTX`H4mLnCeIiT;9wS#F zgm(Khziep%|Lq!CJquCTQ-0{GYm~P>k?^Amh0xOCv8Xz=6=d@v(>cs*F$ahFWGI#p zV2lVX*LoP)G#c`q!%DqhrqNM{^sMzkSVaBcqWP}t)dw3{{$%6!250PGeP6Db)HTc=Tj)kAgkb=l4$$bx@?B~V~Z?IvJC z{j~yF3)8yOs(DTO@$8i&v}Y1EOe)oH!nCh)(i7xK_2ta@)tF4-0H$~-qc!{BMn)S|g>_9-LbmRX<7>4j^_hn2ZyZWpHwgkeNH z;n0pjCbasDyprtaq&)VPhm%zKl43Ni%$cd8f{FCDW4Ne#&X!z=*lPF)VM**T8chJ1 z*CQ-rsGiIvl4z3XUX~Lv%k07?;hXCm49ZpcE{5U%p{2|e|Q)9q#yD#Bh=2~l@t%=qYPQw5kS#>2iX;eCt_8< zLb+n}KtJSF9=CT??a8&lk7s7?_CVhwAm0aZwN5*<)J-h+WZx@f{E1qyH*>rpG~pi- zsHawU56Trhx8dUp>{A&WYzZA(3F)1i9w~Hb+)bUvpckjM0$oQD$ZcDq5kN=0GSYlS z%-Y4F88J3nOO-yq`ltYQTI0ELoEnXA|B$B}H8-^L{OVP_UhWc7d`yQ2V!bg@u3jMH z-I8)b1yL!c;`O}1@XyLoCdq3^t($FCF{jl&3svFxHd}h@MxCbM%GGBQyOlY*ts}~8 z@NS2_@nH}Vyf1qk9FG4JC(wN=(0khN+_qopX~Sf)7sNpjJx=aAF*2g z(D93%5zc6ifl1kzPpJ=K%;R)UDH&)VNo=|O7ds7J222c}*D>Ef3?B;z(0wwHIer`k zE#J%-(0d3`93Ok!c?@6AIW(~xEU^H-qqbkuiNDt*f><54SRzrJWwt-?q<@wOJ`F7& zsybhV+L-W(2X$)^a^$Oy)>-X0?|+7*XwCk_Xg{OmcQF60b)>PIv7z%%asZu>v5CI3 zm6N!wu#2&clcKqYv9P0)x%EG(0{ucS?Kg6`r#p^w^D|}aab*li|L%Id0s@H zs(tgHIEnO$HTQmSS2nw8uJ`WlPR9GF@An6RACMYM0p6(+7hVw5DPY)s@GqX&NB*mk zV3xBqSp2|80HfmuOl_4Y2}^^>A(Ny?MrdAgQ;$+w1xNq(*X$A>!y_}V=Qc-{U=hjz+X-DEV~M2A%+ zFYRGgB`-yOdc`HGmTsaD;=*IR!d1qoT#_`)l(;Ks4$t!a8cBWYdR@P7Kp~v8q;uMUOE&W(6+r=44c{&o&FG43Z>`*tL^n9|@3M0yZd5 zR>M{JTbuBI#?K=O6&3T7jI=UqklUiTAv-XCXB#m&n=U7mDLrw0J)Iv*bEf&}?l;UsnnT2b`lO7(1keifm6>6eN-*UL}<`j@8j;1D>{!jl7qiq zD&JBA5ZIp1m`Zq46#estje5qK4E@ez2HZ{ZQS6>sSwMCdB@dHbht!6CW_stLbK z_VzsoUeuUVky1i*4?!Py;63Fi$W4PnR*$Sr1%w^9$ulh~_hZ}g52Rsi_0M4g6G1TO8*BGv)fDbcVSM|gQEV-LbiqtNVevc3 zlb&h|bpe-?_T87n}>NWW|}t z*H)Np-G&D?pE?|3dEqvkF4wITZ!Xo>WXMK0RS4xbUWw>MpT!+Q>KRhu!T($oVR zcM5QmMcF;h>0Qq+qUwG#SMr#wXB`V;*{4r&b8G9vyt^PCdd38J|8bsZA2!XpRigER z*12JSZS%pFLx*$i)2vq}uZP>?=!AUryc^mHKE-ice%;p$yckKx?7v;-19xME55N-M zU~w=b_zODG68-x+Q;lQmGN?7157wR=d|I6wx-xM8DbC|pZXW|QVf3IE9an%lSGXnm z03f0p(qI=zaaP>8bDFFRqkEQF25M<=6hFNlg+|un9qVdQhEK#^3R+{ zG@hBLM9L zRP{kv1235cxVRoi0EPtD8iEQv2R)chs{gq4`x{ zTK$V%$6iyr$Tt@?C^EEDHx|&`E5^&F~H6T63Ns*)P(I9nYA4N3w#5+#NH3(R2&Mt24qruzP z6x#qzrWS_O^~|tuo6;ATW<88LV;M;NOg=?aOy!`N-KE4l%W2Sc8M@%W-{u)xMiK{ zDx4~4v=YV|O`lXWyM^;ojQdshKX+Y@EuleO=wH9ah=2WJ`frG!fA6}&|911Q)X%V* zxgORs!gmg_WOD-?~BiR%vo6qdx)^l|^v-TK(`zSEV?+UT)= zP4)uBrX$rL=wN_wb9&&kJ@@sC$R=9f&<-aI6r zZ$u`L3)l4!wbxEK-Cm@O7sTw{Q=MDX*pwZlo02agas2m85+0}FyUZClGL!Vzk53(l&9hSzKppPWb{~eA%gG)^stDi zp@gaIRPKmQU6>vA11fux#K-~Qq7;388luGTA@Tb8Du~z`PQ;{w0_0? zI4xqGho&mjT8E)N;qgs0xr45SQy?~85{r;|Nee4#LP2G*ThQo75e++HzBF#?pes3| z^u+xP6L;VUqI1sJS{HZK3^%29cvOi9m_fZ6z5@V%1g`&Y+t`m4l!)QusmgVxZmz#9 zU_^xvlpCXfIxOd0>bRE?A}r%v(s7gGx)D35O$qf{de#sU5jLv4r6?5aBy@>Mc1bT) zHfLUoQGHv3fQV-l!G_~Eu_7~K#fn%za?Z?r=a}PthE#e9sA*EYg8n*UzWqw^ zgXeM*5-AN=2Z^M=nw|VuD&LylSpS4HgW8xrdXF}o78A%JM3&DO6#k1Jm-ZNqnR}#8 zA5CLps>0Y=GODnaHdCU6<3pzjv8{N@f*%|RSR_{(Udljq!+172q_)?2Hd`4NkTcDq-Br_wkL`xoLjg_NRV_mA(3+@cyUA#SI>%) zm{%|)`-mnS%M%StljG*9?23G3L(=QP#c^_aabk<{1mf(Ml8l#Sp9-xrT7Q#cd4@}} zYxj?VRgJ?z0qsrwqWYqW1*t&L6F*sWT;5%OL4w)ef+c>p0DduQ8c3ZwxUnSfI27?+ zK{m*f$1v;$AQjTa7Dh1T;aUh?LjR#L08%w@T)Wy-vUCs9F*uR9pK^!#HC8u?bE^d! 
zT&NNNvly!$04*XE8*-TBC{Y{`4TIzPT>5CYB9Rh(gxbCpu3HT~LwQ~UV!31ehM#qp z?oJ|^oJ+)%SLbZQHmQ)OJSSvWbI9wmgi!`3!-x@9v`3iMI+tr}xXBkvyW7uXy+dX^ zQbE@l(%>$DzDBR}Lh3sdXz*4}nQZyKZ@@_x+5|eu>Z^qcQh97j%r3k!L*BRWhwz~& zf7V|^WY>!IvW4c+Dq?4^tv`61;H=-S?G=~3=lBSIqjPE!I}AxqOdFmw-+*!r6<-d3 z6qKi!%lQ7p&5Ig7SFH0w{6(U08BW3@MROXLKf7Ye$rF0~_o83#Mjah(;JP1)h7{S% zQCXdb&gK$865pBO%l&oKzkLhO{dL-(;XV;OKO^e#mfJ={Fv2b>)HvpsHxi7(wt8!a zzBSD4>WD@dB&z?5A^9$dx1JqO}^PH^2-3a?LC~2dIT4;3Dm!eSTHxyMp29MEMa9YjN zc*vKI_jfe#xvO6%DyKv>@DsvHROvXGFHNp8d3$8jl%f=J6q%X&-C!POqPyE1&4emL=uEK72?NQFHU!3tRYeyXEB{Xs#YJww~c% z-e0=bHEQR!Sx1=<=P*bW+NZcXJ?M6Dsy)qVN8{z~4hZg|jb$>3usxl?R#{%Z_)Hq<@)nG20dzpg!^xCKiJSn`#ltN4N5;PhVh<1j+ilCD#R!Q`c0|$Pm`6L ze2Qzqst~nugdxegAz|5)TVWCkNtrlXswzuEnRd1h0;5yaV(fLT#iyz`g}-Sx1RVP{ zJ+%QjDVo8(v_F*!dX~3&q%60_m)-?INiT(VL%mrnQM(Ml+22Am8gjl4q6D}$KlvUN zRIVfrY?Gz9Q2bMC%IhPBv8!M~?$Qx!vH&$8JE{B3(&2|8PV&e?u$jVnA$ctz8@(XK zlTH@JEI%e2X1->?d7yY)?X6Oo1;dK%PtrrDRB&2l_!+|nb}tmLVQ&(b{ts8fp9?R= zII8gLa*0;8Pu45g3IO=BT09$nAZ=j+YL#Kua^T2veFb#{GO)vkN+PiNj0vb*KhOmG zjtF0(7y4%^kgjoonz`Aw7N`nUG*ykj26Hegm@_QoQi3djtYQavoXNLhs`|w(`I5)q z<*sp3^-~JkW~Eik3rm=m=gv(V+L11e$&i^<$Q*j)3I&J+mtYJ0LTsD1N`4F(j`UUC z!wPzVPWIK+zFpN{jzVrT4MQ#PQ@L_^t9;Q?8N&lflSc6Ab?S$8GNfCEH|C7)jTk&y zFnF{-W6-j|pH~Q6y?@tkl`wMw3O$MfGka)bj`0azOJwQJ86LRJt3)WGngzwRH~Wc`77fkEg2Xi_s9<|y!V`m&MIWE#yAhzf^^~qJ!-|Vf zG-Q{7+#1@v(di{z+5QDaf3r?X>HLtI1kdFbzyeS-AN}=L0j1d4MMIuzrC(Uz zw-FXviAfz6`o z!(koFMg7?ZexgfYH4P!LmVi8Vk1*cDMU;*(?*Gtw?kA7sJfPxJnUxFO@aLMfpkg_5 zVPpLetTJ*vOErOcDh29K`)OEh;gHt7GK}9Aa<1jPya{M-?lNy7SYJhpKhIX`b!piH zR(%gvfzwnGNNc?}S)VytkA#E1(kHiqT$FRA)(tvKvt!)Awg%-3m3B!x zq?LF2J077nK1sRYt5hdYRu8j^ZD=&Qn_F#1PHj}tkSAm-64+v>M`pn9X~kFPL6ZYA zW~6J>nCeoz{n(>BKmgA0xcJru2Hc__kR7+#p}WRIz~|Cr_8^OKSpGJi!;coASma3N*4Glq zrPfy0E0GD6oG|@^0%Sjl%~7#sk1v@qUSNLmWdS|g4$2NT{$$@z{?Iuib1a)Ua&)9i z!EnGNri0vz=K`f`*&CorSucf}nke!mN_n%UyPiCB1afGLZM7z*OTht7S*L7FoycmX z=1&O;*TJf9`!jLq2M_5Q_TxX=eA!!mEr~Ekj5W3@nKB|?bx;mTV7Uu@wm zSbnwh;xogdG|J*Aa8VR!uIF1ijs|we7;b^nrr(*?3VU0K2!nHmN7Sa>Icd{1&+tD| zIObNCIc7LiC@P&6M1$`tb=Co_lx$lZaCG4cVMh+6#uZ==KM}`)jZR7Ea4^2U*UkM72J^k{W_9I)*?epMrdnfI;h4$^g+T;^YhC{{^bRL?a z)O$qFhsBYEwbk#zRd?M-FKjf@J5pHNKJpTBn12ebc8Wof1PAm}EOYp|@hGP1gO6$m zz_+d&WnUli^*{U*vhyGAHwmVNfqz8va3KEp!Sz2_?fvt`Q!)SM9y?o`{D-(&rT(st za*XL~s+nqliGV&^{+n8&y*6EzNCPC|&{QbG9I9z(i|N9bM%dKs|MnsDv-f|ShV!YYX?738AV3Z>Dr z?VUuByibL)?R(9b+Ay_Az>imj+fD9@oQg{ju$mmxf;g(3Sgto{r0`8yAUB_(ihvl) z4q?0+!jx`FLRvYC_Sj#lqPELg{kJPPpvF7a<{H&zV}KmL*{2N=f;cHNDnGEszTMW+ z$LsIUroi~693k`-9DoEW&)Jeac#MP@9b?8jX(?t;<}#i1srx3<3knFp8s?_Vy>zfXv5aGZ(>+T!mBDmK0&WZ%?)ohOC(S-R z)mH@kSYKRJtrMm)39-sRW0<7b%3EdFA4@Sgk^IC>8@ls#CN$N9I+Fe92ovv0!(hZj ziDXKbIA2W?x%>hQ$L|%1bu8X%6FLS5jJhoNX??`P;7o*ln%w0jhVZ4@W>I49KLIgT(p_^plzo<7Y zhQHUu6=e0|23VASS!DJ?JVrUs31g9vBvNfej-h5X*pJO0KX$b;mV7bqtFux3sGfJL080mCofl5V8+|34_aG%S|Ms~WS0@a)#=oZuyJ2JKDaWNi?S<$9rW_&(3yMzC%X?7kV;mKr zt?kp$^k)0X)xC>5W@^0uGoZ+LBti!Nh7lNJ1k;TFx80t0^l1;^+TEOfzBBXf=;>41 z{K9~l)VkLdGrk}uFS zMi2~3qB$mYDV{bvPcin1HD@tkq`W9B>6g&CPw3K?k{0p;S#1)Jatum3gJ`H6`mE)x zMbk^8mbVrFud0f#>POEGUX{AV)bM3VV206zIpmJ{0IPGg{&xEQ`AqtV@WICodRw@X zux2CmhFG8m-lZJRpc*z*x04Z}?B5$1De6d`Y}z}c9{HASLPTz_>G znPA*I13pCe9b@TYSu$Scy&O4OvxP&stEl$`=4W1R_<0D~VPcwMw#CeD(nOu_T=7CT zf=OsM1m?r{G%qC#T=gG^{ZfqF>YMnAzu zz@6m$fY7tPD0T9X1$xfquGbo44DsBfFJ5?7AHd>0;~YpM682-(06uB(sIrRX5YMeyktLVlV^XJuUX|#7Q+Hh`I+nUgQr(;yTu2>uTWCwWI>6Gs!{@0!WP zz*xb~&icP}^Z&v?R4Gl^A~T}!%FW4y0n;9SQfbzjYb;c0W@7=cJUqa_4xd~L+yoYw^?u?@85)YCbEWL zd1lHulm|P@=rZyj?v|F+XC0&Iq9X&j`_7ovXvpTM`crqO>8?I5#y4dOjBtXZA?>?e zx*DZ%PvYQ9vZ!*(I3{VDgkwrZPJaq=O5~=C_m;?0ByJ>6<+Go8s-6ta%_v^fSZqNT 
zYfVOerZPq}9knoZ*6&r{rZYGMP1p1t`;i1*?Kd9V`mCG#INu|1y)g((je-H%KH&`^ zV#@&&{<%HV#ynZZOm@?}-PUjA85ay^{ZSXK-N)%xZ`b6|@F!YVkDmkJx}pwG(ZIYTV~OP)D~FvfKy6Rn3RnPi*D z9M3o8Wl#u~mM^2|j*;$WmGk{K8y%6%4QZv?&awA0Nb&70`+;rvMYbljaqNwA7RuBIuj!~veWmeqdU|3I@8!?gCdUIp@L@ZvK~o4NZ&@I3jbdx zN$>Lpp~N=~aOWFm`ES_0|GhN%|DPoP8B{LhVru%|%M}$HJEU)@<D z*aF^zwy;f>$3vr|j_RKG9m^um)XQ#%h{J z_G;Sd`aXV64=}}m1Yz_diO$Y95WSdT< zVkqHyTNj+G*Z2=B@=tB#z{#xW+ch_wtwvFk424La({w|I+Dy5{%SW5ta5NArhQVf< zk-KJ0qSt;P;l{J(FizzLoZJ3u*5D0w5*TRK<%1`nlw_^Y;nL-%pD<1?qL*CqAFkRG zArIC25SmS|`UIn0+D;}TJ3h9q!LtoGwvDvw$lqu#gSYP(4Bk~FrN*ZYrJE(`<))mF z><#=sDiRSH<;YJ!u%l~c-st{oFw`#?U z$0A9Rr}q^)*r$(AWkMQ5soWQX(Cwd8?p+J@1K#d;gs7h3V~xMvQ!;W!`xqxN28M53 zS0R`Pc&kT3vWZ^^_!ZR`{UuZ&e+K{T#Qys*;Kd6CNXQ~FSHeeX)_x)3<(~+w|9GIs zA&}czeIxj^QU7h$;QwICs@pkQ{nuP=Quk5NQAho|*)qYm;sZ%SSQr}{8`M}kVd@`S zs0j=U6fb}_=S;~`r-k1E?>=^i$LA_Jr-g0x`BmQRtGaJSyQpPjiJWUG8IUmLJY2UQ z<;NlC?3_7^=^}>9x`>HA?eVTb5R26CGSYFp<#f%veY|n6>veyq{$pWh#G|x#y_02Q zx}n8OG2G4l_Q%$kbP~H~R~X?{a0p-a<2%ETEVB`J*Gw}=yq~YNeaAAoaExVc9tH*+ z@REfX?EM3f_;P!RQQ#cpzlmjeUtF;U{~4(alp>ca(mo$xV?L(j}dAbg2?fjsZA zQSi(kVNo&TFR8ubshk{%Gx1uT(ztwE*2PhUzs=EaNWf`i0rTObj4mRuq$&mc3nu$# zm%n^kJGFB847EN9T_gao=6Qmf3c@~~L}i+UmxXB3q_Wh`qY+kUaV<4lHtFGNW}#yj zOR&7F6f+ueU|Hj~KyD3~%b<6}?RhT6KfPEhq%UR7X}U3$kvGeW?$-p{HN6eDjkHEs zr3EaQVJ0o*N(Tn8cAvM}84fMt3o|C9Fj>@Uotffrmt?i<(#6wp7|gNq*qGD#gw>Py zXfd(FMk4p`*SyV zV%X5Q>dt0Sq>dGvo!de|GHDQ-Ras4Dy~OFLs%JN;%Gv?U7ZgRMK&($K$Q>>DRuU{` zUm2<#L1pH*7-o*z4QO`aEhbpb&@qZPo?h&2VgUW!o-{|n&UaWtKux}4-|~$M80|{U z4tD1|OLy50CK-46j;6Qta0@C==rx5q-nQxuZzsWiWdQoE&v@R@Tr`U3IgRoS($pBo zH0Vev)?^Gj*nX1*0eJ6TPD4oKd{!!xUoGghxlREINnTFNq{)|7GiC+WRcYw98B; z5KUeqi&-T}UO``WoLOPZIh+NfJTh$iu*Y@P%DZCt`3j6>!(=uI@bst`Us?-(X&dJH zYyH)RvNI>+7)!}`yHy-e)mMX0L7R|6In%Vw3VA4Z32LXH(dz79WApx+dLGUpn^}Ks zJ?kRTr|B%lvY&CL{p##K`^J|6=NrdPEM;ZYWyr16*s5!7BFC7fO4r;TfIfhi#{zW}g=PZPK=snq5m#o@c@3e2|mZiR^ z_Wav&5ZSfarX$!&C46O8YlE%;+Uw1)jED`Sqh$hP&~|K36Sc~NejCuf4KvSWy4E8i z!%TAoWtg2hor0&FIJ<2{`nXhNZ$RG?{4hf601bhHV(%njmBz3@Jy)iswKyw9bn*y3 z)pv*=_+IH_h7G{8FZfdC0-@juMPGLyZ}locqmmUIC)f1|(mKQ!pW<{63&FYV0bS;B zoF=XzzoB|6OxYsFP03sZ^#(p|yJLyKsq_P$AbC)c1?>HRZ_83u?i=gnNahEbrZ@vm ziMu{-2pY#177Uq)$X2X`>M*N{+)OA~mXo;$=%i2ht3QN0RCZ3Ua>l3v8J@x(gBC|P zk%`Y@J;~_7Pq4f6XpjMS+tc;!ZHyUCA85YRf`KEFCb;a`VvEh%4Crcb$lkEvHSJ&=m&h>%?^Wv~H=b5qreC zUSQV-?Pe)`L*EClZ-2jlcn;;vkbl8Qe}KV9_Uy6rjSOwy1if%ih57Ap-pX6H)$HZF zgYYB%?LQCd5qjZA`YLaw3UU{&METn+oF8VaaD#DHw+@%6DrYb8(Gdp+r}%>%9Q-yW zs-U(k3h+pzmLne5KL6{0Zr;^1g2zKX+u+$&UfWSNE>uothmD;smqTcm_8P5SWI6eqz zEEo`veW*m|D#^zp@o+C(o3&#AbyM01ys4B|Cq)PEF&E4`I5NOx`f%-P zz1PQ^7y{If5|1*;;xwP>TakItA=BanAx4qCjCa~Os1JM&N~yc1_(+uuzOk>dja*m@ z1$w#T3|>M}KOeHzn|lU4PF6b$=C_E!2GGUkmu4~PK=_eC`G%pU09iUZ7?m@Jp#*7c zL2IEGv0nJ7L2uJ*YC$YCXUZb2Z7NL4ecYBG6HvHtFY|&8Z9zrvM-z}n6R@ks zue5LrEi07@-U-J6YtVs7-c(V<3#|~S7AUPnQ3^3@CAb@t z%qsjDo`l%lrxx}aWdGg5PxWJQsI))~`u6Z1&rcs_uAjAz#)n9FHjpl#^kTIO1yo6p z&Xa;>4nY&FJ6&3$WUgaH^u;5ec=$peUfp><05yIZlgmLny_zAM-A=0>So>IW8>b6O>BN{%}8Mubjxi28Wt z*g!?=Y9RG5YJTmY%|V8-Tc~v<3JZR6jJ#`y$pC$W03G(m z{*kW1J6GGjoF?43S;}w+FABmio8#!Dk0T#Vlqk#9^9)_w;Upf^7{)xu)WI57_!V2e zjHeZcwMd3=x5l3^M%hyq!*766C)Mmc@p>CAd!y~v>}q>)K^dT|k{0Q*>hbaYgB!5c zt-qpv9`o=h+TT=#c)r2(ZG3^nX1i*&m7YjW#!mOt4QuIfATZ?%9BDO~JX#7e*b&Xi-nz4_WQAd$1AI7Q*>|9a19x>J z@pZ~NZBp!FYS<9rL+r^eyi4osfMY!f|9QN5WbO%sY7=Asywf8=AJYbQ>KgqaQWhTixR2r;VkGV!TQPYtST3IIEwkx#K~1A)nuQ$7 zZf5rAd;-Z9`+WxnJXU&%RZCVu$7A{BdSVCqN^FR-S+Ya~R_NmUx?7(W?P53463E?MN+i9uZMt<1F(| zkT#ky{x^0zh(1pc0WcM<*QZ#IzQ~&NjvK?0g*b^lIU^;8G!h#DFdPvIS%I3k@sEpv z-(s8=(8GgqSq`9FqhzA`8A!XeL1Y(P0f$f7T#HaiFMnba7PC(LYjl5a4Hud#=FsC{ 
zBj6sy94kkr^~wYr1)g+&85497T`t_oxWx}DG0Gtz2Us4l{{F0wkYU(^_7r)!mbnYP z3y~t9DjOH{PeZ(KP{lH6Ckf6n#(Jixgi1dhcm55D`6QCGZ=w0U z{<;tGtY(=J9ee~s36B;T1oDlPp3~W_Da~xpn4r+8v4r`IFehOw-TSc#j2u$#9P_Grjg30k_F3{Nj9~ z!ywhAA6%zHb7+8*-Lpm5pr(UxJEv$a3aH`{ewv(NCYjbDHPh`xfcx^B9H?_6+*mX8 z-3Va0M}q%>wVih_{%YiSu+IfMqnk*Ji7AlKwhj^H0E&WAkZa{rmlYN*b5D+2 zo;EJVx`qXX+10I}+{iu~3gU_z!NLi(068_#Re{u}AP_Atd%1xv?ZVfa6FZ&7wLRoE zk#z+WW6{O4LqUl3h2f*J@8!}Rqid+;k!Rzt@Lo8Tv-gSRr*%AiW9IUO(^Yu;$;B_Q z;t`8Kce-Co@WsJM ze&ldk>YkCWxXnjN?c$rBBgpDrtx$d6&_Cw%!84y8iL&70>)(@mOPHm1Fo5N!c$^Y+ zPTa51=l;BO3h9Zo<^I&Jx?jN6^QXrAqkL+lm)R$@yL#U3c6a&2Crj^`-I;HSfMt-} z*;BEa^-s;o2U(2yNAi^39{oUk(J95H^x$66rdS!hj%8kGu2kTyh6T;+WlCr#R+jwT zI+qW8e91mbn2>woJ?*YWeh>bX2`Jlxw0c0r`cm1v*9StqYdK*u)(R_d-5kpRGPH;I zv9h~hOA-EB%%Pjswl_-XmQSS(6k+de{K+TPI3g*nS?4VUA3po<=SvUWVJ zoa%79TYoEm(fWzM`{n*UHCaxrX?7E-n9t_wTxZ_?+SCE*9AQF=2sR_F{=*AW-pf%P zJTI~<{HZ)|OwP0N^vao$(zGD{M-i!?N&t2{*wjVbf-rl&++8x104-fYM~t?$H2e}& z+r;v=UC`{1in{R+j22@scLjO74(4i7u)LtBnbI&p?+0ZX3dtr z1N*Z$q`T&0Emi7rR)ZPQ`h4C(uF6&5)f1cMqzW}T>CxCe5L#%94>GR>=EZjE2(ONn zH3)rk1Ux30{KlNz{YX%x*g+%35!TUJgn;f*`>KD>$#8h&$sF^#&Ib@Vy@RDnYRM;J z@y1_@>aHPO;Wgq7rb3N?W+sN#2i#Vi0kJL_T%b}0FV>#$tif{Bj;du7u93M9eF|h1 zy|R&VWpF+Dz*yde!sK+~M)8?Qyxd6BwY4s^0L~h(gtj5(T)2L+YZX65hI(ZXEGHg^ z@kMw|KOJ@3=N~vHo86zmJO8N0lHC?!^+|@|hujP|Avr8rTTh0L%Xze>2QoSg-eEL) zXw;DUFu$}oi!MuFa zE&Y6V)glE@9#s?;DejLfY7p3z2MFRWC_%1=25Wp(V#FP zPiYc*^$L-JHZaKDqNT_bP}V4JAsm(zY(UxCDo{wv>SG_GkfNwD3tlOV;y1>=3(lDk z<|rrn1khpT2RI+B`+tZ9do!BkfVBO=3rLhD z)mE_(!-UCfCE!^=}#EW30dyeQvyG>jqbL>{|`js5vr=T5m zgZL?9;us<1gfFqD#EWT+`N3cYl2CN-;;?rC21O| z!Ck8(W8oH*#ZJ-6aK$F6Q@nE7dm|Q05J_%;8J^jcFV23{e!0fTR@8hx145yp(cs?jkg{aRgmkk}Rgbq72-=go{y%8V!c8u!1C67CxGPUEb9+^b zfsGWsIu8kNnAR) z9mRo~3Huz(FxkTyIa;0`AV=MwkToI2)uDDTgu4(hteCvxdB?}6VEX031AhK+lvxkj z@%E#{fi8bRUP!$jkAjo=p!UBdIaQcP**bRCRGos2flXbHcBmHn8B!I%4K<;A9G2{r z83lkJEX4y0VR|Mh)|=E_3)ogHNz2a3dFANDHnN}{2FroJge4&Do^>w70#hV=96&iw zCF<9WRYBli&Qc8rauykTD*fVvlG!B#eVT+)eP*rvPNYA)ZZR3h;^f6!kcC&5FqWes z)?D=>IZm7GWo&t|neX}robKmwe@0~(NYoT`1 zHOI!))C+UZu{6U6{2A%vTz+P8u>ss7xTdeH)y_F9phf4NVv^F?aN2wZswopZhhUlT z0QLupWnkmoOj-^~n|W$1Hhupo(h_Mb>VXDQ4kZPUNO5w zrYl3>SH|4q-~FgJ{CfE7W*N_D9N)zHhUmg~jl)yo@gSg7`5_hH9i;Rsu>^%Yrrc-y zm>eazPaK7w-Ya@#i0L}WZJYPGdydP&RwW30l4Hi2dk)qj?xjUid8J*99Il<;{-07F zYT}2dmL|IM2rgLu6>-&S1!K#oJ0U0PJLk(1(SHL5U-=09K-&JIF}=e&-+86Z_pIK! 
zqkUWjCDE#_;lR&l6bs(MK}f}4Kr;lZ5--3!w>HbVV;7@^j8$J*{>9o<29xWF9lnRu zayt1FQ(nvFl)A+AV|`80Ct%5il}mn%L*_gUt0A&Jv+Afc3Vs!fxLT;xWPRTcp`5e?{{j4%{zoY)_m#FQ;_U7L8(VFwJU^-QMB^zrv z!ZTqXC#$517g{(N+p0&sJfOJLEH4U7Bj9ggKr6sw4Ht6PoOR z$$G1!HFDS(eyoVDC&=9z(7r0oSe?(SBi(Wb+@63w+tnGwH}LpCL9c50U8yt&ZsF&h z+AFd%rE><3UZmcoDj^Mw^9ybZBs%uSTnn7U?CW5z;VGsd*HJ~BJj#~Z!%sHUF6`@W z_scXanh5z|E5N-T15aesGUYTTTr^q zIkI&K(x;Y+{dfoJWWo;N6o0Kiwx<96ce7}4$(s4^K-8hAu$Jd;b=2vh_|vXUav{BZ z$^!-@Isr1$7g{-^YCrof7x#(MmpxS0gbkOUt#s>>WkrL5-=_F{Dhm_SE4nKjjD}1y zX&MemPird`63a26qbb0kCCC)mlT3w|8L;_F-;JHfk|NkJP4<@Frj932Ii3Z(KA9Yv zN=3KXUWGG>{_z6HR8_GnM@tb^8aF;Lz3C@~r2_KOiglu7$X>=+pW^r~Mv{G|6sMpQ zW6m^4W6<+j(ZXREDtV<+d0(|_N-xGArL&Rd-%r}W<)bRke9~?Gl%Ao)1%~mKZfKK3 zX{$Su!)>D@PV-TZY++XXzlK`|La%aHKNxS))-+UC^53c7!c~_RHPc;V6n77 zrQ}%4aEkH37tOSbipYwJTpc}|Eo`t!@(70Hr}^?Nk5DD3gL*0!Q|CNyKNBb6V?*%^ zd3?i6Z;uIELoM+5#aKT$=Ejg0#^n{k`2i|-I^$LYuG}=XE&ci6kF9A)cM9~Wlm=+I ze#J@wof)6B{XL!aA3R1P_9fy$=*(Q}^cNxfycN4Ayd&=c4@9if+73PJO^-ij_l{LX z9Ad0^7Py5ypX*RsHi6=~XQHko5)Kp{?q{ zZy_FUw42W>{d>rNl`JP))I477#Qyzb`0LrXS0YVYJo30)3v znwGS?OwrNaW%)24QKxTcUTrL}^ZIl**thv~F*^;*QZ_ulIPoGd$OM;Nrit0T6{ z?A?|}ZdH6bK(?jswAVGI|LOVxACvAnGxOjJnf09nub2t{%MRuJXcuim=qk^H8nGGY zbXqx5IopHv9SW=3KI{+pDL;X27;Ki}-)94!aR+)`0uuJOG~-^q z(gh(FpCDJloeSTrMnousrDrZx{05s*@8YVFmESZ})#p3b?P(+}T#8h|W%vkH7zMv6 z3n9*4aiAy{uKt4ktAwlu%M0NDcKXPD8#(?>uE~Gzh^yM#{qMX!MT!%0z=8;sK*n*o z1bi`gJTDSBZ5B5U1gWVz#1R{zn`j|4%%-l(FWT8MJTE}LiQj64rQu%ifU~^1dJ**| zevaMs53Cgm;usTnYCJXGN=THLrrFyv0bj2Yas~6%-=O8VHYZe=&topC{Kkb_b=^>2 z}z8-#ltYGmMX4FS*iA-h{$wf&l#lN1G+U=I~$HCX9P=u$Hndv)XN-*f2yIzH0C zHAX|<Nt z)AitwiZ=?jngq>v(iW5hFD{V?iBg$Hy^l}LblWsOe~%CN4aO$hSu66cZIBn@6>|FY zw-aj`^08_QF|Y1gNmfIisX`BZKJO{i(Ojb;je~SD?%_Y2SYt<^2wdPxmOXX{-qo7( z@|pP1oC%isLo5wSQA`O|^bh1!5gQ)2eOx(~HS+`E%S=A!nV2fgq4I{&(o{ueoP?`V z(TCwEqS=^vnX19hYe$Bd==7Ybwp=c1IIVqt9(6{UgfrZmU% z0rN4)3qi}7RKnZ`&dHjG|91QJlj!w(f^F@0;4uKE{RrKpjh`2^2m>4aCyKg4WOa5E z>`|Agn)$x1Sd>v;_{NO{1z?!8qt!KJ>)gbexa?8~vKv&9t{s)27=o3s6_3#WI!ki2 z<)!7m8y!f#8yzJ62OB~E?k@gkXG@j(s}u4w>R%qst)!wM42I&LzyTa(q`RX(kp+f9 z4Y&uzQU}myh<{02HJCP?SF0VJ2woG@%ElCmE2S3_I)tV|x)rM~uIPfw^(|5j(~8uTfXtL7#kYJrst)g%lVmg60DFiAU2FI8g=} zqSXXO!g6tu@ri?IWH6JX6zjh9Au9BtX~+i<8R(5QQ8#df|YE{P{HBJsLqJ2=Hp!uT3 z+QKOIHn?{=mI!&|-EO2N!~jdKszXN?SwN+en`9p9J7GUK^@`EqWut2kT{?p|a-NsS z73#t+9hl-E$^tP_ttlosYG!{Z$_BuVdL^KWiLY{p_@y*z9*Qp{hWZ7RkMi51!bgji z(`Sb24VR;SgV0R%0^BX3asOoRj$lm7yk+4M<Um3v8U(IUXQwY0GR*Dz7fi_EL9*E|F7ib zCFrX|+|8XM2HxlWawC&RXP#4F#bE=t8ByDeG;(J#>2xNDoz5ykSj}RJ0c5Z~GSoV==jc*qic%a%`kXB|K^^Dl9HkvF z)IKuop}+W=o4%HGyeULfAtxVywcPCuA7z&*OBW9V6%oB>W(pnUC&}PPa=4w+l1Sz%n*ng1^u6 z35xuHq+2xcLMpop?+j+g@X0m*SC~F7hfbMPc2>)C^)e3YNw{-{ZC|&=sb9C{W$?as zhE@fv=LGGEkkvokScBvOtgzrQdyvv8|@ox^MKtr$J)qzVd(zbHxloS_w6*sc7gjPv+@9<#leNN z+7#2u+91j(GrDJcB0MN?Wy+iHsSLL&>J{^NxylWTeASD8K&_^PKPqtf|KjW&qdSSW zeD9!R+qP}nwr$(CZQC|Gw(U;-v6D`Bj3?(gbJo3c)}6a%ZmoJz^|scx_Wo^swyAWo zL|lWkqlc$aYm43>LsZClPC#KO4DK>E8NK=l9y7B0A@CZeAVoKCoYNZNOhqxmaFQ3Jld{dEi7f3L^>|KhX%<6Nf=9YjZr&RbY)Wnn7^C>R#*X(Q= z!2*KWB^}2{_m1>8%B_>YNJp;0Ky z)0`DRlVD@}aO44P2gw4;?=b*Uh}Hbr@)%9>HVU-FyZFeR$D=rGLx2V+u5HsCwe0!2 z9T40uMm(nT22v-CX(w9jAD;y9*Tav=-S8>8YpS($cU)tI6n)ZM&^#rim zg4`s`Y8K|IY}FowXSSKA_pr`59s_^5fpC<~GlFLa2a$x^g394i$Uf9Nrk(HqfYePn zgwm)(5z2qjJU=0CKsZatLLO7@$fPEBPCCE292sJ}B>Xv!**2+apyd`IdE+!MzG zOTfGRCrgmi^iWSfODpl-p7ODEZ{+V*O>f-Q!QVF&t2r)9I(CDEUSQwyF0zees-@n* z<#YDqwHx;6F@M57D0CMR~4jf83BV zfE7axfvXMxmkx`bSR)_=`*kWNr?d;I3U8jni9v|n1UFg4VebA`SLKK727Dpw&A?tts zEIr6WX4NNH2_a1ox!=rzYy%CRqXZ+zAYMl)i%aT}^a24O1h>iV=dJ&}Po510;7E~u zsASF|ko)E2{fkIJ>|5vXeRZWCe-)sFNp&?Gt?Cg*xQ(tv4b2y 
zhwM2TnZIDKf~-&-M%A{NSx-k!@=np9Be}}Tnb$TYaDrPqW29T0^fDiJtqIDv*EZ43 z)hqdrHW6N@eW{&r$tD!S){2EGuTY~7>?$2fylz~7;VOAI&(MA3d?ytlR_hhBFD=># zcf*kycf*Ny-c2EYp0KNCh0b}Ryg##RcU~{|UuD4xM!X~=#X~mkhBAbSaiusMSN-w0 z-}gjV{NGS>Cg{b6N32h|!NvxJ7m2Qz0R+anPT3NcYFG}pFcL|}!9WBxHil!ENKd}y zR?_0SJxd1T#476{s-P;~s`E9&g|mC`9AdtEf8E9y5@ZA9N)q%S${avYp+UWZoS!ml z>UnRFL#k29oLsMzr!9DbRRZk5qFwQCduGfs) z4iyk=_RC%naA!MnJZ=|3ab6gFl@GJ7$`uL|gZ4KIohqk%AdbP{ta~*N{IAJ#Z!dxJ z1tlri1S>D-{c072f2Vk7m2fSE-4wavnRsb(H%_Z|xZxJ($nii)r zdZgHN@jZP3uV2Om?va0-2zLY#Kf;|I_pix?T^7U)eDt*3ij2jzitP58Kn~1L%gxLu z!0Tit+)EsJ6o{AgF6j*m-FI)=kR?@9@N<+X0JIvf<52X;-K2+X(RS7J3S(^6TctbJ zzH+BuUybYDm+!QBmk~QPKK&%`I)?{YxKxBZRFr{r*TL22a8UE~Ok=Nj%1Jx!kH=n> zjsIz!bSjDujQ$79^f?|Tqwv{EdTI5#tj%Thn5|Y8ymOj*yZ+4v8cdGPg5j~Ipy14e zsNxlWb=VLLQ8mbAz7+b7)M%M;tzh^0qQ0WPS;;;dHZwSioU7SMKG(7evjF1nq6F}v z^2C4`%VI)2Xt^%y<$KeC9u~PWo}Q7bYJ&Njq)TnWttA0OP>K4HeO| zX9O^qLX^g%Y^}ACV1slG>FrMbjhT@KWqe3sr&T4t%8*_8rIOb(5_@}U*C|z`=8ydo zt8nJeGG64&Ce`!;`$*0uEA(MKkIH|9;cmK4QJF5_Z*oQiyHGl>nrO*t$}7 zS(ekK%rZtAMAWt|`L8QLMUToimxN8&bh3G7!j>dUdOeKt#Q3*JciGl*N^ezM60%{+ z*3}s;$5#2dY1;^tJ*@*lQL_xup|zk+YZKwLC)`1}aUM_=q|RNZeDcd- zmsea<-v(!a!>B}Zgoxe&xUptb`AkB{36eCNSW!7I@uKIBHbN6;F1qXVS3j>^vob|;j+$S3z}Tba3W?3yHfHcDk<0QJCFQ1EWTC*l_k zfeyQhG`WP9n=qMVM)Q)eqD*U@?z5Aq#vWg}98q(LxkywV4M3(DTgV^m5M^sHi<*hB z24qe#r1HzwFC~vl4ATP1S|OKfzEOAOS44x)syN5z<}bOlE~wPzkw_^2SvAaVq%JqF zF05090jmw%zjhq~L3syPp>EC0uf zrgd`KhV(FAuO6+=YBeL_)P1jH{Hd*Yp7nzX6>z2r#QyDq6HmBQ(s7Fw#;pt#Pdk@6 z;XCITtyhf}n5wq-#DplTp(hle4-3FOP5xg6F<)8V|3Cs+vnauhvSu z$vUFitq!v&n={0=OtYoI#osao<)HCBP6?SBhJ135kdjy9ro$mEOQ{e%Z?$5Ul^vLf zt2Qz%_`xWApp_8T6n?16`2lkTq3@CCzkeWF7)&!$z zv&CgBu^4GXDCkE;cQPEn?a z8;5_NQl0YF1x~4SF}Z1%oM}{bfn!%n9bQ-o`XO=ih1@a(wOCZe=gS;ecU@Z}2tJv7 zxE^G&diXGv6!8@qP0Vxh|{lqUpVz@ z0jY-=TuzpYHe$M`B8dqvY&KvZu`R?pF2D1)QF+Ov^~9Ji&Zl5qF3=T@fgX;^m7UrX zRi(W`-dAqIe&EXwxU`Tk>Q{8zcf02`4@)>d45j;Wy|F3a7ewdeqx$Q1%55YOzbV~q zwfrgYxwyzGe$0!O4Y&O(g!?DP+<@AT_44g6AFS&yt~cj`VGFJK7a>8O$L3_ct0>!$oOojf$Uuq;_cfL7}3-M(XPTN6VC5Zl*X#UKZoDw?k1A+d*Q*#Aluz05waY-daIN~Y+?#_{z0$5 zWd!?S`hG&>^}*!4L*M+d;USn8+sSLL9u?0ylsHqGF4B)E7FFvRR2YV=6Y*qihjKWz z;v2T(z}>j#uB!*GP$;{9N8a^(UCPol!+UUVqT zdXkE`IAK%)vrxjbSeABI=?o%J-x zX13g9atG?1jIJQILI~%w6*h(v(tN3NKG-F?NUgH=sEZZde90`YLDUfwRugEk7*uct zaIAawCuZif74Qd?HkFlALA%(7ZCfvQrcK>rZiQV{ZslAw4P^pGIjZ4qd9jXa^O1!l zj7d*eQ{$_ewQyBZt7fNOvmx@WDie@~@DR16MqY5w!kC`rfqCg)(WRSaKs~JpRg(9K z0u&G22`#WKlx{0&oHHFILkV)d3{(g6wpW1=64N;!Or3PGmHg7QJ#$n$6B}nV-ldZz zvf}{`G!klmK&-0bS!#+u<*k(%l9V%J+O4Xhe;-@3Z&GGxfQwxE!W{puQ=N*ajCCjh z)6}@Xj`V2A>ozX}<}p)y@a{R#2D4zSd1);ui!mEy}ryU%`QF2j*%6j zR6n-X4@_<(oYm?ns>jcs?29Y{nmy#d+bk&Vmez4&*et8j_Di_F=1O)>x{f(r+#{{E zRZrYZ*79f=U$)AohF*;-R?K$25ta(^4bA11wC9&@d9F{571)-iZ>c*1h}z)@DWg)n zFUKYw$|mIcFmoRlZ1x=>)a^B%LMsSb;b8Dv9goMa&?AY;)7y}z%&hnwLb`h>vb*t@~M$Hzd#cTnQkbS!Sm6|**K2*bBfZq#F_Ck*nOl| zw_1Jyvv}82UW@UVp75u1082{Q$~{q(bNivFN+?&PT^d%K4v zyrw1Z<-#v@m$Aq%cMjJ(N)@RhtR|t{NHRY9g}F#uF{}L;m4I41{)dP{Hqx(Q&X`Ym zq)+x;;beV0Pugn@Gdu2l@vnH+ohrrdyH4c?2jKr)5keTAJ-hmzbS}jJ0%H5GD?;X0 z&SoA)wzdqywnip4wpK2#|2yfp?;lI9G|)cnnJ4kcGXzmUq^3+f(@f0`LyZT2EQM=Y zGX9XJMtxn#nIzNA+=24A0aDuDiq>xZE7n$KZKH%!dmltsN`+?WXL#w-rAzq!O!sCKMDIMPUT6hgRiU;hc#{v}&#*V|VdkVKg2h(Xjs?7S zcS;i+m>t+Oj<@oop)SE1%3IuXqSg7mpZOA#?dRqg5?@yAli4fXu+?p0@r9|i<3c0a zid2=W^RF$?dwebmb&kyRb63hrTdUnN!i6+!!u?wOWoB;{k&ke{h`j>VQb%Ehn#?&Vyo&rF!afhXzUK4G<5wGH3c<)mS-pFU=lPQZS!YQr-@kQ zXbclgmjUAqCAA~m;;t5Mvn^b(s9FGX8fRr;RhcWS_%y$j7b4d5vevYvYz|e;G`s6K zrs92WuwsMLN)J>o>%!|$!p2n#f{|F7i`raNbf`u*&cw+3?>`X$?5r;lK0&I3D+fiuM7 zD=ztZt4EHR!Qc|}k4)T%N__%WcWkI(UfNp8-re37qQ1Vbz@)q2sbRE9&nfWkC7l}< 
zTJrMYFA9yDcDaar&Qzu)dYNmB8Ri$xFXl6K0Q@R#kjy(z$O;VFxzHl!M z8*lS&a=QCgJ3p~?M|hfMnX&9}$KCg)u*EyQ+{NcgtrA0{M@=7=$)osGE2QzC!XTGE z)cNaoAZym0ptodR9G9iTB3Z2{?aK0EuIx)A;tx1ux5Iy4I}ywglteDa9pVkHClz`eTZ)a*tA8U$nuqclW(7k!U>=v zJ~fBU-bIIPJo6Zmcz+#%g@zXZEU^9K4H{SU4pS;3Jz#E3P6PR81kiq}B2szS^u4Ct7}&_&$$_zu>bWy1aB^yFl6nL0ZU!X;V5QdY%|DDpzDRrjj4#Z>*{#%C@#WI50?Prr7Z%=aQ~d?Rd}g{Hkv{ewl{F#hc#vs&Zz`LuCSi#XZGI2l*>ZA8fA&bXL^{K9^Dx!6Ijl|F%s_KjrC{4 z6V%)c3Fa<(&w`SdN+D6>J}R~`To9X`T2gtnOC=vV;c`c_EQYYzwj93BWNU`LY=?TY^Vc^)_~>Pzk77oA$%c75}{d~WthGuwCgHyASt zKJ|7q2U}-9%&9iea|Ud|C?@?t)~RTf-Resu8hWWZHKcE5mmr=LfAt;Jr}zZ-LPYk@ zKiya}-z4qnNw)8{N_%$9o^FWWJ|-Wy3lNUXzze+EXZSPBeiIw7F3H}uh_525ZV@*t zCijT8VZHDsO&EIhvp?3Tt~!h>=KB-)_)U8&yCSwi-$4pKa3?=`FP=m#BE}a5<2M&7 zQ_A>i15cr6rjM1F)$kr@F3 zJr+i#XzAqu>!kNM$E?zxT4ByQ$(5?Z2?kNXTMNh;e@|}76N*~&rnfx>p#!{ zpu;TzJQBtld<}`(9j+kL)S@7I^+$W89gbg6Y|kuJy;&|X7k+2nP?e79znJo!`yovI zcZK6PB2(X?)pJF4u8`GpMPwRsnsl_XK;(cKbQlo%IP^Ny*wmPeB;<>!8QHy$T7S9SUK|Hd}*T+VJ>`Q{pdfA5Om|8E?hL>%nx%}iXa9PIza4gKHG zQF-b%DyR~ue6#dCjj@rvqQy?2)`nthq9x+6NJzxDl##dNjj+=Lj3F#7gyJY{Ob!oA zm}S3Fo|CI16IIJ**58xgqgCbS?cI$A1%>IBcRSwl-?;rOKA$f$egXA{Z~8t51PKgB z7ShCOEZ~VFcn_Zq_k^Y9r6#CAzYbHT`harMk%fqHhO|J>g*oA43{j4aGSVM7Bh~L) z1Gf^x647G}L;qo7l z%=szO2ur_3u&~}=k#NHrL583QPUcol9`<}Lklpm6-&vOhf^9tUvX${H4*Sn*BSNj5 z^}P}wyOE~0?Fb_lE`&sZc_&l(z|vv9kq&4{{Iys4-c3Ov=wP?JRp-iTU;54nDvrH+ zi-WGA#V)jplaKLGyD^^o&zo-EHPwK8Ge)-clxoniA0M=nvoN#P!^1O^Yb)_(K27la zuh-9Wr<-IuwYM`t4H$R4pjVffv)>&O2}Y2j%Af51F#OHt6r zOxHU2D6Qg{@vE{L7WiH%?N><}OAoe2UuG&HjoPE}DA6=hO+TY#^GL7Jd~ZME+fi+T zX`t>b1Niw8Dt-XCsXOZc@Hf@Zhy+xf6#x_8;(Zmgf&-Uyi$K!&JBv+y(HPBFS5h|u zG~XhC%#OxATEBT+x{Kr}$FusVhi4&x=q5Smyi^>J)kq?qJ%u#iPI6Ou&A4<+2EgGY zRLqb|l%Xr<5rX@=f_&K)6|l`15=Tq83yCiAq;)*|+G7jSvsr03)66+4+>a7;{oqBU zoGvF9f!yn^ZFR7WrH4W#b)BbRal5$4*pW7n6t~*Q(3YgIDh(cN(SD9uA_$y#k@BWct zS{X3*_YV{@f4(bjvI2WO{xGXqfSJvS4{7d1Wpb;G5!qo&;c?BRltFbt<8E!CuNLqU z1EPC_nG+0Z(pk5VXdbYvY!BW63K z^84>52XWT2*3af52=bIro>u4`ZaCEJAJRWdLYA|Ky-%iLDrG1x4OTF8j-wt+eT6vc z9zI4kIvSjMp4m_i1Q12ptQBE`@N|!zW>Vsk9+Wq~I_n>f|LV5bIOi*1Ea0n zvJ?_(!{5UO6}&Gq@ekt;@|WJt=+CXJaQSQZ%_$jQ$(WEvBJFG9_=Ov?awoilh2{%K z*#}7P7$|w(U=6`gdPB3p7(l%Qxu*hOjf(pdr5$2|TK$z%do8zFEO~FTv)#2i7wdpA9g`~qBlGoH2h;x z>2spQoFErNIXjG!TP8_Grz7fxpMI@=bihT85Rnc^69Ti*8(9rBrwZN?deS6zYCkwk zpadvUWdM}%{x}JCsi6To1#!uG&d#FS2l?omP*|3s95z!upqAko@dTU=L9)gkV2?SW zkQ?};+q?XQgvU3TnwMf}Sr~mr03ceJbw+SFWp362CdthyC|!~!H)!Mc>CB}EL9P3* zQ?4lET~ggLk0>t{QS36u*R)ihiR>OktTjibK`h)|cJd>3wtkUj*)P7nmEPGUzr^3_ zkcOo<>4&hlzOB^Ue@qPX@Lz+;JMJBrxF>To{|@Hu4mT+p`uT6+c6HpSn#FgE?(}yP zChdR2U#Pel+nc%mvu`6#O-BJo1MxGyZ)1$M11?b*PH&>@F?*kMfz+X7_oyfhS~3oGt3vvN=mV?F#hqj9fLJY(%jIOo z_hEMQ@z>YqC-wkarP3cm-q?F*c2KW6+2IaW}K#wYSmsUE|}fUIT!8O#C&OFOiHv{+GWl;!n_H!Qu0a)1u8TM zy9P_@;fv$w9K+gDe)?K-mN!0Y(bVvAc(nu+p;(PgNGm~^T3%okUO4jKBY0K znHtd$leowLfDRXhB_szBV_+b{n8=Oz`BEAG5P>~S;P7g$#u1xEzelr=dd5buJff=i zB>|AzGwFT?=!`D7`+~FPasE2#Fh5O5-{z6mfz8&ax*;Ax@tyoGl&x3eZi-lU)n=`f z&{PB!FutfQsgD3QTM8Bxe&O@|u|i1l#b9FmvqTq&-u<+?`>Q0ty0b>5XZImEK;Ly9 z@kM{gEctf%8VKaMV}j3&fc6C=+kmK%8tjP(m-I_?n8hBlPWr%46yA7#5z{6m;L$E_ zr<~LSNiFq=Kii#%x=D|fMp;Id(!V2S&0x3HDS4f!TX}$ynAH)Aqp~nZ^qNFFZo9OM zDVj;v2$yBT5X&R65OR|Q0!(UP)zfKa6}&1J=ws3PW%ONe56+c5*~c;mraXirRNmTG7^ zR*$<@ky(*w6$b8r-R3}Z9tTO`*W?WW36L-hgg+8Y&@ULvu*+44OA}HOxMcWwi@(rH zSN(EHR?hIb9~Zt^*M&9@_;a@hc?R)io@eEVeTD*H!x60d2pdEW{L*3V)o)nX!3-$@ zg+BI)nm#jR$~j!0lfH$N#SlmW`R2qmiwZyBUMb_mj{+p8k2T{TJ9ZPesoLMF^EIW!E&N zb4#;kOxtesn|jt`SL9a(NoHQ=nnPErTBMLNn<<-j6FwX98sm$iiimWMa3>Mvp)4nO zM>OPo%Qd{i*sx(rE>#RA0)LnO;9LAHqvudOjfd=zL%L&UP{}6{8 
zx(mAJ0w_Ja!$;^D;z~jC(3fIEjC#8_s1QS;Wk56LlWZiB>=P;5@p>q)>kN6+?BOZZs)yOmahw@3tepgBz}= z0KHmIA(eZ@c$9O`2o*-*HaJuY=diP%O;6V4jauEwP%;;U>3Y~627#__m#WzA1l1k~ zUCoXUKpF=8T(#w0+g@{ZR5IHj)!Bj3(Y1^5F^b`78vUptum}N$ebh&dDlPZ?$?ysr zR%N=tjdl=diuO ztQ@O1qD!D0tN5I%Ez-Q4Rb~ZyIe}5i0hlAHJm7#|ohqX-if(FqW0Ylz3RRM!L~RpB zmrS>;n)ujDw?s3$@EWRGAyKFzWUezFCsj@vZqZ8<+Ix@9o22WhSXhZw$NR7x9W{!f_5-zbf>4e>DY~TGn@97 zH|Vt=_0fB6W){99zZ5-#8V#VL{iue7cCn*UD=#d3NBjZy0~HMoRSYZ*3Oe8C;dgfV zFxO5Z4r%odH{ecYKi=hlrC*mCP!*cSzn{hc# zF)$t7NiqGrpTE&K`lGi;&TQm&ZiHiSR2(B<>ga8ezsFr>IYeD53|%uU;UmvE@;J{# zf8vmJPKE(5B1E6qFT!>)p?E+x`{-gm?(00a!#qWQCK}%1A@s+*iMsqm`Q=iA{OgG@ z9AacDBzPaf_*Db}e`whFPb5SFKmu@S0=PUB?jnd;MDfy$pxEX8h|2t2iK-C&YQ>>A zfNwg-qVPW%linfS;JqAl@ziG(@HoH-z5~PXr<(9T9&ETT#{3Yc{ZfzhKOK>g@4Xti zg(9Ss>#i5O5V2;h7K0MIc9YA=X3y=eb27PXUkh(fkHgu*hCH4XFRf6&Rs-E`+j3OY zumKh4v^{HcE60LzACtAqYxe*n5~vj`u})a41eYag&S>3u|RpVKa$!@+W5`5$)ybSce1yg*BQaC2XDG>-9OA z8&R%lrx`=c{*k(C9VsXmid9v>0XCeC#G++8ady5)9123kj(wruUDB>q52Z*G;=Q0N z#@Y4z46enIWl+h%)he`VR!DCf5yrzhNzU3rGQ}9w+Oh>3D!QxfjIjfvB&9{PpI6tPKi>GXmQv9_K@21*0s4o<7f%=0GMpIA24AwKcka{nIe6%E!~MUR>`BDLEp zyF=IdCsTL|n*s;XSXMjx2JA&w^+sc3?DTW%SX*^r1?6IIW=&6rVJ_ghby#fg zl_&w{{YJ0Y%E`xr9(sQJ)uA!_B;LB|YBd;3TP-=zj?EL{Ne(4WD} zXtrV*-^>`@(Xfpl(ZUp;)%$F{bI(teLMe5NmZI8ilm$VRM-LoeIE{-@KWUyJ_7oR@ z!8t5`{0`ToTrr-IUf|y{{$&J4_Z%3k-b%GWx^T{jdWAe|-uf_!b+CvEZN+C@Nxy^w z^Dx->a(A45QKMo!sF*RqLt$DC)OVn{^2gm;e|6hNRI6+^TVvo5+M!EhP~g7Ya!>h= zLATB@Ca{@Rq{~xOUH)BXaN9G&Q9J_a0bJED=v=L*yZ6L>7U%ftJnk2AZ-|L1g5_Q# z(&BSXwGUiA{qHpi)>YG!?m?a8Gu1vBX6SF+PrZfIVB5xNDQ#G@Q=c&W6wx!wvwPfH z-I%cX??E#M#d=|RXR6e3SjRj|dg6{0UT7*-vBBdbXXSoj6{~FFZG8#$oY7nu8XirgR0*VU}6URP6lI?iE<9E~y@6Pp)h$h*6kGU>eF%DdtqF(k*D* z=nrEnoRSUoGPY=-GGx;`ZaFE`Ox@kGX=NyJnXHzW zvU%*S%}CnVSL&=Rch_3WyNr~>C0G2o{?6iznz(9P^B)pNvNZV)GY=?1Y0^7?)>Y!R zaI(uyTXXzfHD9+Xj<##Z_c%7$q?3Kxqp6o=Ak(J-~wx zLhp$@X#DFAOV%PRiY{PNlsezFu$hCM9a;T(i}L`yy+OlAO!E!k>l>WFa6?ppC)Lm7 zMPfrEaFr`w8Z-HVlz{??HFScOCDNg<$T~KoDXPOX$eE*Q$3@2`fmr^dSJXjaXK2)%AM#iK+xNl}{tE>5$|XR7u`d z00oG?m$es|Xkl|CjPV&Ya07A$(h8aAk2%%ijM3?Mcc;GAu{UWEsWj9BNTgB@ziuy+?e~+_mmgN_KN?f>vc9M@Vo#(J_Zs3E^ z<(43$r%@%>Cu7tP#>Hc#PDw8%=*iDE?v2bLAlE&XZG)59D4K5YJb${ znm47hhI5IepBqep#sN-qK=D|EKcQ8+{4$jcB8_zi+r579^G1t#!phunsso^E$5L<& z6r4=?W+w^`GQHB>{NkGg7~J&HB{z`G=0ff4cL3(Km*5xG;En zzN?AUGJ`ASCmUOm$Iwb4pEiwp**f9tc$YOBMQSCme$#s`S+o05vEP~I6ovz=aRY`~TJ2>Pk-i^y{r zVv~=#qSO?O+ejn)$P-tmed_aE)O+d%>>Np22e25e6^M6`25{am_trl={fHvJFJ0Z5RUJ3xC&RrWq#>3uL~q^ zpqxUAu!ft(+oY=WjD$(hBHk|?2H{wZwFojVpFB+nY{Q&YArZGYBfmwAqktWqR`w!> zjB1Z~oqrQqRjW;wZ_RuHvo%&>E6G+E)7OcCu^D06jaXX`YFm$bbK}vqVpX(!PnSc` zm)CWs&W)hCqCX}@f2>5|PzB~l4PT><2yI#A_|RA1Q%!$;v>&|L4L@y%-;P(j?8!U^ zq(A=}n^Ku=9s)9NGD{=sE#T~~h-y;wgsf>v%yw|4G%XBuCnIapLJy|B4~QS_S_JU& zB-vhaZZ_X59Qf)e&YQ@0S4Az#0=TwMyx<afJ_B3t_Zp#ZEu;#?|9txO1MO zq)qc9ud1!H5*|&wjmcZ$mKPx;+^5PUGDuew9&E85Xt9h*vO0%JKd7Z)Q7GjSsmR|jXWf5tF* zbMx{bOo-rj4vuEW31bjZ1Vp5z=MA%H^&JXPDhuS-K8FobXtGwSs_NYZN1EAKz zS4AvxuOH_9=>!Ep^iIjeIe^Rs=1(Pam-`?`KYn+$@=nta>T1UHGO_$CiVm58xy%J!!JU z$DKkM-lZk+t#rD7T~6=?j>5-3!j0NM*BwxQbDXEK>##pr^E!&fG$sHZ z%1L}Pycb+)>TlaMu)^yT8i*?oP^YV%UEX_Io=4HaL1G4K&RQ@R z;i0tss?5wLMk95EAu+@xW0zOgouuEu5S2&zkc~ZHH`0B*QF_O`3B8|=oLJ6>O+9aj z%o9d}N`is%fco3QX#7-GppSY``95Fb3VQKpW&;W!@g=3fMAT}cJjNRuE)i-W&A&(r zsP>OgR;XD-u^g9_Ojad5PZCX#TB4CM&`nN#4bg0sfzi_FK5M#YqW7-z?u|)xgLVJ& zga=C`&KaaHtF(?K3@TH~glwHHEWh>sZ-w6GjDYj;?_b^WI}axKKT1`oTG^R7xVegX znwUBMqwxD5KYY@z{C7TV{A;;kwR|E1WEA0tY@!6^oidy|?N7=jjlQ*8_H zW8;C5iXoAb`gCQDZL!J2!E;%lCkdkmYR-t%VK=k1+6(u~=i!z<+7D`oElUqdMO)rW z=+t;Xg0x|OU7U9YynNdA#BwCH%rz%zEh%T+gYo2FH{$P}Xgh{P5v)fw#szWk0@tHd 
zxPD5u)FghDL7gdm$F)UJw2GV64n;UTE?cPQxJL^w@({^veMdkod*7Ntn*6UZgO^@( zoIl{yQl<x#p;(|YOL z)7b-`UjhjO9|HY*-y_zav*hebraCXfwI{1)v~$L&<(l0i(Pj*r7?S#k=6Rpt{+C(f z_+A@x<13@_4+x9ew^?KOuSH_(U}0fpZ^0nzU?FSfZf5(h9BG>JwEVWW^7dzs0Fe7#S1hr*pYPrM;y4BPtsprwk16HaIs=xp#H%)=Y&bKLMW4M&Cd{5{ zXfKr8$SN7f(w<9WYMa>U2wgu=Rz_y3b`V8ycse? zQM`x~jjCORkOIn7qlQPRYMVRH8Uf5QB+q*~L^R4FFN^3q!OP-RB&E$pE8j6dFZ%-< z)?cHPDnSoz{>QnOJ)ojqH_IrXGPfR_LXBAOV)PFX$M9g~s(x$Sr`7gN>p1bAOjY4# z5EAUM%PvdOGyQcYIet%{S%_8lxKydDG(v84ZC!(3tr{bEf|1`@6O-&;jnO+h&DRp7 z6O)=pP=j#QA$I82#T8p^hc_iG*M~{yvw7O)dz>o_EU@0Yh#a(UN5}EqBl15{#D5@o(p0wXmjw~| zwmIEXx?4+A-`Y?Cq;|XNmBpllifW{!pa^t+2UdsncoOk(Yw|Dj`G~xCz`m%)?w8TY zn5n#1-`5P6%jU<+(3Rd?s>W%LJ zekikt!YCE$TwDq<)M?*aYClQ^sm{J5D!Uf8Cmvq9B!$AyjR11<@%25m2dN@5L{@oU zZ2HaK;8{zj%uamEpn@xU&=5{E`?sJ56S(9bUGM;d;|bV#@~LwZCuQ&hWrlRL}syLoTsVxim%yHtC zik?_){C8k7I1Q`1x`1I>r~shqR#1f~D~%A=d0ZF7GP;9k@OncmjW!Rkb;mqHw9z=> z=p|0E2+Di~At#g$=jiP))G^LcnPN{iuu;4nvI*oHtK2uw!k&I%=OFbsTQfX9nFmdw z>Do{5|Ew_n?3LF47lpC>(+{Po=%_3UqJE~ALcs|&Mn2IL%jpmn#W?n+OhibJ6@J zx8h4HaU5h?W?C|1ZX4mAFkrG)IE@tElIs2c0^1;f=O*Sm%KMkVmKF11O7fqEV!z8B z_P8;w6aX#qiKXAktd1`fH&3BFkR~_y5lZyR9MZvrv7qZRAPTvWmVcdcDkrDzW|V}q zT;EF>Zb6gU`}hS8>_?~ogbGx5y~lPH4DY>k%k0F*4NJJAhKvxz)1HOZn4qNI`QHjd z`&Jmmx5B)Xx&NUs>3Xd8in_nzne28z!^iXWr&6U`i>`g%HQFD}SnOMp3NrwTJd9Vr;Z~`j5#)%l>aDSna zDB~8#lzY01SxSl4^P~(?RRAvuTpV8r-u{joqG6vTd{VEu+;kV*d9+KqTjw-IOYhK+ zVX0U%Nvx^&Y_`ddRiGzLkT2)-#p*u-0?YTS>)QT0WG>!83>4#Skd<>lmwH`j21?pN zWuQ8uO?TGz)^rBf{Gg($PgAx$MD-DE5&#?hUeZTtpRRm%h_?5V$e%mni|Dg?I{g=Z2X~T4Q+z8-0?2b2~fxfbG%qL#yu^Hsg;iucIP|F)7ubU`2WY*I|o-DXj_A^ zZKq?~?%1|%bkMPF`@}Xowr!go+fF9;-FxeM@6F6N-@Keu{i;r?^4B`+?6vn^Ycsie z8YV?9L?`@!+V6Qt=s~3**4jKlV$;OV=A@>K^xy$?Ck$)Yhku54C6PmZbC>czqWjeb zx_gA5-h@o$n|#O#5y*w6IRvUY2=-(4j`jJi{HK|l7F9{XZQ<{}B0bt<9hP%B?F&Hx zS^n`RnsdOkI5v8N&S|f~1x3LECW8=H@yj2(UsC3P%e$4>geWMz@Qxh342@={$hL5J z+zk7ol?0=VJWcmFxX68HX5MNXpJKPc9}H2rOVq@QIQ!8>ia`zxL~tdifsRtVUrdKf zUxL%q@Y8-sj{59kT74LBwhWyhL;VVJd$hyQeb z{U2r1Caj_(^8Wx}Z2tz>zbLE${$CVUNQY#>YTxtf##Dc zAH)9)`mPvbp2}#1oAqcooz3St^^`S!Ijz6#3$!wX3dzQs`He5j8;wXd-0bM$XbfRn z3c0;^^uPl)L@es|lRN_rs6CMmLWDMB9hgL?lTfIyszh3VQQlSf} zY|{YRiyx}-)3NR=sGmRNT71#Qbtv_V711c-2@4f}$OY+mFA-VQGk09eD3HfyKR$Sq zs!$x?p3ZgxT4#mKdjM|Vp)p#}C5gwgKDml?su9k)h&c2UP+>&Yd} zm)sUo2T6BoP`8?`q&Ka7WJ}3Aufw~r{rwk%*+0(Z!3<_-#9DrKtY@sH7r${sn*xw~ zdE%eugmKU4%@mvKc8yTGFtoL7SXe3k#bDFl45l7n6qhOnZ;kp5uYzxQWjZD!g>ak% z-u@e23{&6mn)?rU{Sk@;$Wer{=D55{mNs<-coUgIIFVWpVfB-pWOEr1M9%nLGUJF| zi)%nsZ|QA6hhW3{0^zOsPY;m7OPo#A0nM(VRAcRMr#LLSEjz$3Ln|q7P!`n=Q7Pqf zW(RyPrl6xWOH)zrqhTxl{YF=>Za{lfsHSYRjC|DXh8 z|2Mj%e|?u=pG|UmTWcPsxa!!labg~6N6YomCyHWrNrL#zu73q}=IV-hGm z2Mk)Hy+h_=c#+_oe3HVB0)50w`L@DGm7Yv{_Pwi#y{Dp!DshY84D7SX!xea#cy&#fkN{tWduhjG%>_G8*yf>E4@CW&P@g z{R*o8&Zzl4r~n-AHgCFO++F! 
zGW~;5-VVUfbI$6iO{#0wmSg3>4^fHQqHZQIuZaed8cCj*uXr`luEzSC!S+eROr^l4 zMV68w+12o63apGLd%1b(*c(<%ETZ@%LSA;cv73hA1r*=C;><(fX^g)i)*oHu0$o%H z;V|s2X!q|b?4;Ka^j}q2iiP6xi3-VNK~@*a*kp}?zC?N-+-L+-SFrFLmrD=rVV=OI z=EB`hYAu{jV`JMRyfVfo(1Hcz7V^9&=p3)hgkVFHpA$q&glHzmma0yVXutwmDhySn zVg#bdjRQ!;cuRt^4qYK-w2~Kb#5AgcPQUXv$oh9S2I%G=$P%0yPEBYsjx7PhC|b#D z|LG9&KPU{E{3he@Tk-XMt3TfVAIk6F67c(J{_iuP$??5X{XdaHzL*-z>Z-iM0v`5= z!*XxbD53lbv^;Ja;OA}o+GsMcwkHD-cZAV7W&?r&3xXEBTTPH=0l8rDaS}O}@<|ykZb9@&G&Nz9#r`=C#uxIaN>fN}UfRJV7|44oclo^ad)>_hJC z?*E%|o^$V2qyJXT{xCp5-#J%6zZ}gO4DAh#Ele3~E$s{$%q(qwn>sT5cjf%gk7$va ztun4Enoo*N{dj!`Ylq6(TuRDX2a07LLmY8ZND59OAh-cJt1U=YW5ec@TyQuicj`DQ zYgS;#HKV)~JIm*>-FNtABLlp*&{*lIpRe^hw$y2g@9BK<^JclG8#K*-9u^r8;A|t& zLQGU)0E4)C9p>)n-4f)wT00u@WCBLoQS)=qICz7wmaQ!+E{G1C;yP{ej=2w|2|bGa zC?qN@C?MeGu!E9>+EBb##K~?fEv2@?pq}eXA%HB8)Zm9EJ$W==Ke09|9`Gr^7I!M5 zNS#_t)Gtk{1O+5uv|c&tME?Fg2AL+kiGU_eZPDHcV^*deiziMKX-9XOwgzd~lfuDs zpI)-ENNwJPfZkjwh@3h7x0v6wBk0z#ipQy2IhrJ3jGlr61>iShh6(-p_`70=9TOU? zeK(xuz*e}&V6GurrK-V{^XxT6G;9ET9vrwCWK`E%K30=kv9Vkj26l5{Du?#SnWln= zT-oJ7RGR^NVwqUhGd!b7Qx`4H3f28WRT_6fY2h?RlI?VHBEL%A8o{&{=~*CvK#O$8 zOu4`a-OSavx|!{cM8d;*c|^T6pgUO#oiZsaL66LPlZv1wK^mQLTXAOcQBicRNC>Xk z0XEFFJ?jC#OoxSv(-?`Dl#m6JvXQr_7{tK0!zAT ztHp@~&|vG%Hyg9#f!iOp0M6%{5Rxs8c4>+n5qASZU4KT)Q6MDvJp$U%Zwrw+ zI)!?Mf_~~u_(p6JD?{in%X7Nsuq@Z&Qg8Y7-^2~bAFPp%Z~t>R4iFIY{}J*0qjmS6 zO*@_HmcMbo&Cf&iRfGF2DIJ3?(HZ1rbUcbwCn)QM&gfhYdn45c+>)@8L$?}gacoVj zj_gv}DNR4e<^JO}7cyXnBeBx{Q&VdP&!^YlX@?@S|BK)<3p;vf?hl#p?(_5H`sLH+ zLFZewp3le2IM7y_t_D@E1>3f$C|(^t9$!;C*$3FEU@I|jSjJEa{G#L$dxbc;fHRh`86%a(Z+K& z?h)BSQG{?5vGG&|iyN}Yu$Ulz&SLPR>MG}5Zvmm^-aB$;dP zCWDtMc*)o!q-j=*PP!CZ&veAb%IpX_rItfU%!LMDl4R4AGitp@bufVq9Zgn& zqNR457)px|h~Pj3TP3L?L*t#MozyMT+jDmnS}X{`&1h>Ajj>UXA6#_jD=X@kqGzgT zuOscnRFyv|HbnWm`&(-g%ZB>JwAlLLX9lr0%v=$0^V86RO^U`FzLe~QSwrGEkg&7< zTAs+2hs5Z4WjMW|lRG@8m=wJ+*n(kj9dnZB3WKcg#%k_sho7!*Hs5I zx3f_6nHekHR&r|J+Rx@?t(aNo#M)=EiiWWm;q*eeY>kmJO=anqLSwPMr3f&x1#KqS zk_OuL`za0$Uo~6dY?1>6U5`|9}Fqqea&B zo%|@$-5#7a9u#{?7^EE58USW|Z4rt=y^+}X1QnWnY66k>+MjX)&2;VC)vRi#vigcn zu*zr(Q)w4$Q>0~Gi`hDzq%LIU<*+$+%{b`C+z3)lHUhLapSz+6e^Bi+ zvtmbbQtiVa`i}GyI$RBJya#3EINq_?$?dY@*N*Jko7fn+v92vs8AAx9GDTq?!(+t; zk@P3V^Lb~PUmXMoMM-u@sB>Y2%Uu`ZGK{)mcgHz!_(-mj@G|2>p2s_&)yxQE62!vE zU0-t|^7VU)5tAnb=+7R`)=4C(t9JD5bi3?xF#ANRn|6e3OSW|N8B-W)r+W!-R!!jc zzIFx~9Q+Cl`boqelgo;h{0psk@77r&{7frE=Zx*DTJgXRHQ6RZJMP2MSlyOg=51B>Wu? 
za+=6+NUravdKs`azdX%&e-~SV=IYvQ$^$eH!B7GXHKiG)I@+zO!-1{3^&8D>-WqmP+Hf)~|Owy-NL?`FN?27d-cirRup&5chZ%-M`oQ*7Zw|vZmF{)kTEM z95Yv5nD3dtEblRJR1GPjseF~Tr^^z%@EgQNGuC`?F7o+Myw3uFC8SHWLe4aW?ftX} zy#jt(EOz z@w9;@hM0{jx*~*5KEMnZFGbK3nv6GH(DQ3Q0$nX{R3PLH zg+H394XP+zc0=&_gIVh->r%6N*|^fF_DB6%ijZBw6SsVH7XGh4P?;xNS$BF8!W?WX z>f1<*q1BTy9^^H9l){)mn7ar*f<`TVtpS)szJP}=9gFY~Rw;D0_$CXxa}|?&2?-XP z3u8!lAKVbMQWc=Ibbfa(7TpE zZUbdVn^Qko{2!{ZXo}m>p0uZ$k!{h!=i4HMStz&f%PF@NX#Lm?#JfAc{=BT)e6nYB zM-#t35`Pw$1djTq9bzp{LymWAGM6&!wUwhBy2c$)IlgE1ZdgjKPWWW3{$k>JiGh6q zd}!Z#X-mH_52TXue+cX-Gm=5sM*3#puboNjdIF%Pkv3MHgVQ_76a0l={mV&;78)nYdzaNs^$G#+=HLo*tQqR>_n!oQB>h4k*`|-H$uAtg@GL#u6_)G9J!U5?efa!u zeK<9=y6oO>0bB7sb~Gxt+jdUNdX zVMKfNd8fsGXjDa!Vi#-aEvV|shya;ao!T8@3|=6_;Ah)5N=fz$js5YPmzZha^KD1U>mhFfnK1TOcMC*OxLs%f*IJGtSFl;3$;wcm-O*hE zA5>?(uW{WzPjhY*Zu;3chI5b1-{6kg$MfXFWFIJ6gtP& zAfI{Xbk`v>+P(9({(tSDu}mO)j(m5}Kp_6Nt4{wG=l>75_;^j}*$MJoT?+Mz#N zsz$@Q0@)V@Tm6Y$N?GqO45o``phRYQFTFZOd7`Dnjq(*NFo4L6!h{2#^PxE8bcF-r zp9W>?n9YCmk@@)elylV$G?9-K`mNGpZ$ySlqe`efYI{W01Z$**@SZeO7<(Kf1rzpJ zzytB_U_v>K+_V8Xw~C!I$A1F-|Zevh_#P z-pnkQ$5d>WC(c*o3|_E@D^zTn6Y^RD+Rc+e)}5AvEGy%w$7lAmRT*ki#!a{PL|KPv z(x9d6{R+b!ZHp{2tynPdkjY5l+_Y3HX=QyNDtK3W@ z^QtV`m&B^tY+rs&+J>^$m)5rS*Tz#{riYphF;S%wJSQ<{bV*Le5bKYmoUC`KXvjNG zmJiydTFo|atv3deg=EucQ57~*a<4R6;yP?rx0os1Q7MxS!b?^E4eA|qs;jW80 zB%%*##S#<{J{t_j&LpgVpoRenc6bWw# z`$Caz4^sd`K?=!)aANH#g!L1}*vs7CZ7M-PJ8bk~VGV^xqufeY1$v9lQxWTjF>R1x zqc#!0$p1ipoz9cA$lxK8^%1^DLqW7?Ly99WW7}X1r!p*Y{pqOl{+IYS=$)Sgaxp#Lv2r zFUt0J_R38s-i1TFR@foAheqU%OdGE9`tP5SK6KTcR9GOOHr(%F|Nn$L|JBM;hjvq0 zMF0H7oSrJ9Cxii&jzlB`m1trl@{>dmCb~`}!7qfmw~1RuFTsCQhMPsuqDpI7OS__> zDPQeO>BJ0{k_Z`np?pzoy{)mds*$UdY7I43dYz{e2a{Paf3*AKvpw{?r?;&ap1_5=JbYVf81TyVaUw-QtRamc zOj)V_K704>gp2uBsvBCJac@KDd(U8=mgz6zGI{A%8Fpb*$NZEZ!Nd*2g4w#)iA)%w z%S)Rz7Qw;1D_t`0BkM7WH2#4BwZst$@yZ>AwOxZHt;-7G>G}hUh4`V@US8hs_QXGB zhP$5-Hb|Hvy@r~uBn>Ak668A#y-p~cB6{d_9t}@+mtKpxj17r-4f1sfCtkIyQBWyD zMp^Do6r{d6JVxV}-}MnbFLi)JC5J78Bzi}gugG(mB_lz~htamtG-%Ia)d2KAE@l3in1wFO()#?(tjYp#p865Fi1pd~#Xe zGJ%SpM4^n4>%-!g?mBHGgf42h%YtTBUW8e9=#T-4Nfoj z)fZS^f0&qiOPxm)pVop$5#wFYFgiD)-snJdp`wGeV|$`)h5&G5flQ*#cqEqhZdzSK z1&t7Suwx{J$-oL#w1)UfceYQ%7K$H1k{6HA5XmV={E>d{Q-zc6-5c>9hrkM4xyEcBX?Kv@gNM{V<-VkcsW?^p$GJbFzmA_-p$Zt0r%byn7;uY1 z)PHwGe2v8~|L%z4K*K?3bUk%L{hGf{iD|x>3TyfssXg4jQnSz3QL_)~Rk}-(MJIXk zOguHK8Zx$!B&@DERz@YOzDg~wP9Lw5Sxj|8t*(b2_1G-uaAiR)ul{0{Pz^IvP#|r3 zTCRY8L$rP;s?AiiKi=|l7^ZSz!9-V9k*u-|P1U9;oNv8&Q{gOQPx^)g+nG%@`E;ym z>^GZi^4TdAo31*GN^o(^!lDeDv~qGAfNsExc~8i&QpxG}0TDdrc#~EWTe+IDp6+6q zCXDI+@}KIM1eS+W3)#uVMJ8hRY=y%N0yWs*&XqQ?%BC>$sB%_>P!Q@NphT_YVgUMK zs4Z^v*{$oJwH3r@F{+hp)IugEYBQE@+Q}mYg2T%W!JOYY$!z{i3Wk@0B?+axdUB(BN%s;@ zCQc%oAo6*6G6I!Bb|V_|nGT4N*~jcDtq4AACjY?;ysP-CsOfYwV$a{#I=>ldqxW@BkXfe5yg#c#U_>yB43_~_`58P zTgv)a6!qj(_#A9*%cR_vz}Dw}xpTx+vaXW1zM){q%856NFu;nVf~AsqkO6BTG^i;n zwJY+r0J!jA z-eizsFio!zTgDH{;UmTHFzC`g6f8*AjMEPl^%%T94k$w~fanRisMnmaFn6#L_+tYT zKb6=e(r;V|u>^G^_Bh}W!$9c1dM^NcpT8a$ZED3bG(%{^2b*gjQ&>mm2;o2>s)_m~ zr!^FKASK!r=7o@h=S!o85=Yc1On+Km$bmgiNUwt>Y`H0$sv(0ErdjKsklhsL^vdG& z3ZkT6AIFj~+yO#3lqRnlM9TX@VM18x(+TC+Tx=20O&>Z0{lLhui3(3f1$?%WY>Z^&LRbwcjF-0Xo<_Jy)rqRi>V z2}_1(9Kfr0@62q5E}k*SbYcYOp5i}=PZ$zE>8sgEX5lfXO1;%Qo8 zS)A*y!DX5)-)ywl32H3?-wrpaw{$aPG4>onK}9jXK&TwU1rDSyO&2ye1hb>5|Y7hretOMl$UFDs&<(;jhZr+#(m}85qNO-k@BKuH#pL;{RzPwBH zhB$mu)rJh-5yApoz6biAx&>7Y)Aa#OSV2XL$ zNHcDr@1ibYb@+x@wBNR+7o`pp{sMzS9Tq<&cVAXe`TL(}bA$y4TF5LlP)^m37;zdx zMJ8hN8!{((OeQa1V9^*!W*Uc2#>d4U2T`!_6-M3GW?gon-qj|$_91CsGopUlLF^Fd zMDWpuz1fpo4&%GUh42QITti78D0T!7tB@0@7rFJpZFq5iD(MUcw1?ra@1rywN0I`o zb__b1k^w4}P$%VlYOHDAkLp^iDH|6HczflW=xKUx 
z3Xhbp59M3P{ayS02PNn5##{sR4J*%F(i4H@hS%v59MvP5Y-5T`Q<6@aRoW(P-u**Y zOv?_dq|5M%D{zlqaKfg8(%U7F>~StsEr^3d$?Ce z-*d-U$~DffjAeJsPQkQa>H8iVoFRg z`T~QIIko1bj48dNN9!Wj2uznHxLd~k-h?dIf=bq9Va63epzQk*dgijM#4YWP4>gZnchbIMA~@3zuR%CtsI>f4(#i_lqU?;ZLn20u*U{fWQ6 zGX7tBnEiX$k2}4O^Z^Y7^aBeBi01#j)^{{@a5QydP%?FJHg)<}hEJCIi|==R|J7x} z%#z*#gn;z(XDHh!DH0lO%8#^K2viVKKVVTpmROUC@d-xQNHvEO>W0M>>ReUpiUt@u z4M`C#2g^k(%ylbk?Tc6a`_Q$k)~m^kQIn+EzjyAgeA`!CN7+u7SufXj22yS-`1=c`H`eE3l{pQt~F4K5K5t{`r>jI}!rh3ysQhN}cVC#uwB&Ckw&F)*mwYvWuA=U4qDAW*Syj)~IA0ovML(F7v`}m7UlFU5xvF( zGOu^4AB{mF*aekEN)DH@rj~tnXFrfe_OorSJVYJDR%?L z4RqQpj*>!@a8;4V0uQTWLPBmY{nem@zEWkn|%nc^3-6f`KIxY_r`2z{#dIa)L!+W+5Mr)3*B`HWA4Mm^7R)wsB zYnp4CbsIJoz5Wqa8QdH#pyXeDw3&#I;#K7#^VF0WzPgU_@|n({UN9h#U=El#uAj*- z7Q=tqQjTajWb#?B5tiBMH8qTq= z_;%oPe?~pMgi$hRv+3NW&&i4lK!|ZQtJKZSM$1-Oy0dVQd(>CzrfB|M8)6@aX`N0$ znrCTPXWh@tFPQW;V>QvBnhzt;8KL2mZ)3}zB2NrQlWb2k8$+rESy;+U&+FV9RG`I= zpkOA77^Y1Y%iHtRX*xF~V#GXc!FN4=hW$M4-~R38hv@u{?|RlEiswynImcK-yL`e) zYSfNwSclDw@O+Jk|25E^c1GnnTt9=Kr*Y364)A3e$`F=g)h&UT0!NeoDG|03 zodt*8YmQmk;<9dMcM87s@7q8#S%oKUrx|5B7iG~3RuZHhv*C@9w$f0i{^=(DV4|}` ze_wqLUVTpf?yYO0x$=6-R!oyzN}K$!lBGyNK@5lM)$=2^+S2(J359y!Y?UbFVV4U3 zr3hoBfK&C6yZ~WcJyH0L7oVNWy08+wcX8LjlC8_9C|)w%rshMCQ7 zb7(=;^{p$30=}Kq;{=Ysi<<4#0Y?vp2AF`atDPwWT&&FiyA7m@JldR!yh^zOpJKU^ z;>NLpNNGo^EghBFl6$uBAQW|^Wwn1yQG{9eT**>VnX{#pE&NTVnu5U64sZEzXi#^qta5*MOe8#`aDL{!_dF;gj*|eEsDiB@2G_jsTVT>MgPL)lSg<#O z1K^i@-0%JbHp+?|p~>a}bLccqjOGlW>0>`n#t}mr^R&q1PA4KB!iEk@YP&s0#hr&v1z$u|e%1O|R~tIsa4BGG@KwdLyK z3vD_DI1)?7gOp7c4A7-D0<6%aBrqu5>)|iGX55S}z1T`Ch`u%>H*?3kA^; z;F`(~hV?U69Snoz%a(=BNl*#T;(< zJEB@X{-%dyZEwWrzUcEeT7q+n?P3c&iUL_uoBdo=*zdjj<@<_au5oubTRe^*U;?fv zz}VeAIUlWWr&?P|O`|!8^u{^63b;0n6shBXogdj!YM@;3Jri3$UHn~n@CJ)-ZR=3= zReWAix^(gfYAES%u98aUdcKbci@OCoUFx;csH5q>Unw1Sf{1p|2dH-zuqOX~;&IMX zh)*7j=+GBJeyelFa=h~11YT7sosm*3t^b*aNcpxqOlcx;+_hUjG68>?c$sm|m+OJ} z`JWu6|5zE#38s@B`<}KjeXl+8{GV2lN~TWszwDg;Z{cdT-{ERVKJ2Z3Z9D)onEI#M zi(V4>Ef9*xqH!}qMATTLPm^WqIW#O-Iyr+*0P@vL`*oEOVNBU~L}*vV{ODJ~DTk z%7jqzsS!zW3OxF4hLR4XQZwa5c0y=gE|wS464p0m<8J#`5Qb>BRT|)Vt+KvWrshx( zK?dx^pPj#GJ11ks|MHhAPNYQpP=;PAK$ud-r?wxAl~yOLDqVGs>~ z*KF;t>y$D1hX{xdjmiCOpG5{mbMu%Ac}y)K+Q8u*GC6^D}q`oI)sp*CzE0nv3B5#oA^=Bn-Wl>X_$In&7m z6P9faR?2NCmXzwvFTDm5iYONmX@Qr@f6V*7Ku;S^vn;pl;xE7Bf3<@hha^<`A7-4a zxmzF*>Qxpe7xSs8sj22GeFUJ5!3z}nJRK3>)JU{RN;lErN)Ywr*5duP821p>inqDQ z%~@_j?L0g!uZhk0T9-~gZ3bQejJX6O`G}FzFbh10_~T9jSP0wpK?S$JuX;O!vDnOu zYvjT%ba*odN&DbNB&!#&tNY$La>ZFQ2cP7r*?pm*0JlNLv-4|jyEk5}HC#gu2XR^X z=fIGpJg*Uj(~$7rqVvk&EckV)VU(X-QY`q_F~qsW(LE3SBd{T7#00^h_^{lxpd57; zxk1Vh)TKD3hT&nK8}mJM$xTtz;ldFssG>GSVSI`M*_1|KE;!b~z;I$TP(eC(?r7m* zy}WiSmse06W-YEDjv+^1GhY412tE9tfti2Jv3tG#X?zh7n{-AOUBBed^8|4Y99gLsg~VkdIHNyqFvc~A6zNId^C zwW}C9S^slzwWu!Xges2qsf9DSGKU5GD;zHX8u^Db^KAgWQ3n{$4*0K?A-8ZM<1)JF zc(crOWCy!lfbiut=uQ%-Dkp-ZYH2_@-Yw59kCHndids;n(O#iaa$uYOllGfu&31#& z$H#CE@O=;59%=Q`JR^n(>Ja!t8CG@pt_Koo8k`}_d_WuW0Gyd{T7L%?2HFN(K5(Rw zCsmLdElo?2foQKSguC`M&xTU%5K`v8=ajSSWUd@GMDx%cz}6{)h|5@OHL0nh$ca~V z2A(dN9Tt3ELN=W&+CuaNCeY}HA!5S?i++JJg|}FuZ_mZ5j+FBENPJZWME`2HOOr|h zk;CffiAzI9-gZW)sNEGHWD?OhDfG9ns~A>SB>s5cw%`B>)Gq=HEUj+kct%1`I8j3-W{c+U6t*{l{>A-F&Q}W5pqy=LrRsMk zWTh9z7gRy)&+Gd{Vh7 za>}Nno^569j$e;_oag0ioke+8f&3U43~5hgVOqwGm~Bp-C>B?~7#3y2S$BWKh~f_H z2yMch2^OJo%g>dxrz4be1Ws$k%ZB*~RyvRBtIXl)rWubFD-LmqDp_xr#~rHDCb`4` z6}IxpJE=}7FGVe&)vYnj8^D=)scNWf`(_fX%D%a&nu!+3momB_P@$rbd5whpvkDTg zpjw&@Ff$tj^5^}S0)7J78=+`dYvMl$;vb0w$f^;|Ep1Q}Y(O4tP$*W=0IRIT=Mem) zdYQb5=pqgT_EgBh(?ue9KB*}25`SMnjL5#ut@kJgJkfl`E^`DwfyZR_Ff*kpC%a^3 z#CEt&y;`!o9>AQPM%8s0r~@AU1bp#z!Y5h>`lpUoA3?qRcqG^_)F>;YgqM*2@djBa 
z;U^OWVG1C*jZ`}!G7`u$lno=?88G;Xgj~NQ5k!h7GUB`vTY}~QJBU+6S%B7>HeA|j zW;vxKJkgd0<5di8UCUK!LtxnChv9;XZ-bn5j`6w*6S7J@w1R`N!mYLnaak*CaZWE& z+-qpbYk=+5&q+{U8F@lV^hdCXNB1`&QZ_W{rVStUe&wF_?_FZURm2#yHs~8eo4JaP z)O%Jhouvz>KSwm<`rJuZp$u^4A>#*DE97U)lf7=ltLX6vJ=CkIL|sn>b|xfbW&Jjg zqw|)%6jrsFPd}=Y?g$3rGF%1W6Ycz-3#vEeuVb_^eLP-j@?|l;u*mIWI6yK!Gcb~dYUbob zgA|gIo;%mpR+>|oBe%?`3jTcRN}r8l6(SkdzmUJ$a@}q<|G46M+H&8GF3E@M3ngYYrh+4e3ShUI^U^eOicxZ67_|BxpAv5`cpvMjQGLS&{o3zWb?c4&8pPG>7}W%+f!Z#rgz1iRBvg|O)~6oaAFeXO z+Iz^8EnldeX7FL^WESq6AQdQk9HSMczs^&(TG% zX=XhFgSavo_}#ui7>0!0mvhplP7b@2Ejh=%Ah8Bx@!+;w+d@9wMBQsZ5H#>?9=9_d z9^%{Y(oL&P6n3Dnbmbl;;>nDlVAni#7pe6KXz`xa7?WeAIA7DnOSU zdaPR8-Y6PgSZ-9L0BBJ_Ez7D;v(2j%`Gn@?^AXg6FX}1|U&_)*`BNHu93%lYvzrpjDg!d^#n)VVgL0v?ifxMwz8Z8B;di;*MsQu5$j+N2 z2I9>8Yu5QQ9pO>#O&o=lKmpv!WK#4(>TZ!z@aCCsppOh6gOjc!g8uRBBsT z!{L&R;ZGUm;$%rOc}W1p1(7{F<#J;CF*~=T6d^h|WaazzD+E$EWR@e2W{@5%)wxVq z0;TpG5>gf0mv#Lv_qq5~FmKH>=2qyp=i6B(Ts#*o8c{3pa9W90=uy2SCMBgl z&djjUvL`CvhiHL1*n)Jp=_%2x$2ze?(ZuKrT63v)sn@X;BX36HbZLDC)1MLm zcoT4;NQQQpfQ~^NB^(ndFlj06!%UVMMok4Ja>Gjc`{Okfu4|wDtWFMV65Q$@f#s^EstU(@185ijc8l&X$0rv5xFoT3+3)mu~<$mwnHaBVxD^}iEWwDa)~ ztO?Nk5>i+3JOVff&-Yq5cW0eaX~@%T_ibcCqb}*FQm$QTRD))nI6Yrn`yfZJ@b?-X zS;S_r|4RR?JZb49&#bM&>tg}v?IFlnjh;ijg+Isl%>vpSdKI46lw3dGE1u~l!m?GG zLFkfT7b}S=r{ZeLc+|J4y7+1E%;2oFQ~nSj%IIc9TpUb+qnU=LR)~+MLC}0|v)ZSo zGoZ;+^hJPXxhX1kjW6J##_qR-qjFjj6REfQu8Fq*!OUGiGRmt&}TY1eSkm#b=CJ6zRWNcBMTKx5T{xtNqm^{A8nadRhK_NcCHAVH^Op^e=K2L?BM7XVXlyA z^-(r3<&UwaG=)AEj2}!l;_gl>tvq z``TKhXrB-=;bT6+2x%yZy>&&!aYY}E>SalXpMG18CNoJb*Ax7|j6EnZ;A!xp)8}!} zD!=HOsiaDXlq&R8dTE%A+UI`e^jN#8ZOf~dMBIYY^ zl2g{zt9iV$2GX5h-i*F4*LStp_3aEO*HoA8UuiqK3aKW|Cby=v8WFd@8gZ~Wokd88 zXO{%9FY<*YsA`Yd0_l!`szmU$)w+y*JOf^JX_J3TPRQfKq>W(o+9+)n5 zROBU#4#;_Pp=C-bXBQ;6P^-7OOH|R+cW1cSDx8a`WzAq>yH8+X8mhfBF6-w8ak(TZ zguYcsuLxpxuay$07lSRMt6FL`LoDvA1@+{n*?AALmKc1Gz0!G?X5ubpl8OeGMuYNX zW>tq5RW|VRHFz=z*Q$Ph0VN8a-M`QA#Dx(msUxqksV{{fh$GSX z{NpzmfwsN%k3>YqhT**&FEWv=ObMIZBPlbc-oXcT-ZV~*oCZ<;XqH;viG5NNGT*Od z&TVwTdADsgwjD1>V+A zLr5GKgTR2I63x z%m?S9QN{Uj_gWONTl0ot_aW$#dQ8*g-85UK)(P5C`1LeN*M?2<)*RiQ#$zahyTSYsB#T zoIQWc-&#~^XU8~?lW?tDL7_<8rthscD~3BZHFHGecI={2zvBE~jD2HpX5F@RcWj#- z+qP}nd1Bk?*tTt_W83PaW81dBy!V{%-g9o%ed|=MUA6!2z2=&0t~tgUBUnLW7t_xk zX`{}Ml4lDW`|9-jFM&?(FibLrU)R(onS#za>D;GIN)P8i_e1V1-mpGnKu4x%$EI3acz zGo3^&0VZYHjIGkxFAtG?yh$PoxLYKb-}@x6_Ak()Hl280Te$uF1Y9 z9aLz`IHX_pv|r;@J;QaNUlNhcd-Cz}L!;B*;SXf}`JjzfjXe>1Yv(duG%8eBTd8Md zmajofge5w9Fvdt}g^Jm^sjuX4_pVvRx9$7Zq0MN~N}a29dNixzbmBa)m3nnF_%aRR^_}yQ6IHcawci+zi52kdFeg!)NNTr(X`0e%j2VVX*yh5%8Ux=xF^0`Jwsip=Qn9INf3t_(M!j z5W0PryMQ->E|G4p9F&^v?`Z*Y<3QZh5m%UH{S4;4MDAC^Ur^X3A8&}2N^z6}a%7WH zAx3DPfTLCNMOYPwV`jCQCi;d;D+F}9!(w)kL7vFgy%OU-`DH&!$mffEWABWvYI^W@ ze@_Zd>(37>P{e>!qEu3%vM=zYNT}md8nmiEAWWUCk`hiA{611Z z_<_Y?lPS{Zq4w0FOHu{&4LtJD&V8e;Y&rA$HYvJ~HZzDUS%hM4y@aTpkR>Vs7u%O$ zri~YnPQj8Qq(g3Ik#phwGBIjU4x=UnA($k1Sef_IB<{q zUyGoHA3}E0)&1xgv3LaZZ2`cxDnP~-N4K%r`};n)i#h`@83Q-_E1m?<1St_(D<3@2 zh|d80eH%hgN?G$hrQ3qBvA_HUNSrx}^0xCt<#y)YEaYo;u>^H3W}-r_F$H%(n06Q} z_6yJ&v1&i40~xvTh~ZOClsL-4#glRb@Ow>lAVvwQ;O%nY_#bDx7RH!LX6BlmK=M?R zXgpi_D*p&v)xx^Tf>Vmt={Q;74#FkHV=3E~90?yNU^--F7^{=duda&zCS!j%7^l5s zuc?`L6rJ4mquu)50o(L_*U4y(+=jMb8CBVh02uUxX5kz`$1Cw%0WGYc6`|SU?K710 zGnGkqmv+lpJ2EF1k8jZAEwb_IurjmTnBxVN<1Kyr8Mglh%yBG((~5`Lhl$qMn{zB)ki7~l{=oT^-7;qAG6}w{ z+I{wr+TxO~5_5Q|p1iux?z15c;MwJvn%X^@w7-j(31Oy zZYfoj2(4yoC{4IvJ58n83^-z>2$4EA4XV-3xcsooqzn3EKB*5VUnD7I80m}IPDM=D z3xyYbc3L&`kOh|Rfy2SXrzxzp zPF?*O)_JZl5vMUQbI8%Mc+qzE9*fsVLWaw=Ft$MgRCn$rErSRZOYorm^7FOAlb2S| zCB4lu^^vOQq#_d(HY?ymsf(=I!4dD@u|&m|x?IoQmCoD}*+5&@Y^~;p-zXMkY#Q%m 
zrWq=e!;@PXYrCT{E^xG6bBcbuoS8Bzfl9dqp5CAp;o3TE7ICc3r5N0QyW`V1PQE(n z_SB_DdA;!Pn0v@7Zm}78kbW)rx)^3~ z%wJ`ay0-hrd%WP*JLI>&G^f_l`hKFN>G}^* zfMi2-f_0z^jF{QuHw8}6p+=qBSyLDv{=E+~C-gcw&10IKn)&krX1s!o9%{qX1pEvX z=E%IjN=_v#=9upmBpT9jL%@m=;tJ_x4L7t%-nnSz++w4dC8tuvjskhb?Suptg#4rP zASAd7GrQmKK;4JUrl?9;KjB6_KSksS1$F^~HS+hW5^qqWx*~9>L5pn;1ibh=x2cM5 zt(*cM3aH||5XNL9ykw`9Nw1N+V80j2VzAjGGnDRle24d?Eg*1N&$v*6R&xT35K^=e1b)BtIyG z3V_s9t0U0KfNBCknf~Z7YWDW404!Fdsg9Q~Sl`Z=YaW=&_BZn+Kup_YtPm{Ns<`0;gA{DLo78Fv)Xe? z{+yGyNeNhUfnS<7UN&FB&g%DGtx$iB$`xw1U~!?Dj&s?QesV6Mx!QT?qs)}3SpIN{ z+0jm74dTv(Z|uSnP*dqjN53Eqg6BW5hE>bX5(*Y3&{ znoau*q{gaUAl*fV2sS5U`Vt~=q2B0dsBlhcZR8e4EGEwGs#DGza&k9fJD=M9`RFs9|;)eWp z7vH)O4Q74a3&#<}9oj9yfrgIQde1ehlGly@yw#qRXd~i|13zuBypiEKzPYAt^J(Ci zw{lsSw{~sqgFRCsXrGgev6;W~8~a~|+XKaYBA1nfo)_(siNC2HX-Jq)K2|cAV?G;^ z54|xc7uZE!=cEz@k0**q%j+6XNy%#vL{+4UCO6(TK)tOGthNMWcZ48S??U|+UHTrU z@ngpLkZ{I$x{g0VY2bJtQnWL}i-gkr4Q7AX2inNbkwCmE?0eoIxdVZ7S;qX zHWD{4hW6K63<;EOs6SaGZ38W7+e{3YVkK{iVSbLko_yktR{pLR`t8u8IB9fCi}4%z zC%ED!=-8!r)%aOZZl9E2!UOU7DD4zCl_`Hg$22eg6+K?PdK!2`7=@L+H5Ydg6r`{4 z1|B#Kps)U2g~lPx(aIv2$}^-3!`a7MAwyLk`5wjeQ8re#du>qJ_r&q|cqTLPYNZpcNb6Zj_<5noNXpF{&6`sXy#&2+OE zNu_ZBd_ZSg5I$M- zu<~x7)sy(i^HSr5$os#aRp(kj_R_Fhxc>W3(8M(YN&ouzyRYYO#KrzsQv2_p@NY%~ zNs$a+{{lt$a5%LzDX6NGm6mX62^>UV2HC4$PCB&Ro()QOvJcpeoB`UUEYlDp)&2H^ zTkl&k#!#pc3`BxP_`QCsrI=XU{wS}@EaWzBu^l3-loUTqyLr2 zK@BIr@qU#omR~?M{eL|1%IB$KJeH1 z(d1y(h7`%;Q(0r<38Tz|%M^B)tz?>a5yPZu`TqRz*%fB?6mDSllIEhlbQxdNSs}M@ zZx`?97NHYZo{|LncC1`Frzkf)js^fhYVp&cpNMOe)};0R_5c}2CCnE;XqRBjeMKYh zg?4M;1sSLBY6vE|&19%1Z!Bm`Y~WXT%>@(Dg8Fo9B|Zfj!y<{Fi?)D9hsgIS))vkc z&vgODFg{~Pr%!FLAy4L@(k)n4GJizU08I=eqy}U}NFS-LFFn*9hl)FsIiK{M^vuKR zr^GAJ*_&KHC+c?wT@t9c+9Rk7@Rwe?e_wyXB~&*FUkNDvD<1!*e0FrSq7$?=b#_#C zaCUT(F|heE`EdB>;`{#+?^z1Hve*m=-pH(B5P3!N|~s#vTZ7(7hm$$cTyAiFnHmJ9dISwi$2VOb#Z0d4FEMpz$N)TVnLKQ=*l- z$)(!OQpVjEI*%Rcfm;K46Gb*hy6Bp1B5Tl&yN>Hk)0w#mm3k>?KH~}&{C*~q70HCm z8q%>%l5Jjc-={_VIp2Z8>jZ7PjmT+s<2lw(JCgPKi{wDa&Z)vWT zW+%&d+iGNm712$IIr1cIj`*pXmcr^n8{sOG-cFRk_D(+6x{~AR-n{b#s;~y#y=+LQ=%Faw`(d}mT~usXZYfGtahCjtw0?tPh`b2PHj+Z zcWfN5wI6Z~Zz-#yr`dPF1OiDEm4t_;u)_y0_kxBvmPzGBnKuT}OMA#5%Gat2H*r!j zr0oRSM!j-^LnU&GHN;U}FOK2qr^fF`%5YD9k2S}QI0NH|SmUi^Hend;H~@CQ&`VxP zjXLM{FXWiN3dp!NkNx|vxfc6-R^H#v_5V<=WKjeVcqXY~sw=iUy6q4NXnB{&1&B^b z+P;KCe}|9^43unam^9fR5v=nVKT~-#GW>?WDu|p1Ca=aA;Z>G%o-9);DL);auG##~ z-KUL7B0ZEE&VajX6NwodcerBq8Bb6?Y0H?1r6I-;$G1^IR8a0r2uuzzyDC2{~w5LSCat(>i}_RbIG#l!Bhd+B>452~xWU zmZp^M)7r2|_#-5>+u3bEY5G+Dgx%Bt zddjBK9b4rapj63Gk+YVrfwzx)N%Mx(GY$oGr`P_rHq(C zIAN2-%%vow_QQAm^d>chIE3Pdpv{*y-T(IUwk5Q7!3cv)S=>hiElAyWCxOuZW$Vzu z@QOB1^Ubs+k}A=@ai-ty>n^}?!#MEFM$hDjYJn}r%OA1#oj_HhHA?wr`;s4-xwk+X zY4QpR9(U;fo;iv(0L3IyvZ|dMq6uCTIvzhl?AI>x=+S5KC+e-8iSJs% zzJn8KnTF@Np>6_7@dW2i^uXn+Dy2g0ze&htl7HRi7olsJ=K3PF+n@v%xacvWq<7Kv z1rH}7f%vRL^+or;uAM`bka=HpBI`TVv)GuQi>RgNjHlzA`P)GcdmT6>d%RZhE5kd2d>cNTyDBm!-Ai^+2#8d(XbEHdLsyG_VA8 z2QwKvm!rMwLl0=MvcG}+dt7ou&SA5D#pU8xmirt2%iqQ2KVmQIOU&iJ$}Ap_#eyRB z&DtWt#*;>nK`*akCd0#+aTar%|$Q`c``s`26l6 z)DwZNBf(cP!6*V6C;asL^OuSgTP>KNM|3aRC9(zUMYtS>fyNtJ(3PtPnqcXEm#XuN z?d<)_E1nSM)0EeyCHN#oH+|&x#^6I&C!c{yU@5)^ooe5PteKkvx7})qIu)CA zRfNPyizEX9s?U(}dK*rmdW-SXm*PfqXtW@Ef@Dx_C^h@2?RyXusgy$UpwpS-lY|~D z7=3ZpQQmq)brl9oWT0w@Lu?9dfn)-<4@68=vY)2p(IGg?-1DSm{_zUPfD&_s6X%e2 z+)mV&rq!~}CoK|btrZfL8!o;^KUe)UolQ8oJGL%Yt4k%^cW6|RmrSO+9HWsiF=7g` zvET)EK=?-TK}N%>zaUa<%^jlUFT5RZ@$)lf(u^a4DcSqnOi_AA=7{KT)QE|GTEs-WBl->hQ5#4-C0f1^;2CbXgDrD;gPE&x zteuN{{P99rnzAXi%O;IujvHi^$MNvw{!($SGa0?h` z8HfCr@^fYFurSo@;4m?fq0;RH=>daND8`C;Cu{(9MhB>L1FEH;9qgHk^ij8rP>dV# 
zTfd+a+5%q&T3Cxjm^4{&&zot8GHg;1G8-l+h>ElfEA}1cij(5y8P-JV5=|@(Hz^2e z7e}yMqy+(|(a0Sqj7ophGwAC*Y<&!co1-m>Fo%QF{xFg;k0Wdr3FYvZ>=6qi6(>bo zBxpKF$H-!6&~RE9Tdjv5IHVmRcFx#lByUuaw4xkoIIHGFn>scn%P0=FE#QhgI(jhL zV!HGu{jeP5DA&8)^IR4;Z8g%55+Uu69D0SzlsidiGB61O+s-{?4 zFz%q)#l^fFYvJ$YSm(XiYpT1y-U}C2N!TsTP(2)wTyEL_^;TstmgF_%Tdkg(Jc z0|`Mn)50@b>#rLLMuY3{UFPJwa3&Zl&aP#y3q;7xU?u}t2W zHJ0$`8KZ_Wb>`xPWjcaW8_u7jFjg{Q(E%!p&=*1)P3o_9}fAwt2~*zlp?~<>zR2sZiPM zr38<j`IfODt+ve&K4R~zObtRxJj{ekfR1Pe+!pb5~?yH zBG|m5Ia5xVXZOmvJArEJd&3Yl#_>H-?mr*wFoP zDMGr3gzT_fH=~$%Pb};Nk`d2(cgYSO5Gi#r{>-_gwi4*PJXzN31R z;dGPldvK=}g?U(1fmLmOR5X%iIS2+@lENGC`rGes-aY z>isg8J?Hzrnf-Cf!Q;I#YV69yl^`!Go9%hH@#uZ=+V!|$YQpz++4%TvEs8H_CU`nA z50x%?hs2zZ>Z-oy5JVO#6X~g=2OUHfx`Xhlyaydr7x^i^2h;BtkRIAoc#oP8d&nZ_ z2Evmj#NqxmH!Va5g^?cmQ!uxW(v}vGZP1mqm)KT2kS%m%@~-DoR}a_CQ@{@&_3(DV z&@C~7Hw-?=9Ycf^ltehpG@?#;hfJCR@NxS|ZT=pWXi8 zE-9%;Q!rHOEQV(AgavWtWHzbc;P2jrVp5in>qmcfREIF!mGQ&s=26#n9I~?VRwebC zv(nA+CNbWj!XI48OKIr~e@%K)r!Q2MnimJURzsqNO7MSb83uP!=P#J|3=IlT9qSFVHcco1L|{=<<_(b($(pvQ4Z8a`r#;)@I#oV8M|#|*alw7`W)X` zBhqE@t1#MzmjOI&*6MYA7_PM*^h?SfhXb_IeMb#)*8j>X8X#(u&`EdnDm!$qsc~^t zvs}557!*)KohF;qhdHEXfU2o~;dJ5aBxX};j5v+`G~ivRZlV@(kt-pe>mD?}UR6>4 z8UjzH4o$&ASkF~@P$#tf4Ww0Ukkdw zp(f9#b)2~|(3(#Fdp3lkM{u!4nZe^p)YwJl#zi&UN7oUaO7Xpj{ElNnncFVR3M5C+ zeiQ+6JNr&N+iOp<8?7y{a+X8fw*BI8<`6?$<7|{e834h)R=ge6rd4)D2e0Mc}+CL*IT$u6Ts!!re(s)g#K@%=a zv`mHM*PvxlxUNWgo-AqJj3U+U-BV}js##vV&U>_^#C7fx!PGr(-pt|hN1?qaapl~l zWPQ6q$3+WVggA3)vLuGRx^_OJhTmLuNa2t6FEgC)$7s-r5FntEpkp9{lYaJzQgW#3 z)ba%K6v!&G6*jb3Vp056{OY+E@wlS!_UL^B_Ym~QjHKG5#{JR-extOe`4y=afniMy zqn@c-9;D52$JUJrr^ZjKbqQhTCW^1ZMI5tIqRyn4+(R0*pX_Rs@xl>!+nOXeZ8h#N zMeBisJT!Ca!?GH-tIYlQLeJO`viV)j)=*JiFfs)l)<{xqVtGxYx5U2aO;bOosyop&QKNX$~-~gs^(<1UMTys zDrB|!&OptL;E4~0yy!B&T8tp14FR6lAiZZV5O4!Lt)H$#jb?20j6gJ!lTbmT4RWq%0HgHWsWcMU+^Q5%i z@Sd3-;FqZVe2}~TG|%o#t7r)G5wVg=*K&(;Fy%3h>5}PI;L3l&q%w6r4wJtEC#zr? zpC3*dq?}-Cd(sikt~77+KX?q{h&8GTGz&pJGJH2hV+C-#ktvtV`N+8 z8@;E)uTE8}DP&U{87xY?LIFizTJOiW+M%>}R7t293ZWWQ^Rt6B*556U zroB2cU)Ph2u>nwC*N#cBTm%9zyZ`2CDPPx?i#s*PuQupP4m=HYjZU(m4bVOS65A&q zoDQg1n!Lwpm3^`sT@1F1k{9NwHjm0UC2svv*6|Ln(cbo0$f#JDbPm=w^={!kx8pgS zM0Ii0`LUS6tIdT|jl7}lY(pDfyF=Ee1GjkFG!6X;{O@JW&kO)6`&X|A@YRE){`(%} zS6TDFgq)TCd22W|mDnadSaG1Sp~d>7*tvf{3@Mh7QUywcyfj7}+cl%bp(E}b#Rs|! 
z(I1@mXN=$kr-FZ}I81T}Q#-Gt$<&m`7l`(N%MUy4Fk&bKn_;mu*3Vz`le-Ca3CsmX z88aMxtr_T=g7@xCJrL{x<7PO4BfuR6gwX1VJ6w1`-g>Nd_lnoGz31Yo} zd;x?d)>~#??hyt3jd4%y$K%!lgb=}U%c;l$Z~p=6ord74aNK1Bp>I5~=N720w&|0) z1o{mf)Z1Xi2^p0VwEOJbA4 zkW!=$>huZ&`C*KB74ULLGli>i!LvDI7)^?jOjn7$vcAEka^~gv-GGL*ACnEZmnJNa zeADK|`9o2d6Ws-BhkL_9ZE7%_LyK_~Ny>cR>i@(KC?*^c&K}Y1g5@QTR?KOWwOd`n z^q!p{q<3-&4}Ac=JNkD?7$wil=;{Y~DLtcjLO196q;a)y9dHsi7}Bkz3^;^MsAAf9%Pf@%^Vbk)0JV9`fyM|y)%AkWex zaoH5;HK@g)GB(M)ca4`4Hzg**#TALt->Qfhs=26|3D6$q0U z9-xED$D(l`qd&0d`40UgNjnQ;BW7%*bGW#WA@&vs&LGFxD|;#)sIe~a)yp3KloM@nBd86JX!16~0G|Ec2z#uq~W z8vp1a$XI!>%WriHS?yeY&?_9oTS0i|tFh2QLD@5_DL+_r1_}VZQTiCdr1rfff&!ML zk&eq+kXxV#U-(vikB2u;+3%v=N3EYf4l*fSh^~$@ZQuifAG>HP@(B!8Vzd=ctC<6n z%VvcEG;0f_Cfd){Fh+X$gq>lkAFw&JnP%_1#Mm|5JO7&pt~$-r$qAPY<~jpx7tLfO zg`&ProLYxERm1ClnJSC_STAY%iYKSPmqqyJLG7Oz?Z0!I48YOx3z_|A|5|a};xEf> zrg#!5m9sx#b82|9mf=Q1Ds+Nq-d5T`pv@ZW#(XZM(f-t+5|Gd7f*B&sEX)}7N8on_ zf|rqz(DvxX;uCA0hnWSAVDDY+@1VI+ZP3Wm+9{YqWBqKd{epeM!MF`^N{R88Ki{Yq}tCR`2jpbE|*lb1MOUR^Tw( zm#vmW&t0&RZA~t~Mlv|#z?;z^qPk7Zm@GOBOazKFSyh>BBjTNk%Q4?l zj5Vm0GS*4>;Z&M|ef$e#`&W8--5lg#M*H^di22($%KtFE{A2bTH6gu}=UYF0*_a&3 z5;B4U5lFFMq=ZHUg#ipdFygTSysKyhFe;iS)0!jN}?(w zQD|zf6kAmN(Og~G?_At>_N_U$Jbx&E?_zSbAx*#x<;CybTyvamJ@k6@7>Tg;J|6#u zHUrO1PI9v)IqNj$uxo^897mLao^@k4Dl|>N1~}b|>*T*skM>(JE7`p^atc{~+*xzG7mAZF1j`Hn$D)|Po$joe6hNv;GqW(#jFz== z*^AZ2Pg-@OewxQ>kuuEbXP5W$6k#CiMx8DO*59|FwoW3nTfA83W{FylF1cAMFp=iw zUNOxEl|1Em?{-V^=1`t3t$uB{SQq|YfuA&Gd=(tTqMIAcn1(zsc+XlsuxxbIC9BUS z+>M?eE-T;W>{dV4&}L=3ILl^*B*gX9DF~a*tu}#|{1(2|J^}l&{W@0`pA>}+{C#^p zK;f6C5<3D5*Tgvk6A}pqO>Zuy&Do;}p-G_p4Cb*O>QPMkcB2XL&M6BdM^;vq7FPxr zm)6QE#HA%{kmspa0IzS31{(sz%gI}wpPzhfH31eZc++710qDU+QDA&=@>FnA)udL& zDi&AULe*iv*6J!&2IgS!)IpW?`Kgv_BQtBI8h@1YZc6vG|1}wp5-;g>sn-en| zBAQ&!Ntn%ndP}S^9dE#VkU^ z!*!pm9({U;yfK*f>WjOQe(f}j>ySDD^=p0vYFnF+UTt%ALJ{T}fEjdQ!+Betc@O^n zR&=qRpB_O+mb_v@daEM}M9io(gC%tn3nJaR&d=J!t+R&^r78HW>m#bmj{jgGpJ)YL z{|Gf9o<)gumw1T{`ejX*X$+iG1Jh4Fwzh*Y;kqHPQby`fCNS+uc!mg4{+BFl*j99? 
zODYiS>S(SBiHE@a+wu-t(;;{%V1*04~1Z~c^Y*)ij z!FJ0TbX7m|eAAM_5lZM6Fy%yXqIN420+cXHJO>mm+yj0yaNFs|j$d_rD2PIxSBW^J zN}P~NCb~D^J8_-;cNu=T{)3-*MiM)%-mCk?e0UP~xvdG_(XHIxhCl1GRD{tL+UuJJ zJs_Bv3bZg!kHf&j{e<&rGDFivth_IjvDU72nLHUyn=L>FeK)3RZS7{xnoffO3FBX~ z#&9O%OB~fuqv|S!IOA-{`8Ich9H*DcdLx?7L?|emi+eol}93E`!3$(7yM`mVhY=n?eUI}HtUD-y~f=d)z3)speYZ|48Iz z@jmW~QST6yx0llatLWpkvXZ-?PNrGSOslsxqrTbkU0Og1jfK}09U6Uar< zu$Hz#G^V^(Ww);mdCxpx=>cLx1fSuG!E3FjYRPEJLc3s21%(A% zh23h)gu|XD&*H{|;fS*XFmr#?;%CErMC#N3$teB?>C0-DA@9)6eGxmH)iU0*-Vz~I zF)A#bWy5-e_JXk;$#9_c@;a%TllW6%?jYN<^3PodBihj=TBk`f_^WG5h`&jz0Dg&< z^NP`owG$U~J2wIaK65{?ANlK|SHMN?Dr{TA1>=#;3kdj*#IYvGrvz~G78dYl06Hwr z06J`2NHyyf5GEDNt_m{j8m`?cXwp%P7KkcU4A&*mDus5LimS1_p`@1^Kh zdGt!YPD%osj7KUjO#(G_%iElFeHlH`Pq_92-QnLmr$A<1k3HAOWv}vs?QE@nR+JGV zb3{3+KL4Z}aaMJGZd%`;g-98e$jQ+_A7#Vqn|kD6a|S1jms}0-85Lrn!UlWmV$aVM zn1gXV#xWs<)sC@*fZm*+4BqnW?}2S$h?-F{R1<2pm2a#SF{&4Th#9-1k$Wy<1Xi=h zxw599Cpc#GY0GD&LokmKX$~^%yCB#S@@d(gTxiGUAjskRfR+(uYrOTIeYP|!s^ zD1H!U_&vJXZiEAu?piA#JR5lA1dBx0AiO;IGu~Zw(d?Q~tbG56>Gq1#Y*wu?s7;xj zQZBi0ZHW#@)s^&-^LEzNItW>9K1!!hM&}!s zzXisnTT^F5^YY?ewD?x|6N`GpntLO}yasP8$#3Ae1Hay*3;XX&XiRgX`qb}<$|j0~ z&q)$6%)#tQ&jl3k7CRo~O5j?2m_f#zX9o+^2yz(9+YAZgCrPrhOF9&fddX~6k~Sv` z-DI7MAQ7qYp}i6Y`kFroUsD;|4I|ie+vXJ=)a28CXB^O@C$!Y2%^HcmduMj9Oqk_& zrhC`RTjs1~DU2_va>2Ro=jq2*iR5d#3ksd~8NuE@eIl%IEdTh(-aMb= zbe5ZZtCb4#%n*q4d>yrt$`U`zavHx#(d1FcLO1}h&l^8H8NUCvZ$Jf|%+kIb#oCpw zv~gdwVdDtD{iC88&*0MW%fie-9V)dMWk^yPHpxmNjFD2VRlI*o-@nj`D-MBj?nyHE z)1W+6UAw48Wm-xJAhRJwk6hG(#LwH|5Yl6-Ywq*MEsI6hHcH!};+m8tc?{Mc%T~cC zZ%wrHTJ>yOA>s5J%Z5#qtNLzMi&T_rg*_=mP7m3$JN&fjkBIJX9x9DSAWrJmI6Ea` zG<*=~aQbxJG7+}@C0FMBosb@iJr|8XgD}+5VgM>cGv#7&o-i6ZK`@K%9px_&+A1aZ ze!Ri?iGIK_J$_=_Lj(tV_K91NnbAa00C$|!sQL*(3fsP4Lzg+drZju7+~`P-75D+K z2*mkrJ+m7KW)4RMMq+32y1ye>+D)$f$3ky#3T-gM zkv|p%W7RJA2E9H|zdpD>bw|Amf2wj{clh=-PU&*z_bKO84)i!2V-F5>uOe6*%3%eA z>MlV;ZdU?dcB4w zlSFq;^||?C;#%}JBR(9B#%cUY*FU_M&Nzqbps9XLPXm~#RYS(GpNTnqMgcDz%{$$D zS=~Fh-LkWGK{DoFqg8}rG$^JhUd+9@)Yw3qWYogU=$$>MqI@4-=@eJDgv8ra@I`l+ z0E{s!tU}(bd>(_$AsFa|l;|Bg{EBVk-c|Gy9_^LS9&JN4FuP!&Zidt;PsS`u#0=4d zg2ecG*^u=_OrvtNk0-D{+3qy4==)STnY@=>hhKd44CXZ~8fTx;8Rm8WI1QDnN|p=A zAjO5GyPcv>d|11czs{MOC&~_yIb>S0Aj zYg{JU-gZnq4KoNPiOrJGn?a++!XM7EGAD$={t2-DV+qbnI!?L~b9jwPg2dMqUPH0) zmMoSXLAhT8XT_g=qjt!?iPN}ze`^CCBO+bM8^|3^9v+3L92J*BrV#OxH}?&SqHv@v z9RJrSMg2dWR;;bxh zHx-2y$?zhG3M#7#Dw>K7ny0q-=w0Vebefm!I;5TWa;)es>&AElOO8j4vMGwPDfdh@ zs0z#IeAQi$1ij1p4GtK5lT?n()o@Hv2Gd|efISljN`P;pMWmy$pVh&UWXlod-pu^O zJPUcjDSd*CFv)tL1s?j`q=bW>p<|1%z0iqF%WsTf4axo8{STx=jY0JOal`3vaO7PB zx+;|0l?=Xg;UeJ9LL>K_cby*j9sI)h*Lf_R~b`@_x8&-H2YcLR*g8d^A0_V5gnalB2k5R(^Q~QY#obNxKx?v zJ3NL%MUB&j%Lw5b-^WjI=UE6NZ=(&}(S|bf184l&XM!(ebN_6b_pRTFzGBCw| ziYxyy5}_+EnN<6*pZox%WElRbqmadhg9zECl&Yb1FuQLarVE5A!gXLSTB<7wmBhux zf*V+2xFuy9I{W+S4H`hB5jsLkBw}jFIImmmUNDwzNVgY=4fNd5 zwOT*Od!Uj*+1tl5vc7esOe&`wzjE-O`M#?3`ugD@)Kax|aBA66eJ(`jR#mkH8##Dt zyJ$$d;A*4zi)y&g)t~A1DZhY89s#KHZS%u)4dP|>X*z@9Uf`N$T$e32dVahkcpizh zr1(UJ9Fe*C`veo+K}XKoUPHahj($KTzY+$&3niCjR1|2wLGYPWU2aQCE!5XxL;p#~ z4p<8G?eD%_)Se3Z^vkJjDt0LUq^RLoE{C@#Xcb#tfYb^gvx3d8WpVS5nEW*E@-ZlC zWna@{Tw`z0BL)1_yMFwtNRu0ROaMP8=cf&Ob;~>}mbVuj*wvde@zK!|&-%mD>nq2D ze=Ep$z?&{6a_p@)!Y8o+WZeNG$@7V=d(L#>7GqA0&2~8|Y%!?p*E8>)$xryPGQNU% z@0cQYCW`v2R*tP2?$B%fsD~DBXx&OKFM4M*_FOy@+y_@53d4kA-1!`xQ`H`7 zg!%g(SxBRtt#4oGFf2ubKquHIH%yj8tD1ZRg}rUSU6OeZ3n*1c_#q*4_i> zeo+55X^rj~-jK-uW9^-SbZ?q9@8)XTwr$(CZELk{+qP|MwQbv4ZM%E=*?UjynfHtD zJ($=Lb#kBGQIS>uyecz)SEjL0DoD*=BlTsK^QjU9YgMyS2iZ3Di+Ln4SBYi9(3Rr- ztWdGq6SsOJa9hAXeX<^r1KQmIzG6%0Xu5E&!+EXoLvK&!K|s+^UNlgR%sBVW*3@&{ 
z>g7iU)vABEQ4K3qgLJFW1#B=~EJ)fUGh?ur>8|G51{O_seX6nHtQv2_RQ*evB)Kc6 z23DFZi=(7g#RIDnzD*rE+v#!xb{ZiY>QQ-%p~oT%TPj3t06+**p+o#Ghj8d0?0(>dm z)&yGR=SVN26((o{5z*|CWGSVAFd<)@9crRUBvxeFWKlU+UTwf#IX7h_--%x?4zacm z_0OW<)-S#>du8TVrX&xf-bsoOCNihOH9w*58iA`tRiW5hFLd zf(Wyw)JUiHoADY2P;JTbJRO))7Z~VEP1db*CgnSbA`w(YGO)eg`jv^PwVm78B1Vxd z(^Xeej)EBkFMeD6w@Tc&^E&^U{#|gv5vyTkl-8C^brrTotgU`0udJ;T)yX;A%tM@T zQeVWRV^r%LXD&vzV*kY}&NG@ctnWcB)HYyAbp8wUU!84JRGYj3HwX4)B?v4!xoM(H zO=JwwuH&3KI$?j;Wgs%JW?Y4k?}JX*?qxw zG&TK~3Ndy!{R?WU+Wh=GkKIZhw-ej_Ti@|7A2x zoj#D_&riQtHS52%g88pfgt&pNv9-y6O{G?A{fE^f?}=%;@m&J{>hmaKLwp!lCjkV! z1duU70X2@PZbJN+bXIV+W~!=!RkMo%;EJla4i!0Y7-|)y9GkTLB1Ye&!+3P_j8WtwC{|IUH^S9&l3+}M!Br>JPo&EhJq~alu^T(Den2M zgY5Wj{2k+VA5vD5Nsn}rX+k2@D{?-m6q86s`Mk7a0R^I^2P>MR1Z?W>^uUR1?m((6 z<^ZZ<4i#N9PKI4(vfODiaOV_Ix6b8V{MCyA`No9 z3LNdkp=OS*Ff^$X#Ivf2y?eLvFm1Yh7VstVDaO~1%`vJr30~$2D=FC4JIU8|(%KWL zM#@aweJJD6SJ)o^RD0+>la;Sa4Xxk#w`rXS6cX$){ z5f|8dz8jj(=9!K(K5SjSP&-ov>Icb++bz7KwuAIG+9 zma!RH$u+x#a-+5knlZ(I&fCCMM(6O#lP|mStJ{TWPy=C(h;u>SD1Kag^}Ld~gPy%} zZDY?hm`K1YH~GWc_Io1z4r}vd5-b;?#KT6wu5Bg5r~^N>Qpj$8tHZVMs@``Zq@?)^ zwpMTW5%kXo$To+rZy~NOx`iFurrenxxg)r#xxe@-E6K*v7EYn#L(SDq%Bq%+8Fbk( zy*xOF8ysiq!xcy`XRjWd>bo|9HK>w_Z$Il;%h$^$rNf!S6qbwm7O-t>r+bvKWZ()zV%#3&*wEI1IUd&b*Dw*cBfAF6 zyC&V$MM7|Nt(&bs!}fKs7lTd5X@Rmb40Oqn9zr%0u4t44*dgvh6wo9qSQYoRaKaxs z8GdcjIcy-H);NV99ak0*gQkNT^e7?t(PaAzsT+cUdzCV8LkNC*yJC3~L{$m; z?j#)Fi-R{lGGjd<1~Rg}Hqm#98bG>jW5dxF>v^c4)R#TtWsdH@<%6=1hV*aIl@vuT zlK|nkkTcuK5uWN4t6KA2XN@X2IhbLTp4&fgk|&n+iaKRZu~o@YX3m)aZu{jMIZ70y zM?PwdH7vitG(NWZkNRm0wj$~Xr|Gqnb#cMn2SR}DaiZM3i*PUqj->szYeKiORl>u# zkA!)Va#B*ydc4HauP(7?|a@R$sv znkA1U1jr*#^z%H0sB@)harr^g6Ht@16EPYP7Yg%??jwJR^WSzQZl!|*fnMcZL7sOL zNYm9MHi!Jp>@*PVD3!SWP4M={mDaFXkjoa8mSN_WW23n+252uRr-K}SCuG>Bw-Yp4 zktqQfi*2;#cD$X09xcwro|BI7U6NOc<)e?-lIQjtp~ko_4o>LT92d~&lR9{nQ+3aG*@Km*1Z!|Xn!jo;ZMwz*TpUlJ`MSrsmLE($A_uupk5Ku#DC$0Aub4dIp3ELe zxO3*U% zAnYsWz!tXFMZbOHMnpu)R4xMx^w-ZPBiw77c_2O3l2MeT8NLr z0Fo*y!>lL=dP85b&{1=xpRERiiq57>f-W+`<`UjBU@Ynlu7&99DItZ#*T|~QuhPdxIRC)X;jJyDN6j-909Fdk zC@L3pjwm6G{OqQ>NquO1Wbx>PQkYI55q_iqhYw7HXTq3FtC)*D1x-`OdzMT}hv38V z|LS+6rL}e?m~^LPW|2X$sB)+&DT(a;2>dFqPjqBfp}tAt$8x#BV^*mc$F9&;Vj3(g zh52he#7x{SX`JDpn7@-x`8Gl#vK?>$ODaK_zdLE1RT58I?PWY};Uqbg;fMA+>!pbUhS!HRF zd5M)RcXv_|CB&>YHt4bV4b`3w`xG8x8i0@&ip0PnLP@B?vH$ez3OLSX3X-A6O8Vff z7WL5Z7a=u7P=9m{Ed`9nWf4Ys(LbTHBz*l~9nOYRn=$7A$#l{6GS6-eltiFBkg`?T}WXb^Aou|=%y6UKQGm_ z{&{_a-u$$Ja0PmKE{?5I`y%q4szjh%r6o2((;sR#X@{QG7y6>_V--@>{)csQl0yrRC+ zCERn&f9*U~*VbVY%}>2pNyttXtR7f}5YoiDQe&mPtk_ys(@`;`&x>4oOwUSWtVOV+ ziAkv3%DS}7u1s<~T$vlP>V32p*dg_U1XZAfHagM7nljHwg>UX7v0$&BXvV-&*H$A- z7U5O*>?G7#y7jEI2($Pl6gZ4y9kK zrMRTN&{bq

trU?q}P+29NF&O$#QDJ%5BKX8DUtY=cEwU`MxIEb2v08F+;7CjEPX zRDobix)pv7o-&RLLmu?Ef+4qE;YRccj#KGvSL79GiJiadgH6{?;zLj6N@v0ZO5iZX zrrWj|&ox^P=x-{cES7fv_(FvUV z7VqTU5AY^c>vHMuqkxQ1#!tMzNk_Bhn}CZh3peK*ypz;8jqr|AQ?3gMcIg+jKk?Tg z#|6&&ARWnbQD@aTQ%e+3vL&g`u;(psn->KC<_#@MNR3Iw8aS0xz~KZ~8_NgaVJ#x@ z%VQ+zr-|N;AfLUchU5-74K9XT&5;%6PfF54!P(S8Au($ zuMJ|?CSP@3(&|+4&nqb8EXrwnLd%?r6Os;0T}|+-HrPNLZss?NpC@o!+Z=#b5kv~; zi#hsS;)6GUCD=1XlqvA$kRK0>o-JZ|Z2GR?+dM9&A{=9PUe^WwlsM5WikL+e_as6x z*qL&DSGo;Pl1fHPu56;60%!M>APJkKS(=WS@Yy7Jw)6Wfn?bVKv%Em}s}t*J<44`t zO_m~S>zG5FDTDF^0Q2ijAmeHhY~7^UDnQMITL_>UJH(sV5vFF##Tn)V(4{GpkWtPjJ{;n`TCZ`$uW zsoOD?f>ysvR*362Z2mKx z!@%I8n;VW8U!hu5@vuOw9(qC#CRA}4i77b^(^gf+tw~?STOqS<6jWb>=<-nG6F5!8 z{C>_sEotUuCv(zQ5dUKi&1#ay#K|1U8%cKxvb9gLqSDDOvo~Ib552jJ9@wgP>Ue#M z=I{ODukd1CZ2NM(h0I~a-f++NM%C%ZB;34?OGH667B4;KCSYz>h|NGJUCc*p-|7#F zpqb&jQd4o{$@&>|lU#?ml<^z3Cw-gf=>iRPJJczboDx`(r06H3vQG#T22lr0nO!H& zKqd=bvpL3TQnr&-mnow&Hq^Nr3!{}ZAE*UJnIjUZ%4YcdFLWSB_asbtB77#+E21%& z$5Rc`IiELJVj1yvZAO7LA5DVoI-Ikav@>y`A~Z%xnfnQbQuhEdNg5kO zTp=QNZkc@uofyGhIs&b-~ZB)O2rTPTmpC* zYTN1(_YG`*h|#+yy2<3d?LpzSlbj~Puz(wkK(9pns|C1}3WM8oAU4_8qr*H0Yn%$2 z1F8;P{UAKf0kh}tMg(&tQ&&*UoI>YMrSmI{wiMJ0l;(uWf=5q?w!FeZ1tV&dkrZ6FC8H*lbI1#!EGo~4YkIbp>%l3|BHxa_ z;B6j;CzI#t2G2-s7#exz)SGvh1w$%2aLvVzDwb^jQY>D0dVWzD%kPUmQJn}l$w|T% z8a;Z~Jn|Oc)HRn~!bAN1k+CtkSz%sjr5ErR068K%9wFxsY?k}`rW9ZP><_}%SCW^< z*q?UbGo3 zpGAgaTHJ!a>cspt^)6V-dYKt6IDhFlMYV{vw$65IS@^Ui4nKQXXL$3+;^s$ zNj0a=kn{&%JA38d1#=$=GDop}2TGY|Q&E}Q(r?fdF$BK}aKM6rgnewRF*ch=`C+3x z2DLXhowXZ)Q9tX_6d~CDP}W#;3+ri6jfq zIxsE8E3i^U(eBKx$;@UTHG)?BPTd$b#zr-5To@_BE5?`yv&vYeJ7+jUz0Ns!Mi1S^ z9na3TkT+t4OXY(WK|A>9g?@<{a+k3c_K4f}*r%@+nUaRApvwBAj2f-V!K`{TN1QBY zVhNsI8M0u=lJIxQO|wLF4w5B9wX*(D;h;=Nrihj$yrn|0Lo-{jwY=P^l(N8c(dloW zV=Uu&7>S=9%Tb=C$FIx=AP!O(WzK2$p#?$OH-4h7mN_@2e@TBm}+k{_$RSFTrt#GzF`n*BPA~p zzCe|W0GKlPalrjqTu^R~l3e_qQJ!&1-JGDgNU2$@ny6Jx(5op%5ZoYeMxW}^A>lkf zHEsrMZH8T!jjB(AEDe~36x`AP6wK(|&YdzVB@J2ck)UDd?TmI;=AdL8nLCjP6x@+U zS^+MjfMxKQSy!v|27z4P;c5z5&ph2c_{MZB0o$XJu+M<0Yf0#y=m4?0Zd4ScT1h9V z7rAw|n5j{)G>WJvr_8KWwO9Q_jc^`EZ4e{8KOH@jPToA=QaAvx_ku&(?7VVow+d;0 zpj}AYN2&|C(;HP%NjJysjN6>fLRlG> zt{-w^H<~X+#U;LDoYl15XnUI=3uFmqVM!J(-@1XCrB9O<4P*um2l{SJ`}63=7_K?S z&A*hIJlzq$lG>>G;;4=5xfKxrBeiXmoTkaI$c8 zvM{2P|7~Y#@ly==2ebV9U#702Wo&^Mpo4S$5z`2rQ>ZoSNHHNC02d{7l&e9|!26g$ zrijTQc(l=Y_~BHR{~uNs26gun8K4*zPK~VspHI6No}Y)yt{JB`G``{Ob`ko~yFca} zbg)o35GJLvb+GizU3U^~NULuP(V6vM?U{NzgkkOq`~?siGYS`UIV~8U`xoXk0ma7c z^v`BI{S0Zy{+q7+f4d?7*CwghS~&f0j$XB5pDof4NAId+%|yc-Q;?!~76_08Up)Xm z6(6}=evUGl62-%njg(=%nW}anB33W}DGd0>$P(W{zzfu{>kxiDtDxpE`xdJggYVV* z`~DHj5A`bKrXG8rQuSEDYx^2t#jEPAtxw*EG`VR8BL#EI^&@PALM_m32DALvW%V(y zkcH4Q=zyRaZzHEW=q#m2%2v~FShl)VG~ z1ynqlD6VEXics|&H%%zP<^oY7_+J?{ogBvNe4_Z(R7weF*&-Z+&<;F;KQef&gqxzt z!zYAu6>=52*~#R1&LLBcXTx=FkoOLu`D~ZbW}d048}5aSb64KBBBF(f@eG0*u5Mx_ z<&Fc18lv>v*_Wo-V&sV@P?Zai_A#62icjdpzN6jml^0B_WaB6HIbSC16JPwo?uWk( zb@)N2MLDrHNk7`Rvo{A(Axi}p zfof7IQ*b$F`_-MRp9iUzhDqN1%Ta=A{G~JX6FFD^vyt;(RK99O8N2_89NM)@#C7H* zMakD`0>HUqlzqe?LX@BT6G4&h>S~x+wA00eOXP!`nBAZK2Ka?cT>Aw$?3Pk6|B|AD zWa5DB-t>6w{$Qr{8en-&+#W&-qQ;10I2xP^K^22?ra0jWbU?B3Jtp^t`NT5rii2n= zuX~Hr7@W;nkABj%7_nQyfq0UaZyciB)p__{$hN3Awr>`u*aSexS&PDe)zP*Z-?;QA zW{yUfy1GDoOnMt^z$I{c{Ppy1Ksod)o2oxyOAqC zTH}$qPiF+{lXjwjb-zNs{@!1k_Uab#ZWVy_bwHcPd(2z1@E+iCbB6r*48nr01guR{ zi0V)+eR`yw-z}IkOLaaNv^7{Q8?fP2fcg#k-;=KYup0sD#G8kH0_X65HgK5!3$v#< zC;QLTVX98&xnjPuu&{CvZxg(R!huw@MF1i}GXtPIUW*ZzDVLBbSTvqVfgC2@2SD%0 zAevFv0&@wF989M4@$}EwVY}JM*USASmmkd{qISPvz`1&q15v+Xkcr?~`208z3H9QJ z-4Iv&vEcK(w^{NPiz`-Wko|@ei9t(s(?u3^Oc4*f*iSsQj0yXYSKXZ;pm+38g5xK- 
zHmA67p@|S+tXW}gNDbAGzA@sE)6BC`y3UsbBgvx3nw(Ktp&NO##JQNIRey$dAFEoy zWd@Dj)*Edj1$Y?6(k~=lrBgFj#$+>Q(CgV{H4BM9O@&VpTRB!no@}EVvEQ@J-c6)Bm!}Q`5`Q#CW z5nTjO3st#qV?};&skzbImp;vIHXOFySEcH+*;WYu{ulCC)J+qJ+)uDf|4?6~|Hr;j z{}nE(T1pH2$X_jJh=Tm&u6>@#sBu66t%NOt@rD*l;7lrdg5V_5nd}x7U(~@E=c8`! zPkg7H*SuUgGECyd2khCTOr9=|)6dxxR}y!RE1_}#rg`Gv`?6dxk%(7>1iMv!o`}Ed z2188%DIf?{K{nAC$OS4eY9YlV(c@=Tv}z&T@C}s(%UY(v)y?3O78PkbtLPuNo2qdN z%Qobq5^U|#LI&_lWt3V|BP`piQZZDSQgp(~>6C5MVw0hjq7nYgU8=7s;1phe`waAKlCM6N9Fzh7cYm;darX&w5R? z`dxXkT5();K3&tC-C#3uESk9ZQGJV*6VDK~P<@b@%rGz6W5p=Sp{Moq-)be*6v)hkbiUp1M`~ z8czfTc*!1mzu1Ci?i^3tTjc$+-S-XZ&$dhB9Y4%kW3)5+h1j3-$?Zj3P>9xd`)vLm zfDiWa7a-Mwc4g8Y)Tky$mN@#Su%4T13u)|S*aW$E$+rtM?0%7EZ$o;vrXuYgKj{%4 z77Bl_(&SrnayvHSCL9||5OVfC=$bp|8aUr}yyuuCyh{$V%pQH45q^?0b6keoym;)8 z4e#Xw%oZ*^NW&%~Thqyo?+#9Z_xDDP059(zUgveToF8Qi?36>89PclT>YS_JS4%AT z@#p)g@}0l;1NrMN?_2T@JRkScG)$@C`qY@O49>?eO7OumxjVXz3XT5a7fumZcz%#@ zW^z%N$lH4!6~Vlom`sS}9ozqX(<|(?^wa(v+~^-{py>bW!TpcSTg~pb)&K6uDOQ${ zLso$Q+NPgnk|ByLYbyv~j}koS+3gQA#|OqFAs-Q&akRcTW98xbyXEG<=k}Y={J{`9 zT%RzU6p>Ds``s)`g=YY99xO1m!b9b{^6jrD&;9*OYAwK%K7P1I8fV39p#dD;ljUk@ zAtsO4>dY}fW}r2!!ybJmk#$VN*^<3>9_J0v8F*1c;#~-{l>cQ~%84e)j+(9h_#dj( zlynH4Wykw&kouwnWYDI7%k^MIe>-?)4$@iWt7CCCyPV84WmW#RjNJ+y>uUt| zw8xJwidw1j3dZtq=Gs-+Jg-Ld(Lc1mm~XnS6=YuvT?Agu`YB!6MUeuLfW#E3tmZgG zk>9(s5X)xPYcMHLNKZqu?2!|n%o`cbHoR(x52Q8oSTBB$-Pcm+{XuuqIeO-5P*+yw zZYE^Bl+v8Hb!JS(QedMaPjB4fhLMF~(Av^TKoUwCt;BZ}dS#Tms8Gx<1w~ByOVch`CMxcBM`33o47toI<`yT-S102?NDiWFO)eud7Q6EeSLa|qf}SS@G{>G1Y*IW6SIr3qY`+-aUeL&NSL z5bbD1LWlH7w*X6I zd?Q2LLPzg|Y7SC0MXs#UZ12{&Nd@f?P;G}6t%OLzsEIo+#^4BN_DEe+&0dqdb!ieU zkcuAB{DpMoqm8QCgEF)XhmD5fESlA@XiBwpq)|A#F~IO~f#9w8lM~x-2LFf^|ELl7 zVg&l8LhR*2tQFh!727pBg|uPw-u9Di_P`dRGW7jF9F_mcy;Q(i@mHY$0G9Cq06702 zPvn1oIDagay|7JCzJ13?JJ+NWTsBB8PU~rSSjDh7G-F!wH;ko}5@gOws*4Gw*%F=f z8XYp;1v`Qlwog${{jk9$zW23LjI<50|+Pp2af_GU`&7Hoj|d_Aw- z@!83I?tb9V-?Jy_lV#3o8f-K%`j#E^8rq0@Wb_qcd81n$`zW_x|sl74MLyKM81 zWxKn>VY9sx?1M3JYD@n~OPYJJcZO)XutZoTP__kY3p{ORR8JfelWdfFAk-5ezk;r%lasLLIM*LjNsXQ<13 zml%2*2F7PX&37>1v;Da#rRk#(19eMo*&sGtz0s?Uq8vGcs}Q?w*s1=Jpzh>b0VEdt=|5g$- zS2@xo+`3&$(lY3*l&5{wiV^PE9K@f@S~fIg+-l$mIeE%1hNGORRDx>-JEZi9Fr`f* zk2?3hUy&{eKO*spUC9+Ctk=kyC+cc3_35E)6NCBrDcaS(1zsO;#RI%vftql@AkhhG zEVPXSq;DC1TvVRnF~3+irE#+|Q5tzCO;`>oCF4}#&@6|U3|OaxgCCM_Eh%7M`ipL$ zbAESziPOLufgEZD;F!gALqMH`pxBuGSgxPa{>peB^7e0xU>drrNo|gJ2CR&}0s-4t zb4a_C6g%?oo&5kO$f~r(o{#Z3SZ0zQLhtxLlh8RXkO7vM*x- z$m(v;jojtwTe6QbYMvg{F-~W6Fvyr$=@8XB=!?HWU*4Fh4AWsuJo0#fTbE>IM~hK5 z`Mt&*l%0@4x^@@uL4N}1m+t%Tg|R;3l#ko_ugil0+?v>GFJ?)@n^FMu`U zvVBQvpe?&(sxrZc$?g($x)!gIkspCm%tyCnM>4Q%uWUBoW7Al=k73QouQ{<}f^JGk zF+Wz1Eg^N713v>&L^ZyB;`XxI#&kJLMpFrTSj9@L|1Wrf*+*vxJio%D#@rS+9Iucvd>I? 
zCr+qq1?f7M7YVCmBzgK5Lbb)YD6jaTnqg6vOMrrvvpidD!a~`4+7L(7xL}oFTB#V- z8~3qM04LXHrbrIYN(2h{R`3PpUCAPKArK{pS)Q8+mE)W|_GCG3N2j8Ez;K~_;A8}- z<+a~d1o-aeLbe#8smvd~q%n=V>K<%iKtg9#>%(`B^|iN}m|3#?dU6^v;hd)~)~9Nr zSPfc0`GER%IpBl*id;eI@@E5G?%5)UpjRk|$`2z{m~63Cn3ajNk6&t0sR9`R_?TWT2rTT0tLF#G&E^g~_pbgn)z z?j@8A&P0O~na7)T`FWASo7L(KM@`)k7cXHHaTDP%QbkrEphQt1DQO>a5_uVf_II~f z+Eb=(8Jj&Yt_hr?F{Ly_)h&It^3c6Gm*`#Q)4$TF3A0ov4AVsA(psc&%Vn1oKrA6V zrnjj{D+zNq>tX>p0)YX|_A;mw|?9v&3R|zqN$0p5p z6e3FmzkNL$^#v9|Ho-S8Xq*qF&;pDJ%0Yll?!99(Gl=$7*SJ8)x_LL}Gj~qnM4>M3CpR$=3t}KqM@6yd$l@p};SMyaF_JVnh z%+Px@uI0c-FmdB*E?d?K%n^zP2&7*Qv;!E4z5`K>^W!l<;Y0`_O)T*ZAAL|X zL3$+C>$^z>aD&;TKXFBDpGsUn9cz<@lBY4hwtw_wYdt(na7^Clk$R}pfxsE{c(mx_x z`W^#5VV~sdvHOAc(Rs$X#=dCPx^JN2nIZ7gMcIZre>xpd@q@E%ptxXc$G8gfNZhFJ8{huIvJ)8_wWNEent7g=s2LHl90PUb#^)16g9zfYLyz zf*8@I$*a+8tAQx&csOnRf_ymuRez*UK^8>nZ3a2mQ?HQ7N3vvp*5GKU$f;{Ue-gMt zmHJ4yDSKagCUL_VMMQKPE(-F^U*NK}NL|83&7ie}x?pf;)ID+ubFLtc4LAeZc7q(d zFZz$eEYP0dCIW>n9!*=T9|s%Tb91h0AN0u3dSei&YjH6v0`4ZCv@8dNqggTe$jI;~ zg~bCz?9kZ6K-vlM$Rs0|aaJvTfRi=SY>6ww8qkZ6VFi!~dzOkL0xo!q$K?4;wL+kk zKe1L!$~5-MG;v>MDiC5SShw`4hLsg`<4KmqyN^k#o=n7)F~URf;tMs|wj<+AyL(lf zVIi3~7R3oYbcL(jEyPqc6cy0|&}*Zdql7JEQq2Oz0vMT*R!Mi4suWXp4xDXW4fsKu z4g$z?IlNicL2^iJXu?IexXRdq2QGRG;3-=I%XB9fwYya}ERJqwc@Oy8a}sjq%+BM5 ztcBberkx=U)GC9HFfw5B$+VFqAh2zb3HP>;(>!H;-N=4m@P7i$hlG;m;S$$k*^lNV z(;LN7#F&y7)!l`$6A6`pP7*NyDRZ9Lj|@PAa&>mOrO?Jb!+$z}Dombu=leM@i~cM^ zAG9H9FFJ`jcrgrv7sCvw3*guyQ$>zWb529JbLWRuWh>}tUe1R&7;?foBP;DHFg`L! zzKxuHfRKD7sz;U<8PMFh&oU5(!R-q%(h5)}M-so_qf87LTVu!;{caD!mJcm{LBEtN zx5j^eAv?V;emNWr$x1R4cW5CPO2oK>_IFExc8{5!NhF|)BcQ_#A;|47g`z)X^crS8 zeS()cLV5m8JbXNnSsxZj#Bc!Dy6hBsvywTjds>b3X|Ere6 zeYhn`V{Tu4&QNQwPcpna?TR*jRjQ#*kC)+T$$fRj*hr5F;%Nn<4=d1dG{c`;maF_N+hrDs4qZ)Wcvlz2873CzGo z7d~K309+OuL;*OqOe8r?Mu0scfIc*U{u>;0kJ3X-@S`Q>*_rc)3B5G7-yx?!xvksl;(Orf zr-bO{VZdvTzFl_wX!qOTef!>}Ccgd;hSxuO;o@5Cxaa5DEe;s~fal-oh2JIyHgp1p zj!wS~jGUC5{`vVIM7aMfnf<3^$X0{!L|Q@NF_Oa6z}(mk#urDb-)IoXhX657U1a@j zL=R7mVI;X9btC}2glYl8P-OAM3SqNAEF<+y$}GFl#9&i*&JJ_-%ckQa`YW>KWXfF6 zFjX%1nC5=FeWU%k`}L3y&kIDK?dxdYNOf2Qjdl-$OtxdzkB)5*m-}oGV$NqMTuk^q z$&LGRn8{6b*pV^EH6OH5whMo~w&RAS>vT}c@TLK3s_kNU#|=~JB5Ml00#mK_E#v_u-Jf7{*Ai*?&#v~%ZgDiO?jcNpSUWlQYJXHCso1o;3ix1z%?ebCMWUUG70;KbBHZ= z{QUaL_aKlB`QIUp)rB5{Tp7`FR%dmbI0w=j=8OfyvSk)JQI3hM2UR)ob-j`+OdgZ7 z^IuW%%ZqKk`WZMZhC!2iLGlKx+Wem@$p_OVcdKBwXia?BS6uwYW{kJ;npkP+@wZSA zHHGU3AnbyAL=sjLvl=RL579YFGnHRy8DVb}DssZ_s_U)I zK zsQlq3%P0V;v9BR{apx36WHTmSia}a`+#-b$r)y6y6h+Q!VJy<2MHY1oBCr;4CoC}r z9=fx)s#NRPtxg%f9T-HCaZt<0KfSEqvQQZYH!PS8h^;GXW0IA$sm-W|h^=)s+!b#H z!*g;Io=lGAC9}XSla4PcnM-|D;0fiUCy|dml$m1G%}whxlsY=B>Ix`|#E>fDWt2|e z{-V4#o#Shl7agRmJbOv5-fy4QAfzv}HrhSXwh>hiC7v{6LUuah!lu8j+xzd?#TqXV zdXtR{E~l`yHB~iaG%U87SavlyM}u>+I%eX(>Bnd}$)h}Ws2yfTxoSvvoW3WoG?YAr z0xv06P}yW3np%b?tT_oIYhwOZ+d}M;8kL_7^?}e*b%+^zNe<96618@h>(kw=)z5Xi zK#K*qN@l3t#q=TCyDE4is!u~zF(MFRX%ZYW5T~L#(pQb00!L%Rks37Z9t%2FxzXyP zJLm`irM`Q3{EJ~HDK+9eFl*{U@q&XIOe@96D_<7{ha&8df}vv(DbY^AgEKa~*d&1s z+B+f!X}fC#>iY)|ML0;q7jb|ODvOyB@+%;d(U#(c>J7yU!Z&fZ@}M|y9fJjn$|y_z z0twWhzPdT!1@bEf?)<$A`xtwg@+EYZA<6+$?P$&4i5ZJ*M=GnceIIBq+Keg0Rr9_g zgsr6f;yT6t+a_nz`0j5S`9bcZj_Lur!C%y9s`}xKrNpmzBa%idN~R!J2vVyWcH8TU zis&V?M)8~KN{&4U90^kBC-5x=m@V&!k=P7Li*O7Gu1-twij48OZn~KmLd;kg1*b_U z5izw#SqBTo1l3&EQO$YtRM%pUNug*%$XA`c20@EThgr(>2Jd&v3oR1GLQW@%YjBR^Zd;cIr^_kQVJIZbo3Qh zQv{RU(n?MS@2*#+^IdY=#*Z#Dtg55BS#Al}P7yoVCp;dLl$iTTYyjrtEU5c;iBL}%r3bD_To1>F5!Hhg@KT5>Fk<^Yu5EJ zdjkT&AYWv=N$3OR(P)2;?#{bCwSf+>ZU6PT&L9q8=7p4 zS_)1$>R&b4s=cMI7+PdJF(TGKy`=TaHnv8sr+o~H;17yS)z^7bxS|$Jd}E(4K%cMp 
zXU=JMu{W;JWM|=G?LviMiVuAx(l_T{#JVFC+CxOh^uf=083^csiEbPA0an)9Oyl3g z-H+tAa(j^vRr&(Fy!WQm#Eq_)aG4BgWc$d?*!&5dq>i_@zlZx(pxPEpn@$`VsD#H& zk<@^nn3)8oZ)UheWBd(T1SO?n!y+--W9yB2cF{u1Wfq%;_JyScHc*ws%7YGQIMz~# zv%ejkNt~CGkUM205bfC9StFe6_CK=l<>TW6#7Qx)_cW}ihbI|`y#ctpsU1Cwe0;tO;g5P!y!IFcKAg8@=WFN4e0Yt)NzX`a>tV066)?>v#55k z*<;uppe#g}o`qO!N|egRhlHkmI6PqwqBBEKAbloC2nl4h2Xvl5GG|fE+S7^+xXgbT zW5CcbFHr+Hw%4gwmQX>82CQqMxcgQAXJ~no%lFvBgP~FHNFb+0JQ~ z98aX5f>^cfJ}li7+i3q|dF|glk^9~zI=8HT^ppiveuUr|==|ZSY?Yk(NW&SE z@=QpPTdtKKfIU~f_CG`#yp|_--=w1zrnNLgSBZ3G=UiUzY1b>BZ?Cs|Ismm>=2!vk zX~PCM3h5&T0Ipc7wu})LX{&8#nJM$XqPT1fmF2lC*D0tu6NbEzp#C}TBaQ_dKo(XP z>3;PWb`<`72?ynmriBFoWDWZXj%|l(#qZL;Vt8BAM4K*yfS5@5$~JzBskSC>*>hX0 zV6!RuER|Gn^*nRi6Ee2Op2SA7?p92C z(nNB02U(#he`4c1$?_1Ez4kg{fT>`O}eTC(g01 zIf>vKp+sulPXbos&r<264&Wl(cB!0}xc)kgmL0omdEh>35dM}eABn4gU~A$ME=w{U zfx^B{S|n%@cn5zQh07*JA^L_IDJ2gp-79*OX)2JFmUA?tiW#qqxYbf~%*KJOa<&g+ zpI~!@e>3AhWj*vrBg^dE&B6Ev-giZ$fOfnLPfxWEs!RvpC65{UJK_+asOer&Yz^=wS-7-9Bg4t2mIhjH~x#!EPrx_4W7Nv}(m1vlEI!Tt%zbIxrB zO8Pu2MM+i{`T=9KUv~RO^fsSi$D8}HKTPh?t}BFBGB*P^3T`@co*FVei%dsA166hKEdGlp(NBlDX z`@D)6gC_;NS>|}O)oth3k#|BR184qo_{@JF(tp%TS!;e{(+^WU`iH6hZ)~Z5M9V+w z<^NB$C~3(d{gfN0U0S6k2*FWB%EQq97isSpo>{o14W?pSZ&#F1F$)04+TPJ5p?RM1}7LplEh=f-o-bOV0yp=Q!Hyd#@FWD{E8y{0IQg^#NK4x zJo!n^wIQ0IU#_>NoW=e#P30%*iFo#uNCW{O(P&bqr#G?{^R^+c8D%U9;$eh#1iofDmY*wD@#M;H> zE3mXJ)Js$=nGW~IH8y}|$~GQVr??2Oain2&BwRIf*22cbs4p0V;pn(4`G)1)kBnbb=L<;pJ)=FK#j{kj!onEX7k`op}8;3^G0 z@6*MsyPAKi;kE<0g_|a6#ZVeHrNw&*MsY zm@JE$sG^Y9&zo4xU(|FJW;#Dac1o_7;=#jiKA~Rg=7-3Dkz<*+xNe7v(AsPy&J0fH zh#jX8Yj;!3Yy&{uBjUm=-re+ zcU^$*%+42*cA>U1Dit-cC)_d{_YLm9{sua0c~6tyD|mRg|5n)jdvrH3`G&j-8=L4m zTRHtxfLqbr!&u}S3Tv(Ze;KuBsaU#VtDmwJhrx`>H5c)VY`hj# zUCH1CwJs?l99-h;#+)6$pLVirCSV)4oc`NLh6k0&{AtHLx`5G-IMPwutD)glOx-dlOa;+I!Ka zQ|SrGA@WgJLp}}r$@o1r)|lLh_5Lly&|&Oo53kS4NzSs*MqlzWMXPn>Q+ijJ2j<=Q z;_G(nDy91|{wRkd4)Xn#l1Ao8W$oH*wgcR#g&T$+24c4^tv`qTpZ8vPBYu8)U5 zM&b3+fZsY*VM(LVR$GT!y(8^vI_Rewj8w%$jjGr1oJ6_uYWELl&D zvDQbYlW(=Orh!ekq@$KpI^;3xaZANr=6LvA| z_+F24>m_HYCu#f9$S4MAN*503QR3f2?J97K(yjy=<7QTI54IkHHW}OG^P0Wwf3##w zZ19>y+%*1bT(AUxeP$NKgH zeIc=UKW~muNkhcn++{69IPv|&MA-3lMNuU{#?L)iRCfCWw%wn(`Gn&ArSs~ z2--;#nOhTtWMS;oygobNhd!OG5?3ulxRsp|$=B}3E#8jb?n!sgSZP!362}!7VuzU{ z3O>+j%`j+QMT$k!JVgS?9UEYkPT21aJCz$ zK1HhosmS}8jg(OLmAb6?m9M2IUYE)(SWA3Pi4nI%p;=66gyA|6F{BYt5O16qgLvjC z%Oi$u1k=q}y#2ZpB%@rSMXo*=1L%TD8lkd~vTb;W6zxH&b4+X3n~@sWvhu)+MMa+) z@?Hqm*%iN-T1S%`EZ~5P=fm}F%hPSC&n?_Yg9y_262J0aMKW6Ocu_D2@`}CwEz@K6Q4f32X;URRi{C=awm9phwdj6$M z4e7!NFnE0BONx3E^Qu_cQN+W`4Iz)P9jVn!2bt3#80HDD%Dr!$og$QcC2mjsf|NDD zK8D;EwCwHjC~1dc zK*>e-@(w2kLcS3-(ocgLw|XYpfuDMZCL=JwjX*wvx_*Fi4j9R_D?1;VP*&4>C)l9r zrdVDJ5)0t!!U{W@etWkMotvGdiQ-k|4^xA5+|2XVrOxGf6%EG zT{ZOYJ0VXjuPFGzd%n}hp#R`uq|(RcWT)^)<78(1BH>4T{$OL{q>;Ck1^@s$OJi(k zq-UgO0Q5r?T@>5|933&j4^5nrq4vjrI`BV+a!#TQ_vLS7dD8b_&h~w1TL)7*eLH;+|ANZHCzR~~c117~!e6vNlC?z)`q0Bd1aag?7r6uICTz|0A?5HA65F-Ut#!)H57ReVv(Ns%=$@`4!;C1M9xQvX~Nm!F3B=McD zFQIVX2Q{P)Skq1Duy!f|(!acMVIRlRX9w%%-pQ9_hw80q-Cy{gt?$`zHOc3g@-z#3 z!m(Q=>I7%Z!CT}D8F2lbvKc*50YkqGL0#h1(Z#7y{j3@J`Us;;Mo6jm;6)JBrhizY zo`U!fPhUYYC4^!ZZ)Yt*B>8b3_}H*R!TMCkfh&% zbQ(Qj+qQys6{WOkEyaht9>r|iQgosNx!rz#{xjUm9;f4Pn9t9oNO-{CWwBjbEvCYA z0V#TE)~OTW1fPCvK$RBaNnTIQLLBkbyA+Z(C9sFO9Q@;Ky|a~57}<4mjA@j~6WRDANA zBf3~?exk?=w^M-0`Sg4jusenbAN<>A@c){?!$hSy;J!a;*LMc^-}|ip1!*t@dV=Q(*7Lf}^Y|(zXGa#O@<0I|Y;%+_Vu;$9jYXT$UMgXOllr38U_Tlv zLw~`h2hQMk>lDHtaX2fV(T}`Z1pnBL5B_1nwYvQepWTfbuxsaF%5Xyp7ec`EH2~QY zE_kw-E`UBXhk;n?NE^HW{SkB^6C*I6gZC7AX-`{%>|W>4v28**_hn6Au+vcO?(Ykd zD=`9uER#jdhdvKt{Rt&nSqW^L3$sdhDMY*8-i*X~0>O$C3q^Y#U4a53e6t95qzCC$ 
znp%^*@K`^X8v1yt+V?#eY{VCOpzTYBHV zZ?Q^wmN79R27}QVX3Z_{y%E@p6C3aD^c}V&-hGEk)6(Q|&+RtsSzMznHWt^C1v8eh z*spUv(Q>it@v-dL{X!kLs&#bzDa$5p&PR=!JF3QZ4h`dEx>KqJ0Ap*YRG{NvbX*~9 z>2ENum+ZI5L9!3xo?dzld_5}}vve~HQJMTRz!V}2*#i;md`CD~;b}{2gmOP2>rG&7 z_=%fDDu4FFyhal40pk%`IVIxs0rb#5bA>FEYdT38SLq0g78$cRR3XT#XuVQ{*YUp) z-@?`u0~){M%K{n*Na%l7W&R~~hAS!n|>IAQ$aH>W3# zotNpNj^lw5*XtifH;cYuVK!K#a1Xm$s3-OUBO)AffY!hX1+6}13hjgeC)9w}YD3Wx zxM1NVv| z4db}3luwbWt;&Zsed7?_2k6D?Yb01UN;a4xojp({E(7S6eg|!g_m-O&HobNhrdH&x zv~*>i%{Tc#Tg5Qeby$6(_{G@0RQCbb(THGq$cc6Z?9 zG_&yI&v_<&2ueqfHjhcWP9D&*6*Hf-EUfHkO<29>t_1Y!Tnr{kU~5BZctCJiG<_+p zyX{uX5&V#It@2r9)mbW&JwAdHH#0*_XBZea zV_oP7sqBGj@WhVuM`_;1!^i`pZh^!IO7P;eXIOSC{M{&`z^Z^oX%H>0cPk?pQ$@i4XrnL)yx zctU|*>JF|-;TU|eAYqoGV9HOY#s{A%DgMGDngTUo1gKQSvWb%sMKT4g+c;hN=8FRH#oNWVdX=ybL^L9#oaIFFtvKTnwwN7##Vd!G<0H-ifqv+7l zK7iO96v(;g?}XYrJ+Q)sDNk_b*|e<|V^;iUS11`c3cpA5L8VHSGVb{!MBAo5No>6H{BmW-TZ%>;=$| z0VBgWU#JKIt1v`z0S2`c2?rE#QYL*9|6K2?rf->LUCTcetxHu!5l!LovGbBcYrpuUXm8O3X9kRn7gn6iu|juWuKqyVY#|Vs*~Q zIbO1*jM|j}efh$E5e`5YKgV{qPVORjo+0ri-5)u8Apv+P2he`BNAsE7B>8Ae;dzrp4_AMyGpeqGc?9qsuFVyG!x;H9zDbr&k~UcNhZb^TazE!DU7L zohA4VNkli^ZpRCD|1{4Jm)kBM-qEMAKOcgB3Gi(?j?`m1d2oJ4%&@eoihmZT#JIg^ z3O=@Uz58m#Ku_$%Z6S=?N%M0b7Q8)o_~7geIJ}Ab;vZeRzc8rZv~GIon#he#lNtX7 z0r-%&@ZH_TbiJd{@sVKX{Jmq+`7G;vcBT88ocY!??Vp-4cH7opdi0!2#Q=WC@7&qN z09&L3exUd)>g%~%VChs0e701-$98|xbbpHP>q*Gifl2;K+%pE~#F_p^1lDDFX5rf! zP4Rq*={FvnsF|4HbAOr9`HV>UIzM9Td}nk2>q^&icGLpe%NMx#b$8GAV!iel-RZL; zFW=wcUXrJlh^lH#j^UIxRv9r;Wn@hqFR4yJDrd+U-<%3;xo=L@#)>sC%3REoY!*GJ z;@s4vJ~n(xq9$LjZ)zl0m`{ZprQAroPoq6nh$g2hJRcTqCM6Qpz4$}4BdOAaQe1tE zXAZprAJ{Hx8n}*OKFqj#*vykV{z?+K4-907b|Y52Syh;mh=!{&Jlc#KxDppP%uPf) ze=b&GJW!Xx5fceC&aHViz%*;`BopWM6CqNo6O7pIU04}NtRXM?$Q zbX&ADoOFNfWZK!g1{{}sft_22O;`a~rI~b!`;%c^%6HB3Pm@MF^DSE2Oe0cW}=VGj7be%R$O zygfp1Gc{IBu_`?cHLnCosw@0q>Nb*0Rw0Qo>MW;vWQksl;33=>gq!TxYf$SDnUt2c zcGl&_?eE^ifb-+8nmfnS`#uvkE?#f@+fK#1^KK=+$nm^p7DkZ_NI}+Pr;{``p-Wr; z50lb@^i)nfde=G=t_D5w$;Dee==#h(%dr~=E*+o+JV(elvcIVwZGTgP54J<>qyK_~ zz9nX{XW8H3m585cAwMJEPDxZ0H!aEICBnfiZ#(I3TJil@`kTIv8)w$ZNwv>R*cRG? zpu8*a7XCVCrag8MN!+BE&BYkW{?|bTbb7=6&MBYm!PTk%v zm};{FtHxPXvxm)MnAFM_GhjqLE=UO_IpslvLb(zYq67EEkTat`KhC$mPOX`! z)+&RJdYS-otohe#lUPaVz!5Dg)%w7Z=54cZ{m{ANq@kkFN95j3{~`x)ZOJ7|FghOR zN8Mjh5v~TuUs(qGxQ49YT zf63gPcxAK>cjGt-msPTpd+tE;aIm(@7v*KY?{I?&Fi`@+iYp_R27W(4G-cL#lGn9y zpT5S;@)+sL%Ddm_ThDcU6io!1g{YvAeYn_wxaePQwxxHh$KE0MwUkp~SvGx}nKrsS zaQzn6>P~?KSUStQFF<7=y4CGxUcM61%w8(Z`M=!DvAT|jhge*{`^cBTl>R9JwFs2) z<&};AQ;crF$5XUXE%{4(3ZX;Mn;{GJJc?7e-hoKgSV?lcX!kiRjPXw5D_+lrYST7r zb%&E}`E96V{cV}2pCHvXSvauxZC2~CTxdIwBm`(x&`~-kviY0(fc&wg#Cyc1%%2uK z&bCDEp@i2%JwV0SNjgx0>H8~6#SoTdSqaP{Q)1+lpm>dlox2!}j7ILK(m`5sPN#^` z;|ZvEo&;Yme)_}<@qe`ExyJH$ zJBx)?OTP`FXmGr;}#DK(tkZtP(al;COTH@GG&FjX6^41H%bOhtv1n&Yr zgW}vOf>C8sGAxqQtxE|NrxYCFoYTv~m$(&!l%W*e4xw7gB$WY-P#I??REvO!Mz!8~ zQdGiaUvfw*-=EUT)s#22IDHjNX^3B7EQ1`67CMII%H_*S74F4l%cV`Ea>^IM zE#;ZVxyut2Z$c7OTS<`t%8L?l@;GATYpP~)#LLD(;UW~f1={!JjXsMR=hRJog}QS! zGOt8kHsw*}eM%XNCW>7M+49y3QNimLR!q@WRTp&PnFlky-6Ee>5u%Rh;fa3My zX_mI|lvM7Rfnp1T(H*NxsfIcbP`Ilr)xEt|0ve7y>kfMk!wD(LePC8cQCi-q#Jr^|+q(pDT zC(Ozb7D7D?%FIo@#E1Y#fLJg`r2%X3*?XIT@uUvs*6DAGry%QEy`I{k#<%q4O7#FS4SLKCw`sp4q{`l4{b*y(E;kr zTfE0U;9kT+_BOWa-od*toy$o(+5(tjtNJU>$sRN=HN?<|}I8pU}cVP-jK`jt8K}sb0y-9e#f0 zoVw}hx!I{(I7F_@=!!(^IQHuZoR-hL zgqu>#B(wuIj*lG|qPmK*XIlD32pY2K)g|MpZNIif4|vHTYCnS!QCU zll57%N}w$$&o%Y>gjc(dpP#nt`J806`=2qhfS4XVg7afVJ}}&(6f_nc9GR9&{_daK zJ)_`C#J=HH{37tV`C30Tv&eSBkjGRiV~nXrJ&9y2|EBiUVQq7zYgn_mHr31>!*ty? 
zj?+}r)Co)e_T;OEv@{kO)&Y^VUB9X~6>gX3)OK-Q@<~W~Wx0|ns)8@BQ|H->np2~; zcuG1Nq3@{ljxv}qgb+sNF2{?_JY(55D;H(U9Rfehkex>VaG{VrOwh*(Qm!cID)R|X z38k>{LMTq9QhWTN`>OM&llS`;KrzhY$8k4%eB_1jqf$fpZCT}IS#?!0M-j+MY9A?d z)pF8VF)K$Z4y!IrckW-|W*);-c77&vMqJ{2hE}{{ok>!ukeJl&ptgM51H7R8&omMX z5iOWPjFOt2q7Zv+k}YEIFCGu+9X{Eel@+qgi-cE=pH&RrA4nf+1D{YlNqzUs*?a}J z7~)SbEA%gg&Hdpgc5FgE_hEd)0(VY|+uFoE>2Dmv+#d!o=mon@=s7UH&?^!UV~|hr zs(wy8!SP~a2kjMuYgG?d!0|b8*+MUkQNsp1?CY{$eq<<@9}k2dz$;$RvvdNP@1YN> z50(%UD2IeEXM4$3)Yww^MQCobi*{Bw2NYtXE6hb9@7PB&=5%0Mjn<6$HQ{A1+UZX zX^roExTO|&IemJ_!K#(Zp|!N5iNEyC|fRKYp zl!oWjjW9fK6m&(1!jC*QVLU^RI(G832bb{ym+5AOD>5~}q2)ojx3dha;e_PsRm+a5 zP4ugtO$eyr^1YFbvJ2{BC+p|!wpy%)wQvP*5-aeT5}hirx82>kvO_=%9@t{uqnF_p zPUsgSUuBLa#o+fV!6U@DDq#t-PhUWoi>x(sK?s7Mla@AR(Z9YFQd0U$4i-I&)0wt` zNXsEy5;Os?VBgx9aU2A*wS@|g(Vr~UALPEJv_7*9jTLoEFaFKa0xAB@kThS^7|6@t zG`r+%C!ev)+T72<(#N3<^t1>~v;p3`HCN03Gyfhfs7_^w9}S-5;H*dttg0$f0RS zp`X9<#@z0OV=>@^-^n6>gCF()-(ipVy$6E4RG|pyFk{|rVD98`*W&8&!XJ|1j(+8S zjT4OT3vdpL$|`)KlwMBZk60*dw~?&=IWJlud8SU@{dQ{zPgU|rr9`sN)C*WjFyn-k z-oLQ&ZmN?(j{(-D4&}SFV|pzP%XGb zgBS>DWIe{NF)t;Bw#7drb{bhsqy%;%uU%W2k$j-cR@GamM-ptq*$`q8o&3|c`d6ba zY;kj$o-;xf$2#3HR6ms*NkYjSFuVfGOd*R_;?c#53AxsUdo13?w|vLz&*l0YkV69$ zNBdrxT+W^ti*68xkq|aIyQGdnjEJE}dS!S2Y#oi6pXDMZr99n2kIL?SojRN_HBq@p zpU6v~TL|;M!r&o-wgTxLpb2%Q~0#HUw9GU1t9MfDa9UC<|7 z1u5GngwiH13(1mlc_-i+fgSn9<(-E5(y|E|w~L`)@bhb3PSFA(W}5T}YofX15tg$s z`jqYX*|+BOugsG#XOVWQ1SG_?er}THv?&T;DGLVGxksJfs?Z{d`%I?>)%j9x99#WV zXFxd;uJ&+SGZm*0o$1*pO|L(<5eQWHbqZqN0rH7Yw`Sx@c)8byz<4D~k3o|RU9lX+ z7>pwi{@&8hZ#auLA?)QKN>IjuIe2zXD2&ig+|~PEswt}moT~Z}CF9l;-^-JZS#J=7 z*yN-W%<2t2suPNJbd$JPF)4C{6+Jv+;_@r8TC04H#>8@a)u_$#bnrui&bar^w7WJx z(R=h%0V0`$D&~-x;ukM?;+80jHVTw#u?Y8D;i|$4rp=mZyB^Mg4~vjD38nD_VoA7N zj?o0%Y)u>@Yz$hsl*icfyeA^6^P+q0MO%h~KtMN8p$=q6H~GY`VdGHD5~O+`QkANQ z1`5b}5E^63%4ty=_n`L}HL8jBGyji9daZRK43)M3o4LWs}7p?O15~uSKn@86|$*H z<|X2%335#U{WWIGY&E|q$qxWyZx|p!^aMF4%4MM%G98k~$19GO#7(BUC$3k;ipNp% zh+7+~@T!*lt*)5lUI{yaoa85X?Q@5*@4zKP!EAIz!E9497v%m{hHP%OZLPFd{eWLM zom-RPlJYx>xv3Crl+$MQe$1Ht@vu3!41ouKMekGLXZ9jY&yN`n+2g%b&vNnWz8#Gw(aq}DQL>G% znKn8)tSBMCjrAJJWjs`iM{y_UUU6DdM+a&^zkXzG;1@R4~Ihy=X(0v_Q6m*pIck zclh2pi?tMlQjrghtYr)B)9x@6BQk_9;J0`S!u1rURgawIaNSct<7+_u`a+LKsqrc+ z(k$z3o}_M`En?sVsDBNLg=IDu^^0<}AAYJ{Gg1`d~-{ zjRCw{TPDRweayo*An*viX*-1}bYu@G?00a2d-U+lj6LHFk)j9E^c}_xWltf@3!Blc ztvN=IaL0?W+(BW!mx5J14QE5hEpcFbjFjB${VV%_LWXgL-9mwyl%W zJ7cF36;1r2Gd!`Rq<7(|GF_50a37B&dhdtH04EW}q_@?`fCmOkZf*B18l&mf7~W$Y zKdas*!qa8by*`#x4o+*nM+;-$;6Eh_E*WLB-dtOK%Gm)%5A$TiAn(M#xU(+KU~eST zzF&&UqU>f5v&sUMo#}CVvQQp)B=?ZoAXP7_V^~L1B}s=EF|%G0xFi`5;9c;Q=8TSd zFTV7r7ezCAFMh7Ni(YOqekAhv4;J+wt>7PG2%Q_wH74%xXaRo=)Vj2C1mPbDtLf%# zAC`)i@&M|f6akx*{6CE6g_GnI7s;W^7?`EdX1!SONQ4NK%b=8MNasaa!s02#J(p11 zwPcE@mYD7618n3Q&S1(z@#Gy(p;%(F{o8mezCCl5^OSSpLKtNj#c*O^&^&o|{m5s7X!Y?!_+LruET{6uYup)OHf3An<0 z-TXc~*5SdT;9aQ*vr3jM%%VZ4xq0zk0}yOzc1Pbc^Hi)9S-7`PjYh+SntGYDhJhOi zoqgkSfOmZD71pnowrjMS#(w29+9WMB4Xju!8>~#pvIC_}5EK=K9VB*md#t?7m&&W2 zQVXo2WCcGB*@MM2W{D+!MHK->i!!CE;vfq+8kLJum8CfkWjVNW>h)^YS=lCv3W#Q^ zfzN7bKez6Kzhnvp3-RfK83f?~M89}^DUOmSoGX&D%8=z6;E6BNu|sKTUIay(K#CaJ zEceuEz7_2=4u1D)Ex@r_#tGi~4ouP#PKJ`;k*`K{sv0kSfU2e*#z|~hvx0n<6cg&5 zw=*7E#eLZ?ZSkdZ!g9r_G#p3t2ivj)^<~rN%9+3xj}SRFO0hXjj7x{)2B9&zKE^JU zmaSYaK{i%%1JBdq0_NN+RTY)DpB*!%M2dHc6cC$?B@>53>N}AQc#8t^bJXH%-pfAX z6S!ILfS+Nq-34g!V&W|>v3OyDhG>toH2cs+lp@ONH(+god=UZe`iC7Ygo5J-8<#5! 
znHw8N=xb}eBChQ8SBVwmcIxyq(Fe7we!fnnGycr(r{lb4x{I^9kR@RE@Czy6eYfmS z5LWC54J%%2IsuS7bbMKamf+(88T0FI5o8e zG0LROLtcM6mm?^ov`6xm^Gd6hO`LH)BC*d>o!UGyoU7#c5`7T!6!L}+ntNz_2X8B_7&kHRjAgEI0hB#MR8I}&glzK=F%qC>b>*WYKyV`hYeYwu%*CHo z>}ekO$7-l>z1z?$hNft7_T5eJ@`hx@&3oz;SUv7Cct5fp#y5rm0?^e#yAQ2L2F*b#;vmv-p zm>U5BfQ+c3`l2{t-Q6v9?}RUyHHZ&o`M!Alt9GK+*sh2UAh!b6x}N6Hc#R1BqZ`t* zD)t0da=mmBtV;uevuF-)IGn_D?C|bd#t0!aA}w2F5l9F8$XzJdI_|yfQgRL%TBqkh zXMcnI2WEif8?FlOydLHIjTr#`rlANI=tq&TUk_}u`m(MMr6_glaz6vpc9H10! zm2gbhGeI4I^;R9p_cU&!iD`s8xl0kPzQSEs;nzr}rK`J47ZV%G_*|<{j;H|h!2b1M zF{C=uitcMiJm?gf_N@{PZ}b9BE+h!tLJuDdW#9_+l5pwkEpUi4H#3My>TbQ4QTR-; z34f4+h*ZKO6l>B==mH-`CR|OtNl>8^s1V@UFQ-YwMbHhd@cQF!^70w5Pxd9Aa_eh3 zB7~F>;Lp7VLy}{L{K?!cR9;D}cr}S^6Get9fJ98V;e}UF2%GfUOBYJNHrnVN-1OXjYfB_eV z1(pH%LGJTo_mYF9gp7-~$K&)sh)mqS5!rQR7j>$6_rm$w zA!@W`jv)lRawRy@AHV$?qMOLkCUhfBVx9qHH<}oz?HHHZm=N{VuBJ$K(#+@0a(Sg9 z)^E-TW?wqc{PBiXu^xxjy8$i=+?ICBh7rV@iRX6=2)_}wF&dA;9F0NEn>)Y01-T;A z8v5+m%Z8M3<=vw63h zf40YbuF^48`)W(0pIn@F7jP+pBqHefjX1-b-s7IKDQ*X3XYaO<5b6+{c>ZeoBV#*X zBF)5|NF1M>x|V{4K5kV6A>lK^m|v2f)h8DfR02I)R|nRp7C5jkY_#p<4sCCb|iacb6fym7Iq`8KEd! z!U|M4ZdH5L?uX7&)*433q#0O%PELOQPxm;bw=^-p+jk&>p| z;yrapq*U8^0^WX39cWAxL==HM<`Gcj%Ch&mG34)k6cW!=#kyLmo zu`mM9ULK}MH)5wqCMT7}_&&9yU!W6s2tr7V)X5*YCimV~i$Pmb#(&hk%!cdG0eANBNWs-~0MY3ohj-C|l)vS==K7-~xJz5JnFgs+@#K-3fcWX}_P4HrUkCd=$k~YP1MiefR)g5jO^_KC@-ZnadR+BBZVmJag z{UlkrRWSLSKj~Zzn(|+*AzSe1Tt#S~dtKTD&v#!%Z>ss*_t;5G!t%Ec&hHJW!Fn%? z6ZYs{xgRYm?1SEHFRY0(H#2>RKzqAz5F@IR$;Lsw3E$2dimY&EW*TeyuV!|2aW4_r z1}LwvNO0|+HDPdjkWc)GfP3Mib~m&SQTCIPKM#L5&@%hz7$}Z<+CojqMsP!bjwey) z9+~HSTTGojtupkV^BKtvfA*UMV4W;J7=mBiX*0rV0}a!E6fiPxsG9}(r&IVcM=4jD z&z_bn3)mufKrM+?kgS)8KWP*h(p$b`;w}#|FkcY;3Snr^F-o89)zl(0AH2}n>;+nM zb%F`_qZU7lWxl{EI5KhPTGS|a>ZQQx9x$nKX7rAo);J^1goCV7;*qF}KXlGDwG<;8 ze|`E8d+sBr&Z~zi_WO`^qm5-Vy6Z|W)G55ZCxWUGD>s4r&!TEk!Q-l1V42h3e?bt- zMQH?kd0X_ldKFcjGY z9k(?}1irsKc)%)toU#u@wSA6oAVM%7_RD^^VTP@}FIMBO#e6o4L^XU#w2RQ+S?6|i zUil99%KD~P`7N2D=(rCkxJ@`~YF4~4SKwd;y4<7rk_}iaHYHy7LvwVHcIRYqhv>}B zNBkHKY3bb>_W+!f-gp6e>EsX0-BG z_}m?S%Xc&4PK^xnNEb8p{3q(wKXUUJl<#uScSw^#0Ri#-ZVb1oUWY8NFdOL8 z8VMcMfz{P5nc4yP4vun|5XnR z1kFhj+#aXLr~xXw>-V2K!6 z_4wS z%jj@to%(O)oHmmlAgzI)J{6oIw`XwCC@?L$V<7$<3wN}Ur4Otfk2ObIk;8#;o&8^pw-9@P%Q21G{p8J#e~q3fFD#em`-c zwe1YP{IwL8P0q5dUtmaxGLOlzVu%-+B%ZD(AF4fNA0T9sldH7XnzN zK~s+Pz=9*rPgsijSjiC#kd2-O@{*DMWM4kxz=`P##7c4z`=2U zCfog53$r)$O!l`wHY775_O ziV`NhWTSqpk+HgVbauoNG05S7M8soTY!|Vhl`=ZKS+)Zt-U^kG9!MmH&W1X#z6>=ZhWzQ>ijbOo!8s*Ge4}C|Aq0gS}z=!JA*`nejvM zm1IYlHmxU+>1cZ{;}|Se(L5c}MrwO|;=&@*?^?$ZlskSlX+A~+foA1Kn+JPw)$sZp!y#9JP$zA*wy_AnDm_>g6MG8KXg z$zZ5vOHWL>i%4=_KQdLLwM`j>n`N)_K>`X{OH=PE7q^kcV_l|P%MH2 zU#MRRcSxVq0DI=o*k9u>6rW^=doLQJ6fY{Hzg`|$e5e7=8Af>%(;_s})3nS@PU3M> zXwH(1W=ZL!EIiH<(^M=NNv7-pM-sH^W+;xO)A!>vw0Uvlj$heh(4=aVO~h`=)8x)X z+oWVXnZ+i`FU{$3rS}U=I-Qn>sdl-;yv#i4Y2!BBSwv09*Z^WT+@WsA`XXz`XcYwl zg}3sgbs2E`KbAp;i-rH7@(=q%i9r;SQSg9$QlG`m59?dxvd-@;pirl zl@6amik9@)S4b%6CQ4lh@ye^0EJV1Iqce;zCQoaf?v0pScTzD?UBNNzuG$|dE08li zM!Eg%@TpH0GZfNoms{)w8>=#~LdUoiA8b~HH0lmBgUN-BMQW~_dVx|nk{^Ou5&WhFYSaid;c$ju%^+S~Yp zIrNz1c4)k9qM6Cg8aGOe6}X-_PXOE^d5*?)LAgQhL$X^6mX&a}%NOhpD|Gl#NgBCr(XKX|_Xu2LCaOo-jmUmH`E{`6qWlR(AO&`d z?gj0P)zUcvBM;Fyr$FPCWCR*J@<<^0pCe@)B0pc!)zF~u7H%pvAil>K=U!r{%S@*m zLh_9Il%eW;n!b7hQST`TM^v(BZ=$P#-}I{p&xEn;MN3t`PREjoh$6vE*p)c5V?eTl zpnb4lZd0P;%Ozoyg7lO!A{M-|$%d;1!&MlQpb-@!7k}w%O)piP@ZN`tV2d!HnBwRhp*2A;twclX1{=$u zwk28DfTpV%J1?)#z+~H5tJhfPX&a>d@seqG02<3ZFXdZm1Ay$#IoNE3`oQdIo6!7> z;P%fj8luevi?T6_!>&z;`YkjSqTO0Flkh180}wh-FEQBipK zH}W!;u6g0xRQUm@J7*XAK7H#65^42V;bwND%05GpDcs3W)yYpZZOyhMrzz^v-FTLe 
z8Y-Tu#y!#W7+{G@=#AI^P`FmphcB+|%>=kX9^5NQ51wMgOH)qmNaS@yGdr{@><)FB zZ3#ZU7#ChsK7NP)vEXJi8uPm;s~(i&P;NU=w;k=>@(-BZkaRnq{XTqm3=h^2kh6h; zM_bn4o{UCSTx>w5Px3uWOuKvrCK+TrITJTd*7zK|8B&2I(Z(*(n=@o{(=EE7D#=8T zM-EU`!kYP^st?-9B9`ztH(Y8Cm`rCdgyM*T4~)~3D| z{*O_&#+c(3C2rolCnF?-q@af-@*qWQ@ax_!1iB?69?3e-^-AA=qXm9igQalX%63Z{_~JYxHeU~Qf$cq8ti3-Tfogrk z@4qt*JSVaG%#}p3YQO0D+miyixTRn8I6-=0F%p8k*gwKOxXN!Z;en&J7FB7koV6*H z>FV$803cb~$fK>wH?22x&2}Nye|F0EVv^KQHy1T3UO>sXe$}IkW%9oMxnU5 z1Q-kzI;0^I)NL6s1yh$=Pj^uaMP5N*zdz+MfX1Tt0@NGBu@OZ5TVq8$yAaOI@GeUkKs}zGh=6S$hLLm0H@+ z^qCF3WiYSI3bzFaZJK(@3{P!AV|o2}<^XRXvy>?7H(*nJ>lbirIhN3+%j~3Qg+ROC zt)7jE>^#y~EYHr{;IE&a^O47RjnD;8DC}Usd`vAS(p#(a9^5Qt1NS9U7M91&0l5ES zKERd88nBsYrluD7y&0fTwYYDkc^+G@GIns(xV-G1>N1>Zm-)`nYjH5oHbQ6@5yxi8 zQ_hHJmmQ#l);dE|8vs*gfVko@Gdl7)51L@t|FkxQaxzuWUjhTMFm1KM+6`*s<@WFc z^?EZAEr^E=riB_{YOh)yb8Vt!$7sqzdwFc?O#;wy#u41(NjoMk=UMNEtYK@1tznit zko`WOVaG0i5%2^zrjumR_^Xc)tZnD*Ja1Ro@{ydm&TO4ZzDl}NYcd`(hg6JHD@+ig z2yKiyN+qK(u9RLJZ_K##N%JG>jiK%2ewqB>!b|~6EFVUH5w>=J579*!fxqqmrWe$U zDAt%4E!KWt28;LwIJ}HyW5U=#obx(33lk+BI3D;%<9u9Ea~cGw157_2nIU3}1uIzF z&L!t|Kv(-0er(rA3+eXQLcKJB<3I+FtI`#2yaY2C+AttACK*e`C*oq4ju>*@lQ|}0fQswz;p9@4c`*HKomK| z8xsneC37mJz2|pcD{$3sRp6@6DtCh&-ld=a<|_Ip7((X$TLC8e z`=IK7V}&A)P2|I6h%N9kK3R1o#EbjNM0a}rvQy0Eai`KdLaoYzCLxuDpv zfSl~5f60Z%HVl@>q*L?*=@UM0Wf;@=1^9cBoe_wL;#)p(&9%qD)#h{Z_vQ5pu^;(5 z%>nC(7`k1Fl-2r76~j4Ir|Gye3Wwf~i5F(m-UJ(g{lza|R$%{~7eCxUo}BkoG6fo? zIP}?z8D0-3BeEu7!Lo9grpDxNEK$>v2_<<1&P$+6;msK>oJf@T_A9I6+M3%y* zaq_6gGz5E&RxNqMqHx9|hY0t)E(Q!e2tr%KX+baxK6SZQIRJj>vX95B-FoP{VDdKJ z1uD)3s)inrKo4l32k3`FrDFE)vn7e7cODECU^2gyeFLm?k;-7FlGbt=&z=kKA#`Fc zQE65-i_!!{I8MppDG|)Fg_|rDPMULqz|1?M5F@=Kq!!i=FRoj+YHe$_ zuHCv;avyehlClhujedpb_j>o<_GQX*}UT0!w5H?e3#peyAJmPzw|VVX`cWLgcEc~c1PN$;>A#L&7#46ypPKO28= z;-GU~V&7~F$eq@sEdgSxcm1oAS zux3h(KCpyx7kM&~-zL=Ie!LPARk~Rf&Joj9=d4@{G1Ap-u+TN0E^IeoQg&d1k!_f_ z0|;Kloi4<7w$yY*$8+bi4Km1r5j5qPT(gIEy>M!c9F(_ZJvuCgv;0~fCw3hp1}HEX z&U&7-g^@(7r$t{}T>%t!N;&Pf*0JKql8}sZHb)@9J0E@N#E6k%MYTQ5wdIE`l+$Or z%iY7V$>GBqt2wA{WRazY6C_Op8zf|opVh^TYP?K1p-qPjiXmMvCla83tIV3Xa;1%J z!Zz4&T2-H&S+O19JgtTo@Ddk`*Df3yW@twZznAK)*Mf<+2P)V&hl{Ot=SD|pADt}> z>p6A`?PdcELt5!%0l^+W&lJSGx|FAR;sB-itUeq}16Tbp5K2%k}pS?ATE~?9` z>at(gMsg^aBuuDLY@Y|zP>LUh9)KR2 zKOxcAq&%vFqB!lb8NdasV(ZR=G3ymE$tKBr@KFV^TLwCN#tvF&KOC=>c^s+Q&oCXRAF3PGkV7rtf40X ztwzaZ4b~=S1vhsR-rjqtM4( zFcy{X)l9s-rlTimD&r>P#n)&w6~0Q=h?NmP2s=M^kqpW1QV#3pBK<5anz-?VgLhrC zDK78i?P3ns))S+)*$Sl%vlM}wB8+$%DeaPmlAjonV(ZJS<&`DH%ba&hH3hDvvf`y4 zW~4^R))w+bL|8HGLjr~=)l7?!*bx4u>w;KJExDO%l312y)M%|6o%NkiPoUTqm?zIQ zc{I+YbL!}!R~;)4c5kpSeWzRzUb8Yujusm>~W2je^J!ozT4+kzdx&HW-(%pY8|bgO><<@EzsS#V52( z8^;9+;R^$*)D;vltPsI>L+Ke?@huAy_JrtcAlM`2TcT^$sA;WjtcY@8j}oM(!-nFiZ#ZQ%q)v#|XL=N~+lM0>Dz2uI-n zFxbAqcBtNXsNTK{q690aH>pAaCI;VH;aC7d&W{OEbwrOuB{r& zr|1LNDa_xA+|WuS)~Sknj8qGM?n`JFs!KdipI#lD>7U|-om^{oNC>_BgA)p(6B#eU zDs=R&n*tlk{qB?9T2;aR!~(ims*X@&M7Vx&3csqw&R~6~V9_h6u$Iw^rJ_u2iX}wV zajDv!VM(|vhP^l4$vhR!+_SpN-FDd~?9pzaA8(sihcJWQcBxBMp7CVJ+h&9oV@DD) zmS4ZjDWI`^Ca)|enZefMRoL#ddad4-&iJb~JL98M&E=C?yFghUQu`eHTLX)Njq3aH z74i){MQUXfzPZNi>QGDW58)eU8EyN+NHrx-ep#+16pl_K@8}XFdg@OpoQEl6+@}JQ z$`nezn{kya>`2ih+~Ej>411-uHAm}ZxL{knY&ZI;>4EsL%jCrNLQX6Pa5rf&?x&r(Rp_5rpR z;W{qB`Ns&g99tep+@rY9B8A5v@};D@l`SW9g(aqWOHRm{m9hk3&hS@Q&WI|v>&(oh z@uU(a43^)-mpb!dcdB*$?%>ySptr25cb#hivc3e}Do@P!8EPw*FCKscl5^`9KHk;A zu|p$%XQ(_cE)b2#jOh&>oq$)3fhL-T;1!LrL$&G=3M`)Sbl}l9)N51f*Y1a<7oO66 zgXlTRiR|_0xdR|+7lJufzf77u<8cz(e6owJZhC?O*EwV>xe4k(Dou>FViQ}3x17{w zV+yEpB)g@wM?m4Zpl4RqQ4H1XhL!UE_a!NB0=YdnZTEYq&a6p4h}y=;u+0DorXFOg 
z9-rqmCKk^^8b&7;o2)+RN3ReRZJ*Bi0LWf3*lsPD+xkE6{WLSlWDY2Ek~&eIFu7vA zpwBe?So8+`@`lq$M+$?Mz10i&QVjiQHwQKQ(ao%^ZaRio&In1_*4 zk&0R_icEh&)8i%YSSSs5phlFO(?k^sp_r7K`-~QP6D+;SXhZs-0+7uu?INm{b3(3( zqK|Zio~e&DLAXTQBtmGZBm!3p2%)sg0!ba5(MSuduT%D2Y2@Iwv;RgsQGa`2a{4zK z)Dxh{8!k%9z~>73^ELk^ErktK4$4g`hdj<6ZZ=RAD214MVFqF49EQuLF|M^8ab{UPdQjj9C>?d*XyJaS*}bD z;uJ)o@Zgep#oNMZ@Q||IT*sUJCne^%?n+_0gZ2y7stt7oZa=YUpfaf(OGPlDaT*Ih z&(xGR%#qXjA^o7)V#G4;HOwWlrHSK00^Iwgv2mxVIwp+-i(39PeKg)gWt}w3)6++FG#^u9Fa3_FIV?fkmYQ6cD5# z7@TbkJgCYR@m|B~q>Jvl?3h)aHJKlx%F#fXA7aho!T;cks=L>F4FaR+yiYS3}s3d~Z0wRC~$!(9nFxJe*d?4q8mi`(<`Jhda>Ctf9-| zU-hP7Gy^`rK6a!+uDY`QyLxgUpYZ9@DqrZ@ZM4uoQbFp_0V~A-DaD8@W=!hdm`yj@ zl@*wd6)DS-+D$10*&}wsM$14KN*tOy3?rqVTW?T+ImxqKtwgL$)+Nk`mKjd&!BaMJ zqiCD8Hg%R}%~Rz5hcYav*&2V^<4+6f>|70fE z$t~+b$_c>;cWw<)Wlk&68#AWFGf*v2D|NS)HLihy%F(&coIY=G6ZzRthV}{4PSMIy zx5A@nY6HT2)*xT@#0e}D7(sPVT$QreDp>)JT!C(~0-og*d3oMOo63i^rcpel@TGJZ z`Il_UAoBG^Z>>5a4wWL0~=Os^tDtmzXZzy#L+jfMHa*9xxk4e+P@m96i83wkz z;fK=dLNXm(#|a3m3g*bV!#*%780a5Mh?_aiCw*YVyjL`OSjO{@%xA)x%SOvXXR>DFs3G`f(bjv|1rhwgYOd zoIj~f=oi{AvCzq*&B)kvWSkv%e&1pYEH9}>2S^z4Djqf*U!~x$+qXJJI-W^8DKqIh zW3V;mq3r8M%mThDew(5XOZ1uc9p!xFeN${xye@rcNq(;T$GGDF-+An8ke3|uIBH_M z>*#&~&^(Lcv9wo zMfts<@XJ6RWP!L@9M)_r%YrJS?Ug4rt#@d8MUml~+MFspPIF2Rx~)A_**oO$ZTLaznH-F1HPd7P ze}<$voGUs)X#Uht7Hu(xT>qroiSX)-dbr-v=@1wI*$JvTOVVk$5e?>#A@xP)m!a&3#}jRmtJ;`hTmns&<}9E3fh{f&k{#Pfo@SsF?fk5l01lS(>e z%9M*!ro$_EeI%_ZEVBT`E2?qHcAJMfFHTyP{b-H@KSk%3eV%7=@$Z$=FFrXX;-2ct zHLu3N^h)RzV_0xMMLT-mQo-^iIeLf9=*^R^(=1t#-m8}~ChD_G*ZXL5A!l@E;7Bk>1eDfAW%Zr{8I1{wt17dtm$KhheAPC85lD=~ zht`hq=Fi6@=~%rz@c#NYKvAWhy_o)Y$A2E||GnY#FU%3;5Ba5^n0|H_t9HqCwmy;i z{*3`jKgrXPK#)s^M>9~Fq6F|-aKt-TlVgX61*F7?#fkvm>YIn&B}|AB5nbdrNhiB? zCq26#f1jQp{6%>ueh%26JA33CiHr=}g1SE6;*6xz7oOp>)1-1;qceYFcYpREG)ZbtI=D+E@%`mm0JRoO zG3Xi!uok&Y{zJ*bi|7J^CC$o*-wIlpOe+=EYS!iG^rUp%x@K47TI84W+k~zUZ(UAR zVy9?LWM)c?JM@SRXW#Jm-upjM6&Jlx}bQ}N#NaLWon#|+Yi)Jv^<)g42*beOgqd;>S&-dE1Lk# zj5&gdIT|60JehLk?<;nIJ0U-eX%{am-n)e=5Rm2QgB4-XMuA}Z2S({c!9?glYLKq+ zO%$_%Cm|0$^3vn+chV#9`Q`R7*5WrsM4gGO#h5#?0|M@OLXYxQB5Azk9Zrg`jC?LX zhZrTH8aW|1{InwcQLLB&FTwALr@GfGNR>8eRHWJxd<0VZ-%)UTz%X)2-@W#C7$6{)|Ly#Af>B29asixq*X4j$3))=7mh4@$pGOl@ydw4nOH3)Zl(h-@ z+Pu4>NQ#N=voOq%F`M!rV&z^r(!`^l!P-ZguPD~l*+x&fIf!FL-VeM-U0~?-!w`1N z3-AOV=i%&a*K(jT9p>#^M%7BrbC9MghB!0xw>oR`2~*}M9WV$CwruX>EKS$f)tbx1 z%Xi+H_lncIabLu?q%D7NQs`(}GSYuj+N?mahb36jLKW0Eio?4P2-Vo%@1v>deT#8Ou=>Rjoc`w!Cx3oI0tA3m4mgvXJBgPdNxtaBQtBCMHeRgwG2hrf z*Z4?4!NccO9ag4cZQf7#?&`1UqHgNz+;JptA8yVF*97j6XJt`+6kDwsT%`v2SuK8` ztK!U}YPEIo`!Qm*^h@E`TZG1@+_!Ac(v8^;@s8~T^-1I}gNOw%@`}=&W#+4ZK_<+^eJ+Oi>J7RNNBHZwJs{nBcsby3q>OEv@5 zg5@&v2r8VGuG>(n)Lq)Zq4;QWK{P;PlwKlWH`{VzL(5f^8|Fm)A-&w zKHAfC39TKQ`f5pbjkhT=H%;_RVk#c1)Ksg4djZoKB|MhVX>FNBxYoPWw!<+U)x#Zag2HF8KSfE)z1o``Hab*mV-LeszHHIJ^P1WM<*(m6CjvhR@f^ra`n;So;m*?zuF`Ro$TTB+rPVrs3h);Vic9~Dk1tP*G zK%Z?_dlOG=%DYqD`NR3T(mMd=U8A1P%1&3S8c=*w-~EV=+oQo6gSDfJQr<(x1ef0n z=n+E?de;(wd%+fcbu;2`V!QLHu$KD<2!k&FNWE^GH0Wc27p}c4e znYY&(B7b|0#WJv(yj;V}XB;rj6uEQxgj9#(?#&pe85|()VFeg>=~6qiioXAlvtg+G zl@6x#zQ+zb|0~cB@-0z(e(WP(01n9x{sX`4FI9Hw3oze|BEL{yPemGH#VL%4k}(6~ zhcX?fa03EUWD#oELIkQ8V`x*dDiN)e`M(4C z!-Tp_UY!za3Kz&I-l>@TH3(9?Ww_Ru5v89(zY$2!C^{oBTU;`b8^&lem5dM5*cZ@; z*p-lNQ3XUHY8AI?mACAacC?|nj@GI#2nIOmsn|){Wk5*IuZ7?0(02wdq|(*#y7|x* zs1Zy2YmS>@q7dsC21aEQP3Pi`!xH#F7zWHq_FHgorItNiS=mdZQ__iUH^BQv@E8js zVO-tO^2#@$tPWaq{axTe`!%s6?#=(hT zm+&tk_t##PS+2?Oh+!-H19S-mT8m_GB;mg_Q?r57dd#i)FhNx9*~T^5z$g`?zz!cE zl-E!?!-H?PJE!tH4Rrm`+NY_?v*Wn&ZLragOmhx_D5`ft)-(rl>MbVnuOE=6b9VKXF|1GR|C_s4Cu^%<_C^KmW8n zZ+KJu`ulWY4-|Xw!1%{dYfuK0UncApb~eni195iP){OlDFXSG!& 
z*WiBK^9&0Z^BXZnuS(2ZvKo3jQyDF)#1enWN%CN~QMu6#CFpUY)by;(nHYabHFXDB zu9hMjnX52e`gZ+>iDyIk`R1t_CCYDkRp@hDV%OD9an{X4v;`>UoFje?wYI;x*=-M@ z2i2WgZy#3Bm06b-T8YNdYtiIa!Pe}yQ@n!nd1!afC5_Qy$ib_gTm4q!6-?qV8?>U| zd`e(yH@X*v`ce6g)oZueA}ERNryO; zId{aFIdq;zFsUGjbu2#F5hgUa5Eg|D%UbAzebv;)ARgKVXDg&5h92uCUwyO-Gx;_* z`uQ#(7{PRVKo^SufIpOrJ&|z7bUpC`_Ds5+mJmpEh{|t@`T9#aido^LKpZ1Q^-ba> znyBHua2Bx9&C$OKEkjRG%ATvQ?^!SV1*3?-{F2&ro z5&{^eDC0}HS1VfQU1TMcPIe#n3D5p!w%iA7#t2HaboVi&(_2mKYIV9J3fntCoBCzk z^P<1l|1A7Mh8BB7@y#pCL3l?Z&v;8%1ue!3$|KEZyF;YCgY2b20DNHu+|+DASlX82 z9Q4ToK{fA-x-7}H+B!h;rN&`;owfE)E!h(I7NL1WeCp2={`^3<;28+$oCKFG0v$qk zD6_o;f|(#EaL<93&BKukEcPSKF_23VTNg(bAe!%s$VWif<_t`IZ6A{zc10O5#4T_Z z+xZLxb&DyF9Op@jLqdKmfk3x$LW%K1sLjcPL7k*MVz&m2*+^sbEe&8SDoz&6-6ubG z2kbA|0KWVcLLjnUT@fD4Fgi9&{cOfqgR7SE`I+GQfnslf(C+|U(DmA`PF^5Ea;~r% z-h{OM>_kn}Mi;qn&^#J{l=Db;;m%j@EE__8?TG-3{3-MFAZP7`AA(D z|HhtH{B&a2IWc_KOIezpuhVit=~&i$^S!j{wK~$$<)!+)cHU6o}~Y=1vNGKpI`kaVkb%W zAIPuJy$wH`F8bD0;upaht>&8X!Zu8MYzY}F!JqRm45d>Vr5m%W8@f;Cmc&F0A`G(M zG{q+AAOmY56#L`pY_1#Ua#iz!*1~nGH;pti#S{#9C^!{5=K;3M(I!+htS!$pB3hJg(gdFQB zbk9(io>Jb8lYN>sR-O2)iw=RXr|)vi5-0e3g_2MkFFsgc;c6*u5qpGGOv7H@M59OT zCgwdKlARMBswMsk&XRt(sDIK0J&NIAi1wCWv92I?WCqR{?HD!=hQU_)fNtm{fds{U zx2!%jwD$Xcq#>m*n)psrLYQgc?ED)7o{2?nk)d}cLYNTC@|gZ!-|noxR_BCy~a(08(@bLHcXta+T5> zWX8=*iLi+PSut_UT6{@E5G;H6TQ;Y5`WQU=El8S0nG|oK;;JTL#>hsDiQ5ln$v>Vt zc&6^-%#9hx=JjzDl8q4$rXbC6pkhD8Almr_vp=-|dIhfc$C&8+%%~RiCcoZij^6v% zfCCd_HBkQUbx3|ga#{bM1MWX!YeZFB`5TV=nOYiLDi|LX&hgudt^iP|BKU(gpHNQ> zb!ICF_x^0lJ&$+WJ)?_0BtvJDRS(l^x8CV332tl` z2}>q28Lf)8(k(e3wozNi{Ip`$pD2?plKyN}F9B9D`4q^ z^%VH{&a*hVkQb4GVXXW+Qofjl>0zsTl67z9^Le2nA(nXrsONd9R4peVoJ~tJ7eDR^ zVIW@7;BNo*eD>zBPRLJi$-)3Rtt$K1Sm5yb+5Kt++##;)=N{vBfBL{~iB=KsoyQA7 zd&EmN6Jp@vB-ATVVicvaN4%aKKUfPVl3K6fZ+zoO-NM^t-PUul3Ezrnb>^Wg&JEJTG}xP|Ko0IB55!2ACLbdNpq5||6vmtx;J6Fv{PJB)2e5! 
zTs2eT<}LW6C=k3PKx0IrU_WibOrq7sZR)!8w>ee$512Q_Tk!~UGhrod(o%i5lj+=t z?wjn49R0pNUx@k$TRiT2>{Qp`_`E^XYYPp@q4|ViT`o`gp~_GY;l*=@EmMs;Z@;bI zVcX}BP1ar8mkw=5kb5dnW;0t62bHiiuoV=o)?ss)2O$Ue!iDE8V8XR_T}Ewdw$NXE z_S~#2+RwDtmyUa>cLvNzZkPUs#?j@A$TbD*6L#q2EYv+ETg263Q`?lN6uK6*G=ctY zBRqys?-I?J_&Lu8o;$S^Q8>vq-gYH^{c-JQ4sEu*miUk~Dss()Z`1X>_=0DkmK!6^ zyxHg_paGummvu#M?v8d=2%S+%`U!)} zNIwlSMj|X`wzZNU+`RdGhLE3MI-BFtWw_?uk-0yYGJvtPLAd}%zg4(9AsO{&5D#4} z_ARqdNUSg-)O?E`9wTBeAm%3fpcO`My5CeU{+?k=dWCN?2Wdqs zggL-#0i8lP?EzWIzSi?=qS$+C*<{r;$RD2T{U6KA1phkC==P4F{KM_?$NyCX|M$@S zXDI&{c)dhr%kEobiEpcz$qlv`>RG!0Y`n%XFg{)aOhQqSCgfh>^nFxwM0?|6(oMlH z3L**GhKC&o){bgEHCTr_|Kq% zWDn-rXUxt+#!Rli>0P-FAQkP0j%-h&zuWQCCO2T*S$vCEfJNa;5+|HP{YdmJ9^wl) zq00gR2O8830tn>9UQ-nOnw5w1N1sG-#j9zfh2#@J;NJ37gLj^|FpZB+rnS!ztwiX9 zITV9GlM1>S&?hKiLGF#h=kz4dmA=S)>X9!N#ma-=MR2DS;6}y&^11uen$2cSBse5h zqecG24E;*Vnv85Bh1LE=A#@EK;gibl6M`1h(&RAWDAd~!fPMx6;B^|IsVGx?@2S$o zlP}swk%!EA z1|v)%j|YomN9sZB_9HJBNWhNOSuV#++2U0B!R4ts_pBR0&-dfPB}vL4B{CPT2hsX5 ztA1RI{0!=B)2L;WU_dLc{twYpgJlV&Rv4#zRn8cb@`ioFU1?aBT72u;5C$!= z(u9Wi-Ad$-(!}3?E0BB(4wc5=;dT7~C%pcL(2_l}BH|}}8{L$ZWy7whO@kASbb}UM zOa!tFlLi5!uK+DY{Y>N5C4Fe0Y2m_~fzSN_foXQra;sF5n=6W&d1!|Hx6YEY-#Yzowk}~zp#e9h;%F*25SlG%q>9 z+y1m$OMQ%ECW$ip3tDfn12ncDz%;E2m`^TLT8YUAG2eqTH_=e~&Z5PB5AN%BM_EdI zs(2WaujUKjVlAWPnsd7qHO=ig?i~F2`DlX#b({I@%M%p}m$c{LP z-$DD$=9NYY+9<-fFTXFS<|S{qjn@zvWVs@z^Tf4M?pViCZ8c)eH-pv&3AlZKu*FDd ztmPAz2o4Fu;dm79H-yUa8m_-R`?1jisP5Rde`78FYQqLOJVR;CbCPUp|_}fLaMz5kH7bu z=k7w?18Ye@AStOe_|wFzPa@YQq=`5niAMKsNS%SMn~NYIMeXApcV!rLsrOA);U1r5DHsHIwcEAtvQm6XO75jLsw28u)m6*!Q7j*OFPm3>g4%fm2pJ%pM1E_ z)YMYhAUeY6FJiGP0=N;7`->mS;$~Jow@3+H$_<&ojPyQToX6v3$5EC2&i^Rf~;+`zG(3n?PsUOeJ zb6A7zk~2tDvi0&CI}qQ#hORo1uygI;bqKNF(7l$hUi02sb5H)3LW<*k0Rv6ELcNV+_TgV9glb3jeFZ6e;J0Yf%ta#+AqrLtM!AtLd z@ZQHhuE?1Xr+qP}nMwV@}%kFYl*H`a7=gi!B?>uMjcmL#*nLi`; zjvZKQg$#{~-0aV5{Aaup1oWgxP7q9Wamd+fBoVo6=yy)i`sE8$o4VD9WG5ZafWz^a zpF{%tsgtGv&ptoOOGapIn72HvpPz6-KlzY7D2QF);)qB_EI2vEdK@gHzO>7M;TWXs zya2s)JDiapztoh~Lc}VpBsU*{C0utR>M!sx(?z%a1G<6mv`Xl4GzTgJG{!77S?b|8 zTKT1cvF`m?zTWkEUlqm)cqy|eN(7V8OEeoQU)|(PLsdWKv0D~>Bmm8>lJuVib4(ka zUuQ?Qu?w@xsv!}9*e#sGsYt48%Om5iUgv?!b{}AFYE)&qMg;x5($f{6nMn!q1Ywz zIo>Z!m+}htUmy8#SfIW7uUazXtF`>!(L4X=1O1Owbz9xWZc!c8*EVxl&S*n1pLNrn z21uD=NeY{?xdbO!7>2An8DoHBRkNmQxB?d=MWAVyu~;l6wAm+<{EFeSPnPFo!{*1= zX0Ey3c!ZN3C#T$#Zv9Qa&-cxnZ`AHdgN(jL4rwDjtPx;4+X^I6Vqgg1OsS!PVL_P3 z4`g7I7Uk~46UJZ)s7qkVI<=36^LJZuz!3&&X?Orf3~cc(n-(xj8ag*Uj#gNwp@png z<5Qf_hhD=k#TCP~*xoJe`$sVKheVUqbUZ5n4CZ0%d^4!HI(rO4=niAHi_fx?jd{)` zYSXeca)99LTC6u{0Lvw*A{uW|kms|Y0X?zt9~jmKiXOPG90nL!acUrv6KlZJPq05! zqCrn87rl&O+(<1iD0LNeFg4FhtNUo14dAw*%?^<&ddDM(u16(I)sc9=| zOEs4?x56E>K%_0Z4Kvlda_W*Ju1|in{E0lr^J9hQ-Fqs`p;p0Se~)~k;nP@i;)u4? 
zsJIi5@fk&RCDYK^thL~)f5p$^4$m_2LM!5|Iv|y=;v704rmx_PgX#AAn#IXz@7l8F zwpat2wnp$4H|{S}ht#X6N8^j0<$mQqppm;*9PnhT+(zbN&vSt`Ze!-v+Gv-S`ImZ^ zex88$a29R@1q1-^R->fX(6MNo+yxqfp}LU@kpEXNSz}3>o|)j!%@Q z>9M5D`cCA#jzB&hc2MA4_y!(;5&%pt+@GOu&xcxFfC-F$4%WvFTMDjUO#Et)c8S_L z7g=)A;ZENW^-_i(>oDGMw0V^#Nd~|mt&4P2q)q9y`!K2~2Om*pB8wX7!nP&Mz#Ep6 zh4^DWo|zD-?k#b31-I+Qcb*k5vfDz82Tj}=vPq;C9?t);80zbwV4_M$h-j$g;SfkqJsuuO zuq99*jJs*?*Pb(eD_*wuYhNJ&Xs#Ih;A)u#S z0sL9!fy4s;c3T7 z@VIjlFx#sT{tc`!nfra>zOMMS1vW3?CkL~)y63FaLpMfW$_K-XQqZo{!@KZrAO`7& zL=3-VKEFf~2APLQYacaFo|0#C2K|S924CFpRny-Z1b==G?ZVuF?n%*Ab@V%h47hr)v@GN!XLty)4nC47yPwk$fJf?%KK*2%v zqvozXR~H$kOxaVKNTAkEw#}8QTpdg!8L!6YOV%x?(L_u~$LW0^I^g(X@L~wA!M?l8 z(3Bf1#*IPs7BJ-Vrn_CNX?=|;DPas}v+qlfQ623H7&;3muANKh>HNXkaU5adCeki| zdp3{rR(NMGqQJR>wwMH$@FnWAGne^;2_R#lX&T;e%jkIb>(uao&w%&DgBve)l=8Fi z2X7|7?0_DNIhuJIX{R$1753^T#M{d0_~D?HQD-?35)?o^BuNQNCn?uiA(^|H00sD- zK8EzsUfYi74_QPnKRxwSO-2*IWQt`dIJStAo#=a8ZmadSGK==6*9@7Co{@tml;5^V zzWs|tp^c29sL_?Pkkx3FE_6sNR7lY?a1k|x81rLDS7)kN5>;daE5u`iG*RqfjL9u~ ztR9s9ffhbGC)g$3$|`yE*f^L?o&RGERM{`rp?t7~mMyWRg>Lp53M_P~9)^b83lH`9 z6EP!CPAaNEN(yeH&Xo6|GtopN*@@T$xsMFaitz>k3F7$tCx&gz`S-KCE47j$%kS%Gc zfxa_)ny!ae@M)Zf!!(+r?m8qD*#Y5Y6WJ!TSir?@>6|-QZd1U|E=y zsZ@%&7^3c*Qa$Kpw}h`RNpgY?2AKFx%4dZ7c{hhN*6TdD6bi{kp@DU*&r+{$I8k0Q z0%Em#o~ZQjtM?CUQXnyxA187_pW3vIwW=mByMlgtQjeAKA>%qo$mOvy>AOQ{l9h21 zt5Zpa#1(zyaP`7;A2U;Y^yy?_BGF`H#tIA_T-pIM9AMuaE^n}m4L8V`t$MftI!WS! zANwNF86P{!TK{-n72kGFwfA27Fsao4wTquWJAl=C6z z9x&X=o~|62YDVZoMquT0upHWoBs9lJn1r zgw_RnY@^Ifp+QT65(~@kY84e3mgpF&Om+$TATkk_q$tg<<#Jd$GQ@eQmC%k;8kbL;%A;JE#4!CXD#cZtR8vYV)f82TbSIgq+}aoG6sl0`DvTLSew~+C?A6~#)1FP2V+O@A zW9$XqeV@e&!up1LPJ^F7=RqeUWrxb}y8p*lFLvd)dS8I>S9(V2D((7*OQjL()va*e9 zpS5&?J(9}R=)#k6xlUW}xPUtvUbgVZy!1-Mg(zE$LTPGCC?Zp z8dE%%?sMQGbF4$Yebp|d`!1`gO1ic{n_VmC^RU>{Yl6Lf5l2l+1siiE9zn^rXq8Q_ z$ihKcL$Bms3sIY_01*!wf$H%6={%$HZ1Q!s(XQ$>ZzUeH$VMxt^WYo1hZ_!ca=OU{ z%hEZzh|DPGxACZ$K3_ z(F`*5hm-fAUh)RJA@Z?Wm6#6{IJ*|Q#(JYqsImB>KwkYBEh6@-JMao+=8t%{)5^ThF3XbIKk-Y{SiW_HUUxZksWm=Fw*OA&MVYtlq4+?mXOCf zjN{oy9a7Yc84oH`uh1F$b|dPtSMAgNotam((vY~22xdj7PK*RZxUoT$fB{alusztB zV4{$u0BkEnrkO^@CeloZwg&ZjkVWosySEzlJ8xsdgdo8HIRhJdV3y#1eiA6!X(2%= zDl;U_>1|tIvF?RE_REVwAIRnqe4*SV)W(c){T4PC~HrT&7H^3vxQ~DoL@Yv?cJc`n-MafjPv=(4OKk%^NZN7Dd_cAzo=~ zd@^)|vyymrci1&vZaREZb_8xC7kTy%6DMjh8Sb^@b?XUhc2oAO$2EfnEd_Ru-||T8 z?+mv~p}{&KVco(mTHUhCHDPJyY}Ugj*32(>2Dxpoa{HO%c98??roV!lvdm>X&PkOs zBWBkymC-}*kuV*BIfF!JNd0FQa0E`iDcXX3EjUQt{5E0pO^n@t(HcR|lq=S7_G2^n z^U@lE4b6xBV;|RZxTCAZoS*N)dD46)pQH;G-`t^%&|)nhQ$C{0CcTI;ctZ#@jRjo9 z+s@dC&a3+621G?m1n^UjN6foTm8ljyMBCXYZnMwyo=tmeXgauPW3Af0*4$|ThGei9#tkkZ(E?^0LDYsCSM#BF+5ML@MQse0 zL+4SSHLwx&;y*!sGx(UcUS03EOu)>B%8xo`;c?e2^rQoV+&@95d451b?D9}o?FfEt zT@Wbo>M^f5LiMDHP>!?=Q_4SY_A8PNMm=vbQ z)iV)ySg(1UulzF6vq-BC!!LvS{qAriU){tJal|=~7@R+b?tEGM4!ePu_a|auEEwzR zG>tj0*xrEN57~?=@wFn|M!#FnBM;jK+Q!9!k>n>+BcBlDZfOfQ$7?oXzilF$lqEXiz7zo6t@(!@4QA-kk^o~8FLu3z*$ zHwRv+sZEczwR+|kAD&O_EK*pW%ChLP|FI)$3YOzKU9%w*TKxpGMkxeFRRR`nJc%x9o+EiPekO})Vz zygA+Q$D^UTC4hD$n3U3}F{mN!O7HWvj|+r6=8J!GilP;2S^Q8!v5%%6gsKFsQp4(8 z68%*^%>H0O|s-H;ru*t#5NmfOm@!$@iVqhi<58^IQ(kHXSWM7*BQep)v663^#x2!_=Zh zXhRm}mNz35%CUj69B>f$ArWw*TTEEzqam2ap?4qIJ&gNDh5*ad?C1XmVE#)>YnieC z_rX_r<^ClMB>4ZM%=+Rhe2oQ3xTzZ}YeX;-8p9i2=8LDyizMs=9c%_yFmL8-=bIq!MzN zbX{fKf3n=aXkphec3M%}k8bU)>|V_H?f@C$)-*EOjira(wlbLpw`u13AuCCsJ-I0k zDzq>AGI3)$`A~?u88sz<{jBZi!(ba&Ixn)HZSVG*;S=35D~q_7kGI?Vp!IbicpnJI zbS9#nmB^kjI?f51+^=Z2J^1XCm=shPqM=;kw2W;3(lQ8a(g6N{ zZB~($_wS#6*{%7@e*g1K^*_6>|GUj{6b7V+zSe#$j#_7_(}nxZ1?hLXvjGYdx`7+v)b}?geTekTfi}ue2A(Cd_`SurSd+{ke(T8s1ut*7IOVp{5O8P3wqEl}*jcMq~{JeJfP* 
zS7ozQ*>X;h2k<*TMDQz*6Zl=gzkD+#YsSDLlaA~!>qh!FZ8lv*f#rTeREB5Cj(I4>o96{I{T?cwc znhU)!Ty{ZtPz9WeblXBG%y|Y_eSKah>c`v>l&d(&C~8KpgAJOOIudvgIZ= zeJImXu}rNwDst0`L3@y)VI?#7zy=tS6-d7dr;59!#?`WCQc1~)zNskFw{;fPxkaVQ zi5A0RU`WwaqDo{*l2=tSZektDf>n$!MyEMFd4tLn zZHJww=e1avIaO}Z9FGBwjVV8)eVB}P$ojJPcpKiT+ohd&D&F1^B_eX(53 zT3=n>D(u)YRNt7R*=jMSbK@QL8Hv~quQkUglxy@)cBt86;zZC`ywlZ23N!h3pbgNF z^0SQR)@VAE8lQ@7T!M)@&tQ*l<+Uc*SPeR@?{w(+W$BsQO*FixO#AkqWDaH;qRnhu z{6{VVc4@NaVxI;&o9o)ktX>nYu(h?g(3#?lP-?eQgv%Uw?q%e?=;i?vVKG>mO`Ad^ zZSIp+&hLxl%Uwm@Azs^&LD4gtdZGK^t zXD_>m>gM0E12F=)VM{a z(|VDOb4cuJEyD9a!&`+|%kJc#uDWiDm_dCfHX>7JqNOGtW@S?!Gm}C|@U7~UnjnkF z47a64IxCPLan#=$h(hZ=9Y6*;B&Ih=36FA5DVU65h-wdBJdwNd20(N@`#G$b+QD9U~c8Bah!(C^X=cOfl zcMmQ!eDaxT=t>QxiOW<@cjD;LO_u78afg6$k z@J;smnsCS=B&^ocS&kC#mA0~k!%dLg9{O4=E3SS9y{6ZRG^91juKcs_H?kH{mgs|5ZQ@Mv5fPPKVb;dg zg++lt9hS>ND9XZ9VDo??AZb8J*_=|M^*d*~i+6`KA7cIQGE ze}oub>HyvpZ8lBmD^s+qW3($C>J1v{jhiA@1uY&?m&iob9Vx!Jei1eUV*uw@?!R=2 z6?TH=9<{t-w5(ALJyO}H375SnRH!-=mNx;~y%vkL7SOR46S5`&DiEwMx#mym12pds zuVkTfZpZsjfRPzNl${TQaA#Bzd$M^dA>1WXRiAo;fByx5_T}PAuOl zQ(+R+rlqDMPjOjU(UAoPzuZ>cyetRa1+7-p1B*kxHeRd@q>H7d+*g(ZQ$R{7E2|C^ zg5@gj7XZSkSDjOgcqiWPbq1kc^Tb2jGOjSdd9(r42SH1>r&HG}VUbWDswfXYF1u=G z5-=I!$mV4!71-y|?@L1|2Zbydh*;KBc~Q7#NKRIaoV;)?fKBmQ;hBRRPA8IFa4^Ag zbye8K72ysD&G4+IG_5Mh_Y2M3R8FjiX}u6WWjW@SF&>rJH5XaIHjU14F8QFrZEr=H zdD&yzq|G&-APb|8Wa8+IG6K@kOE)lo#vZ1w&T4Jgiqoa|5pMoLdVE3Qn#zMZWZ+JM z2N_xLO%B`+b8%Jj7In+}Cg$Y(hc{I$I#=mDLMblI^I%x7cEV@pvAyp^y!|tU@l$;3 zQ-14H2=g=B?Am#xYVc!)`1yIGy=@2b*$?SwU&Om#_ zpLNz2ghg;3SwT@!%wcQf;i#odY?c8KOzYvOm-32(3`(#zn2|4;sKEqcCR*bJ5xT1I zQkVajCL%!zf!?%SxDqpLTyqQ_ujMW1`Z`-IyH~i89GOGMys%aCfg_1y@`#nRy@$d; z8sERRSL$sWYvMkJ z_$U$4>R#V@c>)&*j(y^83<^`$^I~6~UP40*>2Ksxy|4m5L#&;B{>hbrTfB$w&4y>o zJ2PGxeyhEdHwe2BVNb%@;3~6Pcm#_kAX1wNQRJtzJ<|=F&6%8lFt9oWUj$aI&m7s-|eDWRCteL6S?$&5}LIA-9r~8 zRQw_JY>boKvc`u29}FS*=_Q6BgJM%_I`&}L2Yl$~rxs>pt)y{g#hbZs=w0l6v*bcJ zr<(xh>bBPzc?%Lm?Wb0Wkjn&Kl6Wnz##2z$P$vjYQ62Z9t$|TVk4(l(sfG{|S?G|G zUyh-tJ922yeB&M>mp1hV_(nZ0R*Nu0m~A$1)7T%HH5qjLqNe14P0X^U1&aJ=s5P<7 z;Uz(D>D0EhdV2~K+h0K-1wC@cPhkI< zz5i7=K2I(E=>78UUt+TV58nMJSSm$%QXYp1m5&?*MOd4XjP6&)LM3!fFwrEu5lVn` zN^2BhC4Ei=n=z|#kq4@8RTVV4_cwx&_NQXY5fRF$yOz3HuGQzMo_1cIFC1wboVz=F z*opi8VM&|@zdp;9HC0WdO{li{mH)s+MuJKD4Gmpi%9*0syHUELS%aN$0&ckM`UB?~ zTEhfZ|J>H1wpowpQAe?Cn_Xjg6baI3Vwp4yJ`A6<$*2S~ox;nE6lEN=>8}&&k6Ii> zrB{wk6jM|T*gS3yXW^t$@drMZcn|rxd_PKGs9Z?AyAMtlz>{f!&B!bb=+OBGHSA&m zrPFzZOBLbJrJ$HLs;PK--lt(N0)nX~C<=SGi&ctEMZmlMlO2+S&Tj+WX9go%GW#ma@!ytdhDMkaL$46J=>cZwB~AK&<)nb@gHlEX$3b~2irW^`R`Z+!m%+xy#~ z*wG^Hq!=9ZL?EC?LzqtwcSgVkI;@;|6M0)h>?@ARoJA+!>d}ozo(Gf(Gb#Eqqt`SP z(HnEjkL*|thb8KMTOG7K17o5fn?qywiRr*d{Z4xIh+exdC z4=KrVR(>Q6LwO6#9k8BOQXb5`MKyMPhgvZPONfvYz7iPrtcljP=FF|yeo2q7EAgL) z1>bV&xUzIfeepqiTHp=t5x+P+9?i|q`4i%3biH&AE_GUt!cn50r|?6-^tMZuW3>hZ zxUbEpvIROTH<}o2eQmHX2D$p|7~W>8wEN@CJ-Fip4u#q;AK)%rkTBgU*qtn@Sc{(0$HbI<`s(+WcQQ84W?RgfS6?2`ol@G?;P&*H-zKr*lxdp)lck@ z8_ARAVLuv}&3zR@0VM6wiQ?ooZ~A(%4Aq21(-Xe`8VE?Vk_>&m+z$7D<#xF*DyX5k z>HiVry3$>0t+myEM)cGdtreNCbU7hFIY1rBahM!+H2{I8RE>cWbS70CnV3;?1*+lUPE8q0`^5W z3~4-r&`1rx>LnG#?f0I>#`HL%U5!}u{UwS)gM|7ek4m&f-S&V-B23dI4_(G_{l|{~yv2s{$t>(ict>Emw zpq`O^+?sVvUa{O2BVj=xGEtSL$d-(kNLoC@he2&xUgwM2{ zGc$af?|?}he`I(yYqEiO1;K&XNIf(!Sm~#!Tx(6RD?w9_y_yR8G)+-6S#Bl$5kR;hXBEA5bn;lQ%J z^=5+S(Aix4+1P#I;u)SuBR|`)yD&&@q)+IeKz8S&B6b~SD%G@93XwJ>WXaE8RI;TO z()0IMx%OS%=13~#OALQF$B4x|xMOdy4<=xpJMv6XgFGtzGJxY`V1MDYgeDKxXl*?N zm+gPkf@3O&HJ?mpGVzlrjo)3;NP}&V)|89Z9)J#neBBEPcF-1!7Zfd6Bo>>+W`e#d zKJlsw#xjY}Dtbg>8c6X3>+%ynf|>`++2>p{3Gacag2cHLPYN5ux(J4*%E7TyHT!GV(>2zjQb6z9Fcy`(_V?f)LA# 
z$%3h>s-|33PRiPBP>+%;z;tV;4~jO5PK^+Y^$Tacn($Z^2iLUIk@Lsx2Qdk_`Mzoz z6aZLEnl}x)W6q-%&Fyh5zoN>@oTWHMF4!dz8=PX_IfCX0t1?)iw8VGRL)^;2mXEOj z0-01}#^qjzH;X|koxyaD(pb5*gy5nR)tN@Gc~P@+*zCu_Igcv4vd8`#W4k3le$_O0 zH6vlvcklBAR{V+(c2r>Lnbivx)489n&MFK}FUpP1(R`M92fe9nx3S+y<3$38Vm`42g|#B+1&*Ezh}k&3$*k<*j~uQ(B9e9@gMi8B+257af1o? zOtQ)<3ae?ktL%|O*<=GlQjILpPgz56Lj~QHWfy5QZNhP-SdKFlqM|&N89*T8Z(1)V zve_zRaE(2AoxR~QJ6XM{+XYH)NC*g3gBw9AW(hAy7KeUjt+x1rShM`-z4e)N!9=I7 z)mZh2%7QE4pbNVw-L&qE?LEZCn>G$zLYZthA|G-j((d&fi(hlK3N-9C;xcO6u7rh!mz|MUaI>tgpPl5|QgV{#? z@JKtdYF9WS2iteKQ%@h{MrY$*SJZA z@TsYib{aE^HGHZ*-)^l%NdLKcWDVUWy? zX9#7&t|O8<#75Sn$d&JAHmeV~>&IYZ44@ILSN_^9kYGt2UA#E;Lmpj#~@isM#@Q5#!tysLZ-WHxP~@104~AHhZsc>^N{< zt`=)}Qz7-r`R(7H8tVk~GzE2mmeK71i`YXd`=0(FQ}g*gdA5!z-6W353aHTUhRkh+6fAc-|k>~134 zhj_SltZ&I@v*dQRj;HyMbcmseiU-3Db%wo&T|vXGWL}TACQ|(;xvC1sCjtsn({)$m z#MEmX*O`yW02QYrl|$F{6iz005ry39#}WQIbm(LwTO68V^crvyZey+aS(yXSQ>`wO zx@{|#4^?bn$T;b%a@$&{@p4Zzsf9m)wXE`>II|YzQtMKqsvB!i;jkl)Y1NoqlOt!| zcMSTe&RYFT78BDl23SW7Q6)5uXDs;`+L9{%u(14CEp0>-5BJ%~TY>e!X}0xXwuKj+ zRqQXh25;VaknO*;$oY9!=Uv+f5+-hg^b97-N5PJeKAuze+!Sj1;5&R#ck>XVc|p`1 zOY_Coqm*BEEh4Djb^}7;!<>^cbm?o7R-@qpw*B>`@qgSvj1yTgzF89ah$SFT&sR>Z zmIW-`tS8jYOht*kX+*t9@E|(%TVTuV4oM|vgWpU0w|yIJKCIUwn3=L_fyeSK^>p6R z%6_0)O}Vw&$!p%r%>&=`F_~_EpnsH{YOf`x8{M|EW=X@l_wqaP%pn<47xL3>K(JCa z%AMe#h&SavGdihva2(aWN-jH;imq`c2lH5+6{U1~0JR}JHQ3@18~L7D9@{KP$rX+# z{{!$S&-ICR&%%LRmH5xoInuZ*-_bwUBG?k!_`ZTNJxU77qmyov}()B5e z%TWJ|@s)?opJQ!2M~SY&7L75=I$h&4Qk)(+(d0ZUo`rbUCtMVbp+=idS5Ihla}wU5 z!1~2@?_Bz%zn0_fCJM~cM;Q#FH=>rjz6sO-i7$pBGW!v|!S-&aeYx(F_WULAcQQDk9%0rj9vD5LUR8ILR1tiD0)xh7$N7jk1 zx+a}wYl7swq<&qI9AnJV5}`vM6nm?;W*RT+>R;@_XR?95b7(&pJGbE3Ijw7O0?QCT zImcbZ-fG|Bn*KBd`)^3TA1w_2h zQ@=Oysw~lW_HONmG1m^A~gS(&#kzjv9q0{$3MbSjyzB0Ujyu? zHhwn5?tpm~2lobpQc6g09GvKfOcxVZaaW1?DlGSJ2xKz{g#qI1tlMhNYLAbuz29C$ zCJKan5EvHA z4LkOj9z*qDJpQf|B2-mGU4~R!0U%)drNCY-2}ZM#B_iSm#2 zei+K=;Dbqfgg0$k@YHvyn44P&vHmDzx`U8oRSJ(QA12?x%vv)dO$a8>eBr=E`9!+o zaZ9}d$F}%wc+K~`<23s^<2KXr01)W=4Q+s9D^`yV5SA3*8){%&$)^cCJVmdNDAvc4 zAGEEDC%+n=N0mK8l8=pE3|nL?*%= zJb)+*H())$24Guq`Oa306A_(&BcCm)$F_Kl5BEpaP;%(ab`wh;vq;=?Sk|N_J3(@D zy*fvR^y+Ewt3pt0BF<@e_?ZuW6&NZFm`V@T4xPSh<~rAQl^xp087q}NiB6bE2cDP5$#4oa9`9_&vX`jt4$YZ0z-4j_ z!djF%(gUzI4sV$Md~i;mib3DvZ;{$HByMERrRr2jL_~vvQwahRm6kjsL+x72m5o%_ zl;5;#CwF73ZAk{-9dX5O2$ReZFlOEpGY&m=-IzfdVoUaLJZ!VZUVRap#8mjHyXXL2 zRLi9~Gj%|mfImIV{!^2_&NiBlgKV%^;HH>OwSy#46VTfg@}XL$8|s|D<}0< zNr1py=cqCkGm~wJSC5%{g$R2vKGrb$R5K17554xhoqST7Jglx)r!Z=Ii4$l5f{$` z(Orm97myz7ur=qPQoPDkY6$qGtadaw?z+fLVrz{}7_Xn`A*IN|nDdBwDD^7e+rT?4 z0wrIir#;eUG&ZHvKyzF%_&g4!Iq%gYVIF0>;qsDo<=`S!p9gB?qoZ-9T#t>q`8GiqvHyOqk&Q0Yf?V|jey;(B1yj5ui)^d2jb*}>zShNC z_~v`jXIW#GR{NFmj+Ubp2|z!$Hr%ezoPZP&Xic=K&jIjF^dF+9NDFz??#{ z;+yQ9I9m`hI7eb%3>hd~x$0%8eY!G5OYD7~TQsTj{IkQ`CB4X^vT#!l=?iLR199dk z1?6?oIlVAH_|3ecUKGGCwfuHb&4y`2zp_Zy;TM&(yz&KVrG2Khxrb*htf(>4FuX6& zHLaSxKJDOZk{8ho05!W+5J{n=j-QdIqclC;tDIiCD-fkzvx$as^5xug0Bh+N15Bi? 
z4jAAKT1Yp7%-d2(0btV_8g}xQdxF?ES zx}}fg98Ge2_^{6l-{4GeyYXFfr>nySoZ*+9FQqAp1KpDdr%=)KUviEVt@%^aFPREFjakMq=`^zln z)OT@+9g`=!L*5kW8N3d@{PxaQS)Z392`%ef z5?!b^HGBRxIk;GDyyceH;-vK%Q&ByT*998~AI zd5-6s4`MwDDVM8GDENa?K6XYc|3F#a4lmE?ESC^fqzOY)J1YD5BFI$6q0I-*I{~Y-O7H^=O62%@so_&wH zSk=Hw8bC`D#%+`pxl>|_KL56qZqgpIL-?z-K>I=h{X6#R-`^$$7w7-uak5eQQqc6Z zMb?C@l5~#{TLKdk4lGu;YS2`rY@YvCL#R~Il4T;YLfF(nJuz|ykI zy-2Wp=EPy4pWiLpm+keu^%vJ~yUXw6Thk>{Qex5UNYz5^=4!w8e8U5CXxg$r> zo<792M8RGFZ*Iy2m&Jz%>Vu|YeCz>?H`Oe&zDSx+3q; zgDe0YbDaBHTzhu`#+0PTV#Bj@&)SyTmHV(-)ut4zlWzOLsrC#92CeS^8(1UG(WU1UZI`h7(H2TGK5vAP+ zva5PiY|?%)_XhjTEXcUyz)tOVis0n!26XBn5=D0W*8D30%xJ0ti;*i*)$K<%FT;Yp zPU%Q&iqEERP%TpD*$|HliKQwYe<%8D+KFwcCb-%;?l{*_CpfthFlL z9(Ows=E3};XcU@DwY4k8s<*HMJ!D(1{yx)JxcDo9s8wH*>#j9AkPB}RvE6%c*b%Z# z&qv0fn!C0v{T6<^(cPu0?L-yduvG$kHg}X$xlwl1>G%|%4U1qoKeg;C-L-9{aq*Yq zGFHfF*fTG|J)+TlAsEjNcr7$&-8k9A4GB>L0vq6t?{j&4`O-bRP*WtJ zX`puoHKC0Dt$TK;19HFa8!-1W`vF3f)E1J;Fvr^d{pL}*#*d23vO?M;RoPd-0*6y} zin$})Pjjz-3~E(Cdqfh?$OB)90+C*Wmmu{WQ*uA5zpEw|Kh2{J8aW=oErEMS*Y?96 z^En^#zugk%&sEMmIzBaRQ6)MT<$w_sx&^7nKY7MHfw>?zP-JY3WC+;1o*w8p-|XuK zF=L~;;Ge7=?rRgADVCiNkS+*?^1Z~9!Gm?hwZjk)_Gpo?S$2$)*v&U_I_#I}4LhQA zolcWcXqQ*%c}c#8i%lWBCUlmRMjt*h9>5-gTWn~OWOzA^4&6U1lI)hdIA)8fe~Ow& z)V`k{ep7S%h}nT}esYZ;0mVnL2gCC|G5$TEJE&sT>U&8D1)n+G{_!`3%hEHvX`8RY z!vCwEC;mTY&Hu=g|D%0M`mf*rG3fed*F$NOUPK60Po-yANm_0BcpwNOtr$QSu8OTB zT0&It#?MF2?Z~}2#RbtvMv9EU-!BodT@kq_)_u9&^rUZfwf^w-^c&a!tqC4HUZEg< z5OwGWVf2x&NV=eOzL~7=B48T=CCs~3vAbCPMG0_{JxXRajqD3vdvw|+HM2gs*d=V zxV@R1?(ZzNpNK_v5MxLVq^GhAbfKuG(fCJ-xWvV>n$a^y3->5%Jy%jH&a`iWQxdQ$ zsW&c$pvL~wjhGZMn2xC|0ZNnS#7SfQ9uWUo2K_a%o=?K(z5ObK7!m*dfbySD&A%M@ z&so(!D`;7IesY<4> zj+=L7l$eg`9CLWc{hnd(@i6a+@5yQBxel&css0|kon4hxm40e^YCR9D0)2nL_q4t1 zh=4_gC=akl`(@_C9hgXm8I{0Ml1w=RO{hu@B2sFIOA25^BO%zT{YgOeh)yHaw2Su3 z3F9g_YY&Lvs_cb_EVC8w>J`03M~blVhosQrVBR8-d9^~zZkrW7#YdXR+~tSBdFu+` z8Oq(2Mr6p=stSM@()Qrd>KLnWf8pB7j$Gy*I^^EVZW_%Qr;g-Y7w>m&nNrGcM1ZMf!F+Rf3ynZ(*W*mXMP^h?Y1iQ-p%lv?*Nun=zEZ(v>ayC65FGAW035ac;vM&n z;$3NM+ihzsd-Fv*{HC+BK~|)jb9a;qwfx1Ycv>V2_s)Cnk4lRZ31B zuuWVnW$QU-7=rWpee*_uj&n9340hQ=+8=i|?Twp%5DxYNqo-K!?9So(^aV4Iam&?| z&gvl$zxF@{FK_?BIy&QW-T~;lq8VQE%N;5jC_xp%L#N(I8h7vDD)1dgu;#hWRuufk z9my6RPkY@D%fwmc7F)W^_w23R?Bc9>a@Wv21Xt_bH^+ZLzWSaO zW9ah^$;rWNI_A>n*126oc%))KXiijVpkmm;Pv3L=1=abH;mbDRs+m*91~GoXal`hI zNY>?(<`pH*G}m{U6X5yz<1Hd{yvWl(`4lpeLm!rIr-t82!H{&I zk5A|p?0f*BngKJ4pE)HKij+C4hrWwYa z5I4)e$s3AHNX1HiZu#vWxuT_tet<})(fT?mDPy(CTFG3_{{NBo&Ow&7d(voiRhMns zwr$(CZFbqVZQHhO+cvt;U%ltdxifR-%)K-Bi&!i6j##n(*ts5N=99ntz+v(BBErWe z{XBFQU?O4H2fXKgeqOCGmi+^;IhXCS5#;(4}8h3#IsSmNa}fN`v_-Z=VEZOL2W53f8%V7y`W)x$|!R-EBA| zVWMF?;xb$^2&CjmIirA>(0w96wUlD>pqipN20W{ujWx46xSuZxn%=u3;v%c1WB%I9 zE6X0>mL>8T8K*M|N2p`_lIkTZfh8+HOCpxC5`%z=v?bU;j7x*<=-3`?^My-;PqM~1 zl9qYpT!U>KF;untm$Jrzl@Kn@8Of}ZoT~0gDK@?Ww^b^n4$>h@^BU#Km*<{pXp!0! zX6(oU+Ol|9{aC?=IF+Jr44`H75oHjs8I`I{$3h&HAWz5%a1okgie#~Q>Ss&#W=r&c za}xau2j)?p-_}+lAt-k}$nNbJFybF9Z%&$jcC1Vn+#1DViBz)A>?=mM`r>9S)twji z32J&(0W5t<5(H?%_tn^pco%E<`4WO{(a{O0_lboeQa-2FWDFwBo4?PnMm3g%YJ6rA z{=h*IVBN?;mB;0)(l$V*i7^LwrTgzQ=wC!4@-Bx3#<$M{{5xvsU-7NqrNeC;?Q9*K zXdP`0EsdRM`3=4&({FZFNZ(1{)Is0+--ty)TN@K|(|=nKDb6S^$iaQ-V3H-90n2|> z`5KtV0Yc>Kl7M3^Hkj79?TrpWVx6D%%~kj$9@uDD#+74d zO?tgwb#F~gcxTk?@OT5#1s2iC7~%yAfd%OQq#vZ4v45K5Jrtg%j2O@0!lMJZSK ztB=Jqph=db;tI$gOwGN z2hr@ci3w^U%f%{NA`)dh$q$P&<|xc`vf-49#NM2)+Fe+m@fHgXB3|Km%aG9~Kh7!$0Je z1*j`dg=ocb%kai50iGNt^d$u`TklX0UKobYzOuOY=9yc-E~xaRbhnsU-kZCqT|YG} z4-saykCs|BoRVHj32Qn9``_}X7xFY;mxcd4nuGoJ^MhkrPU>_#+`^+AWf|HQyo3ke{m%4~?UE|fZsn7rCEX*SiCa1? 
ztk~qEQZ$J*c)z0s>kMiZWod;*{}Z`u-~+zSqG4#1Az?;>Sl&uajD?R_-C9L_Y80)q zX-F(IinTLbrUq^;wBa&NYE9gDV-VdAiemVQYbW4~!FZwP61XOA!qK2ON#Gp;dy2H$ zH>Z{+pOK^cj+7OsYR=SUme=(?Wjp2Obl-4z#O5wx4AOD8m}`+o&dx{e(+}5*#F?Ur z!Xw(*q1MC>T1xFh7vCl`xSHF(*v;hVMHVmnAv-Pn1WM!ZSE+x6f3 z-e0TN)G4=+v+ur#^-Yur{EO|#f7|yI|JD`%RnJqFw*3}hxxY86W!z=r>5as|>9c^< zi2L&~kCcVZnIz5zCFa-)V&ukL-Y&(?CjFDG2w*I=dZNTEWJz)J_rN9}EGi|sx+&iv2uPMCU?>~Ae%}DG4YDnnPdeeyZ*(PF5Qw_-I zq!=>|Q}t7g*=FR7QVmm0$>f9pGdc^g~<%> zuwVmBAfB)}Y;=~^O=@EvSahhaN}Fys?qDC8w-sYeso^+W*b1kmoVSOXpi(p0GRs6c ztyV`qJI;{yig)n?xzWnvCU`}szR6>27E+qJ(WTy?$x3?~%_~6gj|08o zN>fI9IO=VF@7$kxeZR0jjA#@v2chAtXexHXL-HoJ3|hwSk!zZ*#rxK?>?7(b=uXi% zrYZ-eho|7l&dJcpr0UfSXqjH&YtGTzz9e+dunV^<`hQrE0n29Pz)GW?1Qo_AZ!3!h z7UrhG$G(D{S|Y=ERfQoq;`oUb51n1PcmyBaJ|AmKTKcM>XAs{qtGba+fER?v!6})! zi~zZjpe~q_f=8-;Mb?U<7so980Q>z2AWp`mR;WYZ99W6hLH$lWs7*j`^g$$poxiA$ zMO56J5IY36<{i0z^bEL9Iv$yDvj|;~~cqJ08DvoEITQ5W9ub zoFERuaKul%fH;bcGjEg)VN?^s!@vo&cBAh*Sy)U*&glU=12#*Z$~|Gl*>I9tHu+31$%tvNJYto)2GDSlc6G*`>@n6X(tCWcPr zeWdiBqAt^?3^qX0$R+fP)ja*J3G{&uV^UD+&EtX9`7N6=k3SUHlI%m@&A^zu{$33< z=|)H;+Ewn&+vw;HhJrV`l539ts3b0ll(Y1MtNvaIG)Y%P2_l_P1<~i97}s0GLmk9# zUW4+R*I@mh3?%=Uy!j3Pvgq6Xn=L6yURn}}9w}3Gqk1`JS=m#m)0#hu+miw*PYzD& z7H8q4-I-Or-n_5pxDR(X24SH{2ZfUOHu!dXGDEz(o0sPYls!f+>Mk1}+HT;i^s01J z{F)g{gI03jgBe}cL=jSYR_ZGP=F_mvRuADLxA7#cL)=p8r-R@D!EagnTOqJ6lS|sN zxtPjr<>6$zqb#%xFm6-}uRMEfYuY-7`LL;eif&m(lFX>0 zgs`MHF>;#BI@SubKz;3b^|Fb6HPUg4^io9du5)09GQ_)EF4H5h7R`x4tCX_)Xd2ew zx2;7IIE=5$+%w+JC8EYz5H1kQ%^{s2>`PTQUqN1Rw)-Z(>wn^pet9qp?S13>mpS%d z#n}Cq0s9{~f3J>gjQ>)k|Gq$qik*@WD+p9OCx1u+rmt&_SfiZ?SbKyiA zMbefn6`)Q&2y`(6<_yfd5drF*#RH%sk=lewwX1P3FU*$TA@ zwd83pKCp&Bs%1e&J+sOkVNEf^0{m=WjK9`e-^eQ_V2iBXKzp@yz!VYq;PZ}zfRZ%9 zn&mB;$r9zkx$LYI!e@50&d1r)yhLJ^4|Hf=n}po|tRVXGPZqj_gvPPjZ`k0zVf$AH z3;tW!6pbBR%nkp|T$B{k1;ft|7c}$Q;f)alDmSmDAwdu)hLo*mwY)yD71uQ}K+feG zf{-E>C__)Gn0YfXdF9;(h?bX>o1Gh%D{iKVxI!|M^;-5?)evZ3)03!&jH`YVIi6{X z4D|j%hF`ArxGltdFv?>0JR}1))s*Sa;^;~jI9-A~EtA1NZb}06r?#hcf0DEe=CVq| zTA_e2@7;1P-gy5zyjv-Xj|^Fnrn?v-&;Q&}uxb<`{-2mCP@XZL^KT41|MwXDt1sHVSao6I8~qqVK3cR8ueH)xyy{b#bX; z`1kYpG`oFkmUby|N2 zMm!W-TdOZ~rfOqb%(euTIs3?URjewb_Ut$vPae0IVd ziS9&&yI-wl5Q7ws+nzPbB041alrcV!%<#5tpwC1;`_(LBw11_t^JhgM+tVSHNJBd1 zwPUz?KKyc>ynRKIMQd9>x@>@IAGwKH(Gnp`ht@6taCo^0LA6vz++;m>5*wqZo+f3x zoOS$2r^Q!iFGZ56%*#zZcAruEb4r0_qB4%LmMPVPYUq{oc;6~GRlcQr6~a3CC4 zxfA#bfzwXQpk-fw$P@VnHPcbHb$R#kKQ!CFwuk)JJVFfL1ZyD-0D$2C2zX-?{cjiY zKL);kfq#+8mg9FxDBcX43>GSBO-(aJ9u1-NVm<)=Kmp5i*ih*RqXhur$PK3qn!>eo zX$S(201lp4`e>fFVA*HjEWi2w(1Pw$AlmNN7q+iy6U_K45Zu5qZqpfV+@1&fo2v#N z@2{lL==l(v_CtI>ng@rXD3=8)N7XdXj5f>3t^uv!HrF)8)C@<(WZvU9L1P=gHZ0nR zk23dDj;&qdn>=o(G}dG{^4t7}`pF4T94cW}bQ=nACNfdQG$SUgsFH%TurxKQ^vTh? 
zH8YY#m?6ZxFd-#8eU)79N>b4h!l=aGGyM2TP(ucHb|jf#V-8!@ANgBX`dxa7i2~$R zhZ7cv6hU4bgmY?UByrk<+7KxM4&E^-wP}N16;jzFPUZGBBR8 zEHan(5KFDAODP}JhxpP8^EFr&XFt5;nW6M3%hi=N=)pcIMZb{wlh6%-tVjmz?(i@MVBD~bxS zeawXC?vvPhgM)7vo(|hHEfT%b?PaKoEEpGxr1{@!#k3gWDrX9}Q?qsVLWk znOh5y6Y{2XBv^&B$(@*%lov~oC3j1VsxgK8@`A@D=JRF}-DEP_Ko|TW0be1AHoOi0v zQO{Lma6h#vA91!pHvw2sTLh>aRhibiS8(E})1o$5NF8KDcb5W%xUa%h^-YR1$&3(D zER+g?n(^f$9|xy$ik=YPFVqSpOaiAyEMy>D75%_k6DgpP_fG;@8$!a2uHEDL>J%T@ zbdt<}IcpZeoy87=segr|4N<2Yoq*Sgo~QSl{;F9+G`r-z5_AMHS`&dO)Y@wFaC|Pa znm%?0qDHG19Y2?p^uSlE8c&QKJ`=_QeGSccF-LJB{Mw@it@I<0j#u4E3`%mafe-cg zOqw8yfd?-}iTEweGh8rWzUafcjh^pvZ#U5E-bzZcWvYPN1w}FIeM{p3d=Iqz6)QaF zl>kD;VW5EOI#n1#<*^zMO6@UN$f@VYLeO8~M&BKrxf5LzI95WWtfIc#ke?$| zH>28Cv1nn>abIai^BeF~rms9oX2Gn8y%93;PZ+FtyX|)fh-L(N7swQ&Ou9}-G2m2# zOuDvnv9mAjmMdSPk~*Ni?6q zG@r*6U$@^>3$!m*z3*+K4e-1v=zSI;Tql+c-|tJl>%QZ65Wd&!z21aqJ}(zMUN%5+ zHlVvTzYnmC91H+(4ZdP5zsC(fE{C5CS5SpJFo)z;-sS*@q2NM;MRn|iB~^m+eFNc) z4l$ei6k*aXQavkr=e1nnt!|kgk9$tc_YWZjIT6v`9`3%{7#Xj!vg@||6fc= z|1D|KqzdVkp^VJiBAvkW*3PqBt-Vo&DV5y3F<<3b7tOz3tf{sVU3{S?Kfdy4nL#aS z5f8%x4|hlmb?j#C3#+4|m#ewLjb`N=9v)6Mz5z;x=b!JJpTGad>+SwY^0#qljBbYK zH21dw<ZkR!SBSJ%O4~@Evf(TS=N{aGPnGXLQ?1nx|g|YTH{{fEUyF=1kigFl*1@ zF|m!#@v-XWM|y^8tNW5j4gDuHYYpdF%VTiVwv(J-KVi>7)z`MT?Hq3Aqo_wlhO5Us$W7h8ZtGh`)R#%Y3$XP0<8Mw+uBj<(ot~Ww)Wn^3`E7!Zkb}Nh&nT`% zJhByBEU<~Yy;6cwa%U&^d`y*n8(a(@qFV7V&)>iEC8(BTIg%5reTUM#ZB1oETE;Fe z?w^^eEzR#-9$$ZLZR5zMjI5lSTRlM_Z21>AWbbS&XNs2@5F>GLa#2%v7iSl%#O&lS zobzs*iGLsOX{b)}t9Iy*%*1|6GG`?net35hw`q z%dwuC%SeF(8w*m{@D}}()Mi-09pxN#pRYe*>SoE~#6X@~2m_X4V?*DxlQ|!_jXXxk z`+HI;Z!0t@t*{~!WC5Zw$xx!5?l@NO=?yGMx&Xt>*j6E#U^bkIUyP8)uRS?BsBN5x zMvws@38h#<95jmvCQ5wwirjNOHZ<6A{8fuM!8{%I+A?-O+PaJ;2c_-G!6Y&((~k0v zpX%uZzaU2FE}Nlkua)z&9Bjej^Owc=V4TP1@LrhpUFiIqiQr56LN`{fxoLBuh63W~ zxDt^E^xoXyNSB!`kYGZ9;_TGG2a~!7$&Ahrb5%LIV`PYvJHa93AgSb;c?`AP6xkeX z+?0uu>!&C0-q2rqMxW+t44hlK-(#_G>@ZiHo)e91Jla&CTpailxI-b(dJ=Rhcqjdo<7Ly%;hLxhgB%< zWCRO~VSY2hXrw&!Rr0M4fd&9LjfdzCBtAS0kgiH75^dsQ^2J;s>Ak!8@A3<-jFCMi?nR<|D$p&Pi{ZO>R_5s%sWqCz@z*?-32e2~P?`D^zwzTz zi90vb&>q>*w+rBs4~Bv$q)4+EFn?E6kx1D{0Ts~bJaQYjBXc2qv%AVK$1qKZUmS5y zE+0yvVS@yy3Od&!Vy{pcF@#s4Sq6sT1U45ELDgk2|p!JHX}<3WTdj<~+A zxOP=2ny9jW7{CQ0-DNW|DuGVgF1|KGOPtLtgeX%$B41*vD}J;IK!k5p!FA=A%~!+4OXvN>5i+;D51>q?;uhLVh}i*9 ziuOJCNrjc-I5=p7mI}uVowr#5bE9ou6-%Uqq;TPOafJGP!&Sc-<*zv^>NpmFbgmh2 z<#R$h6A}9p7?_}bN=AXLd$Q)snWd7IWs~(c3@C$1+=DOq>hwDi?^d)~QcnY-OH+#6 z-kN3>=TG5^`QWX^-$@Ig7P;TAjbV!Yl05#^sEUEkh4%1szudf|f9^t+Qbe^0zwh@F z2N|t!FJn_=6jN5d&FG;pVv*f&E|#U@bO5-d8u1W~qNwIlmZ72xowkFST3*_#)Z^Yn z2bAuNSmJA-wJ>eOa&Al~;C7$7aFMB5qhNt|6oG45qh!Gavqv#YH-Eua`Uh20&d``h zk*Vb;jx$#TVwi++)lS8nugH6&y>P*2x)Y@_D*o zBDa6|AGo7M96ZaZAN5hOj?nFnvs;kPoZ^YH-^wvWdbAFTwZ9)mDsk&jv@jd<$pAI5 zoj7^3t{=55XedSNE|C@M&na}Z=5VXVwEkK-K1H(cWOn0_#{?v0Y8lPyBfZ$V~2y?;jySeX&lys*0B~R90Jp4 z{`jvG5m-eXmZ}xJja9e4AdrjS{pi68#e%l;e^Bvi?6inN1C~MqFqmOpnWj-u_cIYs zd;n#l;mQlZI$F#NP76cXa`tCK!z0K@XiD^-Hi;2VH^`!Sa_Di22hmNd3bDq6F5&A$ z8nN>L0}c1m+10AVBBq+#pc+u@+0bPuGqvq}!%qmbs3WKyae*p=Ew<~i^fJ?dVjb}U z9mKqVT52);#OAYu&I$&!K$a*`LE^F0-;rhAr`QhMtcR@7 z@w>DqUhak%Z3fza$?X7HxzVV!!>-%`$n5Za6jAF2pV#JdL|(t@Z37ak$=kr|!fb!x zccm417JLD4it#@I!X+T3TeaWS$GYXnim=E;PEOFGb%85+sBPI`O2clB#}WYY+z}~s z$jN_zXH$2A@@@rK6oBnRlrKbvDUZP08B=PQm@U>6%AQ}&m_EIPwe zE?@#OIws2kaM&lCDqj;l0zEZMmgg`JysTQwg;U1KU4v_5ih~?qDo_HFzXB7dDj%vb zq1Br#IdY=VVR*tc+b zy3p!5S2^CrTCx|)>6-xz1Ae(R2vph+D^+r)tB;Xc2cl5=P*MI(#>8t`d?vBt1(lwX zZoXV>$pbSrG0AjNXvyRE?C?q6fst)|7M>o!lp<>zdAziES>kSZc$|P)z!95HpAO6L zD-y+b60N`^7ID}Nf3%WbqfxcTDh&0C*zJHk@OFVIEj!P_1BJNH@$j$QYuZ@@k2smU 
zl!UY>)sdujg`qcgt;C=8u_%EJ78WCB_E&_BMy%|XRECXCfmmuSP*{Z893rGKHM-H- z2wnaa_Y_t=B14ZHI`gb>__pO2(&DR&%ZAaAAbvd+&NppKJox4}B?&L5#faCQ69Al| zAzj$69Q5u%AR8Krae$L~5<7jFuF!my9B>sHdAI$qPK-bHWkBz<$>F>@%oB{EN2Nga zgM)@b%+qo+%0k_jvRrV|S)=Q~MnoQYjzY1@;Z)M`FF`69ohuoaD8lQsO~adLzRhmw z23HCfCOeK4k%%Hj@$1)d7zMFawM&2B;W5s+nOrQ+B*ka_qiN*j3v%Z@~7$T7K zeoCo;YsNT#`}Sir5{mD4Bi*_+L?I9MXxR5pSU?qWV2oINTNwc4nebXtai;p6@S46K zF!QDdo$+k~D5U%l>v3VyrF$C9cfxxr=Ib`QjuSrPO9mL7hvEZKic!1uCHA9Qn792g z0){}9%`yR|UcW$3&8kN+OQmrOmcCXd8a!<@VrX%~vLzE>QWDDu!wZxK)r1Cxd>(AW z%Iw5odL|XDl7Z1a;AHCT7R#`vZrR?zDSbsXc#t$U&7sKYnzpVUxI&wdWuwTcOW(wZ zOKU~K$xY&N$&~MNxMaSQuZo)oviLqBN}f_dL(|aMZUN(M9E6g{b7Y}8)`a4{aF-Dm zwEms;jn6^c@neT6#5{=4-id-|%@TIOJX7S@{|?L}6=P+RT}pbAy_bpcfLn+f`lw@! z!~YL#HdRmBEpTo8jZkxYh|MOTz~@zN8`JMkAEp^UQ>XX7RNI{}yuqKY)Va zE2^`{;sVH0hiM1<{zKCY^m4bC5*_AORo*-Jzmw~KalD{ewQ*bD8)?^X#Q#U~%-_-e zf7P@E>`Hdax0u>ph*7@9!_Hmr%;Afw7n$+NM%a^n(A&I0b7WLtVLe%C6OIO~cla+VOO(JEaPMOVX>PF%D673V0Bem7>>&=`ad#l!k#)s%*{s18r! zL)ugNyN{F<^2niW4JR@>aL+dQxZa3V^`m(Gr$B*9!?h$hKL5qpY_%!S{y)W$M1wd~ z9ew|Q=l|{h|F23`h>zk*b(K#!kMCn?3W%*gDSV(n-YyTn)Iw@|2Km}*$iptstK87q zRsQ0)ltoMT){TDO^!fVJEhYCeLJ&>}l>QSTuZ&7JH`to;wWMEp|+!3t9 z9){zl+d~C9$_Gx=ulpimt!wEVCXfC@AI^#TD~%!;nJHbBWupGqngKgydpX7HQXaqA z1V=pGRO3LX5C{CnxEA-tqg-)!*2MLJJJ$c)Y>I4bb``%NbNC+B{?)mt|F+ru4YvPi zu(OVvlKBo-8Y%IuypPJ?Us?K=u)=74(Gc_F28nT6lNdTsQd|~jh<=;ld`Gy9uiAL1 zB?@xBP5FE=COp5NJU{|e@AU^L!cKFskZs3;AAQm|9Dg_f+^J<$za@y*Y37prJWKP~d$ z7cQ1x?S4kKF78!rNP^@7INH>w=?JZH6!+!=Ym~nwX-ab_D4YnC&o3qqZSN4w)LU>B z;olC{0M#XS?`=s*QY6Vg;x=!S!QseK6`B5QP^6)TU;`jd>>AsnKG&=HS)f=(UqoX@ zXGCjG@3?&Z8Uc;wtp1u^!e#$lJvWJ|ZiBSDhy3?f_1BPiLSS0h@!i5PzF!yL_ouCc zDXqSpzM+{ht+lz0KJ9;bRW|0fv@+(l|B$`@F%$hG@-Qo^&IXVlN@#?SjV;!Jm>8Iu zl0zs2FHV67$;(f@(r`~L$A$HcE!>NbhhjXxvCLqC5o65b;px}=&)VKrKKWdtM9*Ka zD%skEfnym9)TGrwx+}{P8BCKiAANx%q_&t*( zyF$E|^}8AV%yKIS{D*42e*pvDL>2+~ZCNY(wu`F#-vILuq`vF;ep@4~#J{s~^#31! 
zM3vHvEaG>;Rh&c`YDlGCz%M>I@5Fp6GJ_)IAp!}j*-!_%yW3LQD}jy;D^$Qg383Og z{-6VEynO-bZnwEU0ruP1%>wF)xglBZ4=%?U-^Ev724-AuencMdqfrbGm<=zHXblL3 zPF_fx^%WwqA`$h=*(vrVHYUt-7U^m9Sp&PGrB2VrsqbBAX*N9hv)DpLY!oLMfl;5c z2W(g->!m*0i?@9ng)P>&SfS*}EF<#q>YHsaV24Z`()s+!w`Q-*gRfV4LL1d@KjH@@ zq5K-(;^P}loCK*VkyC=65@}ISi>VDw-1WmW+Z($ke6v<2pt2e^kJ%$o7i{h&)S=F~ z)>PViCye={Wf_W-7Tm&29|S1RK~}6HFpuKR*EH59t06Yd^!ZsSLmAg9$>5d#z>}BO z^p>v@U6%SN-VgbWvq~nNBX+}&>@%Fn?$H|%k4a>wpbiF4$>Cyp%V48L)5Bptht-&C z!OSBFJ8HAd1un~jQ`!-haBJw~If(-^Lg~4=3|7kWgxZ$`wMWsD52Gvm0UWx$wYwV8 zy45f^)as8oPESI1Y|pe!k)4(gE6B&jR)C^ttssa}N3IGjJsl86PN5r|eK_GSZw+A& z?(R`3G5`obXwdEZ6+H6C^A9>f)5lzkM=D=%Swc*h%cN}>8pacaS7Uup(;u3lf?)ko z*B)(+Bh)v~0YuU&*6lkyb@`cq(}1KoGWq2MB)LY?&&rCOe>ahIse5{1eHYQQsUNdq0TLI~OfRN2>zg9EnkT3sfiAX&kj#mn zj}2d%GD)}TScC4!n9n63!;=;E;ujVYico9Bv!YNYgHV#=;{)D-Rr2cDA^V+`HfB_x zwq@0SbAsFEXnM`P^4`ppS||8m976`<)g((A2q5@z(o)2NlzcV>?#ZHu;oJq;Sv z4lXBU?*}@}STe2ItATGNPU2o30L+2@+Hc~xy$KAQxRZh(RQ-lOsD|L7b>ogkj*h^8 zyp==gChUo;yGaCd2A-Nj>!ME2jNVp4&5Yebqj!@Jv>3VR1fv?gHbd>q-D*ScEZqKr z=9SC!dlX|ZFH|MYPm7F`XLE68!n~3ceOJ8-b-;%o!#FczNm98rjSPfmEOA`EioA{v zPPkKIGqIisb_bSzt&r4S|p;TO%_Pn0cT z!t^1>f5FVK=42tJ?~xO?#YM2^L5O)(qy{n7H*6n1sPOq&X5_YMQ(s0bQ4%*6+gGmS z;Mo}ye9MPs8V-*cnMbSxeJ4fO^735VR!kRTm;4}xvNvl^uzEq1CLX^u{CuiM5aVD{ z+}r=5M=wO*l#M1xSJL{kd(k&eNo~)6D&P^Ur?ZxLvQpp>f}*&#mI#iTakLg8Z%L({ zH=BJWw`P)$n_dl2fUt~${RTPwUK@nan7{8Z?_?Q6keVc?+KJ2ZPF)@$r+*wIepuK= zi)<5(3Mo2nvDisM$+$D6x3<<+VoFn!L_&?3u3ueWp8TF1D~aW|qWS=M86$}{jmX*z zmS%7;h=_!|DtLXPjK&VjTr1;tN~8(s!r{FcIZYZw#-9e%rbc+i{f@G^W(9)g+IdRQ z7F*W&A%XV#kg>nRJ zukpjARf-cnylXvjr9p%;sWIoo#UxZ=SM&fN2uTPNDNop#12Za2^obU$8F7fb`LIBP zvi_k2vg9zcdfLu9Q&K~71a)O0|Mt69f@OxFs|2|}%0tt?b3<%u2&_%rcEv?lJb&d7 zO(x5y`a=(>X->nS?7>rPwy+O7w^xpk-j5V35x~nn$Kjb{KTt8HiCGjHW_j7umK`T< zv$SiR@R?fBVKXid9sy(?&FRp>FJDTKfV6lB&nzp6f-aBV5~6!o?vZjd?+Ig9)M|_< z*FP(MNR7C+s_!k9HT7_`+aYdgwXQ7AMP)cy4b23-OBRtGZ3#tEI5&-U3XjkVHR$fE za(GAWn7vj-?|`T59Khz|xPHR>%-@<~`}FBLqcL12?%5LDn=2v-{(S9-_SwzB_$oRu z{?Ht;`4Al$aue2HP@+|efY1VW{z$3f=181ZPNO(6=h9a?Zls3tBj3^%rl*oi{SnfAY+ccgsWFf{EaayPl7x;YLoK`9B<;9mX1@0AU!)v{kBsXztN~#N$zv@pN_RnRLlAfQ zQiRxJc29cIXG_)9QXIpov^7GOqkCrv2ZnxR7*cH9~ z{Ur5Ocuo(brzxd8w2Y4poF6JM#?U8U2rwQrNai|TeU62{7DTIA5`%&>S&o0tn~SWt zU*nFe)Y+-`r_6F7QLz>;|KM0?=86HHnM6<)Y1kvOC5~_|VgB`o!`>}+WCz&baR^gy zk7qcnBc!FS;4qj!mP&?n8f9vr;y$9#XaJl__n}FC1?hXhX6iF=N7YR&%5Lh5!|`*8 zJF;FsacAI#A#;kpm(-4<+#ZCWequesu@;5SetC@zdoZp`G-{U#dLRYXi9}r26mY7M zWrL4FGX^^FUY}5yk`Z6_NOAQ|5LDitm*QbgMUZeqzV3r%o#gB7_2>y}#(8%AP1t2A z@LKT55NwnX)Eep0%mSd_Jdi!zBMNXO5=iBA&_wfrn5Fhc=M|RCEa|u2K(DO`%&`_B%?d3V+c7ZuNW(O z+Eosg0?x6mW8U7Ccq5xN{o3Tjkn_}hy~ls12~@tJyYmXpq9Y@Oo;p!b8)k$l<%F)Q zf;|NNkf}kv^JHRgl}2}Cv#S)=UVoWKO7r?4W5=uFg?rJmTp zv#AzClnilrGe6*CFs2lVr&wj=V#Ovld9QgZ8UMFTuVYn&OKl@>(Nw)a7*XxhAyRuZ&B*Q$4o)&0CQl>@a0|uZpp(QK|_=W98!*j-b*J zlF}M}c=TZ1&oIudjF|e)Ymf5DaE1{kqEFWscL!_^QTkHChgoaeQ5*-``F) z{g6caW&FIh$7pT|t1sVT{0DK7PG)jtPe&EGtf`LLP)p*cPPkR)19?@UDmBm{bI6%u zx{*Ch)LT7{Q;|Uxp(XWp*n~Asvb2#2`zSSUjUh`cTewfJR2sKb9iPCE;eylv!uvn+DZWwBf4e42bO@C7 z;?P+Qfe)cKQEGRhql4$1enDYO)u$rRp6$coAdf+ul9Ep=N-dY5&(6%|%-p{_R-;+i z4Vv4F|Ms64a!FmTE-tHSVrI{n4Elw)=!7PD za#(AHG~{rOkP`btbQGZ4uuA^;a;ta>3qLHU*VF1l#v9R6H6%S9o&9lb>`lIP@F>wD zgr6B*&0Jd=gf$gH`Jl_NAh?odAnHI}KFOrs!`j;y#l*R5z@ikdVyiL`M3jhxUzh=numldFVbJGL>lEB(5rWu1NYp?wg{3h zO8$F0Q44TNb@NII?1ag4h{UJ*?kGY}zg0EkY%)R@zDmRUxMl*iF{aYjY}&?}n`DIS z65lj&6N)oPT?C&KlEEE%A%SNys?O|13oGZo-7g|86Uos0R`v6xp|$CL8UFR3Hpj47 z1#`?`007n4006xIKbGnL#HdcGUHB|4qkBgm2sP&i!qF7@ZTkU}?h0^nh~+wBli4K1 z#_@q&tWz}G{cJvzU`UD9Zm-%}Zqjx=v$>q#vULJ#CN8S9ZijuZ<+bSP5{*YzPsS$h 
zsnz8@6l3SOdDzZ=$bNa{{qySv2*C4>_J=N9->vWuU4%MB-QxjCuU83#i#z47XXYQY z4=0p>-P1WcaCOi-hXWM^-9am_8yA4vaN@fg-vxABg)VztIq$f=a(p}^zO7yRA+PeJ z-jO+AtrYn&6(8ifuZ%n&nXo;(0~=ni#JJu=A-WHHtbpE>yRLxVGqd!S7Uj;Vc~w3u z>!@MbhF1F*6vA(gVjp{IT zl^SJ%E8r)w0dWU3GYJ}o9-W}!g#V;C%$gdqYvuLl`zHG2>{A`FuNck+NfqEW4;ebq z%o9ZDhyfb_jYw^$($PIt@js`t)&pEeH=e!3S;=uZ3yhK#brU6tL#h&+rkbQxnidr} zOv!^U6L<4wVF%!yZu6VBiLuh$1l;q=Q662`HipQWiP{*Q7$j2ws zk?J~`^HLgNhg8x_+$lpq%iNPlt~?FG&j!Y!1y&e z^vQ&m3ZWP?W0Y`~30^b9{N)Mz;+Q2)mtvf&dDg*N+K^|96N@>cv`ha6mXY#d^Axm#TBM9#Alppo;?2?2t0wM zB-Lt$vz4hvbvh^=UCnOktEO>lu|EY)3~+9lnI%x=;dYd!gq^pE($Tz<@_eMht`Kzw z^oXGWW+A_;uGC1f1xgC}9ZS~-j@+q&uhs2al+DkUKRwzMlY7 ziMgSsv_TF4+K9Wswh(_LRB(i818%2$S6D=3pzmF9)~Mk z8D!+Dr4lE~>}x={C$lpyA(GlDL*8cN_|b28_tJ&&*zi%!{sgL+q))((c2 z%)1N1?*JK!({B~NtDybSeQ)sM<8sB-=wbgZ8gMXoMEDgQi^j>2ggz!^T-sfASl-#> zu7AW^af%#d&|Sd0-c>swP9plWq-&eAiu;z;Iz~FrT44bktT1tlhPv2^P-G$!rR@>) zluolCJXHQ!U(2XLa#cMzo+tqY6xU#@8w62)rX9sft?gPbVc=E4dnhy$W6~1aO^4itK=)__%}LIEPtFlqOHLDY7H5!yZywaTa%$QGHKK=G zs+a-O8TZ#g>4FiS?vk<~wi+L3h}8qXclzY`D_#fJC=J8AJm5#6DM!wltZmGi$*=Hj zm{|5`NKdOHz7LYXi|jS*=grWW&(;$j>>t&aJgO{l8hi%uW4T__q&c^TWlABV5fP_Q z<1r!QyfWRK_aWxYFI|Mf@bUiqynZfHzX=xigi8YmUViEi#u2Nq@#EIko!yA^hMMQV zPX?jY0SUO0Vw}C&<7elYVsd-#2(PU6_RO)f#a#QjS7K^8RHhKUj$+_jY2zq8Z zEm`I=E-%>y^^aO#cSJogmhp!>-Mn7ATZv5>9#C6_>%Tb1q;EAxGjQbTLpo{Nj>kPe zKDRv{+8#8UW;Jrk$d#{qC0wz^ppLpXRNxh+JT z$su2+p9deo`YhScyqNG#0j+Dc{6V{Y?+*XKG;FtB-Q-OxG56rzWV}7G{Wi}eY0GL- zW*P`q;0~IQkYQw@2fuM_FFWg>I8#-4q7u)d0UVio@7Cvhj0T($qS=^N%Cw2eL~HBp)ALZ+Yuj5VFCgZ9@>czPCcAI=&p#=3BqX2tSHR!; z1>(Oh9Pt08WBl9koxk&jQ{O>J--pZpK!ZzxDGHD~wG>ekK+QVkQ6gmo&r>h1<54%3 zekU%NC!l9aS6m$y=&O{bmiK9F{zg(!N1QN$xL{;?riJn*m;ld z`Fgv81pvMK1-B(9JPA7ItHk)M??;WHX%p6p9T>{wIoik<(pM_n+)2=HOC#|5cFgcb54 z+BbyH0RxlD%!-T@CLshoOw}*$AJ30hlN&&nR_zBTOrU?fEi5}-6CP+JQIZs=NM@ko zn=C)Zrp|*{)r5BW_R4Y%ZYR<`x>FPHx&gN&rQl3sgl&`~;}AlVD~=5=S*b8Sl8MGg zCc9Hxmzg;6&pG|zURsJqeFIyLRfDUgb&+f<2y~tTBSk^dR$W;0&o2Gdn@K>CrZSZ- zlhxuLsx5&DPK+&1B3-Z4DTAzOA`}1wA7eh8+sGpJ{ZrDf0oWn zq1Wbmg;7fQZQHhO+qP}nwr$(0O53(=RH7_?K%zMrc2ZtEV4~ z+spGZGX&{?4keqPJgti5rTpq}M_-sdS$5Hk58ruV`3>Z@RM3cYxQjh)_2eiEbXhVp zpgeM2ULfuPCIuWR)>fXx>K>Hoc7t!Pb>RaffBp|5E3zIBNf(5&10Yle1>9@b?Y6{} z>f@Kux}b7ZIvlhdjdj@8MvaEhX5KtN?q-w9VFr78)nhf{Rl4^FD99L+g_+X*%VWY= zY)qW93L-3>1avzDUo+G2=DoK3 z$#0n;m>)!Z=?tOPNN{@vA;cU~f>n7ikir}^2hfoE2h@oA(yw6jk{ls}NPVVD3}PfX zKP4e~J;VlRxVwW+FXT0yJ3l?vOy^xlj<%UG?~d*f31o-8A711J4Hu@!nUhF7hNxDV@+<{yUvXW2AlWaOa;}+CtI{NjMylpEVqy)_UFj7#-KIAB2&{m^0M}_k)J4~&- ztU4Y{%*alvBKD(#u~Er=mCuTPkZ%`A#uKzPRh9e8T-PYS$HKR)Dk#ojJ) z`rtdcSqUV$#+-rSl+(8&&Er0m&J>+0i+sTHV_D`Ws1uIG&ojt|*n|3Y`h&(k{DjIn zQ^~>85H$ducKWJ^fYK0*=IH>E+&TF|##Wo7g69!BL)>^>90@0x3s=-Kbpqj#mWnHc zJvEkux(3ShO_b>o@nKvU@JQnbw=s6WLJUxz@(fg-+6I$j9QDb;8o*4#CDn53 zh)byE8$D;dxzHS4h8cfe#7p`E4rc4Usoq#=VUrM@FG8)iH!{ZN-JhZGZ@&a%nfVZf zOvpnQw>TWM4oJfdO!?w>Xe|FD7#T-o{+^lC4IwPtQopE0(a}$(h9j_9uK-(Ux_L}! z`%=(pJHn`N1iEGSPCCQpY1M?eA?`5BGU@5T1hjTSciuyH+(kH#AHdIFA#&``dXOvnKNmhu z3Oklz`)(^=%g8;7v3sr{^JoL$Irji$!={(^?B0Wse149!=avoq&?tOIZ0qO`PMz2! 
zx~h5su&;&pSw!xs<-3a^_we@J!_a#MxUB$|2~=h{4UnJXeqMjX;4a_Hx7s~^u^kAL zUAz9m)m;?1jjT7iojvmzV^&3(Y9Z@rI{NY7KR|JCP-ako007uP{V!vn|MWfle~N*s zTq|vgq3{BsqZb2|`5=!XAX#Oju;<$X6M_Om))vx|ktC2|5}Gte(lRE=@aGlO|3rS0 z|1>EhmEm6eycKr!QiCuA7MpV6+3Do+zUB3P-s%2&|1+u&V0WMj)B9S4%G-fINK$6x z5pYg{Cw6ZNLz@w4h{g#yGj6I!2kqoZ!j30xtw^Uoerz3iT-r)fYij9>JlwA6Q!X%% z8grs=-L18PqNcXHBC|-rz0NtkEY+qqjti6wrBxO}xn&7zTnYVJzKSB^w31d>Kuj-V z+^MzBa;0k9EK@>lf3D>z+Z_7-+#k_RivtS(n5H{n#huk>YeGG+)bzKkB+j&3b97-B zqfWesriOSTXrE(%$vI6Eg%q!)+oXd!%q~LLxJ`=&%T4fo>V3NMiRjk?*3;~)3Ml>I zdwf^pAyasa+e}Pc30i4Mhp(5k;Rf6O)J9rc>9^HE(Uw9Xvl1+(gv}&uW!hMxP(7!d z7UM+CcBI1@%i3KIXh4&(Dvq(a)$|d}V|HiU8gaSKonX1D7ByOEHnp`l!Ar#TlFt~l z+Ujy7C;?T9rRxdOGoK2qp{NM{LlH%#=0Fm~nktO3sNlzZD78dBO}m{cRa&NQWma7U zMzq=tDaW-(FnBVc49YusVn*_D$lDv zZ-_0gLZTooJwROXA_seK&O=juUMVsvh z!Ra*i+?{m>bCg!=mV~GhM<{cdshK+R@nE5vv@5g2I&_8oHP&t1!Z;ir%1rTZv|2~} zr>5M_@wUr-XsEl!L|Vz}*e^gBB+R1L0J6^jjT7FKsn=TzuhDukos*3?v%1^}f;hDi z^4DeWvsh{gpghuMrx4TNKb)(W?8bTnU4}u<(j} z5;Sch#!4+wXS{fOc@2rOanCfC>EG!i9By4(iy5I z%(_S3B&n-Rh{Y@z=3OL_ z7R)O7b*cVr+L>wtv5rmNBoZoJcxi{Lz9<*H>Wr$fTqM6C$hzjNc`j<_nc9oIqT-H~ z3=Uye*uS8qTFmlM`?iSNv-B6ZhujnBe7X;@9^Tb|jmAezobvIhvwaN*0C-WH zfiT+T^9z*)&tL9a_y8^$SQx=!b3K>xPEAjO5b)OZ_L8|rHI{wH`ea-D;^-`Y*6K@a zo)!&+G%PM?Z`W*x{Qiu!k;HkHSFY9ENJqu$U6b0bAPQuBC#%jW9cGdsuPX<^;(C2Q z)}p9GZmeAXX!NslSSx_D!a8c-BDgP)x@P|tE?t?Ki60^2mZHpJlY$Zkh!IOWZ|8ZK9e~=<`HT$417Y$yi9TJ{lJxbl-0( zcj-O;H-Jc?yZ*Zx#lv8}Gk?asbvjvk5!(CXoXm61-Ir-j^KyK@UT=7U$QT&VVkRzz zl{iP45KHfB+}sS5leQ3UCXx~onaB@aL|)qcz7R}E_`l|xY~5uxme#8?QPx8TX=fSg zVT74;>add(+8G8%kujCysX19z{W+LmbX(TZXr9}%vik`8(lSUAj#BI{=IJ@)>cBBa za#VdIh>~gbke$@;>r{h|*7-8CO{XL|uO@RfN;<1Fy4M}U0jg_yE|<}Ht1a?K0Wvs= z(2rVo$tGKQOlPG>bilL|&28)09>tQNK+*Q;jUw7GMlCEUw-dH_6yD|c9v@Q$<3->_0LL((JR2jlOL3JZ2IwGW@=!j?L zzD~QSH4qf7q7P7Xgk>0Dq&&tFKH>2}g-aUao?6+^lTPk1e2va3urb0C^gv_q;xj67`?B%W@vnnSD!onqoE?#R;=k1m;rT=s`o3cB z5H$#k`-zOZC4v97-L!lLCAUPYDLFuy1G>AyokkYruRy&`jE7R41JqhDjX_N?J`jn? 
z^$32~Tf@|EQ}m;`_`h8=sU(=hhenopL&T3mgZiR=`)&YBuIA{3z#;6_`%T!-uvHJd zW3Tw&8-NLQ`c>6oU{k?)HR%c{*HY`GiNZp8^o7iB(1-}hv_h)1uu2Qc;RL&TTupm)fF>x!0>clmK z4J=c6QEwicC_5NZq6Zsys(1ZNKoY%%W?T0)45rEy2~ug|#t6^GAW+v#L`r5P*pS0U z2dYE{BeMeAc0@Ty>kqMqEMUanUUuF*^Y{?3p##8T1`J5hpi?3gX*f~jQvytQJ9Zgq z4{;Jb)QIrTsxOuti-`$ zJi~CLW2uoO;_2144r4fN3LI4?>K5IJSO-DjMT*`(&gOFFLEBMc?FdX$o<)8e6Edgn zA%Qu=X>nb*Hk@nGli7KS_0Dsb^%G+k?)7v90h)R)nVbj;O7}maqJ)B@Y516U*V8sso4mQ5;oRgK})goE94e??IKl6Ati4JX~L}a{9#*_pz zj=`+UoyU%W>KtJyso|88Z3(?m_I?Rg(^gf?iOrx&JqP73)|u=iv+rO@D4lvE=Dk495y!R~A~ z_~d9R7M*k1+ZYx#)FJ zDY7<(lc{;5RiN1iM%7n**xeCwPA6_!RKOQMb)zEF_tf8EeN|a4WO821df0kTNUVlk zg-hF$Jl*3aU@9}B(WTB9Yu_!&oE5!F$2DV3sJhl@Q_OGVUG zhRdl-qef<3+Y?)!>sDC6&N-^b&m3&2#L$Tp5o@UYau>}hAW$$cx$J&kiq2k&?67kE zMp$r6%>X&za*L20SYo6hBa0U{YBNkImF?F~Hl9h@pZVQ(hhTvjduTzig6}YVGE9Fx z#MJS)3-YR)=3(B|7VRG0Gb)47u)N%CNPxIYifwMnV$U%#papfxnuNehGk^5SRM&OQ zD1*0;dB}yql31cETVFScrSlqVU>IF0hF=@9s>RgeH`p=@3>}p7QgDxYLbHSao9Lrv zPF+M&WEOR4*>>DN5KDjR_fhbq61kzT40)#usP5%@rz9tIn@(rC?&^ygD zjb>`*$ND3hj1#%b*@-M)&@WLA6zIu?6`8KOo>v#wc30Y4IJX+LhO1J4R}4;U;duyY z33+pMZSc?PV$7)p@#^yuCp`2hw#uuO<+7T#jjtzUlaDC(H&{gelzSy~;;^gMvNO{-;nlG# z_1FhcSr;oc3-W#tiV0di-4}%I~D|}%G)2Ml2!gqXzztA3S z5x;7Ux?|*j5H z3m`z(GH_->o{Mu0SiTS--{(0S@anT}Hls*m&>tMUD^=tQtI%vt7aA6gV~J~??|Re6 zxh~uen6zn8RUvGq0-{B;#v?(rj7+i0K0owCLOMa_Vi?YpShz6sX=g6xVfQSlGe`UBUv)WUSmaYE6qjDj`4m`Y zMgWLXuF-~XruMG@{=>1JMaH^|&I<5FRwb0N$}dGv1SdDbH-T|}Pi3Tkj)A!1C?tKB10XorwT)Ltyx_Bg5SxFXp{CQT8 zF1HO4Zk!_OH=4uKu84^@=G|s;{N-OEUHzi0tA$yY3e%1iNXp8Uo|QWy71?F7!$I3{ z%ZQ&+AZ+GEDLB!*%ie|RUOcO`&o{wp=al?y=8f$O%ICe<8%0v~SgmzAjRE0+9^CW0 z{VgAKBL~wl+u98H?FTYjBTs(ar58#k^HNmOj6Xa>XXjUKE|XW}u88W;f4PbLapZDy z4h#2SzV5uCz|RZAuR0u!R{re>et%1Hxoz^jooGvX=g;{l@RRZG#z8whk18Oa;CF1PLp1%)$83gdu&c50qSXftQt0gq}&gumCcZwBi~Yl4nVZsXeG zP(HJDoz!8C?4ch0Gn=YcN&7;#zhvZg9hGO$n)$tTS4_vG1{%%egafh8E^y4x@C+UN zlQEW2ytW>Wcx%PjX2;xCz9yjJ&lc{_md#IzORqTW{7VnvR^IMZohIs7nP%6ToVbF# z;g#3}0W4VdYyE@DqMZ>auVxGSp5Z^$Jegf4aB^-JNt|fkp=6%%lslI8FU+NVO>rAk z<&q_GqPBU&&9eyGA#8_Y(l$nIn>SLFKVsT766i4g!iS11?8_yHV>_o_tL#5-72Z5I z?3;G}+?!=j#J9^7Yy`KTlyd#?sL;z!zp(0=F6&CMC!uMDQN^O|%w+1X=9I}AELoSa zd*>_5fcX@4!_~0bFL#^?%(=^)s9Fl7Z{6Q6P(KFceR&l`mI;xPn|pMk7r|2yx*mge z^($16>|s9rRC&ZXd3JsHSWj)`AAGx~=RkaI?2&dfER<$ULBBu^6|y}9r)OVEx|(o( z0HA8CUdz6>yL>_GK}zC`hTNJ^S{;qqnaHVXrqhl(Yr6RQr&*f65X+O{o8dA4w+YYx zn5F;Q2dUJ=>AwljQ27nZYGc=y&#P(zW%RH*R6*z<4j1KfK3rk!T*(O%)ZZF)An1Gn z@T5C3)KZ8;AUCpdGw4pvyMMbhT^tdLqPD@FvDVBh@~LaLK!sUvU@(L7I@4hki(0N` z9};P9hA4)XcO-usM-?{~LapAGx5i6D10n(fr%W!+g0wl!Io?x$pWkQb z4#yq*p!h=E|2Y+bDLfrK-h)Z1k*{VFEN>&ryUjfH#IdjBX}IL zaYx*N|MTw}$lgrmRFKOF($U;^vonN|1L;z0Teq6H6-(4!w?Z_0)z@GObJ1RcAK@Sf zUC^g%^3=FM;6nE_N~vz8TJ|qCM21gzKQKssgMThd1gB(Klx_~0Ke%o1e9SSU>i)#`Z&^nvI|5j9hgpG4^ils2f4D zhlP)3X1jm-UYvj5zx;s*5WAy+z-$k;7`A}EF`y9`?WVOD?9jN8%zzLQ5*jGs*hSw^ zM#7WINV|&&w1mm6WOY?mjY>;XKB2LzN;4$G0J5}IRj(}?#~iV$kl}LgGNZ_>sk-D* zS6JW~Wll3Yn|*ULiW2Zi&Z^T?hS{DcMvx>vy&yG`hS@1;6e78%ls$bMjy1KTtD5BO zI+wu2WNk0W%Hrrw*Q2gR#qe6#+B!1Z7^`unKIc$yiWST&ZqA&l37@bt7&@qBsg;Vt5KpS=I zLz(oDjqYN~uE?F^vJssqt9B>eHjx) z6s9b;D09C#%OcDH^)f*q$q_BrC{ijT@gAd@X#+9I5men&M>wYN39nsgw|%}|I3@K6(`zmJ?5H?ZO5PfMyN7IDq=~mLzU*}mkO$^=Z@ZKl zexRi-5rl6rn!e>L%Gm{qn~WnK@8^#05NXj_X|K#eXQvKX>|ESNl$s!?l+qc_=-2imbKrkJ6|EL25C2I%x7az+dTVf@B_j)6)Ikx;=*wJ_GU9eQ@yK)?V+9Gj z&LQRk8t2O}_)EW=wU#(etP`kDho!o}Rak@`0Y@i(nZGlk{XWn*+{U;^fSnV!91`Kz zKS;iRA4R{LH4 zTiyC%c>CwGKHqP5>dM588Jo=V!+6*Gap#Y(|LC6&zuTAR^)H@hkO0zGcL027;ek=$ z4%Qg8EwllsExIiEko7IXl^Yaj_sA2w`7Pv;-q(^fr+kcU`~e>yDHX@G$X4ZpUOt7A4WIl0C|YrKj^Z| zMIT}3eUw>dLGOG3=t1sk0rXM?T2tdxq~zPlmu19eB{DWk%U{mU>4dSDny-PW(35dB 
zd27vLTS5flt6<4u{UDXVB^SNuibuV@7rnwt7T_TJWAc_*oo_L*-7CG9G^h-RSfI&~ zFPqf0Nmc4)N|4ssAX5`XH%Z2>#p>w_&5Vgk-A~-U|3a;md=m2_J62ZKR#_SH!De1c zGccr6k416fO?;(r6sF%yTZ}-~xG_z8vaxwWl&-QX@+ zLc>o=0#cYS{0=!aV@-~Rkh=}vfohI?$UV{#iTb?pvRFe%=0Cr#R)Chsc&nekyIO^$ zO%ypRN3P1ET16T$+p#PAK!R_H;6oUhwCVIkBwMhGGWE_vu6cqq6Qzpa*xYI~E(fe7 zo5XIFoyUbR|@F zq<2!~ILfM4n-L{Aq}E%!$e0*6nN-I!Q{EFx32o>l0;2sk5%L9d{jDavekHXB`o!{=3zrU$m8zU1ay7zzKOAfyktZ-Pwmj854MXNCxo6Q)*)x|BFkboj#;@{1&vXw zt6Brh@qb;$XiFZi$$1{#W zJ1wnpB}w+`%et8^*W9QeA*FR&UxLZX(oV5Gox%1DV?D#&Az(EDdj(r*{~%1XJTv^* z=+>b4jf^#4w@5DZs{Bq9kWc*z3Wll^xJshK(0H-Y7K5;#J}~z~>CXCHtl$2g8?>7N z@VzKb087@+>6{2>uv zDtGkzfUAe9kkCG&aSYIaU=FPcx48!v-l5=64;^IWl_8I0S!g6fWEj0Q=;W5-C##sn zc^3~esfR}|2UDjLRVUbsJsfFqr1aRf=e&@z?3HS=d6jP~-9!Anv*l7&K}a97_QjmX z`AauKeN)NY9hTQCuUW4v9Jh6mJpw0bKUjvnYsnKYOh0B@&}2*wXaru)NLhu|*<|tC zqM+33FGE^vcS<8@Z9HXj9^7_bxnD;R79i4Fch+KePADnC++)2<(wOC*ry0ryv{X)p zda9Nzv(6=b#_CTtEfbzo5cZwRYYIbFijS7nG$bqNsVsGBIxN#pgMKA!7g=-3ySdiY z$3sU_Th3=<&RS}6fb6=M_Kw7i7--$fn`ESl`f}${yS(0oYKE%YA`*8A6po#+`N>gJ zm2}JGc5i(Vv@maKb?{l0FmWzAIqUUgw2?$oR zow;@?9E*c*5^J}dZMO`bM_kJL*`nmNEX*sZqKo&mC~qJ}>6=g9dn3ue80G6VwzU%XU@E*9iI74B-Ocq zG~T1y*sq7hgF}oHck%4YV?-7lho&K(CNMiui1tFXc4p^oQNo)tgcpUqKQe@Ans;#s z({9g5#V}1Mj3TK{Yr<3OQ>@;ebWPO3ESsISEF85uOjGoHDpzOzu}eX-GsPU%%BI@V zrg-M|bUZ7;ivj>I*(r z>+$V41@u(ii&+7Ea!v1WMI28h{{&94;CJVd7{@mP^A7=c40wAa7(7Ebhg*z~OSoY8 z#Xq3qvD6~J zzwZ=QFi38prB5}`6V`8KSl7VYv+@w-d@Cxj1K-)Sv@YM3)4M{uelqWrvNyoe zBXRY&bX|7MOrvD3Z8Cy8RD2D^!m(F*T<2Vy+?Y|Xi?^kjPv(j5aH+(Ykz#ik)RqBt z!^BK&p~WL4O>sOfO{2(^s~w^E2muh1TZ~;?{aryG=aZ94Dwb^YF;P55|xKzHR?V zxYx;ZRv!v@fP7RdySdT3T%Hdjw;1w#it+NP;A~Ix%RH_e1Yx4|mBU&_iVm@=O_H5# z$*Y-LBH`}dM35Ef9s0A37}`;{XX5riwuN3@B1}vDm)}2lB>tX>l?a<3>NnA^eiOa& zzfAQ1Cy+(*{|oc~?w0s>n3st5D29^y3-feniZn&bK=7nI8Ltf~kh&DzCf%fW`91+6 zgc!ziL*Sls{w6%l+bauoLUZ9WnatiZFS(mC@&10VAO;xp!3mE15o$3ldw^oVC2_3< zu!lyO33m(-kdlzeLjEK*|b47B@{z>64M%+?oaRVg&J1tqlq2J)Cdntwq)X&h5b zSB7lT{2Syud#Wyb6jYjeemZ}HeEqAO(|>_{BUVI;qQh0vJt>&|ga%>Kb4=Oe+rkJF zJNoi5P_BzfOpca@lC136OyTN;wP={b%2=&4wQ5-%$S^^TTpI)j~RytSm!B3Al251;Nl=8h-oaqxo;w>g zWK6eLPGyed3sqjf`jX~E^{{83Eu)9g{i=%Owwjl*Bm39JQ{+bTj5|hAIiupFY@eZx zV|IuHH}x5)s=g{(+Ui)^2;(q7*f77$aA!7~4f#e3FG-pq)fztmV+l4f|D`C0BJX^c z!3sqtCtr0W;+Yi*4hjJiU6C>kBP26Zo}ro;w-A%vURjLZ5N6$1FCbIiA*k>yCE@s) zxD3Iwqy+QS$Ex%ILRCHX_VLI7qfAaWH;aZ{^5EWT)mh@i*)qRvk}^t|-^+SCG?m@u z?HfL~c}2uR@}^Ytj@S>ldD$fNvuit|X>n@ToE3)L9>Tu}?-)=v^po%#;rF+TiQ$R< zB7EH5u=$3+Ai8B@6ExjEl)-J?0hXdpd9O^9&w?^(>_S{wFb`^P zO8rH6ecLo9r8z@GBL|!i&LpBB2h9Fa3O-8a2KX;Tpl;QM0^6rpDn}h;w>3bUQ2cg> zO}H?y#XvlMOq~S7dbnF|px1c}0}HJ!RB%y8g)HPh7VEW>H^#Bf&4(~nbLxGn)S4@-&zup1c$Dc$E@*dz0)J9UEkESddC|TySfMu^?@}}AKd{bph+Q$qN=F9{MdL&5(>i)>1aHK zm}kMvE@VXEc~jv<&=j)N5F}Q7Z0_gD@S_H-)d59o4W=zqe_iqd6JMQJY;4e1p~+{6 zyLM|vQ2|kL9o4q-u-_=X+Beb)@~YuzsmR_Yyfe3M<+~U$$kDC1S$qg-;m2nMNO)1z zPHS_fteiFHVjPx8EX(%s)@-`57Hhvh3L!3YnQN>SNi&z8+-OU&fI=byE`>CID%x5| zC=Vjqo({db(%?A^fwG_ykrbFU35ti2$6Qbf`1$UmExvY?)on^p0qfoas8Die7*(* zWb+_2acDd8OD4@(LE1Ji(3c~P*qKT&r?hFvlrsXMAy%~7NOB0pf*San&zsQA4;T6* zT~%LXlp7fP#R_sJu8~zYf}G}NLHeEO$RFWiwXQr4de!3`&FGvwAQY?VAvEx@lnP|o zgavw2mcwT8fa)-l#soO|Hep{l5khBJBbRV))#At~S}5v!yY22RJvoc%YU6{};IO(` zMT+R)Mp(!-*-R{mr1*fNK;4jXI`hEx$#Mz-=XX)%l>N{tj|)N7b3+qH)=DyKsRUrA z7@GV;iBAH)Ne)znzzCHSP=&K&HagR#FoQr%P*nnQODCs*Dhf`i;zC+A1W%d$T>%rd zgUT?_Dy=9E>M|P1`MEi1@BxsNm8M{ojf=1v zcpumIkL6m;@kLQ$s*bq%e)h0|+HH-f?fe)O7lp^VDE^G?|DvgoTEHFC>IzxmU=XWO~4#nKZ7>giS#6VUtuI= zo6)KC*k>lb{{X7T`WQ#Uwf4no5#8z!tL~D zf-{V6fqgxljSs=S=)Lah+x`X$wzwh>T;QkF%={|Rme4g07W6>2u*a_s!1@yQ@gyor5FT6Rm6PK%{%mM9x6ZpnMPEWzjz*r!=zscd?JSCWYZIoPZdQGz5JuR1B) 
z*zWVw@;o}LZrm2O#eMp!Gtd{LerTgH=u37*>0VmsG2{4^kRA4IBFUKKjaJE zYl78`o?W{Y-sLD1BdsR&>?hOM;tgjwsF;qud8d#&I_?wdaYnaf-d4RQZU?Px-(p#^ zbTt=b_El5Q_xMnVFQUqk%%!C7xCY8vBiZ+2+k)-ce1TQ;MtbxnyO#$rPRyOK92~1! z5NprVAp@m2u#Stg$~jUx5gc4X*&i8odO`qiNGiFpjNhRWYo5i z)jmUsC~eF!MS)trtR@@=vLkO?m;8d8D#h~T+qqcKIkc#AWJ%x5+9p%hw7FEX-Hwg3 zTUhr1Iz~o$H}|n5XHNhQG!F4%mtAmFImB)0+ZFw|b#1h8(O}`CIq_^LNZK-Ur2l9Z zSJ(SFAU8vS-WVa|Xt;c!OhJq$jTU#1l}5e7jHt75fj!*HTYHYFDHRB_B~0;>Sbs}y z2A}W@9yu=zu}?xf8vdQ!Ny7`@m&)n|WH!-GB&ri7=|u^rFn=R+EL{wlcEz6t($;jt zlnXL@UF>(j@(4+QZ+3gPsTETTIH_fw4f4V0$FlsEJ3k|=b7 z6kX#dQ1zBf=3WMDQ~yjr;7OXJfdJ~( zCBLm<4=O(YscG;0j(_DZLnD4OH1EI6i~r$H_%DKH{M-5|`XjKRLC^@>3(86iB?7pB zpcDedJIJshEpS+jIT?pe6z*qX5sYMdlKq>ue$&nmUw=P94Fi<|3I(JIt=-_mSo=(d zb+6jxT+U^jV)e}Yt8mg7{h?N@UKabDI4_Dj84XK|m|iV+6X$y)StzvwRbi(4bi<+Q zH~Ab|2!a?rQH~mIY+`XC*IUNg)RC)aa<|9^9|(O}=AK}F*GGbKyu63&!hGM1_9=cB z`Q^QT+5jjI%$M~yLhObB03iE+eIoxY048bcVDs;@e65uG1;Iknlaj-Evg8jm~c>ZG!qg4)Z(CM z2s;^vECTRQ4+#fF!I97k=$Sj?f%r|5*8}4$&F`-S%JGQNAgPd~JY`^`th$gR#!+^1 z7>yotECb<79tx{52K8nz8VdndSy_elh0(QHb;IqF7WQKc;)i^YJ9Ne(sS0bw=T^5z zuMJq(Dq+P-DNh)Nq+RlnmO^c*dWmfzMw`Ovh4EI2bFU&QK{yMptShLhGRnt^3T~Xs zvyU<*o>`sZs^nZn8z|0WmKHck1c}uboE0*Zg1LQ2a&CB#)yVfEo+iq%cRV-hiM4Ln=&%SWOa&HYuPOBt!k5nRlgr?wE0 z5q%Z3(maILCk7rxt7mG(6N->;ED~wZ=PnFoF>zLg)d`V5E3xExrl1k#ReejGom^C6 zVJ${8TpBs`Pg!eAS%??%70HsDX7AkErzQ;&z+wXV-N7l%Dg#hj3DCtI?RaKoTn*#FybXEWaJ%e zVC1tq>;IJGt&6QD9OR@n!bwLIiH_l(qdAo8-zM#d4cZK$HnkbqX1YP_@&;WGL*u_m zcKm_R77Yv!VF$ip>H>8yNazOXSdM^?bU@c4z4pP+%L}Qk%j*i0y%c&#S%*Z&vDMTg zbSc<~&1Pac`$v7i@Znc2?NwP}&1f;NUI#j!GRJmN**35)WMXdHz%j(GKT$}CDQxWd zV-y_JqDl^8VoJa2dO|}Kl@|#I2O2Y6w4x0!nqyOaJ+o=VbYy7aQ)cQWgGnsO4Bt*d zUD)9TM7RQpD}=3Mty()*m5dmA-vib3RxVc)R~ZUz)~k<$^IlOl>x$V0i1db@q3tZT z`V1uBv6kO1XIE6zP&1i2P`t-6j@hr273Bv9Se9B}*u1jGF;406;N=p&Jiy&mET#1W z;0_qs0qm8c^=AhF&b07}GUvOg0q)=}%-1sSou5e`*0aXv+G(bIPznj&P#^RJ&G4=UC@0L^r}KaSSF(p1Jk>s3@fbO?1Rb zrBrup6bB*S;Z*s?X9W6fi&D;2$?+ohz{c4!*c7w1AAQMJ_By@ixgO&@J7&|Mdl5{T z?Kf*P1#hV8c*kU!x0#|JR8ugYd6pUm$| zdN8XTtQ!Zc(^j2eE@~ZHDH7@<)xL2~RLd)W@~6?=0-C%zmjRdSW$sY)>% zt{c4z-6sXrHvVw__jcZiAG&y=t3ny+<;HarAk4UbNZ_SPbyOsVC_Edb9=eTg+8A$j zaGYDI%_!kPlVXG@cR?TPLK#*|jQdE4r$~o%q^3}Zbi}5PhkAIJr$mFG*E3xnW5$M7 zMM4$^@IO-{k>l|3>&(<8(b~azS8fOOCY#b1$?m-sqy6NF;oY1?=ac;ZNtvC{wSv3< zzE@t5006lEZ|{}=xJA_8)REOuKJ}0?5Xx$-YSjQGQG<{i?F%iw^#vtX6jHTKLyQ57 zjQ(+h(pp`c@8rzn`Io|G(WuwP;p|7Tm)GuOf+>?`mh#L#?Mv_N%}d|-?$`T0dH~`3 zQi$GEID(kefDnY=S&ozHKq~@P;z_nBABGP@c4Nq;kLkT?8K$=3<24h$UC14iU06eq zI!VvO=_<#eHX)aHF1-ESzARtQaQG(D4%|U5!HtB6`hYjmcax7GMD~4QNIZ!T%|0?@ zK2v~T9s|rYmNa!{Oq!vEyhUb=9hPw!V|_UpTKWP^Kl5n0(WsF!W0=HNi;|+Lq#}CD zS&?1E0eG|QGo0>={-PR2S1*ROrEII@_2LE;{CI@j*)zr9a^D4F&G5de?RmZ#L`tDQS5wQ#+v(TnnU_)- z8GoqK7EJljfbT~uiYBt*NDdeG5nyHGf{Bqco4(B^H6%SvduA5hin$30qXgDRf-b2KKrR25n|L4SRjoC9R2=`d?8| zl*zYfwjrGLv{`Lv+1brw`!L$hTuKyV7Gqh*IoxKEO%s9}kK*(HS>L}alYNUTSjQ^Ql_a!q%Pz(!Xn&B#&rk=FyuAt($amM`9pg$samDGqW zjMSIF7z@n=w{MB5e-IT@f7coV0cOGzYzVnK))8z^>O+3$tyA`zI*W|SztP^EI0= zj7qmUSuaGbBSBd~;RU3h!+xKP@s>IT@MR6~R$FBlyVm8^6D~1;2Een1w*#F>=iJl?Z-6 zKeFKuL`_j3TP(9ksv2SpOidAGlQU}JSe<<{F{6_c*W(o{>8MTD~n(IX|2- zNA4^O9P%Avi%~?Ievr0(6t_K=bA0mcnLQ4&a}bmCIRHi+(4i>pv7xBV6J%<;SbXD_ zG-q?~a~kUx#v)wu|A(`8jIJ!~vV~KzZKGn_wr$%sE5?a!+fK!{t%~jB#FeB{U*7xn zz1?q*JHGC|XN>dX{MbMCdiFkRuC?Zx6BkWG-1@{5SE{efa2TRat(Z6uO{7^47r!v( zr!bOJ2;N6G6qit2uFHouG-AhqG?J6XpCz&AWLX(@=*~<%pxO`AIqp4SDEwE9T8AsU zZyX5^oIS9F!I^kD_sH-$l7nDdCy?d*1FA=JbY3YA4_uu0YPMb3q7mRvJjQeb#-h&} zaiYcJHVhera!BH4KS`(ZZi>_AzY)3g({Dk%%m*M5#Lu6fG^zBLl8=#V2Tk!HG5qA~ z(3^DLICO$h!Kr)c-(~2BYJP1aL!ZTx!FNW&+uAEsk!%@xwG4)O1#b3BE8dN6_G9r6 
zC@w<^NSiCe?x?|WMPdO}=zkl+YlyW3tEp6h3CLM~IF5VZ_QmrO2P&1dasKaHfh-&m$s00|C2by_vdL&p09fC z#}}7e`2RPPc-T8xJDD?z{nMjd!(Qp@*S}OI|I8<)%KEaP!sz_PZMGKL8okdzm_Y4j_Hui5Y;U-OVaC)K((PlfZxSYn&a2IR~J zofcMr>xU>Wl!I`k2YFE)GNc4}j+hcGs5`0HKx?D7eFAa*#P2HAaT;XY4$SBgos@FC z&|lK|jy-+^;X2-&Z>;p{m#?dqudLC9%S%h^SZgf$MwX3rCt&$Nmb5*lmd{2aQP-fh zXW~ME-rq%u`;TkRg3|*GE^fL!umn7k@hPfDNuDBUrJ2Q)4U)PHi-t`N{G?Z=ncesJ zV!tt(AOM=pke1q0NM>WfNOX-Hj26Sox}qbqdOu%dJ^yuKR=c*;4dLrhEWXk_{eS0R z{&^z*JcLx$Uk*z`$o!XF&dJzW;kHw?DQW8t11Y3HrBF4JLQ-)EYN}g2%hdLJ`L<+B z0hsSaGJJR6{LoESJ-}eoviQ6#Jv?S}ST8pW`+j_YJ3%*pLx{huFqULcJZhp%=SNUv zcf($7Ka8Y8FbyAaz>c>gbhf0k+eK*49p#fMVp6!SBknip@cZf5I!6-rb`Ol zA6Y!ZCmAp$gy$Dt=KUL#JO47&|B0M;!kuF9vh^J!C^DI))>TMxSal6$o3w*7sswUy zZKpjbyVty(N#n+C<)rS$@)&YjAmoh!A2$4{4llAfN)Q=nDR4)%coUBJ^2XqcZCnt7036^}R5UuRpQ)sfp zke@WxfGdgsxRbiJHf_!?c&b`g3?Vv(d%=TibusTOKlWv{=9Xp`Ur0{7L5L*rJT}nb zq!JD=er}XUQYn6^Dx$|3x0OwrMpd+k0@$VYVsZ!x=5#YEHI1jaf3vurE6{>sRjaXN ztynGN5q-kTrD-Q|jRZSGju3{YSZSu}jhDyYqsZ-^q}|7K0L!FIZJ18hNtsTzp~<9t zs&ET{@`PjO(9Se$?G^paLs@4o%uBZL5E>kc@>>I@y$M=?24Z*U4{Advbbvl=Kwt#< z-UaDvJNgIJ)aK`ZSRVPuII+^J#|{|=1mp_yzp@AWw-^4O&(*(DfonA{f1qpP1Sm9h zHE+lfgF|ntOuAT4m?wt?VkAo;q-TJ@ESIzlT$;Tkz}s20bv2U+!P$%3^vrMSpzGMz zZhB!=P{6XU!DLtgJRt~)$+KfWZh;86 zV=vhJ(-Uh5L+Ao{Lppv=e*T$a#?-^9cC9$eyPDwoSD+q_j*-d3pH~j zBqnRMAdG{vh(K>b&Q6xfl&oRqMO8>?b~HY3e6eZLSRz-dgGsyF6pi`ZdJ*n$jY?Hp z$xphsVLJL9Z5d%Bm+&He!k|`AfmI!9rUFy2h}=EElahb6K;hR$n^pKig`Ac}((1NW za`RM$wb;@`30uv0FA9!)Gjuah_C?<459k4CHDt+3w)Rh19UZx5wjd{Y`)@rJ+-zpJ ziJ;aZmEThczq~(iedP`_1c)1&p$GBta!RGGreJ0`axU?L%0@!WKd6=DnS76#=?UgU zs}*K|#=bLO6ffUupf<5?`s^~%(z&mU1I9Yr&wbvRXSOjOYVhY+C%J?L71#jcz3a7D zY-`8y*!U2iNt?=<%(FyH-L(WtOGn+aN4G`MxW1u8&Jv7s=3>ec_Ms-RT{&P<$eQ&; z$w{bk)av&Jx;5maK-z~5=$dg|{S)%Gp++B%og5i6mvRB5#?}Ca-kdV;Y{y*1XQ|%v zlMn;gh9nNX+Zwt@#4yFfBut7hkj!~`oS>boj?mMre9B%uHB$Y=6wxyBW|Afbi#F8> zS%gl~T(wU(`nvp8x@-ww;}i4(2JE$NZt~QPjnR?2M*>;i;~zt$O((Nin;pen4!y%u z^-700ra8j>%B2FhaFuD>oRTpA7g3=(WgLRVt%VObH`zH~w0VM}xuTcOG zCI4OEu9_AGfmi5(gSENLNC02a0Z2gA0S#BOWl@{xVSj_#Rs<4Fd)Wbxf#w}6hTvER zz`0u(M}O;%hT@Lm)=p)BQSAej6J4&dJqpiEPFI|hEH|R*I4*A4fpz%1=%aGHtGboh zA43?s9eL{$Kh`TzF18c}?ZN<@O97_r6`Mv?l6TNWd=+Q7uTftsV0Vum!;!UzKDs3zuzNs&aeLr7{lJgWH%6rPf${?t z`bM7ez(=jwY@8vNWQJrOpY#D0f*EU1>?u)`&Ev@vn?gms3csD zC>CkZIO;8)n&7EaWZNhDxkRmktAxif{4T!ZwP8C$yL4-0wEWUyQI9JXO1awg=Gw2e zpS9^K4Gz?rS*@;^8f6Er0G?d$W?RnM@~(2y5kGl*;8IhSQyR@~LE1$cS(<+(pUj%r zP=I#ow?=v2iofkK_b`?7$0tn8ZKdZ|&&zHs^#JX3qac^2($aH@i$Ff|hR!l%Kzdb% zd)5LWi;g#qLN6EOJSxltq!kr~64UXCmTaD1_z>THz?56rz5ITiIAlRhA&(rS^Iwb# z^l8m$edTik^5Xb3pt1$zqqe-tIu;WUXlV+V$#+UHo;9)C5nSOt6T~U#V$3&O zzAgEQ>Ia?5Ru+=JsgboI>>BK2n2R0JZZYMTq4S9b)ixxH+GSP3qtZ@9)Tn&^=!x=N zxSUp7ku4^@%O!I;wm$)7LlKCHSFwrW5#-f^=L;=k#3;<25c|x#*MWGuLYFj`_bXL& zEV)#4ETsC^(Z$>4=zbRrx#17RxL(0(MN2sp|G)An$CsYyE4zkjf=Xh2)fJ!L@ZFv~ z6WyY&k(*4tdkK6eNRUp}7t&jc;}g1IhC)QSZ~*MHhiglgHcmq^9s&`sX=a!;q+1?KWl1k+Q55E zg2?h0-qroG4H#{>fF(qp>BZ2cxmDGmRj)Q2d6rIjs?``(-oBz7F8BdzE8_%FJ;c`? 
z5)o9;x4{W$dH`VO1~8ua;t?c6KdWzZ$F-_*T*y{5Sl%4qNB}pe(i`*2DI4RUclBEX z;7KrdnZje~uq_(p$-S)Hd>p+%vn}lpO%m;=i46xR-px^89#BOM9fbDVX_{gp*eoJ!q%pFuzT4{ z9y)prEbL)Vi}A+_L@6rQyp2F#f3TY2(I?|Q|NOlANi}0m%6o=6gxw?E6r4EeU3bwIt%u7kzu&`rI?K*MdS>2^dpIu*j^O@5FKW~NHsVlx|9scwu z8Cq3~UG7-DQ1-c#M3SN_JfF09u2xQ;?SRG^)EUcF(7E~e@?yvYKK6I(Ijr~12)9#& zQF}XuxURH0-e1p-c9fqo-qc~FzmHjRR13NhiK#u&)pt6%%-_c7in{GlCdC9(`MEha zvLPoa7sM!bK6%m%n{9u?i-EPKA!L94hfTF$qP~lWZLID*S*yXnsFCD>N_EDafQQ8rv_NJs4q-85KB zDmflMjGc;>4*mo2WK9?k;*?*Uoc;@_sCf9GK zHjaCAtypdAapEm zIC1eBpXRt-FbHZt;g;^mLX9^O+L~ef(Hk^SQ?oEl+f7VqRtcRX1qK>-gC4X7+9A;F zDA$?b5C3{{dtV$^XEZSO71>pCWB!5p=_-io5B1Yl@C|Hs4LdF%!d0|r^voBo{*qsB z*8kKnj{4O~ufPW@iZPbKYXlD&8Izh%W^C7#)=2K8rb7;JnEQZ4ip}D4 zEI&d1y)yns1=Glllh*iE5kr2pEx7*smGQsxk(vMP6^rGE9TY;A=v%O2gOuCK1kn?MVgHGQ3=kQ4F46)f}9fL}-JX>vH1~WgV#Qg{4-t%XQ3c82Z}{ z;veIf%-=$t#_W=1c*^lf-nx-fmovHaWDrp`{$>?(`})3W0gA_=kQq2r9lB1aki`VUTk)HMSG}=bw&>xv8D8wY|B^KSNWiw*9qZjr{le z=}^X3N_ak!2!d|$-W@{uJ2L80sFccrR>%TW^0hmCgy+wh?X*`YoTy%4$!kGW_iss@ z)4Foyezh1z)-GPwzTel=R{?*XfoOpcHD-mTbkTBDqs)mz?&z)=!?E~fN{h;?%(bTB zBs~_sZ90}xDU-k8C#vjV!-KM}qiW-RA%B|gIGveQP|)Alx$Yvq0F70y*S`%(nSGg8 zzL7!`!c6VA(fUD}E(aK35GkJTfl8`@44?0~yv%TlIU#o)jnJE z*7VBFHaf3f)7Be?+7O9~S&1k(6T}t9a)P`wcxWJFH;Ur2A=8{>$LztT+J_o|KYXSW z1OrUNEBm@xh7LApXO*~ZkyB%JF&7vZ)GFpeN?m@+6PWW_=u=hM7DAZ^TfwMP!e<>j zg-`u((7Y0_FzxX>gFw5gPc_H>BRq#TStEN{s?->Z{QiSXp?(;vZi~!^tTee;Ga#J# z&5-4h?d_~fS_sZE!~^nLZt>A3Au9msIuH~0O}eajXpKv9@r=g^L=<{=%^QAffZ$#s zDeoh2cE9eFcp8|fJ#97R-@CV}v`j^`q^A8W6zYKC0lyZP1Jqq98@z76AXZl45cnLE$ znpLj*lp|8>Bi7yjH1UdIe?V0IVnHk5{Z~HQ|B_Sx&rAQ$=ekwf+YjFzqAP&y$H&PR@A-E7_Rr2QzK7efe2@kSHO)gi zz0+ftp5ULp%0amX;J`Q=x)B5y&*ETDjbfDBssZ_S?E!k&o!UJO#uKA*sGk$3zDF)y zdN+3>BfdRaF)!5Zssf%ei=Nc&x&pU}Q#}Sr0exHBl%6lL-v}!PLcFAU1B3#++9AKF zI^FAGNK!;{HQ{fnD^ttEEBks<-lS2#%ZYrL@8oyWJ);S3A4a{cIGSndTwWzlu4Es@ULWW^OpaSH_btmy8UOdPUg z7q^?sjbtTbbSj5Gvy911m>VV7htRc{MZx6{q?0t#%*0qFKmHU0&pdEQSq!hd zzmuPaQ`*hfz~r9$r~sQ1=?g%2?4G})^UYi&*1l{1t*7ttk&_U@=yOa5GQbCoW{OeQ za2e2}lUn`GT2V9krif|ap0}zrT={#a*NTO3F>^X5SR8`AQ=3q2A!@B^9USv8O}o`; z(IiWSN;-!zeXjZyG*1~Qp*Pb1=~z_-U_8)QOHS*|m{WZ3j zBxj{@6C2q6Bq6fmN8^^Mr?W+wikY>6U#R(E%zdd=)AYtUN|okm0_FW=3o2t=3J+ZL z2F|DiTh;yWc9nsky|msx6h~fwU>JhK(KMY4#e0%8U$#sYuhao$2dA%L!RcR3m#+=u zCV=4~21eh2BgTiq$Sp7;M*IG<4Ip@+j`3qKU;P8(2gY4G?f{APx|QC@?wu#b-+n=i z5807EXEF1`=PDX-13LZl99;(myn))$_bKnEQp_TitggJcPb1$3K`H;q^Z65a@nnlvy%~7P zM=#R8?ArwsR5GOg9S<=ddvSk-=q76`T6cTv4A`U*PRyklrrW<;Xy+FRo}~3gE$DlS zLxsq@rs7M)8oO_mk;7Vix!^6^e5$)U6G?th?Mm zdkHFD>qm0aMe+N45|lA!&?!sOeIwwsE9+>=jMZqHk&g0YkFzCK%h9KD=joa8PD7K{ z(F>gpPc&+?AQpL8rxejR|EsvO`jCPJS2mrGJ_QIhuVj58lIT%)|zwE-=uoiZ26o$NC5atra@ zJ{c#2)LOy^&2byfX_TZX7?FY>5H(jUoxGO!w3m$itiI_shnlBaw7K(3aN zOG@B$q>^M_`y~~`n0flAt63}TJSKhekC+5}hpii(HuoiA^p!~M$5D;t@%<%AYl1K< zfh+`(Hn9N(dU`(-#^7cCGVzCH!@_dEClro*+sU7RG5_9{ zTtSq`(*_srj~Bm@Dz+1qTf)!b+$F9cz<_gm_NcP9R7DuLj=hNN;y5BnM;C}AF?Eoz z-dTcO+wQj)1GVc}m8vFVJeOf-%Ms(-%N+g7oF}$8QEqU>{ol_y4}_=Pcv$;0!w24L zOz4yFjaH0R*@*p@gCxt~TLLdjT*jamd*CNZ6CJG&DjB;egTd%FV~tLNnAdszCIQ;0 z6>vu`WaYvuQdL7=n6pfo3sWW(AP~T=vaYxsY2-m1bvv@_5a+>8=L3E@ zBi#(a;(zUPWX{3<8eJ=hyOfwVpQkrluPu}ZgHJ_xyy1Ry~UwlwihD9AkZ|% z=DfS+up^gGR%09hi!{O-y@O^LAsn6<8B~=z@YWk$0J_|F1N7?qY1@-bcXhcs^vSB6 zYPA9{?f>q`!EBxb>5oc~U$pLVi~5KP97UmBU(gj=j8{sS*TU6h>n#tGcynD~d^rbm zr*>jhW?`qv>uXaM<+lwAN=)UF5mIKvQur80O(T^Z!jdAW-b;_t#)os{he%`HcqpVz z@oJ662sL;bFHyskeN7dXJMnQm*Ar>2Q0nn#32^6>F*)tX`dfp(GolcyXB`@{H_;q%C-S0dk9t7TPz&i#nA3UUb z9;(AWj(#UWDTu1Gc>@-k;;!T7^ef+H6VBrB%j@Az(N9aL2WQH(0|vH)DQmX}$gQbl z&nAU!#TCNov~bAPYFcXvEE{J?g04hZ3v~5UsAD8%g`#%?Q+1XkUD)fD9Mk&AmYx$| 
zC-$8Se1XwV6HV-2tE9IzP>xfW;~?s(Kc*<=Cm#iDi9%aWz*mu)OdQrD4xi2D=31Q{ ziv%6^KQ(0BV(HgfW5kt6u;e}3=qG#xc=!JNht|(OYSRH9uJpFAVhsMPHs$&67iALG zc3&dmqE^P%4*#3~oEke|^HrUO|E+ASWS-CQd7gJok8~u4B`gUOxi#(lPCMbSuK(af z=m$b1le5=YKFh@FH*`1i4VNH3YnO=c6E) zdOY1$25jA1ce^b|0Jrh6Uv2wo)j}0(n);)i=&hfc=vuVopM{=uaOC^k)+TFia02L& zSFu(*+b3_X3mVk4zn)ld%!2zUXwuN+8Iy8%bp8~n69}kqVdn^qqO6H(t)nk{L&)&%BHgx&B<2^qJt}t!c^c{W@u^6>q3nQAdu3q--DNZ z;IpM!8nBwXg0$jkR-1KzbQKtL>pFCeY$WrTn>04MvW+^eD!+7U>D6^=RqR_|{hO1o z!)F@z4~gEpo_k(=$6k7Nv)>JQ-8cg~WP-39MJ!1oUVNN!>VPKSb|{D5&0hnNh=yAQ z{Gf@_RumWof-lY9m5Is@OhxXUaRf+I1kQ)boq7O@zRJOK<3I$Ldu>LKeODTR+YvFn z!y&9Yah%?ggDC^K!Pusa>mfov@$mYp*R1j0iHLgB`nw;Z!~Uw_juUtGIK5;;p2q+N zqTYyXbN6Vh-LcxHP0Asp?skNCxQ>hG{fKt`mUe?##K=0&inGKH&F~PG$4ah}g9ZRE zh8OC!MYmmCFobQ9hOSWj`G5x*G6$ItvGIAL7o80`D8YUP4l5|$pe=VTn<&gsmHpbx zxXC>uGns`+Ww7}1DOrI@2&kuh%ZL(_MJx_8Zu0(2_)3iM0Q#BI40&i+E!4L8*dHmd<|K5rO>hx|q2!s>Xs%g(xn} z4lT7ot6BVZ9*l&45wqRp0h27F;EZo-z0AlWZOe!aKTZM^kDk@IhO>=4wq5BxSbB8v)sUI%A?sJJj8`{Y@Spl;c~L`q5;np!py>^5}QK{mreaT3Y!TG z164tVI;W^abTL?uPLA^e?vjRVZ27Ciw<0+xWm#B7m8nU+j|K&+2{vb99v;&_(AsWs z;j%9GH!93+Gg53ytUWUov7C4IwMBTZra%xdVI^`PYcHrs~V!92b&5a>^YsG zCbLF>$3o+p-2A`Fm6A7B3$|XW6V8%bVzQn^Vw+mNAos zk%^B|;Rt&yx*6OTO^Ecx<=^6`rSMQP5uRG;6|U@zx;z@(xy@`2Kw{S`ho{3n(2!ulQ0ReCZDh7Ey}AUIi6jlprbN=QfIXU7optdWt8sNDN76; zYgHLDr;Sf8J8{NYaRF*^Zw7j3ZoWE-88lzcB3)Sg!t|Jd{5?Yw4sn5QBfOJCqNs0i3es3_jp~k`5U<>kPqp zDGz;tV+6*DXCjslG)uMantel;kv<>*pvnFLWsLB51P7shzh-hxLizRQfy%wwNGkc0bs>QHpuf}Qllaoisgqn7jcSIP zM$O{elF?RC;z(~Np#+xAdJP(}DkTAvI5P?pTG(oa{1&zwt*A;01kz<)?C`ql1#Run zC!zfN0n*KR^@>GAiFDZ^;B@7udU*@*O7kX`U1@T*6s~27NkQqh{ldCOO05Iot!+6o|&5%!D@09wSVf6=9w{ z_;;+X_F8Wu`7vaKk}Ex(R-vtPigfwhVkZhrP-5|-+=4Wnl*=+_t~K1cTHM;$J%*P` zT78;o_0i!k(^N4fZhBxJkx;gZ$=+$eY0-ch%EWuZqXkLN?7?x*pA0_bUT2M z!{={Asd{`X8U)wnEPPR+B>W=mh1GG4lnuGA&oGk z_}mCjIHbJF@=Gc&hBTsW^J7bPlHSL77G7Qzwrnw^ELWWCSNaV9Kt}6qxMreoxEKkG z{lmH19&aq?UfG3&)zQ#+c|{GNQEz~%Ml@Y4yD;c3q{x>{7B5zT%`Q^Zn!emgW$G$gGo<*}rHCP7GHMB}k2kFoAB$LpNSf4bqY9W0ISC@S#0RJ&c$ zoBQ~Z*ooEboG@@voQ+h7<9T4SCH__{)o{6^kY&5!etXm%-E$Lb*ZI=4zY_s94n)2g zarW3jYKGwt(ga2x3>15uncOMw@O48x!Jm^}G~haxGBkG>hCqhjq6;sKvvQoZgKL`5 zpA(i4UN+?E+=$a=Rmc-M6bcMpraAE{C9M2v2r?SiJX7e--P*DV!tXDrviT|VMmSNysg=u!V%=h z!xgvrg4+ZspR8?Ou}3)*#9Zj^y$A-;FNi!|y5~e?dUi{WFG~G&j5#wXV%3!d5(zmfLBO z`pFx!f4M(rrUZI-DH9WB?dB?kri@r(Poy)a5s9h3^0y$@D~+r{H#KWb)farO)0Y^+ zj}FJOEW(IAjH40vXW5&smd0wIVlN>LqN`^U%y?wPc?bNuvO}W2iE$|}ncBv*bH}c> z6HKJMU3KMJfmhJXVod=nXUuN)!YRb^;9~RyxEYhPnhPUgA{rE{8q>@979pdEY5z`V zt`UvubZaTnK%}<$v=WDiKyS5jD-sJlj;wNBTvk25of-WhGuwhLt`isGlD7WW7um8F z14cT#IYeFagdhb@WMTx$9m|G$jH|eP@5jH+HMXiwjDCF?frEV!)cyzKo&Nx#|Jm!! 
z)w1$qVnDadGyXD^P z@H>^;@j6Y_{AM?r&9D_1-_d~JY12Lx2GQOXoD*;R8VYy(T0*BVen^P&k-{fXyU&{1 zjzxL=Wf$@u_FEgP3K+;oSko{1nq&978;*z2V;{v&xPnpRK$%ZN_yQ>72SX`-PQA}h z!kf43q{F2+aWEy~FJ4j3&PNtqKY3t_+shDbY0gZtT%t4wGtO!DyPM7s`664yU$(+A z`UZ>E8PYP$N80WqF2he@$QuERtpvTG;GGm@$p#4r*p8!FagOu?Zd%5;X(|q8@ekyt zchZg{eI=hIoeY{A5$imZEC()1n7vY^w2u@g79+NJ@J{c#dwUk6LqeNqR~CDa1|}Yca0i3*44Ng`$-fxDytk%|sbZr*8XA?9_=8=PolQZ6E^Dj>TFdAN)v^;(1&a2XyD zw)lL8OrXAv6O*AoaaUmd6onhMIcwDq{e-8Xw2Iiw#*AyG*M{kvlpS0Me#mVDPR18; zf?d1Eup>qIV_43Ny#K8XVPxdUNI6)OAhpSBumKeoE^9a*gBd2VBw=u~n|a(U^GfOQ z^x`)-2F=nyItITXGk%Mlr2K;Nc$=Py7oP>DoE=_Yyb3A5*?0H}$gTtt3A5oPey?@- zNNZX9C7irW2BfJRvt8Lo)+ zp$A+tzBX{BW)_K+ao#bk=S=Oz)(yTXHq^nIS(CT$lJ9G$iX6Q!lwN3~_V1I295at} zCPQ|~EI;M+S5bUs0&(!MZj#DqlSR>O_FKN++K~~DthReEQG_L#`n7*7lwc%e+&?YS zGDKg3D~*x(iH*sx&%l|U5F&|@1(ldDgycP2C#|hzaGRaxU)^1<2O-h8b-(*o0Reer zfsB{0$eV60#iC>O!Gd9$=yg9{tn2oR;%*Ooc??F#!_>p7mBv7x&)Z{eFTD`b**=8E zkernwG~s`v7_8ly5(o|`x_1ZVTfcV18IB*Mzov#8UP=*H9&(!8Iyxs5GLpWygEN&J zdKQ_>RqRg9cH&Fdnt~Ved=lT*=W>?2iJ>e3B?n@o#ebk)%!!B}0kO2h!``ZuLPXq) zC5Lw1J_!EK0~J4HMSzY3{_C3Ex6LNi^EqSFh_35W;p-k2(vCfV$Bi0fhPSt=n|RH} za_44!D+Ug!A7!l^m=`{*@~u9zM#BOY@W-?ki1?-d0Dek}kNCMl#Ss1;y435r&3Mt< zD+p82*?h5sS-&}{>2?mE-hX-cB0c&1>gf$|j1Wn+YKz(5H!_MxA)>VsO4G9qh;5$N z8}O>PhA%BA@^}L~Z+79pO%~`YWIgRcKe%j{46QiL@)nECXoxGX^qJ~V&y`(;kvDZ< z?*b34>SQ`ST&*SO`y=hs`PYL*5w{l?D5HC8~TJUYN*wLmjD#`%@Wa>P_W zXo1GJMt_?4DQn+qFoz)q&*i&>GT74fL#(TZ#GIpM9YfU02lVD>eZR`G%+vwryh5Ff z6WQ7o*V3i_$JmLePx{uMkJ|ko&k{0Qau-8X#pn%_+c_dp@1&UUB}zVo=HI8FwrC)( z`7rZZ8~yxNRmd8`x;RXjbsA80o#D7;m#Eq?as%bBg|Xa)ZcKX7^a<<2R8IsV!Ys>3 zrtD8R@FW!BxR}n#uE^`L0><`q$5AqUIBRR1%J*Chg^tb77)58;n^UEDzYyCpiY;W{ z&H!U#+}_^JsZ3l$%xEgRqOmf}b8-zAgnY_$n1d)ezmDuKL#d%9UxVEy*wnnD(k%Vr z3jz6`xFn1sGgx&1L8ni_gS#5w9ZT`^wvW}P>Xjc!gn@Q*Ln$p_ICtlK#ygOnTXlG24JWML%n}bc1(RX?qez&Gpc{MrJ=tnooT7 zf^M}nxjnyz9cOhD9hcHMrN$j+^$mT8Gm4}sD>QWwGRp7I_vfVJJR#Z5a0^6VYP%eB zgWoaU$83)5oGo$%He{+{O>6Q+0Q|X7f_-^QoHhH$Q(}JJNRsy#`tI5zBzPJ61Vc&| zB;(rLHMq=;=!`t~m|NJgq$>wgJ|FDZ0Q@6tA1Gx+7aQ(SG$tn^&3G3X9{I;pqXTjH zLsHAaHKxe{+VP+>rt(bY?kMEXp~Mq<1p&uH=$#<(Ye_ZwgQ{;Zt*Fu~kh zs~beZ+?N{!@@QYB?b@nftw4}u58Ija-S=Q(^b`iK`1AHMI~G}w?TCjxuveu6H7s#O zpK-sT@4NB8@86z3K=;6t@p-vDT(3|h$bY7z68TAU@$%%!(!|!*`kJBnwUo(oaI}`J zx)naNXLmVOt2ixrMdi=B4yrN3AuisJxMV&iIlX!6+$ztCal1OVy$xPnmh3Hizr+$^ z-KXFi{szE6336oB+F8Oh|2>`uvCXfvqnnxe1JIY{`j~M-4Hk2y@3EC4hJUm>`)Pqg zGE~M9N^+01ec|4s;m^6s)Bnu-?-=tRA#00#XiWSH)#%svf6%7=zpOjiPYc5iGNFUd z|5nFyKVC#@e9dR>?p; z3EN6;^wrcIyuVUp*c{}DO>RhbazE}|%%!j+Ppv8D;GEiln6Y0hj7rkE@w-C#+ZZ}h z(6L&0bXeL{MqU$;ALGRsgmiE23QBUxJBU2kz6+oHANtY%xKZa8F7N&?8X_0^{|HR~ z+l~Ia)NE5Izo<94^g=1{`n9dGbOydk-%x*jZp%$ z|8xy2ylwDGAMu6nm_*vK1b*}VOc-I8@Eblb#msMLXEhM#}&nAKZ|$z~0n;i$9g52mSrZR!eP1iL(Y5nAAj zIoi-kTY$Fko_?*{VBIKbxusBti9b}MSo6bf3w9ey= zILW1dn2GIB>{5%)rDx`e$$Mn7SJW3<8Y#51k;49Sp_HV7OPVvpyfXjV*Nn|1$u852 z%Zu`SR^}`$B0GUdo&$6+hT&#ql)xB7q|HFKaI1P-8p>sLQVx<)w%pVMrF3+xqR{p5Eh)NdR5CHGC1>@G_LX_CTaddYdkVeI z-jQjW;YBUayraI&w+U;`b1B5(Kpl16ma@vmx%TA7&^pS#FI&_`*YPLVh?47K#Ker_ z?Q7FoIt*80J!mh?bSvp%$RJ%3-q_jl4Q%e+^vNsQN+*6&iQ*xsGn7?+lZ6@w)i!T9 zLpyCIHPxZ7H`UlAbnjKZcB%}jC)wMg_uEKGW0-bISq;_RBh2CtD=*hm*W^2QJGi$v z!H+3xmdIhGE@~f6Cb+)R7$D5iJY%;>4xU*T@#c{o_Yd}ssL}<;;0v+Dup8mP*`;Ym zXle!fg>GjF{GM0zkoT8;66jZ6@U`R_CX)M_gPrHWOno+5x44aQ{jH0)V$b?Y?T$Y{ z$q8xJ`$7%idn{}SCWM>468KuL?!%w#h z@BTN`ZyxVcx9j0JyNZ0m+MMPHA&1V)7Wd|-p1IR*5{Tm+%+WU`5`7%zo_dO`iB6pz zUpCM-xGA@}JE;Q$k(YA3{BV@~wDin1CG{y9N)g5NS*1D0kNmAMs(GW34lt`y`UAL? 
z^R>=htT#>)Rc^K=Nv#r9UEDnPYt$(lwqJt_;-*l19-ArKJj2 z@h3&>xlC-r86V~G*CL@n+zY~XlqAO%#|83@I$(f{YysnuA__&U%-S zhWGA#Qs+kyoSg&d{sV3pzXNTz_Go+ePJzMc_bi;xRgC7Hzx?o_w)bp%<0AY9n(+L# zvG89YG`j{k{rU^!un^nR$DPg%C8r z@-l}ps(NS6@$7#;!}+U2WV9kQbP@igP!!$ZfszOP5#FkyHdez{70nVofWxkd84?3LwR%AX0c*s^+FnRUjH+_$dt|VVRuVZKLzi>& zx9gVN)x>kIL$0M&p4P)NrQG|l*>y&vDY@Tjf{$a5l*JwxlFQ6bUsyKOl>B5P6B}>+gw|QPYKgyH z;h3l9E{grE)wWcY4Kw4App>wj^rP);Dqf@t?QrLZ>>MiG@LRvRhi+P9{%%-{g~0=%efl|*yyw47C0GmG4;;|kTudS@nI_5wn9iV`|IrMjqMXnRi^ z8Y*jiXfU?83cjiE88#MoJ(M%3(3$Rf`^jE@_Q*Nme1o z(Ka}V`*7B>NN9kpnm7+xBuou@FU~qKof_H}y@sW{q615|zAcKy6rBG+vr%i1fbJ6X zc#|DHDXfa{76=r2z|z|2O9O71ubEzS^ooiq^^BYp7wXM=q_F4{g)$F%n@DktGQmjq+IKn^A#5BTJFTXxAHI(C8}=cAsJV&hc$Zo@^T7PydsNi zy9&aiv1bs$r9dchFF5styEb3m;rD%ZPb9&4!p4D=WhIwjHPWY5I-$1}_R3=%Vu(1= zGGT%aqg7~0rxZ85pMSCG#RC7-kr#}I<)x4Nk_dD=K<8h&g!d7Xo;ma6_D|@o!fK8M z-ph`=;^}SbEva%Zoz@7Owd>K$8R=RVo^sP*?GtE)IvRbGDg2SQ=na0-=>Kh?baAvv zWR4X*tw-#W%xmi8Mr!fuvJb1#bj0*jCthEcmS0n3eS0qPFf4@k`| z?kndA%^1(^#3mY>>aD0ddkfmM0OJwTMNdbK#$^(Bh9aW^w;(~D)_Xn=#v-XL0Pm&O zFEY-Ro?sQu_@35!EUTrTjml3@+TyHK~!Fl#*K%^%vlwp)7wPRe^Q{x%!767-! zG;&!8_I%49V2)(GUb@sC?VF;wpXNJ(cCusJb}BY^%!*O5ZQHh8v2EM7om7$vznt^VYvVe>>J6i-GxSaypf2PO|# z3unyDMHSJ~u}Y?p!Z%6-ZPVA}P_g@aWbzpM;ouShsUX1w6W_iI_%gr!{5$0(FE1~h z_co(5)pG7@U`BSKJj*3Z$2%=zj>9Yvy(G^FEC4`mt%UvAxk< zWP03l)9J;x?(rhjPtx*bts*Bn!^~G)(pxDnTdL2;M^>sW{|l8$u<;cFp-g;XsuUaN ze8c%B7DLVktx%@4tiOe|oC0WWSx}xS#xhIUas}SM;HngZhmS9BQFiOfN<0EE0&M!@ zR#~;%HZ$PQ+S>exF%L)!3(&Mz=feUoG9kndR-v}+-y)?xwZj{${4#7F=GGXtt8x&E zDa%T0z`wkhPuP`FmfcF4WyWsMikmi|jx(Nr1nELVc(S=LLDdqjG)=;mGrj1cjTuZp zwNUPsAD?};mQWB~$?uUs8tYe-mEGe-HvM^~xh~~*Dq&&X)IOki7|V|VoKGW7WU#Rc zut?o6D8No#YR2%Hg*ZWf#BCWHpHh{HPoH6RonRWOJIk|?lzkW>^+9DOZGBMw6zO9P zaVz}EHX$<{GIw*Oo+k={PSECO^K46rG21isJ&uQ!Eq(K>)>a7!T#Ge23Nw4ON@7n_ zFpQfy$t@OTMZf`@_QmKYF2+lO6?Z50ixBfYq~~6M5fAAT#BZ1 z8Ih>6gl95C@kb|XC8XCLBCE`lv?BHvN$_+9L7=BAmo;NDi$i#{p){GXiZ#0VgDX>% z096OVm&9%`?W`Ve1`7-*ALa=$qMNzi=QM?c8)O-+X%bBVBw=oI+)&`3eH|Ak_X?Ps zdwJIQ{9*x=s6lJ)X5XzI6k=^QhJ+CjA*n9%mw6wYUV=mTr80QYOie;cQ~3S&pXSd7*t<+acyY_YH5Ob-xKNwZ;g?&fi& zyUhBB(9dSzuL+ajuL%nKr#s+dMIsva?Y{>nSUF!FdsKY7Lbh4Xg2jy~ zq&a-3+=ojZE#ywx^RIN+G`FHt9FPui(p6--*MMkEmoQ@rbQUjz6(yZF3>DGyE~DbD zrswr1h_T&YW7kwJuDmn&G@~Y~&?y=t57~!CtN0Ptr%oIpTzcIAQ`H!N>-gPij=Q`t z$(62>LXab$A$eAIWm~3M{Z*?h82Ig6R}MxJcFYS<5|`ON3E(0+#&5OxhG>K{G`V60 zqVv-kLK~Kp{^wz?_uoCrepi|#;W3O#-XXfStg9Z}u>#A%3Epc~H-1F?jY%F*SY-^` ziMRmBL65MpcF?Zo5iap+U-_^JMbRD(@g&c)sjx1DAygTqZz5#5rT+SjjAO6?s{N_w z`f@yx`i<%Eg2kF0ZLtv=mq z^`xA#2-j|5I5dkQ^pP2dQ{c1GJ(hsLr5``|L~5C7;8ctC#z?c0khNuG{5tKYUAqOV zD~{<3vf{ZPKx(_j@3s}^edzq$?lKF>O=#$6=Ws80m3vXg?V)gnE_yjZNgkE(F8jaV zSkF9t-}SJPYCxl!HK6iS0^6+|(0C4)MXW=iDAk^*!+z#kLNrn{fZ|rZhY>hrWrgE> z6yj4w9jnTsyYazV6Rp7xU>F{ax$A&&YRk%(2;dar*AL(|v(Sj3IAgpFwC3^(DDG-6 zly(jX=C(L{rzE|5s#xxlmHs_*7nhz6Uak|!?^L`trm5PlJIZB2w(%n|cn~0RqzKkS z`qvC5&NWB2U{O`18lp=h%HC9~s@{u}#jzcy;vJ*7U3Pd0!!SLdTq^i8!1s(Z*L~%v zWsP`v5d*{yQQHb?{THYr-VOZ>3>8$wFU$By_ZzUY-UN1*3ObgGQ{TuxnH^`mZq#OG z3nmK)8AEIbdwDRxMM-yIx~|^%xtqn&AL;;IzdyWH*>t}9k+NY?qEeQuNtFA`k!1s^r#oWTRGNFn zRHkOD=oSD$Mcw)z1ez1*kR-*r5cTGSui=yHQOV6X5gnUm(0I+^=Lq#J?kk$tAWg@9 zLetsJw@>PCe4=lB7(ScjUi&)(roTYp@(iGM!c!usNir3_!Cq#F?za0HJhs7)v56{F z*B0W}6-K8joflzAm(w?|0VNCqPC@O68+AkF3pm+9 z;!UKxst$!!!r~#&Br?o8%`YVL+?bTf*gIlrbxo44QguXXL2?{LSJbh*W)9ot-Ql)= zS3hDqvBna01f<={YD(G6+PA?Vdc&^XTA>|X=XQEyet6(;-SF)^dL^E_#DWXthMa^v zaK1|jA^X}%pL!=cJl@-9%8xjA`p2UQL-80jQh}X8!+w*F0Ex1&bh{@%q>sC`qfVfH6d>(Cs@_3H8a0(|%r_*%B`QVw{HB9*Um)#X4+ zh*iO!2Z1dPx>&O!(+-{MJ+4SO@;g@W8(7U7+Te?6Zb!sdWNyz3?8AWG-sU|`N2oV^ 
zs&_!hs?9C&3*L`sKEl7>l`dm^@8r$C`-qLcz)CIZor1sMWEDJ}u)Xl~y%C^1W)S8j z>n7Hu2=+-rw}1ECLvfC7i6Fl?d=^KilIV|6^^;cDaZE`&n(A3|YwKd{{WO=hElJvu zdvO(1bJWy{oBlH0j8`hI1nx{uJ>o=#XDD;*U+HxV#yPwy6vWe{)CI$O5|5dO*(z1p z&Pplj3xbFxOL^BH-GnF6^yB=7h|1nA6a5_bEnY!;)Pi{}>N76x6DQ>qhYuRWGr^u7 z$@josT|kI+)?<7)4iQ^|U^(fSgX0=m%YTlFZ6CIAti}hJ@KUZ~jiVS%(8T=_2B+0l zdq*DJnNx^w55w(tv?p4PK#((_QVFFlM;SLuE6w9pl!njGPXDqC2opO)_J#=~U!$om zDm5z}7J=oX&^#rZ6`LpiY0Q1mY@1zco6Tfr-IvkV!xeHB8FVFs-cF z*-2p>VUsmED2l0dsAW@;SW%~uk(Kma^-gY{C#5iXG3FY0DOx z)+wD`3H*a7?1L!k1Gl@1kWR1<@c71H)7;desoN3d;UZRligZ*1)-q3>V@G}U z43m$pklWDXzby3&^+#lCewa3j7tbcXzTK+Tqmtp?=j|Y76tp60e@2+V$g@#qJzbD4 z9Phub6`13lTy18P>|e_9SNZRasJ7cIM=UgeYgcRTa}x_*L^lNXGEs+V;? zPw23dU5^M71M3H+$=zNen&cWXM^3d}f;;&7jU~{nOx5<$tFNIidum#aJc7#`-O@r4SMNX<;nn0@ZmEPgc)@(&ah0dF0nX^ zqb#x8F`hg_G^L#B6$8e&VHi>X!DoyKTJ3B1bNqSsEWb-(wXCb?L}-qaUsXBbzL(_F zG+DvW<0yyb{Gd{bFOJpIvM{A`)Ln-7U1|FsMVYgv>^T33r^CyBkdA5vo0YNw2c#%C zgO?b83F8aqe~`A54}WDcy7krHi##B-gHB*Nh&4U=g_CiXbNvDsn#Rps@_sp@MB7j; zU;O(CS%`ZR{sx_X`>@rlYx$e$LD|#h{g3@Z8D59`({kR_ExqjK~xLi$w z#dDZcF1wM7XC{Xuq&(F#)*=(oGZA?0lH?`IYLs1Dpk*O)yI8FeeImQ zSh?cv+*hdG3DZ(NJHcLLVw`$`ykLI9LQl(6VG<9b6T`~n%@dL^=#5;MfWVG0NueCa zL`z3fMPO8P@o9wy)dU##{}hF=TWmm9f4$UD2;aU@{pT0k)=l18&EDM6SXSTmUuC4K zmczUl8h@QJPpv4uHVQ7(VfuI^^O{+#QM_G1zP1Q`Ny|-I0t3Than^Qy_{qP6NQdA8b zDddPW%}elKB29DZpNR#Yw8Wxg+3%Sq&Zt#Sr%$Q)eTX z)*|BoB+~hT`rK_+7K2<>q>9HE-!E&~HSHqzDAD#&8X2(9i;t8ob@I1{7Q>3tZi^0Y zCDlbK;O1?~$zV$6#!*JOe#x4$_AeaMWK^_u7FJ>8adG}aZ`i9d!Fju+Ha6|2ly@g$Tz8up|9khft_m6OL(k^l?HxFp^?0) z)7X!JTJ|A2Jp=p%!Tmx6r|cn^#0zPBy!rAgpTOv-P>L_3jn${_ zEuK3H=f#+b<#A02Aq3F7h7v1U zfvi5*nTYmJN;1j(icG}F;ak! zijZRY$noZh+^4)*Ln%s|!loy*Lk{riAdwS{C(IlDQ%FPBF}o`WqM(%Yy*JE+b^iG! zZKFjy8sT%km2pnXab-0o%>ClYt9fSMH1LCggza1Yv?TV<)%sRFTIWx9T!8@WiK0); z6{e}=Hs-GtEb2)Ff7jrmG7ZiK(#oucrYG(yFf(_wCq*G#lNbE1(Smp(bVdO6nz?U` z$&ZCqf5+pZ_0M$lGpEMZ}@@NRhLrU#;itg|7Lr{)fhxcQ_+w6n>WyvM7Vua zf^BPlhv*GrP@Ujfr*cc!>Z-tp?Mn!S0gn7TJc*v0bPwK;jeaj_a7?$O9EU;N4MkPu z)qC1|qW+k&0$C^6Yw1_M)rvj&oFH;zh{OO(!z=1wJy!f(g%$D)G;$vmQjP_T8EOSN zZmlh?bgpNhZj4r!Z2%h%R#-6U`BgX(`?b0y}0BQ~}t-J)k*6+F*gT>;z2S)$s%=|9oLbR!&I zvVq$9Iec%W>VLZsSg!gbJyKh$0+>*}Crn=l1`e+cwnQ+VLn*LO(N%@`X9&)$!vfXv zMRZD3c2V&?yU5-^1m0rkb{LuhbMB(`SUkl;o6>ihw`N|~(s~k3=2*$PfOoYQ7c{pj z7v?87a}b6ue{1!+Kptrw9je~09t}TlAyzo@`aeON>wa0E@!YK7ZhZNPh1m5)cc8o) zZ4icF>t=T6{>91sbX^Qp!nwMY5A+I&Y3(;gXx|uj)QZg6qx;!Snm!EMjb!yn@bA{) zKa_waD|9GTUxz)|7c1xgjJboOvGxBRF&WD;A^875$`l0? 
zgvHrM9TNd;3Jy3Rb*F0%9)6ERr2C+UqaE^jGoAmr>NvYh;hTHSVQwM_hMr+6xIVWcaN z8Z3*)QKt~s0muu~W#tIe$O8^ZXKRDCOATrm8%?#@^*whF5FydB3bW@ywlBSFYS>!5 zrA7$0cm9`_=|9%3PXUL?^+n%KM*Y9c*8aI}b8FMDFCFPW@BjD0tJK!Dl$X&zt*|)z zZAA_cZs}7&f9NrnbH5i0FraZ20IL~n4+y|vz`WFIsCd9)J`v!m1bz7 zA}1u3Z8$y5LwxV@HrIQdOY!tN$x&!q;xY{eKpMYQde<|YS9U(yyf3Z&viZ2%j^PKN z@fX0%g_eIt`k~&Jir5NOFS38s5J*8L;(0@SG06GUh6s_*%iSM!8W4zeuE`tzYUY?+ za_q`Yq(2w(d-uHKgo+@0h$YAs;~3TsWZLe&BhaSik;6+p!p832YAM~|9n-TV^d3dm z=(b46vR69D74REvPo-mMsk^^mS;IN4WP38;o#6= zeoTW5skuRd(C8$u#k{-?J#=PReJ8X$5}%Dtycw9gGB>oSPg0tt0@)xT5&<0=0IG5( zqRfIW&GO*AsWe$HMS82jyx&%Rs;|ULUo)nvJh{u1Knt75nIVqGhihxpAlb!MJZO;t z&JGFBf~$}(WhMh3`Kv9WF2yQ#6ohPSO((rt!vpN>jizEb2FkT9N>SyflfwLB@AEU2 zP(fyRDclegRUGo*g6kCwA6)RpiIXqv$a`CL`X(|N2GCGuTPkCzO&a=_=nF!^>;Qcg zjpivf{{k29Tx*H>s3yIlbX86kijk2aOqw&+gzJKmxCe!hMWV2G0XLZ!dz?j0zbKmuA%u4XLAtdFOEB8xDqJiBHOQGPpJ7-GN5JqvYSDipkY{^(sE4tohnT3O1N%LFh>wg^h+ zOd9tWjd<$4#sZ!qCWh|vm~O5J|5Gr!WLhdNX?|=g5>@POxGufg8_z}5pG?i0+B{-QW2T8B3f+2Hw&61olg|uvPrN^T#)Hd zM8&x=V4Y|OZX@9e9MW=)q_7!(Cfd@s6^Ot6wr1QBK`_o4$PeI|A>XHP)&BXOm7v$> z!R#HLGnzUyP4*@<#BfUkt!Dg&-5qz+6}i0^7zxluh1xLoAA3gWfA+2N3H`-v!bGvp zeMCkJwUJ}oH})L5>>ar+`7AP2(n7Rb9JyVekz;ocY5ft3!~8Lze`l)YI%g2i8AK*c zJOwQIA~w&KmIDW{-+Q$Tmo##S?E57kjrXP0DL)gD)Uwx(jv&F7US6+_rhWmUL&GBl zfN`v6KoD}=6K#kGsBP!+#e&lP;Hlwr&WJk$WR<95WENIQeJo7%eR5ucu>mqM@jpB9 zkaUxd9N_h>R>|F(?Fs$-Wb8*923qy=g-Emd(GG{zcO~i9?8M076~M#soMZuff3Vys_8Dhx zg>z|7UwDVC1C2|%0xK94v?o%)Ct=NXFr9(zAtt{Qf^g5;ahfKDShdCF`iDpOobj?X z4%iN+h;xqSm@GO-X(?|f^K9|&a8)PsPPkR4^ILX>(L9c7Oo9n2U1PtDT45AJKo!H7 zYY`R=Xt)CjUPbvH=Umd*OXzA5`4*mvB@q-DZjfHIeUG8>zgOm2-TltZBut*Eal|*C zkNAX^WBBk0W`!M{w12t2=+4fbL0TsY6GIC(Neop@jKEM<(4$+kghMxK^ndZjZFs64 z{Q=z1AKjH67bJP?3jmGRNf#f~^(Q(~U*9Flasp4; z7W;VE>tAs%aH2PuBK0cQeA-nA@#-th2ua zJ+o)*e4aiXl$P$`Kt@Dr-$A~-iI8gurEds>tkt^c7d8-x4#$@X*Odw5@1T6K!hZ>^ zki6pz1$n`t4CIE}V;pr(SVjaAANIy1JTnK6+%tcb??=qv>Gkks^iv-KCpW}ij?_H@ zE4gUIC5C5R3_Mr?>lN0PX~7-fmgwp|?+@H165W1_@c2{wQq^3~K(sS(K8P{@(|9Wm zlMy9kJUD)sDn~TUgi1?tQY6Fz2{=UzIMkAup9!vLAa-z73oa4+R2r)ZagcnH2QlRL zzV#s*Ehjx@5`x}?Q`3R5HIs$%DLovdz1y9i-%sN&54uC%7)tsPUoIP7^#hp%-@yir zh)0LzDv1%%_DDoL1H%HCE^55)LMT2b1$?o2Ha)n3QePai;&cngTZWId_S3^0N!plL4F64>OxC zPJ}u&@4-*DgJeQ4Fj9lRs7lwE6&lGDy})ByOLgrXs`WS0^s)`PyJXAElDx29LH@GN}ZOQsAWrI}lL0?w5ZUjBzCqeTc;57%j3j>;d)vKo< z@ANuK7U1lR8P{;7jH>iK@>37Ooei*2TeFy95|F}jU0&=R%#Ipy*(14-BGYKZYP@_B z-J^nfCXBq%N;AtFOPic_Cj3?Fd*mR#cObsM7CAZ=ITElIHaTH8I{LX_tPWaRJv0AIt70rO;F-@fgA0TH(URB`|RK*$#$kvFg~HvCslDM>}c21f;p zZ+~-&RaPdQo|Y*LWL;weURV**!Kr|TJ}hpK}>90dr?8*r~E zLxD8lM&!m44$Fjw8oHCuIURZ!uWsUD3$XdKV>JxPHKYSR zIv4D$HLjJeGs;k9h1JGWM%ZPkYB^cO12L(JK?%LRRN2Ox{+A0gJ8L=3$OlHYa&1E)pk9$61a&r;gSoy*J9qQj%m5J8B{Yu*8nyvvTjss3OQj zIvd-V1WL%r4m{^pYZUHF6wkA~;wPvrv!Aq9FD`Y`BTeE8GN_$LTWhn6{_&D^k_)Az z*6oQ6N2`uM{={X!`VvNJ70V4exMqKEjxn98iwrkG&yw;=z^`|4G#qEEV2=uS9=oY8 zDMxXZTDELID_)9B)M>7S_S;Vn2fq=l7piu%GKHvCv1bTk?-jQV*qMd zf1t%Xf)gKg!3FG&&sbmw{VkN~EpYxBV6J(9gi>~c!tA>dh5~9&(F1uX*MmSrqYvTr zs@?x#*8joTs9Re=>jAf~Ehz(EGg$M@rLg-^D|5U^Ug|cx*0xm}p02PxpT>`b7WsqFa9%p1fjz`$qo%aPIuCHeaLK>sOir`lp<8eTF4D zl9Etv?2Q>r#w2PwwbhatTrd49>5t@ZgY0k_M1YkEYZp`CoTkAZ_VDJfPxn8}zE#iST$;Ch z_+8WeEFjq4JHv?IDF^Glc`{pqHr?Jr$|Hdl+u95mVk>J3--F z`ue$!9F}4@K%tnq1iT0Pf2WPvxVp~ARCsqB4%>h~d&n0iHe_NDiYcqeJ&_j5h)qGM z7c)?vt?a}Z+H0fM*o$9JU3tx}NYKu0^Pu41hKU7xfI}gJB=JeI6-bt|(~{NIh45lx z)Hn-vKlNOVY+b1v+5RMDMmpc**}yYK_mdg>vhg%hC4)6CuouElVAc4+=q{68=hRAvdww2CK5uBHBo!VrZiO0d zaiz)GScZmkbzw32Bw~sQ=#(_2HYc^K|ArllH9A&uU2_>Ca0Vyc`N}9Viz>~!x^t;Vs9BgvoiaU^EC0IfZ{(Vp@p<4(N8J1is zRC`*03ORHxE7ok#VrxWHX?kXMA_^!%3&JoNn6LhIxz-6Hpzd>6odsb}(wHi!Ib&E- 
z;!Him+_SdOt~T^r^f(Zg7(7{uqP1EfQ_|wd8{xEyUYPqBq>p0zhErxA@3WBtTp28? zNDOb3g?zr21C!IaSNJ8~gsqCML0J5u=rD{l#0Xp`k(|n+I0g?0H0$3~_P}fih9_`i z8T($B9j2W6E>-M8TqJwCU8s#MKZ5f2w`HdP*oYEAt5@iB4e^Q@EDDNU7nkt`h8$6j zr=F^m!kYWZj|a(=?W|mxvZfFg3b2sip1+F~7Uuk7AMOG4JGq{c!1uueQC`(gf%Q)Z-^i@a-s%^#SO_?2Q zp^ri(a0(V0zY~sj{**FU7~;_{z_Qk*gC|5oC`jI=R%tC5g`XJT{Q>=|y-k_nFE7^} z2(;GM!9{)_)6{RXwfTT0WtvR`R)!!T1S0+XuaoL*dG#Ov=Gks%MU2ndf@rAKdjBNT zsb+u-j$d`u-YT;d|6PY{b%;mIA1xnxBBDN!)uJQ9;0!!;IwoG)lH9$1>FCgB=oH=@ z!aeV%+ew!>Z);SKu*x|Y0B{Pvs`y1kDl=AkA6_bT!Ravg@%tB-umVx4JP5G#s% zsQ)F^L&HV2!{rereRcpX4@LW(9e6ejN&FEi@b}gf*++k{dymh(2cwja_+v;~%buf2 ze$oh7W~7-Q4hgJ7zU-vfvk#{r{+MFl-l&gp6Kh3DXA)~RA4rL?;{fofTleYgX% zv_$w=h6)3f1;|UY(wfIlSALVCybTqSw@CNUW3E{1GPNBtm&2JH>i}q1?$i`yL!|&! zTJmzm2`<{59EnsW%??f4T9}o(v_%l|QJz2e1(9@BK5vl;-aB{lw)~%TAv6dX<-vUWn zVe!*JjPuHCTrjroS)XFjpkmpfM&+@P)5C)CS1${;&9Vz~J_%6He8bv87+sqM8k&&I z2=Exn1FD%9WE8UD&q5}8U#)K1WnF_lRPXiX(r0+35U2V$5Cz^7t8_Rb>`Q9DtA`1< z)xr;vkE=(SN1A$=sT#0o8K%?iwh57Bbiy$JAT)*Rp#szC^GO#xkwN21IVFg1iK&G# z6*Qhs;F50(hotKdx%mG$Vkwy4>@kiJ=>rb;CY5ol{UR}_5hrT`J+hP%^}SE3)n>*p2B=IQ7mtyT+~d2qgbQ*$4|&AmEtQCi)k!rrH-|drq>ej z?)(X$LQzR0X<+n~DHpMABkKz)}bIy6o;$1ah z2h(vE_sYI-2Og8K@H0U$j(K_SQ@LFtE^Mali@t}o-|z=9Wn&JPUaA@yG`zV*>lrW` zR~tr-ei_~0efAuw-Kfl)G^e2yZM0k-^{V++^9eZwV{08>{-y1x;o;xH{k3HdI`4%47J?XqjHvclXs(fGlN<}V| z(H&-kzCzowBBT4+v`BLHfUbFuGJ1~?zh3DwHLpDP{M1hRy%U9RVM(htsiFMjiCxn? z*9Tfd@9+O49Rv`RIY4}o4j8|xMe6^A*d&d<_y*>VZvUNfpd@R9qKL+aSUealYEfhs zp*o8J8Jg9C-X~Ov%+d%;J(Lx*3$B|r75kU5%4al*21QmI=Q9px-|IWNRB>%Y>$P{L z$CQ`L)^CAr?{BW*nh=OijJ$Jkz0r5hNorF&b`TK#nuMOCS=Be+Pg`x)8L(D<1zV0g z1jM%Mr|`t7zwEqCm3XUmG177r()5!Hua!Ci8j_bW;kcyR_GBIBP(P5bxe_&+c{9wo z;()xe7P|@%aK_{JkmLk*9+VTPr}iu`MTSkUeelMvkKvI0(NO1bK@p^;1rGUZNm$t8 zuYbsA&BU}O(@{%E^h5G}dgOWTSY~V3`-}DQ?&X30$;X^vd9n*CkoAjjVaCA*2D{o0 zMI?~)h$J~iO$lV=3Ptml7XleSaIX+1`nQ;f&L$6}@o#1wtbNelTXD|t81AfYmC6qf zFqdjdQcv^i0Y@Mr&Ed4xlt9JeT$v>OV`>@`%6AtDces1lTqRF)LngOXpC8cU4@QDE zF0jirLM3vFFw1e|IVKbt1(E2(Jr;j>R$iHu3=6xJGKa7=vbcS9&Zzf`g=A~ghv~TQ zF`02{u`dnA-Vii5vZB%=49%by&@HNl7J{H%rK&pCTZENa;w~~r$n%n*D4NosqV#YK z%@q%zkuP+J>w@-_ja1j4)!L;SJ#M#}PBUDeZVCCN|7R}m*?8_}<@@+A3XAG5M{O1K54j_|>y3>SRx9jchPiJ}@#J_fjC-uI8ApOZ za>>bx8Kv{d0~7mso|vsx=;EMc@jPFZabURuBk`bS15{ctF*;g7KJku6>)Sbj&+|9| z{W8i^7fad%DM^jVq}#=D^AnfH_WhL4Z*A}zXdhKuR5$|u2qt_4X-4!G^MSdQY{>Gfnj82F z6Ly>6NPwd69#2hA_%JTcPLHUP`QIRz~YG zBCP8Y13ddA(9CsuKO{B73awXzvr%r?0e1b?xx4Ae2zQP}s~pIj{^nqL}; z5-8F2>kJVrl6zliXRTtejYyE>)Ms;(7l;eo8BGQncUr7cdu@r^oGzA9SL137(ye%G zlM5Yrw0%eLR<1emnHni?3t;~SMlhztZEf+c7|-@~x(wJ6*HFcd1_kuUh!iBkiOp4r zqb%Jtt%;@)D`Z2GJhH>d}N zw$%Jto;K6dAaztldi%9;7Trr-NfMb>2d_D^9)DqcUbE>yRdd zo&f&C1gNQOW3B0u+gMf9edrrbupb}Jaf^}1>sr%FrB10{T>1?C6^mx|CiByH>FbQ! 
z=w)NF3AG02DIcq6Vi?R+(b#+gt%^?7q#n#Qt=AuL^kCf$ymcs6UQ~{m0kQhMDwqe1 zx+9i2B7GAYWVns)X^ZJ(p!N>ZlYWj0cd--v>4q`vsb=f{Mz@u4(-n?*!vo!|b_*#t zPVkqPx(=p4y-H=c5zEguy045_>;>XOMD%cHuK)p~c3Rcn&Ba$jM*DLV8=B1KR%dFz z?R(Y+@Bk`<`9iWJLP711-P7#8qDSJHkTm%p&P7qLpLWI!)~Kz)ngb5i;mlrC7uv_x zQgCD(3Jv>}bl9fdMT5YNk&}ZzXnrO9I7PuVAE-p6_562R#;N2S#^J7rdGHs1L)DGM z#=!^t8wX#$vq;vQeb#`HGRHwTCaS*6%{nzfCo(unEng3` zDb_r^x@h7;8 zi=2xcXl-=11IO(AbO6jp$IQG5&ZX3h$m^xstI5l}>7d4Z%3zvS)~Yn~gHwL(S9%3s z4cH{k8NKiV9eS#mD}z)^U39K)_p_;e`pdT09f}rh1f8B@xte ze!4dY(!}O~B1`dX7~f9GDjk&RCUXZ01h6@LVoFOH*W!Cn^K5MmF-cNmPX#@UKY`ok z$>Q4&Z5FtyN1JqnJH32iHg((?%P0 z_!m3LtplBvE zWR5Q=Jy>e`C^qG<_jP8H1qSaa^0B~MM!jB&6Is?ELWn}b@oSss#_}7{k*;5_{AJh>fQd>{bMIi4V4Z~3AIF4EUSR`y`eKi+!Sy|H_0CbJh!Ne@=#@uyWhEC6+A+s;mczE{2hDdzdz& zL+p@ZP!Dl~PmBS*G&85S974bADLb>pZt?&4b{oQd4(PvSED;csdvX$w>4zmrglVoA zzzz-4%Y-tLixNbKuiMFp*p^@o>KNb-4?<@mmDo0OMtKatwy1Rkz7DX;h10a?bwoi9 zymFF#ghLJr+{8X(j=n*bNb_<6>%d~TjhTY%`)VSI0lD^neX)B8MfdVul<7oK^XHyn z2i$6;Lrjqewao#nU_O0%4_A1RvBs`iv7{>mXvxjO@8l}p)j*I2Z&f^V-n1}6* zWuNaY(c&5hb{yZqpXyi_>{EwR>`TXYD{#Apgdif!MlQX$SPyFwhd9h>OKSTV*U+1$ zqIC3KoW?gaTBmzd0eA4@7u*|;&aJbk!U9vgA8zMKO#`vKJ9xo;FDxJaY97(n${emG zL$T(kujDT7O+Q<3qo!;)er<*IPQIVz8w30^8o4{MvzKKz%UB{SD43I707~$`raUa**(OqMj%1USaGXT=|isbpJIiqll>R zRP&KEAYEkEFcPVk2{|VZ^N@o#Ubhe^aA~}I5?PIx1whSeZ6dR~&}K*}jT0@Pe`+VA z;hdx-9(Z`L&P8A3&yn5+TiN~WBn%eDMmK7g346Tryx$^>aHo6~YCm7mcJOC+XWA@p z+PMDVLuK_tVKsTNo~^b}8W7uB8TO_0l{Y9!1Ah$cm29r8ga47`Aw$jB1r4cpfes3$ zIj=B_f*1_C5n}|Rx!RreK8Yt>3k}1o5h*+R06FM^Ipz~8!4qV<@#E~bX8S{VQUbh! z7*zTZC8`Nz_oVW+Bj(h*t*>k#F9>@l=p3c3(UGdMoh1?5bC$Nl+VngYX6C5)MU=@2 zm3>6g=vaeivK^KSG3!=bwCm()eP(;&TdXqGTL=Cd^r_LzdT}f%IMPm>cy>&*WRlU} z&vWjl4ubA4bDl)Yo@3xKcZ~hWb8y|+zNI0qm=La*Zv)xnD%|a2Z+yNKiQ*_Ut#3ZV zBgo8>ZxN1X6ov5=(&m9uRNzI4LPfa4#r`XQddN7;oLFYAc=X!yf9tUO`D}r7;DXXa z^bCT)NB9LlXx-_n7FbO);jS~Dn`{umT4$I4@iO#^X4v>UrS;#pe{f^v1@71O-}<5r zGySLSFQ;#1tnd{XXl(ENHM&%CvbO$*UBSPo!^+aYc?Gl&YaDFNd}Snb(I}N%S(sZv zuifvWXBGJ*2vaSvRN4vNda|kv{&GXcs3B;mq0F6lVH0{()1FSVsiNoVjc`V>uUv}~ z<*_~FW2fG&TSrR`eO|Y@IIAUKBv#{2wkRi9SG<5$;(!NZJ#ka7V?xE(i6eN*-jVI~bwB0_#mv z%5AKc?2_*Ro`53N&vXKMBv2rq7{+^-K;mbz#Q@x(fjSGkKT#+DgO z-}nj|pyOKMn!8Jc51mQ9ZlpOctY>QYV7ofI4t`pfDG48=h&=Mq@7mU|r!3s$_9+I1 zyV)LW+>ZUy>*43P2~Bg`6z$CJ-K5WvF&|_No4Lt@Z*9>v?kl!M5JZf&-Ym?C~>n*JG+f{Q^elA*^pQ_a!d zl{evmpr7&3ZtLugc+_EK#h(!(Zbi)gd$_)J?l7e_VWXTWCEqEEh3>=VJOXB`yRWNul zLsVBlIK$=_>h>};dnB$5Xeyi%BmN-mwX-vT@RFb&6MiMg(2%-l zS4e3y&n|ur$Ff28L3^J>LJko(Z+U>VPQ`Wj!OkSU)C$EABD5hwQoi&C|L?8#55i)3 zsDgdMSNA95i=F=;WhX#4YeO@88*6iSW266~^R80eR>KiR>q-bDm2T=b3#$_S)mSBI zhEt&mTSzH1^ApAZRLXEs5^H2`lHQV?asl#_kl+Egs)YY}FQUXJ!Y5$Z!-YeuuHHlL z=H~8lVk7N3<8?DlGW+xOD?H2=(iX{MbcJk%qz0-DjNSYJ>Jhq2y7!5SQ=(TNCYzCS9X)yIkCVO=+q`-OE#psVIJwCT zvE`%S&Z$4ySbr)0KI!dCeQ-fMVP#fa-FWFgO&rSOu)}Ni&W>>=(Ja znP5skcPi;2MlzyhMi9{aI1=f(f2;Gk#naJ|iRrB3om;4(VKLM91D53|roj+T;ZW}%9VUCmm=Xkwx1BfuQ!0dcVNiMg74dUzhf~#FJ10K{s#g2 z!a1T7Q-r_26Vp_OObyk@wmYtc(Pu-77yH*WahYno`BG=w1fo-?zyc6*#yA7+^WsDb zbcd&Z6}>dzW=|@zUdu^_cj7P4Znms$2;!`DtULc!S!4gzR0LS>WjT!ATXQeN6=OgQ z%JFtlW{F@smYqd7wI{kXo4nAVt1X>X8J9{qW2Xb|a!*v)qU);VLD`hqVr~I<@f(Xv z*<=}o75rbRJw+0}m_HJmFtlYiRLbTs(RA}GFukTV<$cErB&8KKg0m1&U-TZV4XmrB zeHwt{H-a)>a5D?_m(*ENPbb4?Bt2(aP+bs+-+T820W^$a0K-rC5+C@wl`aI1ns@SO z)?1wZYqMRKpFyMBgpaU~d$Gd2QF=Y9pm(`HK#*-4S#UfOzR8UncXJ`N9}ZXl9c!)) zY4T`;m*)UJqZ=el6XFNtcPthtl@ZdY)kFBY@v(^zcK|!;`a&?F=pgoLs40 zt&)SKPN+Ls@vkii<03Oh0zyT`KFsY;S@_=#3gydXx?j3IUsj$@QPCI)HTflQhB+k=Glj7aKLM+`;=_+VTyvlG%zOeMpK8S5)1&@-U|_c{j2%0M z_Pvcnu+ah)yP3y_{w~2smEu?qMC5T-qFD71=W#=oy_j80diguSK6R?Rf`iDOF#Cdf 
zk1-=uU74VW-;bqk>&>658UDZ4t^=&eWQ)cwwzZ)sVnwimy%&0s<_g#q0|bad0wFZZ zYHVN^dlx0vwV~KMii(PTT^rb4E4Eea`_6}#>@c!@f;rc$#nOo-0opvX&%DQRa znukuh<2U=${6*h9zs~%$7ZugkrFHFHCEMctmQQt$#2mT%LuVS%aqy4<@u>e-tCB!q z&6Y<5$b^#EFsw#u79xiv)CjEk!^UTsSGn@7Ti5KjKc(qPLAC4_nX?w`8QC!N(xU5q zTX*>=AEnAdt+<5Tnp=sOX;>Eq*-An&b<-duaRxYht!D?8qM=N#} ze*V}ieEgW#^FM!o9rQ=39hG;LXx63fF`JFS7VjIIPq2LGbm`>CtYpu&8^&E4*X7BH zm`0+pORCS^Vj=7G=zGL*vql!d%@T!W`_4%pUK2ZL^GkS#xNY%Gin%o-JUh&0Le*6j zM^s)__+X0Q`m`}yj?W1GFt4_KxgEccxPITHzQ3VSPgMkhQz)zR!+ z_0j!bwDz4PK9$yO(dCo5vgRGzE9)MZd#=ykrPnJ~zur`KDl{PX&rO-NR$kdPGBe`& z(x+)_!YbD28Zy0Y=}f0SxmP9gK0Uz(-;foJ6V_chB&t3BK9tLt4=u8KUF{#)-xW|EFx!3ct9sj9diAi*ubO=e~;?7GkqP<&1C!5{%!<8?^UQP_U=-ubs^3ir)Gw&XH zrJT6`bZ4uY>zj56sCVN>a^<|t#OTKxY9F|J(P!7rK~WLU_nnI9KR}Qm%a^{Y_PAQ@ z)&YkGieGi8^dZ2>e#yj<&L@ZeV-b2H&pfup_=6({uUN6$KE7Idji50t{Ov9sbJ!N4 z%(Ru?4-v21x2Hn)I&;zjPc2XMI(0iSIk|iIpnLfX-`3Ax{I*1|P@84#!5ULRfl&F;jJjF7?8qaJRb>)b2qz`haVeV2(} z}%-M(`3k^QGw-Od}GW796%=}S;nodYlK$-Y+H>er)&*^Awq`<*`hy6xVp zcdpA)JX<{7VD@fV*FW>EC!c6i#iN|n_4ohvzu$Xbxj(BN4|}t4{iCe@XD^p|*YE6a zc60as{zru3(2925FTJy#R!;FSeZkrlP1giIX_&VoZBERVJqyn5n3Nt_YDbNz72hM= zlcQQyepB(dbKb?XK0TBR?j4wrQvFU}r-d&n6bzV{`*HK{W_xb`DeBWEYW0>XYM7w72q`N^vkWirB#Py zRZ#4oU$AmZgS0u}uiPtFY4i5`z$uQ`&pU3(uG+Ou*igUszMiLK<;O%@jLFGyoH;19 zMdnbSTB|Y}Z@mA*f9U>`xm!Pt*!rM<&oBEA?j7o26}oZRn5*wM`dxS%6jQ(D{M4>n zZf%IPTa$Y9ulEDzuW)acNsLl%83k#YIV=#f9nmrGinI5BJ0i$iWLoqL`+Ibe{>OSicX+rD|fxb=L; zQtK@SE-I7SoNkoAZsCSX^P~SzTfuPkC6P=pid$vlI^h@2VjfR7 z$GjW^Ol)LWhmlM;IZ12J6jl}pq|J)PaZQVqib+tJUUOojWmpjilVO1%K`I-?hlt|9 z+7ANawKhFO1{FDsZFb=XZnsUwBbA`oG*j^Sc7_%a*uft@F!LS-71rS{M=`vWpT+gq&dHhIY|Mzy|Vz5yaLZ zQkjSo7E1aBb~RwpD9+zN-LYSOo)Q{foTvqtw|iv=%rNK^m&2 zRsD8|G%8BUZ4ny8hXEu^_2eO^99Ei!sN|T)4iX{_6@_p@HKCvm^Ff7*qT&)c0m({1 z(MP=G2OnUxOcW+g6otmhNyp?wZm9uQN$r9E)YZRQbwyHR^-nB}6pvQjBIN`|8`U3a zaw;#dTqqYsq0fjV+@6ij{tNC+K3usdQ69;MNrH9JN7`xyZL=srB$MI)Avb5C03OXY z)L^PK8_WTkC<=*_OJ$tuq|wsY(Za+sQGzfsk`t{F)wX>>vc{1}(3#oPgcmaz(|0<$O# zJy;}+;*^nAejj!$xl9-q#?!ERl){qP!BiK@f2J||qa=oP^+)>DaR#Iap(Hd?#O?O9 zq6#@sLdbKroQk5c+S}_ZUDG1|fJ}EFDF;?mTqF+j@eE+J4YzT?8Q>5WEMnj%tA+2zlKFj^XA7cG-Ui)3=KDE3#9iI}ULKHB!C5DD}_ za{iPe>EFpnlEkRUU(w0FMyL1NM>GQRH7LTJLN9~Km2PxX=X8$2aA#qN+*9fp5+aJm z7-+5eiyx#=B^hpU@;I4#n^BjvblvY&mi`8j8Vdw)bM2>8%;;tWY0*ODui&H_Z|C2} zww#U0)OO6tXz-Xm27o)og@uV^n)kJJ!I$JL2zv)(>;wS~?(J(hcsu5nH#YO$)E;(t ziqjl4_>8`WgSTe}Uwt+;C>Dmk3<4UwVxZyR9rVG;ITWj)N1oHV3Ix4p6_qj3SX8Mv zH^5+UX6V-+>)9_vyN6&V+)jgwpGw7Qyv@Vwpw3c>1b21ep>3J*uXcWVc?|GxVeU=i zPhMaYK7&z-YUW<_h{}UOpJQO9yRYA3WAK?y$y*v9YM|!(bw!?&XLksQ9n^koG4cAm>`siRE6r)|M1I( zxNcOn7=5eRDWom!2I`vS-bKUd_02*|Uj3S+%_oSJjv>ASCEB~UhGQd)go(rT8ORV? 
zFX3S8qd@M4alwsZ=<$V@p|3O&)7)@%n1TaDieh7b8CMyi7U3Me!qcTS`b~$*0zoLn z8T7T-oOEL}BYR(&Gz4vv%9yoa_h90X*Kqyp-vk0Lszp@7?YrT7m7HnY;lR1MRk~0-zfu3Rbg->N!m}TINsP$Mt&3L|7F?wFdVm_ zx2|C?KcQeoDo?mc~;tD{bh(9lCz`=0)eNNglMgHPg7`q_62nqW0uZ*L&Yv@5t?193Ix3< zTGSOb(EB~5Kc(Zc*7NKpPb{hl1VQa}XB|NvCUEHsL`H->`P7%>#@k)!k~Y`*L57oP zV+Px>)~`|c!+6jPLgb)3Z}Xw1(5Rmz^e3y5%!1tw8iGgc1cD3jciK`Vf=pp#vlLO{ zmlB&6j=~y2MIT*DRSh$Ni&>!ByMfC{f;IKU#l_ERb@O<^aIV~II9PA>8iCF#SG zjS72TO^ z&4dW%+$n^1vtJ(>F%V`F44}(##t)~5y3c)INe3I|6P4rdy zcJ`3|TY>cne`qI?tY$}*Mny|wQQ_EAT7=CMJP7)kG2O3aqrD$t1P5)4i2=lEMxgsj z<>Ig;Bw=)Q&0q5j!nSSO@%Ss~42{inlhm2%hO_BwpJ4wJQupt8Xn!E^e{R4KNj7ib zz*bFdx%3}1Vdbsnz#O5sq7bo(71u=+CdA^WDw*d3f1TJXvp(W>0{p)%rT+Xayx>LC zayEFb33S#aI~H44G(mtt7-dvksd`#cYuC|HZhKg)t_As0T$f`-E{s z9wK3AkrJ7in)kowR~H?5I<}x`rh**fm;&(rBNx-*wq6OdaW*8XuWm01zG@Vcz91xS z*IeZ-$b_nD!OyUEs{NFT-mi>;cZw57;_*r*A0Rv(@07AihP!>Mgp@$bLVUohz$6~6 z*D=fW;r(!j8Z`xiqlhf@9IM@XQ}~!!;@UjkcN^jBJoe8p_tAbz#qx3`+0u4(eAJ9i z`pBgJSGv2x{}}Mv!Y&=M@{aieStuCsJ!m=duCk(R&NYDgQ>O*vHHKX#ej7sqLVe^ zuF66fzy*47q}uVSVX@l5Y;);?Vb09ij&l<}x_%^JskThm41E0F#iwT>OgzcWkXzR=TJ0i~6xqB}B%+8EBojD0ky@{D1?UV})2T{7*bRJ3iyhaI5V z0QC{cg`wyj`V=H}_;h9I-wFh)5}>fx5|GY!zbzky%C*??65>=h+7)grgbh|y)Xl`V zcQOcEy^z3WXP@N()29IcA7uM<&it{PG5E}?Db-hQnFufPK;DcqINDFC$mzw79v2mr zr0)Gp%_cf;@YOw50^qxPz|$QJ17?|KjkkSXPRa=E=wui$!;Tnf@jq_EuT28*5xo|F zb~X%{VKzOMyIS>(BCEa^kh##nH9ZXj?!*l2n|gI( zEd=X4bKNzB={^Pl>%)?+QfBkCPMDnv%0n5gwXR0`;Xi?_u-X8{n!6JnKdydsb_O6T z0G?j5JUxH|8V@O8qNjG{exi`15S(OTbLo_!fg|1E93_#GVs}^jDHZ1j8qJ{|McLkR z_;m;+^7RpnE*OX?`5PiIJ^T4%h_T%I#vvJ<DIzJSzW*%3)Lk$3Rju7FH3!{|@ zdi#a7-<(4eZP09bD&Q_M0$sa`5Fm^l#VkjX-GQ@ZVAH-8b=NB*!%Sr&^EPI->Ki|2 zxWf#4!RSnx;^HVH*wlSRwHm;^Kr*z;GWr%*X zU*#vVV{4D6=z|hxeW47G4~8v1Hxvk(QX2YiGzVzW0wgo!UY16lBBVxmRud)0gYV^SD;lfbPq3b(ej#V9YVj|1pS1Kt8F#3 zI|4C+UX{(?YZy8+ZlK$2%eiQE1z^)_gPn26kH>(jR`V>@oLF?}en$}0fLiF4S&PF) z5$Ly{~bFM;|}Pvgsu8-}dkV$!uk?xxQm zn^^;(ar&UbjDqZsxM7|`H+T0%eoW3V(tfwiHJlX@?7|;9P7lPXDAwj`cenK$ zO!Pa|*M99iGB)MZWvA%4?7E8N+ zIAeyuGDOc7W=}aOREP4IF>MZ8Zuf`2@}MtzOSb<@eoXFFRMO_XGnTwO4t%ogPKVjg zulVtiKrxV!5x6o zD-)vxrPzWcA^~m)hLgorJYdOTOI2qb+U=4Y>L53(>B-)dQmhO=7trg(bsSLVU@lxoMszEUZJ0}=C1 zOtIWba-{m0$I_#1BsPs$+K-igatY3dadCy9o~iD+)C=~-%A8Ig%D3V|ExJ&}y`NBe zRN{g|z#fcN)0>fT4Op=QC8H$L1c?vM%nHLrWID&;M5L#N2Ce&GfmyO$HQm+SWSml9 z(Ti%kf&-YSrcH+YhYy;c1=Y|Be`?DO%(f3gP*XOFALS#U;3S*QxwA2t^Kvzp^&};M z6`t7o=q6xK0hZ2i(pne<%W}?~XxPbZ^X`pM!Y9ma=_sDv(jaWkm5$A!BcsV)!gT}z zx?di&<3FU#eQZLjhP-I>W(3wDi>4a$P;} zG(voFphk?R9rSs^if-r^T{G8Q3_}!pGQT)^be#IVykDpAge^FryUlDc zr9mp1Ex5ChY_1Yswu51!2cyu(ZdTS^?Dg%z!{!`^E1|gB7LRJ0lyv%0n)tD_{Z1#G z^bzz$%%Bg@)^O%w(%?Ijf(1OrDm$Xw;_*^kpG7x@55_?E?rV zqTkT*uaBEiBr0EFnvW8eci!Ir^@QK;(_>laVFC-ob|PUpyk@pp9%PA!&-J0ayBQYL zbi)?q-IQI5*3hhrd@l6Y0}0GJ{Fboz$z||5>+weUn82$ZR`_|Jr(}7Kg;qyGt9>bX z*ZY~ktI+|~v)MNx)(!pkC{L%@XAi#F<>uU~=;;_p=uN@|tOOZr zYkj+tsNCXs?nj)E6-4Rv^C1z&aPVxeXZCezI1Dsc_R$F^eMg!?!_Yg3G%uQ;YW5E( z@{ya-;iPn=2{igDBcU;($EF2u;ELpGAw5t!NDU-WyB{;YZJ51yB6Jn1XOaog#^JNB z8WCLuM|I0?3W}i^Pv~QjgJj0haKw0`uQR82XDXpDvav*m-@oIGBVrqf2}{QfZES9U zVLUWPTjg@Rfh_87$!rx*v+|qWfsIX79S{DTU>rWDRnEQ4>UaP&XtR!07AF};!()|J z0pIWEKws5CMq6d`7~_c8tU{dZ?afu=Pl917#Bl?g=EHfNu{qKo1 z$;c47-|fqw0$AxJ9GgCA_Vy1R&R;7a>yYYu1>riBv1IQLY;iRHjrNxNG?DSc2^*I_tUf`vB)V9>(HHP`p8S(POlu9q%vGjZy zch4Ot1$~ur{ zo3}KJ8-sPcC~IA(o42Jnocc~jTzpl7MkmP%`E5u^XO5%x8^lI;VSN%JQneMin_@tM zC4HUILUhms64pe52z25?srcRKMV8>BN2Yd%P2ghnRl>Eoa^VNESa1(=(HkYsM@-=A zCyK=dE`1des=BM9zkPsMk3mPhg=Ya)EJ$e{f&dF7G?JADx4^=dPi}Pn26dRUBs4 z$qlhz{b}dkb;LOO8N&%pdirc}NLlSkNb5~#F$w$2f>(&8^fX?0iHDS~Jn0ua64iLH z#r<<*k6X{g!iQ%c>zo5UTSL!Yl%6+SEiOB&t_ks}o9$9xgLsM_@y)-BOU$Y7#S?2i 
z{frcD8x}A8D2dzNDK4?61Z%NyanvVdjt~xqmn4(ZdM$8Vi#GORR94_zHQ_aO(Nu?v zy=s+D*}+KCNzEjBea`YZA3GTfHMK~w{~a3_B?^!_%e=!>Rz{y@sS7|{{QRY9x5|Kb z2zUojDu0Z#Yur*Z^138Rgi+#<0FjJTdPBm@(+J{lb~*3e$U@Qs^s|0cBZA(W)CgBe zs9%_RX~_WoHVZaycR~~7RE5@h6evp z;^=7Z7=3a>*1i{@odRJ!DeA{{jG^|AzzH?OBqll@a5kwvsf)1-X_q@i+u6z}TGeWW zQ;*YcYW!krC`|xPuM;Znq>%_UdiI>817IOVx@i67qnsh< zc&-&n!=~={Oi_+dS#OT_ih};WLojDbu=lM@r9%2bnxP-$2;s7f6T9Xjz?H+bb$aLF zcv}+*^@&Jd|Gu)r{cUi-8%PCdqRl;xCE`qY51Ai1)E?@4R7ZE6G{c3JNVlrHE2;AC zuC4OUcFTbXR(rR?x;6s!8=M1hq)g`G$IhoI_^!PXuR6@`kLUheQ93+|Bzeqw>~_cH zL=cu@&RFZ7nLE&*pUzdy%Oz0t!k7+EAP8zR>Ga`IA^Z$D;6`r7sE*EX9>Vr$ckKj8 zjk@5B5`6|`W2kX70eC1;F4sIns(UKr-P{{p*FYSyTTy<38uG(tbs z@q_#5(JWCyr11kk84kCILLmxn?~?;>8cQ)n3gULh3hy-XKv2Jjk%S&Q%SIQ6PMzVi z-Ap6Ysa0(yqY&VS;EJys&ri>{DCvzX`U%BV3 z-Y}&v^h-zKbE*7fffA{)#`G$=JtP~19pF1O;V8Tm%I7;=gbbVWM4hXL_<8xF^I49mh$I7Q;6 z^GH18z!fVKuP)nh4Lz_dA~L6zEFN8Rf7eb0!&la&dH`v}U0(#oh zNcRX$!&AnyFq_;-s0g_a-Jq*NF@{6EgTT64OthBpeVEN~Xe}Eyp^pUBK?!`8 zPRpP*RIC+;FqBRt96J|}e|8$O;QWukfwK8I6lpY^!?0ta1!KJikT4VoiRgpvZ*H)% zX&!X(mWF$a;zf+5zsS^QQ(C#@s8=NDjP62%m%ME_I72)m;Jubz%VY)*%`+UFrMgG@ zxb*m%Xn7~SmY=$7ICuwrk0mY7tN6RD8Y8$ZE=96fSWo9HRX!f3Oge1AsPic(m2N0F zOQjDpo43nJo|pxXp{pG{e_}Ye#%=Huiw4X(x{`Ffza4y1fwUe&hxSt{T0AwFi5a@& zE{ijVFh)$Pt9w)_{iPw$L{qD$wDuvjuQ0pSnTJ|cGY}YiqS4qA;27=* z(dJZl;dc<>Y?Tg?Lk-i2SYltbvvba;L7Fg(t*+X>LG}(8nVUwW9xa&5j}X}c|GhkC z2{x3bwAA(LrFBiBWOf3xBMzyl@aks>y!4tuN=pN2_y<1XZ6O8oACUE=EWOl9X2(xd zV&fc_VM=s4`&-e?NWjHgU+ zuAmp}K}v_Tp@iw)!B8gVVid#(E*o0Dn-4WD0|RaC(-?VKVizhKF$?)GmN?#?qn?}SYs(-YY z**SWcUA06lIlah-9I)`z52=O*93pUa*=r#C`kl6(KD+leth`cXz_a3?XuOG|byTE~d)$hMWNb2C_k8MG~oJK+FdDdN;dk zhg#Sw!mZsFlvrW?4I|RWB^_p9n@P=&!N^-NpTf|h{gjFak$kwEi;bbXyi@lAj-0Tj zW8@2TPPW*B&3N@gjxuQ&sx-^UXd+{`I1!;y^V@GPfaN1>;HwpYjK<%ln8<`Z!=LPd zxXR8J*edQSJ1iI6q(TnO-6`e&#?2Dz1Uo!-leyy~J#Ojr{~I^M1dZr?=9|pYxPc_N zfEeLT={#-b|BIX1VSm5BCkr<+1+x(Q+$iQjb4_Gc`54zKT44E34R0X-zTQxGLHosg z(>R%Bd->Y^$q{IDZcW{7FUvFz9&#GUUUPf1=y;CmYFSaY(WfGTYfeeW`@T?P7vx{` zDAi#T9~E%b<z^=6Pd^ct||@b2zL(HOAI#UEOr3aHh`64o?qsN)qWm4@=%;5gC$st?I7 zPvZ}5nj8EShDky^pj(sWN!JlR7QzcUgG%$^atbbS@<^g8*kFJxgC57eo&XYZ{*adC z-vb7b(95)JVR)LGch}0Gz}a3MnsbLtp<#)*({B1t$^^woy}e@lqo&ZHe3@o&)9QWW{5khjDg*)*OC~BT2Zy?@;G&Ir1 zr=}6H_?_RZ!BbJxl3`XJ^TISDmSH`|Y{GR?QOp+WK=huUOMz)b93HpMGk#%t%wFa+ z(CxG%-kLqGsv|}cmj^6YBC@9Mp zO;s$0MY?iByVtj@j#mOO381BMz;i}CQJaB>S!bJvOWlH?Gg6_GX2~!&j)kkE-Yrql zcRuv>6{!_nN-+$jR9IwDy$0=RL35;k{q+GkH@ct2=j0Ph8}(T)Z-@TC&D|z zB1s6&Dv>Henn=K`t=QX5pY=ctxC>`?(F~*bsZ=;O;-*n={cFpKX{%=OF#d=U&94Ca z?16CfS-Sf++>9z0P%qc2qg&ArzS%D6N~PU8jPQrSJkgaf57mB3MZf08(lV>^zx*$+ zNl5(@>ZBP8Tj?`so{kSuJwvZ5t|_*)U*_^BJ{I!T9>JgLXa2{YZ2TMnJEC_zO z4jjRdgYN8sgN)^1)>M;0^M&KUfZ}R84BkWd7}Rq$9z9K{Giiei3@GENvlMGN%s2*S z33lb5&T9mGvYAiMG4h79Z=i&DHIxpsMv)Nc)!38U0T!(Izxp;+RG zOL7yFpMkL+?B%4@A{qBGCm6x#uWEn+d~a$z8eRi-YYDs2ll$+J4dByArg^elUyVel zaicrB+=nnE=F%yNXsQt;>T`f>N51FJ&&=5Y7IOc99-CGq8^OX_4Vp+&u2;_Z=inHl z$B~v|6bDO{W5V$`;ryBgz%RfbdJ`spn(-WL&PLqZv7na=vRi=?m&KeZXB#lX5T>GM zOEo@VfVbt{cwsXcS!0`qx+hzory5RZuyY@|F)@J@6DS4erq_LQR~tZA#KZ2|M1|#6 z?nupp>PQu2+UdHlF_?nKv9I`U*|H5B@n1j=;XP1?2eGG;;eFj`6)E)lx z;v8iV<`A$>BDqIOcgtW3dK^ku8q2`t(*HCLS002Ub;0F;pPEmp@Lg{Jk3=lB_Z3Su zF0HXWQ#wqnZ??PHjFO1fn=n4~*HGc7QW3tP7;GAP%vkZ@aHJ;|oSARBkTzsYD%j+O z#h8zcb+!1?S+pKh#8Yk~Do{PurZEU`;eTC_`6vdq}d?pT;Dc!9Nq>pPrO|zCUXU&Ck4()g_5^ww~`4g;3=uEE3a6MaTscX!UJINP4fS zn?!EK*3k>@+ix09!s!-cwyc}C7iy~s8v3|syW2)ksB$sBP?9qE=~d2#OM7On}TYA@`)C*&o zw1I>zPLFiy^oKc=`!|&9Pbs(dOVi1W(5tNb{#ZZg^|vOvv25jQlN-RS*k^BgUnh#K z2F220)BTIFRO;TrrdGdFy_S&(9ghi^52e;YUrnW=vPkAav&4PQ4jWko8%}~kY^%`Q zco`+i@nlJXW#y_S^=vT*O0TW^-bNiS7Hns+ECjC$$T%Lah_Ty{GY3&14y|sb 
zX)b;$6;u1Op-LqoaWZT|%RPj#5&ld!NlDM>H1bZlZ9pxLg_q_Oa?l`Ft-0>Y;eH~e7Td8(e^r=OdjM3Kf9>E2TJ#jSGT1CSa z^G?erU2Tfl*8||w%QTlG48;adQKdU(&A5I(ZcfV2eyywfGMr}=KZQOfap*}6HpP~O zd$d8*TTn*)8qb3184Ax3%ab^rWI#N^Uad;H nug~6^#e$53W|G)2C|T3iGWdOgV0($}CF*U$k(C9QUkLsO7EQ4L literal 0 HcmV?d00001 diff --git a/src/test/log4j.properties b/src/test/log4j.properties new file mode 100644 index 0000000..1a2122a --- /dev/null +++ b/src/test/log4j.properties @@ -0,0 +1,7 @@ +# log4j configuration used during build and unit tests + +log4j.rootLogger=info,stdout +log4j.threshhold=ALL +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n diff --git a/src/test/mapred-site.xml b/src/test/mapred-site.xml new file mode 100644 index 0000000..437d7c4 --- /dev/null +++ b/src/test/mapred-site.xml @@ -0,0 +1,21 @@ + + + + + + + + + io.sort.mb + 10 + + + mapred.hosts.exclude + hosts.exclude + + + + mapreduce.fileoutputcommitter.marksuccessfuljobs + false + + diff --git a/src/test/org/apache/hadoop/cli/TestCLI.java b/src/test/org/apache/hadoop/cli/TestCLI.java new file mode 100644 index 0000000..0a27b87 --- /dev/null +++ b/src/test/org/apache/hadoop/cli/TestCLI.java @@ -0,0 +1,474 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.cli; + +import java.io.File; +import java.util.ArrayList; + +import javax.xml.parsers.SAXParser; +import javax.xml.parsers.SAXParserFactory; + +import junit.framework.TestCase; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.cli.util.CLITestData; +import org.apache.hadoop.cli.util.CommandExecutor; +import org.apache.hadoop.cli.util.ComparatorBase; +import org.apache.hadoop.cli.util.ComparatorData; +import org.apache.hadoop.cli.util.CLITestData.TestCmd; +import org.apache.hadoop.cli.util.CLITestData.TestCmd.CommandType; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.MiniMRCluster; +import org.apache.hadoop.security.authorize.HadoopPolicyProvider; +import org.apache.hadoop.security.authorize.PolicyProvider; +import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; +import org.apache.hadoop.util.StringUtils; +import org.xml.sax.Attributes; +import org.xml.sax.SAXException; +import org.xml.sax.helpers.DefaultHandler; + +/** + * Tests for the Command Line Interface (CLI) + */ +public class TestCLI extends TestCase { + private static final Log LOG = + LogFactory.getLog(TestCLI.class.getName()); + + // In this mode, it runs the command and compares the actual output + // with the expected output + public static final String TESTMODE_TEST = "test"; // Run the tests + + // If it is set to nocompare, run the command and do not compare. + // This can be useful populate the testConfig.xml file the first time + // a new command is added + public static final String TESTMODE_NOCOMPARE = "nocompare"; + public static final String TEST_CACHE_DATA_DIR = + System.getProperty("test.cache.data", "build/test/cache"); + + //By default, run the tests. 
The other mode is to run the commands and not + // compare the output + public static String testMode = TESTMODE_TEST; + + // Storage for tests read in from the config file + static ArrayList testsFromConfigFile = null; + static ArrayList testComparators = null; + static String testConfigFile = "testConf.xml"; + String thisTestCaseName = null; + static ComparatorData comparatorData = null; + + private static Configuration conf = null; + private static MiniDFSCluster dfsCluster = null; + private static DistributedFileSystem dfs = null; + private static MiniMRCluster mrCluster = null; + private static String namenode = null; + private static String jobtracker = null; + private static String clitestDataDir = null; + private static String username = null; + + /** + * Read the test config file - testConfig.xml + */ + private void readTestConfigFile() { + + if (testsFromConfigFile == null) { + boolean success = false; + testConfigFile = TEST_CACHE_DATA_DIR + File.separator + testConfigFile; + try { + SAXParser p = (SAXParserFactory.newInstance()).newSAXParser(); + p.parse(testConfigFile, new TestConfigFileParser()); + success = true; + } catch (Exception e) { + LOG.info("File: " + testConfigFile + " not found"); + success = false; + } + assertTrue("Error reading test config file", success); + } + } + + /* + * Setup + */ + public void setUp() throws Exception { + // Read the testConfig.xml file + readTestConfigFile(); + + // Start up the mini dfs cluster + boolean success = false; + conf = new Configuration(); + conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, + HadoopPolicyProvider.class, PolicyProvider.class); + conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, + true); + + dfsCluster = new MiniDFSCluster(conf, 1, true, null); + namenode = conf.get("fs.default.name", "file:///"); + clitestDataDir = new File(TEST_CACHE_DATA_DIR). 
+ toURI().toString().replace(' ', '+'); + username = System.getProperty("user.name"); + + FileSystem fs = dfsCluster.getFileSystem(); + assertTrue("Not a HDFS: "+fs.getUri(), + fs instanceof DistributedFileSystem); + dfs = (DistributedFileSystem) fs; + + // Start up mini mr cluster + JobConf mrConf = new JobConf(conf); + mrCluster = new MiniMRCluster(1, dfsCluster.getFileSystem().getUri().toString(), 1, + null, null, mrConf); + jobtracker = mrCluster.createJobConf().get("mapred.job.tracker", "local"); + + success = true; + + assertTrue("Error setting up Mini DFS & MR clusters", success); + } + + /** + * Tear down + */ + public void tearDown() throws Exception { + boolean success = false; + mrCluster.shutdown(); + + dfs.close(); + dfsCluster.shutdown(); + success = true; + Thread.sleep(2000); + + assertTrue("Error tearing down Mini DFS & MR clusters", success); + + displayResults(); + } + + /** + * Expand the commands from the test config xml file + * @param cmd + * @return String expanded command + */ + private String expandCommand(final String cmd) { + String expCmd = cmd; + expCmd = expCmd.replaceAll("NAMENODE", namenode); + expCmd = expCmd.replaceAll("JOBTRACKER", jobtracker); + expCmd = expCmd.replaceAll("CLITEST_DATA", clitestDataDir); + expCmd = expCmd.replaceAll("USERNAME", username); + + return expCmd; + } + + /** + * Display the summarized results + */ + private void displayResults() { + LOG.info("Detailed results:"); + LOG.info("----------------------------------\n"); + + for (int i = 0; i < testsFromConfigFile.size(); i++) { + CLITestData td = testsFromConfigFile.get(i); + + boolean testResult = td.getTestResult(); + + // Display the details only if there is a failure + if (!testResult) { + LOG.info("-------------------------------------------"); + LOG.info(" Test ID: [" + (i + 1) + "]"); + LOG.info(" Test Description: [" + td.getTestDesc() + "]"); + LOG.info(""); + + ArrayList testCommands = td.getTestCommands(); + for (TestCmd cmd : testCommands) { + LOG.info(" Test Commands: [" + + expandCommand(cmd.getCmd()) + "]"); + } + + LOG.info(""); + ArrayList cleanupCommands = td.getCleanupCommands(); + for (TestCmd cmd : cleanupCommands) { + LOG.info(" Cleanup Commands: [" + + expandCommand(cmd.getCmd()) + "]"); + } + + LOG.info(""); + ArrayList compdata = td.getComparatorData(); + for (ComparatorData cd : compdata) { + boolean resultBoolean = cd.getTestResult(); + LOG.info(" Comparator: [" + + cd.getComparatorType() + "]"); + LOG.info(" Comparision result: [" + + (resultBoolean ? "pass" : "fail") + "]"); + LOG.info(" Expected output: [" + + cd.getExpectedOutput() + "]"); + LOG.info(" Actual output: [" + + cd.getActualOutput() + "]"); + } + LOG.info(""); + } + } + + LOG.info("Summary results:"); + LOG.info("----------------------------------\n"); + + boolean overallResults = true; + int totalPass = 0; + int totalFail = 0; + int totalComparators = 0; + for (int i = 0; i < testsFromConfigFile.size(); i++) { + CLITestData td = testsFromConfigFile.get(i); + totalComparators += + testsFromConfigFile.get(i).getComparatorData().size(); + boolean resultBoolean = td.getTestResult(); + if (resultBoolean) { + totalPass ++; + } else { + totalFail ++; + } + overallResults &= resultBoolean; + } + + + LOG.info(" Testing mode: " + testMode); + LOG.info(""); + LOG.info(" Overall result: " + + (overallResults ? 
"+++ PASS +++" : "--- FAIL ---")); + LOG.info(" # Tests pass: " + totalPass + + " (" + (100 * totalPass / (totalPass + totalFail)) + "%)"); + LOG.info(" # Tests fail: " + totalFail + + " (" + (100 * totalFail / (totalPass + totalFail)) + "%)"); + LOG.info(" # Validations done: " + totalComparators + + " (each test may do multiple validations)"); + + LOG.info(""); + LOG.info("Failing tests:"); + LOG.info("--------------"); + int i = 0; + boolean foundTests = false; + for (i = 0; i < testsFromConfigFile.size(); i++) { + boolean resultBoolean = testsFromConfigFile.get(i).getTestResult(); + if (!resultBoolean) { + LOG.info((i + 1) + ": " + + testsFromConfigFile.get(i).getTestDesc()); + foundTests = true; + } + } + if (!foundTests) { + LOG.info("NONE"); + } + + foundTests = false; + LOG.info(""); + LOG.info("Passing tests:"); + LOG.info("--------------"); + for (i = 0; i < testsFromConfigFile.size(); i++) { + boolean resultBoolean = testsFromConfigFile.get(i).getTestResult(); + if (resultBoolean) { + LOG.info((i + 1) + ": " + + testsFromConfigFile.get(i).getTestDesc()); + foundTests = true; + } + } + if (!foundTests) { + LOG.info("NONE"); + } + + assertTrue("One of the tests failed. " + + "See the Detailed results to identify " + + "the command that failed", overallResults); + + } + + /** + * Compare the actual output with the expected output + * @param compdata + * @return + */ + private boolean compareTestOutput(ComparatorData compdata) { + // Compare the output based on the comparator + String comparatorType = compdata.getComparatorType(); + Class comparatorClass = null; + + // If testMode is "test", then run the command and compare the output + // If testMode is "nocompare", then run the command and dump the output. + // Do not compare + + boolean compareOutput = false; + + if (testMode.equals(TESTMODE_TEST)) { + try { + // Initialize the comparator class and run its compare method + comparatorClass = Class.forName("org.apache.hadoop.cli.util." + + comparatorType); + ComparatorBase comp = (ComparatorBase) comparatorClass.newInstance(); + compareOutput = comp.compare(CommandExecutor.getLastCommandOutput(), + compdata.getExpectedOutput()); + } catch (Exception e) { + LOG.info("Error in instantiating the comparator" + e); + } + } + + return compareOutput; + } + + /*********************************** + ************* TESTS + *********************************/ + + public void testAll() { + LOG.info("TestAll"); + + // Run the tests defined in the testConf.xml config file. + for (int index = 0; index < testsFromConfigFile.size(); index++) { + + CLITestData testdata = (CLITestData) testsFromConfigFile.get(index); + + // Execute the test commands + ArrayList testCommands = testdata.getTestCommands(); + for (TestCmd cmd : testCommands) { + try { + CommandExecutor.executeCommand(cmd, namenode, jobtracker); + } catch (Exception e) { + fail(StringUtils.stringifyException(e)); + } + } + + boolean overallTCResult = true; + // Run comparators + ArrayList compdata = testdata.getComparatorData(); + for (ComparatorData cd : compdata) { + final String comptype = cd.getComparatorType(); + + boolean compareOutput = false; + + if (! 
comptype.equalsIgnoreCase("none")) { + compareOutput = compareTestOutput(cd); + overallTCResult &= compareOutput; + } + + cd.setExitCode(CommandExecutor.getLastExitCode()); + cd.setActualOutput(CommandExecutor.getLastCommandOutput()); + cd.setTestResult(compareOutput); + } + testdata.setTestResult(overallTCResult); + + // Execute the cleanup commands + ArrayList cleanupCommands = testdata.getCleanupCommands(); + for (TestCmd cmd : cleanupCommands) { + try { + CommandExecutor.executeCommand(cmd, namenode, jobtracker); + } catch (Exception e) { + fail(StringUtils.stringifyException(e)); + } + } + } + } + + /* + * Parser class for the test config xml file + */ + static class TestConfigFileParser extends DefaultHandler { + String charString = null; + CLITestData td = null; + ArrayList testCommands = null; + ArrayList cleanupCommands = null; + + @Override + public void startDocument() throws SAXException { + testsFromConfigFile = new ArrayList(); + } + + @Override + public void startElement(String uri, + String localName, + String qName, + Attributes attributes) throws SAXException { + if (qName.equals("test")) { + td = new CLITestData(); + } else if (qName.equals("test-commands")) { + testCommands = new ArrayList(); + } else if (qName.equals("cleanup-commands")) { + cleanupCommands = new ArrayList(); + } else if (qName.equals("comparators")) { + testComparators = new ArrayList(); + } else if (qName.equals("comparator")) { + comparatorData = new ComparatorData(); + } + charString = ""; + } + + @Override + public void endElement(String uri, + String localName, + String qName) throws SAXException { + if (qName.equals("description")) { + td.setTestDesc(charString); + } else if (qName.equals("test-commands")) { + td.setTestCommands(testCommands); + testCommands = null; + } else if (qName.equals("cleanup-commands")) { + td.setCleanupCommands(cleanupCommands); + cleanupCommands = null; + } else if (qName.equals("command")) { + if (testCommands != null) { + testCommands.add(new TestCmd(charString, CommandType.FS)); + } else if (cleanupCommands != null) { + cleanupCommands.add(new TestCmd(charString, CommandType.FS)); + } + } else if (qName.equals("dfs-admin-command")) { + if (testCommands != null) { + testCommands.add(new TestCmd(charString,CommandType.DFSADMIN)); + } else if (cleanupCommands != null) { + cleanupCommands.add(new TestCmd(charString, CommandType.DFSADMIN)); + } + } else if (qName.equals("mr-admin-command")) { + if (testCommands != null) { + testCommands.add(new TestCmd(charString,CommandType.MRADMIN)); + } else if (cleanupCommands != null) { + cleanupCommands.add(new TestCmd(charString, CommandType.MRADMIN)); + } + } else if (qName.equals("comparators")) { + td.setComparatorData(testComparators); + } else if (qName.equals("comparator")) { + testComparators.add(comparatorData); + } else if (qName.equals("type")) { + comparatorData.setComparatorType(charString); + } else if (qName.equals("expected-output")) { + comparatorData.setExpectedOutput(charString); + } else if (qName.equals("test")) { + testsFromConfigFile.add(td); + td = null; + } else if (qName.equals("mode")) { + testMode = charString; + if (!testMode.equals(TESTMODE_NOCOMPARE) && + !testMode.equals(TESTMODE_TEST)) { + testMode = TESTMODE_TEST; + } + } + } + + @Override + public void characters(char[] ch, + int start, + int length) throws SAXException { + String s = new String(ch, start, length); + charString += s; + } + } +} diff --git a/src/test/org/apache/hadoop/cli/clitest_data/data120bytes 
b/src/test/org/apache/hadoop/cli/clitest_data/data120bytes new file mode 100644 index 0000000..0949b06 --- /dev/null +++ b/src/test/org/apache/hadoop/cli/clitest_data/data120bytes @@ -0,0 +1,8 @@ +12345678901234 +12345678901234 +12345678901234 +12345678901234 +12345678901234 +12345678901234 +12345678901234 +12345678901234 diff --git a/src/test/org/apache/hadoop/cli/clitest_data/data15bytes b/src/test/org/apache/hadoop/cli/clitest_data/data15bytes new file mode 100644 index 0000000..baf7fea --- /dev/null +++ b/src/test/org/apache/hadoop/cli/clitest_data/data15bytes @@ -0,0 +1 @@ +12345678901234 diff --git a/src/test/org/apache/hadoop/cli/clitest_data/data30bytes b/src/test/org/apache/hadoop/cli/clitest_data/data30bytes new file mode 100644 index 0000000..8fc6073 --- /dev/null +++ b/src/test/org/apache/hadoop/cli/clitest_data/data30bytes @@ -0,0 +1,2 @@ +12345678901234 +12345678901234 diff --git a/src/test/org/apache/hadoop/cli/clitest_data/data60bytes b/src/test/org/apache/hadoop/cli/clitest_data/data60bytes new file mode 100644 index 0000000..fe420a6 --- /dev/null +++ b/src/test/org/apache/hadoop/cli/clitest_data/data60bytes @@ -0,0 +1,4 @@ +12345678901234 +12345678901234 +12345678901234 +12345678901234 diff --git a/src/test/org/apache/hadoop/cli/testConf.xml b/src/test/org/apache/hadoop/cli/testConf.xml new file mode 100644 index 0000000..49bcb9f --- /dev/null +++ b/src/test/org/apache/hadoop/cli/testConf.xml @@ -0,0 +1,3339 @@ + + + + + + test + + + + + + ls: file using absolute path + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -ls /file1 + + + -fs NAMENODE -rm /file1 + + + + TokenComparator + Found 1 items + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1 + + + + + + ls: file using relative path + + -fs NAMENODE -touchz file1 + -fs NAMENODE -ls file1 + + + -fs NAMENODE -rm file1 + + + + TokenComparator + Found 1 items + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file1 + + + + + + ls: files using globbing + + -fs NAMENODE -touchz file1 + -fs NAMENODE -touchz file2 + -fs NAMENODE -touchz file3 + -fs NAMENODE -touchz file4 + -fs NAMENODE -ls file* + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file1 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file2 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file3 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file4 + + + + + + ls: directory using absolute path + + -fs NAMENODE -mkdir /dir1 + -fs NAMENODE -ls / + + + -fs NAMENODE -rmr /dir1 + + + + TokenComparator + Found 1 items + + + RegexpComparator + ^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir1 + + + + + + ls: directory using relative path + + -fs NAMENODE -mkdir dir1 + -fs NAMENODE -ls + + + -fs NAMENODE -rmr dir1 + + + + TokenComparator + Found 1 items + + + RegexpComparator + ^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir1 + + + + + + ls: directory using globbing + + -fs NAMENODE -mkdir 
dir1 + -fs NAMENODE -mkdir dir2 + -fs NAMENODE -mkdir dir3 + -fs NAMENODE -mkdir dir4 + -fs NAMENODE -ls + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir1 + + + RegexpComparator + ^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2 + + + RegexpComparator + ^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir3 + + + RegexpComparator + ^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir4 + + + + + + ls: file/directory that does not exist in / + + -fs NAMENODE -ls /file1 + + + + + + RegexpComparator + ^ls: Cannot access /file1: No such file or directory. + + + + + + ls: file/directory that does not exist in home directory (/user/username) + + -fs NAMENODE -ls /user + + + + + + RegexpComparator + ^ls: Cannot access /user: No such file or directory. + + + + + + + lsr: files/directories using absolute path + + -fs NAMENODE -mkdir /dir0 + -fs NAMENODE -mkdir /dir0/dir1 + -fs NAMENODE -mkdir /dir0/dir1/dir1 + -fs NAMENODE -mkdir /dir0/dir1/dir2 + -fs NAMENODE -mkdir /dir0/dir2 + -fs NAMENODE -mkdir /dir0/dir2/dir1 + -fs NAMENODE -mkdir /dir0/dir2/dir2 + -fs NAMENODE -touchz /dir0/file0 + -fs NAMENODE -touchz /dir0/dir1/file1 + -fs NAMENODE -touchz /dir0/dir1/file2 + -fs NAMENODE -touchz /dir0/dir2/file1 + -fs NAMENODE -touchz /dir0/dir2/file2 + -fs NAMENODE -touchz /dir0/dir1/dir1/file1 + -fs NAMENODE -touchz /dir0/dir1/dir1/file2 + -fs NAMENODE -touchz /dir0/dir2/dir2/file1 + -fs NAMENODE -touchz /dir0/dir2/dir2/file2 + -fs NAMENODE -lsr /dir0 + + + -fs NAMENODE -rmr /dir0 + + + + RegexpComparator + ^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1 + + + RegexpComparator + ^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2 + + + RegexpComparator + ^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/dir1 + + + RegexpComparator + ^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/dir2 + + + RegexpComparator + ^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2/dir1 + + + RegexpComparator + ^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2/dir2 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2/file1 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2/file2 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/dir1/file1 + + + 
RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/dir1/file2 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2/dir2/file1 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2/dir2/file2 + + + + + + lsr: files/directories using relative path + + -fs NAMENODE -mkdir dir0 + -fs NAMENODE -mkdir dir0/dir1 + -fs NAMENODE -mkdir dir0/dir1/dir1 + -fs NAMENODE -mkdir dir0/dir1/dir2 + -fs NAMENODE -mkdir dir0/dir2 + -fs NAMENODE -mkdir dir0/dir2/dir1 + -fs NAMENODE -mkdir dir0/dir2/dir2 + -fs NAMENODE -touchz dir0/file0 + -fs NAMENODE -touchz dir0/dir1/file1 + -fs NAMENODE -touchz dir0/dir1/file2 + -fs NAMENODE -touchz dir0/dir2/file1 + -fs NAMENODE -touchz dir0/dir2/file2 + -fs NAMENODE -touchz dir0/dir1/dir1/file1 + -fs NAMENODE -touchz dir0/dir1/dir1/file2 + -fs NAMENODE -touchz dir0/dir2/dir2/file1 + -fs NAMENODE -touchz dir0/dir2/dir2/file2 + -fs NAMENODE -lsr dir0 + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1 + + + RegexpComparator + ^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir2 + + + RegexpComparator + ^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/dir1 + + + RegexpComparator + ^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/dir2 + + + RegexpComparator + ^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir2/dir1 + + + RegexpComparator + ^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir2/dir2 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir2/file1 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir2/file2 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/dir1/file1 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/dir1/file2 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir2/dir2/file1 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir2/dir2/file2 + + + + + + lsr: files/directories using globbing + + 
-fs NAMENODE -mkdir dir0 + -fs NAMENODE -mkdir dir0/dir1 + -fs NAMENODE -mkdir dir0/dir1/dir1 + -fs NAMENODE -mkdir dir0/dir1/dir2 + -fs NAMENODE -mkdir dir0/dir2 + -fs NAMENODE -mkdir dir0/dir2/dir1 + -fs NAMENODE -mkdir dir0/dir2/dir2 + -fs NAMENODE -touchz dir0/file0 + -fs NAMENODE -touchz dir0/dir1/file1 + -fs NAMENODE -touchz dir0/dir1/file2 + -fs NAMENODE -touchz dir0/dir2/file1 + -fs NAMENODE -touchz dir0/dir2/file2 + -fs NAMENODE -touchz dir0/dir1/dir1/file1 + -fs NAMENODE -touchz dir0/dir1/dir1/file2 + -fs NAMENODE -touchz dir0/dir2/dir2/file1 + -fs NAMENODE -touchz dir0/dir2/dir2/file2 + -fs NAMENODE -lsr dir0/* + + + -fs NAMENODE -rmr /user + + + + + RegexpComparator + ^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/dir1 + + + RegexpComparator + ^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/dir2 + + + RegexpComparator + ^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir2/dir1 + + + RegexpComparator + ^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir2/dir2 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir2/file1 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir2/file2 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/dir1/file1 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/dir1/file2 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir2/dir2/file1 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir2/dir2/file2 + + + + + + lsr: file/directory that does not exist in / + + -fs NAMENODE -lsr /file1 + + + + + + RegexpComparator + ^lsr: Cannot access /file1: No such file or directory. + + + + + + lsr: file/directory that does not exist in home directory (/user/username) + + -fs NAMENODE -lsr /user + + + + + + RegexpComparator + ^lsr: Cannot access /user: No such file or directory. 
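+      <!-- Illustrative sketch, not an actual test case: every entry in this file follows the
+           structure that TestConfigFileParser in TestCLI.java parses, namely a description,
+           a block of test commands (command, dfs-admin-command or mr-admin-command), an
+           optional block of cleanup commands, and one or more comparators, each with a type
+           and an expected output. The tokens NAMENODE, JOBTRACKER, CLITEST_DATA and USERNAME
+           are substituted by TestCLI.expandCommand() before a command is run. Using those
+           element names, and a purely hypothetical file name, a minimal entry would look
+           roughly like:
+
+           <test>
+             <description>ls: zero length file created with touchz (illustrative only)</description>
+             <test-commands>
+               <command>-fs NAMENODE -touchz /exampleFile</command>
+               <command>-fs NAMENODE -ls /exampleFile</command>
+             </test-commands>
+             <cleanup-commands>
+               <command>-fs NAMENODE -rm /exampleFile</command>
+             </cleanup-commands>
+             <comparators>
+               <comparator>
+                 <type>TokenComparator</type>
+                 <expected-output>Found 1 items</expected-output>
+               </comparator>
+             </comparators>
+           </test>
+      -->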
+ + + + + + + du: file using absolute path + + -fs NAMENODE -put CLITEST_DATA/data15bytes /data15bytes + -fs NAMENODE -du /data15bytes + + + -fs NAMENODE -rm /data15bytes + + + + TokenComparator + Found 1 items + + + RegexpComparator + ^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/data15bytes + + + + + + du: file using relative path + + -fs NAMENODE -put CLITEST_DATA/data15bytes data15bytes + -fs NAMENODE -du data15bytes + + + -fs NAMENODE -rmr /user + + + + TokenComparator + Found 1 items + + + RegexpComparator + ^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/data15bytes + + + + + + du: files using globbing + + -fs NAMENODE -put CLITEST_DATA/data15bytes data15bytes + -fs NAMENODE -put CLITEST_DATA/data30bytes data30bytes + -fs NAMENODE -put CLITEST_DATA/data60bytes data60bytes + -fs NAMENODE -put CLITEST_DATA/data120bytes data120bytes + -fs NAMENODE -du data* + + + -fs NAMENODE -rmr /user + + + + TokenComparator + Found 4 items + + + RegexpComparator + ^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/data15bytes + + + RegexpComparator + ^30( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/data30bytes + + + RegexpComparator + ^60( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/data60bytes + + + RegexpComparator + ^120( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/data120bytes + + + + + + du: directory using absolute path + + -fs NAMENODE -mkdir /dir0 + -fs NAMENODE -put CLITEST_DATA/data15bytes /dir0/data15bytes + -fs NAMENODE -du /dir0 + + + -fs NAMENODE -rmr /dir0 + + + + TokenComparator + Found 1 items + + + RegexpComparator + ^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data15bytes + + + + + + du: directory using relative path + + -fs NAMENODE -mkdir dir0 + -fs NAMENODE -put CLITEST_DATA/data15bytes dir0/data15bytes + -fs NAMENODE -du dir0 + + + -fs NAMENODE -rmr /user + + + + TokenComparator + Found 1 items + + + RegexpComparator + ^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0/data15bytes + + + + + + du: directory using globbing + + -fs NAMENODE -mkdir /dir0 + -fs NAMENODE -put CLITEST_DATA/data15bytes /dir0/data15bytes + -fs NAMENODE -put CLITEST_DATA/data30bytes /dir0/data30bytes + -fs NAMENODE -put CLITEST_DATA/data60bytes /dir0/data60bytes + -fs NAMENODE -put CLITEST_DATA/data120bytes /dir0/data120bytes + -fs NAMENODE -du /dir0/* + + + -fs NAMENODE -rmr /dir0 + + + + TokenComparator + Found 4 items + + + RegexpComparator + ^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data15bytes + + + RegexpComparator + ^30( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data30bytes + + + RegexpComparator + ^60( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data60bytes + + + RegexpComparator + ^120( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data120bytes + + + + + + + dus: directories/files using absolute path + + -fs NAMENODE -mkdir /dir0 + -fs NAMENODE -mkdir /dir0/dir1 + -fs NAMENODE -mkdir /dir0/dir1/dir1 + -fs NAMENODE -mkdir /dir0/dir1/dir2 + -fs NAMENODE -mkdir /dir0/dir2 + -fs NAMENODE -mkdir /dir0/dir2/dir1 + -fs NAMENODE -mkdir /dir0/dir2/dir2 + -fs NAMENODE -touchz /dir0/file0 + -fs NAMENODE -put CLITEST_DATA/data15bytes /dir0/dir1/data15bytes + -fs NAMENODE -put CLITEST_DATA/data30bytes /dir0/dir1/data30bytes + -fs NAMENODE -put CLITEST_DATA/data15bytes /dir0/dir2/data15bytes + -fs NAMENODE -put CLITEST_DATA/data30bytes /dir0/dir2/data30bytes + -fs NAMENODE -put CLITEST_DATA/data60bytes /dir0/dir1/dir1/data60bytes + -fs NAMENODE -put CLITEST_DATA/data120bytes /dir0/dir1/dir2/data120bytes + -fs NAMENODE -put CLITEST_DATA/data60bytes /dir0/dir2/dir1/data60bytes + -fs NAMENODE 
-put CLITEST_DATA/data120bytes /dir0/dir2/dir2/data120bytes + -fs NAMENODE -dus /dir0 + + + -fs NAMENODE -rmr /dir0 + + + + RegexpComparator + ^hdfs://localhost[.a-z]*:[0-9]*/dir0( |\t)*450 + + + + + + dus: directories/files using relative path + + -fs NAMENODE -mkdir dir0 + -fs NAMENODE -mkdir dir0/dir1 + -fs NAMENODE -mkdir dir0/dir1/dir1 + -fs NAMENODE -mkdir dir0/dir1/dir2 + -fs NAMENODE -mkdir dir0/dir2 + -fs NAMENODE -mkdir dir0/dir2/dir1 + -fs NAMENODE -mkdir dir0/dir2/dir2 + -fs NAMENODE -touchz dir0/file0 + -fs NAMENODE -put CLITEST_DATA/data15bytes dir0/dir1/data15bytes + -fs NAMENODE -put CLITEST_DATA/data30bytes dir0/dir1/data30bytes + -fs NAMENODE -put CLITEST_DATA/data15bytes dir0/dir2/data15bytes + -fs NAMENODE -put CLITEST_DATA/data30bytes dir0/dir2/data30bytes + -fs NAMENODE -put CLITEST_DATA/data60bytes dir0/dir1/dir1/data60bytes + -fs NAMENODE -put CLITEST_DATA/data120bytes dir0/dir1/dir2/data120bytes + -fs NAMENODE -put CLITEST_DATA/data60bytes dir0/dir2/dir1/data60bytes + -fs NAMENODE -put CLITEST_DATA/data120bytes dir0/dir2/dir2/data120bytes + -fs NAMENODE -dus dir0 + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0( |\t)*450 + + + + + + dus: directories/files using globbing + + -fs NAMENODE -mkdir /dir0 + -fs NAMENODE -mkdir /dir0/dir1 + -fs NAMENODE -mkdir /dir0/dir1/dir1 + -fs NAMENODE -mkdir /dir0/dir1/dir2 + -fs NAMENODE -mkdir /dir0/dir2 + -fs NAMENODE -mkdir /dir0/dir2/dir1 + -fs NAMENODE -mkdir /dir0/dir2/dir2 + -fs NAMENODE -touchz /dir0/file0 + -fs NAMENODE -put CLITEST_DATA/data15bytes /dir0/dir1/data15bytes + -fs NAMENODE -put CLITEST_DATA/data30bytes /dir0/dir1/data30bytes + -fs NAMENODE -put CLITEST_DATA/data15bytes /dir0/dir2/data15bytes + -fs NAMENODE -put CLITEST_DATA/data30bytes /dir0/dir2/data30bytes + -fs NAMENODE -put CLITEST_DATA/data60bytes /dir0/dir1/dir1/data60bytes + -fs NAMENODE -put CLITEST_DATA/data120bytes /dir0/dir1/dir2/data120bytes + -fs NAMENODE -put CLITEST_DATA/data60bytes /dir0/dir2/dir1/data60bytes + -fs NAMENODE -put CLITEST_DATA/data120bytes /dir0/dir2/dir2/data120bytes + -fs NAMENODE -mkdir /donotcountdir0 + -fs NAMENODE -put CLITEST_DATA/data15bytes /donotcountdir0/data15bytes + -fs NAMENODE -put CLITEST_DATA/data15bytes /donotcountdir0/data15bytes + -fs NAMENODE -put CLITEST_DATA/data15bytes /donotcountdir0/data15bytes + -fs NAMENODE -put CLITEST_DATA/data15bytes /donotcountdir0/data15bytes + -fs NAMENODE -dus /dir* + + + -fs NAMENODE -rmr /dir0 + -fs NAMENODE -rmr /donotcountdir0 + + + + RegexpComparator + ^hdfs://localhost[.a-z]*:[0-9]*/dir0( |\t)*450 + + + + + + + mv: file (absolute path) to file (absolute path) + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -mv /file1 /file2 + -fs NAMENODE -ls /file* + + + -fs NAMENODE -rm /file2 + : + + + TokenComparator + Found 1 items + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file[^1] + + + + + + mv: file (absolute path) to file (relative path) + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -mv /file1 file2 + + + -fs NAMENODE -rmr /file1 + + + + RegexpComparator + ^mv: Failed to rename hdfs://localhost[.a-z]*:[0-9]*/file1 to file2 + + + + + + mv: file (absolute path) to directory (absolute path); keep the same name at the destination + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -mkdir /dir0 + -fs NAMENODE -mv 
/file1 /dir0 + -fs NAMENODE -lsr /dir0 + + + -fs NAMENODE -rmr /dir0 + + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1 + + + + + + mv: file (absolute path) to directory (absolute path); keep the same name at the destination [ TIED to previous test ] + + -fs NAMENODE -ls /file1 + + + + + + TokenComparator + ls: Cannot access /file1: No such file or directory. + + + + + + mv: file (absolute path) to directory (absolute path); change the name at the destination + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -mkdir /dir0 + -fs NAMENODE -mv /file1 /dir0/file2 + -fs NAMENODE -ls /dir0 + + + -fs NAMENODE -rmr /dir0 + + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2 + + + + + + mv: file (absolute path) to directory (absolute path); change the name at the destination [ TIED to previous test ] + + -fs NAMENODE -ls /file1 + + + + + + TokenComparator + ls: Cannot access /file1: No such file or directory. + + + + + + mv: files (absolute path) to directory (absolute path) using globbing + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -touchz /file2 + -fs NAMENODE -touchz /file3 + -fs NAMENODE -touchz /file4 + -fs NAMENODE -mkdir /dir0 + -fs NAMENODE -mv /file* /dir0 + -fs NAMENODE -lsr /* + + + -fs NAMENODE -rmr /dir0 + + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file3 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file4 + + + + + + mv: files (absolute path) to directory (absolute path) using globbing [ TIED to previous test ] + + -fs NAMENODE -ls /file* + + + + + + TokenComparator + ls: Cannot access /file*: No such file or directory. 
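Each command string in these mv tests, for example "-fs NAMENODE -mv /file1 /file2", is ultimately handed to FsShell through ToolRunner, with NAMENODE replaced by the URI of the test file system; that is what CommandExecutor.executeFSCommand() later in this patch does. A minimal sketch of that invocation follows; the namenode URI is a placeholder, not the address the real harness substitutes.

import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

// Sketch only: runs one mv command the way the CLI test driver does. The namenode URI
// below is a placeholder for whatever NAMENODE resolves to in the real test run.
public class FsShellInvocationSketch {
  public static void main(String[] args) throws Exception {
    String namenode = "hdfs://localhost:8020";
    String[] shellArgs = {"-fs", namenode, "-mv", "/file1", "/file2"};
    int exitCode = ToolRunner.run(new FsShell(), shellArgs);
    System.out.println("FsShell exit code: " + exitCode);
  }
}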
+ + + + + + mv: file (relative) to file (relative) + + -fs NAMENODE -touchz file1 + -fs NAMENODE -mv file1 file2 + -fs NAMENODE -ls file* + + + -fs NAMENODE -rm /user + + + + TokenComparator + Found 1 items + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file2 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file[^1] + + + + + + + cp: file (absolute path) to file (absolute path) + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -cp /file1 /file2 + -fs NAMENODE -ls /file* + + + -fs NAMENODE -rm /file* + : + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2 + + + + + + cp: file (absolute path) to file (relative path) + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -cp /file1 file2 + -fs NAMENODE -ls /file1 file2 + + + -fs NAMENODE -rmr /file1 file2 + + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file2 + + + + + + cp: file (relative path) to file (absolute path) + + -fs NAMENODE -touchz file1 + -fs NAMENODE -cp file1 /file2 + -fs NAMENODE -ls file1 /file2 + + + -fs NAMENODE -rmr file1 /file2 + + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file1 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2 + + + + + + cp: file (relative path) to file (relative path) + + -fs NAMENODE -touchz file1 + -fs NAMENODE -cp file1 file2 + -fs NAMENODE -ls file1 file2 + + + -fs NAMENODE -rmr file1 file2 + + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file1 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file2 + + + + + + cp: file (absolute path) to directory (absolute path); keep the same name at the destination + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -mkdir /dir0 + -fs NAMENODE -cp /file1 /dir0 + -fs NAMENODE -ls /file1 /dir0 + + + -fs NAMENODE -rmr /dir0 + + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1 + + + + + + cp: file (absolute path) to directory (absolute path); change the name at the destination + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -mkdir /dir0 + -fs NAMENODE -cp /file1 /dir0/file2 + -fs NAMENODE -ls /file1 /dir0 + + + -fs NAMENODE -rmr /dir0 + + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2 + + + + + + cp: files to directory (absolute path) using globbing + + -fs NAMENODE -touchz 
/file1 + -fs NAMENODE -touchz /file2 + -fs NAMENODE -touchz /file3 + -fs NAMENODE -touchz /file4 + -fs NAMENODE -mkdir /dir0 + -fs NAMENODE -cp /file* /dir0 + -fs NAMENODE -lsr /* + + + -fs NAMENODE -rmr /dir0 + + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file3 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file4 + + + + + + cp: files to directory (absolute path) without globbing + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -touchz /file2 + -fs NAMENODE -touchz /file3 + -fs NAMENODE -touchz /file4 + -fs NAMENODE -mkdir /dir0 + -fs NAMENODE -cp /file1 /file2 /file3 /file4 /dir0 + -fs NAMENODE -lsr /* + + + -fs NAMENODE -rmr /dir0 + + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file3 + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file4 + + + + + + cp: copying non existent file (absolute path) + + -fs NAMENODE -cp /file /file1 + + + -fs NAMENODE -rmr /user + : + + + RegexpComparator + ^cp: File does not exist: /file + + + + + + cp: copying non existent file (relative path) + + -fs NAMENODE -cp file1 file2 + + + -fs NAMENODE -rmr /user + : + + + RegexpComparator + ^cp: File does not exist: file1 + + + + + + cp: files to an existent file using globbing + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -touchz /file2 + -fs NAMENODE -touchz /file3 + -fs NAMENODE -touchz /file4 + -fs NAMENODE -touchz /file5 + -fs NAMENODE -cp /file* /file5 + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^cp: When copying multiple files, destination should be a directory. 
+ + + + + + cp: files to an existent file without globbing + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -touchz /file2 + -fs NAMENODE -touchz /file3 + -fs NAMENODE -touchz /file4 + -fs NAMENODE -touchz /file5 + -fs NAMENODE -cp /file1 /file2 /file3 /file4 /file5 + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^cp: When copying multiple files, destination /file5 should be a directory. + + + + + + cp: files to a non existent directory using globbing + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -touchz /file2 + -fs NAMENODE -touchz /file3 + -fs NAMENODE -touchz /file4 + -fs NAMENODE -cp /file* dir + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^cp: When copying multiple files, destination should be a directory. + + + + + + cp: files to a non existent directory without globbing + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -touchz /file2 + -fs NAMENODE -touchz /file3 + -fs NAMENODE -touchz /file4 + -fs NAMENODE -cp /file1 /file2 /file3 /file4 dir + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^cp: When copying multiple files, destination dir should be a directory. + + + + + + + rm: removing a file (absolute path) + + -fs NAMENODE -touchz /dir0/file0 + -fs NAMENODE -rm /dir0/file0 + + + -fs NAMENODE -rm /user + + + + RegexpComparator + ^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir0/file0 + + + + + + rm: removing a file (relative path) + + -fs NAMENODE -touchz file0 + -fs NAMENODE -rm file0 + + + -fs NAMENODE -rm /user + + + + RegexpComparator + ^Deleted hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file0 + + + + + + rm: removing files by globbing (absolute path) + + -fs NAMENODE -touchz /dir0/file0 + -fs NAMENODE -touchz /dir0/file1 + -fs NAMENODE -touchz /dir0/file2 + -fs NAMENODE -touchz /dir0/file3 + -fs NAMENODE -rm /dir0/file* + + + -fs NAMENODE -rm /user + + + + RegexpComparator + ^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir0/file0 + + + RegexpComparator + ^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir0/file1 + + + RegexpComparator + ^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir0/file2 + + + RegexpComparator + ^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir0/file3 + + + + + + rm: removing files by globbing (relative path) + + -fs NAMENODE -touchz file0 + -fs NAMENODE -touchz file1 + -fs NAMENODE -touchz file2 + -fs NAMENODE -touchz file3 + -fs NAMENODE -rm file* + + + -fs NAMENODE -rm /user + + + + RegexpComparator + ^Deleted hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file0 + + + RegexpComparator + ^Deleted hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file1 + + + RegexpComparator + ^Deleted hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file2 + + + RegexpComparator + ^Deleted hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file3 + + + + + + rm: removing a directory (absolute path) + + -fs NAMENODE mkdir /dir0 + -fs NAMENODE -rm /dir0 + + + -fs NAMENODE -rm /user + + + + RegexpComparator + ^rm: Cannot remove directory "hdfs://localhost[.a-z]*:[0-9]*/dir0", use -rmr instead + + + + + + rm: removing a directory (relative path) + + -fs NAMENODE mkdir dir0 + -fs NAMENODE -rm dir0 + + + -fs NAMENODE -rm /user + + + + RegexpComparator + ^rm: cannot remove dir0: No such file or directory. + + + + + + rm: removing a nonexistent file (absolute path) + + -fs NAMENODE -rm /dir0/file0 + + + -fs NAMENODE -rm /user + + + + RegexpComparator + ^rm: cannot remove /dir0/file0: No such file or directory. + + + + + + rm: removing a nonexistent file (relative path) + + -fs NAMENODE -rm file0 + + + -fs NAMENODE -rm /user + + + + RegexpComparator + ^rm: cannot remove file0: No such file or directory. 
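Throughout the rm cases (and the other command families in this file) the relative-path variants expect results under /user/<username> while the absolute-path variants do not. That is because FsShell resolves a bare path against the user's home directory, which is the default working directory on the target file system. A small sketch of that resolution, assuming an HDFS instance is reachable at the placeholder URI:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch only: shows how a relative path is qualified against the home directory.
// Assumes a namenode is running at the placeholder URI; nothing is created or deleted.
public class RelativePathSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.default.name", "hdfs://localhost:8020");
    FileSystem fs = FileSystem.get(conf);
    // Typically hdfs://localhost:8020/user/<username>
    System.out.println("home directory: " + fs.getHomeDirectory());
    // A bare "file0" is qualified to hdfs://localhost:8020/user/<username>/file0
    System.out.println("qualified path: " + fs.makeQualified(new Path("file0")));
  }
}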
+ + + + + + + rmr: removing a file (absolute path) + + -fs NAMENODE -touchz /dir0/file0 + -fs NAMENODE -rmr /dir0/file0 + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir0/file0 + + + + + + rmr: removing a file (relative path) + + -fs NAMENODE -touchz file0 + -fs NAMENODE -rmr file0 + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^Deleted hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file0 + + + + + + rmr: removing a directory (absolute path) + + -fs NAMENODE -touchz /dir0 + -fs NAMENODE -rmr /dir0 + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir0 + + + + + + rmr: removing a directory (relative path) + + -fs NAMENODE -mkdir dir0 + -fs NAMENODE -rmr dir0 + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^Deleted hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0 + + + + + + rmr: removing directories by globbing (absolute path) + + -fs NAMENODE -mkdir /dir0 + -fs NAMENODE -mkdir /dir1 + -fs NAMENODE -mkdir /dir2 + -fs NAMENODE -mkdir /dir3 + -fs NAMENODE -rmr /dir* + + + -fs NAMENODE -rm /user + + + + RegexpComparator + ^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir0 + + + RegexpComparator + ^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir1 + + + RegexpComparator + ^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir2 + + + RegexpComparator + ^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir3 + + + + + + rmr: removing directories by globbing (relative path) + + -fs NAMENODE -mkdir dir0 + -fs NAMENODE -mkdir dir1 + -fs NAMENODE -mkdir dir2 + -fs NAMENODE -mkdir dir3 + -fs NAMENODE -rmr dir* + + + -fs NAMENODE -rm /user + + + + RegexpComparator + ^Deleted hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0 + + + RegexpComparator + ^Deleted hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir1 + + + RegexpComparator + ^Deleted hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir2 + + + RegexpComparator + ^Deleted hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir3 + + + + + + rmr: removing a nonexistent file (absolute path) + + -fs NAMENODE -rmr /dir0/file0 + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^rmr: cannot remove /dir0/file0: No such file or directory. + + + + + + rmr: removing a nonexistent file (relative path) + + -fs NAMENODE -rmr file0 + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^rmr: cannot remove file0: No such file or directory. 
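The rm tests above expect a directory to be refused with "use -rmr instead", while the rmr tests expect the same kinds of paths to be removed. At the FileSystem API level the distinction comes down to the recursive flag passed to delete(); the sketch below illustrates that flag only, it is not the exact FsShell code path, and it assumes a running HDFS at the placeholder URI with invented paths.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch only: contrasts non-recursive and recursive deletes on a small tree.
// Paths and the namenode URI are placeholders.
public class RmVersusRmrSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.default.name", "hdfs://localhost:8020");
    FileSystem fs = FileSystem.get(conf);

    Path dir = new Path("/dir0");
    fs.mkdirs(dir);
    fs.create(new Path(dir, "file0")).close();

    boolean removed;
    try {
      // Non-recursive delete: expected to be rejected for a non-empty directory.
      removed = fs.delete(dir, false);
    } catch (IOException e) {
      removed = false;
      System.out.println("non-recursive delete rejected: " + e.getMessage());
    }
    System.out.println("non-recursive delete succeeded: " + removed);

    // Recursive delete removes the directory and everything under it.
    System.out.println("recursive delete succeeded: " + fs.delete(dir, true));
  }
}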
+ + + + + + + + + + put: putting file into a file (absolute path) + + -fs NAMENODE -put CLITEST_DATA/data15bytes /data15bytes + -fs NAMENODE -du /data15bytes + + + -fs NAMENODE -rm /data15bytes + + + + TokenComparator + Found 1 items + + + RegexpComparator + ^15( |\t)*hdfs://localhost.*:[0-9]*/data15bytes + + + + + + put: putting file into a file (relative path) + + -fs NAMENODE -put CLITEST_DATA/data15bytes data15bytes + -fs NAMENODE -du data15bytes + + + -fs NAMENODE -rmr /user + + + + TokenComparator + Found 1 items + + + RegexpComparator + ^15( |\t)*hdfs://localhost.*:[0-9]*/user/[a-z]*/data15bytes + + + + + + put: putting file into a directory(absolute path) + + -fs NAMENODE -put CLITEST_DATA /dir0/dir1/data + -fs NAMENODE -du /dir0/dir1/data + + + -fs NAMENODE -rmr /dir0 + + + + RegexpComparator + ^15( |\t)*hdfs://localhost.*:[0-9]*/dir0/dir1/data/data15bytes + + + RegexpComparator + ^30( |\t)*hdfs://localhost.*:[0-9]*/dir0/dir1/data/data30bytes + + + RegexpComparator + ^60( |\t)*hdfs://localhost.*:[0-9]*/dir0/dir1/data/data60bytes + + + RegexpComparator + ^120( |\t)*hdfs://localhost.*:[0-9]*/dir0/dir1/data/data120bytes + + + + + + put: putting file into a directory(relative path) + + -fs NAMENODE -put CLITEST_DATA dir0/dir1/data + -fs NAMENODE -du dir0/dir1/data + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^15( |\t)*hdfs://localhost.*:[0-9]*/user/[a-z]*/dir0/dir1/data/data15bytes + + + RegexpComparator + ^30( |\t)*hdfs://localhost.*:[0-9]*/user/[a-z]*/dir0/dir1/data/data30bytes + + + RegexpComparator + ^60( |\t)*hdfs://localhost.*:[0-9]*/user/[a-z]*/dir0/dir1/data/data60bytes + + + RegexpComparator + ^120( |\t)*hdfs://localhost.*:[0-9]*/user/[a-z]*/dir0/dir1/data/data120bytes + + + + + + put: putting many files into an existing directory(absolute path) + + -fs NAMENODE -mkdir /dir0 + -fs NAMENODE -put CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes /dir0 + -fs NAMENODE -du /dir0 + + + -fs NAMENODE -rmr /user + + + + TokenComparator + Found 2 items + + + RegexpComparator + ^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data15bytes + + + RegexpComparator + ^30( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data30bytes + + + + + + put: putting many files into an existing directory(relative path) + + -fs NAMENODE -mkdir dir0 + -fs NAMENODE -put CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes dir0 + -fs NAMENODE -du dir0 + + + -fs NAMENODE -rmr /user + + + + TokenComparator + Found 2 items + + + RegexpComparator + ^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0/data15bytes + + + RegexpComparator + ^30( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0/data30bytes + + + + + + put: putting non existent file(absolute path) + + -fs NAMENODE -put /user/wrongdata file + + + -fs NAMENODE -rmr /user + + + + TokenComparator + put: File /user/wrongdata does not exist + + + + + + put: putting non existent file(relative path) + + -fs NAMENODE -put wrongdata file + + + -fs NAMENODE -rmr /user + + + + TokenComparator + put: File wrongdata does not exist + + + + + + put: putting file into an already existing destination(absolute path) + + -fs NAMENODE -touchz /user/file0 + -fs NAMENODE -put CLITEST_DATA/data15bytes /user/file0 + + + -fs NAMENODE -rmr /user + + + + TokenComparator + put: Target /user/file0 already exists + + + + + + put: putting file into an already existing destination(relative path) + + -fs NAMENODE -touchz file0 + -fs NAMENODE -put CLITEST_DATA/data15bytes file0 + + + -fs NAMENODE -rmr /user + + + + TokenComparator + put: Target file0 already exists + + + + + + 
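The put and du cases above typically pair a TokenComparator (to find the "Found N items" header) with RegexpComparators for the per-file lines. Both comparator classes are added later in this patch; a small usage sketch against an invented du output follows, with placeholder URI and user name.

import org.apache.hadoop.cli.util.RegexpComparator;
import org.apache.hadoop.cli.util.TokenComparator;

// Sketch only: shows how the two comparator types divide the work for a du/put check.
// TokenComparator looks for its token anywhere in the captured output, while
// RegexpComparator must full-match one output line. The sample output is invented.
public class ComparatorUsageSketch {
  public static void main(String[] args) {
    String actual = "Found 1 items\n"
        + "15        hdfs://localhost:8020/user/jgray/data15bytes\n";

    boolean tokenOk = new TokenComparator().compare(actual, "Found 1 items");
    boolean regexOk = new RegexpComparator().compare(actual,
        "^15( |\\t)*hdfs://localhost.*:[0-9]*/user/[a-z]*/data15bytes");

    System.out.println("TokenComparator:  " + tokenOk);   // true
    System.out.println("RegexpComparator: " + regexOk);   // true
  }
}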
put: putting many files into an existing file + + -fs NAMENODE -put CLITEST_DATA/data15bytes /data15bytes + -fs NAMENODE -put CLITEST_DATA/data30bytes /data30bytes + -fs NAMENODE -touchz file0 + -fs NAMENODE -put /data15bytes /data30bytes file0 + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^put: copying multiple files, but last argument `file0' is not a directory + + + + + + put: putting many files into a non existent directory + + -fs NAMENODE -put CLITEST_DATA/data15bytes /data15bytes + -fs NAMENODE -put CLITEST_DATA/data30bytes /data30bytes + -fs NAMENODE -put /data15bytes /data30bytes wrongdir + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^put: `wrongdir': specified destination directory doest not exist + + + + + + + copyFromLocal: copying file into a file (absolute path) + + -fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes /data15bytes + -fs NAMENODE -du /data15bytes + + + -fs NAMENODE -rm /data15bytes + + + + TokenComparator + Found 1 items + + + RegexpComparator + ^15( |\t)*hdfs://localhost.*:[0-9]*/data15bytes + + + + + + copyFromLocal: copying file into a file (relative path) + + -fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes data15bytes + -fs NAMENODE -du data15bytes + + + -fs NAMENODE -rmr /user + + + + TokenComparator + Found 1 items + + + RegexpComparator + ^15( |\t)*hdfs://localhost.*:[0-9]*/user/[a-z]*/data15bytes + + + + + + copyFromLocal: copying file into a directory(absolute path) + + -fs NAMENODE -copyFromLocal CLITEST_DATA /dir0/dir1/data + -fs NAMENODE -du /dir0/dir1/data + + + -fs NAMENODE -rmr /dir0 + + + + RegexpComparator + ^15( |\t)*hdfs://localhost.*:[0-9]*/dir0/dir1/data/data15bytes + + + RegexpComparator + ^30( |\t)*hdfs://localhost.*:[0-9]*/dir0/dir1/data/data30bytes + + + RegexpComparator + ^60( |\t)*hdfs://localhost.*:[0-9]*/dir0/dir1/data/data60bytes + + + RegexpComparator + ^120( |\t)*hdfs://localhost.*:[0-9]*/dir0/dir1/data/data120bytes + + + + + + copyFromLocal: copying file into a directory(relative path) + + -fs NAMENODE -copyFromLocal CLITEST_DATA dir0/dir1/data + -fs NAMENODE -du dir0/dir1/data + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^15( |\t)*hdfs://localhost.*:[0-9]*/user/[a-z]*/dir0/dir1/data/data15bytes + + + RegexpComparator + ^30( |\t)*hdfs://localhost.*:[0-9]*/user/[a-z]*/dir0/dir1/data/data30bytes + + + RegexpComparator + ^60( |\t)*hdfs://localhost.*:[0-9]*/user/[a-z]*/dir0/dir1/data/data60bytes + + + RegexpComparator + ^120( |\t)*hdfs://localhost.*:[0-9]*/user/[a-z]*/dir0/dir1/data/data120bytes + + + + + + copyFromLocal: copying many files into an existing directory(absolute path) + + -fs NAMENODE -mkdir /dir0 + -fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes /dir0 + -fs NAMENODE -du /dir0 + + + -fs NAMENODE -rmr /user + + + + TokenComparator + Found 2 items + + + RegexpComparator + ^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data15bytes + + + RegexpComparator + ^30( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data30bytes + + + + + + copyFromLocal: copying many files into an existing directory(relative path) + + -fs NAMENODE -mkdir dir0 + -fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes dir0 + -fs NAMENODE -du dir0 + + + -fs NAMENODE -rmr /user + + + + TokenComparator + Found 2 items + + + RegexpComparator + ^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0/data15bytes + + + RegexpComparator + ^30( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0/data30bytes + + + + + + copyFromLocal: copying non existent file(absolute 
path) + + -fs NAMENODE -copyFromLocal /user/wrongdata file + + + -fs NAMENODE -rmr /user + + + + TokenComparator + copyFromLocal: File /user/wrongdata does not exist + + + + + + copyFromLocal: copying non existent file(relative path) + + -fs NAMENODE -copyFromLocal wrongdata file + + + -fs NAMENODE -rmr /user + + + + TokenComparator + copyFromLocal: File wrongdata does not exist + + + + + + copyFromLocal: copying file into an already existing destination(absolute path) + + -fs NAMENODE -touchz /user/file0 + -fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes /user/file0 + + + -fs NAMENODE -rmr /user + + + + TokenComparator + copyFromLocal: Target /user/file0 already exists + + + + + + copyFromLocal: copying file into an already existing destination(relative path) + + -fs NAMENODE -touchz file0 + -fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes file0 + + + -fs NAMENODE -rmr /user + + + + TokenComparator + copyFromLocal: Target file0 already exists + + + + + + copyFromLocal: copying many files into an existing file + + -fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes /data15bytes + -fs NAMENODE -copyFromLocal CLITEST_DATA/data30bytes /data30bytes + -fs NAMENODE -touchz file0 + -fs NAMENODE -copyFromLocal /data15bytes /data30bytes file0 + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^copyFromLocal: copying multiple files, but last argument `file0' is not a directory + + + + + + copyFromLocal: copying many files into a non existent directory + + -fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes /data15bytes + -fs NAMENODE -copyFromLocal CLITEST_DATA/data30bytes /data30bytes + -fs NAMENODE -copyFromLocal /data15bytes /data30bytes wrongdir + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^copyFromLocal: `wrongdir': specified destination directory doest not exist + + + + + + + get: getting non existent(absolute path) + + -fs NAMENODE -get /user/file CLITEST_DATA/file + + + -fs NAMENODE -rmr /user + + + + TokenComparator + get: null + + + + + + get: getting non existent file(relative path) + + -fs NAMENODE -get file CLITEST_DATA/file + + + -fs NAMENODE -rmr /user + + + + TokenComparator + get: null + + + + + + + + + + cat: contents of file(absolute path) + + -fs NAMENODE -put CLITEST_DATA/data15bytes /data15bytes + -fs NAMENODE -cat /data15bytes + + + -fs NAMENODE -rm /data15bytes + + + + TokenComparator + 12345678901234 + + + + + + cat: contents of file(relative path) + + -fs NAMENODE -put CLITEST_DATA/data15bytes data15bytes + -fs NAMENODE -cat data15bytes + + + -fs NAMENODE -rmr /user + + + + TokenComparator + 12345678901234 + + + + + + cat: contents of files(absolute path) using globbing + + -fs NAMENODE -mkdir /dir0 + -fs NAMENODE -put CLITEST_DATA/data15bytes /dir0/data15bytes + -fs NAMENODE -put CLITEST_DATA/data30bytes /dir0/data30bytes + -fs NAMENODE -put CLITEST_DATA/data60bytes /dir0/data60bytes + -fs NAMENODE -put CLITEST_DATA/data120bytes /dir0/data120bytes + -fs NAMENODE -cat /dir0/data* + + + -fs NAMENODE -rmr /dir0 + + + + RegexpComparator + 12345678901234.* + + + + + + cat: contents of files(relative path) using globbing + + -fs NAMENODE -mkdir dir0 + -fs NAMENODE -put CLITEST_DATA/data15bytes dir0/data15bytes + -fs NAMENODE -put CLITEST_DATA/data30bytes dir0/data30bytes + -fs NAMENODE -put CLITEST_DATA/data60bytes dir0/data60bytes + -fs NAMENODE -put CLITEST_DATA/data120bytes dir0/data120bytes + -fs NAMENODE -cat dir0/data* + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + 12345678901234.* + + + + + + cat: contents of files(absolute path) 
without globbing + + -fs NAMENODE -mkdir /dir0 + -fs NAMENODE -put CLITEST_DATA/data15bytes /dir0/data15bytes + -fs NAMENODE -put CLITEST_DATA/data30bytes /dir0/data30bytes + -fs NAMENODE -put CLITEST_DATA/data60bytes /dir0/data60bytes + -fs NAMENODE -put CLITEST_DATA/data120bytes /dir0/data120bytes + -fs NAMENODE -cat /dir0/data15bytes /dir0/data30bytes /dir0/data60bytes /dir0/data120bytes + + + -fs NAMENODE -rmr /dir0 + + + + RegexpComparator + 12345678901234.* + + + + + + cat: contents of files(relative path) without globbing + + -fs NAMENODE -mkdir dir0 + -fs NAMENODE -put CLITEST_DATA/data15bytes dir0/data15bytes + -fs NAMENODE -put CLITEST_DATA/data30bytes dir0/data30bytes + -fs NAMENODE -put CLITEST_DATA/data60bytes dir0/data60bytes + -fs NAMENODE -put CLITEST_DATA/data120bytes dir0/data120bytes + -fs NAMENODE -cat dir0/data15bytes dir0/data30bytes dir0/data60bytes dir0/data120bytes + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + 12345678901234.* + + + + + + + cat: contents of file(absolute path) that does not exist + + -fs NAMENODE -cat /file + + + + + + RegexpComparator + ^cat: File does not exist: /file + + + + + + cat: contents of file(relative path) that does not exist + + -fs NAMENODE -cat file + + + + + + RegexpComparator + ^cat: File does not exist: file + + + + + + cat: contents of directory(absolute path) + + -fs NAMENODE -mkdir /dir1 + -fs NAMENODE -cat /dir1 + + + -fs NAMENODE -rmr /dir1 + + + + RegexpComparator + ^cat: Source must be a file. + + + + + + cat: contents of directory(relative path) + + -fs NAMENODE -mkdir dir1 + -fs NAMENODE -cat dir + + + -fs NAMENODE -rmr dir1 + + + + RegexpComparator + ^cat: File does not exist: dir + + + + + + + copyToLocal: non existent relative path + + -fs NAMENODE -copyToLocal file CLITEST_DATA/file + + + -fs NAMENODE -rmr /user + + + + TokenComparator + copyToLocal: null + + + + + + copyToLocal: non existent absolute path + + -fs NAMENODE -copyToLocal /user/file CLITEST_DATA/file + + + -fs NAMENODE -rmr /user + + + + TokenComparator + copyToLocal: null + + + + + + + + + + mkdir: creating directory (absolute path) + + -fs NAMENODE -mkdir /dir0 + -fs NAMENODE -dus /dir0 + + + -fs NAMENODE -rm /user + + + + RegexpComparator + ^hdfs://localhost[.a-z]*:[0-9]*/dir0(|\t)*0 + + + + + + mkdir: creating directory (relative path) + + -fs NAMENODE -mkdir dir0 + -fs NAMENODE -dus dir0 + + + -fs NAMENODE -rm /user + + + + RegexpComparator + ^hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0(|\t)*0 + + + + + + mkdir: creating many directories (absolute path) + + -fs NAMENODE -mkdir /dir0 + -fs NAMENODE -mkdir /dir1 + -fs NAMENODE -mkdir /dir2 + -fs NAMENODE -mkdir /dir3 + -fs NAMENODE -dus /dir* + + + -fs NAMENODE -rm /dir* + + + + RegexpComparator + ^hdfs://localhost[.a-z]*:[0-9]*/dir0(|\t)*0 + + + RegexpComparator + ^hdfs://localhost[.a-z]*:[0-9]*/dir1(|\t)*0 + + + RegexpComparator + ^hdfs://localhost[.a-z]*:[0-9]*/dir2(|\t)*0 + + + RegexpComparator + ^hdfs://localhost[.a-z]*:[0-9]*/dir3(|\t)*0 + + + + + + mkdir: creating many directories (relative path) + + -fs NAMENODE -mkdir dir0 + -fs NAMENODE -mkdir dir1 + -fs NAMENODE -mkdir dir2 + -fs NAMENODE -mkdir dir3 + -fs NAMENODE -dus dir* + + + -fs NAMENODE -rm /user + + + + RegexpComparator + ^hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0(|\t)*0 + + + RegexpComparator + ^hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir1(|\t)*0 + + + RegexpComparator + ^hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir2(|\t)*0 + + + RegexpComparator + ^hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir3(|\t)*0 + 
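The cat, copyToLocal and mkdir cases above, like the put and copyFromLocal cases before them, refer to the local test fixtures through the CLITEST_DATA placeholder, and the home-directory expectations use USERNAME; together with NAMENODE these are plain string substitutions applied to every token of a command before it is run (see CommandExecutor.getCommandAsArgs() further down in this patch). A stand-alone sketch of that substitution, with invented values for the cluster URI and data directory:

import java.util.Arrays;
import java.util.StringTokenizer;

// Sketch only: mirrors the token-by-token placeholder substitution done by the test
// driver. The namenode URI and local data directory below are invented values.
public class PlaceholderSubstitutionSketch {
  public static void main(String[] args) {
    String cmd = "-fs NAMENODE -put CLITEST_DATA/data15bytes data15bytes";
    String namenode = "hdfs://localhost:8020";
    String testCacheDataDir = "file:/tmp/cli-test-data";

    StringTokenizer tokenizer = new StringTokenizer(cmd, " ");
    String[] shellArgs = new String[tokenizer.countTokens()];
    int i = 0;
    while (tokenizer.hasMoreTokens()) {
      String token = tokenizer.nextToken();
      token = token.replaceAll("NAMENODE", namenode);
      token = token.replaceAll("CLITEST_DATA", testCacheDataDir);
      token = token.replaceAll("USERNAME", System.getProperty("user.name"));
      shellArgs[i++] = token;
    }
    // [-fs, hdfs://localhost:8020, -put, file:/tmp/cli-test-data/data15bytes, data15bytes]
    System.out.println(Arrays.toString(shellArgs));
  }
}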
+ + + + + mkdir: creating a directory with the name of an already existing directory + + -fs NAMENODE -mkdir /dir0 + -fs NAMENODE -mkdir /dir0 + + + -fs NAMENODE -rmr /dir0 + + + + TokenComparator + mkdir: cannot create directory /dir0: File exists + + + + + + mkdir: creating a directory with the name of an already existing file + + -fs NAMENODE -put CLITEST_DATA/data15bytes data15bytes + -fs NAMENODE -mkdir data15bytes + + + -fs NAMENODE -rmr data15bytes + + + + TokenComparator + mkdir: data15bytes exists but is not a directory + + + + + + + setrep: existent file (absolute path) + + -fs NAMENODE -touchz /dir0/file0 + -fs NAMENODE -setrep 2 /dir0/file0 + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^Replication 2 set: hdfs://localhost[.a-z]*:[0-9]*/dir0/file0 + + + + + + setrep: existent file (relative path) + + -fs NAMENODE -touchz file0 + -fs NAMENODE -setrep 2 file0 + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^Replication 2 set: hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file0 + + + + + + setrep: existent directory (absolute path) + + -fs NAMENODE -touchz /dir0/file0 + -fs NAMENODE -touchz /dir0/file1 + -fs NAMENODE -setrep -R 2 /dir0 + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^Replication 2 set: hdfs://localhost[.a-z]*:[0-9]*/dir0/file0 + + + RegexpComparator + ^Replication 2 set: hdfs://localhost[.a-z]*:[0-9]*/dir0/file1 + + + + + + setrep: existent directory (relative path) + + -fs NAMENODE -touchz dir0/file0 + -fs NAMENODE -touchz dir0/file1 + -fs NAMENODE -setrep -R 2 dir0 + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^Replication 2 set: hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0/file0 + + + RegexpComparator + ^Replication 2 set: hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0/file1 + + + + + + setrep: non existent file (absolute path) + + -fs NAMENODE -setrep 2 /dir0/file + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^setrep: File does not exist: /dir0/file + + + + + + setrep: non existent file (relative path) + + -fs NAMENODE -setrep 2 file0 + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^setrep: File does not exist: file0 + + + + + + + + touchz: touching file (absolute path) + + -fs NAMENODE -touchz /user/file0 + -fs NAMENODE -du /user/file0 + + + -fs NAMENODE -rm /user + + + + TokenComparator + Found 1 items + + + RegexpComparator + ^0( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/file0 + + + + + + touchz: touching file(relative path) + + -fs NAMENODE -touchz file0 + -fs NAMENODE -du file0 + + + -fs NAMENODE -rm /user + + + + TokenComparator + Found 1 items + + + RegexpComparator + ^0( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file0 + + + + + + touchz: touching many files + + -fs NAMENODE -touchz file0 file1 file2 + -fs NAMENODE -du file* + + + -fs NAMENODE -rm /user + + + + TokenComparator + Found 3 items + + + RegexpComparator + ^0( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file0 + ^0( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file1 + ^0( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file2 + + + + + + touchz: touching already existing file + + -fs NAMENODE -put CLITEST_DATA/data15bytes data15bytes + -fs NAMENODE -touchz data15bytes + + + -fs NAMENODE -rm data15bytes + + + + TokenComparator + touchz: data15bytes must be a zero-length file + + + + + + + test: non existent file (absolute path) + + -fs NAMENODE -test -z /dir0/file + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^test: File does not exist: /dir0/file + + + + + + test: non existent file (relative path) + + -fs 
NAMENODE -test -z file + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^test: File does not exist: file + + + + + + test: non existent directory (absolute path) + + -fs NAMENODE -test -d /dir + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^test: File does not exist: /dir + + + + + + test: non existent directory (relative path) + + -fs NAMENODE -test -d dir0 + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^test: File does not exist: dir0 + + + + + + + stat: statistics about file(absolute path) + + -fs NAMENODE -put CLITEST_DATA/data60bytes /data60bytes + -fs NAMENODE -stat "%n-%b" /data60bytes + + + -fs NAMENODE -rmr /data60bytes + + + + TokenComparator + data60bytes-60 + + + + + + stat: statistics about file(relative path) + + -fs NAMENODE -put CLITEST_DATA/data60bytes data60bytes + -fs NAMENODE -stat "%n-%b" data60bytes + + + -fs NAMENODE -rmr /user + + + + TokenComparator + data60bytes-60 + + + + + + stat: statistics about directory(absolute path) + + -fs NAMENODE -mkdir /dirtest + -fs NAMENODE -stat "%n-%b-%o" /dirtest + + + -fs NAMENODE -rmr /dirtest + + + + TokenComparator + dirtest-0-0 + + + + + + stat: statistics about directory(relative path) + + -fs NAMENODE -mkdir dirtest + -fs NAMENODE -stat "%n-%b-%o" dirtest + + + -fs NAMENODE -rmr /user + + + + TokenComparator + dirtest-0-0 + + + + + + stat: statistics about files (absolute path) using globbing + + -fs -mkdir /dir0 + -fs NAMENODE -put CLITEST_DATA/data15bytes /dir0/data15bytes + -fs NAMENODE -put CLITEST_DATA/data30bytes /dir0/data30bytes + -fs NAMENODE -put CLITEST_DATA/data60bytes /dir0/data60bytes + -fs NAMENODE -put CLITEST_DATA/data120bytes /dir0/data120bytes + -fs NAMENODE -mkdir /dir0/datadir + -fs NAMENODE -stat "%n-%b" /dir0/data* + + + -fs NAMENODE -rmr /dir0 + + + + TokenComparator + "data15bytes-15" + + + TokenComparator + "data30bytes-30" + + + TokenComparator + "data60bytes-60" + + + TokenComparator + "data120bytes-120" + + + TokenComparator + "datadir-0" + + + + + + stat: statistics about files (relative path) using globbing + + -fs NAMENODE -put CLITEST_DATA/data15bytes data15bytes + -fs NAMENODE -put CLITEST_DATA/data30bytes data30bytes + -fs NAMENODE -put CLITEST_DATA/data60bytes data60bytes + -fs NAMENODE -put CLITEST_DATA/data120bytes data120bytes + -fs NAMENODE -mkdir datadir + -fs NAMENODE -stat "%n-%b" data* + + + -fs NAMENODE -rmr /user + + + + TokenComparator + "data15bytes-15" + + + TokenComparator + "data30bytes-30" + + + TokenComparator + "data60bytes-60" + + + TokenComparator + "data120bytes-120" + + + TokenComparator + "datadir-0" + + + + + + stat: statistics about file or directory(absolute path) that does not exist + + -fs NAMENODE -stat /file + + + + + + RegexpComparator + ^stat: cannot stat `/file': No such file or directory + + + + + + stat: statistics about file or directory(relative path) that does not exist + + -fs NAMENODE -stat file1 + + + + + + RegexpComparator + ^stat: cannot stat `file1': No such file or directory + + + + + + + + tail: contents of file(absolute path) + + -fs NAMENODE -put CLITEST_DATA/data15bytes /data15bytes + -fs NAMENODE -tail /data15bytes + + + -fs NAMENODE -rm /user + + + + TokenComparator + 12345678901234 + + + + + + tail: contents of file(relative path) + + -fs NAMENODE -put CLITEST_DATA/data15bytes data15bytes + -fs NAMENODE -tail data15bytes + + + -fs NAMENODE -rmr /user + + + + TokenComparator + 12345678901234 + + + + + + tail: contents of files(absolute path) using globbing + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -touchz 
/file2 + -fs NAMENODE -touchz /file3 + -fs NAMENODE -touchz /file4 + -fs NAMENODE -tail /file* + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^tail: File does not exist: /file\* + + + + + + tail: contents of files(relative path) using globbing + + -fs NAMENODE -touchz file1 + -fs NAMENODE -touchz file2 + -fs NAMENODE -touchz file3 + -fs NAMENODE -touchz file4 + -fs NAMENODE -tail file* + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^tail: File does not exist: file\* + + + + + + tail: contents of file(absolute path) that does not exist + + -fs NAMENODE -tail /file + + + + + + RegexpComparator + ^tail: File does not exist: /file + + + + + + tail: contents of file(relative path) that does not exist + + -fs NAMENODE -tail file1 + + + + + + RegexpComparator + ^tail: File does not exist: file1 + + + + + + tail: contents of directory(absolute path) + + -fs NAMENODE -mkdir /dir1 + -fs NAMENODE -tail /dir1 + + + -fs NAMENODE -rmr /dir1 + + + + RegexpComparator + ^tail: Source must be a file. + + + + + + tail: contents of directory(relative path) + + -fs NAMENODE -mkdir dir1 + -fs NAMENODE -tail dir1 + + + -fs NAMENODE -rmr dir1 + + + + RegexpComparator + ^tail: Source must be a file. + + + + + + + moveFromLocal: moving non existent file(absolute path) + + -fs NAMENODE -moveFromLocal /user/wrongdata file + + + -fs NAMENODE -rmr /user + + + + TokenComparator + moveFromLocal: File /user/wrongdata does not exist + + + + + + moveFromLocal: moving non existent file(relative path) + + -fs NAMENODE -moveFromLocal wrongdata file + + + -fs NAMENODE -rmr /user + + + + TokenComparator + moveFromLocal: File wrongdata does not exist + + + + + + moveFromLocal: moving many files into an existing file + + -fs NAMENODE -moveFromLocal CLITEST_DATA/data15bytes /data15bytes + -fs NAMENODE -moveFromLocal CLITEST_DATA/data30bytes /data30bytes + -fs NAMENODE -touchz file0 + -fs NAMENODE -moveFromLocal /data15bytes /data30bytes file0 + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^moveFromLocal: copying multiple files, but last argument `file0' is not a directory + + + + + + moveFromLocal: moving many files into a non existent directory + + -fs NAMENODE -moveFromLocal CLITEST_DATA/data15bytes /data15bytes + -fs NAMENODE -moveFromLocal CLITEST_DATA/data30bytes /data30bytes + -fs NAMENODE -moveFromLocal /data15bytes /data30bytes wrongdir + + + -fs NAMENODE -rmr /user + + + + RegexpComparator + ^moveFromLocal: `wrongdir': specified destination directory doest not exist + + + + + + verifying error messages for quota commands - setting quota on a file + + -fs NAMENODE -mkdir /test + -fs NAMENODE -touchz /test/file1 + -fs NAMENODE -setQuota 1 /test/file1 + + + -fs NAMENODE -setQuota 5 /test + + + + + SubstringComparator + Cannot set quota on a file: /test/file1 + + + + + + verifying error messages for quota commands - setting quota on non-existing file + + -fs NAMENODE -setSpaceQuota 1g /test1 + + + + + + + SubstringComparator + Can not find listing for /test1 + + + + + + verifying error messages for quota commands - exceeding quota + + -fs NAMENODE -setQuota 3 /test + -fs NAMENODE -touchz /test/file0 + -fs NAMENODE -mkdir /test/test1 + + + + + + + SubstringComparator + The NameSpace quota (directories and files) of directory /test is exceeded + + + + + + verifying error messages for quota commands - setting not valid quota + + -fs NAMENODE -setQuota 0 /test + + + + + + + SubstringComparator + Invalid values for quota : 0 + + + + + + verifying error messages for quota commands - setting not valid 
space quota + + -fs NAMENODE -setSpaceQuota a5 /test + + + + + + + SubstringComparator + For input string: "a5" + + + + + + verifying error messages for quota commands - clearQuota on non existing file + + -fs NAMENODE -clrQuota /test1 + + + -fs NAMENODE -rmr /test + + + + SubstringComparator + Can not find listing for /test1 + + + + + + refreshServiceAcl: refreshing security authorization policy for namenode + + -fs NAMENODE -refreshServiceAcl + + + + + + + ExactComparator + + + + + + + refreshServiceAcl: verifying error message while refreshing security authorization policy for namenode + + + -fs NAMENODE -Dhadoop.job.ugi=blah,blah -refreshServiceAcl + + + + + + + SubstringComparator + access denied + + + + + + refreshServiceAcl: refreshing security authorization policy for jobtracker + + -jt JOBTRACKER -refreshServiceAcl + + + + + + + ExactComparator + + + + + + + refreshServiceAcl: verifying error message while refreshing security authorization policy for jobtracker + + + -jt JOBTRACKER -Dhadoop.job.ugi=blah,blah -refreshServiceAcl + + + + + + + SubstringComparator + access denied + + + + + + diff --git a/src/test/org/apache/hadoop/cli/testConf.xsl b/src/test/org/apache/hadoop/cli/testConf.xsl new file mode 100644 index 0000000..09fb0b7 --- /dev/null +++ b/src/test/org/apache/hadoop/cli/testConf.xsl @@ -0,0 +1,28 @@ + + + + + + + +

Hadoop DFS command-line tests

ID | Command | Description
+ + + + + diff --git a/src/test/org/apache/hadoop/cli/util/CLITestData.java b/src/test/org/apache/hadoop/cli/util/CLITestData.java new file mode 100644 index 0000000..1a8ff0b --- /dev/null +++ b/src/test/org/apache/hadoop/cli/util/CLITestData.java @@ -0,0 +1,135 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.cli.util; + +import java.util.ArrayList; + +/** + * + * Class to store CLI Test Data + */ +public class CLITestData { + private String testDesc = null; + private ArrayList testCommands = null; + private ArrayList cleanupCommands = null; + private ArrayList comparatorData = null; + private boolean testResult = false; + + public CLITestData() { + + } + + /** + * Class to define Test Command. includes type of the command and command itself + * Valid types FS, DFSADMIN and MRADMIN. + */ + static public class TestCmd { + public enum CommandType { + FS, + DFSADMIN, + MRADMIN + } + private final CommandType type; + private final String cmd; + + public TestCmd(String str, CommandType type) { + cmd = str; + this.type = type; + } + public CommandType getType() { + return type; + } + public String getCmd() { + return cmd; + } + public String toString() { + return cmd; + } + } + + /** + * @return the testDesc + */ + public String getTestDesc() { + return testDesc; + } + + /** + * @param testDesc the testDesc to set + */ + public void setTestDesc(String testDesc) { + this.testDesc = testDesc; + } + + /** + * @return the testCommands + */ + public ArrayList getTestCommands() { + return testCommands; + } + + /** + * @param testCommands the testCommands to set + */ + public void setTestCommands(ArrayList testCommands) { + this.testCommands = testCommands; + } + + /** + * @return the comparatorData + */ + public ArrayList getComparatorData() { + return comparatorData; + } + + /** + * @param comparatorData the comparatorData to set + */ + public void setComparatorData(ArrayList comparatorData) { + this.comparatorData = comparatorData; + } + + /** + * @return the testResult + */ + public boolean getTestResult() { + return testResult; + } + + /** + * @param testResult the testResult to set + */ + public void setTestResult(boolean testResult) { + this.testResult = testResult; + } + + /** + * @return the cleanupCommands + */ + public ArrayList getCleanupCommands() { + return cleanupCommands; + } + + /** + * @param cleanupCommands the cleanupCommands to set + */ + public void setCleanupCommands(ArrayList cleanupCommands) { + this.cleanupCommands = cleanupCommands; + } +} diff --git a/src/test/org/apache/hadoop/cli/util/CommandExecutor.java b/src/test/org/apache/hadoop/cli/util/CommandExecutor.java new file mode 100644 index 0000000..80c0da9 --- /dev/null +++ b/src/test/org/apache/hadoop/cli/util/CommandExecutor.java @@ 
-0,0 +1,186 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.cli.util; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.PrintStream; +import java.util.StringTokenizer; + +import org.apache.hadoop.cli.TestCLI; +import org.apache.hadoop.cli.util.CLITestData.TestCmd; +import org.apache.hadoop.cli.util.CLITestData.TestCmd.CommandType; +import org.apache.hadoop.fs.FsShell; +import org.apache.hadoop.hdfs.tools.DFSAdmin; +import org.apache.hadoop.mapred.tools.MRAdmin; +import org.apache.hadoop.util.ToolRunner; + +/** + * + * This class executed commands and captures the output + */ +public class CommandExecutor { + private static String commandOutput = null; + private static int exitCode = 0; + private static Exception lastException = null; + private static String cmdExecuted = null; + + private static String[] getCommandAsArgs(final String cmd, final String masterKey, + final String master) { + StringTokenizer tokenizer = new StringTokenizer(cmd, " "); + String[] args = new String[tokenizer.countTokens()]; + + int i = 0; + while (tokenizer.hasMoreTokens()) { + args[i] = tokenizer.nextToken(); + + args[i] = args[i].replaceAll(masterKey, master); + args[i] = args[i].replaceAll("CLITEST_DATA", + new File(TestCLI.TEST_CACHE_DATA_DIR). 
+ toURI().toString().replace(' ', '+')); + args[i] = args[i].replaceAll("USERNAME", System.getProperty("user.name")); + + i++; + } + + return args; + } + + public static int executeCommand(final TestCmd cmd, + final String namenode, final String jobtracker) + throws Exception { + switch(cmd.getType()) { + case DFSADMIN: + return CommandExecutor.executeDFSAdminCommand(cmd.getCmd(), namenode); + case MRADMIN: + return CommandExecutor.executeMRAdminCommand(cmd.getCmd(), jobtracker); + case FS: + return CommandExecutor.executeFSCommand(cmd.getCmd(), namenode); + default: + throw new Exception("Unknow type of Test command:"+ cmd.getType()); + } + } + + public static int executeDFSAdminCommand(final String cmd, final String namenode) { + exitCode = 0; + + ByteArrayOutputStream bao = new ByteArrayOutputStream(); + PrintStream origOut = System.out; + PrintStream origErr = System.err; + + System.setOut(new PrintStream(bao)); + System.setErr(new PrintStream(bao)); + + DFSAdmin shell = new DFSAdmin(); + String[] args = getCommandAsArgs(cmd, "NAMENODE", namenode); + cmdExecuted = cmd; + + try { + ToolRunner.run(shell, args); + } catch (Exception e) { + e.printStackTrace(); + lastException = e; + exitCode = -1; + } finally { + System.setOut(origOut); + System.setErr(origErr); + } + + commandOutput = bao.toString(); + + return exitCode; + } + + public static int executeMRAdminCommand(final String cmd, + final String jobtracker) { + exitCode = 0; + + ByteArrayOutputStream bao = new ByteArrayOutputStream(); + PrintStream origOut = System.out; + PrintStream origErr = System.err; + + System.setOut(new PrintStream(bao)); + System.setErr(new PrintStream(bao)); + + MRAdmin mradmin = new MRAdmin(); + String[] args = getCommandAsArgs(cmd, "JOBTRACKER", jobtracker); + cmdExecuted = cmd; + + try { + ToolRunner.run(mradmin, args); + } catch (Exception e) { + e.printStackTrace(); + lastException = e; + exitCode = -1; + } finally { + System.setOut(origOut); + System.setErr(origErr); + } + + commandOutput = bao.toString(); + + return exitCode; + } + + public static int executeFSCommand(final String cmd, final String namenode) { + exitCode = 0; + + ByteArrayOutputStream bao = new ByteArrayOutputStream(); + PrintStream origOut = System.out; + PrintStream origErr = System.err; + + System.setOut(new PrintStream(bao)); + System.setErr(new PrintStream(bao)); + + FsShell shell = new FsShell(); + String[] args = getCommandAsArgs(cmd, "NAMENODE", namenode); + cmdExecuted = cmd; + + try { + ToolRunner.run(shell, args); + } catch (Exception e) { + e.printStackTrace(); + lastException = e; + exitCode = -1; + } finally { + System.setOut(origOut); + System.setErr(origErr); + } + + commandOutput = bao.toString(); + + return exitCode; + } + + public static String getLastCommandOutput() { + return commandOutput; + } + + public static int getLastExitCode() { + return exitCode; + } + + public static Exception getLastException() { + return lastException; + } + + public static String getLastCommand() { + return cmdExecuted; + } +} diff --git a/src/test/org/apache/hadoop/cli/util/ComparatorBase.java b/src/test/org/apache/hadoop/cli/util/ComparatorBase.java new file mode 100644 index 0000000..fae9937 --- /dev/null +++ b/src/test/org/apache/hadoop/cli/util/ComparatorBase.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.cli.util; + +/** + * + * Comparator interface. To define a new comparator, implement the compare + * method + */ +public abstract class ComparatorBase { + public ComparatorBase() { + + } + + /** + * Compare method for the comparator class. + * @param actual output. can be null + * @param expected output. can be null + * @return true if expected output compares with the actual output, else + * return false. If actual or expected is null, return false + */ + public abstract boolean compare(String actual, String expected); +} diff --git a/src/test/org/apache/hadoop/cli/util/ComparatorData.java b/src/test/org/apache/hadoop/cli/util/ComparatorData.java new file mode 100644 index 0000000..e61084b --- /dev/null +++ b/src/test/org/apache/hadoop/cli/util/ComparatorData.java @@ -0,0 +1,108 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.cli.util; + +import java.util.Vector; + +/** + * + * Class to store CLI Test Comparators Data + */ +public class ComparatorData { + private String expectedOutput = null; + private String actualOutput = null; + private boolean testResult = false; + private int exitCode = 0; + private String comparatorType = null; + + public ComparatorData() { + + } + + /** + * @return the expectedOutput + */ + public String getExpectedOutput() { + return expectedOutput; + } + + /** + * @param expectedOutput the expectedOutput to set + */ + public void setExpectedOutput(String expectedOutput) { + this.expectedOutput = expectedOutput; + } + + /** + * @return the actualOutput + */ + public String getActualOutput() { + return actualOutput; + } + + /** + * @param actualOutput the actualOutput to set + */ + public void setActualOutput(String actualOutput) { + this.actualOutput = actualOutput; + } + + /** + * @return the testResult + */ + public boolean getTestResult() { + return testResult; + } + + /** + * @param testResult the testResult to set + */ + public void setTestResult(boolean testResult) { + this.testResult = testResult; + } + + /** + * @return the exitCode + */ + public int getExitCode() { + return exitCode; + } + + /** + * @param exitCode the exitCode to set + */ + public void setExitCode(int exitCode) { + this.exitCode = exitCode; + } + + /** + * @return the comparatorType + */ + public String getComparatorType() { + return comparatorType; + } + + /** + * @param comparatorType the comparatorType to set + */ + public void setComparatorType(String comparatorType) { + this.comparatorType = comparatorType; + } + +} diff --git a/src/test/org/apache/hadoop/cli/util/ExactComparator.java b/src/test/org/apache/hadoop/cli/util/ExactComparator.java new file mode 100644 index 0000000..9a49a96 --- /dev/null +++ b/src/test/org/apache/hadoop/cli/util/ExactComparator.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.cli.util; + +/** + * Comparator for the Command line tests. + * + * This comparator compares the actual to the expected and + * returns true only if they are the same + * + */ +public class ExactComparator extends ComparatorBase { + + @Override + public boolean compare(String actual, String expected) { + return actual.equals(expected); + } +} diff --git a/src/test/org/apache/hadoop/cli/util/RegexpComparator.java b/src/test/org/apache/hadoop/cli/util/RegexpComparator.java new file mode 100644 index 0000000..f247746 --- /dev/null +++ b/src/test/org/apache/hadoop/cli/util/RegexpComparator.java @@ -0,0 +1,50 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.cli.util; + +import java.util.StringTokenizer; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Comparator for the Command line tests. + * + * This comparator searches for the regular expression specified in 'expected' + * in the string 'actual' and returns true if the regular expression match is + * done + * + */ +public class RegexpComparator extends ComparatorBase { + + @Override + public boolean compare(String actual, String expected) { + boolean success = false; + Pattern p = Pattern.compile(expected); + + StringTokenizer tokenizer = new StringTokenizer(actual, "\n\r"); + while (tokenizer.hasMoreTokens() && !success) { + String actualToken = tokenizer.nextToken(); + Matcher m = p.matcher(actualToken); + success = m.matches(); + } + + return success; + } + +} diff --git a/src/test/org/apache/hadoop/cli/util/SubstringComparator.java b/src/test/org/apache/hadoop/cli/util/SubstringComparator.java new file mode 100644 index 0000000..79e9a88 --- /dev/null +++ b/src/test/org/apache/hadoop/cli/util/SubstringComparator.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.cli.util; + +public class SubstringComparator extends ComparatorBase { + + @Override + public boolean compare(String actual, String expected) { + int compareOutput = actual.indexOf(expected); + if (compareOutput == -1) { + return false; + } + + return true; + } + +} diff --git a/src/test/org/apache/hadoop/cli/util/TokenComparator.java b/src/test/org/apache/hadoop/cli/util/TokenComparator.java new file mode 100644 index 0000000..ce5b846 --- /dev/null +++ b/src/test/org/apache/hadoop/cli/util/TokenComparator.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
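// Illustrative sketch of how the two comparators above behave. The sample
// strings are invented; the comparator classes are the ones defined in this
// patch, and "ComparatorDemo" is a hypothetical helper for illustration.
package org.apache.hadoop.cli.util;

public class ComparatorDemo {

  public static void main(String[] args) {
    // RegexpComparator applies the pattern to each \n- or \r-separated line of
    // the actual output and succeeds if any single line matches completely.
    boolean regexpHit = new RegexpComparator()
        .compare("Found 3 items\nsome other line", "^Found \\d+ items$");

    // SubstringComparator only checks that the expected text occurs somewhere
    // in the actual output.
    boolean substringHit = new SubstringComparator()
        .compare("Found 3 items", "3 items");

    System.out.println(regexpHit + " " + substringHit); // prints: true true
  }
}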
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.cli.util; + +import java.util.StringTokenizer; + +/** + * Comparator for the Command line tests. + * + * This comparator compares each token in the expected output and returns true + * if all tokens are in the actual output + * + */ +public class TokenComparator extends ComparatorBase { + + @Override + public boolean compare(String actual, String expected) { + boolean compareOutput = true; + + StringTokenizer tokenizer = new StringTokenizer(expected, ",\n\r"); + + while (tokenizer.hasMoreTokens()) { + String token = tokenizer.nextToken(); + if (actual.indexOf(token) != -1) { + compareOutput &= true; + } else { + compareOutput &= false; + } + } + + return compareOutput; + } +} diff --git a/src/test/org/apache/hadoop/conf/TestConfiguration.java b/src/test/org/apache/hadoop/conf/TestConfiguration.java new file mode 100644 index 0000000..0652915 --- /dev/null +++ b/src/test/org/apache/hadoop/conf/TestConfiguration.java @@ -0,0 +1,633 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
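// Illustrative sketch of TokenComparator above: every comma- or
// newline-separated token in 'expected' must occur somewhere in 'actual'.
// The sample strings are invented; "TokenComparatorDemo" is a hypothetical
// helper for illustration.
package org.apache.hadoop.cli.util;

public class TokenComparatorDemo {

  public static void main(String[] args) {
    TokenComparator cmp = new TokenComparator();
    // Both tokens ("drwxr-xr-x" and "user") appear in the actual string.
    System.out.println(cmp.compare("drwxr-xr-x   - user supergroup 0", "drwxr-xr-x,user"));    // true
    // "missing" does not appear, so the comparison fails.
    System.out.println(cmp.compare("drwxr-xr-x   - user supergroup 0", "drwxr-xr-x,missing")); // false
  }
}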
+ */ +package org.apache.hadoop.conf; + +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.io.DataInputStream; +import java.io.ByteArrayOutputStream; +import java.io.ByteArrayInputStream; +import java.io.DataOutputStream; +import java.io.StringWriter; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Random; + +import junit.framework.TestCase; + +import org.apache.hadoop.fs.Path; +import org.codehaus.jackson.map.ObjectMapper; + + +public class TestConfiguration extends TestCase { + + private Configuration conf; + final static String CONFIG = new File("./test-config.xml").getAbsolutePath(); + final static String CONFIG2 = new File("./test-config2.xml").getAbsolutePath(); + final static Random RAN = new Random(); + + @Override + protected void setUp() throws Exception { + super.setUp(); + conf = new Configuration(); + } + + @Override + protected void tearDown() throws Exception { + super.tearDown(); + new File(CONFIG).delete(); + new File(CONFIG2).delete(); + } + + private void startConfig() throws IOException{ + out.write("\n"); + out.write("\n"); + } + + private void endConfig() throws IOException{ + out.write("\n"); + out.close(); + } + + private void addInclude(String filename) throws IOException{ + out.write("\n "); + } + + public void testVariableSubstitution() throws IOException { + out=new BufferedWriter(new FileWriter(CONFIG)); + startConfig(); + declareProperty("my.int", "${intvar}", "42"); + declareProperty("intvar", "42", "42"); + declareProperty("my.base", "/tmp/${user.name}", UNSPEC); + declareProperty("my.file", "hello", "hello"); + declareProperty("my.suffix", ".txt", ".txt"); + declareProperty("my.relfile", "${my.file}${my.suffix}", "hello.txt"); + declareProperty("my.fullfile", "${my.base}/${my.file}${my.suffix}", UNSPEC); + // check that undefined variables are returned as-is + declareProperty("my.failsexpand", "a${my.undefvar}b", "a${my.undefvar}b"); + endConfig(); + Path fileResource = new Path(CONFIG); + conf.addResource(fileResource); + + for (Prop p : props) { + System.out.println("p=" + p.name); + String gotVal = conf.get(p.name); + String gotRawVal = conf.getRaw(p.name); + assertEq(p.val, gotRawVal); + if (p.expectEval == UNSPEC) { + // expansion is system-dependent (uses System properties) + // can't do exact match so just check that all variables got expanded + assertTrue(gotVal != null && -1 == gotVal.indexOf("${")); + } else { + assertEq(p.expectEval, gotVal); + } + } + + // check that expansion also occurs for getInt() + assertTrue(conf.getInt("intvar", -1) == 42); + assertTrue(conf.getInt("my.int", -1) == 42); + } + + public void testFinalParam() throws IOException { + out=new BufferedWriter(new FileWriter(CONFIG)); + startConfig(); + declareProperty("my.var", "", "", true); + endConfig(); + Path fileResource = new Path(CONFIG); + Configuration conf1 = new Configuration(); + conf1.addResource(fileResource); + assertNull("my var is not null", conf1.get("my.var")); + + out=new BufferedWriter(new FileWriter(CONFIG2)); + startConfig(); + declareProperty("my.var", "myval", "myval", false); + endConfig(); + fileResource = new Path(CONFIG2); + + Configuration conf2 = new Configuration(conf1); + conf2.addResource(fileResource); + assertNull("my var is not final", conf2.get("my.var")); + } + + public static void assertEq(Object a, Object b) { + System.out.println("assertEq: " + a + ", " + b); + assertEquals(a, b); + } + + static class Prop { + String name; + String val; + 
String expectEval; + } + + final String UNSPEC = null; + ArrayList props = new ArrayList(); + + void declareProperty(String name, String val, String expectEval) + throws IOException { + declareProperty(name, val, expectEval, false); + } + + void declareProperty(String name, String val, String expectEval, + boolean isFinal) + throws IOException { + appendProperty(name, val, isFinal); + Prop p = new Prop(); + p.name = name; + p.val = val; + p.expectEval = expectEval; + props.add(p); + } + + void appendProperty(String name, String val) throws IOException { + appendProperty(name, val, false); + } + + void appendProperty(String name, String val, boolean isFinal) + throws IOException { + out.write(""); + out.write(""); + out.write(name); + out.write(""); + out.write(""); + out.write(val); + out.write(""); + if (isFinal) { + out.write("true"); + } + out.write("\n"); + } + + public void testOverlay() throws IOException{ + out=new BufferedWriter(new FileWriter(CONFIG)); + startConfig(); + appendProperty("a","b"); + appendProperty("b","c"); + appendProperty("d","e"); + appendProperty("e","f", true); + endConfig(); + + out=new BufferedWriter(new FileWriter(CONFIG2)); + startConfig(); + appendProperty("a","b"); + appendProperty("b","d"); + appendProperty("e","e"); + endConfig(); + + Path fileResource = new Path(CONFIG); + conf.addResource(fileResource); + + //set dynamically something + conf.set("c","d"); + conf.set("a","d"); + + Configuration clone=new Configuration(conf); + clone.addResource(new Path(CONFIG2)); + + assertEquals(clone.get("a"), "d"); + assertEquals(clone.get("b"), "d"); + assertEquals(clone.get("c"), "d"); + assertEquals(clone.get("d"), "e"); + assertEquals(clone.get("e"), "f"); + + } + + public void testCommentsInValue() throws IOException { + out=new BufferedWriter(new FileWriter(CONFIG)); + startConfig(); + appendProperty("my.comment", "this contains a comment"); + endConfig(); + Path fileResource = new Path(CONFIG); + conf.addResource(fileResource); + //two spaces one after "this", one before "contains" + assertEquals("this contains a comment", conf.get("my.comment")); + } + + public void testTrim() throws IOException { + out=new BufferedWriter(new FileWriter(CONFIG)); + startConfig(); + String[] whitespaces = {"", " ", "\n", "\t"}; + String[] name = new String[100]; + for(int i = 0; i < name.length; i++) { + name[i] = "foo" + i; + StringBuilder prefix = new StringBuilder(); + StringBuilder postfix = new StringBuilder(); + for(int j = 0; j < 3; j++) { + prefix.append(whitespaces[RAN.nextInt(whitespaces.length)]); + postfix.append(whitespaces[RAN.nextInt(whitespaces.length)]); + } + + appendProperty(prefix + name[i] + postfix, name[i] + ".value"); + } + endConfig(); + + conf.addResource(new Path(CONFIG)); + for(String n : name) { + assertEquals(n + ".value", conf.get(n)); + } + } + + public void testToString() throws IOException { + out=new BufferedWriter(new FileWriter(CONFIG)); + startConfig(); + endConfig(); + Path fileResource = new Path(CONFIG); + conf.addResource(fileResource); + + String expectedOutput = + "Configuration: core-default.xml, core-site.xml, " + + fileResource.toString(); + assertEquals(expectedOutput, conf.toString()); + } + + public void testIncludes() throws Exception { + tearDown(); + System.out.println("XXX testIncludes"); + out=new BufferedWriter(new FileWriter(CONFIG2)); + startConfig(); + appendProperty("a","b"); + appendProperty("c","d"); + endConfig(); + + out=new BufferedWriter(new FileWriter(CONFIG)); + startConfig(); + addInclude(CONFIG2); + 
appendProperty("e","f"); + appendProperty("g","h"); + endConfig(); + + // verify that the includes file contains all properties + Path fileResource = new Path(CONFIG); + conf.addResource(fileResource); + assertEquals(conf.get("a"), "b"); + assertEquals(conf.get("c"), "d"); + assertEquals(conf.get("e"), "f"); + assertEquals(conf.get("g"), "h"); + tearDown(); + } + + BufferedWriter out; + + public void testIntegerRanges() { + Configuration conf = new Configuration(); + conf.set("first", "-100"); + conf.set("second", "4-6,9-10,27"); + conf.set("third", "34-"); + Configuration.IntegerRanges range = conf.getRange("first", null); + System.out.println("first = " + range); + assertEquals(true, range.isIncluded(0)); + assertEquals(true, range.isIncluded(1)); + assertEquals(true, range.isIncluded(100)); + assertEquals(false, range.isIncluded(101)); + range = conf.getRange("second", null); + System.out.println("second = " + range); + assertEquals(false, range.isIncluded(3)); + assertEquals(true, range.isIncluded(4)); + assertEquals(true, range.isIncluded(6)); + assertEquals(false, range.isIncluded(7)); + assertEquals(false, range.isIncluded(8)); + assertEquals(true, range.isIncluded(9)); + assertEquals(true, range.isIncluded(10)); + assertEquals(false, range.isIncluded(11)); + assertEquals(false, range.isIncluded(26)); + assertEquals(true, range.isIncluded(27)); + assertEquals(false, range.isIncluded(28)); + range = conf.getRange("third", null); + System.out.println("third = " + range); + assertEquals(false, range.isIncluded(33)); + assertEquals(true, range.isIncluded(34)); + assertEquals(true, range.isIncluded(100000000)); + } + + public void testHexValues() throws IOException{ + out=new BufferedWriter(new FileWriter(CONFIG)); + startConfig(); + appendProperty("test.hex1", "0x10"); + appendProperty("test.hex2", "0xF"); + appendProperty("test.hex3", "-0x10"); + endConfig(); + Path fileResource = new Path(CONFIG); + conf.addResource(fileResource); + assertEquals(16, conf.getInt("test.hex1", 0)); + assertEquals(16, conf.getLong("test.hex1", 0)); + assertEquals(15, conf.getInt("test.hex2", 0)); + assertEquals(15, conf.getLong("test.hex2", 0)); + assertEquals(-16, conf.getInt("test.hex3", 0)); + assertEquals(-16, conf.getLong("test.hex3", 0)); + + } + + public void testIntegerValues() throws IOException{ + out=new BufferedWriter(new FileWriter(CONFIG)); + startConfig(); + appendProperty("test.int1", "20"); + appendProperty("test.int2", "020"); + appendProperty("test.int3", "-20"); + endConfig(); + Path fileResource = new Path(CONFIG); + conf.addResource(fileResource); + assertEquals(20, conf.getInt("test.int1", 0)); + assertEquals(20, conf.getLong("test.int1", 0)); + assertEquals(20, conf.getInt("test.int2", 0)); + assertEquals(20, conf.getLong("test.int2", 0)); + assertEquals(-20, conf.getInt("test.int3", 0)); + assertEquals(-20, conf.getLong("test.int3", 0)); + } + + public void testReload() throws IOException { + out=new BufferedWriter(new FileWriter(CONFIG)); + startConfig(); + appendProperty("test.key1", "final-value1", true); + appendProperty("test.key2", "value2"); + endConfig(); + Path fileResource = new Path(CONFIG); + conf.addResource(fileResource); + + out=new BufferedWriter(new FileWriter(CONFIG2)); + startConfig(); + appendProperty("test.key1", "value1"); + appendProperty("test.key3", "value3"); + endConfig(); + Path fileResource1 = new Path(CONFIG2); + conf.addResource(fileResource1); + + // add a few values via set. 
+ conf.set("test.key3", "value4"); + conf.set("test.key4", "value5"); + + assertEquals("final-value1", conf.get("test.key1")); + assertEquals("value2", conf.get("test.key2")); + assertEquals("value4", conf.get("test.key3")); + assertEquals("value5", conf.get("test.key4")); + + // change values in the test file... + out=new BufferedWriter(new FileWriter(CONFIG)); + startConfig(); + appendProperty("test.key1", "final-value1"); + appendProperty("test.key3", "final-value3", true); + endConfig(); + + conf.reloadConfiguration(); + assertEquals("value1", conf.get("test.key1")); + // overlayed property overrides. + assertEquals("value4", conf.get("test.key3")); + assertEquals(null, conf.get("test.key2")); + assertEquals("value5", conf.get("test.key4")); + } + + public void testSize() throws IOException { + Configuration conf = new Configuration(false); + conf.set("a", "A"); + conf.set("b", "B"); + assertEquals(2, conf.size()); + } + + public void testClear() throws IOException { + Configuration conf = new Configuration(false); + conf.set("a", "A"); + conf.set("b", "B"); + conf.clear(); + assertEquals(0, conf.size()); + assertFalse(conf.iterator().hasNext()); + } + + public static void main(String[] argv) throws Exception { + junit.textui.TestRunner.main(new String[]{ + TestConfiguration.class.getName() + }); + } + + static class JsonConfiguration { + JsonProperty[] properties; + + public JsonProperty[] getProperties() { + return properties; + } + + public void setProperties(JsonProperty[] properties) { + this.properties = properties; + } + } + + static class JsonProperty { + String key; + public String getKey() { + return key; + } + public void setKey(String key) { + this.key = key; + } + public String getValue() { + return value; + } + public void setValue(String value) { + this.value = value; + } + public boolean getIsFinal() { + return isFinal; + } + public void setIsFinal(boolean isFinal) { + this.isFinal = isFinal; + } + public String getResource() { + return resource; + } + public void setResource(String resource) { + this.resource = resource; + } + String value; + boolean isFinal; + String resource; + } + + public void testDumpConfiguration () throws IOException { + StringWriter outWriter = new StringWriter(); + Configuration.dumpConfiguration(conf, outWriter); + String jsonStr = outWriter.toString(); + ObjectMapper mapper = new ObjectMapper(); + JsonConfiguration jconf = + mapper.readValue(jsonStr, JsonConfiguration.class); + int defaultLength = jconf.getProperties().length; + + // add 3 keys to the existing configuration properties + out=new BufferedWriter(new FileWriter(CONFIG)); + startConfig(); + appendProperty("test.key1", "value1"); + appendProperty("test.key2", "value2",true); + appendProperty("test.key3", "value3"); + endConfig(); + Path fileResource = new Path(CONFIG); + conf.addResource(fileResource); + out.close(); + + outWriter = new StringWriter(); + Configuration.dumpConfiguration(conf, outWriter); + jsonStr = outWriter.toString(); + mapper = new ObjectMapper(); + jconf = mapper.readValue(jsonStr, JsonConfiguration.class); + int length = jconf.getProperties().length; + // check for consistency in the number of properties parsed in Json format. 
+ assertEquals(length, defaultLength+3); + + //change few keys in another resource file + out=new BufferedWriter(new FileWriter(CONFIG2)); + startConfig(); + appendProperty("test.key1", "newValue1"); + appendProperty("test.key2", "newValue2"); + endConfig(); + Path fileResource1 = new Path(CONFIG2); + conf.addResource(fileResource1); + out.close(); + + outWriter = new StringWriter(); + Configuration.dumpConfiguration(conf, outWriter); + jsonStr = outWriter.toString(); + mapper = new ObjectMapper(); + jconf = mapper.readValue(jsonStr, JsonConfiguration.class); + + // put the keys and their corresponding attributes into a hashmap for their + // efficient retrieval + HashMap confDump = new HashMap(); + for(JsonProperty prop : jconf.getProperties()) { + confDump.put(prop.getKey(), prop); + } + // check if the value and resource of test.key1 is changed + assertEquals("newValue1", confDump.get("test.key1").getValue()); + assertEquals(false, confDump.get("test.key1").getIsFinal()); + assertEquals(fileResource1.toString(), + confDump.get("test.key1").getResource()); + // check if final parameter test.key2 is not changed, since it is first + // loaded as final parameter + assertEquals("value2", confDump.get("test.key2").getValue()); + assertEquals(true, confDump.get("test.key2").getIsFinal()); + assertEquals(fileResource.toString(), + confDump.get("test.key2").getResource()); + // check for other keys which are not modified later + assertEquals("value3", confDump.get("test.key3").getValue()); + assertEquals(false, confDump.get("test.key3").getIsFinal()); + assertEquals(fileResource.toString(), + confDump.get("test.key3").getResource()); + // check for resource to be "Unknown" for keys which are loaded using 'set' + // and expansion of properties + conf.set("test.key4", "value4"); + conf.set("test.key5", "value5"); + conf.set("test.key6", "${test.key5}"); + outWriter = new StringWriter(); + Configuration.dumpConfiguration(conf, outWriter); + jsonStr = outWriter.toString(); + mapper = new ObjectMapper(); + jconf = mapper.readValue(jsonStr, JsonConfiguration.class); + confDump = new HashMap(); + for(JsonProperty prop : jconf.getProperties()) { + confDump.put(prop.getKey(), prop); + } + assertEquals("value5",confDump.get("test.key6").getValue()); + assertEquals("Unknown", confDump.get("test.key4").getResource()); + outWriter.close(); + } + + public void testDumpConfiguratioWithoutDefaults() throws IOException { + // check for case when default resources are not loaded + Configuration config = new Configuration(false); + StringWriter outWriter = new StringWriter(); + Configuration.dumpConfiguration(config, outWriter); + String jsonStr = outWriter.toString(); + ObjectMapper mapper = new ObjectMapper(); + JsonConfiguration jconf = + mapper.readValue(jsonStr, JsonConfiguration.class); + + //ensure that no properties are loaded. 
+ assertEquals(0, jconf.getProperties().length); + + // add 2 keys + out=new BufferedWriter(new FileWriter(CONFIG)); + startConfig(); + appendProperty("test.key1", "value1"); + appendProperty("test.key2", "value2",true); + endConfig(); + Path fileResource = new Path(CONFIG); + config.addResource(fileResource); + out.close(); + + outWriter = new StringWriter(); + Configuration.dumpConfiguration(config, outWriter); + jsonStr = outWriter.toString(); + mapper = new ObjectMapper(); + jconf = mapper.readValue(jsonStr, JsonConfiguration.class); + + HashMapconfDump = new HashMap(); + for (JsonProperty prop : jconf.getProperties()) { + confDump.put(prop.getKey(), prop); + } + //ensure only 2 keys are loaded + assertEquals(2,jconf.getProperties().length); + //ensure the values are consistent + assertEquals(confDump.get("test.key1").getValue(),"value1"); + assertEquals(confDump.get("test.key2").getValue(),"value2"); + //check the final tag + assertEquals(false, confDump.get("test.key1").getIsFinal()); + assertEquals(true, confDump.get("test.key2").getIsFinal()); + //check the resource for each property + for (JsonProperty prop : jconf.getProperties()) { + assertEquals(fileResource.toString(),prop.getResource()); + } + } + + public void testConcurrentDefaultResourceChange() throws Exception { + + final Exception[] ex = new Exception[1]; + + // Load a lot of default resources. + Thread tload = new Thread() { + @Override + public void run() { + for (int i = 0; i < 500; i++) { + Configuration.addDefaultResource("a" + i); + } + } + }; + + // Create configurations at the same time. + Thread tcreate = new Thread() { + @Override + public void run() { + try { + for (int i = 0; i < 500; i++) { + Configuration conf = new Configuration(); + conf.get("test"); + } + } catch (Exception e) { + ex[0] = e; + } + } + }; + + // Starts the threads together + tload.start(); + tcreate.start(); + + tload.join(); + tcreate.join(); + + // Check exception + assertNull("Should not have thrown exception " + ex[0], ex[0]); + } +} + diff --git a/src/test/org/apache/hadoop/conf/TestJobConf.java b/src/test/org/apache/hadoop/conf/TestJobConf.java new file mode 100644 index 0000000..425c30c --- /dev/null +++ b/src/test/org/apache/hadoop/conf/TestJobConf.java @@ -0,0 +1,179 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
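// A sketch of the JSON shape that Configuration.dumpConfiguration output is
// bound to by the JsonConfiguration/JsonProperty beans in TestConfiguration
// above. The concrete key, value and resource strings are invented for
// illustration, and "DumpShapeSketch" is a hypothetical helper class.
import org.codehaus.jackson.map.ObjectMapper;

public class DumpShapeSketch {

  public static void main(String[] args) throws Exception {
    String json = "{\"properties\":[{\"key\":\"test.key1\",\"value\":\"value1\","
        + "\"isFinal\":false,\"resource\":\"test-config.xml\"}]}";
    // Jackson (the same mapper the test uses) binds each entry of the
    // "properties" array onto a bean with key/value/isFinal/resource fields.
    ObjectMapper mapper = new ObjectMapper();
    System.out.println(mapper.readValue(json, Object.class));
  }
}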
+ */ +package org.apache.hadoop.conf; + +import junit.framework.Assert; +import junit.framework.TestCase; + +import org.apache.hadoop.mapred.JobConf; + +public class TestJobConf extends TestCase { + + public void testProfileParamsDefaults() { + JobConf configuration = new JobConf(); + + Assert.assertNull(configuration.get("mapred.task.profile.params")); + + String result = configuration.getProfileParams(); + + Assert.assertNotNull(result); + Assert.assertTrue(result.contains("file=%s")); + Assert.assertTrue(result.startsWith("-agentlib:hprof")); + } + + public void testProfileParamsSetter() { + JobConf configuration = new JobConf(); + + configuration.setProfileParams("test"); + Assert.assertEquals("test", configuration.get("mapred.task.profile.params")); + } + + public void testProfileParamsGetter() { + JobConf configuration = new JobConf(); + + configuration.set("mapred.task.profile.params", "test"); + Assert.assertEquals("test", configuration.getProfileParams()); + } + + /** + * Testing mapred.task.maxvmem replacement with new values + * + */ + public void testMemoryConfigForMapOrReduceTask(){ + JobConf configuration = new JobConf(); + configuration.set("mapred.job.map.memory.mb",String.valueOf(300)); + configuration.set("mapred.job.reduce.memory.mb",String.valueOf(300)); + Assert.assertEquals(configuration.getMemoryForMapTask(),300); + Assert.assertEquals(configuration.getMemoryForReduceTask(),300); + + configuration.set("mapred.task.maxvmem" , String.valueOf(2*1024 * 1024)); + configuration.set("mapred.job.map.memory.mb",String.valueOf(300)); + configuration.set("mapred.job.reduce.memory.mb",String.valueOf(300)); + Assert.assertEquals(configuration.getMemoryForMapTask(),2); + Assert.assertEquals(configuration.getMemoryForReduceTask(),2); + + configuration = new JobConf(); + configuration.set("mapred.task.maxvmem" , "-1"); + configuration.set("mapred.job.map.memory.mb",String.valueOf(300)); + configuration.set("mapred.job.reduce.memory.mb",String.valueOf(400)); + Assert.assertEquals(configuration.getMemoryForMapTask(), 300); + Assert.assertEquals(configuration.getMemoryForReduceTask(), 400); + + configuration = new JobConf(); + configuration.set("mapred.task.maxvmem" , String.valueOf(2*1024 * 1024)); + configuration.set("mapred.job.map.memory.mb","-1"); + configuration.set("mapred.job.reduce.memory.mb","-1"); + Assert.assertEquals(configuration.getMemoryForMapTask(),2); + Assert.assertEquals(configuration.getMemoryForReduceTask(),2); + + configuration = new JobConf(); + configuration.set("mapred.task.maxvmem" , String.valueOf(-1)); + configuration.set("mapred.job.map.memory.mb","-1"); + configuration.set("mapred.job.reduce.memory.mb","-1"); + Assert.assertEquals(configuration.getMemoryForMapTask(),-1); + Assert.assertEquals(configuration.getMemoryForReduceTask(),-1); + + configuration = new JobConf(); + configuration.set("mapred.task.maxvmem" , String.valueOf(2*1024 * 1024)); + configuration.set("mapred.job.map.memory.mb","3"); + configuration.set("mapred.job.reduce.memory.mb","3"); + Assert.assertEquals(configuration.getMemoryForMapTask(),2); + Assert.assertEquals(configuration.getMemoryForReduceTask(),2); + } + + /** + * Test that negative values for MAPRED_TASK_MAXVMEM_PROPERTY cause + * new configuration keys' values to be used. 
+ */ + + public void testNegativeValueForTaskVmem() { + JobConf configuration = new JobConf(); + + configuration.set(JobConf.MAPRED_TASK_MAXVMEM_PROPERTY, "-3"); + configuration.set("mapred.job.map.memory.mb", "4"); + configuration.set("mapred.job.reduce.memory.mb", "5"); + Assert.assertEquals(4, configuration.getMemoryForMapTask()); + Assert.assertEquals(5, configuration.getMemoryForReduceTask()); + + } + + /** + * Test that negative values for all memory configuration properties causes + * APIs to disable memory limits + */ + + public void testNegativeValuesForMemoryParams() { + JobConf configuration = new JobConf(); + + configuration.set(JobConf.MAPRED_TASK_MAXVMEM_PROPERTY, "-4"); + configuration.set("mapred.job.map.memory.mb", "-5"); + configuration.set("mapred.job.reduce.memory.mb", "-6"); + + Assert.assertEquals(JobConf.DISABLED_MEMORY_LIMIT, + configuration.getMemoryForMapTask()); + Assert.assertEquals(JobConf.DISABLED_MEMORY_LIMIT, + configuration.getMemoryForReduceTask()); + Assert.assertEquals(JobConf.DISABLED_MEMORY_LIMIT, + configuration.getMaxVirtualMemoryForTask()); + } + + /** + * Test deprecated accessor and mutator method for mapred.task.maxvmem + */ + public void testMaxVirtualMemoryForTask() { + JobConf configuration = new JobConf(); + + //get test case + configuration.set("mapred.job.map.memory.mb", String.valueOf(300)); + configuration.set("mapred.job.reduce.memory.mb", String.valueOf(-1)); + Assert.assertEquals( + configuration.getMaxVirtualMemoryForTask(), 300 * 1024 * 1024); + + configuration = new JobConf(); + configuration.set("mapred.job.map.memory.mb", String.valueOf(-1)); + configuration.set("mapred.job.reduce.memory.mb", String.valueOf(200)); + Assert.assertEquals( + configuration.getMaxVirtualMemoryForTask(), 200 * 1024 * 1024); + + configuration = new JobConf(); + configuration.set("mapred.job.map.memory.mb", String.valueOf(-1)); + configuration.set("mapred.job.reduce.memory.mb", String.valueOf(-1)); + configuration.set("mapred.task.maxvmem", String.valueOf(1 * 1024 * 1024)); + Assert.assertEquals( + configuration.getMaxVirtualMemoryForTask(), 1 * 1024 * 1024); + + configuration = new JobConf(); + configuration.set("mapred.task.maxvmem", String.valueOf(1 * 1024 * 1024)); + Assert.assertEquals( + configuration.getMaxVirtualMemoryForTask(), 1 * 1024 * 1024); + + //set test case + + configuration = new JobConf(); + configuration.setMaxVirtualMemoryForTask(2 * 1024 * 1024); + Assert.assertEquals(configuration.getMemoryForMapTask(), 2); + Assert.assertEquals(configuration.getMemoryForReduceTask(), 2); + + configuration = new JobConf(); + configuration.set("mapred.job.map.memory.mb", String.valueOf(300)); + configuration.set("mapred.job.reduce.memory.mb", String.valueOf(400)); + configuration.setMaxVirtualMemoryForTask(2 * 1024 * 1024); + Assert.assertEquals(configuration.getMemoryForMapTask(), 2); + Assert.assertEquals(configuration.getMemoryForReduceTask(), 2); + } +} diff --git a/src/test/org/apache/hadoop/conf/TestNoDefaultsJobConf.java b/src/test/org/apache/hadoop/conf/TestNoDefaultsJobConf.java new file mode 100644 index 0000000..a5941b0 --- /dev/null +++ b/src/test/org/apache/hadoop/conf/TestNoDefaultsJobConf.java @@ -0,0 +1,103 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
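// A minimal sketch of the unit relationship the TestJobConf assertions above
// rely on: the deprecated mapred.task.maxvmem key is expressed in bytes, while
// the newer mapred.job.{map,reduce}.memory.mb keys are expressed in megabytes.
// "MaxVmemConversionSketch" is a hypothetical helper, used only for illustration.
public class MaxVmemConversionSketch {

  public static void main(String[] args) {
    long maxvmemBytes = 2L * 1024 * 1024;          // the value the tests set
    long memoryMb = maxvmemBytes / (1024 * 1024);  // 2, matching the asserts
    System.out.println(memoryMb);
  }
}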
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.conf; + +import junit.framework.Assert; + +import org.apache.hadoop.mapred.*; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.Text; + +import java.io.*; + +/** + * This testcase tests that a JobConf without default values submits jobs + * properly and the JT applies its own default values to it to make the job + * run properly. + */ +public class TestNoDefaultsJobConf extends HadoopTestCase { + + public TestNoDefaultsJobConf() throws IOException { + super(HadoopTestCase.CLUSTER_MR, HadoopTestCase.DFS_FS, 1, 1); + } + + public void testNoDefaults() throws Exception { + JobConf configuration = new JobConf(); + assertTrue(configuration.get("hadoop.tmp.dir", null) != null); + + configuration = new JobConf(false); + assertTrue(configuration.get("hadoop.tmp.dir", null) == null); + + + Path inDir = new Path("testing/jobconf/input"); + Path outDir = new Path("testing/jobconf/output"); + + OutputStream os = getFileSystem().create(new Path(inDir, "text.txt")); + Writer wr = new OutputStreamWriter(os); + wr.write("hello\n"); + wr.write("hello\n"); + wr.close(); + + JobConf conf = new JobConf(false); + + //seeding JT and NN info into non-defaults (empty jobconf) + conf.set("mapred.job.tracker", createJobConf().get("mapred.job.tracker")); + conf.set("fs.default.name", createJobConf().get("fs.default.name")); + + conf.setJobName("mr"); + + conf.setInputFormat(TextInputFormat.class); + + conf.setMapOutputKeyClass(LongWritable.class); + conf.setMapOutputValueClass(Text.class); + + conf.setOutputFormat(TextOutputFormat.class); + conf.setOutputKeyClass(LongWritable.class); + conf.setOutputValueClass(Text.class); + + conf.setMapperClass(org.apache.hadoop.mapred.lib.IdentityMapper.class); + conf.setReducerClass(org.apache.hadoop.mapred.lib.IdentityReducer.class); + + FileInputFormat.setInputPaths(conf, inDir); + + FileOutputFormat.setOutputPath(conf, outDir); + + JobClient.runJob(conf); + + Path[] outputFiles = FileUtil.stat2Paths( + getFileSystem().listStatus(outDir, + new Utils.OutputFileUtils.OutputFilesFilter())); + if (outputFiles.length > 0) { + InputStream is = getFileSystem().open(outputFiles[0]); + BufferedReader reader = new BufferedReader(new InputStreamReader(is)); + String line = reader.readLine(); + int counter = 0; + while (line != null) { + counter++; + assertTrue(line.contains("hello")); + line = reader.readLine(); + } + reader.close(); + assertEquals(2, counter); + } + + } + +} \ No newline at end of file diff --git a/src/test/org/apache/hadoop/conf/TestReconfiguration.java b/src/test/org/apache/hadoop/conf/TestReconfiguration.java new file mode 100644 index 0000000..e78ebbb --- /dev/null +++ b/src/test/org/apache/hadoop/conf/TestReconfiguration.java @@ -0,0 +1,320 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.conf; + +import org.junit.Test; +import org.junit.Before; +import static org.junit.Assert.*; + +import java.util.Collection; +import java.util.Arrays; + +public class TestReconfiguration { + private Configuration conf1; + private Configuration conf2; + + private static final String PROP1 = "test.prop.one"; + private static final String PROP2 = "test.prop.two"; + private static final String PROP3 = "test.prop.three"; + private static final String PROP4 = "test.prop.four"; + private static final String PROP5 = "test.prop.five"; + + private static final String VAL1 = "val1"; + private static final String VAL2 = "val2"; + + @Before + public void setUp () { + conf1 = new Configuration(); + conf2 = new Configuration(); + + // set some test properties + conf1.set(PROP1, VAL1); + conf1.set(PROP2, VAL1); + conf1.set(PROP3, VAL1); + + conf2.set(PROP1, VAL1); // same as conf1 + conf2.set(PROP2, VAL2); // different value as conf1 + // PROP3 not set in conf2 + conf2.set(PROP4, VAL1); // not set in conf1 + + } + + /** + * Test ReconfigurationUtil.getChangedProperties. + */ + @Test + public void testGetChangedProperties() { + Collection changes = + ReconfigurationUtil.getChangedProperties(conf2, conf1); + + assertTrue("expected 3 changed properties but got " + changes.size(), + changes.size() == 3); + + boolean changeFound = false; + boolean unsetFound = false; + boolean setFound = false; + + for (ReconfigurationUtil.PropertyChange c: changes) { + if (c.prop.equals(PROP2) && c.oldVal != null && c.oldVal.equals(VAL1) && + c.newVal != null && c.newVal.equals(VAL2)) { + changeFound = true; + } else if (c.prop.equals(PROP3) && c.oldVal != null && c.oldVal.equals(VAL1) && + c.newVal == null) { + unsetFound = true; + } else if (c.prop.equals(PROP4) && c.oldVal == null && + c.newVal != null && c.newVal.equals(VAL1)) { + setFound = true; + } + } + + assertTrue("not all changes have been applied", + changeFound && unsetFound && setFound); + } + + /** + * a simple reconfigurable class + */ + public static class ReconfigurableDummy extends ReconfigurableBase + implements Runnable { + public volatile boolean running = true; + + public ReconfigurableDummy(Configuration conf) { + super(conf); + } + + /** + * {@inheritDoc} + */ + @Override + public Collection getReconfigurableProperties() { + return Arrays.asList(PROP1, PROP2, PROP4); + } + + /** + * {@inheritDoc} + */ + @Override + public synchronized void reconfigurePropertyImpl(String property, + String newVal) { + // do nothing + } + + /** + * Run until PROP1 is no longer VAL1. + */ + @Override + public void run() { + while (running && getConf().get(PROP1).equals(VAL1)) { + try { + Thread.sleep(1); + } catch (InterruptedException ignore) { + // do nothing + } + } + } + + } + + /** + * Test reconfiguring a Reconfigurable. 
+ */ + @Test + public void testReconfigure() { + ReconfigurableDummy dummy = new ReconfigurableDummy(conf1); + + assertTrue(PROP1 + " set to wrong value ", + dummy.getConf().get(PROP1).equals(VAL1)); + assertTrue(PROP2 + " set to wrong value ", + dummy.getConf().get(PROP2).equals(VAL1)); + assertTrue(PROP3 + " set to wrong value ", + dummy.getConf().get(PROP3).equals(VAL1)); + assertTrue(PROP4 + " set to wrong value ", + dummy.getConf().get(PROP4) == null); + assertTrue(PROP5 + " set to wrong value ", + dummy.getConf().get(PROP5) == null); + + assertTrue(PROP1 + " should be reconfigurable ", + dummy.isPropertyReconfigurable(PROP1)); + assertTrue(PROP2 + " should be reconfigurable ", + dummy.isPropertyReconfigurable(PROP2)); + assertFalse(PROP3 + " should not be reconfigurable ", + dummy.isPropertyReconfigurable(PROP3)); + assertTrue(PROP4 + " should be reconfigurable ", + dummy.isPropertyReconfigurable(PROP4)); + assertFalse(PROP5 + " should not be reconfigurable ", + dummy.isPropertyReconfigurable(PROP5)); + + // change something to the same value as before + { + boolean exceptionCaught = false; + try { + dummy.reconfigureProperty(PROP1, VAL1); + assertTrue(PROP1 + " set to wrong value ", + dummy.getConf().get(PROP1).equals(VAL1)); + } catch (ReconfigurationException e) { + exceptionCaught = true; + } + assertFalse("received unexpected exception", + exceptionCaught); + } + + // change something to null + { + boolean exceptionCaught = false; + try { + dummy.reconfigureProperty(PROP1, null); + assertTrue(PROP1 + "set to wrong value ", + dummy.getConf().get(PROP1) == null); + } catch (ReconfigurationException e) { + exceptionCaught = true; + } + assertFalse("received unexpected exception", + exceptionCaught); + } + + // change something to a different value than before + { + boolean exceptionCaught = false; + try { + dummy.reconfigureProperty(PROP1, VAL2); + assertTrue(PROP1 + "set to wrong value ", + dummy.getConf().get(PROP1).equals(VAL2)); + } catch (ReconfigurationException e) { + exceptionCaught = true; + } + assertFalse("received unexpected exception", + exceptionCaught); + } + + // set unset property to null + { + boolean exceptionCaught = false; + try { + dummy.reconfigureProperty(PROP4, null); + assertTrue(PROP4 + "set to wrong value ", + dummy.getConf().get(PROP4) == null); + } catch (ReconfigurationException e) { + exceptionCaught = true; + } + assertFalse("received unexpected exception", + exceptionCaught); + } + + // set unset property + { + boolean exceptionCaught = false; + try { + dummy.reconfigureProperty(PROP4, VAL1); + assertTrue(PROP4 + "set to wrong value ", + dummy.getConf().get(PROP4).equals(VAL1)); + } catch (ReconfigurationException e) { + exceptionCaught = true; + } + assertFalse("received unexpected exception", + exceptionCaught); + } + + // try to set unset property to null (not reconfigurable) + { + boolean exceptionCaught = false; + try { + dummy.reconfigureProperty(PROP5, null); + } catch (ReconfigurationException e) { + exceptionCaught = true; + } + assertTrue("did not receive expected exception", + exceptionCaught); + } + + // try to set unset property to value (not reconfigurable) + { + boolean exceptionCaught = false; + try { + dummy.reconfigureProperty(PROP5, VAL1); + } catch (ReconfigurationException e) { + exceptionCaught = true; + } + assertTrue("did not receive expected exception", + exceptionCaught); + } + + // try to change property to value (not reconfigurable) + { + boolean exceptionCaught = false; + try { + dummy.reconfigureProperty(PROP3, VAL2); 
+ } catch (ReconfigurationException e) { + exceptionCaught = true; + } + assertTrue("did not receive expected exception", + exceptionCaught); + } + + // try to change property to null (not reconfigurable) + { + boolean exceptionCaught = false; + try { + dummy.reconfigureProperty(PROP3, null); + } catch (ReconfigurationException e) { + exceptionCaught = true; + } + assertTrue("did not receive expected exception", + exceptionCaught); + } + } + + /** + * Test whether configuration changes are visible in another thread. + */ + @Test + public void testThread() throws ReconfigurationException { + ReconfigurableDummy dummy = new ReconfigurableDummy(conf1); + assertTrue(dummy.getConf().get(PROP1).equals(VAL1)); + Thread dummyThread = new Thread(dummy); + dummyThread.start(); + try { + Thread.sleep(500); + } catch (InterruptedException ignore) { + // do nothing + } + dummy.reconfigureProperty(PROP1, VAL2); + + long endWait = System.currentTimeMillis() + 2000; + while (dummyThread.isAlive() && System.currentTimeMillis() < endWait) { + try { + Thread.sleep(50); + } catch (InterruptedException ignore) { + // do nothing + } + } + + assertFalse("dummy thread should not be alive", + dummyThread.isAlive()); + dummy.running = false; + try { + dummyThread.join(); + } catch (InterruptedException ignore) { + // do nothing + } + assertTrue(PROP1 + " is set to wrong value", + dummy.getConf().get(PROP1).equals(VAL2)); + + } + +} \ No newline at end of file diff --git a/src/test/org/apache/hadoop/filecache/TestDistributedCache.java b/src/test/org/apache/hadoop/filecache/TestDistributedCache.java new file mode 100644 index 0000000..d82b2aa --- /dev/null +++ b/src/test/org/apache/hadoop/filecache/TestDistributedCache.java @@ -0,0 +1,130 @@ +package org.apache.hadoop.filecache; + +import java.io.File; +import java.io.IOException; +import java.net.URI; +import java.util.Random; +import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.LocalDirAllocator; +import org.apache.hadoop.fs.Path; + +import junit.framework.TestCase; + +public class TestDistributedCache extends TestCase { + + static final URI LOCAL_FS = URI.create("file:///"); + private static String TEST_CACHE_BASE_DIR = "cachebasedir"; + private static String TEST_ROOT_DIR = + System.getProperty("test.build.data", "/tmp/distributedcache"); + private static String MAPRED_LOCAL_DIR = TEST_ROOT_DIR + "/mapred/local"; + private static final int LOCAL_CACHE_LIMIT = 5 * 1024; //5K + private static final int LOCAL_CACHE_FILES = 2; + private Configuration conf; + private Path firstCacheFile; + private Path secondCacheFile; + private Path thirdCacheFile; + private Path fourthCacheFile; + private FileSystem localfs; + + /** + * @see TestCase#setUp() + */ + @Override + protected void setUp() throws IOException { + conf = new Configuration(); + conf.setLong("local.cache.size", LOCAL_CACHE_LIMIT); + conf.set("mapred.local.dir", MAPRED_LOCAL_DIR); + conf.setLong("local.cache.numbersubdir", LOCAL_CACHE_FILES); + FileUtil.fullyDelete(new File(TEST_CACHE_BASE_DIR)); + FileUtil.fullyDelete(new File(TEST_ROOT_DIR)); + localfs = FileSystem.get(LOCAL_FS, conf); + firstCacheFile = new Path(TEST_ROOT_DIR+"/firstcachefile"); + secondCacheFile = new Path(TEST_ROOT_DIR+"/secondcachefile"); + thirdCacheFile = new Path(TEST_ROOT_DIR+"/thirdcachefile"); + 
fourthCacheFile = new Path(TEST_ROOT_DIR+"/fourthcachefile"); + createTempFile(localfs, firstCacheFile, 4 * 1024); + createTempFile(localfs, secondCacheFile, 2 * 1024); + createTempFile(localfs, thirdCacheFile, 1); + createTempFile(localfs, fourthCacheFile, 1); + } + + /** test delete cache */ + public void testDeleteCache() throws Exception { + // We first test the size of files exceeds the limit + long now = System.currentTimeMillis(); + Path firstLocalCache = DistributedCache.getLocalCache( + firstCacheFile.toUri(), conf, new Path(TEST_CACHE_BASE_DIR), + localfs.getFileStatus(firstCacheFile), + false, now, new Path(TEST_ROOT_DIR), null); + // Release the first cache so that it can be deleted when sweeping + DistributedCache.releaseCache(firstCacheFile.toUri(), conf, now); + DistributedCache.getLocalCache( + secondCacheFile.toUri(), conf, new Path(TEST_CACHE_BASE_DIR), + localfs.getFileStatus(firstCacheFile), + false, now, new Path(TEST_ROOT_DIR), null); + // The total size is about 6 * 1024 which is greater than 5 * 1024. + // So released cache should be deleted. + checkCacheDeletion(localfs, firstLocalCache); + + // Now we test the number of files limit + Path thirdLocalCache = DistributedCache.getLocalCache( + thirdCacheFile.toUri(), conf, new Path(TEST_CACHE_BASE_DIR), + localfs.getFileStatus(firstCacheFile), + false, now, new Path(TEST_ROOT_DIR), null); + // Release the third cache so that it can be deleted when sweeping + DistributedCache.releaseCache(thirdCacheFile.toUri(), conf, now); + DistributedCache.getLocalCache( + fourthCacheFile.toUri(), conf, new Path(TEST_CACHE_BASE_DIR), + localfs.getFileStatus(firstCacheFile), + false, now, new Path(TEST_ROOT_DIR), null); + // The total number of caches is now 3 which is greater than 2. + // So released cache should be deleted. + checkCacheDeletion(localfs, thirdLocalCache); + } + + /** + * Periodically checks if a file is there, return if the file is no longer + * there. Fails the test if a files is there for 5 minutes. + */ + private void checkCacheDeletion(FileSystem fs, Path cache) throws Exception { + // Check every 100ms to see if the cache is deleted + boolean cacheExists = true; + for (int i = 0; i < 3000; i++) { + if (!fs.exists(cache)) { + cacheExists = false; + break; + } + TimeUnit.MILLISECONDS.sleep(100L); + } + // If the cache is still there after 5 minutes, test fails. + assertFalse("DistributedCache failed deleting old cache", + cacheExists); + } + + private void createTempFile(FileSystem fs, Path p, int size) throws IOException { + FSDataOutputStream out = fs.create(p); + byte[] toWrite = new byte[size]; + new Random().nextBytes(toWrite); + out.write(toWrite); + out.close(); + FileSystem.LOG.info("created: " + p + ", size=" + size); + } + + /** + * @see TestCase#tearDown() + */ + @Override + protected void tearDown() throws IOException { + localfs.delete(firstCacheFile, true); + localfs.delete(secondCacheFile, true); + localfs.delete(thirdCacheFile, true); + localfs.delete(fourthCacheFile, true); + localfs.close(); + } +} diff --git a/src/test/org/apache/hadoop/fs/AccumulatingReducer.java b/src/test/org/apache/hadoop/fs/AccumulatingReducer.java new file mode 100644 index 0000000..d350e12 --- /dev/null +++ b/src/test/org/apache/hadoop/fs/AccumulatingReducer.java @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
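// Arithmetic behind testDeleteCache above, stated as a sketch using the sizes
// from its setUp(): the local cache is capped at 5 * 1024 bytes and at 2
// cached files. "CacheLimitSketch" is a hypothetical helper for illustration.
public class CacheLimitSketch {

  public static void main(String[] args) {
    long sizeLimit = 5 * 1024;
    long cachedBytes = 4 * 1024 + 2 * 1024;        // first + second cache file
    System.out.println(cachedBytes > sizeLimit);   // true -> released cache is swept

    int fileLimit = 2;
    int cachedFiles = 3;                           // second, third and fourth files
    System.out.println(cachedFiles > fileLimit);   // true -> released cache is swept
  }
}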
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import java.io.IOException; +import java.util.Iterator; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.*; + +/** + * Reducer that accumulates values based on their type. + *

+ * The type is specified in the key part of the key-value pair
+ * as a prefix to the key in the following way
+ * <p>
+ * type:key
+ * <p>
+ * The values are accumulated according to the types:
+ * <ul>
+ * <li>s: - string, concatenate</li>
+ * <li>f: - float, sum</li>
+ * <li>l: - long, sum</li>
+ * </ul>
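+ * <p>
+ * For example (an illustrative sketch; the exact keys below are invented but
+ * mirror how DFSCIOTest's collectStats feeds this reducer), a mapper could emit:
+ * <pre>
+ *   output.collect(new Text("l:tasks"), new Text("1"));     // summed as long
+ *   output.collect(new Text("f:rate"), new Text("12.5"));   // summed as float
+ *   output.collect(new Text("s:host"), new Text("node01")); // concatenated
+ * </pre>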
+ * + */ +@SuppressWarnings("deprecation") +public class AccumulatingReducer extends MapReduceBase + implements Reducer { + static final String VALUE_TYPE_LONG = "l:"; + static final String VALUE_TYPE_FLOAT = "f:"; + static final String VALUE_TYPE_STRING = "s:"; + private static final Log LOG = LogFactory.getLog(AccumulatingReducer.class); + + protected String hostName; + + public AccumulatingReducer () { + LOG.info("Starting AccumulatingReducer !!!"); + try { + hostName = java.net.InetAddress.getLocalHost().getHostName(); + } catch(Exception e) { + hostName = "localhost"; + } + LOG.info("Starting AccumulatingReducer on " + hostName); + } + + public void reduce(Text key, + Iterator values, + OutputCollector output, + Reporter reporter + ) throws IOException { + String field = key.toString(); + + reporter.setStatus("starting " + field + " ::host = " + hostName); + + // concatenate strings + if (field.startsWith(VALUE_TYPE_STRING)) { + StringBuffer sSum = new StringBuffer(); + while (values.hasNext()) + sSum.append(values.next().toString()).append(";"); + output.collect(key, new Text(sSum.toString())); + reporter.setStatus("finished " + field + " ::host = " + hostName); + return; + } + // sum long values + if (field.startsWith(VALUE_TYPE_FLOAT)) { + float fSum = 0; + while (values.hasNext()) + fSum += Float.parseFloat(values.next().toString()); + output.collect(key, new Text(String.valueOf(fSum))); + reporter.setStatus("finished " + field + " ::host = " + hostName); + return; + } + // sum long values + if (field.startsWith(VALUE_TYPE_LONG)) { + long lSum = 0; + while (values.hasNext()) { + lSum += Long.parseLong(values.next().toString()); + } + output.collect(key, new Text(String.valueOf(lSum))); + } + reporter.setStatus("finished " + field + " ::host = " + hostName); + } +} diff --git a/src/test/org/apache/hadoop/fs/DFSCIOTest.java b/src/test/org/apache/hadoop/fs/DFSCIOTest.java new file mode 100644 index 0000000..32da4ac --- /dev/null +++ b/src/test/org/apache/hadoop/fs/DFSCIOTest.java @@ -0,0 +1,560 @@ + /** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs; + +import java.io.BufferedReader; +import java.io.DataInputStream; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.PrintStream; +import java.util.Date; +import java.util.StringTokenizer; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.SequenceFile.CompressionType; +import org.apache.hadoop.mapred.*; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; +import org.junit.Test; + +/** + * Distributed i/o benchmark. + *

+ * This test writes into or reads from a specified number of files.
+ * File size is specified as a parameter to the test.
+ * Each file is accessed in a separate map task.
+ * <p>
+ * The reducer collects the following statistics:
+ * <ul>
+ * <li>number of tasks completed</li>
+ * <li>number of bytes written/read</li>
+ * <li>execution time</li>
+ * <li>io rate</li>
+ * <li>io rate squared</li>
+ * </ul>
+ *
+ * Finally, the following information is appended to a local file
+ * <ul>
+ * <li>read or write test</li>
+ * <li>date and time the test finished</li>
+ * <li>number of files</li>
+ * <li>total number of bytes processed</li>
+ * <li>throughput in mb/sec (total number of bytes / sum of processing times)</li>
+ * <li>average i/o rate in mb/sec per file</li>
+ * <li>standard i/o rate deviation</li>
+ * </ul>
+ */ +public class DFSCIOTest extends Configured implements Tool { + // Constants + private static final Log LOG = LogFactory.getLog(DFSCIOTest.class); + private static final int TEST_TYPE_READ = 0; + private static final int TEST_TYPE_WRITE = 1; + private static final int TEST_TYPE_CLEANUP = 2; + private static final int DEFAULT_BUFFER_SIZE = 1000000; + private static final String BASE_FILE_NAME = "test_io_"; + private static final String DEFAULT_RES_FILE_NAME = "DFSCIOTest_results.log"; + + private static Configuration fsConfig = new Configuration(); + private static final long MEGA = 0x100000; + private static String TEST_ROOT_DIR = System.getProperty("test.build.data","/benchmarks/DFSCIOTest"); + private static Path CONTROL_DIR = new Path(TEST_ROOT_DIR, "io_control"); + private static Path WRITE_DIR = new Path(TEST_ROOT_DIR, "io_write"); + private static Path READ_DIR = new Path(TEST_ROOT_DIR, "io_read"); + private static Path DATA_DIR = new Path(TEST_ROOT_DIR, "io_data"); + + private static Path HDFS_TEST_DIR = new Path("/tmp/DFSCIOTest"); + private static String HDFS_LIB_VERSION = System.getProperty("libhdfs.version", "1"); + private static String CHMOD = new String("chmod"); + private static Path HDFS_SHLIB = new Path(HDFS_TEST_DIR + "/libhdfs.so." + HDFS_LIB_VERSION); + private static Path HDFS_READ = new Path(HDFS_TEST_DIR + "/hdfs_read"); + private static Path HDFS_WRITE = new Path(HDFS_TEST_DIR + "/hdfs_write"); + + /** + * Run the test with default parameters. + * + * @throws Exception + */ + @Test + public void testIOs() throws Exception { + testIOs(10, 10); + } + + /** + * Run the test with the specified parameters. + * + * @param fileSize file size + * @param nrFiles number of files + * @throws IOException + */ + public static void testIOs(int fileSize, int nrFiles) + throws IOException { + + FileSystem fs = FileSystem.get(fsConfig); + + createControlFile(fs, fileSize, nrFiles); + writeTest(fs); + readTest(fs); + } + + private static void createControlFile( + FileSystem fs, + int fileSize, // in MB + int nrFiles + ) throws IOException { + LOG.info("creating control file: "+fileSize+" mega bytes, "+nrFiles+" files"); + + fs.delete(CONTROL_DIR, true); + + for(int i=0; i < nrFiles; i++) { + String name = getFileName(i); + Path controlFile = new Path(CONTROL_DIR, "in_file_" + name); + SequenceFile.Writer writer = null; + try { + writer = SequenceFile.createWriter(fs, fsConfig, controlFile, + Text.class, LongWritable.class, + CompressionType.NONE); + writer.append(new Text(name), new LongWritable(fileSize)); + } catch(Exception e) { + throw new IOException(e.getLocalizedMessage()); + } finally { + if (writer != null) + writer.close(); + writer = null; + } + } + LOG.info("created control files for: "+nrFiles+" files"); + } + + private static String getFileName(int fIdx) { + return BASE_FILE_NAME + Integer.toString(fIdx); + } + + /** + * Write/Read mapper base class. + *

+ * Collects the following statistics per task:
+ * <ul>
+ * <li>number of tasks completed</li>
+ * <li>number of bytes written/read</li>
+ * <li>execution time</li>
+ * <li>i/o rate</li>
+ * <li>i/o rate squared</li>
+ * </ul>
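Each statistic above is emitted by collectStats() as a typed (key, value) Text pair and summed across all map tasks by AccumulatingReducer before analyzeResult() reads the totals back. The sketch below simulates that flow; the "l:"/"f:" key prefixes are an assumption about the AccumulatingReducer.VALUE_TYPE_LONG/VALUE_TYPE_FLOAT constants, since the code in this file only relies on the ":tasks", ":size", ":time", ":rate", ":sqrate" suffixes.

    // Editor's sketch: per-task metrics summed per key (assumed reducer behaviour)
    // and then matched by suffix, as analyzeResult() does with attr.endsWith(":tasks").
    import java.util.LinkedHashMap;
    import java.util.Map;

    public class StatFlowSketch {
      public static void main(String[] args) {
        String[][] emitted = {                        // output of two map tasks' collectStats()
            {"l:tasks", "1"}, {"l:size", "1048576"}, {"l:time", "420"}, {"f:rate", "2496"},
            {"l:tasks", "1"}, {"l:size", "1048576"}, {"l:time", "380"}, {"f:rate", "2760"}};
        Map<String, Double> totals = new LinkedHashMap<>();
        for (String[] kv : emitted)                   // reducer: sum numeric values per key
          totals.merge(kv[0], Double.parseDouble(kv[1]), Double::sum);
        for (Map.Entry<String, Double> e : totals.entrySet())
          if (e.getKey().endsWith(":tasks"))          // suffix match, as in analyzeResult()
            System.out.println("tasks = " + e.getValue().longValue());
      }
    }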
+ */ + private abstract static class IOStatMapper extends IOMapperBase { + IOStatMapper() { + } + + void collectStats(OutputCollector output, + String name, + long execTime, + Long objSize) throws IOException { + long totalSize = objSize.longValue(); + float ioRateMbSec = (float)totalSize * 1000 / (execTime * MEGA); + LOG.info("Number of bytes processed = " + totalSize); + LOG.info("Exec time = " + execTime); + LOG.info("IO rate = " + ioRateMbSec); + + output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "tasks"), + new Text(String.valueOf(1))); + output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "size"), + new Text(String.valueOf(totalSize))); + output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "time"), + new Text(String.valueOf(execTime))); + output.collect(new Text(AccumulatingReducer.VALUE_TYPE_FLOAT + "rate"), + new Text(String.valueOf(ioRateMbSec*1000))); + output.collect(new Text(AccumulatingReducer.VALUE_TYPE_FLOAT + "sqrate"), + new Text(String.valueOf(ioRateMbSec*ioRateMbSec*1000))); + } + } + + /** + * Write mapper class. + */ + public static class WriteMapper extends IOStatMapper { + + public WriteMapper() { + super(); + for(int i=0; i < bufferSize; i++) + buffer[i] = (byte)('0' + i % 50); + } + + public Long doIO(Reporter reporter, + String name, + long totalSize + ) throws IOException { + // create file + totalSize *= MEGA; + + // create instance of local filesystem + FileSystem localFS = FileSystem.getLocal(fsConfig); + + try { + // native runtime + Runtime runTime = Runtime.getRuntime(); + + // copy the dso and executable from dfs and chmod them + synchronized (this) { + localFS.delete(HDFS_TEST_DIR, true); + if (!(localFS.mkdirs(HDFS_TEST_DIR))) { + throw new IOException("Failed to create " + HDFS_TEST_DIR + " on local filesystem"); + } + } + + synchronized (this) { + if (!localFS.exists(HDFS_SHLIB)) { + FileUtil.copy(fs, HDFS_SHLIB, localFS, HDFS_SHLIB, false, fsConfig); + + String chmodCmd = new String(CHMOD + " a+x " + HDFS_SHLIB); + Process process = runTime.exec(chmodCmd); + int exitStatus = process.waitFor(); + if (exitStatus != 0) { + throw new IOException(chmodCmd + ": Failed with exitStatus: " + exitStatus); + } + } + } + + synchronized (this) { + if (!localFS.exists(HDFS_WRITE)) { + FileUtil.copy(fs, HDFS_WRITE, localFS, HDFS_WRITE, false, fsConfig); + + String chmodCmd = new String(CHMOD + " a+x " + HDFS_WRITE); + Process process = runTime.exec(chmodCmd); + int exitStatus = process.waitFor(); + if (exitStatus != 0) { + throw new IOException(chmodCmd + ": Failed with exitStatus: " + exitStatus); + } + } + } + + // exec the C program + Path outFile = new Path(DATA_DIR, name); + String writeCmd = new String(HDFS_WRITE + " " + outFile + " " + totalSize + " " + bufferSize); + Process process = runTime.exec(writeCmd, null, new File(HDFS_TEST_DIR.toString())); + int exitStatus = process.waitFor(); + if (exitStatus != 0) { + throw new IOException(writeCmd + ": Failed with exitStatus: " + exitStatus); + } + } catch (InterruptedException interruptedException) { + reporter.setStatus(interruptedException.toString()); + } finally { + localFS.close(); + } + return new Long(totalSize); + } + } + + private static void writeTest(FileSystem fs) + throws IOException { + + fs.delete(DATA_DIR, true); + fs.delete(WRITE_DIR, true); + + runIOTest(WriteMapper.class, WRITE_DIR); + } + + private static void runIOTest( Class mapperClass, + Path outputDir + ) throws IOException { + JobConf job = new JobConf(fsConfig, DFSCIOTest.class); + + 
FileInputFormat.setInputPaths(job, CONTROL_DIR); + job.setInputFormat(SequenceFileInputFormat.class); + + job.setMapperClass(mapperClass); + job.setReducerClass(AccumulatingReducer.class); + + FileOutputFormat.setOutputPath(job, outputDir); + job.setOutputKeyClass(Text.class); + job.setOutputValueClass(Text.class); + job.setNumReduceTasks(1); + JobClient.runJob(job); + } + + /** + * Read mapper class. + */ + public static class ReadMapper extends IOStatMapper { + + public ReadMapper() { + super(); + } + + public Long doIO(Reporter reporter, + String name, + long totalSize + ) throws IOException { + totalSize *= MEGA; + + // create instance of local filesystem + FileSystem localFS = FileSystem.getLocal(fsConfig); + + try { + // native runtime + Runtime runTime = Runtime.getRuntime(); + + // copy the dso and executable from dfs + synchronized (this) { + localFS.delete(HDFS_TEST_DIR, true); + if (!(localFS.mkdirs(HDFS_TEST_DIR))) { + throw new IOException("Failed to create " + HDFS_TEST_DIR + " on local filesystem"); + } + } + + synchronized (this) { + if (!localFS.exists(HDFS_SHLIB)) { + if (!FileUtil.copy(fs, HDFS_SHLIB, localFS, HDFS_SHLIB, false, fsConfig)) { + throw new IOException("Failed to copy " + HDFS_SHLIB + " to local filesystem"); + } + + String chmodCmd = new String(CHMOD + " a+x " + HDFS_SHLIB); + Process process = runTime.exec(chmodCmd); + int exitStatus = process.waitFor(); + if (exitStatus != 0) { + throw new IOException(chmodCmd + ": Failed with exitStatus: " + exitStatus); + } + } + } + + synchronized (this) { + if (!localFS.exists(HDFS_READ)) { + if (!FileUtil.copy(fs, HDFS_READ, localFS, HDFS_READ, false, fsConfig)) { + throw new IOException("Failed to copy " + HDFS_READ + " to local filesystem"); + } + + String chmodCmd = new String(CHMOD + " a+x " + HDFS_READ); + Process process = runTime.exec(chmodCmd); + int exitStatus = process.waitFor(); + + if (exitStatus != 0) { + throw new IOException(chmodCmd + ": Failed with exitStatus: " + exitStatus); + } + } + } + + // exec the C program + Path inFile = new Path(DATA_DIR, name); + String readCmd = new String(HDFS_READ + " " + inFile + " " + totalSize + " " + + bufferSize); + Process process = runTime.exec(readCmd, null, new File(HDFS_TEST_DIR.toString())); + int exitStatus = process.waitFor(); + + if (exitStatus != 0) { + throw new IOException(HDFS_READ + ": Failed with exitStatus: " + exitStatus); + } + } catch (InterruptedException interruptedException) { + reporter.setStatus(interruptedException.toString()); + } finally { + localFS.close(); + } + return new Long(totalSize); + } + } + + private static void readTest(FileSystem fs) throws IOException { + fs.delete(READ_DIR, true); + runIOTest(ReadMapper.class, READ_DIR); + } + + private static void sequentialTest( + FileSystem fs, + int testType, + int fileSize, + int nrFiles + ) throws Exception { + IOStatMapper ioer = null; + if (testType == TEST_TYPE_READ) + ioer = new ReadMapper(); + else if (testType == TEST_TYPE_WRITE) + ioer = new WriteMapper(); + else + return; + for(int i=0; i < nrFiles; i++) + ioer.doIO(Reporter.NULL, + BASE_FILE_NAME+Integer.toString(i), + MEGA*fileSize); + } + + public static void main(String[] args) throws Exception { + int res = ToolRunner.run(new TestDFSIO(), args); + System.exit(res); + } + + private static void analyzeResult( FileSystem fs, + int testType, + long execTime, + String resFileName + ) throws IOException { + Path reduceFile; + if (testType == TEST_TYPE_WRITE) + reduceFile = new Path(WRITE_DIR, "part-00000"); + else + reduceFile 
= new Path(READ_DIR, "part-00000"); + DataInputStream in; + in = new DataInputStream(fs.open(reduceFile)); + + BufferedReader lines; + lines = new BufferedReader(new InputStreamReader(in)); + long tasks = 0; + long size = 0; + long time = 0; + float rate = 0; + float sqrate = 0; + String line; + while((line = lines.readLine()) != null) { + StringTokenizer tokens = new StringTokenizer(line, " \t\n\r\f%"); + String attr = tokens.nextToken(); + if (attr.endsWith(":tasks")) + tasks = Long.parseLong(tokens.nextToken()); + else if (attr.endsWith(":size")) + size = Long.parseLong(tokens. nextToken()); + else if (attr.endsWith(":time")) + time = Long.parseLong(tokens.nextToken()); + else if (attr.endsWith(":rate")) + rate = Float.parseFloat(tokens.nextToken()); + else if (attr.endsWith(":sqrate")) + sqrate = Float.parseFloat(tokens.nextToken()); + } + + double med = rate / 1000 / tasks; + double stdDev = Math.sqrt(Math.abs(sqrate / 1000 / tasks - med*med)); + String resultLines[] = { + "----- DFSCIOTest ----- : " + ((testType == TEST_TYPE_WRITE) ? "write" : + (testType == TEST_TYPE_READ) ? "read" : + "unknown"), + " Date & time: " + new Date(System.currentTimeMillis()), + " Number of files: " + tasks, + "Total MBytes processed: " + size/MEGA, + " Throughput mb/sec: " + size * 1000.0 / (time * MEGA), + "Average IO rate mb/sec: " + med, + " Std IO rate deviation: " + stdDev, + " Test exec time sec: " + (float)execTime / 1000, + "" }; + + PrintStream res = new PrintStream( + new FileOutputStream( + new File(resFileName), true)); + for(int i = 0; i < resultLines.length; i++) { + LOG.info(resultLines[i]); + res.println(resultLines[i]); + } + } + + private static void cleanup(FileSystem fs) throws Exception { + LOG.info("Cleaning up test files"); + fs.delete(new Path(TEST_ROOT_DIR), true); + fs.delete(HDFS_TEST_DIR, true); + } + + @Override + public int run(String[] args) throws Exception { + int testType = TEST_TYPE_READ; + int bufferSize = DEFAULT_BUFFER_SIZE; + int fileSize = 1; + int nrFiles = 1; + String resFileName = DEFAULT_RES_FILE_NAME; + boolean isSequential = false; + + String version="DFSCIOTest.0.0.1"; + String usage = "Usage: DFSCIOTest -read | -write | -clean [-nrFiles N] [-fileSize MB] [-resFile resultFileName] [-bufferSize Bytes] "; + + System.out.println(version); + if (args.length == 0) { + System.err.println(usage); + System.exit(-1); + } + for (int i = 0; i < args.length; i++) { // parse command line + if (args[i].startsWith("-r")) { + testType = TEST_TYPE_READ; + } else if (args[i].startsWith("-w")) { + testType = TEST_TYPE_WRITE; + } else if (args[i].startsWith("-clean")) { + testType = TEST_TYPE_CLEANUP; + } else if (args[i].startsWith("-seq")) { + isSequential = true; + } else if (args[i].equals("-nrFiles")) { + nrFiles = Integer.parseInt(args[++i]); + } else if (args[i].equals("-fileSize")) { + fileSize = Integer.parseInt(args[++i]); + } else if (args[i].equals("-bufferSize")) { + bufferSize = Integer.parseInt(args[++i]); + } else if (args[i].equals("-resFile")) { + resFileName = args[++i]; + } + } + + LOG.info("nrFiles = " + nrFiles); + LOG.info("fileSize (MB) = " + fileSize); + LOG.info("bufferSize = " + bufferSize); + + try { + fsConfig.setInt("test.io.file.buffer.size", bufferSize); + FileSystem fs = FileSystem.get(fsConfig); + + if (testType != TEST_TYPE_CLEANUP) { + fs.delete(HDFS_TEST_DIR, true); + if (!fs.mkdirs(HDFS_TEST_DIR)) { + throw new IOException("Mkdirs failed to create " + + HDFS_TEST_DIR.toString()); + } + + //Copy the executables over to the remote filesystem 
+ String hadoopHome = System.getenv("HADOOP_HOME"); + fs.copyFromLocalFile(new Path(hadoopHome + "/libhdfs/libhdfs.so." + HDFS_LIB_VERSION), + HDFS_SHLIB); + fs.copyFromLocalFile(new Path(hadoopHome + "/libhdfs/hdfs_read"), HDFS_READ); + fs.copyFromLocalFile(new Path(hadoopHome + "/libhdfs/hdfs_write"), HDFS_WRITE); + } + + if (isSequential) { + long tStart = System.currentTimeMillis(); + sequentialTest(fs, testType, fileSize, nrFiles); + long execTime = System.currentTimeMillis() - tStart; + String resultLine = "Seq Test exec time sec: " + (float)execTime / 1000; + LOG.info(resultLine); + return 0; + } + if (testType == TEST_TYPE_CLEANUP) { + cleanup(fs); + return 0; + } + createControlFile(fs, fileSize, nrFiles); + long tStart = System.currentTimeMillis(); + if (testType == TEST_TYPE_WRITE) + writeTest(fs); + if (testType == TEST_TYPE_READ) + readTest(fs); + long execTime = System.currentTimeMillis() - tStart; + + analyzeResult(fs, testType, execTime, resFileName); + } catch(Exception e) { + System.err.print(e.getLocalizedMessage()); + return -1; + } + return 0; + } +} diff --git a/src/test/org/apache/hadoop/fs/DistributedFSCheck.java b/src/test/org/apache/hadoop/fs/DistributedFSCheck.java new file mode 100644 index 0000000..d97fc96 --- /dev/null +++ b/src/test/org/apache/hadoop/fs/DistributedFSCheck.java @@ -0,0 +1,367 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs; + +import java.io.BufferedReader; +import java.io.DataInputStream; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.PrintStream; +import java.util.Date; +import java.util.StringTokenizer; +import java.util.TreeSet; +import java.util.Vector; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.SequenceFile.CompressionType; +import org.apache.hadoop.mapred.*; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; +import org.junit.Test; + +/** + * Distributed checkup of the file system consistency. + *

+ * Test file system consistency by reading each block of each file + * of the specified file tree. + * Report corrupted blocks and general file statistics. + *
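The per-block check described above is driven by the map input that createInputFile() and listSubtree() generate: one (file name, byte offset) record per block of every regular file, so each map task reads exactly one block. The standalone sketch below illustrates that enumeration with made-up sizes; it is an editor's annotation, not part of the patch.

    // Editor's sketch: the (fileName, offset) records listSubtree() appends for one file.
    // Block size and file length are illustrative; the real code uses
    // fs.getDefaultBlockSize() and the file's actual length.
    public class BlockEnumerationSketch {
      public static void main(String[] args) {
        long blockSize = 64L << 20;                   // e.g. a 64 MB default block size
        long fileLength = 150L << 20;                 // a 150 MB file yields 3 records
        for (long offset = 0; offset < fileLength; offset += blockSize)
          System.out.println("/some/file" + "\t" + offset);
      }
    }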

+ * Optionally displays statistics on read performance. + * + */ +public class DistributedFSCheck extends Configured implements Tool { + // Constants + private static final Log LOG = LogFactory.getLog(DistributedFSCheck.class); + private static final int TEST_TYPE_READ = 0; + private static final int TEST_TYPE_CLEANUP = 2; + private static final int DEFAULT_BUFFER_SIZE = 1000000; + private static final String DEFAULT_RES_FILE_NAME = "DistributedFSCheck_results.log"; + private static final long MEGA = 0x100000; + + private static Configuration fsConfig = new Configuration(); + private static Path TEST_ROOT_DIR = new Path(System.getProperty("test.build.data","/benchmarks/DistributedFSCheck")); + private static Path MAP_INPUT_DIR = new Path(TEST_ROOT_DIR, "map_input"); + private static Path READ_DIR = new Path(TEST_ROOT_DIR, "io_read"); + + private FileSystem fs; + private long nrFiles; + + DistributedFSCheck(Configuration conf) throws Exception { + fsConfig = conf; + this.fs = FileSystem.get(conf); + } + + /** + * Run distributed checkup for the entire files system. + * + * @throws Exception + */ + @Test + public void testFSBlocks() throws Exception { + testFSBlocks("/"); + } + + /** + * Run distributed checkup for the specified directory. + * + * @param rootName root directory name + * @throws Exception + */ + public void testFSBlocks(String rootName) throws Exception { + createInputFile(rootName); + runDistributedFSCheck(); + cleanup(); // clean up after all to restore the system state + } + + private void createInputFile(String rootName) throws IOException { + cleanup(); // clean up if previous run failed + + Path inputFile = new Path(MAP_INPUT_DIR, "in_file"); + SequenceFile.Writer writer = + SequenceFile.createWriter(fs, fsConfig, inputFile, + Text.class, LongWritable.class, CompressionType.NONE); + + try { + nrFiles = 0; + listSubtree(new Path(rootName), writer); + } finally { + writer.close(); + } + LOG.info("Created map input files."); + } + + private void listSubtree(Path rootFile, + SequenceFile.Writer writer + ) throws IOException { + FileStatus rootStatus = fs.getFileStatus(rootFile); + listSubtree(rootStatus, writer); + } + + private void listSubtree(FileStatus rootStatus, + SequenceFile.Writer writer + ) throws IOException { + Path rootFile = rootStatus.getPath(); + if (!rootStatus.isDir()) { + nrFiles++; + // For a regular file generate pairs + long blockSize = fs.getDefaultBlockSize(); + long fileLength = rootStatus.getLen(); + for(long offset = 0; offset < fileLength; offset += blockSize) + writer.append(new Text(rootFile.toString()), new LongWritable(offset)); + return; + } + + FileStatus [] children = null; + try { + children = fs.listStatus(rootFile); + } catch (FileNotFoundException fnfe ){ + throw new IOException("Could not get listing for " + rootFile); + } + + for (int i = 0; i < children.length; i++) + listSubtree(children[i], writer); + } + + /** + * DistributedFSCheck mapper class. 
+ */ + public static class DistributedFSCheckMapper extends IOMapperBase { + + public DistributedFSCheckMapper() { + } + + public Object doIO(Reporter reporter, + String name, + long offset + ) throws IOException { + // open file + FSDataInputStream in = null; + try { + in = fs.open(new Path(name)); + } catch(IOException e) { + return name + "@(missing)"; + } + in.seek(offset); + long actualSize = 0; + try { + long blockSize = fs.getDefaultBlockSize(); + reporter.setStatus("reading " + name + "@" + + offset + "/" + blockSize); + for( int curSize = bufferSize; + curSize == bufferSize && actualSize < blockSize; + actualSize += curSize) { + curSize = in.read(buffer, 0, bufferSize); + } + } catch(IOException e) { + LOG.info("Corrupted block detected in \"" + name + "\" at " + offset); + return name + "@" + offset; + } finally { + in.close(); + } + return new Long(actualSize); + } + + void collectStats(OutputCollector output, + String name, + long execTime, + Object corruptedBlock) throws IOException { + output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "blocks"), + new Text(String.valueOf(1))); + + if (corruptedBlock.getClass().getName().endsWith("String")) { + output.collect( + new Text(AccumulatingReducer.VALUE_TYPE_STRING + "badBlocks"), + new Text((String)corruptedBlock)); + return; + } + long totalSize = ((Long)corruptedBlock).longValue(); + float ioRateMbSec = (float)totalSize * 1000 / (execTime * 0x100000); + LOG.info("Number of bytes processed = " + totalSize); + LOG.info("Exec time = " + execTime); + LOG.info("IO rate = " + ioRateMbSec); + + output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "size"), + new Text(String.valueOf(totalSize))); + output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "time"), + new Text(String.valueOf(execTime))); + output.collect(new Text(AccumulatingReducer.VALUE_TYPE_FLOAT + "rate"), + new Text(String.valueOf(ioRateMbSec*1000))); + } + } + + private void runDistributedFSCheck() throws Exception { + JobConf job = new JobConf(fs.getConf(), DistributedFSCheck.class); + + FileInputFormat.setInputPaths(job, MAP_INPUT_DIR); + job.setInputFormat(SequenceFileInputFormat.class); + + job.setMapperClass(DistributedFSCheckMapper.class); + job.setReducerClass(AccumulatingReducer.class); + + FileOutputFormat.setOutputPath(job, READ_DIR); + job.setOutputKeyClass(Text.class); + job.setOutputValueClass(Text.class); + job.setNumReduceTasks(1); + JobClient.runJob(job); + } + + public static void main(String[] args) throws Exception { + int res = ToolRunner.run(new TestDFSIO(), args); + System.exit(res); + } + + private void analyzeResult(long execTime, + String resFileName, + boolean viewStats + ) throws IOException { + Path reduceFile= new Path(READ_DIR, "part-00000"); + DataInputStream in; + in = new DataInputStream(fs.open(reduceFile)); + + BufferedReader lines; + lines = new BufferedReader(new InputStreamReader(in)); + long blocks = 0; + long size = 0; + long time = 0; + float rate = 0; + StringTokenizer badBlocks = null; + long nrBadBlocks = 0; + String line; + while((line = lines.readLine()) != null) { + StringTokenizer tokens = new StringTokenizer(line, " \t\n\r\f%"); + String attr = tokens.nextToken(); + if (attr.endsWith("blocks")) + blocks = Long.parseLong(tokens.nextToken()); + else if (attr.endsWith("size")) + size = Long.parseLong(tokens.nextToken()); + else if (attr.endsWith("time")) + time = Long.parseLong(tokens.nextToken()); + else if (attr.endsWith("rate")) + rate = Float.parseFloat(tokens.nextToken()); + else if 
(attr.endsWith("badBlocks")) { + badBlocks = new StringTokenizer(tokens.nextToken(), ";"); + nrBadBlocks = badBlocks.countTokens(); + } + } + + Vector resultLines = new Vector(); + resultLines.add( "----- DistributedFSCheck ----- : "); + resultLines.add( " Date & time: " + new Date(System.currentTimeMillis())); + resultLines.add( " Total number of blocks: " + blocks); + resultLines.add( " Total number of files: " + nrFiles); + resultLines.add( "Number of corrupted blocks: " + nrBadBlocks); + + int nrBadFilesPos = resultLines.size(); + TreeSet badFiles = new TreeSet(); + long nrBadFiles = 0; + if (nrBadBlocks > 0) { + resultLines.add(""); + resultLines.add("----- Corrupted Blocks (file@offset) ----- : "); + while(badBlocks.hasMoreTokens()) { + String curBlock = badBlocks.nextToken(); + resultLines.add(curBlock); + badFiles.add(curBlock.substring(0, curBlock.indexOf('@'))); + } + nrBadFiles = badFiles.size(); + } + + resultLines.insertElementAt(" Number of corrupted files: " + nrBadFiles, nrBadFilesPos); + + if (viewStats) { + resultLines.add(""); + resultLines.add("----- Performance ----- : "); + resultLines.add(" Total MBytes read: " + size/MEGA); + resultLines.add(" Throughput mb/sec: " + (float)size * 1000.0 / (time * MEGA)); + resultLines.add(" Average IO rate mb/sec: " + rate / 1000 / blocks); + resultLines.add(" Test exec time sec: " + (float)execTime / 1000); + } + + PrintStream res = new PrintStream( + new FileOutputStream( + new File(resFileName), true)); + for(int i = 0; i < resultLines.size(); i++) { + String cur = resultLines.get(i); + LOG.info(cur); + res.println(cur); + } + } + + private void cleanup() throws IOException { + LOG.info("Cleaning up test files"); + fs.delete(TEST_ROOT_DIR, true); + } + + @Override + public int run(String[] args) throws Exception { + int testType = TEST_TYPE_READ; + int bufferSize = DEFAULT_BUFFER_SIZE; + String resFileName = DEFAULT_RES_FILE_NAME; + String rootName = "/"; + boolean viewStats = false; + + String usage = "Usage: DistributedFSCheck [-root name] [-clean] [-resFile resultFileName] [-bufferSize Bytes] [-stats] "; + + if (args.length == 1 && args[0].startsWith("-h")) { + System.err.println(usage); + return -1; + } + for(int i = 0; i < args.length; i++) { // parse command line + if (args[i].equals("-root")) { + rootName = args[++i]; + } else if (args[i].startsWith("-clean")) { + testType = TEST_TYPE_CLEANUP; + } else if (args[i].equals("-bufferSize")) { + bufferSize = Integer.parseInt(args[++i]); + } else if (args[i].equals("-resFile")) { + resFileName = args[++i]; + } else if (args[i].startsWith("-stat")) { + viewStats = true; + } + } + + LOG.info("root = " + rootName); + LOG.info("bufferSize = " + bufferSize); + + Configuration conf = new Configuration(); + conf.setInt("test.io.file.buffer.size", bufferSize); + DistributedFSCheck test = new DistributedFSCheck(conf); + + if (testType == TEST_TYPE_CLEANUP) { + test.cleanup(); + return 0; + } + test.createInputFile(rootName); + long tStart = System.currentTimeMillis(); + test.runDistributedFSCheck(); + long execTime = System.currentTimeMillis() - tStart; + + test.analyzeResult(execTime, resFileName, viewStats); + // test.cleanup(); // clean up after all to restore the system state + return 0; + } +} diff --git a/src/test/org/apache/hadoop/fs/FileSystemContractBaseTest.java b/src/test/org/apache/hadoop/fs/FileSystemContractBaseTest.java new file mode 100644 index 0000000..0076b02 --- /dev/null +++ b/src/test/org/apache/hadoop/fs/FileSystemContractBaseTest.java @@ -0,0 +1,462 @@ +/** + * 
Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs; + +import java.io.FileNotFoundException; +import java.io.IOException; + +import junit.framework.TestCase; + +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; + +/** + *

+ * A collection of tests for the contract of the {@link FileSystem}. + * This test should be used for general-purpose implementations of + * {@link FileSystem}, that is, implementations that provide implementations + * of all of the functionality of {@link FileSystem}. + *

+ *

+ * To test a given {@link FileSystem} implementation create a subclass of this + * test and override {@link #setUp()} to initialize the fs + * {@link FileSystem} instance variable. + *

+ */ +public abstract class FileSystemContractBaseTest extends TestCase { + + protected FileSystem fs; + private byte[] data = new byte[getBlockSize() * 2]; // two blocks of data + { + for (int i = 0; i < data.length; i++) { + data[i] = (byte) (i % 10); + } + } + + @Override + protected void tearDown() throws Exception { + fs.delete(path("/test"), true); + } + + protected int getBlockSize() { + return 1024; + } + + protected String getDefaultWorkingDirectory() { + return "/user/" + System.getProperty("user.name"); + } + + protected boolean renameSupported() { + return true; + } + + public void testWorkingDirectory() throws Exception { + + Path workDir = path(getDefaultWorkingDirectory()); + assertEquals(workDir, fs.getWorkingDirectory()); + + fs.setWorkingDirectory(path(".")); + assertEquals(workDir, fs.getWorkingDirectory()); + + fs.setWorkingDirectory(path("..")); + assertEquals(workDir.getParent(), fs.getWorkingDirectory()); + + Path relativeDir = path("hadoop"); + fs.setWorkingDirectory(relativeDir); + assertEquals(relativeDir, fs.getWorkingDirectory()); + + Path absoluteDir = path("/test/hadoop"); + fs.setWorkingDirectory(absoluteDir); + assertEquals(absoluteDir, fs.getWorkingDirectory()); + + } + + public void testMkdirs() throws Exception { + Path testDir = path("/test/hadoop"); + assertFalse(fs.exists(testDir)); + assertFalse(fs.isFile(testDir)); + + assertTrue(fs.mkdirs(testDir)); + + assertTrue(fs.exists(testDir)); + assertFalse(fs.isFile(testDir)); + + assertTrue(fs.mkdirs(testDir)); + + assertTrue(fs.exists(testDir)); + assertFalse(fs.isFile(testDir)); + + Path parentDir = testDir.getParent(); + assertTrue(fs.exists(parentDir)); + assertFalse(fs.isFile(parentDir)); + + Path grandparentDir = parentDir.getParent(); + assertTrue(fs.exists(grandparentDir)); + assertFalse(fs.isFile(grandparentDir)); + + } + + public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception { + Path testDir = path("/test/hadoop"); + assertFalse(fs.exists(testDir)); + assertTrue(fs.mkdirs(testDir)); + assertTrue(fs.exists(testDir)); + + createFile(path("/test/hadoop/file")); + + Path testSubDir = path("/test/hadoop/file/subdir"); + try { + fs.mkdirs(testSubDir); + fail("Should throw IOException."); + } catch (IOException e) { + // expected + } + assertFalse(fs.exists(testSubDir)); + + Path testDeepSubDir = path("/test/hadoop/file/deep/sub/dir"); + try { + fs.mkdirs(testDeepSubDir); + fail("Should throw IOException."); + } catch (IOException e) { + // expected + } + assertFalse(fs.exists(testDeepSubDir)); + + } + + public void testGetFileStatusThrowsExceptionForNonExistentFile() + throws Exception { + try { + fs.getFileStatus(path("/test/hadoop/file")); + fail("Should throw FileNotFoundException"); + } catch (FileNotFoundException e) { + // expected + } + } + + public void testListStatusReturnsNullForNonExistentFile() throws Exception { + assertNull(fs.listStatus(path("/test/hadoop/file"))); + } + + public void testListStatus() throws Exception { + Path[] testDirs = { path("/test/hadoop/a"), + path("/test/hadoop/b"), + path("/test/hadoop/c/1"), }; + assertFalse(fs.exists(testDirs[0])); + + for (Path path : testDirs) { + assertTrue(fs.mkdirs(path)); + } + + FileStatus[] paths = fs.listStatus(path("/test")); + assertEquals(1, paths.length); + assertEquals(path("/test/hadoop"), paths[0].getPath()); + + paths = fs.listStatus(path("/test/hadoop")); + assertEquals(3, paths.length); + assertEquals(path("/test/hadoop/a"), paths[0].getPath()); + assertEquals(path("/test/hadoop/b"), 
paths[1].getPath()); + assertEquals(path("/test/hadoop/c"), paths[2].getPath()); + + paths = fs.listStatus(path("/test/hadoop/a")); + assertEquals(0, paths.length); + } + + public void testWriteReadAndDeleteEmptyFile() throws Exception { + writeReadAndDelete(0); + } + + public void testWriteReadAndDeleteHalfABlock() throws Exception { + writeReadAndDelete(getBlockSize() / 2); + } + + public void testWriteReadAndDeleteOneBlock() throws Exception { + writeReadAndDelete(getBlockSize()); + } + + public void testWriteReadAndDeleteOneAndAHalfBlocks() throws Exception { + writeReadAndDelete(getBlockSize() + (getBlockSize() / 2)); + } + + public void testWriteReadAndDeleteTwoBlocks() throws Exception { + writeReadAndDelete(getBlockSize() * 2); + } + + private void writeReadAndDelete(int len) throws IOException { + Path path = path("/test/hadoop/file"); + + fs.mkdirs(path.getParent()); + + FSDataOutputStream out = fs.create(path, false, + fs.getConf().getInt("io.file.buffer.size", 4096), + (short) 1, getBlockSize()); + out.write(data, 0, len); + out.close(); + + assertTrue("Exists", fs.exists(path)); + assertEquals("Length", len, fs.getFileStatus(path).getLen()); + + FSDataInputStream in = fs.open(path); + byte[] buf = new byte[len]; + in.readFully(0, buf); + in.close(); + + assertEquals(len, buf.length); + for (int i = 0; i < buf.length; i++) { + assertEquals("Position " + i, data[i], buf[i]); + } + + assertTrue("Deleted", fs.delete(path, false)); + + assertFalse("No longer exists", fs.exists(path)); + + } + + public void testOverwrite() throws IOException { + Path path = path("/test/hadoop/file"); + + fs.mkdirs(path.getParent()); + + createFile(path); + + assertTrue("Exists", fs.exists(path)); + assertEquals("Length", data.length, fs.getFileStatus(path).getLen()); + + try { + fs.create(path, false); + fail("Should throw IOException."); + } catch (IOException e) { + // Expected + } + + FSDataOutputStream out = fs.create(path, true); + out.write(data, 0, data.length); + out.close(); + + assertTrue("Exists", fs.exists(path)); + assertEquals("Length", data.length, fs.getFileStatus(path).getLen()); + + } + + public void testWriteInNonExistentDirectory() throws IOException { + Path path = path("/test/hadoop/file"); + assertFalse("Parent doesn't exist", fs.exists(path.getParent())); + createFile(path); + + assertTrue("Exists", fs.exists(path)); + assertEquals("Length", data.length, fs.getFileStatus(path).getLen()); + assertTrue("Parent exists", fs.exists(path.getParent())); + } + + public void testDeleteNonExistentFile() throws IOException { + Path path = path("/test/hadoop/file"); + assertFalse("Doesn't exist", fs.exists(path)); + assertFalse("No deletion", fs.delete(path, true)); + } + + public void testDeleteRecursively() throws IOException { + Path dir = path("/tmp/test/hadoop"); + Path file = path("/tmp/test/hadoop/file"); + Path subdir = path("/tmp/test/hadoop/subdir"); + + createFile(file); + assertTrue("Created subdir", fs.mkdirs(subdir)); + + assertTrue("File exists", fs.exists(file)); + assertTrue("Dir exists", fs.exists(dir)); + assertTrue("Subdir exists", fs.exists(subdir)); + + try { + fs.delete(dir, false); + fail("Should throw IOException."); + } catch (IOException e) { + // expected + } + assertTrue("File still exists", fs.exists(file)); + assertTrue("Dir still exists", fs.exists(dir)); + assertTrue("Subdir still exists", fs.exists(subdir)); + + assertTrue("Deleted", fs.delete(dir, true)); + assertFalse("File doesn't exist", fs.exists(file)); + assertFalse("Dir doesn't exist", 
fs.exists(dir)); + assertFalse("Subdir doesn't exist", fs.exists(subdir)); + } + + public void testDeleteEmptyDirectory() throws IOException { + Path dir = path("/tmp/test/hadoop"); + assertTrue(fs.mkdirs(dir)); + assertTrue("Dir exists", fs.exists(dir)); + assertTrue("Deleted", fs.delete(dir, false)); + assertFalse("Dir doesn't exist", fs.exists(dir)); + } + + public void testRenameNonExistentPath() throws Exception { + if (!renameSupported()) return; + + Path src = path("/test/hadoop/path"); + Path dst = path("/test/new/newpath"); + rename(src, dst, false, false, false); + } + + public void testRenameFileMoveToNonExistentDirectory() throws Exception { + if (!renameSupported()) return; + + Path src = path("/test/hadoop/file"); + createFile(src); + Path dst = path("/test/new/newfile"); + rename(src, dst, false, true, false); + } + + public void testRenameFileMoveToExistingDirectory() throws Exception { + if (!renameSupported()) return; + + Path src = path("/test/hadoop/file"); + createFile(src); + Path dst = path("/test/new/newfile"); + fs.mkdirs(dst.getParent()); + rename(src, dst, true, false, true); + } + + public void testRenameFileAsExistingFile() throws Exception { + if (!renameSupported()) return; + + Path src = path("/test/hadoop/file"); + createFile(src); + Path dst = path("/test/new/newfile"); + createFile(dst); + rename(src, dst, false, true, true); + } + + public void testRenameFileAsExistingDirectory() throws Exception { + if (!renameSupported()) return; + + Path src = path("/test/hadoop/file"); + createFile(src); + Path dst = path("/test/new/newdir"); + fs.mkdirs(dst); + rename(src, dst, true, false, true); + assertTrue("Destination changed", + fs.exists(path("/test/new/newdir/file"))); + } + + public void testRenameDirectoryMoveToNonExistentDirectory() + throws Exception { + if (!renameSupported()) return; + + Path src = path("/test/hadoop/dir"); + fs.mkdirs(src); + Path dst = path("/test/new/newdir"); + rename(src, dst, false, true, false); + } + + public void testRenameDirectoryMoveToExistingDirectory() throws Exception { + if (!renameSupported()) return; + + Path src = path("/test/hadoop/dir"); + fs.mkdirs(src); + createFile(path("/test/hadoop/dir/file1")); + createFile(path("/test/hadoop/dir/subdir/file2")); + + Path dst = path("/test/new/newdir"); + fs.mkdirs(dst.getParent()); + rename(src, dst, true, false, true); + + assertFalse("Nested file1 exists", + fs.exists(path("/test/hadoop/dir/file1"))); + assertFalse("Nested file2 exists", + fs.exists(path("/test/hadoop/dir/subdir/file2"))); + assertTrue("Renamed nested file1 exists", + fs.exists(path("/test/new/newdir/file1"))); + assertTrue("Renamed nested exists", + fs.exists(path("/test/new/newdir/subdir/file2"))); + } + + public void testRenameDirectoryAsExistingFile() throws Exception { + if (!renameSupported()) return; + + Path src = path("/test/hadoop/dir"); + fs.mkdirs(src); + Path dst = path("/test/new/newfile"); + createFile(dst); + rename(src, dst, false, true, true); + } + + public void testRenameDirectoryAsExistingDirectory() throws Exception { + if (!renameSupported()) return; + + Path src = path("/test/hadoop/dir"); + fs.mkdirs(src); + createFile(path("/test/hadoop/dir/file1")); + createFile(path("/test/hadoop/dir/subdir/file2")); + + Path dst = path("/test/new/newdir"); + fs.mkdirs(dst); + rename(src, dst, true, false, true); + assertTrue("Destination changed", + fs.exists(path("/test/new/newdir/dir"))); + assertFalse("Nested file1 exists", + fs.exists(path("/test/hadoop/dir/file1"))); + assertFalse("Nested 
file2 exists", + fs.exists(path("/test/hadoop/dir/subdir/file2"))); + assertTrue("Renamed nested file1 exists", + fs.exists(path("/test/new/newdir/dir/file1"))); + assertTrue("Renamed nested exists", + fs.exists(path("/test/new/newdir/dir/subdir/file2"))); + } + + public void testInputStreamClosedTwice() throws IOException { + //HADOOP-4760 according to Closeable#close() closing already-closed + //streams should have no effect. + Path src = path("/test/hadoop/file"); + createFile(src); + FSDataInputStream in = fs.open(src); + in.close(); + in.close(); + } + + public void testOutputStreamClosedTwice() throws IOException { + //HADOOP-4760 according to Closeable#close() closing already-closed + //streams should have no effect. + Path src = path("/test/hadoop/file"); + FSDataOutputStream out = fs.create(src); + out.writeChar('H'); //write some data + out.close(); + out.close(); + } + + protected Path path(String pathString) { + return new Path(pathString).makeQualified(fs); + } + + protected void createFile(Path path) throws IOException { + FSDataOutputStream out = fs.create(path); + out.write(data, 0, data.length); + out.close(); + } + + private void rename(Path src, Path dst, boolean renameSucceeded, + boolean srcExists, boolean dstExists) throws IOException { + assertEquals("Rename result", renameSucceeded, fs.rename(src, dst)); + assertEquals("Source exists", srcExists, fs.exists(src)); + assertEquals("Destination exists", dstExists, fs.exists(dst)); + } +} diff --git a/src/test/org/apache/hadoop/fs/IOMapperBase.java b/src/test/org/apache/hadoop/fs/IOMapperBase.java new file mode 100644 index 0000000..69741f8 --- /dev/null +++ b/src/test/org/apache/hadoop/fs/IOMapperBase.java @@ -0,0 +1,125 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import java.io.IOException; +import java.net.InetAddress; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.*; + +/** + * Base mapper class for IO operations. + *

+ * Two abstract method {@link #doIO(Reporter, String, long)} and + * {@link #collectStats(OutputCollector,String,long,Object)} should be + * overloaded in derived classes to define the IO operation and the + * statistics data to be collected by subsequent reducers. + * + */ +@SuppressWarnings("deprecation") +public abstract class IOMapperBase extends Configured + implements Mapper { + + protected byte[] buffer; + protected int bufferSize; + protected FileSystem fs; + protected String hostName; + + public IOMapperBase() { + } + + public void configure(JobConf conf) { + setConf(conf); + try { + fs = FileSystem.get(conf); + } catch (Exception e) { + throw new RuntimeException("Cannot create file system.", e); + } + bufferSize = conf.getInt("test.io.file.buffer.size", 4096); + buffer = new byte[bufferSize]; + try { + hostName = InetAddress.getLocalHost().getHostName(); + } catch(Exception e) { + hostName = "localhost"; + } + } + + public void close() throws IOException { + } + + /** + * Perform io operation, usually read or write. + * + * @param reporter + * @param name file name + * @param value offset within the file + * @return object that is passed as a parameter to + * {@link #collectStats(OutputCollector,String,long,Object)} + * @throws IOException + */ + abstract T doIO(Reporter reporter, + String name, + long value) throws IOException; + + /** + * Collect stat data to be combined by a subsequent reducer. + * + * @param output + * @param name file name + * @param execTime IO execution time + * @param doIOReturnValue value returned by {@link #doIO(Reporter,String,long)} + * @throws IOException + */ + abstract void collectStats(OutputCollector output, + String name, + long execTime, + T doIOReturnValue) throws IOException; + + /** + * Map file name and offset into statistical data. + *

+ * The map task is to get the + * key, which contains the file name, and the + * value, which is the offset within the file. + * + * The parameters are passed to the abstract method + * {@link #doIO(Reporter,String,long)}, which performs the io operation, + * usually read or write data, and then + * {@link #collectStats(OutputCollector,String,long,Object)} + * is called to prepare stat data for a subsequent reducer. + */ + public void map(Text key, + LongWritable value, + OutputCollector output, + Reporter reporter) throws IOException { + String name = key.toString(); + long longValue = value.get(); + + reporter.setStatus("starting " + name + " ::host = " + hostName); + + long tStart = System.currentTimeMillis(); + T statValue = doIO(reporter, name, longValue); + long tEnd = System.currentTimeMillis(); + long execTime = tEnd - tStart; + collectStats(output, name, execTime, statValue); + + reporter.setStatus("finished " + name + " ::host = " + hostName); + } +} diff --git a/src/test/org/apache/hadoop/fs/TestChecksumFileSystem.java b/src/test/org/apache/hadoop/fs/TestChecksumFileSystem.java new file mode 100644 index 0000000..1ed3bd5 --- /dev/null +++ b/src/test/org/apache/hadoop/fs/TestChecksumFileSystem.java @@ -0,0 +1,108 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs; + +import java.net.URI; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.InMemoryFileSystem; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.conf.Configuration; +import junit.framework.TestCase; + +public class TestChecksumFileSystem extends TestCase { + public void testgetChecksumLength() throws Exception { + assertEquals(8, ChecksumFileSystem.getChecksumLength(0L, 512)); + assertEquals(12, ChecksumFileSystem.getChecksumLength(1L, 512)); + assertEquals(12, ChecksumFileSystem.getChecksumLength(512L, 512)); + assertEquals(16, ChecksumFileSystem.getChecksumLength(513L, 512)); + assertEquals(16, ChecksumFileSystem.getChecksumLength(1023L, 512)); + assertEquals(16, ChecksumFileSystem.getChecksumLength(1024L, 512)); + assertEquals(408, ChecksumFileSystem.getChecksumLength(100L, 1)); + assertEquals(4000000000008L, + ChecksumFileSystem.getChecksumLength(10000000000000L, 10)); + } + + // cehck that the checksum file is deleted for Checksum file system. 
+ public void testDeletionOfCheckSum() throws Exception { + Configuration conf = new Configuration(); + URI uri = URI.create("ramfs://mapoutput" + "_tmp"); + InMemoryFileSystem inMemFs = (InMemoryFileSystem)FileSystem.get(uri, conf); + Path testPath = new Path("/file_1"); + inMemFs.reserveSpaceWithCheckSum(testPath, 1024); + FSDataOutputStream fout = inMemFs.create(testPath); + fout.write("testing".getBytes()); + fout.close(); + assertTrue("checksum exists", inMemFs.exists(inMemFs.getChecksumFile(testPath))); + inMemFs.delete(testPath, true); + assertTrue("checksum deleted", !inMemFs.exists(inMemFs.getChecksumFile(testPath))); + // check for directories getting deleted. + testPath = new Path("/tesdir/file_1"); + inMemFs.reserveSpaceWithCheckSum(testPath, 1024); + fout = inMemFs.create(testPath); + fout.write("testing".getBytes()); + fout.close(); + testPath = new Path("/testdir/file_2"); + inMemFs.reserveSpaceWithCheckSum(testPath, 1024); + fout = inMemFs.create(testPath); + fout.write("testing".getBytes()); + fout.close(); + inMemFs.delete(testPath, true); + assertTrue("nothing in the namespace", inMemFs.listStatus(new Path("/")).length == 0); + } + + public void testVerifyChecksum() throws Exception { + String TEST_ROOT_DIR + = System.getProperty("test.build.data","build/test/data/work-dir/localfs"); + + Configuration conf = new Configuration(); + LocalFileSystem localFs = FileSystem.getLocal(conf); + Path testPath = new Path(TEST_ROOT_DIR, "testPath"); + Path testPath11 = new Path(TEST_ROOT_DIR, "testPath11"); + FSDataOutputStream fout = localFs.create(testPath); + fout.write("testing".getBytes()); + fout.close(); + + fout = localFs.create(testPath11); + fout.write("testing you".getBytes()); + fout.close(); + + localFs.delete(localFs.getChecksumFile(testPath), true); + assertTrue("checksum deleted", !localFs.exists(localFs.getChecksumFile(testPath))); + + //copying the wrong checksum file + FileUtil.copy(localFs, localFs.getChecksumFile(testPath11), localFs, + localFs.getChecksumFile(testPath),false,true,conf); + assertTrue("checksum exists", localFs.exists(localFs.getChecksumFile(testPath))); + + boolean errorRead = false; + try { + TestLocalFileSystem.readFile(localFs, testPath); + }catch(ChecksumException ie) { + errorRead = true; + } + assertTrue("error reading", errorRead); + + //now setting verify false, the read should succeed + localFs.setVerifyChecksum(false); + String str = TestLocalFileSystem.readFile(localFs, testPath); + assertTrue("read", "testing".equals(str)); + + } +} diff --git a/src/test/org/apache/hadoop/fs/TestCopyFiles.java b/src/test/org/apache/hadoop/fs/TestCopyFiles.java new file mode 100644 index 0000000..3b29900 --- /dev/null +++ b/src/test/org/apache/hadoop/fs/TestCopyFiles.java @@ -0,0 +1,1042 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs; + +import java.io.ByteArrayOutputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.PrintStream; +import java.net.URI; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; +import java.util.StringTokenizer; + +import junit.framework.TestCase; + +import org.apache.commons.logging.LogFactory; +import org.apache.commons.logging.impl.Log4JLogger; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.mapred.MiniMRCluster; +import org.apache.hadoop.security.UnixUserGroupInformation; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.tools.DistCp; +import org.apache.hadoop.util.ToolRunner; +import org.apache.log4j.Level; + + +/** + * A JUnit test for copying files recursively. + */ +public class TestCopyFiles extends TestCase { + { + ((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.StateChange") + ).getLogger().setLevel(Level.OFF); + ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.OFF); + ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.OFF); + ((Log4JLogger)DistCp.LOG).getLogger().setLevel(Level.ALL); + } + + static final URI LOCAL_FS = URI.create("file:///"); + + private static final Random RAN = new Random(); + private static final int NFILES = 20; + private static String TEST_ROOT_DIR = + new Path(System.getProperty("test.build.data","/tmp")) + .toString().replace(' ', '+'); + + /** class MyFile contains enough information to recreate the contents of + * a single file. + */ + private static class MyFile { + private static Random gen = new Random(); + private static final int MAX_LEVELS = 3; + private static final int MAX_SIZE = 8*1024; + private static String[] dirNames = { + "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine" + }; + private final String name; + private int size = 0; + private long seed = 0L; + + MyFile() { + this(gen.nextInt(MAX_LEVELS)); + } + MyFile(int nLevels) { + String xname = ""; + if (nLevels != 0) { + int[] levels = new int[nLevels]; + for (int idx = 0; idx < nLevels; idx++) { + levels[idx] = gen.nextInt(10); + } + StringBuffer sb = new StringBuffer(); + for (int idx = 0; idx < nLevels; idx++) { + sb.append(dirNames[levels[idx]]); + sb.append("/"); + } + xname = sb.toString(); + } + long fidx = gen.nextLong() & Long.MAX_VALUE; + name = xname + Long.toString(fidx); + reset(); + } + void reset() { + final int oldsize = size; + do { size = gen.nextInt(MAX_SIZE); } while (oldsize == size); + final long oldseed = seed; + do { seed = gen.nextLong() & Long.MAX_VALUE; } while (oldseed == seed); + } + String getName() { return name; } + int getSize() { return size; } + long getSeed() { return seed; } + } + + private static MyFile[] createFiles(URI fsname, String topdir) + throws IOException { + return createFiles(FileSystem.get(fsname, new Configuration()), topdir); + } + + /** create NFILES with random names and directory hierarchies + * with random (but reproducible) data in them. 
+ */ + private static MyFile[] createFiles(FileSystem fs, String topdir) + throws IOException { + Path root = new Path(topdir); + MyFile[] files = new MyFile[NFILES]; + for (int i = 0; i < NFILES; i++) { + files[i] = createFile(root, fs); + } + return files; + } + + static MyFile createFile(Path root, FileSystem fs, int levels) + throws IOException { + MyFile f = levels < 0 ? new MyFile() : new MyFile(levels); + Path p = new Path(root, f.getName()); + FSDataOutputStream out = fs.create(p); + byte[] toWrite = new byte[f.getSize()]; + new Random(f.getSeed()).nextBytes(toWrite); + out.write(toWrite); + out.close(); + FileSystem.LOG.info("created: " + p + ", size=" + f.getSize()); + return f; + } + + static MyFile createFile(Path root, FileSystem fs) throws IOException { + return createFile(root, fs, -1); + } + + private static boolean checkFiles(FileSystem fs, String topdir, MyFile[] files + ) throws IOException { + return checkFiles(fs, topdir, files, false); + } + + private static boolean checkFiles(FileSystem fs, String topdir, MyFile[] files, + boolean existingOnly) throws IOException { + Path root = new Path(topdir); + + for (int idx = 0; idx < files.length; idx++) { + Path fPath = new Path(root, files[idx].getName()); + try { + fs.getFileStatus(fPath); + FSDataInputStream in = fs.open(fPath); + byte[] toRead = new byte[files[idx].getSize()]; + byte[] toCompare = new byte[files[idx].getSize()]; + Random rb = new Random(files[idx].getSeed()); + rb.nextBytes(toCompare); + assertEquals("Cannnot read file.", toRead.length, in.read(toRead)); + in.close(); + for (int i = 0; i < toRead.length; i++) { + if (toRead[i] != toCompare[i]) { + return false; + } + } + toRead = null; + toCompare = null; + } + catch(FileNotFoundException fnfe) { + if (!existingOnly) { + throw fnfe; + } + } + } + + return true; + } + + private static void updateFiles(FileSystem fs, String topdir, MyFile[] files, + int nupdate) throws IOException { + assert nupdate <= NFILES; + + Path root = new Path(topdir); + + for (int idx = 0; idx < nupdate; ++idx) { + Path fPath = new Path(root, files[idx].getName()); + // overwrite file + assertTrue(fPath.toString() + " does not exist", fs.exists(fPath)); + FSDataOutputStream out = fs.create(fPath); + files[idx].reset(); + byte[] toWrite = new byte[files[idx].getSize()]; + Random rb = new Random(files[idx].getSeed()); + rb.nextBytes(toWrite); + out.write(toWrite); + out.close(); + } + } + + private static FileStatus[] getFileStatus(FileSystem fs, + String topdir, MyFile[] files) throws IOException { + return getFileStatus(fs, topdir, files, false); + } + private static FileStatus[] getFileStatus(FileSystem fs, + String topdir, MyFile[] files, boolean existingOnly) throws IOException { + Path root = new Path(topdir); + List statuses = new ArrayList(); + for (int idx = 0; idx < NFILES; ++idx) { + try { + statuses.add(fs.getFileStatus(new Path(root, files[idx].getName()))); + } catch(FileNotFoundException fnfe) { + if (!existingOnly) { + throw fnfe; + } + } + } + return statuses.toArray(new FileStatus[statuses.size()]); + } + + private static boolean checkUpdate(FileSystem fs, FileStatus[] old, + String topdir, MyFile[] upd, final int nupdate) throws IOException { + Path root = new Path(topdir); + + // overwrote updated files + for (int idx = 0; idx < nupdate; ++idx) { + final FileStatus stat = + fs.getFileStatus(new Path(root, upd[idx].getName())); + if (stat.getModificationTime() <= old[idx].getModificationTime()) { + return false; + } + } + // did not overwrite files not updated + 
for (int idx = nupdate; idx < NFILES; ++idx) { + final FileStatus stat = + fs.getFileStatus(new Path(root, upd[idx].getName())); + if (stat.getModificationTime() != old[idx].getModificationTime()) { + return false; + } + } + return true; + } + + /** delete directory and everything underneath it.*/ + private static void deldir(FileSystem fs, String topdir) throws IOException { + fs.delete(new Path(topdir), true); + } + + /** copy files from local file system to local file system */ + public void testCopyFromLocalToLocal() throws Exception { + Configuration conf = new Configuration(); + FileSystem localfs = FileSystem.get(LOCAL_FS, conf); + MyFile[] files = createFiles(LOCAL_FS, TEST_ROOT_DIR+"/srcdat"); + ToolRunner.run(new DistCp(new Configuration()), + new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat", + "file:///"+TEST_ROOT_DIR+"/destdat"}); + assertTrue("Source and destination directories do not match.", + checkFiles(localfs, TEST_ROOT_DIR+"/destdat", files)); + deldir(localfs, TEST_ROOT_DIR+"/destdat"); + deldir(localfs, TEST_ROOT_DIR+"/srcdat"); + } + + /** copy files from dfs file system to dfs file system */ + public void testCopyFromDfsToDfs() throws Exception { + String namenode = null; + MiniDFSCluster cluster = null; + try { + Configuration conf = new Configuration(); + cluster = new MiniDFSCluster(conf, 2, true, null); + final FileSystem hdfs = cluster.getFileSystem(); + namenode = FileSystem.getDefaultUri(conf).toString(); + if (namenode.startsWith("hdfs://")) { + MyFile[] files = createFiles(URI.create(namenode), "/srcdat"); + ToolRunner.run(new DistCp(conf), new String[] { + "-log", + namenode+"/logs", + namenode+"/srcdat", + namenode+"/destdat"}); + assertTrue("Source and destination directories do not match.", + checkFiles(hdfs, "/destdat", files)); + FileSystem fs = FileSystem.get(URI.create(namenode+"/logs"), conf); + assertTrue("Log directory does not exist.", + fs.exists(new Path(namenode+"/logs"))); + deldir(hdfs, "/destdat"); + deldir(hdfs, "/srcdat"); + deldir(hdfs, "/logs"); + } + } finally { + if (cluster != null) { cluster.shutdown(); } + } + } + + /** copy files from local file system to dfs file system */ + public void testCopyFromLocalToDfs() throws Exception { + MiniDFSCluster cluster = null; + try { + Configuration conf = new Configuration(); + cluster = new MiniDFSCluster(conf, 1, true, null); + final FileSystem hdfs = cluster.getFileSystem(); + final String namenode = hdfs.getUri().toString(); + if (namenode.startsWith("hdfs://")) { + MyFile[] files = createFiles(LOCAL_FS, TEST_ROOT_DIR+"/srcdat"); + ToolRunner.run(new DistCp(conf), new String[] { + "-log", + namenode+"/logs", + "file:///"+TEST_ROOT_DIR+"/srcdat", + namenode+"/destdat"}); + assertTrue("Source and destination directories do not match.", + checkFiles(cluster.getFileSystem(), "/destdat", files)); + assertTrue("Log directory does not exist.", + hdfs.exists(new Path(namenode+"/logs"))); + deldir(hdfs, "/destdat"); + deldir(hdfs, "/logs"); + deldir(FileSystem.get(LOCAL_FS, conf), TEST_ROOT_DIR+"/srcdat"); + } + } finally { + if (cluster != null) { cluster.shutdown(); } + } + } + + /** copy empty directory on dfs file system */ + public void testEmptyDir() throws Exception { + String namenode = null; + MiniDFSCluster cluster = null; + try { + Configuration conf = new Configuration(); + cluster = new MiniDFSCluster(conf, 2, true, null); + final FileSystem hdfs = cluster.getFileSystem(); + namenode = FileSystem.getDefaultUri(conf).toString(); + if (namenode.startsWith("hdfs://")) { + + FileSystem fs 
= FileSystem.get(URI.create(namenode), new Configuration()); + fs.mkdirs(new Path("/empty")); + + ToolRunner.run(new DistCp(conf), new String[] { + "-log", + namenode+"/logs", + namenode+"/empty", + namenode+"/dest"}); + fs = FileSystem.get(URI.create(namenode+"/destdat"), conf); + assertTrue("Destination directory does not exist.", + fs.exists(new Path(namenode+"/dest"))); + deldir(hdfs, "/dest"); + deldir(hdfs, "/empty"); + deldir(hdfs, "/logs"); + } + } finally { + if (cluster != null) { cluster.shutdown(); } + } + } + + /** copy files from dfs file system to local file system */ + public void testCopyFromDfsToLocal() throws Exception { + MiniDFSCluster cluster = null; + try { + Configuration conf = new Configuration(); + final FileSystem localfs = FileSystem.get(LOCAL_FS, conf); + cluster = new MiniDFSCluster(conf, 1, true, null); + final FileSystem hdfs = cluster.getFileSystem(); + final String namenode = FileSystem.getDefaultUri(conf).toString(); + if (namenode.startsWith("hdfs://")) { + MyFile[] files = createFiles(URI.create(namenode), "/srcdat"); + ToolRunner.run(new DistCp(conf), new String[] { + "-log", + "/logs", + namenode+"/srcdat", + "file:///"+TEST_ROOT_DIR+"/destdat"}); + assertTrue("Source and destination directories do not match.", + checkFiles(localfs, TEST_ROOT_DIR+"/destdat", files)); + assertTrue("Log directory does not exist.", + hdfs.exists(new Path("/logs"))); + deldir(localfs, TEST_ROOT_DIR+"/destdat"); + deldir(hdfs, "/logs"); + deldir(hdfs, "/srcdat"); + } + } finally { + if (cluster != null) { cluster.shutdown(); } + } + } + + public void testCopyDfsToDfsUpdateOverwrite() throws Exception { + MiniDFSCluster cluster = null; + try { + Configuration conf = new Configuration(); + cluster = new MiniDFSCluster(conf, 2, true, null); + final FileSystem hdfs = cluster.getFileSystem(); + final String namenode = hdfs.getUri().toString(); + if (namenode.startsWith("hdfs://")) { + MyFile[] files = createFiles(URI.create(namenode), "/srcdat"); + ToolRunner.run(new DistCp(conf), new String[] { + "-p", + "-log", + namenode+"/logs", + namenode+"/srcdat", + namenode+"/destdat"}); + assertTrue("Source and destination directories do not match.", + checkFiles(hdfs, "/destdat", files)); + FileSystem fs = FileSystem.get(URI.create(namenode+"/logs"), conf); + assertTrue("Log directory does not exist.", + fs.exists(new Path(namenode+"/logs"))); + + FileStatus[] dchkpoint = getFileStatus(hdfs, "/destdat", files); + final int nupdate = NFILES>>2; + updateFiles(cluster.getFileSystem(), "/srcdat", files, nupdate); + deldir(hdfs, "/logs"); + + ToolRunner.run(new DistCp(conf), new String[] { + "-prbugp", // no t to avoid preserving mod. times + "-update", + "-log", + namenode+"/logs", + namenode+"/srcdat", + namenode+"/destdat"}); + assertTrue("Source and destination directories do not match.", + checkFiles(hdfs, "/destdat", files)); + assertTrue("Update failed to replicate all changes in src", + checkUpdate(hdfs, dchkpoint, "/destdat", files, nupdate)); + + deldir(hdfs, "/logs"); + ToolRunner.run(new DistCp(conf), new String[] { + "-prbugp", // no t to avoid preserving mod. 
times + "-overwrite", + "-log", + namenode+"/logs", + namenode+"/srcdat", + namenode+"/destdat"}); + assertTrue("Source and destination directories do not match.", + checkFiles(hdfs, "/destdat", files)); + assertTrue("-overwrite didn't.", + checkUpdate(hdfs, dchkpoint, "/destdat", files, NFILES)); + + deldir(hdfs, "/destdat"); + deldir(hdfs, "/srcdat"); + deldir(hdfs, "/logs"); + } + } finally { + if (cluster != null) { cluster.shutdown(); } + } + } + + public void testCopyDfsToDfsUpdateWithSkipCRC() throws Exception { + MiniDFSCluster cluster = null; + try { + Configuration conf = new Configuration(); + cluster = new MiniDFSCluster(conf, 2, true, null); + final FileSystem hdfs = cluster.getFileSystem(); + final String namenode = hdfs.getUri().toString(); + + FileSystem fs = FileSystem.get(URI.create(namenode), new Configuration()); + // Create two files of the same name, same length but different + // contents + final String testfilename = "test"; + final String srcData = "act act act"; + final String destData = "cat cat cat"; + + if (namenode.startsWith("hdfs://")) { + deldir(hdfs,"/logs"); + + Path srcPath = new Path("/srcdat", testfilename); + Path destPath = new Path("/destdat", testfilename); + FSDataOutputStream out = fs.create(srcPath, true); + out.writeUTF(srcData); + out.close(); + + out = fs.create(destPath, true); + out.writeUTF(destData); + out.close(); + + // Run with -skipcrccheck option + ToolRunner.run(new DistCp(conf), new String[] { + "-p", + "-update", + "-skipcrccheck", + "-log", + namenode+"/logs", + namenode+"/srcdat", + namenode+"/destdat"}); + + // File should not be overwritten + FSDataInputStream in = hdfs.open(destPath); + String s = in.readUTF(); + System.out.println("Dest had: " + s); + assertTrue("Dest got over written even with skip crc", + s.equalsIgnoreCase(destData)); + in.close(); + + deldir(hdfs, "/logs"); + + // Run without the option + ToolRunner.run(new DistCp(conf), new String[] { + "-p", + "-update", + "-log", + namenode+"/logs", + namenode+"/srcdat", + namenode+"/destdat"}); + + // File should be overwritten + in = hdfs.open(destPath); + s = in.readUTF(); + System.out.println("Dest had: " + s); + + assertTrue("Dest did not get overwritten without skip crc", + s.equalsIgnoreCase(srcData)); + in.close(); + + deldir(hdfs, "/destdat"); + deldir(hdfs, "/srcdat"); + deldir(hdfs, "/logs"); + } + } finally { + if (cluster != null) { cluster.shutdown(); } + } + } + + public void testCopyDuplication() throws Exception { + final FileSystem localfs = FileSystem.get(LOCAL_FS, new Configuration()); + try { + MyFile[] files = createFiles(localfs, TEST_ROOT_DIR+"/srcdat"); + ToolRunner.run(new DistCp(new Configuration()), + new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat", + "file:///"+TEST_ROOT_DIR+"/src2/srcdat"}); + assertTrue("Source and destination directories do not match.", + checkFiles(localfs, TEST_ROOT_DIR+"/src2/srcdat", files)); + + assertEquals(DistCp.DuplicationException.ERROR_CODE, + ToolRunner.run(new DistCp(new Configuration()), + new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat", + "file:///"+TEST_ROOT_DIR+"/src2/srcdat", + "file:///"+TEST_ROOT_DIR+"/destdat",})); + } + finally { + deldir(localfs, TEST_ROOT_DIR+"/destdat"); + deldir(localfs, TEST_ROOT_DIR+"/srcdat"); + deldir(localfs, TEST_ROOT_DIR+"/src2"); + } + } + + public void testCopySingleFile() throws Exception { + FileSystem fs = FileSystem.get(LOCAL_FS, new Configuration()); + Path root = new Path(TEST_ROOT_DIR+"/srcdat"); + try { + MyFile[] files = {createFile(root, fs)}; + //copy a 
dir with a single file + ToolRunner.run(new DistCp(new Configuration()), + new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat", + "file:///"+TEST_ROOT_DIR+"/destdat"}); + assertTrue("Source and destination directories do not match.", + checkFiles(fs, TEST_ROOT_DIR+"/destdat", files)); + + //copy a single file + String fname = files[0].getName(); + Path p = new Path(root, fname); + FileSystem.LOG.info("fname=" + fname + ", exists? " + fs.exists(p)); + ToolRunner.run(new DistCp(new Configuration()), + new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat/"+fname, + "file:///"+TEST_ROOT_DIR+"/dest2/"+fname}); + assertTrue("Source and destination directories do not match.", + checkFiles(fs, TEST_ROOT_DIR+"/dest2", files)); + //copy single file to existing dir + deldir(fs, TEST_ROOT_DIR+"/dest2"); + fs.mkdirs(new Path(TEST_ROOT_DIR+"/dest2")); + MyFile[] files2 = {createFile(root, fs, 0)}; + String sname = files2[0].getName(); + ToolRunner.run(new DistCp(new Configuration()), + new String[] {"-update", + "file:///"+TEST_ROOT_DIR+"/srcdat/"+sname, + "file:///"+TEST_ROOT_DIR+"/dest2/"}); + assertTrue("Source and destination directories do not match.", + checkFiles(fs, TEST_ROOT_DIR+"/dest2", files2)); + updateFiles(fs, TEST_ROOT_DIR+"/srcdat", files2, 1); + //copy single file to existing dir w/ dst name conflict + ToolRunner.run(new DistCp(new Configuration()), + new String[] {"-update", + "file:///"+TEST_ROOT_DIR+"/srcdat/"+sname, + "file:///"+TEST_ROOT_DIR+"/dest2/"}); + assertTrue("Source and destination directories do not match.", + checkFiles(fs, TEST_ROOT_DIR+"/dest2", files2)); + } + finally { + deldir(fs, TEST_ROOT_DIR+"/destdat"); + deldir(fs, TEST_ROOT_DIR+"/dest2"); + deldir(fs, TEST_ROOT_DIR+"/srcdat"); + } + } + + /** tests basedir option copying files from dfs file system to dfs file system */ + public void testBasedir() throws Exception { + String namenode = null; + MiniDFSCluster cluster = null; + try { + Configuration conf = new Configuration(); + cluster = new MiniDFSCluster(conf, 2, true, null); + final FileSystem hdfs = cluster.getFileSystem(); + namenode = FileSystem.getDefaultUri(conf).toString(); + if (namenode.startsWith("hdfs://")) { + MyFile[] files = createFiles(URI.create(namenode), "/basedir/middle/srcdat"); + ToolRunner.run(new DistCp(conf), new String[] { + "-basedir", + "/basedir", + namenode+"/basedir/middle/srcdat", + namenode+"/destdat"}); + assertTrue("Source and destination directories do not match.", + checkFiles(hdfs, "/destdat/middle/srcdat", files)); + deldir(hdfs, "/destdat"); + deldir(hdfs, "/basedir"); + deldir(hdfs, "/logs"); + } + } finally { + if (cluster != null) { cluster.shutdown(); } + } + } + + public void testPreserveOption() throws Exception { + Configuration conf = new Configuration(); + MiniDFSCluster cluster = null; + try { + cluster = new MiniDFSCluster(conf, 2, true, null); + String nnUri = FileSystem.getDefaultUri(conf).toString(); + FileSystem fs = FileSystem.get(URI.create(nnUri), conf); + + {//test preserving user + MyFile[] files = createFiles(URI.create(nnUri), "/srcdat"); + FileStatus[] srcstat = getFileStatus(fs, "/srcdat", files); + for(int i = 0; i < srcstat.length; i++) { + fs.setOwner(srcstat[i].getPath(), "u" + i, null); + } + ToolRunner.run(new DistCp(conf), + new String[]{"-pu", nnUri+"/srcdat", nnUri+"/destdat"}); + assertTrue("Source and destination directories do not match.", + checkFiles(fs, "/destdat", files)); + + FileStatus[] dststat = getFileStatus(fs, "/destdat", files); + for(int i = 0; i < dststat.length; i++) { + 
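// -pu preserves ownership, so each destination file should carry the "u" + i owner that was set on its source above +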
assertEquals("i=" + i, "u" + i, dststat[i].getOwner()); + } + deldir(fs, "/destdat"); + deldir(fs, "/srcdat"); + } + + {//test preserving group + MyFile[] files = createFiles(URI.create(nnUri), "/srcdat"); + FileStatus[] srcstat = getFileStatus(fs, "/srcdat", files); + for(int i = 0; i < srcstat.length; i++) { + fs.setOwner(srcstat[i].getPath(), null, "g" + i); + } + ToolRunner.run(new DistCp(conf), + new String[]{"-pg", nnUri+"/srcdat", nnUri+"/destdat"}); + assertTrue("Source and destination directories do not match.", + checkFiles(fs, "/destdat", files)); + + FileStatus[] dststat = getFileStatus(fs, "/destdat", files); + for(int i = 0; i < dststat.length; i++) { + assertEquals("i=" + i, "g" + i, dststat[i].getGroup()); + } + deldir(fs, "/destdat"); + deldir(fs, "/srcdat"); + } + + {//test preserving mode + MyFile[] files = createFiles(URI.create(nnUri), "/srcdat"); + FileStatus[] srcstat = getFileStatus(fs, "/srcdat", files); + FsPermission[] permissions = new FsPermission[srcstat.length]; + for(int i = 0; i < srcstat.length; i++) { + permissions[i] = new FsPermission((short)(i & 0666)); + fs.setPermission(srcstat[i].getPath(), permissions[i]); + } + + ToolRunner.run(new DistCp(conf), + new String[]{"-pp", nnUri+"/srcdat", nnUri+"/destdat"}); + assertTrue("Source and destination directories do not match.", + checkFiles(fs, "/destdat", files)); + + FileStatus[] dststat = getFileStatus(fs, "/destdat", files); + for(int i = 0; i < dststat.length; i++) { + assertEquals("i=" + i, permissions[i], dststat[i].getPermission()); + } + deldir(fs, "/destdat"); + deldir(fs, "/srcdat"); + } + + {//test preserving times + MyFile[] files = createFiles(URI.create(nnUri), "/srcdat"); + fs.mkdirs(new Path("/srcdat/tmpf1")); + fs.mkdirs(new Path("/srcdat/tmpf2")); + FileStatus[] srcstat = getFileStatus(fs, "/srcdat", files); + FsPermission[] permissions = new FsPermission[srcstat.length]; + for(int i = 0; i < srcstat.length; i++) { + fs.setTimes(srcstat[i].getPath(), 40, 50); + } + + ToolRunner.run(new DistCp(conf), + new String[]{"-pt", nnUri+"/srcdat", nnUri+"/destdat"}); + + FileStatus[] dststat = getFileStatus(fs, "/destdat", files); + for(int i = 0; i < dststat.length; i++) { + assertEquals("Modif. 
Time i=" + i, 40, dststat[i].getModificationTime()); + assertEquals("Access Time i=" + i+ srcstat[i].getPath() + "-" + dststat[i].getPath(), 50, dststat[i].getAccessTime()); + } + + assertTrue("Source and destination directories do not match.", + checkFiles(fs, "/destdat", files)); + + deldir(fs, "/destdat"); + deldir(fs, "/srcdat"); + } + } finally { + if (cluster != null) { cluster.shutdown(); } + } + } + + public void testMapCount() throws Exception { + String namenode = null; + MiniDFSCluster dfs = null; + MiniMRCluster mr = null; + try { + Configuration conf = new Configuration(); + dfs = new MiniDFSCluster(conf, 3, true, null); + FileSystem fs = dfs.getFileSystem(); + final FsShell shell = new FsShell(conf); + namenode = fs.getUri().toString(); + mr = new MiniMRCluster(3, namenode, 1); + MyFile[] files = createFiles(fs.getUri(), "/srcdat"); + long totsize = 0; + for (MyFile f : files) { + totsize += f.getSize(); + } + Configuration job = mr.createJobConf(); + job.setLong("distcp.bytes.per.map", totsize / 3); + ToolRunner.run(new DistCp(job), + new String[] {"-m", "100", + "-log", + namenode+"/logs", + namenode+"/srcdat", + namenode+"/destdat"}); + assertTrue("Source and destination directories do not match.", + checkFiles(fs, "/destdat", files)); + + String logdir = namenode + "/logs"; + System.out.println(execCmd(shell, "-lsr", logdir)); + FileStatus[] logs = fs.listStatus(new Path(logdir)); + // rare case where splits are exact, logs.length can be 4 + assertTrue("Unexpected map count, logs.length=" + logs.length, + logs.length == 5 || logs.length == 4); + + deldir(fs, "/destdat"); + deldir(fs, "/logs"); + ToolRunner.run(new DistCp(job), + new String[] {"-m", "1", + "-log", + namenode+"/logs", + namenode+"/srcdat", + namenode+"/destdat"}); + + System.out.println(execCmd(shell, "-lsr", logdir)); + logs = fs.listStatus(new Path(namenode+"/logs")); + assertTrue("Unexpected map count, logs.length=" + logs.length, + logs.length == 2); + } finally { + if (dfs != null) { dfs.shutdown(); } + if (mr != null) { mr.shutdown(); } + } + } + + public void testLimits() throws Exception { + Configuration conf = new Configuration(); + MiniDFSCluster cluster = null; + try { + cluster = new MiniDFSCluster(conf, 2, true, null); + final String nnUri = FileSystem.getDefaultUri(conf).toString(); + final FileSystem fs = FileSystem.get(URI.create(nnUri), conf); + final DistCp distcp = new DistCp(conf); + final FsShell shell = new FsShell(conf); + + final String srcrootdir = "/src_root"; + final Path srcrootpath = new Path(srcrootdir); + final String dstrootdir = "/dst_root"; + final Path dstrootpath = new Path(dstrootdir); + + {//test -filelimit + MyFile[] files = createFiles(URI.create(nnUri), srcrootdir); + int filelimit = files.length / 2; + System.out.println("filelimit=" + filelimit); + + ToolRunner.run(distcp, + new String[]{"-filelimit", ""+filelimit, nnUri+srcrootdir, nnUri+dstrootdir}); + String results = execCmd(shell, "-lsr", dstrootdir); + results = removePrefix(results, dstrootdir); + System.out.println("results=" + results); + + FileStatus[] dststat = getFileStatus(fs, dstrootdir, files, true); + assertEquals(filelimit, dststat.length); + deldir(fs, dstrootdir); + deldir(fs, srcrootdir); + } + + {//test -sizelimit + createFiles(URI.create(nnUri), srcrootdir); + long sizelimit = fs.getContentSummary(srcrootpath).getLength()/2; + System.out.println("sizelimit=" + sizelimit); + + ToolRunner.run(distcp, + new String[]{"-sizelimit", ""+sizelimit, nnUri+srcrootdir, nnUri+dstrootdir}); + + 
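// the destination must stay within the byte budget: sizelimit was set to half of the source tree's total length +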
ContentSummary summary = fs.getContentSummary(dstrootpath); + System.out.println("summary=" + summary); + assertTrue(summary.getLength() <= sizelimit); + deldir(fs, dstrootdir); + deldir(fs, srcrootdir); + } + + {//test update + final MyFile[] srcs = createFiles(URI.create(nnUri), srcrootdir); + final long totalsize = fs.getContentSummary(srcrootpath).getLength(); + System.out.println("src.length=" + srcs.length); + System.out.println("totalsize =" + totalsize); + fs.mkdirs(dstrootpath); + final int parts = RAN.nextInt(NFILES/3 - 1) + 2; + final int filelimit = srcs.length/parts; + final long sizelimit = totalsize/parts; + System.out.println("filelimit=" + filelimit); + System.out.println("sizelimit=" + sizelimit); + System.out.println("parts =" + parts); + final String[] args = {"-filelimit", ""+filelimit, "-sizelimit", ""+sizelimit, + "-update", nnUri+srcrootdir, nnUri+dstrootdir}; + + int dstfilecount = 0; + long dstsize = 0; + for(int i = 0; i <= parts; i++) { + ToolRunner.run(distcp, args); + + FileStatus[] dststat = getFileStatus(fs, dstrootdir, srcs, true); + System.out.println(i + ") dststat.length=" + dststat.length); + assertTrue(dststat.length - dstfilecount <= filelimit); + ContentSummary summary = fs.getContentSummary(dstrootpath); + System.out.println(i + ") summary.getLength()=" + summary.getLength()); + assertTrue(summary.getLength() - dstsize <= sizelimit); + assertTrue(checkFiles(fs, dstrootdir, srcs, true)); + dstfilecount = dststat.length; + dstsize = summary.getLength(); + } + + deldir(fs, dstrootdir); + deldir(fs, srcrootdir); + } + } finally { + if (cluster != null) { cluster.shutdown(); } + } + } + + static final long now = System.currentTimeMillis(); + + static UnixUserGroupInformation createUGI(String name, boolean issuper) { + String username = name + now; + String group = issuper? 
"supergroup": username; + return UnixUserGroupInformation.createImmutable( + new String[]{username, group}); + } + + static Path createHomeDirectory(FileSystem fs, UserGroupInformation ugi + ) throws IOException { + final Path home = new Path("/user/" + ugi.getUserName()); + fs.mkdirs(home); + fs.setOwner(home, ugi.getUserName(), ugi.getGroupNames()[0]); + fs.setPermission(home, new FsPermission((short)0700)); + return home; + } + + public void testHftpAccessControl() throws Exception { + MiniDFSCluster cluster = null; + try { + final UnixUserGroupInformation DFS_UGI = createUGI("dfs", true); + final UnixUserGroupInformation USER_UGI = createUGI("user", false); + + //start cluster by DFS_UGI + final Configuration dfsConf = new Configuration(); + UnixUserGroupInformation.saveToConf(dfsConf, + UnixUserGroupInformation.UGI_PROPERTY_NAME, DFS_UGI); + cluster = new MiniDFSCluster(dfsConf, 2, true, null); + cluster.waitActive(); + + final String httpAdd = dfsConf.get("dfs.http.address"); + final URI nnURI = FileSystem.getDefaultUri(dfsConf); + final String nnUri = nnURI.toString(); + final Path home = createHomeDirectory(FileSystem.get(nnURI, dfsConf), USER_UGI); + + //now, login as USER_UGI + final Configuration userConf = new Configuration(); + UnixUserGroupInformation.saveToConf(userConf, + UnixUserGroupInformation.UGI_PROPERTY_NAME, USER_UGI); + final FileSystem fs = FileSystem.get(nnURI, userConf); + + final Path srcrootpath = new Path(home, "src_root"); + final String srcrootdir = srcrootpath.toString(); + final Path dstrootpath = new Path(home, "dst_root"); + final String dstrootdir = dstrootpath.toString(); + final DistCp distcp = new DistCp(userConf); + + FileSystem.mkdirs(fs, srcrootpath, new FsPermission((short)0700)); + final String[] args = {"hftp://"+httpAdd+srcrootdir, nnUri+dstrootdir}; + + { //copy with permission 000, should fail + fs.setPermission(srcrootpath, new FsPermission((short)0)); + assertEquals(-3, ToolRunner.run(distcp, args)); + } + } finally { + if (cluster != null) { cluster.shutdown(); } + } + } + + /** test -delete */ + public void testDelete() throws Exception { + final Configuration conf = new Configuration(); + MiniDFSCluster cluster = null; + try { + cluster = new MiniDFSCluster(conf, 2, true, null); + final URI nnURI = FileSystem.getDefaultUri(conf); + final String nnUri = nnURI.toString(); + final FileSystem fs = FileSystem.get(URI.create(nnUri), conf); + + final DistCp distcp = new DistCp(conf); + final FsShell shell = new FsShell(conf); + + final String srcrootdir = "/src_root"; + final String dstrootdir = "/dst_root"; + + { + //create source files + createFiles(nnURI, srcrootdir); + String srcresults = execCmd(shell, "-lsr", srcrootdir); + srcresults = removePrefix(srcresults, srcrootdir); + System.out.println("srcresults=" + srcresults); + + //create some files in dst + createFiles(nnURI, dstrootdir); + System.out.println("dstrootdir=" + dstrootdir); + shell.run(new String[]{"-lsr", dstrootdir}); + + //run distcp + ToolRunner.run(distcp, + new String[]{"-delete", "-update", "-log", "/log", + nnUri+srcrootdir, nnUri+dstrootdir}); + + //make sure src and dst contains the same files + String dstresults = execCmd(shell, "-lsr", dstrootdir); + dstresults = removePrefix(dstresults, dstrootdir); + System.out.println("first dstresults=" + dstresults); + assertEquals(srcresults, dstresults); + + //create additional file in dst + create(fs, new Path(dstrootdir, "foo")); + create(fs, new Path(dstrootdir, "foobar")); + + //run distcp again + ToolRunner.run(distcp, 
+ new String[]{"-delete", "-update", "-log", "/log2", + nnUri+srcrootdir, nnUri+dstrootdir}); + + //make sure src and dst contains the same files + dstresults = execCmd(shell, "-lsr", dstrootdir); + dstresults = removePrefix(dstresults, dstrootdir); + System.out.println("second dstresults=" + dstresults); + assertEquals(srcresults, dstresults); + + //cleanup + deldir(fs, dstrootdir); + deldir(fs, srcrootdir); + } + } finally { + if (cluster != null) { cluster.shutdown(); } + } + } + + /** test globbing */ + public void testGlobbing() throws Exception { + String namenode = null; + MiniDFSCluster cluster = null; + try { + Configuration conf = new Configuration(); + cluster = new MiniDFSCluster(conf, 2, true, null); + final FileSystem hdfs = cluster.getFileSystem(); + namenode = FileSystem.getDefaultUri(conf).toString(); + if (namenode.startsWith("hdfs://")) { + MyFile[] files = createFiles(URI.create(namenode), "/srcdat"); + ToolRunner.run(new DistCp(conf), new String[] { + "-log", + namenode+"/logs", + namenode+"/srcdat/*", + namenode+"/destdat"}); + assertTrue("Source and destination directories do not match.", + checkFiles(hdfs, "/destdat", files)); + FileSystem fs = FileSystem.get(URI.create(namenode+"/logs"), conf); + assertTrue("Log directory does not exist.", + fs.exists(new Path(namenode+"/logs"))); + deldir(hdfs, "/destdat"); + deldir(hdfs, "/srcdat"); + deldir(hdfs, "/logs"); + } + } finally { + if (cluster != null) { cluster.shutdown(); } + } + } + + static void create(FileSystem fs, Path f) throws IOException { + FSDataOutputStream out = fs.create(f); + try { + byte[] b = new byte[1024 + RAN.nextInt(1024)]; + RAN.nextBytes(b); + out.write(b); + } finally { + if (out != null) out.close(); + } + } + + static String execCmd(FsShell shell, String... args) throws Exception { + ByteArrayOutputStream baout = new ByteArrayOutputStream(); + PrintStream out = new PrintStream(baout, true); + PrintStream old = System.out; + System.setOut(out); + shell.run(args); + out.close(); + System.setOut(old); + return baout.toString(); + } + + private static String removePrefix(String lines, String prefix) { + final int prefixlen = prefix.length(); + final StringTokenizer t = new StringTokenizer(lines, "\n"); + final StringBuffer results = new StringBuffer(); + for(; t.hasMoreTokens(); ) { + String s = t.nextToken(); + results.append(s.substring(s.indexOf(prefix) + prefixlen) + "\n"); + } + return results.toString(); + } +} \ No newline at end of file diff --git a/src/test/org/apache/hadoop/fs/TestCorruptFileBlocks.java b/src/test/org/apache/hadoop/fs/TestCorruptFileBlocks.java new file mode 100644 index 0000000..14ebc81 --- /dev/null +++ b/src/test/org/apache/hadoop/fs/TestCorruptFileBlocks.java @@ -0,0 +1,79 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs; + +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.IOException; + +import static org.junit.Assert.*; +import org.junit.Test; + +import org.apache.hadoop.io.DataOutputBuffer; + +public class TestCorruptFileBlocks { + + /** + * Serialize the cfb given, deserialize and return the result. + */ + static CorruptFileBlocks serializeAndDeserialize(CorruptFileBlocks cfb) + throws IOException { + DataOutputBuffer buf = new DataOutputBuffer(); + cfb.write(buf); + + byte[] data = buf.getData(); + DataInputStream input = new DataInputStream(new ByteArrayInputStream(data)); + + CorruptFileBlocks result = new CorruptFileBlocks(); + result.readFields(input); + + return result; + } + + /** + * Check whether cfb is unchanged after serialization and deserialization. + */ + static boolean checkSerialize(CorruptFileBlocks cfb) + throws IOException { + return cfb.equals(serializeAndDeserialize(cfb)); + } + + /** + * Test serialization and deserializaton of CorruptFileBlocks. + */ + @Test + public void testSerialization() throws IOException { + { + CorruptFileBlocks cfb = new CorruptFileBlocks(); + assertTrue(checkSerialize(cfb)); + } + + { + String[] files = new String[0]; + CorruptFileBlocks cfb = new CorruptFileBlocks(files, ""); + assertTrue(checkSerialize(cfb)); + } + + { + String[] files = { "a", "bb", "ccc" }; + CorruptFileBlocks cfb = new CorruptFileBlocks(files, "test"); + assertTrue(checkSerialize(cfb)); + } + } +} \ No newline at end of file diff --git a/src/test/org/apache/hadoop/fs/TestDFSIO.java b/src/test/org/apache/hadoop/fs/TestDFSIO.java new file mode 100644 index 0000000..efcf56f --- /dev/null +++ b/src/test/org/apache/hadoop/fs/TestDFSIO.java @@ -0,0 +1,484 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs; + +import java.io.BufferedReader; +import java.io.DataInputStream; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.io.PrintStream; +import java.util.Date; +import java.util.StringTokenizer; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.SequenceFile.CompressionType; +import org.apache.hadoop.mapred.*; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; +import org.junit.Test; + +/** + * Distributed i/o benchmark. + *

<p> + * This test writes into or reads from a specified number of files. + * File size is specified as a parameter to the test. + * Each file is accessed in a separate map task. + * <p> + * The reducer collects the following statistics: + * <ul> + * <li>number of tasks completed</li> + * <li>number of bytes written/read</li> + * <li>execution time</li> + * <li>io rate</li> + * <li>io rate squared</li> + * </ul> + * + * Finally, the following information is appended to a local file + * <ul> + * <li>read or write test</li> + * <li>date and time the test finished</li> + * <li>number of files</li> + * <li>total number of bytes processed</li> + * <li>throughput in mb/sec (total number of bytes / sum of processing times)</li> + * <li>average i/o rate in mb/sec per file</li> + * <li>standard deviation of i/o rate</li> + * </ul>
+ */ +public class TestDFSIO extends Configured implements Tool { + // Constants + private static final Log LOG = LogFactory.getLog(TestDFSIO.class); + private static final int TEST_TYPE_READ = 0; + private static final int TEST_TYPE_WRITE = 1; + private static final int TEST_TYPE_CLEANUP = 2; + private static final int DEFAULT_BUFFER_SIZE = 1000000; + private static final String BASE_FILE_NAME = "test_io_"; + private static final String DEFAULT_RES_FILE_NAME = "TestDFSIO_results.log"; + + private static final long MEGA = 0x100000; + private static String TEST_ROOT_DIR = System.getProperty("test.build.data","/benchmarks/TestDFSIO"); + private static Path CONTROL_DIR = new Path(TEST_ROOT_DIR, "io_control"); + private static Path WRITE_DIR = new Path(TEST_ROOT_DIR, "io_write"); + private static Path READ_DIR = new Path(TEST_ROOT_DIR, "io_read"); + private static Path DATA_DIR = new Path(TEST_ROOT_DIR, "io_data"); + + static{ + Configuration.addDefaultResource("hdfs-default.xml"); + Configuration.addDefaultResource("hdfs-site.xml"); + } + + /** + * Run the test with default parameters. + * + * @throws Exception + */ + @Test + public void testIOs() throws Exception { + testIOs(10, 10, new Configuration()); + } + + /** + * Run the test with the specified parameters. + * + * @param fileSize file size + * @param nrFiles number of files + * @throws IOException + */ + public static void testIOs(int fileSize, int nrFiles, Configuration fsConfig) + throws IOException { + + FileSystem fs = FileSystem.get(fsConfig); + + createControlFile(fs, fileSize, nrFiles, fsConfig); + writeTest(fs, fsConfig); + readTest(fs, fsConfig); + cleanup(fs); + } + + private static void createControlFile(FileSystem fs, + int fileSize, // in MB + int nrFiles, + Configuration fsConfig + ) throws IOException { + LOG.info("creating control file: "+fileSize+" mega bytes, "+nrFiles+" files"); + + fs.delete(CONTROL_DIR, true); + + for(int i=0; i < nrFiles; i++) { + String name = getFileName(i); + Path controlFile = new Path(CONTROL_DIR, "in_file_" + name); + SequenceFile.Writer writer = null; + try { + writer = SequenceFile.createWriter(fs, fsConfig, controlFile, + Text.class, LongWritable.class, + CompressionType.NONE); + writer.append(new Text(name), new LongWritable(fileSize)); + } catch(Exception e) { + throw new IOException(e.getLocalizedMessage()); + } finally { + if (writer != null) + writer.close(); + writer = null; + } + } + LOG.info("created control files for: "+nrFiles+" files"); + } + + private static String getFileName(int fIdx) { + return BASE_FILE_NAME + Integer.toString(fIdx); + } + + /** + * Write/Read mapper base class. + *

<p> + * Collects the following statistics per task: + * <ul> + * <li>number of tasks completed</li> + * <li>number of bytes written/read</li> + * <li>execution time</li> + * <li>i/o rate</li> + * <li>i/o rate squared</li> + * </ul>
+ */ + private abstract static class IOStatMapper extends IOMapperBase { + IOStatMapper() { + } + + void collectStats(OutputCollector output, + String name, + long execTime, + Long objSize) throws IOException { + long totalSize = objSize.longValue(); + float ioRateMbSec = (float)totalSize * 1000 / (execTime * MEGA); + LOG.info("Number of bytes processed = " + totalSize); + LOG.info("Exec time = " + execTime); + LOG.info("IO rate = " + ioRateMbSec); + + output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "tasks"), + new Text(String.valueOf(1))); + output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "size"), + new Text(String.valueOf(totalSize))); + output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "time"), + new Text(String.valueOf(execTime))); + output.collect(new Text(AccumulatingReducer.VALUE_TYPE_FLOAT + "rate"), + new Text(String.valueOf(ioRateMbSec*1000))); + output.collect(new Text(AccumulatingReducer.VALUE_TYPE_FLOAT + "sqrate"), + new Text(String.valueOf(ioRateMbSec*ioRateMbSec*1000))); + } + } + + /** + * Write mapper class. + */ + public static class WriteMapper extends IOStatMapper { + + public WriteMapper() { + for(int i=0; i < bufferSize; i++) + buffer[i] = (byte)('0' + i % 50); + } + + public Long doIO(Reporter reporter, + String name, + long totalSize + ) throws IOException { + // create file + totalSize *= MEGA; + OutputStream out; + out = fs.create(new Path(DATA_DIR, name), true, bufferSize); + + + try { + // write to the file + long nrRemaining; + for (nrRemaining = totalSize; nrRemaining > 0; nrRemaining -= bufferSize) { + int curSize = (bufferSize < nrRemaining) ? bufferSize : (int)nrRemaining; + out.write(buffer, 0, curSize); + reporter.setStatus("writing " + name + "@" + + (totalSize - nrRemaining) + "/" + totalSize + + " ::host = " + hostName); + } + } finally { + out.close(); + } + return Long.valueOf(totalSize); + } + } + + private static void writeTest(FileSystem fs, Configuration fsConfig) + throws IOException { + + fs.delete(DATA_DIR, true); + fs.delete(WRITE_DIR, true); + + runIOTest(WriteMapper.class, WRITE_DIR, fsConfig); + } + + @SuppressWarnings("deprecation") + private static void runIOTest( + Class> mapperClass, + Path outputDir, + Configuration fsConfig) throws IOException { + JobConf job = new JobConf(fsConfig, TestDFSIO.class); + + FileInputFormat.setInputPaths(job, CONTROL_DIR); + job.setInputFormat(SequenceFileInputFormat.class); + + job.setMapperClass(mapperClass); + job.setReducerClass(AccumulatingReducer.class); + + FileOutputFormat.setOutputPath(job, outputDir); + + job.setOutputKeyClass(Text.class); + job.setOutputValueClass(Text.class); + job.setNumReduceTasks(1); + JobClient.runJob(job); + } + + /** + * Read mapper class. 
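+ * Reads each file from DATA_DIR in bufferSize chunks until a short read signals end of file.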
+ */ + public static class ReadMapper extends IOStatMapper { + + public ReadMapper() { + } + + public Long doIO(Reporter reporter, + String name, + long totalSize + ) throws IOException { + totalSize *= MEGA; + // open file + DataInputStream in = fs.open(new Path(DATA_DIR, name)); + try { + long actualSize = 0; + for(int curSize = bufferSize; curSize == bufferSize;) { + curSize = in.read(buffer, 0, bufferSize); + actualSize += curSize; + reporter.setStatus("reading " + name + "@" + + actualSize + "/" + totalSize + + " ::host = " + hostName); + } + } finally { + in.close(); + } + return Long.valueOf(totalSize); + } + } + + private static void readTest(FileSystem fs, Configuration fsConfig) + throws IOException { + fs.delete(READ_DIR, true); + runIOTest(ReadMapper.class, READ_DIR, fsConfig); + } + + private static void sequentialTest(FileSystem fs, + int testType, + int fileSize, + int nrFiles + ) throws Exception { + IOStatMapper ioer = null; + if (testType == TEST_TYPE_READ) + ioer = new ReadMapper(); + else if (testType == TEST_TYPE_WRITE) + ioer = new WriteMapper(); + else + return; + for(int i=0; i < nrFiles; i++) + ioer.doIO(Reporter.NULL, + BASE_FILE_NAME+Integer.toString(i), + MEGA*fileSize); + } + + public static void main(String[] args) throws Exception{ + int res = ToolRunner.run(new TestDFSIO(), args); + System.exit(res); + } + + private static void analyzeResult( FileSystem fs, + int testType, + long execTime, + String resFileName + ) throws IOException { + Path reduceFile; + if (testType == TEST_TYPE_WRITE) + reduceFile = new Path(WRITE_DIR, "part-00000"); + else + reduceFile = new Path(READ_DIR, "part-00000"); + long tasks = 0; + long size = 0; + long time = 0; + float rate = 0; + float sqrate = 0; + DataInputStream in = null; + BufferedReader lines = null; + try { + in = new DataInputStream(fs.open(reduceFile)); + lines = new BufferedReader(new InputStreamReader(in)); + String line; + while((line = lines.readLine()) != null) { + StringTokenizer tokens = new StringTokenizer(line, " \t\n\r\f%"); + String attr = tokens.nextToken(); + if (attr.endsWith(":tasks")) + tasks = Long.parseLong(tokens.nextToken()); + else if (attr.endsWith(":size")) + size = Long.parseLong(tokens.nextToken()); + else if (attr.endsWith(":time")) + time = Long.parseLong(tokens.nextToken()); + else if (attr.endsWith(":rate")) + rate = Float.parseFloat(tokens.nextToken()); + else if (attr.endsWith(":sqrate")) + sqrate = Float.parseFloat(tokens.nextToken()); + } + } finally { + if(in != null) in.close(); + if(lines != null) lines.close(); + } + + double med = rate / 1000 / tasks; + double stdDev = Math.sqrt(Math.abs(sqrate / 1000 / tasks - med*med)); + String resultLines[] = { + "----- TestDFSIO ----- : " + ((testType == TEST_TYPE_WRITE) ? "write" : + (testType == TEST_TYPE_READ) ? 
"read" : + "unknown"), + " Date & time: " + new Date(System.currentTimeMillis()), + " Number of files: " + tasks, + "Total MBytes processed: " + size/MEGA, + " Throughput mb/sec: " + size * 1000.0 / (time * MEGA), + "Average IO rate mb/sec: " + med, + " IO rate std deviation: " + stdDev, + " Test exec time sec: " + (float)execTime / 1000, + "" }; + + PrintStream res = null; + try { + res = new PrintStream(new FileOutputStream(new File(resFileName), true)); + for(int i = 0; i < resultLines.length; i++) { + LOG.info(resultLines[i]); + res.println(resultLines[i]); + } + } finally { + if(res != null) res.close(); + } + } + + private static void cleanup(FileSystem fs) throws IOException { + LOG.info("Cleaning up test files"); + fs.delete(new Path(TEST_ROOT_DIR), true); + } + + @Override + public int run(String[] args) throws Exception { + int testType = TEST_TYPE_READ; + int bufferSize = DEFAULT_BUFFER_SIZE; + int fileSize = 1; + int nrFiles = 1; + Configuration fsConfig = new Configuration(getConf()); + long blockSize = fsConfig.getLong("dfs.block.size", 64L * 1024 * 1024); + short replication = (short)fsConfig.getInt("dfs.replication", 3); + String resFileName = DEFAULT_RES_FILE_NAME; + boolean isSequential = false; + + String className = TestDFSIO.class.getSimpleName(); + String version = className + ".0.0.4"; + String usage = "Usage: " + className + " -read | -write | -clean " + + "[-nrFiles N] [-fileSize MB] [-resFile resultFileName] " + + "[-bufferSize Bytes] "; + + System.out.println(version); + if (args.length == 0) { + System.err.println(usage); + return -1; + } + for (int i = 0; i < args.length; i++) { // parse command line + if (args[i].startsWith("-read")) { + testType = TEST_TYPE_READ; + } else if (args[i].equals("-write")) { + testType = TEST_TYPE_WRITE; + } else if (args[i].equals("-clean")) { + testType = TEST_TYPE_CLEANUP; + } else if (args[i].startsWith("-seq")) { + isSequential = true; + } else if (args[i].equals("-nrFiles")) { + nrFiles = Integer.parseInt(args[++i]); + } else if (args[i].equals("-fileSize")) { + fileSize = Integer.parseInt(args[++i]); + } else if (args[i].equals("-bufferSize")) { + bufferSize = Integer.parseInt(args[++i]); + } else if (args[i].equals("-resFile")) { + resFileName = args[++i]; + } else if (args[i].equals("-blockSize")) { + blockSize = Long.parseLong(args[++i]); + } else if (args[i].equals("-replication")) { + replication = Short.parseShort(args[++i]); + } + } + + LOG.info("nrFiles = " + nrFiles); + LOG.info("fileSize (MB) = " + fileSize); + LOG.info("bufferSize = " + bufferSize); + LOG.info("blockSize = " + blockSize); + LOG.info("replication = " + replication); + + try { + fsConfig.setInt("test.io.file.buffer.size", bufferSize); + + // If no value provided in the command line + // replication and block size will stay the same + fsConfig.setInt("dfs.replication", replication); + fsConfig.setLong("dfs.block.size", blockSize); + + FileSystem fs = FileSystem.get(fsConfig); + + if (isSequential) { + long tStart = System.currentTimeMillis(); + sequentialTest(fs, testType, fileSize, nrFiles); + long execTime = System.currentTimeMillis() - tStart; + String resultLine = "Seq Test exec time sec: " + (float)execTime / 1000; + LOG.info(resultLine); + return 0; + } + if (testType == TEST_TYPE_CLEANUP) { + cleanup(fs); + return 0; + } + createControlFile(fs, fileSize, nrFiles, fsConfig); + long tStart = System.currentTimeMillis(); + if (testType == TEST_TYPE_WRITE) + writeTest(fs, fsConfig); + if (testType == TEST_TYPE_READ) + readTest(fs, fsConfig); + 
long execTime = System.currentTimeMillis() - tStart; + + analyzeResult(fs, testType, execTime, resFileName); + } catch(Exception e) { + System.err.print(StringUtils.stringifyException(e)); + return -1; + } + return 0; + } +} diff --git a/src/test/org/apache/hadoop/fs/TestDU.java b/src/test/org/apache/hadoop/fs/TestDU.java new file mode 100644 index 0000000..6df487b --- /dev/null +++ b/src/test/org/apache/hadoop/fs/TestDU.java @@ -0,0 +1,95 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import junit.framework.TestCase; + +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.util.Random; + +/** This test makes sure that "DU" does not get to run on each call to getUsed */ +public class TestDU extends TestCase { + final static private File DU_DIR = new File( + System.getProperty("test.build.data","/tmp"), "dutmp"); + + public void setUp() throws IOException { + FileUtil.fullyDelete(DU_DIR); + assertTrue(DU_DIR.mkdirs()); + } + + public void tearDown() throws IOException { + FileUtil.fullyDelete(DU_DIR); + } + + private void createFile(File newFile, int size) throws IOException { + // write random data so that filesystems with compression enabled (e.g., ZFS) + // can't compress the file + Random random = new Random(); + byte[] data = new byte[size]; + random.nextBytes(data); + + newFile.createNewFile(); + RandomAccessFile file = new RandomAccessFile(newFile, "rws"); + + file.write(data); + + file.getFD().sync(); + file.close(); + } + + /** + * Verify that du returns expected used space for a file. + * We assume here that if a file system crates a file of size + * that is a multiple of the block size in this file system, + * then the used size for the file will be exactly that size. + * This is true for most file systems. 
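+ * The test sleeps briefly after writing so file system metadata has time to catch up before du runs.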
+ * + * @throws IOException + * @throws InterruptedException + */ + public void testDU() throws IOException, InterruptedException { + int writtenSize = 32*1024; // writing 32K + File file = new File(DU_DIR, "data"); + createFile(file, writtenSize); + + Thread.sleep(5000); // let the metadata updater catch up + + DU du = new DU(file, 10000); + du.start(); + long duSize = du.getUsed(); + du.shutdown(); + + assertEquals(writtenSize, duSize); + + //test with 0 interval, will not launch thread + du = new DU(file, 0); + du.start(); + duSize = du.getUsed(); + du.shutdown(); + + assertEquals(writtenSize, duSize); + + //test without launching thread + du = new DU(file, 10000); + duSize = du.getUsed(); + + assertEquals(writtenSize, duSize); + } +} diff --git a/src/test/org/apache/hadoop/fs/TestFileSystem.java b/src/test/org/apache/hadoop/fs/TestFileSystem.java new file mode 100644 index 0000000..da76104 --- /dev/null +++ b/src/test/org/apache/hadoop/fs/TestFileSystem.java @@ -0,0 +1,703 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs; + +import java.io.DataInputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.util.Arrays; +import java.util.Random; +import java.util.List; +import java.util.ArrayList; +import java.util.Set; +import java.util.HashSet; +import java.util.Map; +import java.util.HashMap; +import java.net.InetSocketAddress; +import java.net.URI; + +import junit.framework.TestCase; + +import org.apache.commons.logging.Log; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.fs.shell.CommandFormat; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.UTF8; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.io.SequenceFile.CompressionType; +import org.apache.hadoop.mapred.FileInputFormat; +import org.apache.hadoop.mapred.FileOutputFormat; +import org.apache.hadoop.mapred.JobClient; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.Mapper; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.mapred.SequenceFileInputFormat; +import org.apache.hadoop.mapred.lib.LongSumReducer; +import org.apache.hadoop.security.UnixUserGroupInformation; + +public class TestFileSystem extends TestCase { + private static final Log LOG = FileSystem.LOG; + + private static Configuration conf = new Configuration(); + private static int BUFFER_SIZE = conf.getInt("io.file.buffer.size", 4096); + + private static final long MEGA = 1024 * 1024; + private static final int SEEKS_PER_FILE = 4; + + private static String ROOT = System.getProperty("test.build.data","fs_test"); + private static Path CONTROL_DIR = new Path(ROOT, "fs_control"); + private static Path WRITE_DIR = new Path(ROOT, "fs_write"); + private static Path READ_DIR = new Path(ROOT, "fs_read"); + private static Path DATA_DIR = new Path(ROOT, "fs_data"); + + public void testFs() throws Exception { + testFs(10 * MEGA, 100, 0); + } + + public static void testFs(long megaBytes, int numFiles, long seed) + throws Exception { + + FileSystem fs = FileSystem.get(conf); + + if (seed == 0) + seed = new Random().nextLong(); + + LOG.info("seed = "+seed); + + createControlFile(fs, megaBytes, numFiles, seed); + writeTest(fs, false); + readTest(fs, false); + seekTest(fs, false); + fs.delete(CONTROL_DIR, true); + fs.delete(DATA_DIR, true); + fs.delete(WRITE_DIR, true); + fs.delete(READ_DIR, true); + } + + public static void testCommandFormat() throws Exception { + // This should go to TestFsShell.java when it is added. 
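+ // each case builds a CommandFormat with its min/max argument counts and accepted flags, then checks the positional values parse() returns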
+ CommandFormat cf; + cf= new CommandFormat("copyToLocal", 2,2,"crc","ignoreCrc"); + assertEquals(cf.parse(new String[] {"-get","file", "-"}, 1).get(1), "-"); + assertEquals(cf.parse(new String[] {"-get","file","-ignoreCrc","/foo"}, 1).get(1),"/foo"); + cf = new CommandFormat("tail", 1, 1, "f"); + assertEquals(cf.parse(new String[] {"-tail","fileName"}, 1).get(0),"fileName"); + assertEquals(cf.parse(new String[] {"-tail","-f","fileName"}, 1).get(0),"fileName"); + cf = new CommandFormat("setrep", 2, 2, "R", "w"); + assertEquals(cf.parse(new String[] {"-setrep","-R","2","/foo/bar"}, 1).get(1), "/foo/bar"); + cf = new CommandFormat("put", 2, 10000); + assertEquals(cf.parse(new String[] {"-put", "-", "dest"}, 1).get(1), "dest"); + } + + public static void createControlFile(FileSystem fs, + long megaBytes, int numFiles, + long seed) throws Exception { + + LOG.info("creating control file: "+megaBytes+" bytes, "+numFiles+" files"); + + Path controlFile = new Path(CONTROL_DIR, "files"); + fs.delete(controlFile, true); + Random random = new Random(seed); + + SequenceFile.Writer writer = + SequenceFile.createWriter(fs, conf, controlFile, + UTF8.class, LongWritable.class, CompressionType.NONE); + + long totalSize = 0; + long maxSize = ((megaBytes / numFiles) * 2) + 1; + try { + while (totalSize < megaBytes) { + UTF8 name = new UTF8(Long.toString(random.nextLong())); + + long size = random.nextLong(); + if (size < 0) + size = -size; + size = size % maxSize; + + //LOG.info(" adding: name="+name+" size="+size); + + writer.append(name, new LongWritable(size)); + + totalSize += size; + } + } finally { + writer.close(); + } + LOG.info("created control file for: "+totalSize+" bytes"); + } + + public static class WriteMapper extends Configured + implements Mapper { + + private Random random = new Random(); + private byte[] buffer = new byte[BUFFER_SIZE]; + private FileSystem fs; + private boolean fastCheck; + + // a random suffix per task + private String suffix = "-"+random.nextLong(); + + { + try { + fs = FileSystem.get(conf); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + public WriteMapper() { super(null); } + + public WriteMapper(Configuration conf) { super(conf); } + + public void configure(JobConf job) { + setConf(job); + fastCheck = job.getBoolean("fs.test.fastCheck", false); + } + + public void map(UTF8 key, LongWritable value, + OutputCollector collector, + Reporter reporter) + throws IOException { + + String name = key.toString(); + long size = value.get(); + long seed = Long.parseLong(name); + + random.setSeed(seed); + reporter.setStatus("creating " + name); + + // write to temp file initially to permit parallel execution + Path tempFile = new Path(DATA_DIR, name+suffix); + OutputStream out = fs.create(tempFile); + + long written = 0; + try { + while (written < size) { + if (fastCheck) { + Arrays.fill(buffer, (byte)random.nextInt(Byte.MAX_VALUE)); + } else { + random.nextBytes(buffer); + } + long remains = size - written; + int length = (remains<=buffer.length) ? 
(int)remains : buffer.length; + out.write(buffer, 0, length); + written += length; + reporter.setStatus("writing "+name+"@"+written+"/"+size); + } + } finally { + out.close(); + } + // rename to final location + fs.rename(tempFile, new Path(DATA_DIR, name)); + + collector.collect(new UTF8("bytes"), new LongWritable(written)); + + reporter.setStatus("wrote " + name); + } + + public void close() { + } + + } + + public static void writeTest(FileSystem fs, boolean fastCheck) + throws Exception { + + fs.delete(DATA_DIR, true); + fs.delete(WRITE_DIR, true); + + JobConf job = new JobConf(conf, TestFileSystem.class); + job.setBoolean("fs.test.fastCheck", fastCheck); + + FileInputFormat.setInputPaths(job, CONTROL_DIR); + job.setInputFormat(SequenceFileInputFormat.class); + + job.setMapperClass(WriteMapper.class); + job.setReducerClass(LongSumReducer.class); + + FileOutputFormat.setOutputPath(job, WRITE_DIR); + job.setOutputKeyClass(UTF8.class); + job.setOutputValueClass(LongWritable.class); + job.setNumReduceTasks(1); + JobClient.runJob(job); + } + + public static class ReadMapper extends Configured + implements Mapper { + + private Random random = new Random(); + private byte[] buffer = new byte[BUFFER_SIZE]; + private byte[] check = new byte[BUFFER_SIZE]; + private FileSystem fs; + private boolean fastCheck; + + { + try { + fs = FileSystem.get(conf); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + public ReadMapper() { super(null); } + + public ReadMapper(Configuration conf) { super(conf); } + + public void configure(JobConf job) { + setConf(job); + fastCheck = job.getBoolean("fs.test.fastCheck", false); + } + + public void map(UTF8 key, LongWritable value, + OutputCollector collector, + Reporter reporter) + throws IOException { + + String name = key.toString(); + long size = value.get(); + long seed = Long.parseLong(name); + + random.setSeed(seed); + reporter.setStatus("opening " + name); + + DataInputStream in = + new DataInputStream(fs.open(new Path(DATA_DIR, name))); + + long read = 0; + try { + while (read < size) { + long remains = size - read; + int n = (remains<=buffer.length) ? 
(int)remains : buffer.length; + in.readFully(buffer, 0, n); + read += n; + if (fastCheck) { + Arrays.fill(check, (byte)random.nextInt(Byte.MAX_VALUE)); + } else { + random.nextBytes(check); + } + if (n != buffer.length) { + Arrays.fill(buffer, n, buffer.length, (byte)0); + Arrays.fill(check, n, check.length, (byte)0); + } + assertTrue(Arrays.equals(buffer, check)); + + reporter.setStatus("reading "+name+"@"+read+"/"+size); + + } + } finally { + in.close(); + } + + collector.collect(new UTF8("bytes"), new LongWritable(read)); + + reporter.setStatus("read " + name); + } + + public void close() { + } + + } + + public static void readTest(FileSystem fs, boolean fastCheck) + throws Exception { + + fs.delete(READ_DIR, true); + + JobConf job = new JobConf(conf, TestFileSystem.class); + job.setBoolean("fs.test.fastCheck", fastCheck); + + + FileInputFormat.setInputPaths(job, CONTROL_DIR); + job.setInputFormat(SequenceFileInputFormat.class); + + job.setMapperClass(ReadMapper.class); + job.setReducerClass(LongSumReducer.class); + + FileOutputFormat.setOutputPath(job, READ_DIR); + job.setOutputKeyClass(UTF8.class); + job.setOutputValueClass(LongWritable.class); + job.setNumReduceTasks(1); + JobClient.runJob(job); + } + + + public static class SeekMapper extends Configured + implements Mapper { + + private Random random = new Random(); + private byte[] check = new byte[BUFFER_SIZE]; + private FileSystem fs; + private boolean fastCheck; + + { + try { + fs = FileSystem.get(conf); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + public SeekMapper() { super(null); } + + public SeekMapper(Configuration conf) { super(conf); } + + public void configure(JobConf job) { + setConf(job); + fastCheck = job.getBoolean("fs.test.fastCheck", false); + } + + public void map(WritableComparable key, LongWritable value, + OutputCollector collector, + Reporter reporter) + throws IOException { + String name = key.toString(); + long size = value.get(); + long seed = Long.parseLong(name); + + if (size == 0) return; + + reporter.setStatus("opening " + name); + + FSDataInputStream in = fs.open(new Path(DATA_DIR, name)); + + try { + for (int i = 0; i < SEEKS_PER_FILE; i++) { + // generate a random position + long position = Math.abs(random.nextLong()) % size; + + // seek file to that position + reporter.setStatus("seeking " + name); + in.seek(position); + byte b = in.readByte(); + + // check that byte matches + byte checkByte = 0; + // advance random state to that position + random.setSeed(seed); + for (int p = 0; p <= position; p+= check.length) { + reporter.setStatus("generating data for " + name); + if (fastCheck) { + checkByte = (byte)random.nextInt(Byte.MAX_VALUE); + } else { + random.nextBytes(check); + checkByte = check[(int)(position % check.length)]; + } + } + assertEquals(b, checkByte); + } + } finally { + in.close(); + } + } + + public void close() { + } + + } + + public static void seekTest(FileSystem fs, boolean fastCheck) + throws Exception { + + fs.delete(READ_DIR, true); + + JobConf job = new JobConf(conf, TestFileSystem.class); + job.setBoolean("fs.test.fastCheck", fastCheck); + + FileInputFormat.setInputPaths(job,CONTROL_DIR); + job.setInputFormat(SequenceFileInputFormat.class); + + job.setMapperClass(SeekMapper.class); + job.setReducerClass(LongSumReducer.class); + + FileOutputFormat.setOutputPath(job, READ_DIR); + job.setOutputKeyClass(UTF8.class); + job.setOutputValueClass(LongWritable.class); + job.setNumReduceTasks(1); + JobClient.runJob(job); + } + + + public static void 
main(String[] args) throws Exception { + int megaBytes = 10; + int files = 100; + boolean noRead = false; + boolean noWrite = false; + boolean noSeek = false; + boolean fastCheck = false; + long seed = new Random().nextLong(); + + String usage = "Usage: TestFileSystem -files N -megaBytes M [-noread] [-nowrite] [-noseek] [-fastcheck]"; + + if (args.length == 0) { + System.err.println(usage); + System.exit(-1); + } + for (int i = 0; i < args.length; i++) { // parse command line + if (args[i].equals("-files")) { + files = Integer.parseInt(args[++i]); + } else if (args[i].equals("-megaBytes")) { + megaBytes = Integer.parseInt(args[++i]); + } else if (args[i].equals("-noread")) { + noRead = true; + } else if (args[i].equals("-nowrite")) { + noWrite = true; + } else if (args[i].equals("-noseek")) { + noSeek = true; + } else if (args[i].equals("-fastcheck")) { + fastCheck = true; + } + } + + LOG.info("seed = "+seed); + LOG.info("files = " + files); + LOG.info("megaBytes = " + megaBytes); + + FileSystem fs = FileSystem.get(conf); + + if (!noWrite) { + createControlFile(fs, megaBytes*MEGA, files, seed); + writeTest(fs, fastCheck); + } + if (!noRead) { + readTest(fs, fastCheck); + } + if (!noSeek) { + seekTest(fs, fastCheck); + } + } + + static Configuration createConf4Testing(String username) throws Exception { + Configuration conf = new Configuration(); + UnixUserGroupInformation.saveToConf(conf, + UnixUserGroupInformation.UGI_PROPERTY_NAME, + new UnixUserGroupInformation(username, new String[]{"group"})); + return conf; + } + + /** + * A FileSystem that is slow in creation. Used for testing FileSystem Cache + * with multi-threaded access. + */ + public static class SlowCreationFileSystem extends FilterFileSystem { + + public static final int delayMS = 3 * 1000; + SlowCreationFileSystem() throws IOException { + super(FileSystem.getLocal(conf)); + try { + Thread.sleep(delayMS); + } catch (InterruptedException e) { + // Ignored + } + } + } + + public void testFsCacheMultiThreaded() throws Exception { + doTestFsCacheMultiThreaded(true); + doTestFsCacheMultiThreaded(false); + } + + public void doTestFsCacheMultiThreaded(final boolean unique) throws Exception { + final Configuration conf = new Configuration(); + conf.set("fs.slowcreation.impl", SlowCreationFileSystem.class.getName()); + conf.set("fs.default.name", "slowcreation://localhost/"); + + // Create 3 Threads which will create 3 FileSystems using the same conf. + final int n = 3; + final FileSystem[] fs = new FileSystem[n]; + final Thread[] threads = new Thread[n]; + for (int i = 0; i < n; i++) { + final int ti = i; + threads[ti] = new Thread() { + @Override + public void run() { + try { + fs[ti] = unique ? FileSystem.newInstance(conf) : FileSystem.get(conf); + } catch (IOException e) { + // Ignored + } + } + }; + }; + + long startMS = System.currentTimeMillis(); + for (int i = 0; i < n; i++) { + threads[i].start(); + } + for (int i = 0; i < n; i++) { + threads[i].join(); + } + long finishMS = System.currentTimeMillis(); + + // Verify that they are unique (or not unique). + for (int i = 1; i < n; i++) { + assertTrue(unique == (fs[0] != fs[i])); + assertTrue(fs[i] instanceof SlowCreationFileSystem); + } + + // The total creation time should be only a bit more than creation of + // a single FileSystem object. 
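// ---------------------------------------------------------------------------
// Illustrative sketch (not from the patch itself): how a custom scheme is wired
// up through "fs.<scheme>.impl" and how the FileSystem cache behaves, mirroring
// the SlowCreationFileSystem test around this point. The scheme name "slowfs"
// and the wrapper class below are assumptions for illustration only.
// ---------------------------------------------------------------------------
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FilterFileSystem;

public class FsCacheSketch {
  /** A trivial wrapper filesystem, analogous to SlowCreationFileSystem. */
  public static class SlowFs extends FilterFileSystem {
    public SlowFs() throws IOException {
      super(FileSystem.getLocal(new Configuration()));
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Register the implementation class for the (hypothetical) "slowfs" scheme.
    conf.set("fs.slowfs.impl", SlowFs.class.getName());
    conf.set("fs.default.name", "slowfs://localhost/");

    // get() consults the FileSystem cache: repeated calls return one object.
    FileSystem cached1 = FileSystem.get(conf);
    FileSystem cached2 = FileSystem.get(conf);
    System.out.println("get() cached:      " + (cached1 == cached2));   // true

    // newInstance() bypasses the cache: each call constructs a fresh object.
    FileSystem fresh1 = FileSystem.newInstance(conf);
    FileSystem fresh2 = FileSystem.newInstance(conf);
    System.out.println("newInstance() new: " + (fresh1 != fresh2));     // true

    fresh1.close();
    fresh2.close();
    FileSystem.closeAll();
  }
}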
+ assertTrue((finishMS - startMS) < 1000 + SlowCreationFileSystem.delayMS); + } + + + public void testFsCache() throws Exception { + { + long now = System.currentTimeMillis(); + Configuration[] conf = {new Configuration(), + createConf4Testing("foo" + now), createConf4Testing("bar" + now)}; + FileSystem[] fs = new FileSystem[conf.length]; + + for(int i = 0; i < conf.length; i++) { + fs[i] = FileSystem.get(conf[i]); + assertEquals(fs[i], FileSystem.get(conf[i])); + for(int j = 0; j < i; j++) { + assertFalse(fs[j] == fs[i]); + } + } + FileSystem.closeAll(); + } + + { + try { + runTestCache(NameNode.DEFAULT_PORT); + } catch(java.net.BindException be) { + LOG.warn("Cannot test NameNode.DEFAULT_PORT (=" + + NameNode.DEFAULT_PORT + ")", be); + } + + runTestCache(0); + } + } + + static void runTestCache(int port) throws Exception { + Configuration conf = new Configuration(); + MiniDFSCluster cluster = null; + try { + cluster = new MiniDFSCluster(port, conf, 2, true, true, null, null); + URI uri = cluster.getFileSystem().getUri(); + LOG.info("uri=" + uri); + + { + FileSystem fs = FileSystem.get(uri, new Configuration()); + checkPath(cluster, fs); + for(int i = 0; i < 100; i++) { + assertTrue(fs == FileSystem.get(uri, new Configuration())); + } + } + + if (port == NameNode.DEFAULT_PORT) { + //test explicit default port + URI uri2 = new URI(uri.getScheme(), uri.getUserInfo(), + uri.getHost(), NameNode.DEFAULT_PORT, uri.getPath(), + uri.getQuery(), uri.getFragment()); + LOG.info("uri2=" + uri2); + FileSystem fs = FileSystem.get(uri2, conf); + checkPath(cluster, fs); + for(int i = 0; i < 100; i++) { + assertTrue(fs == FileSystem.get(uri2, new Configuration())); + } + } + } finally { + if (cluster != null) cluster.shutdown(); + } + } + + static void checkPath(MiniDFSCluster cluster, FileSystem fileSys) throws IOException { + InetSocketAddress add = cluster.getNameNode().getNameNodeAddress(); + // Test upper/lower case + fileSys.checkPath(new Path("hdfs://" + add.getHostName().toUpperCase() + ":" + add.getPort())); + } + + public void testFsClose() throws Exception { + { + Configuration conf = new Configuration(); + new Path("file:///").getFileSystem(conf); + UnixUserGroupInformation.login(conf, true); + FileSystem.closeAll(); + } + + { + Configuration conf = new Configuration(); + new Path("hftp://localhost:12345/").getFileSystem(conf); + UnixUserGroupInformation.login(conf, true); + FileSystem.closeAll(); + } + + { + Configuration conf = new Configuration(); + FileSystem fs = new Path("hftp://localhost:12345/").getFileSystem(conf); + UnixUserGroupInformation.login(fs.getConf(), true); + FileSystem.closeAll(); + } + } + + + public void testCacheKeysAreCaseInsensitive() + throws Exception + { + Configuration conf = new Configuration(); + + // check basic equality + FileSystem.Cache.Key lowercaseCachekey1 = new FileSystem.Cache.Key(new URI("hftp://localhost:12345/"), conf); + FileSystem.Cache.Key lowercaseCachekey2 = new FileSystem.Cache.Key(new URI("hftp://localhost:12345/"), conf); + assertEquals( lowercaseCachekey1, lowercaseCachekey2 ); + + // check insensitive equality + FileSystem.Cache.Key uppercaseCachekey = new FileSystem.Cache.Key(new URI("HFTP://Localhost:12345/"), conf); + assertEquals( lowercaseCachekey2, uppercaseCachekey ); + + // check behaviour with collections + List list = new ArrayList(); + list.add(uppercaseCachekey); + assertTrue(list.contains(uppercaseCachekey)); + assertTrue(list.contains(lowercaseCachekey2)); + + Set set = new HashSet(); + set.add(uppercaseCachekey); + 
assertTrue(set.contains(uppercaseCachekey)); + assertTrue(set.contains(lowercaseCachekey2)); + + Map map = new HashMap(); + map.put(uppercaseCachekey, ""); + assertTrue(map.containsKey(uppercaseCachekey)); + assertTrue(map.containsKey(lowercaseCachekey2)); + + } + + public void testFsUniqueness() + throws Exception { + + // multiple invocations of FileSystem.get return the same object. + FileSystem fs1 = FileSystem.get(conf); + FileSystem fs2 = FileSystem.get(conf); + assertTrue(fs1 == fs2); + + // multiple invocations of FileSystem.newInstance return different objects + fs1 = FileSystem.newInstance(conf); + fs2 = FileSystem.newInstance(conf); + assertTrue(fs1 != fs2 && !fs1.equals(fs2)); + fs1.close(); + fs2.close(); + } +} diff --git a/src/test/org/apache/hadoop/fs/TestFilterFileSystem.java b/src/test/org/apache/hadoop/fs/TestFilterFileSystem.java new file mode 100644 index 0000000..cae3afe --- /dev/null +++ b/src/test/org/apache/hadoop/fs/TestFilterFileSystem.java @@ -0,0 +1,126 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs; + +import java.lang.reflect.Method; +import java.lang.reflect.Modifier; + +import junit.framework.TestCase; +import org.apache.commons.logging.Log; +import org.apache.hadoop.util.Progressable; + +public class TestFilterFileSystem extends TestCase { + + private static final Log LOG = FileSystem.LOG; + + public static class DontCheck { + public long getLength(Path f) { return 0; } + public FSDataOutputStream append(Path f) { return null; } + public FSDataOutputStream append(Path f, int bufferSize) { return null; } + public boolean exists(Path f) { return false; } + public boolean isDirectory(Path f) { return false; } + public boolean isFile(Path f) { return false; } + public boolean createNewFile(Path f) { return false; } + public boolean mkdirs(Path f) { return false; } + public FSDataInputStream open(Path f) { return null; } + public FSDataOutputStream create(Path f) { return null; } + public FSDataOutputStream create(Path f, boolean overwrite) { return null; } + public FSDataOutputStream create(Path f, Progressable progress) { + return null; + } + public FSDataOutputStream create(Path f, short replication) { + return null; + } + public FSDataOutputStream create(Path f, short replication, + Progressable progress) { + return null; + } + public FSDataOutputStream create(Path f, + boolean overwrite, + int bufferSize) { + return null; + } + public FSDataOutputStream create(Path f, + boolean overwrite, + int bufferSize, + Progressable progress) { + return null; + } + public FSDataOutputStream create(Path f, + boolean overwrite, + int bufferSize, + short replication, + long blockSize) { + return null; + } + public FSDataOutputStream create(Path f, + boolean overwrite, + int bufferSize, + short replication, + long blockSize, + Progressable progress) { + return null; + } + public short getReplication(Path src) { return 0 ; } + public void processDeleteOnExit() { } + public FileStatus[] listStatus(Path f, PathFilter filter) { return null; } + public FileStatus[] listStatus(Path[] files) { return null; } + public FileStatus[] listStatus(Path[] files, PathFilter filter) { return null; } + public RemoteIterator listLocatedStatus(Path file) { + return null; } + public RemoteIterator listLocatedStatus(Path file, + PathFilter filter) { return null; } + public FileStatus[] listLocatedStatus(Path[] files) { return null; } + public FileStatus[] listLocatedStatus(Path[] files, PathFilter filter) { return null; } + public FileStatus[] globStatus(Path pathPattern) { return null; } + public FileStatus[] globStatus(Path pathPattern, PathFilter filter) { + return null; + } + public void copyFromLocalFile(Path src, Path dst) { } + public void moveFromLocalFile(Path[] srcs, Path dst) { } + public void moveFromLocalFile(Path src, Path dst) { } + public void copyToLocalFile(Path src, Path dst) { } + public void moveToLocalFile(Path src, Path dst) { } + public long getBlockSize(Path f) { return 0; } + } + + public void testFilterFileSystem() throws Exception { + for (Method m : FileSystem.class.getDeclaredMethods()) { + if (Modifier.isStatic(m.getModifiers())) + continue; + if (Modifier.isPrivate(m.getModifiers())) + continue; + + try { + DontCheck.class.getMethod(m.getName(), m.getParameterTypes()); + LOG.info("Skipping " + m); + } catch (NoSuchMethodException exc) { + LOG.info("Testing " + m); + try{ + FilterFileSystem.class.getDeclaredMethod(m.getName(), m.getParameterTypes()); + } + catch(NoSuchMethodException exc2){ + LOG.error("FilterFileSystem doesn't implement " + m); + 
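// ---------------------------------------------------------------------------
// Illustrative sketch (not from the patch itself): the general reflection
// pattern used by testFilterFileSystem -- walk the public, non-static methods
// of a base class and verify that a wrapper class declares an override for
// each one unless it is explicitly exempted. The Base/Exempt/Wrapper class
// names here are assumptions for illustration.
// ---------------------------------------------------------------------------
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;

public class OverrideCheckSketch {
  static class Base {
    public void a() {}
    public int b(String s) { return 0; }
    public void c() {}                          // exempted below
  }
  static class Exempt { public void c() {} }    // methods we deliberately skip
  static class Wrapper extends Base {
    @Override public void a() {}
    @Override public int b(String s) { return 1; }
    // note: c() is intentionally not overridden
  }

  public static void main(String[] args) {
    for (Method m : Base.class.getDeclaredMethods()) {
      if (Modifier.isStatic(m.getModifiers()) || Modifier.isPrivate(m.getModifiers())) {
        continue;
      }
      try {
        Exempt.class.getMethod(m.getName(), m.getParameterTypes());
        System.out.println("skipping " + m);    // on the exemption list
        continue;
      } catch (NoSuchMethodException notExempt) {
        // not exempted: fall through and require an override
      }
      try {
        Wrapper.class.getDeclaredMethod(m.getName(), m.getParameterTypes());
        System.out.println("ok       " + m);
      } catch (NoSuchMethodException missing) {
        System.out.println("MISSING  " + m);    // Wrapper does not override it
      }
    }
  }
}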
throw exc2; + } + } + } + } + +} diff --git a/src/test/org/apache/hadoop/fs/TestGetFileBlockLocations.java b/src/test/org/apache/hadoop/fs/TestGetFileBlockLocations.java new file mode 100644 index 0000000..c85cc98 --- /dev/null +++ b/src/test/org/apache/hadoop/fs/TestGetFileBlockLocations.java @@ -0,0 +1,139 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.fs; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Comparator; +import java.util.Random; + +import junit.framework.TestCase; + +import org.apache.hadoop.conf.Configuration; + +/** + * Testing the correctness of FileSystem.getFileBlockLocations. + */ +public class TestGetFileBlockLocations extends TestCase { + private static String TEST_ROOT_DIR = + System.getProperty("test.build.data", "/tmp/testGetFileBlockLocations"); + private static final int FileLength = 4 * 1024 * 1024; // 4MB + private Configuration conf; + private Path path; + private FileSystem fs; + private Random random; + + /** + * @see TestCase#setUp() + */ + @Override + protected void setUp() throws IOException { + conf = new Configuration(); + Path rootPath = new Path(TEST_ROOT_DIR); + path = new Path(rootPath, "TestGetFileBlockLocations"); + fs = rootPath.getFileSystem(conf); + FSDataOutputStream fsdos = fs.create(path, true); + byte[] buffer = new byte[1024]; + while (fsdos.getPos() < FileLength) { + fsdos.write(buffer); + } + fsdos.close(); + random = new Random(System.nanoTime()); + } + + private void oneTest(int offBegin, int offEnd, FileStatus status) + throws IOException { + if (offBegin > offEnd) { + int tmp = offBegin; + offBegin = offEnd; + offEnd = tmp; + } + BlockLocation[] locations = + fs.getFileBlockLocations(status, offBegin, offEnd - offBegin); + if (offBegin < status.getLen()) { + Arrays.sort(locations, new Comparator() { + + @Override + public int compare(BlockLocation arg0, BlockLocation arg1) { + long cmprv = arg0.getOffset() - arg1.getOffset(); + if (cmprv < 0) return -1; + if (cmprv > 0) return 1; + cmprv = arg0.getLength() - arg1.getLength(); + if (cmprv < 0) return -1; + if (cmprv > 0) return 1; + return 0; + } + + }); + offBegin = (int) Math.min(offBegin, status.getLen() - 1); + offEnd = (int) Math.min(offEnd, status.getLen()); + BlockLocation first = locations[0]; + BlockLocation last = locations[locations.length - 1]; + assertTrue(first.getOffset() <= offBegin); + assertTrue(offEnd <= last.getOffset() + last.getLength()); + } else { + assertTrue(locations.length == 0); + } + } + /** + * @see TestCase#tearDown() + */ + @Override + protected void tearDown() throws IOException { + fs.delete(path, true); + fs.close(); + } + + public void testFailureNegativeParameters() throws IOException { + FileStatus status = fs.getFileStatus(path); + try 
{ + BlockLocation[] locations = fs.getFileBlockLocations(status, -1, 100); + fail("Expecting exception being throw"); + } catch (IllegalArgumentException e) { + + } + + try { + BlockLocation[] locations = fs.getFileBlockLocations(status, 100, -1); + fail("Expecting exception being throw"); + } catch (IllegalArgumentException e) { + + } + } + + public void testGetFileBlockLocations1() throws IOException { + FileStatus status = fs.getFileStatus(path); + oneTest(0, (int) status.getLen(), status); + oneTest(0, (int) status.getLen() * 2, status); + oneTest((int) status.getLen() * 2, (int) status.getLen() * 4, status); + oneTest((int) status.getLen() / 2, (int) status.getLen() * 3, status); + for (int i = 0; i < 10; ++i) { + oneTest((int) status.getLen() * i / 10, (int) status.getLen() * (i + 1) + / 10, status); + } + } + + public void testGetFileBlockLocations2() throws IOException { + FileStatus status = fs.getFileStatus(path); + for (int i = 0; i < 1000; ++i) { + int offBegin = random.nextInt((int) (2 * status.getLen())); + int offEnd = random.nextInt((int) (2 * status.getLen())); + oneTest(offBegin, offEnd, status); + } + } +} diff --git a/src/test/org/apache/hadoop/fs/TestGlobExpander.java b/src/test/org/apache/hadoop/fs/TestGlobExpander.java new file mode 100644 index 0000000..b0466b8 --- /dev/null +++ b/src/test/org/apache/hadoop/fs/TestGlobExpander.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import java.io.IOException; +import java.util.List; + +import junit.framework.TestCase; + +public class TestGlobExpander extends TestCase { + + public void testExpansionIsIdentical() throws IOException { + checkExpansionIsIdentical(""); + checkExpansionIsIdentical("/}"); + checkExpansionIsIdentical("/}{a,b}"); + checkExpansionIsIdentical("{/"); + checkExpansionIsIdentical("{a}"); + checkExpansionIsIdentical("{a,b}/{b,c}"); + checkExpansionIsIdentical("p\\{a/b,c/d\\}s"); + checkExpansionIsIdentical("p{a\\/b,c\\/d}s"); + } + + public void testExpansion() throws IOException { + checkExpansion("{a/b}", "a/b"); + checkExpansion("/}{a/b}", "/}a/b"); + checkExpansion("p{a/b,c/d}s", "pa/bs", "pc/ds"); + checkExpansion("{a/b,c/d,{e,f}}", "a/b", "c/d", "{e,f}"); + checkExpansion("{a/b,c/d}{e,f}", "a/b{e,f}", "c/d{e,f}"); + checkExpansion("{a,b}/{b,{c/d,e/f}}", "{a,b}/b", "{a,b}/c/d", "{a,b}/e/f"); + checkExpansion("{a,b}/{c/\\d}", "{a,b}/c/d"); + } + + private void checkExpansionIsIdentical(String filePattern) throws IOException { + checkExpansion(filePattern, filePattern); + } + + private void checkExpansion(String filePattern, String... 
expectedExpansions) + throws IOException { + List actualExpansions = GlobExpander.expand(filePattern); + assertEquals("Different number of expansions", expectedExpansions.length, + actualExpansions.size()); + for (int i = 0; i < expectedExpansions.length; i++) { + assertEquals("Expansion of " + filePattern, expectedExpansions[i], + actualExpansions.get(i)); + } + } +} diff --git a/src/test/org/apache/hadoop/fs/TestGlobPaths.java b/src/test/org/apache/hadoop/fs/TestGlobPaths.java new file mode 100644 index 0000000..18cc820 --- /dev/null +++ b/src/test/org/apache/hadoop/fs/TestGlobPaths.java @@ -0,0 +1,431 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import java.io.IOException; +import java.util.regex.Pattern; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.MiniDFSCluster; + +import junit.framework.TestCase; + +public class TestGlobPaths extends TestCase { + + static class RegexPathFilter implements PathFilter { + + private final String regex; + public RegexPathFilter(String regex) { + this.regex = regex; + } + + public boolean accept(Path path) { + return path.toString().matches(regex); + } + + } + + static private MiniDFSCluster dfsCluster; + static private FileSystem fs; + static final private int NUM_OF_PATHS = 4; + static final String USER_DIR = "/user/"+System.getProperty("user.name"); + private Path[] path = new Path[NUM_OF_PATHS]; + + protected void setUp() throws Exception { + try { + Configuration conf = new Configuration(); + dfsCluster = new MiniDFSCluster(conf, 1, true, null); + fs = FileSystem.get(conf); + } catch (IOException e) { + e.printStackTrace(); + } + } + + protected void tearDown() throws Exception { + if(dfsCluster!=null) { + dfsCluster.shutdown(); + } + } + + public void testPathFilter() throws IOException { + try { + String[] files = new String[] { USER_DIR + "/a", USER_DIR + "/a/b" }; + Path[] matchedPath = prepareTesting(USER_DIR + "/*/*", files, + new RegexPathFilter("^.*" + Pattern.quote(USER_DIR) + "/a/b")); + assertEquals(matchedPath.length, 1); + assertEquals(matchedPath[0], path[1]); + } finally { + cleanupDFS(); + } + } + + public void testPathFilterWithFixedLastComponent() throws IOException { + try { + String[] files = new String[] { USER_DIR + "/a", USER_DIR + "/a/b", + USER_DIR + "/c", USER_DIR + "/c/b", }; + Path[] matchedPath = prepareTesting(USER_DIR + "/*/b", files, + new RegexPathFilter("^.*" + Pattern.quote(USER_DIR) + "/a/b")); + assertEquals(matchedPath.length, 1); + assertEquals(matchedPath[0], path[1]); + } finally { + cleanupDFS(); + } + } + + public void testGlob() throws Exception { + //pTestEscape(); // need to wait until HADOOP-1995 is fixed + pTestJavaRegexSpecialChars(); + pTestCurlyBracket(); + pTestLiteral(); 
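// ---------------------------------------------------------------------------
// Illustrative sketch (not from the patch itself): what the glob expansion
// exercised by these tests looks like from the user side. The scratch
// directory /tmp/globdemo and the file names are assumptions; globStatus()
// accepts ?, *, [...] and {...} patterns, with {...} alternatives expanded
// even across path components.
// ---------------------------------------------------------------------------
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GlobSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path base = new Path("/tmp/globdemo");              // assumed scratch dir
    for (String name : new String[] {"a/b", "c/d", "a.txt", "a.old"}) {
      fs.create(new Path(base, name)).close();          // create empty test files
    }
    // "{x,y}" matches either alternative, here across two path components.
    for (FileStatus st : fs.globStatus(new Path(base, "{a/b,c/d}"))) {
      System.out.println("curly match: " + st.getPath());   // a/b and c/d
    }
    // "*" matches any run of characters within one component.
    for (FileStatus st : fs.globStatus(new Path(base, "a.*"))) {
      System.out.println("star  match: " + st.getPath());   // a.txt and a.old
    }
    fs.delete(base, true);                              // clean up
  }
}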
+ pTestAny(); + pTestClosure(); + pTestSet(); + pTestRange(); + pTestSetExcl(); + pTestCombination(); + pTestRelativePath(); + } + + private void pTestLiteral() throws IOException { + try { + String [] files = new String[] {USER_DIR+"/a2c", USER_DIR+"/abc.d"}; + Path[] matchedPath = prepareTesting(USER_DIR+"/abc.d", files); + assertEquals(matchedPath.length, 1); + assertEquals(matchedPath[0], path[1]); + } finally { + cleanupDFS(); + } + } + + private void pTestEscape() throws IOException { + try { + String [] files = new String[] {USER_DIR+"/ab\\[c.d"}; + Path[] matchedPath = prepareTesting(USER_DIR+"/ab\\[c.d", files); + assertEquals(matchedPath.length, 1); + assertEquals(matchedPath[0], path[0]); + } finally { + cleanupDFS(); + } + } + + private void pTestAny() throws IOException { + try { + String [] files = new String[] { USER_DIR+"/abc", USER_DIR+"/a2c", + USER_DIR+"/a.c", USER_DIR+"/abcd"}; + Path[] matchedPath = prepareTesting(USER_DIR+"/a?c", files); + assertEquals(matchedPath.length, 3); + assertEquals(matchedPath[0], path[2]); + assertEquals(matchedPath[1], path[1]); + assertEquals(matchedPath[2], path[0]); + } finally { + cleanupDFS(); + } + } + + private void pTestClosure() throws IOException { + pTestClosure1(); + pTestClosure2(); + pTestClosure3(); + pTestClosure4(); + pTestClosure5(); + } + + private void pTestClosure1() throws IOException { + try { + String [] files = new String[] {USER_DIR+"/a", USER_DIR+"/abc", + USER_DIR+"/abc.p", USER_DIR+"/bacd"}; + Path[] matchedPath = prepareTesting(USER_DIR+"/a*", files); + assertEquals(matchedPath.length, 3); + assertEquals(matchedPath[0], path[0]); + assertEquals(matchedPath[1], path[1]); + assertEquals(matchedPath[2], path[2]); + } finally { + cleanupDFS(); + } + } + + private void pTestClosure2() throws IOException { + try { + String [] files = new String[] {USER_DIR+"/a.", USER_DIR+"/a.txt", + USER_DIR+"/a.old.java", USER_DIR+"/.java"}; + Path[] matchedPath = prepareTesting(USER_DIR+"/a.*", files); + assertEquals(matchedPath.length, 3); + assertEquals(matchedPath[0], path[0]); + assertEquals(matchedPath[1], path[2]); + assertEquals(matchedPath[2], path[1]); + } finally { + cleanupDFS(); + } + } + + private void pTestClosure3() throws IOException { + try { + String [] files = new String[] {USER_DIR+"/a.txt.x", USER_DIR+"/ax", + USER_DIR+"/ab37x", USER_DIR+"/bacd"}; + Path[] matchedPath = prepareTesting(USER_DIR+"/a*x", files); + assertEquals(matchedPath.length, 3); + assertEquals(matchedPath[0], path[0]); + assertEquals(matchedPath[1], path[2]); + assertEquals(matchedPath[2], path[1]); + } finally { + cleanupDFS(); + } + } + + private void pTestClosure4() throws IOException { + try { + String [] files = new String[] {USER_DIR+"/dir1/file1", + USER_DIR+"/dir2/file2", + USER_DIR+"/dir3/file1"}; + Path[] matchedPath = prepareTesting(USER_DIR+"/*/file1", files); + assertEquals(matchedPath.length, 2); + assertEquals(matchedPath[0], path[0]); + assertEquals(matchedPath[1], path[2]); + } finally { + cleanupDFS(); + } + } + + private void pTestClosure5() throws IOException { + try { + String [] files = new String[] {USER_DIR+"/dir1/file1", + USER_DIR+"/file1"}; + Path[] matchedPath = prepareTesting(USER_DIR+"/*/file1", files); + assertEquals(matchedPath.length, 1); + assertEquals(matchedPath[0], path[0]); + } finally { + cleanupDFS(); + } + } + + private void pTestSet() throws IOException { + try { + String [] files = new String[] {USER_DIR+"/a.c", USER_DIR+"/a.cpp", + USER_DIR+"/a.hlp", USER_DIR+"/a.hxy"}; + Path[] matchedPath = 
prepareTesting(USER_DIR+"/a.[ch]??", files); + assertEquals(matchedPath.length, 3); + assertEquals(matchedPath[0], path[1]); + assertEquals(matchedPath[1], path[2]); + assertEquals(matchedPath[2], path[3]); + } finally { + cleanupDFS(); + } + } + + private void pTestRange() throws IOException { + try { + String [] files = new String[] {USER_DIR+"/a.d", USER_DIR+"/a.e", + USER_DIR+"/a.f", USER_DIR+"/a.h"}; + Path[] matchedPath = prepareTesting(USER_DIR+"/a.[d-fm]", files); + assertEquals(matchedPath.length, 3); + assertEquals(matchedPath[0], path[0]); + assertEquals(matchedPath[1], path[1]); + assertEquals(matchedPath[2], path[2]); + } finally { + cleanupDFS(); + } + } + + private void pTestSetExcl() throws IOException { + try { + String [] files = new String[] {USER_DIR+"/a.d", USER_DIR+"/a.e", + USER_DIR+"/a.0", USER_DIR+"/a.h"}; + Path[] matchedPath = prepareTesting(USER_DIR+"/a.[^a-cg-z0-9]", files); + assertEquals(matchedPath.length, 2); + assertEquals(matchedPath[0], path[0]); + assertEquals(matchedPath[1], path[1]); + } finally { + cleanupDFS(); + } + } + + private void pTestCombination() throws IOException { + try { + String [] files = new String[] {"/user/aa/a.c", "/user/bb/a.cpp", + "/user1/cc/b.hlp", "/user/dd/a.hxy"}; + Path[] matchedPath = prepareTesting("/use?/*/a.[ch]{lp,xy}", files); + assertEquals(matchedPath.length, 1); + assertEquals(matchedPath[0], path[3]); + } finally { + cleanupDFS(); + } + } + + private void pTestRelativePath() throws IOException { + try { + String [] files = new String[] {"a", "abc", "abc.p", "bacd"}; + Path[] matchedPath = prepareTesting("a*", files); + assertEquals(matchedPath.length, 3); + assertEquals(matchedPath[0], new Path(USER_DIR, path[0])); + assertEquals(matchedPath[1], new Path(USER_DIR, path[1])); + assertEquals(matchedPath[2], new Path(USER_DIR, path[2])); + } finally { + cleanupDFS(); + } + } + + /* Test {xx,yy} */ + private void pTestCurlyBracket() throws IOException { + Path[] matchedPath; + String [] files; + try { + files = new String[] { USER_DIR+"/a.abcxx", USER_DIR+"/a.abxy", + USER_DIR+"/a.hlp", USER_DIR+"/a.jhyy"}; + matchedPath = prepareTesting(USER_DIR+"/a.{abc,jh}??", files); + assertEquals(matchedPath.length, 2); + assertEquals(matchedPath[0], path[0]); + assertEquals(matchedPath[1], path[3]); + } finally { + cleanupDFS(); + } + // nested curlies + try { + files = new String[] { USER_DIR+"/a.abcxx", USER_DIR+"/a.abdxy", + USER_DIR+"/a.hlp", USER_DIR+"/a.jhyy" }; + matchedPath = prepareTesting(USER_DIR+"/a.{ab{c,d},jh}??", files); + assertEquals(matchedPath.length, 3); + assertEquals(matchedPath[0], path[0]); + assertEquals(matchedPath[1], path[1]); + assertEquals(matchedPath[2], path[3]); + } finally { + cleanupDFS(); + } + // cross-component curlies + try { + files = new String[] { USER_DIR+"/a/b", USER_DIR+"/a/d", + USER_DIR+"/c/b", USER_DIR+"/c/d" }; + matchedPath = prepareTesting(USER_DIR+"/{a/b,c/d}", files); + assertEquals(matchedPath.length, 2); + assertEquals(matchedPath[0], path[0]); + assertEquals(matchedPath[1], path[3]); + } finally { + cleanupDFS(); + } + // cross-component absolute curlies + try { + files = new String[] { "/a/b", "/a/d", + "/c/b", "/c/d" }; + matchedPath = prepareTesting("{/a/b,/c/d}", files); + assertEquals(matchedPath.length, 2); + assertEquals(matchedPath[0], path[0]); + assertEquals(matchedPath[1], path[3]); + } finally { + cleanupDFS(); + } + try { + // test standalone } + files = new String[] {USER_DIR+"/}bc", USER_DIR+"/}c"}; + matchedPath = prepareTesting(USER_DIR+"/}{a,b}c", files); 
+ assertEquals(matchedPath.length, 1); + assertEquals(matchedPath[0], path[0]); + // test {b} + matchedPath = prepareTesting(USER_DIR+"/}{b}c", files); + assertEquals(matchedPath.length, 1); + assertEquals(matchedPath[0], path[0]); + // test {} + matchedPath = prepareTesting(USER_DIR+"/}{}bc", files); + assertEquals(matchedPath.length, 1); + assertEquals(matchedPath[0], path[0]); + + // test {,} + matchedPath = prepareTesting(USER_DIR+"/}{,}bc", files); + assertEquals(matchedPath.length, 1); + assertEquals(matchedPath[0], path[0]); + + // test {b,} + matchedPath = prepareTesting(USER_DIR+"/}{b,}c", files); + assertEquals(matchedPath.length, 2); + assertEquals(matchedPath[0], path[0]); + assertEquals(matchedPath[1], path[1]); + + // test {,b} + matchedPath = prepareTesting(USER_DIR+"/}{,b}c", files); + assertEquals(matchedPath.length, 2); + assertEquals(matchedPath[0], path[0]); + assertEquals(matchedPath[1], path[1]); + + // test a combination of {} and ? + matchedPath = prepareTesting(USER_DIR+"/}{ac,?}", files); + assertEquals(matchedPath.length, 1); + assertEquals(matchedPath[0], path[1]); + + // test ill-formed curly + boolean hasException = false; + try { + prepareTesting(USER_DIR+"}{bc", files); + } catch (IOException e) { + assertTrue(e.getMessage().startsWith("Illegal file pattern:") ); + hasException = true; + } + assertTrue(hasException); + } finally { + cleanupDFS(); + } + } + + /* test that a path name can contain Java regex special characters */ + private void pTestJavaRegexSpecialChars() throws IOException { + try { + String[] files = new String[] {USER_DIR+"/($.|+)bc", USER_DIR+"/abc"}; + Path[] matchedPath = prepareTesting(USER_DIR+"/($.|+)*", files); + assertEquals(matchedPath.length, 1); + assertEquals(matchedPath[0], path[0]); + } finally { + cleanupDFS(); + } + + } + private Path[] prepareTesting(String pattern, String[] files) + throws IOException { + for(int i=0; i, + Reducer { + + public void configure(JobConf conf) { + //do nothing + } + + public void map(LongWritable key, Text value, OutputCollector output, Reporter reporter) throws IOException { + output.collect(value, new Text("")); + } + + public void close() throws IOException { + // do nothing + } + + public void reduce(Text key, Iterator values, OutputCollector output, Reporter reporter) throws IOException { + while(values.hasNext()) { + values.next(); + output.collect(key, null); + } + } + } + + /* check bytes in the har output files */ + private void checkBytes(Path harPath, Configuration conf) throws IOException { + Path harFilea = new Path(harPath, "a"); + Path harFileb = new Path(harPath, "b"); + Path harFilec = new Path(harPath, "c c"); + Path harFiled = new Path(harPath, "d%d"); + FileSystem harFs = harFilea.getFileSystem(conf); + FSDataInputStream fin = harFs.open(harFilea); + byte[] b = new byte[4]; + int readBytes = fin.read(b); + fin.close(); + assertTrue("strings are equal ", (b[0] == "a".getBytes()[0])); + fin = harFs.open(harFileb); + fin.read(b); + fin.close(); + assertTrue("strings are equal ", (b[0] == "b".getBytes()[0])); + fin = harFs.open(harFilec); + fin.read(b); + fin.close(); + assertTrue("strings are equal ", (b[0] == "c".getBytes()[0])); + fin = harFs.open(harFiled); + fin.read(b); + fin.close(); + assertTrue("strings are equal ", (b[0] == "d".getBytes()[0])); + } + + /** + * check if the block size of the part files is what we had specified + */ + private void checkBlockSize(FileSystem fs, Path finalPath, long blockSize) throws IOException { + FileStatus[] statuses = 
        fs.globStatus(new Path(finalPath, "part-*"));
+    for (FileStatus status: statuses) {
+      assertTrue(status.getBlockSize() == blockSize);
+    }
+  }
+
+  // test archives with a -p option
+  public void testRelativeArchives() throws Exception {
+    fs.delete(archivePath, true);
+    Configuration conf = mapred.createJobConf();
+    HadoopArchives har = new HadoopArchives(conf);
+
+    {
+      String[] args = new String[6];
+      args[0] = "-archiveName";
+      args[1] = "foo1.har";
+      args[2] = "-p";
+      args[3] = fs.getHomeDirectory().toString();
+      args[4] = "test";
+      args[5] = archivePath.toString();
+      int ret = ToolRunner.run(har, args);
+      assertTrue("failed test", ret == 0);
+      Path finalPath = new Path(archivePath, "foo1.har");
+      Path fsPath = new Path(inputPath.toUri().getPath());
+      Path filePath = new Path(finalPath, "test");
+      // make it a har path
+      Path harPath = new Path("har://" + filePath.toUri().getPath());
+      assertTrue(fs.exists(new Path(finalPath, "_index")));
+      assertTrue(fs.exists(new Path(finalPath, "_masterindex")));
+      /* check for existence of only 1 part file, since part file size == 2GB */
+      assertTrue(fs.exists(new Path(finalPath, "part-0")));
+      assertTrue(!fs.exists(new Path(finalPath, "part-1")));
+      assertTrue(!fs.exists(new Path(finalPath, "part-2")));
+      assertTrue(!fs.exists(new Path(finalPath, "_logs")));
+      FileStatus[] statuses = fs.listStatus(finalPath);
+      args = new String[2];
+      args[0] = "-ls";
+      args[1] = harPath.toString();
+      FsShell shell = new FsShell(conf);
+      ret = ToolRunner.run(shell, args);
+      // fileb and filec
+      assertTrue(ret == 0);
+      checkBytes(harPath, conf);
+      /* check block size for part files */
+      checkBlockSize(fs, finalPath, 512 * 1024 * 1024l);
+    }
+
+    /** now try with different block size and part file size **/
+    {
+      String[] args = new String[6];
+      args[0] = "-archiveName";
+      args[1] = "foo.har";
+      args[2] = "-p";
+      args[3] = fs.getHomeDirectory().toString();
+      args[4] = "test";
+      args[5] = archivePath.toString();
+      Log.info("here, before");
+      conf.setInt("har.block.size", 512);
+      conf.setInt("har.partfile.size", 1);
+      int ret = ToolRunner.run(har, args);
+      Log.info("here, after");
+      assertTrue("failed test", ret == 0);
+      Path finalPath = new Path(archivePath, "foo.har");
+      Path fsPath = new Path(inputPath.toUri().getPath());
+      Path filePath = new Path(finalPath, "test");
+      // make it a har path
+      Path harPath = new Path("har://" + filePath.toUri().getPath());
+      assertTrue(fs.exists(new Path(finalPath, "_index")));
+      assertTrue(fs.exists(new Path(finalPath, "_masterindex")));
+      /* check for existence of 3 part files, since part file size == 1 */
+      assertTrue(fs.exists(new Path(finalPath, "part-0")));
+      assertTrue(fs.exists(new Path(finalPath, "part-1")));
+      assertTrue(fs.exists(new Path(finalPath, "part-2")));
+      assertTrue(!fs.exists(new Path(finalPath, "_logs")));
+      FileStatus[] statuses = fs.listStatus(finalPath);
+      args = new String[2];
+      args[0] = "-ls";
+      args[1] = harPath.toString();
+      FsShell shell = new FsShell(conf);
+      ret = ToolRunner.run(shell, args);
+      // fileb and filec
+      assertTrue(ret == 0);
+      checkBytes(harPath, conf);
+      checkBlockSize(fs, finalPath, 512);
+    }
+  }
+
+  public void testArchivesWithMapred() throws Exception {
+    // one minor check
+    // check to see if fs.har.impl.disable.cache is set
+    Configuration conf = mapred.createJobConf();
+
+    boolean archivecaching = conf.getBoolean("fs.har.impl.disable.cache", false);
+    assertTrue(archivecaching);
+    fs.delete(archivePath, true);
+    HadoopArchives har = new HadoopArchives(conf);
+    String[] args = new String[4];
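// ---------------------------------------------------------------------------
// Illustrative sketch (not from the patch itself): driving HadoopArchives the
// way testRelativeArchives above does -- "-p <parent>" makes archived names
// relative to <parent>, while "har.block.size" and "har.partfile.size" control
// the block size and the split into part-* files. The destination path, sizes
// and archive name here are assumptions for illustration.
// ---------------------------------------------------------------------------
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.tools.HadoopArchives;
import org.apache.hadoop.util.ToolRunner;

public class ArchiveCreateSketch {
  public static void main(String[] argv) throws Exception {
    Configuration conf = new Configuration();
    // Small limits so even a tiny input produces several part files.
    conf.setInt("har.block.size", 512);       // block size used inside the archive
    conf.setInt("har.partfile.size", 1);      // max bytes per part-* file
    FileSystem fs = FileSystem.get(conf);

    String parent = fs.getHomeDirectory().toString();   // assumed source parent
    String dest = "/user/demo/archives";                 // assumed destination dir
    String[] args = { "-archiveName", "demo.har",
                      "-p", parent, "test",              // archive <parent>/test
                      dest };
    int ret = ToolRunner.run(new HadoopArchives(conf), args);
    System.out.println("archive exit code: " + ret);     // 0 on success

    // The archive directory holds _index, _masterindex and the part-* files.
    Path har = new Path(dest, "demo.har");
    System.out.println("_index exists: " + fs.exists(new Path(har, "_index")));
  }
}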
+
+    // check for destination not specified
+    args[0] = "-archiveName";
+    args[1] = "foo.har";
+    args[2] = "-p";
+    args[3] = "/";
+    int ret = ToolRunner.run(har, args);
+    assertTrue(ret != 0);
+    args = new String[6];
+    // check for wrong archiveName
+    args[0] = "-archiveName";
+    args[1] = "/d/foo.har";
+    args[2] = "-p";
+    args[3] = "/";
+    args[4] = inputrelPath.toString();
+    args[5] = archivePath.toString();
+    ret = ToolRunner.run(har, args);
+    assertTrue(ret != 0);
+    // see if dest is a file
+    args[1] = "foo.har";
+    args[5] = filec.toString();
+    ret = ToolRunner.run(har, args);
+    assertTrue(ret != 0);
+    // this is a valid run
+    args[0] = "-archiveName";
+    args[1] = "foo.har";
+    args[2] = "-p";
+    args[3] = "/";
+    args[4] = inputrelPath.toString();
+    args[5] = archivePath.toString();
+    ret = ToolRunner.run(har, args);
+    // check for the existence of the archive
+    assertTrue(ret == 0);
+    // try running it again. it should not
+    // override the directory
+    ret = ToolRunner.run(har, args);
+    assertTrue(ret != 0);
+    Path finalPath = new Path(archivePath, "foo.har");
+    Path fsPath = new Path(inputPath.toUri().getPath());
+    String relative = fsPath.toString().substring(1);
+    Path filePath = new Path(finalPath, relative);
+    // make it a har path
+    URI uri = fs.getUri();
+    Path harPath = new Path("har://" + "hdfs-" + uri.getHost() +":" +
+                    uri.getPort() + filePath.toUri().getPath());
+    assertTrue(fs.exists(new Path(finalPath, "_index")));
+    assertTrue(fs.exists(new Path(finalPath, "_masterindex")));
+    assertTrue(!fs.exists(new Path(finalPath, "_logs")));
+    // creation tested
+    // check if the archive is the same:
+    // do ls and cat on all the files
+
+    FsShell shell = new FsShell(conf);
+    args = new String[2];
+    args[0] = "-ls";
+    args[1] = harPath.toString();
+    ret = ToolRunner.run(shell, args);
+    // ls should work.
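// ---------------------------------------------------------------------------
// Illustrative sketch (not from the patch itself): reading an archived file
// back through the har:// scheme, as testArchivesWithMapred does above. The
// URI folds the underlying scheme and authority into the har path
// ("har://hdfs-<host>:<port>/<archive>/<file>"); host, port, archive location
// and the file name "a" below are assumptions for illustration.
// ---------------------------------------------------------------------------
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Archive assumed at hdfs://namenode:8020/user/demo/archives/demo.har,
    // containing a file named "a".
    Path harFile = new Path(
        "har://hdfs-namenode:8020/user/demo/archives/demo.har/a");
    FileSystem harFs = harFile.getFileSystem(conf);   // resolves the har filesystem

    // Listing and reading go through the archive's _index transparently.
    FileStatus st = harFs.getFileStatus(harFile);
    System.out.println("length of archived file: " + st.getLen());

    FSDataInputStream in = harFs.open(harFile);
    byte[] buf = new byte[4];
    int n = in.read(buf);
    in.close();
    System.out.println("read " + n + " bytes: " + new String(buf, 0, Math.max(n, 0)));
  }
}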
+ assertTrue((ret == 0)); + //now check for contents of filea + // fileb and filec + Path harFilea = new Path(harPath, "a"); + Path harFileb = new Path(harPath, "b"); + Path harFilec = new Path(harPath, "c c"); + Path harFiled = new Path(harPath, "d%d"); + FileSystem harFs = harFilea.getFileSystem(conf); + FSDataInputStream fin = harFs.open(harFilea); + byte[] b = new byte[4]; + int readBytes = fin.read(b); + assertTrue("Empty read.", readBytes > 0); + fin.close(); + assertTrue("strings are equal ", (b[0] == "a".getBytes()[0])); + fin = harFs.open(harFileb); + readBytes = fin.read(b); + assertTrue("Empty read.", readBytes > 0); + fin.close(); + assertTrue("strings are equal ", (b[0] == "b".getBytes()[0])); + fin = harFs.open(harFilec); + readBytes = fin.read(b); + assertTrue("Empty read.", readBytes > 0); + fin.close(); + assertTrue("strings are equal ", (b[0] == "c".getBytes()[0])); + // ok all files match + // run a map reduce job + Path outdir = new Path(fs.getHomeDirectory(), "mapout"); + JobConf jobconf = mapred.createJobConf(); + FileInputFormat.addInputPath(jobconf, harPath); + jobconf.setInputFormat(TextInputFormat.class); + jobconf.setOutputFormat(TextOutputFormat.class); + FileOutputFormat.setOutputPath(jobconf, outdir); + jobconf.setMapperClass(TextMapperReducer.class); + jobconf.setMapOutputKeyClass(Text.class); + jobconf.setMapOutputValueClass(Text.class); + jobconf.setReducerClass(TextMapperReducer.class); + jobconf.setNumReduceTasks(1); + JobClient.runJob(jobconf); + args[1] = outdir.toString(); + ret = ToolRunner.run(shell, args); + + FileStatus[] status = fs.globStatus(new Path(outdir, "part*")); + Path reduceFile = status[0].getPath(); + FSDataInputStream reduceIn = fs.open(reduceFile); + b = new byte[8]; + readBytes = reduceIn.read(b); + assertTrue("Should read 8 bytes.", readBytes == 8); + //assuming all the 8 bytes were read. + Text readTxt = new Text(b); + assertTrue("a\nb\nc\nd\n".equals(readTxt.toString())); + assertTrue("number of bytes left should be -1", reduceIn.read(b) == -1); + reduceIn.close(); + } + + public void testSpaces() throws Exception { + fs.delete(archivePath, true); + Configuration conf = mapred.createJobConf(); + HadoopArchives har = new HadoopArchives(conf); + String[] args = new String[6]; + args[0] = "-archiveName"; + args[1] = "foo bar.har"; + args[2] = "-p"; + args[3] = fs.getHomeDirectory().toString(); + args[4] = "test"; + args[5] = archivePath.toString(); + int ret = ToolRunner.run(har, args); + assertTrue("failed test", ret == 0); + Path finalPath = new Path(archivePath, "foo bar.har"); + Path fsPath = new Path(inputPath.toUri().getPath()); + Path filePath = new Path(finalPath, "test"); + // make it a har path + Path harPath = new Path("har://" + filePath.toUri().getPath()); + FileSystem harFs = harPath.getFileSystem(conf); + FileStatus[] statuses = harFs.listStatus(finalPath); + } + + /** + * Test how block location offsets and lengths are fixed. 
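// Editor's worked example (not from the patch itself): the offset arithmetic
// behind case 1 of the block-location test described above. It assumes this
// snippet lives in package org.apache.hadoop.fs so that
// HarFileSystem.fixBlockLocations (whose exact visibility is not shown here)
// can be called directly.
package org.apache.hadoop.fs;

public class FixBlockLocationsSketch {
  public static void main(String[] args) {
    // One block of the underlying part file: offset 10, length 10, i.e. [10, 20).
    BlockLocation[] b = { new BlockLocation(null, null, 10, 10) };
    // The archived file starts at offset 5 inside the part file, and the caller
    // asked for the range [start=0, start+len=20) of the archived file.
    HarFileSystem.fixBlockLocations(b, 0, 20, 5);
    // Logical block offset = part-file block offset (10) - file start offset (5) = 5;
    // all 10 bytes of the block fall inside the requested range, so length stays 10.
    System.out.println("offset=" + b[0].getOffset() + " length=" + b[0].getLength());
    // expected: offset=5 length=10, matching case 1 above
  }
}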
+ */ + public void testFixBlockLocations() { + // do some tests where start == 0 + { + // case 1: range starts before current har block and ends after + BlockLocation[] b = { new BlockLocation(null, null, 10, 10) }; + HarFileSystem.fixBlockLocations(b, 0, 20, 5); + assertEquals(b[0].getOffset(), 5); + assertEquals(b[0].getLength(), 10); + } + { + // case 2: range starts in current har block and ends after + BlockLocation[] b = { new BlockLocation(null, null, 10, 10) }; + HarFileSystem.fixBlockLocations(b, 0, 20, 15); + assertEquals(b[0].getOffset(), 0); + assertEquals(b[0].getLength(), 5); + } + { + // case 3: range starts before current har block and ends in + // current har block + BlockLocation[] b = { new BlockLocation(null, null, 10, 10) }; + HarFileSystem.fixBlockLocations(b, 0, 10, 5); + assertEquals(b[0].getOffset(), 5); + assertEquals(b[0].getLength(), 5); + } + { + // case 4: range starts and ends in current har block + BlockLocation[] b = { new BlockLocation(null, null, 10, 10) }; + HarFileSystem.fixBlockLocations(b, 0, 6, 12); + assertEquals(b[0].getOffset(), 0); + assertEquals(b[0].getLength(), 6); + } + + // now try a range where start == 3 + { + // case 5: range starts before current har block and ends after + BlockLocation[] b = { new BlockLocation(null, null, 10, 10) }; + HarFileSystem.fixBlockLocations(b, 3, 20, 5); + assertEquals(b[0].getOffset(), 5); + assertEquals(b[0].getLength(), 10); + } + { + // case 6: range starts in current har block and ends after + BlockLocation[] b = { new BlockLocation(null, null, 10, 10) }; + HarFileSystem.fixBlockLocations(b, 3, 20, 15); + assertEquals(b[0].getOffset(), 3); + assertEquals(b[0].getLength(), 2); + } + { + // case 7: range starts before current har block and ends in + // current har block + BlockLocation[] b = { new BlockLocation(null, null, 10, 10) }; + HarFileSystem.fixBlockLocations(b, 3, 7, 5); + assertEquals(b[0].getOffset(), 5); + assertEquals(b[0].getLength(), 5); + } + { + // case 8: range starts and ends in current har block + BlockLocation[] b = { new BlockLocation(null, null, 10, 10) }; + HarFileSystem.fixBlockLocations(b, 3, 3, 12); + assertEquals(b[0].getOffset(), 3); + assertEquals(b[0].getLength(), 3); + } + + // test case from JIRA MAPREDUCE-1752 + { + BlockLocation[] b = { new BlockLocation(null, null, 512, 512), + new BlockLocation(null, null, 1024, 512) }; + HarFileSystem.fixBlockLocations(b, 0, 512, 896); + assertEquals(b[0].getOffset(), 0); + assertEquals(b[0].getLength(), 128); + assertEquals(b[1].getOffset(), 128); + assertEquals(b[1].getLength(), 384); + } + + } +} diff --git a/src/test/org/apache/hadoop/fs/TestLocalDirAllocator.java b/src/test/org/apache/hadoop/fs/TestLocalDirAllocator.java new file mode 100644 index 0000000..4327215 --- /dev/null +++ b/src/test/org/apache/hadoop/fs/TestLocalDirAllocator.java @@ -0,0 +1,211 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import java.io.File; +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.Shell; + +import junit.framework.TestCase; + +/** This test LocalDirAllocator works correctly; + * Every test case uses different buffer dirs to + * enforce the AllocatorPerContext initialization. + * This test does not run on Cygwin because under Cygwin + * a directory can be created in a read-only directory + * which breaks this test. + */ +public class TestLocalDirAllocator extends TestCase { + final static private Configuration conf = new Configuration(); + final static private String BUFFER_DIR_ROOT = "build/test/temp"; + final static private Path BUFFER_PATH_ROOT = new Path(BUFFER_DIR_ROOT); + final static private File BUFFER_ROOT = new File(BUFFER_DIR_ROOT); + final static private String BUFFER_DIR[] = new String[] { + BUFFER_DIR_ROOT+"/tmp0", BUFFER_DIR_ROOT+"/tmp1", BUFFER_DIR_ROOT+"/tmp2", + BUFFER_DIR_ROOT+"/tmp3", BUFFER_DIR_ROOT+"/tmp4", BUFFER_DIR_ROOT+"/tmp5", + BUFFER_DIR_ROOT+"/tmp6"}; + final static private Path BUFFER_PATH[] = new Path[] { + new Path(BUFFER_DIR[0]), new Path(BUFFER_DIR[1]), new Path(BUFFER_DIR[2]), + new Path(BUFFER_DIR[3]), new Path(BUFFER_DIR[4]), new Path(BUFFER_DIR[5]), + new Path(BUFFER_DIR[6])}; + final static private String CONTEXT = "dfs.client.buffer.dir"; + final static private String FILENAME = "block"; + final static private LocalDirAllocator dirAllocator = + new LocalDirAllocator(CONTEXT); + static LocalFileSystem localFs; + final static private boolean isWindows = + System.getProperty("os.name").startsWith("Windows"); + final static int SMALL_FILE_SIZE = 100; + static { + try { + localFs = FileSystem.getLocal(conf); + rmBufferDirs(); + } catch(IOException e) { + System.out.println(e.getMessage()); + e.printStackTrace(); + System.exit(-1); + } + } + + private static void rmBufferDirs() throws IOException { + assertTrue(!localFs.exists(BUFFER_PATH_ROOT) || + localFs.delete(BUFFER_PATH_ROOT)); + } + + private void validateTempDirCreation(int i) throws IOException { + File result = createTempFile(SMALL_FILE_SIZE); + assertTrue("Checking for " + BUFFER_DIR[i] + " in " + result + " - FAILED!", + result.getPath().startsWith(new File(BUFFER_DIR[i], FILENAME).getPath())); + } + + private File createTempFile() throws IOException { + File result = dirAllocator.createTmpFileForWrite(FILENAME, -1, conf); + result.delete(); + return result; + } + + private File createTempFile(long size) throws IOException { + File result = dirAllocator.createTmpFileForWrite(FILENAME, size, conf); + result.delete(); + return result; + } + + /** Two buffer dirs. 
The first dir does not exist & is on a read-only disk; + * The second dir exists & is RW + * @throws Exception + */ + public void test0() throws Exception { + if (isWindows) return; + try { + conf.set(CONTEXT, BUFFER_DIR[0]+","+BUFFER_DIR[1]); + assertTrue(localFs.mkdirs(BUFFER_PATH[1])); + BUFFER_ROOT.setReadOnly(); + validateTempDirCreation(1); + validateTempDirCreation(1); + } finally { + Shell.execCommand(new String[]{"chmod", "u+w", BUFFER_DIR_ROOT}); + rmBufferDirs(); + } + } + + /** Two buffer dirs. The first dir exists & is on a read-only disk; + * The second dir exists & is RW + * @throws Exception + */ + public void test1() throws Exception { + if (isWindows) return; + try { + conf.set(CONTEXT, BUFFER_DIR[1]+","+BUFFER_DIR[2]); + assertTrue(localFs.mkdirs(BUFFER_PATH[2])); + BUFFER_ROOT.setReadOnly(); + validateTempDirCreation(2); + validateTempDirCreation(2); + } finally { + Shell.execCommand(new String[]{"chmod", "u+w", BUFFER_DIR_ROOT}); + rmBufferDirs(); + } + } + /** Two buffer dirs. Both do not exist but on a RW disk. + * Check if tmp dirs are allocated in a round-robin + */ + public void test2() throws Exception { + if (isWindows) return; + try { + conf.set(CONTEXT, BUFFER_DIR[2]+","+BUFFER_DIR[3]); + + // create the first file, and then figure the round-robin sequence + createTempFile(SMALL_FILE_SIZE); + int firstDirIdx = (dirAllocator.getCurrentDirectoryIndex() == 0) ? 2 : 3; + int secondDirIdx = (firstDirIdx == 2) ? 3 : 2; + + // check if tmp dirs are allocated in a round-robin manner + validateTempDirCreation(firstDirIdx); + validateTempDirCreation(secondDirIdx); + validateTempDirCreation(firstDirIdx); + } finally { + rmBufferDirs(); + } + } + + /** Two buffer dirs. Both exists and on a R/W disk. + * Later disk1 becomes read-only. + * @throws Exception + */ + public void test3() throws Exception { + if (isWindows) return; + try { + conf.set(CONTEXT, BUFFER_DIR[3]+","+BUFFER_DIR[4]); + assertTrue(localFs.mkdirs(BUFFER_PATH[3])); + assertTrue(localFs.mkdirs(BUFFER_PATH[4])); + + // create the first file with size, and then figure the round-robin sequence + createTempFile(SMALL_FILE_SIZE); + + int nextDirIdx = (dirAllocator.getCurrentDirectoryIndex() == 0) ? 3 : 4; + validateTempDirCreation(nextDirIdx); + + // change buffer directory 2 to be read only + new File(BUFFER_DIR[4]).setReadOnly(); + validateTempDirCreation(3); + validateTempDirCreation(3); + } finally { + rmBufferDirs(); + } + } + + /** + * Two buffer dirs, on read-write disk. + * + * Try to create a whole bunch of files. + * Verify that they do indeed all get created where they should. + * + * Would ideally check statistical properties of distribution, but + * we don't have the nerve to risk false-positives here. 
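// ---------------------------------------------------------------------------
// Illustrative sketch (not from the patch itself): the LocalDirAllocator usage
// pattern these tests rely on -- a context key naming a comma-separated list of
// local directories, with temp files handed out round-robin across the
// writable ones. The context key name and directories below are assumptions.
// ---------------------------------------------------------------------------
import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;

public class DirAllocatorSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The allocator reads its directory list from this configuration key.
    String context = "demo.local.dir";
    conf.set(context, "/tmp/demo/dir0,/tmp/demo/dir1");

    LocalDirAllocator allocator = new LocalDirAllocator(context);
    for (int i = 0; i < 4; i++) {
      // Size hint -1 means "unknown"; a concrete size lets the allocator skip
      // directories that are too full. Unwritable directories are skipped.
      File f = allocator.createTmpFileForWrite("block" + i, -1, conf);
      System.out.println(f.getPath());   // alternates between dir0 and dir1
      f.delete();
    }
  }
}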
+ * + * @throws Exception + */ + static final int TRIALS = 100; + public void test4() throws Exception { + if (isWindows) return; + try { + + conf.set(CONTEXT, BUFFER_DIR[5]+","+BUFFER_DIR[6]); + assertTrue(localFs.mkdirs(BUFFER_PATH[5])); + assertTrue(localFs.mkdirs(BUFFER_PATH[6])); + + int inDir5=0, inDir6=0; + for(int i = 0; i < TRIALS; ++i) { + File result = createTempFile(); + if(result.getPath().startsWith(new File(BUFFER_DIR[5], FILENAME).getPath())) { + inDir5++; + } else if(result.getPath().startsWith(new File(BUFFER_DIR[6], FILENAME).getPath())) { + inDir6++; + } + result.delete(); + } + + assertTrue( inDir5 + inDir6 == TRIALS); + + } finally { + rmBufferDirs(); + } + } + +} diff --git a/src/test/org/apache/hadoop/fs/TestLocalFileSystem.java b/src/test/org/apache/hadoop/fs/TestLocalFileSystem.java new file mode 100644 index 0000000..b244b9b --- /dev/null +++ b/src/test/org/apache/hadoop/fs/TestLocalFileSystem.java @@ -0,0 +1,156 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import org.apache.hadoop.conf.Configuration; +import java.io.*; +import junit.framework.*; + +/** + * This class tests the local file system via the FileSystem abstraction. + */ +public class TestLocalFileSystem extends TestCase { + private static String TEST_ROOT_DIR + = System.getProperty("test.build.data","build/test/data/work-dir/localfs"); + + + static void writeFile(FileSystem fs, Path name) throws IOException { + FSDataOutputStream stm = fs.create(name); + stm.writeBytes("42\n"); + stm.close(); + } + + static String readFile(FileSystem fs, Path name) throws IOException { + byte[] b = new byte[1024]; + int offset = 0; + FSDataInputStream in = fs.open(name); + for(int remaining, n; + (remaining = b.length - offset) > 0 && (n = in.read(b, offset, remaining)) != -1; + offset += n); + in.close(); + + String s = new String(b, 0, offset); + System.out.println("s=" + s); + return s; + } + + private void cleanupFile(FileSystem fs, Path name) throws IOException { + assertTrue(fs.exists(name)); + fs.delete(name, true); + assertTrue(!fs.exists(name)); + } + + /** + * Test the capability of setting the working directory. 
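// ---------------------------------------------------------------------------
// Illustrative sketch (not from the patch itself): how the working directory
// affects relative Path resolution, which is what testWorkingDirectory below
// exercises. The scratch directory /tmp/workdir-demo is an assumption.
// ---------------------------------------------------------------------------
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WorkingDirSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path scratch = new Path("/tmp/workdir-demo");     // assumed scratch dir
    fs.mkdirs(scratch);
    fs.setWorkingDirectory(scratch);

    // A relative Path is resolved against the working directory...
    Path relative = new Path("file1");
    fs.create(relative).close();
    System.out.println("resolved to: " + fs.getFileStatus(relative).getPath());
    // ...so the file actually lives under /tmp/workdir-demo/file1.
    System.out.println("exists under scratch: "
        + fs.exists(new Path(scratch, "file1")));

    fs.delete(scratch, true);                         // clean up
  }
}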
+ */ + public void testWorkingDirectory() throws IOException { + Configuration conf = new Configuration(); + FileSystem fileSys = FileSystem.getLocal(conf); + Path origDir = fileSys.getWorkingDirectory(); + Path subdir = new Path(TEST_ROOT_DIR, "new"); + try { + // make sure it doesn't already exist + assertTrue(!fileSys.exists(subdir)); + // make it and check for it + assertTrue(fileSys.mkdirs(subdir)); + assertTrue(fileSys.isDirectory(subdir)); + + fileSys.setWorkingDirectory(subdir); + + // create a directory and check for it + Path dir1 = new Path("dir1"); + assertTrue(fileSys.mkdirs(dir1)); + assertTrue(fileSys.isDirectory(dir1)); + + // delete the directory and make sure it went away + fileSys.delete(dir1, true); + assertTrue(!fileSys.exists(dir1)); + + // create files and manipulate them. + Path file1 = new Path("file1"); + Path file2 = new Path("sub/file2"); + writeFile(fileSys, file1); + fileSys.copyFromLocalFile(file1, file2); + assertTrue(fileSys.exists(file1)); + assertTrue(fileSys.isFile(file1)); + cleanupFile(fileSys, file2); + fileSys.copyToLocalFile(file1, file2); + cleanupFile(fileSys, file2); + + // try a rename + fileSys.rename(file1, file2); + assertTrue(!fileSys.exists(file1)); + assertTrue(fileSys.exists(file2)); + fileSys.rename(file2, file1); + + // try reading a file + InputStream stm = fileSys.open(file1); + byte[] buffer = new byte[3]; + int bytesRead = stm.read(buffer, 0, 3); + assertEquals("42\n", new String(buffer, 0, bytesRead)); + stm.close(); + } finally { + fileSys.setWorkingDirectory(origDir); + fileSys.delete(subdir, true); + } + } + + public void testCopy() throws IOException { + Configuration conf = new Configuration(); + LocalFileSystem fs = FileSystem.getLocal(conf); + Path src = new Path(TEST_ROOT_DIR, "dingo"); + Path dst = new Path(TEST_ROOT_DIR, "yak"); + writeFile(fs, src); + assertTrue(FileUtil.copy(fs, src, fs, dst, true, false, conf)); + assertTrue(!fs.exists(src) && fs.exists(dst)); + assertTrue(FileUtil.copy(fs, dst, fs, src, false, false, conf)); + assertTrue(fs.exists(src) && fs.exists(dst)); + assertTrue(FileUtil.copy(fs, src, fs, dst, true, true, conf)); + assertTrue(!fs.exists(src) && fs.exists(dst)); + fs.mkdirs(src); + assertTrue(FileUtil.copy(fs, dst, fs, src, false, false, conf)); + Path tmp = new Path(src, dst.getName()); + assertTrue(fs.exists(tmp) && fs.exists(dst)); + assertTrue(FileUtil.copy(fs, dst, fs, src, false, true, conf)); + assertTrue(fs.delete(tmp, true)); + fs.mkdirs(tmp); + try { + FileUtil.copy(fs, dst, fs, src, true, true, conf); + fail("Failed to detect existing dir"); + } catch (IOException e) { } + } + + public void testHomeDirectory() throws IOException { + Configuration conf = new Configuration(); + FileSystem fileSys = FileSystem.getLocal(conf); + Path home = new Path(System.getProperty("user.home")) + .makeQualified(fileSys); + Path fsHome = fileSys.getHomeDirectory(); + assertEquals(home, fsHome); + } + + public void testPathEscapes() throws IOException { + Configuration conf = new Configuration(); + FileSystem fs = FileSystem.getLocal(conf); + Path path = new Path(TEST_ROOT_DIR, "foo%bar"); + writeFile(fs, path); + FileStatus status = fs.getFileStatus(path); + assertEquals(path.makeQualified(fs), status.getPath()); + cleanupFile(fs, path); + } +} diff --git a/src/test/org/apache/hadoop/fs/TestLocalFileSystemPermission.java b/src/test/org/apache/hadoop/fs/TestLocalFileSystemPermission.java new file mode 100644 index 0000000..f68cdb6 --- /dev/null +++ 
b/src/test/org/apache/hadoop/fs/TestLocalFileSystemPermission.java @@ -0,0 +1,157 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.permission.*; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.Shell; + +import java.io.*; +import java.util.*; + +import junit.framework.*; + +/** + * This class tests the local file system via the FileSystem abstraction. + */ +public class TestLocalFileSystemPermission extends TestCase { + static final String TEST_PATH_PREFIX = new Path(System.getProperty( + "test.build.data", "/tmp")).toString().replace(' ', '_') + + "/" + TestLocalFileSystemPermission.class.getSimpleName() + "_"; + + { + try { + ((org.apache.commons.logging.impl.Log4JLogger)FileSystem.LOG).getLogger() + .setLevel(org.apache.log4j.Level.DEBUG); + } + catch(Exception e) { + System.out.println("Cannot change log level\n" + + StringUtils.stringifyException(e)); + } + } + + private Path writeFile(FileSystem fs, String name) throws IOException { + Path f = new Path(TEST_PATH_PREFIX + name); + FSDataOutputStream stm = fs.create(f); + stm.writeBytes("42\n"); + stm.close(); + return f; + } + + private void cleanupFile(FileSystem fs, Path name) throws IOException { + assertTrue(fs.exists(name)); + fs.delete(name, true); + assertTrue(!fs.exists(name)); + } + + /** Test LocalFileSystem.setPermission */ + public void testLocalFSsetPermission() throws IOException { + if (Path.WINDOWS) { + System.out.println("Cannot run test for Windows"); + return; + } + Configuration conf = new Configuration(); + LocalFileSystem localfs = FileSystem.getLocal(conf); + String filename = "foo"; + Path f = writeFile(localfs, filename); + try { + System.out.println(filename + ": " + getPermission(localfs, f)); + } + catch(Exception e) { + System.out.println(StringUtils.stringifyException(e)); + System.out.println("Cannot run test"); + return; + } + + try { + // create files and manipulate them. 
+ FsPermission all = new FsPermission((short)0777); + FsPermission none = new FsPermission((short)0); + + localfs.setPermission(f, none); + assertEquals(none, getPermission(localfs, f)); + + localfs.setPermission(f, all); + assertEquals(all, getPermission(localfs, f)); + } + finally {cleanupFile(localfs, f);} + } + + FsPermission getPermission(LocalFileSystem fs, Path p) throws IOException { + return fs.getFileStatus(p).getPermission(); + } + + /** Test LocalFileSystem.setOwner */ + public void testLocalFSsetOwner() throws IOException { + if (Path.WINDOWS) { + System.out.println("Cannot run test for Windows"); + return; + } + + Configuration conf = new Configuration(); + LocalFileSystem localfs = FileSystem.getLocal(conf); + String filename = "bar"; + Path f = writeFile(localfs, filename); + List groups = null; + try { + groups = getGroups(); + System.out.println(filename + ": " + getPermission(localfs, f)); + } + catch(IOException e) { + System.out.println(StringUtils.stringifyException(e)); + System.out.println("Cannot run test"); + return; + } + if (groups == null || groups.size() < 1) { + System.out.println("Cannot run test: need at least one group. groups=" + + groups); + return; + } + + // create files and manipulate them. + try { + String g0 = groups.get(0); + localfs.setOwner(f, null, g0); + assertEquals(g0, getGroup(localfs, f)); + + if (groups.size() > 1) { + String g1 = groups.get(1); + localfs.setOwner(f, null, g1); + assertEquals(g1, getGroup(localfs, f)); + } else { + System.out.println("Not testing changing the group since user " + + "belongs to only one group."); + } + } + finally {cleanupFile(localfs, f);} + } + + static List getGroups() throws IOException { + List a = new ArrayList(); + String s = Shell.execCommand(Shell.getGROUPS_COMMAND()); + for(StringTokenizer t = new StringTokenizer(s); t.hasMoreTokens(); ) { + a.add(t.nextToken()); + } + return a; + } + + String getGroup(LocalFileSystem fs, Path p) throws IOException { + return fs.getFileStatus(p).getGroup(); + } +} diff --git a/src/test/org/apache/hadoop/fs/TestLocatedStatus.java b/src/test/org/apache/hadoop/fs/TestLocatedStatus.java new file mode 100644 index 0000000..e06baf2 --- /dev/null +++ b/src/test/org/apache/hadoop/fs/TestLocatedStatus.java @@ -0,0 +1,150 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
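
The permission checks above reduce to two FileSystem calls. A minimal sketch of setting and reading back a mode on the local file system (path and mode are arbitrary; like the test, this assumes a POSIX platform where the local implementation can shell out to chmod):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocalFileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class PermissionSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        LocalFileSystem fs = FileSystem.getLocal(conf);
        Path p = new Path("/tmp/perm-demo");

        fs.create(p).close();                                 // empty placeholder file
        fs.setPermission(p, new FsPermission((short) 0644));  // rw-r--r--
        FsPermission got = fs.getFileStatus(p).getPermission();
        System.out.println(p + " -> " + got);                 // rw-r--r--

        fs.delete(p, true);
      }
    }
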
+ */ +package org.apache.hadoop.fs; + +import java.io.IOException; +import java.util.Random; +import java.util.Set; +import java.util.TreeSet; + +import org.apache.commons.logging.impl.Log4JLogger; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.log4j.Level; + +import static org.junit.Assert.*; +import org.junit.Test; +import org.junit.BeforeClass; + +/** + * This class tests the FileStatus API. + */ +public class TestLocatedStatus { + { + ((Log4JLogger)FileSystem.LOG).getLogger().setLevel(Level.ALL); + } + + static final long seed = 0xDEADBEEFL; + + final protected static Configuration conf = new Configuration(); + protected static FileSystem fs; + final protected static Path TEST_DIR = getTestDir(); + final private static int FILE_LEN = 10; + final private static Path FILE1 = new Path(TEST_DIR, "file1"); + final private static Path DIR1 = new Path(TEST_DIR, "dir1"); + final private static Path FILE2 = new Path(DIR1, "file2"); + final private static Path FILE3 = new Path(DIR1, "file3"); + final private static Path FILE4 = new Path(TEST_DIR, "file4"); + + protected static Path getTestDir() { + return new Path( + System.getProperty("test.build.data","build/test/data/work-dir/localfs"), + "main_"); + } + + @BeforeClass + public static void testSetUp() throws Exception { + fs = FileSystem.getLocal(conf); + fs.delete(TEST_DIR, true); + } + + private static void writeFile(FileSystem fileSys, Path name, int fileSize) + throws IOException { + // Create and write a file that contains three blocks of data + FSDataOutputStream stm = fileSys.create(name); + byte[] buffer = new byte[fileSize]; + Random rand = new Random(seed); + rand.nextBytes(buffer); + stm.write(buffer); + stm.close(); + } + + /** Test when input path is a file */ + @Test + public void testFile() throws IOException { + fs.mkdirs(TEST_DIR); + writeFile(fs, FILE1, FILE_LEN); + + RemoteIterator itor = fs.listLocatedStatus( + FILE1); + LocatedFileStatus stat = itor.next(); + assertFalse(itor.hasNext()); + assertFalse(stat.isDir()); + assertEquals(FILE_LEN, stat.getLen()); + assertEquals(fs.makeQualified(FILE1), stat.getPath()); + assertEquals(1, stat.getBlockLocations().length); + + fs.delete(FILE1, true); + } + + + /** Test when input path is a directory */ + @Test + public void testDirectory() throws IOException { + fs.mkdirs(DIR1); + + // test empty directory + RemoteIterator itor = fs.listLocatedStatus(DIR1); + assertFalse(itor.hasNext()); + + // testing directory with 1 file + writeFile(fs, FILE2, FILE_LEN); + itor = fs.listLocatedStatus(DIR1); + LocatedFileStatus stat = itor.next(); + assertFalse(itor.hasNext()); + assertFalse(stat.isDir()); + assertEquals(FILE_LEN, stat.getLen()); + assertEquals(fs.makeQualified(FILE2), stat.getPath()); + assertEquals(1, stat.getBlockLocations().length); + + // test more complicated directory + writeFile(fs, FILE1, FILE_LEN); + writeFile(fs, FILE3, FILE_LEN); + writeFile(fs, FILE4, FILE_LEN); + + Set expectedResults = new TreeSet(); + expectedResults.add(fs.makeQualified(FILE2)); + expectedResults.add(fs.makeQualified(FILE3)); + for (itor = fs.listLocatedStatus(DIR1); itor.hasNext(); ) { + stat = itor.next(); + assertFalse(stat.isDir()); + assertTrue(expectedResults.remove(stat.getPath())); + } + assertTrue(expectedResults.isEmpty()); + + final Path qualifiedDir1 = fs.makeQualified(DIR1); + expectedResults.add(qualifiedDir1); + 
expectedResults.add(fs.makeQualified(FILE1)); + expectedResults.add(fs.makeQualified(FILE4)); + + for (itor = fs.listLocatedStatus(TEST_DIR); itor.hasNext(); ) { + stat = itor.next(); + assertTrue(expectedResults.remove(stat.getPath())); + if (qualifiedDir1.equals(stat.getPath())) { + assertTrue(stat.isDir()); + } else { + assertFalse(stat.isDir()); + } + } + assertTrue(expectedResults.isEmpty()); + + fs.delete(TEST_DIR, true); + } +} \ No newline at end of file diff --git a/src/test/org/apache/hadoop/fs/TestPath.java b/src/test/org/apache/hadoop/fs/TestPath.java new file mode 100644 index 0000000..4fa28bc --- /dev/null +++ b/src/test/org/apache/hadoop/fs/TestPath.java @@ -0,0 +1,152 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs; + +import java.util.*; +import junit.framework.TestCase; + +public class TestPath extends TestCase { + public void testToString() { + toStringTest("/"); + toStringTest("/foo"); + toStringTest("/foo/bar"); + toStringTest("foo"); + toStringTest("foo/bar"); + boolean emptyException = false; + try { + toStringTest(""); + } catch (IllegalArgumentException e) { + // expect to receive an IllegalArgumentException + emptyException = true; + } + assertTrue(emptyException); + if (Path.WINDOWS) { + toStringTest("c:"); + toStringTest("c:/"); + toStringTest("c:foo"); + toStringTest("c:foo/bar"); + toStringTest("c:foo/bar"); + toStringTest("c:/foo/bar"); + } + } + + private void toStringTest(String pathString) { + assertEquals(pathString, new Path(pathString).toString()); + } + + public void testNormalize() { + assertEquals("/", new Path("//").toString()); + assertEquals("/foo", new Path("/foo/").toString()); + assertEquals("/foo", new Path("/foo/").toString()); + assertEquals("foo", new Path("foo/").toString()); + assertEquals("foo", new Path("foo//").toString()); + assertEquals("foo/bar", new Path("foo//bar").toString()); + if (Path.WINDOWS) { + assertEquals("c:/a/b", new Path("c:\\a\\b").toString()); + } + } + + public void testIsAbsolute() { + assertTrue(new Path("/").isAbsolute()); + assertTrue(new Path("/foo").isAbsolute()); + assertFalse(new Path("foo").isAbsolute()); + assertFalse(new Path("foo/bar").isAbsolute()); + assertFalse(new Path(".").isAbsolute()); + if (Path.WINDOWS) { + assertTrue(new Path("c:/a/b").isAbsolute()); + assertFalse(new Path("c:a/b").isAbsolute()); + } + } + + public void testParent() { + assertEquals(new Path("/foo"), new Path("/foo/bar").getParent()); + assertEquals(new Path("foo"), new Path("foo/bar").getParent()); + assertEquals(new Path("/"), new Path("/foo").getParent()); + if (Path.WINDOWS) { + assertEquals(new Path("c:/"), new Path("c:/foo").getParent()); + } + } + + public void testChild() { + assertEquals(new Path("."), new Path(".", ".")); + 
assertEquals(new Path("/"), new Path("/", ".")); + assertEquals(new Path("/"), new Path(".", "/")); + assertEquals(new Path("/foo"), new Path("/", "foo")); + assertEquals(new Path("/foo/bar"), new Path("/foo", "bar")); + assertEquals(new Path("/foo/bar/baz"), new Path("/foo/bar", "baz")); + assertEquals(new Path("/foo/bar/baz"), new Path("/foo", "bar/baz")); + assertEquals(new Path("foo"), new Path(".", "foo")); + assertEquals(new Path("foo/bar"), new Path("foo", "bar")); + assertEquals(new Path("foo/bar/baz"), new Path("foo", "bar/baz")); + assertEquals(new Path("foo/bar/baz"), new Path("foo/bar", "baz")); + assertEquals(new Path("/foo"), new Path("/bar", "/foo")); + if (Path.WINDOWS) { + assertEquals(new Path("c:/foo"), new Path("/bar", "c:/foo")); + assertEquals(new Path("c:/foo"), new Path("d:/bar", "c:/foo")); + } + } + + public void testEquals() { + assertFalse(new Path("/").equals(new Path("/foo"))); + } + + public void testDots() { + // Test Path(String) + assertEquals(new Path("/foo/bar/baz").toString(), "/foo/bar/baz"); + assertEquals(new Path("/foo/bar", ".").toString(), "/foo/bar"); + assertEquals(new Path("/foo/bar/../baz").toString(), "/foo/baz"); + assertEquals(new Path("/foo/bar/./baz").toString(), "/foo/bar/baz"); + assertEquals(new Path("/foo/bar/baz/../../fud").toString(), "/foo/fud"); + assertEquals(new Path("/foo/bar/baz/.././../fud").toString(), "/foo/fud"); + assertEquals(new Path("../../foo/bar").toString(), "../../foo/bar"); + assertEquals(new Path(".././../foo/bar").toString(), "../../foo/bar"); + assertEquals(new Path("./foo/bar/baz").toString(), "foo/bar/baz"); + assertEquals(new Path("/foo/bar/../../baz/boo").toString(), "/baz/boo"); + assertEquals(new Path("foo/bar/").toString(), "foo/bar"); + assertEquals(new Path("foo/bar/../baz").toString(), "foo/baz"); + assertEquals(new Path("foo/bar/../../baz/boo").toString(), "baz/boo"); + + + // Test Path(Path,Path) + assertEquals(new Path("/foo/bar", "baz/boo").toString(), "/foo/bar/baz/boo"); + assertEquals(new Path("foo/bar/","baz/bud").toString(), "foo/bar/baz/bud"); + + assertEquals(new Path("/foo/bar","../../boo/bud").toString(), "/boo/bud"); + assertEquals(new Path("foo/bar","../../boo/bud").toString(), "boo/bud"); + assertEquals(new Path(".","boo/bud").toString(), "boo/bud"); + + assertEquals(new Path("/foo/bar/baz","../../boo/bud").toString(), "/foo/boo/bud"); + assertEquals(new Path("foo/bar/baz","../../boo/bud").toString(), "foo/boo/bud"); + + + assertEquals(new Path("../../","../../boo/bud").toString(), "../../../../boo/bud"); + assertEquals(new Path("../../foo","../../../boo/bud").toString(), "../../../../boo/bud"); + assertEquals(new Path("../../foo/bar","../boo/bud").toString(), "../../foo/boo/bud"); + + assertEquals(new Path("foo/bar/baz","../../..").toString(), ""); + assertEquals(new Path("foo/bar/baz","../../../../..").toString(), "../.."); + } + + public void testScheme() throws java.io.IOException { + assertEquals("foo:/bar", new Path("foo:/","/bar").toString()); + assertEquals("foo://bar/baz", new Path("foo://bar/","/baz").toString()); + } + + +} diff --git a/src/test/org/apache/hadoop/fs/TestTrash.java b/src/test/org/apache/hadoop/fs/TestTrash.java new file mode 100644 index 0000000..ddf20d7 --- /dev/null +++ b/src/test/org/apache/hadoop/fs/TestTrash.java @@ -0,0 +1,436 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
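
The dot-handling cases above are easier to read when run; a tiny sketch (class name illustrative) showing how Path collapses "." and ".." at construction time, with the expected output in comments taken from the assertions above:

    import org.apache.hadoop.fs.Path;

    public class PathResolutionDemo {
      public static void main(String[] args) {
        System.out.println(new Path("/foo/bar/../baz"));            // /foo/baz
        System.out.println(new Path("/foo/bar", "../../boo/bud"));  // /boo/bud
        System.out.println(new Path("foo/bar/", "baz/bud"));        // foo/bar/baz/bud
        System.out.println(new Path("/foo/bar").getParent());       // /foo
      }
    }
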
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + + +import junit.framework.TestCase; +import java.io.File; +import java.io.IOException; +import java.io.DataOutputStream; +import java.net.URI; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsShell; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.Trash; +import org.apache.hadoop.fs.LocalFileSystem; + +/** + * This class tests commands from Trash. + */ +public class TestTrash extends TestCase { + + private final static Path TEST_DIR = + new Path(new File(System.getProperty("test.build.data","/tmp") + ).toURI().toString().replace(' ', '+'), "testTrash"); + + protected static Path writeFile(FileSystem fs, Path f) throws IOException { + DataOutputStream out = fs.create(f); + out.writeBytes("dhruba: " + f); + out.close(); + assertTrue(fs.exists(f)); + return f; + } + + protected static Path mkdir(FileSystem fs, Path p) throws IOException { + assertTrue(fs.mkdirs(p)); + assertTrue(fs.exists(p)); + assertTrue(fs.getFileStatus(p).isDir()); + return p; + } + + // check that the specified file is in Trash + protected static void checkTrash(FileSystem fs, Path trashRoot, + Path path) throws IOException { + Path p = new Path(trashRoot+"/"+ path.toUri().getPath()); + assertTrue(fs.exists(p)); + } + + // check that the specified file is not in Trash + static void checkNotInTrash(FileSystem fs, Path trashRoot, String pathname) + throws IOException { + Path p = new Path(trashRoot+"/"+ new Path(pathname).getName()); + assertTrue(!fs.exists(p)); + } + + protected static void trashShell(final FileSystem fs, final Path base) + throws IOException { + Configuration conf = new Configuration(); + conf.set("fs.trash.interval", "10"); // 10 minute + conf.set("fs.default.name", fs.getUri().toString()); + FsShell shell = new FsShell(); + shell.setConf(conf); + Path trashRoot = null; + + // First create a new directory with mkdirs + Path myPath = new Path(base, "test/mkdirs"); + mkdir(fs, myPath); + + // Second, create a file in that directory. + Path myFile = new Path(base, "test/mkdirs/myFile"); + writeFile(fs, myFile); + + // Verify that expunge without Trash directory + // won't throw Exception + { + String[] args = new String[1]; + args[0] = "-expunge"; + int val = -1; + try { + val = shell.run(args); + } catch (Exception e) { + System.err.println("Exception raised from Trash.run " + + e.getLocalizedMessage()); + } + assertTrue(val == 0); + } + + // Verify that we succeed in removing the file we created. + // This should go into Trash. 
+ { + String[] args = new String[2]; + args[0] = "-rm"; + args[1] = myFile.toString(); + int val = -1; + try { + val = shell.run(args); + } catch (Exception e) { + System.err.println("Exception raised from Trash.run " + + e.getLocalizedMessage()); + } + assertTrue(val == 0); + + trashRoot = shell.getCurrentTrashDir(); + checkTrash(fs, trashRoot, myFile); + } + + // Verify that we can recreate the file + writeFile(fs, myFile); + + // Verify that we succeed in removing the file we re-created + { + String[] args = new String[2]; + args[0] = "-rm"; + args[1] = new Path(base, "test/mkdirs/myFile").toString(); + int val = -1; + try { + val = shell.run(args); + } catch (Exception e) { + System.err.println("Exception raised from Trash.run " + + e.getLocalizedMessage()); + } + assertTrue(val == 0); + } + + // Verify that we can recreate the file + writeFile(fs, myFile); + + // Verify that we succeed in removing the whole directory + // along with the file inside it. + { + String[] args = new String[2]; + args[0] = "-rmr"; + args[1] = new Path(base, "test/mkdirs").toString(); + int val = -1; + try { + val = shell.run(args); + } catch (Exception e) { + System.err.println("Exception raised from Trash.run " + + e.getLocalizedMessage()); + } + assertTrue(val == 0); + } + + // recreate directory + mkdir(fs, myPath); + + // Verify that we succeed in removing the whole directory + { + String[] args = new String[2]; + args[0] = "-rmr"; + args[1] = new Path(base, "test/mkdirs").toString(); + int val = -1; + try { + val = shell.run(args); + } catch (Exception e) { + System.err.println("Exception raised from Trash.run " + + e.getLocalizedMessage()); + } + assertTrue(val == 0); + } + + // Check that we can delete a file from the trash + { + Path toErase = new Path(trashRoot, "toErase"); + int retVal = -1; + writeFile(fs, toErase); + try { + retVal = shell.run(new String[] {"-rm", toErase.toString()}); + } catch (Exception e) { + System.err.println("Exception raised from Trash.run " + + e.getLocalizedMessage()); + } + assertTrue(retVal == 0); + checkNotInTrash (fs, trashRoot, toErase.toString()); + checkNotInTrash (fs, trashRoot, toErase.toString()+".1"); + } + + // simulate Trash removal + { + String[] args = new String[1]; + args[0] = "-expunge"; + int val = -1; + try { + val = shell.run(args); + } catch (Exception e) { + System.err.println("Exception raised from Trash.run " + + e.getLocalizedMessage()); + } + assertTrue(val == 0); + } + + // verify that after expunging the Trash, it really goes away + checkNotInTrash(fs, trashRoot, new Path(base, "test/mkdirs/myFile").toString()); + + // recreate directory and file + mkdir(fs, myPath); + writeFile(fs, myFile); + + // remove file first, then remove directory + { + String[] args = new String[2]; + args[0] = "-rm"; + args[1] = myFile.toString(); + int val = -1; + try { + val = shell.run(args); + } catch (Exception e) { + System.err.println("Exception raised from Trash.run " + + e.getLocalizedMessage()); + } + assertTrue(val == 0); + checkTrash(fs, trashRoot, myFile); + + args = new String[2]; + args[0] = "-rmr"; + args[1] = myPath.toString(); + val = -1; + try { + val = shell.run(args); + } catch (Exception e) { + System.err.println("Exception raised from Trash.run " + + e.getLocalizedMessage()); + } + assertTrue(val == 0); + checkTrash(fs, trashRoot, myPath); + } + + // attempt to remove parent of trash + { + String[] args = new String[2]; + args[0] = "-rmr"; + args[1] = trashRoot.getParent().getParent().toString(); + int val = -1; + try { + val = 
shell.run(args); + } catch (Exception e) { + System.err.println("Exception raised from Trash.run " + + e.getLocalizedMessage()); + } + assertTrue(val == -1); + assertTrue(fs.exists(trashRoot)); + } + + // Verify skip trash option really works + + // recreate directory and file + mkdir(fs, myPath); + writeFile(fs, myFile); + + // Verify that skip trash option really skips the trash for files (rm) + { + String[] args = new String[3]; + args[0] = "-rm"; + args[1] = "-skipTrash"; + args[2] = myFile.toString(); + int val = -1; + try { + // Clear out trash + assertEquals(0, shell.run(new String [] { "-expunge" } )); + + val = shell.run(args); + + }catch (Exception e) { + System.err.println("Exception raised from Trash.run " + + e.getLocalizedMessage()); + } + + assertFalse(fs.exists(trashRoot)); // No new Current should be created + assertFalse(fs.exists(myFile)); + assertTrue(val == 0); + } + + // recreate directory and file + mkdir(fs, myPath); + writeFile(fs, myFile); + + // Verify that skip trash option really skips the trash for rmr + { + String[] args = new String[3]; + args[0] = "-rmr"; + args[1] = "-skipTrash"; + args[2] = myPath.toString(); + + int val = -1; + try { + // Clear out trash + assertEquals(0, shell.run(new String [] { "-expunge" } )); + + val = shell.run(args); + + }catch (Exception e) { + System.err.println("Exception raised from Trash.run " + + e.getLocalizedMessage()); + } + + assertFalse(fs.exists(trashRoot)); // No new Current should be created + assertFalse(fs.exists(myPath)); + assertFalse(fs.exists(myFile)); + assertTrue(val == 0); + } + } + + public static void trashNonDefaultFS(Configuration conf) throws IOException { + conf.set("fs.trash.interval", "10"); // 10 minute + // attempt non-default FileSystem trash + { + final FileSystem lfs = FileSystem.getLocal(conf); + Path p = TEST_DIR; + Path f = new Path(p, "foo/bar"); + if (lfs.exists(p)) { + lfs.delete(p, true); + } + try { + f = writeFile(lfs, f); + + FileSystem.closeAll(); + FileSystem localFs = FileSystem.get(URI.create("file:///"), conf); + Trash lTrash = new Trash(localFs, conf); + lTrash.moveToTrash(f.getParent()); + checkTrash(localFs, lTrash.getCurrentTrashDir(), f); + } finally { + if (lfs.exists(p)) { + lfs.delete(p, true); + } + } + } + } + + public void testTrash() throws IOException { + Configuration conf = new Configuration(); + conf.setClass("fs.file.impl", TestLFS.class, FileSystem.class); + trashShell(FileSystem.getLocal(conf), TEST_DIR); + } + + public void testNonDefaultFS() throws IOException { + Configuration conf = new Configuration(); + conf.setClass("fs.file.impl", TestLFS.class, FileSystem.class); + conf.set("fs.default.name", "invalid://host/bar/foo"); + trashNonDefaultFS(conf); + } + + public void testTrashEmptier() throws Exception { + Configuration conf = new Configuration(); + // Trash with 12 second deletes and 6 seconds checkpoints + conf.set("fs.trash.interval", "0.2"); // 12 seconds + conf.setClass("fs.file.impl", TestLFS.class, FileSystem.class); + conf.set("fs.trash.checkpoint.interval", "0.1"); // 6 seconds + Trash trash = new Trash(conf); + + // Start Emptier in background + Runnable emptier = trash.getEmptier(); + Thread emptierThread = new Thread(emptier); + emptierThread.start(); + + FileSystem fs = FileSystem.getLocal(conf); + conf.set("fs.default.name", fs.getUri().toString()); + FsShell shell = new FsShell(); + shell.setConf(conf); + shell.init(); + // First create a new directory with mkdirs + Path myPath = new Path(TEST_DIR, "test/mkdirs"); + mkdir(fs, myPath); + int 
fileIndex = 0; + Set checkpoints = new HashSet(); + while (true) { + // Create a file with a new name + Path myFile = new Path(TEST_DIR, "test/mkdirs/myFile" + fileIndex++); + writeFile(fs, myFile); + + // Delete the file to trash + String[] args = new String[2]; + args[0] = "-rm"; + args[1] = myFile.toString(); + int val = -1; + try { + val = shell.run(args); + } catch (Exception e) { + System.err.println("Exception raised from Trash.run " + + e.getLocalizedMessage()); + } + assertTrue(val == 0); + + Path trashDir = shell.getCurrentTrashDir(); + FileStatus files[] = fs.listStatus(trashDir.getParent()); + // Scan files in .Trash and add them to set of checkpoints + for (FileStatus file : files) { + String fileName = file.getPath().getName(); + checkpoints.add(fileName); + } + // If checkpoints has 4 objects it is Current + 3 checkpoint directories + if (checkpoints.size() == 4) { + // The actual contents should be smaller since the last checkpoint + // should've been deleted and Current might not have been recreated yet + assertTrue(checkpoints.size() > files.length); + break; + } + Thread.sleep(5000); + } + emptierThread.interrupt(); + emptierThread.join(); + } + + static class TestLFS extends LocalFileSystem { + Path home; + TestLFS() { + this(new Path(TEST_DIR, "user/test")); + } + TestLFS(Path home) { + super(); + this.home = home; + } + public Path getHomeDirectory() { + return home; + } + } +} diff --git a/src/test/org/apache/hadoop/fs/TestTruncatedInputBug.java b/src/test/org/apache/hadoop/fs/TestTruncatedInputBug.java new file mode 100644 index 0000000..1f18b71 --- /dev/null +++ b/src/test/org/apache/hadoop/fs/TestTruncatedInputBug.java @@ -0,0 +1,111 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import junit.framework.TestCase; +import java.io.*; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.StringUtils; + +/** + * test for the input truncation bug when mark/reset is used. + * HADOOP-1489 + */ +public class TestTruncatedInputBug extends TestCase { + private static String TEST_ROOT_DIR = + new Path(System.getProperty("test.build.data","/tmp")) + .toString().replace(' ', '+'); + + private void writeFile(FileSystem fileSys, + Path name, int nBytesToWrite) + throws IOException { + DataOutputStream out = fileSys.create(name); + for (int i = 0; i < nBytesToWrite; ++i) { + out.writeByte(0); + } + out.close(); + } + + /** + * When mark() is used on BufferedInputStream, the request + * size on the checksum file system can be small. 
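
Stepping back to the Trash tests: they drive deletion through FsShell, but trashNonDefaultFS() shows the programmatic route. A hedged sketch of that direct usage (paths are placeholders; it assumes the checkpoint()/expunge() entry points on this Trash class, and fs.trash.interval must be non-zero or moveToTrash declines to act):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.Trash;

    public class TrashSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        conf.set("fs.trash.interval", "10");           // minutes; 0 disables trash
        FileSystem fs = FileSystem.getLocal(conf);

        Path doomed = new Path("/tmp/trash-demo/file");
        fs.mkdirs(doomed.getParent());
        fs.create(doomed).close();

        Trash trash = new Trash(fs, conf);
        boolean moved = trash.moveToTrash(doomed);     // relocate under .Trash/Current
        System.out.println("moved=" + moved + " into " + trash.getCurrentTrashDir());

        trash.checkpoint();                            // roll Current into a dated checkpoint
        trash.expunge();                               // drop checkpoints older than the interval
      }
    }
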
However, + * checksum file system currently depends on the request size + * >= bytesPerSum to work properly. + */ + public void testTruncatedInputBug() throws IOException { + final int ioBufSize = 512; + final int fileSize = ioBufSize*4; + int filePos = 0; + + Configuration conf = new Configuration(); + conf.setInt("io.file.buffer.size", ioBufSize); + FileSystem fileSys = FileSystem.getLocal(conf); + + try { + // First create a test input file. + Path testFile = new Path(TEST_ROOT_DIR, "HADOOP-1489"); + writeFile(fileSys, testFile, fileSize); + assertTrue(fileSys.exists(testFile)); + assertTrue(fileSys.getLength(testFile) == fileSize); + + // Now read the file for ioBufSize bytes + FSDataInputStream in = fileSys.open(testFile, ioBufSize); + // seek beyond data buffered by open + filePos += ioBufSize * 2 + (ioBufSize - 10); + in.seek(filePos); + + // read 4 more bytes before marking + for (int i = 0; i < 4; ++i) { + if (in.read() == -1) { + break; + } + ++filePos; + } + + // Now set mark() to trigger the bug + // NOTE: in the fixed code, mark() does nothing (not supported) and + // hence won't trigger this bug. + in.mark(1); + System.out.println("MARKED"); + + // Try to read the rest + while (filePos < fileSize) { + if (in.read() == -1) { + break; + } + ++filePos; + } + in.close(); + + System.out.println("Read " + filePos + " bytes." + + " file size=" + fileSize); + assertTrue(filePos == fileSize); + + } finally { + try { + fileSys.close(); + } catch (Exception e) { + // noop + } + } + } // end testTruncatedInputBug +} diff --git a/src/test/org/apache/hadoop/fs/TestUrlStreamHandler.java b/src/test/org/apache/hadoop/fs/TestUrlStreamHandler.java new file mode 100644 index 0000000..4251401 --- /dev/null +++ b/src/test/org/apache/hadoop/fs/TestUrlStreamHandler.java @@ -0,0 +1,155 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; + +import junit.framework.TestCase; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsUrlStreamHandlerFactory; +import org.apache.hadoop.fs.Path; + +/** + * Test of the URL stream handler factory. + */ +public class TestUrlStreamHandler extends TestCase { + + /** + * Test opening and reading from an InputStream through a hdfs:// URL. + *
+ * First generate a file with some content through the FileSystem API, then + * try to open and read the file through the URL stream API. + * + * @throws IOException + */ + public void testDfsUrls() throws IOException { + + Configuration conf = new Configuration(); + MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null); + FileSystem fs = cluster.getFileSystem(); + + // Setup our own factory + // setURLSteramHandlerFactor is can be set at most once in the JVM + // the new URLStreamHandler is valid for all tests cases + // in TestStreamHandler + FsUrlStreamHandlerFactory factory = + new org.apache.hadoop.fs.FsUrlStreamHandlerFactory(); + java.net.URL.setURLStreamHandlerFactory(factory); + + Path filePath = new Path("/thefile"); + + try { + byte[] fileContent = new byte[1024]; + for (int i = 0; i < fileContent.length; ++i) + fileContent[i] = (byte) i; + + // First create the file through the FileSystem API + OutputStream os = fs.create(filePath); + os.write(fileContent); + os.close(); + + // Second, open and read the file content through the URL API + URI uri = fs.getUri(); + URL fileURL = + new URL(uri.getScheme(), uri.getHost(), uri.getPort(), filePath + .toString()); + + InputStream is = fileURL.openStream(); + assertNotNull(is); + + byte[] bytes = new byte[4096]; + assertEquals(1024, is.read(bytes)); + is.close(); + + for (int i = 0; i < fileContent.length; ++i) + assertEquals(fileContent[i], bytes[i]); + + // Cleanup: delete the file + fs.delete(filePath, false); + + } finally { + fs.close(); + cluster.shutdown(); + } + + } + + /** + * Test opening and reading from an InputStream through a file:// URL. + * + * @throws IOException + * @throws URISyntaxException + */ + public void testFileUrls() throws IOException, URISyntaxException { + // URLStreamHandler is already set in JVM by testDfsUrls() + Configuration conf = new Configuration(); + + // Locate the test temporary directory. + File tmpDir = new File(conf.get("hadoop.tmp.dir")); + if (!tmpDir.exists()) { + if (!tmpDir.mkdirs()) + throw new IOException("Cannot create temporary directory: " + tmpDir); + } + + File tmpFile = new File(tmpDir, "thefile"); + URI uri = tmpFile.toURI(); + + FileSystem fs = FileSystem.get(uri, conf); + + try { + byte[] fileContent = new byte[1024]; + for (int i = 0; i < fileContent.length; ++i) + fileContent[i] = (byte) i; + + // First create the file through the FileSystem API + OutputStream os = fs.create(new Path(uri.getPath())); + os.write(fileContent); + os.close(); + + // Second, open and read the file content through the URL API. + URL fileURL = uri.toURL(); + + InputStream is = fileURL.openStream(); + assertNotNull(is); + + byte[] bytes = new byte[4096]; + assertEquals(1024, is.read(bytes)); + is.close(); + + for (int i = 0; i < fileContent.length; ++i) + assertEquals(fileContent[i], bytes[i]); + + // Cleanup: delete the file + fs.delete(new Path(uri.getPath()), false); + + } finally { + fs.close(); + } + + } + +} diff --git a/src/test/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java b/src/test/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java new file mode 100644 index 0000000..70db33e --- /dev/null +++ b/src/test/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java @@ -0,0 +1,156 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
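
As a companion to testDfsUrls()/testFileUrls(), the registration they perform boils down to the following sketch; as the comment in the test notes, the JVM accepts setURLStreamHandlerFactory only once, and the file path here is purely illustrative:

    import java.io.InputStream;
    import java.net.URL;
    import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;

    public class UrlHandlerSketch {
      public static void main(String[] args) throws Exception {
        // Register once per JVM; afterwards hdfs://, file:// and other FileSystem
        // schemes can be opened through plain java.net.URL.
        URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());

        InputStream in = new URL("file:///etc/hosts").openStream();
        int b;
        while ((b = in.read()) != -1) {
          System.out.write(b);
        }
        in.close();
        System.out.flush();
      }
    }
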
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.ftp; + +import java.net.URI; +import junit.framework.TestCase; + +import org.apache.ftpserver.DefaultFtpServerContext; +import org.apache.ftpserver.FtpServer; +import org.apache.ftpserver.ftplet.Authority; +import org.apache.ftpserver.ftplet.UserManager; +import org.apache.ftpserver.listener.mina.MinaListener; +import org.apache.ftpserver.usermanager.BaseUser; +import org.apache.ftpserver.usermanager.WritePermission; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.mapred.JobConf; + +/** + * Generates a bunch of random files and directories using class 'DFSTestUtil', + * stores them on the FTP file system, copies them and check if all the files + * were retrieved successfully without any data corruption + */ +public class TestFTPFileSystem extends TestCase { + + private Configuration defaultConf = new JobConf(); + private FtpServer server = null; + private FileSystem localFs = null; + private FileSystem ftpFs = null; + + private Path workDir = new Path(new Path(System.getProperty( + "test.build.data", "."), "data"), "TestFTPFileSystem"); + + Path ftpServerRoot = new Path(workDir, "FTPServer"); + Path ftpServerConfig = null; + + private void startServer() { + try { + DefaultFtpServerContext context = new DefaultFtpServerContext(false); + MinaListener listener = new MinaListener(); + // Set port to 0 for OS to give a free port + listener.setPort(0); + context.setListener("default", listener); + + // Create a test user. + UserManager userManager = context.getUserManager(); + BaseUser adminUser = new BaseUser(); + adminUser.setName("admin"); + adminUser.setPassword("admin"); + adminUser.setEnabled(true); + adminUser.setAuthorities(new Authority[] { new WritePermission() }); + + Path adminUserHome = new Path(ftpServerRoot, "user/admin"); + adminUser.setHomeDirectory(adminUserHome.toUri().getPath()); + adminUser.setMaxIdleTime(0); + userManager.save(adminUser); + + // Initialize the server and start. 
+ server = new FtpServer(context); + server.start(); + + } catch (Exception e) { + throw new RuntimeException("FTP server start-up failed", e); + } + } + + private void stopServer() { + if (server != null) { + server.stop(); + } + } + + @Override + public void setUp() throws Exception { + startServer(); + defaultConf = new Configuration(); + localFs = FileSystem.getLocal(defaultConf); + ftpServerConfig = new Path(localFs.getWorkingDirectory(), "res"); + MinaListener listener = (MinaListener) server.getServerContext() + .getListener("default"); + int serverPort = listener.getPort(); + ftpFs = FileSystem.get(URI.create("ftp://admin:admin@localhost:" + + serverPort), defaultConf); + } + + @Override + public void tearDown() throws Exception { + localFs.delete(ftpServerRoot, true); + localFs.delete(ftpServerConfig, true); + localFs.close(); + ftpFs.close(); + stopServer(); + } + + /** + * Tests FTPFileSystem, create(), open(), delete(), mkdirs(), rename(), + * listStatus(), getStatus() APIs. * + * + * @throws Exception + */ + public void testReadWrite() throws Exception { + + DFSTestUtil util = new DFSTestUtil("TestFTPFileSystem", 20, 3, 1024 * 1024); + localFs.setWorkingDirectory(workDir); + Path localData = new Path(workDir, "srcData"); + Path remoteData = new Path("srcData"); + + util.createFiles(localFs, localData.toUri().getPath()); + + boolean dataConsistency = util.checkFiles(localFs, localData.getName()); + assertTrue("Test data corrupted", dataConsistency); + + // Copy files and directories recursively to FTP file system. + boolean filesCopied = FileUtil.copy(localFs, localData, ftpFs, remoteData, + false, defaultConf); + assertTrue("Copying to FTPFileSystem failed", filesCopied); + + // Rename the remote copy + Path renamedData = new Path("Renamed"); + boolean renamed = ftpFs.rename(remoteData, renamedData); + assertTrue("Rename failed", renamed); + + // Copy files and directories from FTP file system and delete remote copy. + filesCopied = FileUtil.copy(ftpFs, renamedData, localFs, workDir, true, + defaultConf); + assertTrue("Copying from FTPFileSystem fails", filesCopied); + + // Check if the data was received completely without any corruption. + dataConsistency = util.checkFiles(localFs, renamedData.getName()); + assertTrue("Invalid or corrupted data recieved from FTP Server!", + dataConsistency); + + // Delete local copies + boolean deleteSuccess = localFs.delete(renamedData, true) + & localFs.delete(localData, true); + assertTrue("Local test data deletion failed", deleteSuccess); + } +} diff --git a/src/test/org/apache/hadoop/fs/kfs/KFSEmulationImpl.java b/src/test/org/apache/hadoop/fs/kfs/KFSEmulationImpl.java new file mode 100644 index 0000000..c90f6d4 --- /dev/null +++ b/src/test/org/apache/hadoop/fs/kfs/KFSEmulationImpl.java @@ -0,0 +1,146 @@ +/** + * + * Licensed under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * @author: Sriram Rao (Kosmix Corp.) + * + * We need to provide the ability to the code in fs/kfs without really + * having a KFS deployment. 
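
The FTP test above also documents the client-side contract: callers never name FTPFileSystem, they simply hand FileSystem.get() an ftp:// URI with credentials embedded. A hedged sketch of that usage outside the test harness (host, credentials and paths are placeholders, and a reachable FTP server plus the stock fs.ftp.impl mapping are assumed):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FileUtil;
    import org.apache.hadoop.fs.Path;

    public class FtpCopySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem localFs = FileSystem.getLocal(conf);
        // Credentials ride along in the URI, as in TestFTPFileSystem.setUp().
        FileSystem ftpFs = FileSystem.get(
            URI.create("ftp://user:secret@ftp.example.com:21"), conf);

        Path local = new Path("/tmp/ftp-demo/payload");
        localFs.mkdirs(local.getParent());
        localFs.create(local).close();

        // Paths on the FTP side are resolved relative to the login home directory.
        boolean copied = FileUtil.copy(localFs, local, ftpFs, new Path("payload"), false, conf);
        System.out.println("copied=" + copied + " exists=" + ftpFs.exists(new Path("payload")));

        ftpFs.delete(new Path("payload"), false);
        ftpFs.close();
      }
    }
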
For this purpose, use the LocalFileSystem + * as a way to "emulate" KFS. + */ + +package org.apache.hadoop.fs.kfs; + +import java.io.*; + +import org.apache.hadoop.conf.Configuration; + +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.BlockLocation; + + +public class KFSEmulationImpl implements IFSImpl { + FileSystem localFS; + + public KFSEmulationImpl(Configuration conf) throws IOException { + localFS = FileSystem.getLocal(conf); + } + + public boolean exists(String path) throws IOException { + return localFS.exists(new Path(path)); + } + public boolean isDirectory(String path) throws IOException { + return localFS.isDirectory(new Path(path)); + } + public boolean isFile(String path) throws IOException { + return localFS.isFile(new Path(path)); + } + + public String[] readdir(String path) throws IOException { + FileStatus[] p = localFS.listStatus(new Path(path)); + String[] entries = null; + + if (p == null) { + return null; + } + + entries = new String[p.length]; + for (int i = 0; i < p.length; i++) + entries[i] = p[i].getPath().toString(); + return entries; + } + + public FileStatus[] readdirplus(Path path) throws IOException { + return localFS.listStatus(path); + } + + public int mkdirs(String path) throws IOException { + if (localFS.mkdirs(new Path(path))) + return 0; + + return -1; + } + + public int rename(String source, String dest) throws IOException { + if (localFS.rename(new Path(source), new Path(dest))) + return 0; + return -1; + } + + public int rmdir(String path) throws IOException { + if (isDirectory(path)) { + // the directory better be empty + String[] dirEntries = readdir(path); + if ((dirEntries.length <= 2) && (localFS.delete(new Path(path), true))) + return 0; + } + return -1; + } + + public int remove(String path) throws IOException { + if (isFile(path) && (localFS.delete(new Path(path), true))) + return 0; + return -1; + } + + public long filesize(String path) throws IOException { + return localFS.getLength(new Path(path)); + } + public short getReplication(String path) throws IOException { + return 1; + } + public short setReplication(String path, short replication) throws IOException { + return 1; + } + public String[][] getDataLocation(String path, long start, long len) throws IOException { + BlockLocation[] blkLocations = + localFS.getFileBlockLocations(localFS.getFileStatus(new Path(path)), + start, len); + if ((blkLocations == null) || (blkLocations.length == 0)) { + return new String[0][]; + } + int blkCount = blkLocations.length; + String[][]hints = new String[blkCount][]; + for (int i=0; i < blkCount ; i++) { + String[] hosts = blkLocations[i].getHosts(); + hints[i] = new String[hosts.length]; + hints[i] = hosts; + } + return hints; + } + + public long getModificationTime(String path) throws IOException { + FileStatus s = localFS.getFileStatus(new Path(path)); + if (s == null) + return 0; + + return s.getModificationTime(); + } + + public FSDataOutputStream create(String path, short replication, int bufferSize) throws IOException { + // besides path/overwrite, the other args don't matter for + // testing purposes. 
+ return localFS.create(new Path(path)); + } + + public FSDataInputStream open(String path, int bufferSize) throws IOException { + return localFS.open(new Path(path)); + } + + +}; diff --git a/src/test/org/apache/hadoop/fs/kfs/TestKosmosFileSystem.java b/src/test/org/apache/hadoop/fs/kfs/TestKosmosFileSystem.java new file mode 100644 index 0000000..7226ca1 --- /dev/null +++ b/src/test/org/apache/hadoop/fs/kfs/TestKosmosFileSystem.java @@ -0,0 +1,179 @@ +/** + * + * Licensed under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * @author: Sriram Rao (Kosmix Corp.) + * + * Unit tests for testing the KosmosFileSystem API implementation. + */ + +package org.apache.hadoop.fs.kfs; + +import java.io.*; +import java.net.*; + +import junit.framework.TestCase; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; + +import org.apache.hadoop.fs.kfs.KosmosFileSystem; + +public class TestKosmosFileSystem extends TestCase { + + KosmosFileSystem kosmosFileSystem; + KFSEmulationImpl kfsEmul; + Path baseDir; + + @Override + protected void setUp() throws IOException { + Configuration conf = new Configuration(); + + kfsEmul = new KFSEmulationImpl(conf); + kosmosFileSystem = new KosmosFileSystem(kfsEmul); + // a dummy URI; we are not connecting to any setup here + kosmosFileSystem.initialize(URI.create("kfs:///"), conf); + baseDir = new Path(System.getProperty("test.build.data", "/tmp" ) + + "/kfs-test"); + } + + @Override + protected void tearDown() throws Exception { + + } + + // @Test + // Check all the directory API's in KFS + public void testDirs() throws Exception { + Path subDir1 = new Path("dir.1"); + + // make the dir + kosmosFileSystem.mkdirs(baseDir); + assertTrue(kosmosFileSystem.isDirectory(baseDir)); + kosmosFileSystem.setWorkingDirectory(baseDir); + + kosmosFileSystem.mkdirs(subDir1); + assertTrue(kosmosFileSystem.isDirectory(subDir1)); + + assertFalse(kosmosFileSystem.exists(new Path("test1"))); + assertFalse(kosmosFileSystem.isDirectory(new Path("test/dir.2"))); + + FileStatus[] p = kosmosFileSystem.listStatus(baseDir); + assertEquals(p.length, 1); + + kosmosFileSystem.delete(baseDir, true); + assertFalse(kosmosFileSystem.exists(baseDir)); + } + + // @Test + // Check the file API's + public void testFiles() throws Exception { + Path subDir1 = new Path("dir.1"); + Path file1 = new Path("dir.1/foo.1"); + Path file2 = new Path("dir.1/foo.2"); + + kosmosFileSystem.mkdirs(baseDir); + assertTrue(kosmosFileSystem.isDirectory(baseDir)); + kosmosFileSystem.setWorkingDirectory(baseDir); + + kosmosFileSystem.mkdirs(subDir1); + + FSDataOutputStream s1 = kosmosFileSystem.create(file1, true, 4096, (short) 1, (long) 4096, null); + FSDataOutputStream s2 = kosmosFileSystem.create(file2, true, 4096, (short) 1, (long) 4096, null); + + s1.close(); + s2.close(); + + FileStatus[] 
p = kosmosFileSystem.listStatus(subDir1); + assertEquals(p.length, 2); + + kosmosFileSystem.delete(file1, true); + p = kosmosFileSystem.listStatus(subDir1); + assertEquals(p.length, 1); + + kosmosFileSystem.delete(file2, true); + p = kosmosFileSystem.listStatus(subDir1); + assertEquals(p.length, 0); + + kosmosFileSystem.delete(baseDir, true); + assertFalse(kosmosFileSystem.exists(baseDir)); + } + + // @Test + // Check file/read write + public void testFileIO() throws Exception { + Path subDir1 = new Path("dir.1"); + Path file1 = new Path("dir.1/foo.1"); + + kosmosFileSystem.mkdirs(baseDir); + assertTrue(kosmosFileSystem.isDirectory(baseDir)); + kosmosFileSystem.setWorkingDirectory(baseDir); + + kosmosFileSystem.mkdirs(subDir1); + + FSDataOutputStream s1 = kosmosFileSystem.create(file1, true, 4096, (short) 1, (long) 4096, null); + + int bufsz = 4096; + byte[] data = new byte[bufsz]; + + for (int i = 0; i < data.length; i++) + data[i] = (byte) (i % 16); + + // write 4 bytes and read them back; read API should return a byte per call + s1.write(32); + s1.write(32); + s1.write(32); + s1.write(32); + // write some data + s1.write(data, 0, data.length); + // flush out the changes + s1.close(); + + // Read the stuff back and verify it is correct + FSDataInputStream s2 = kosmosFileSystem.open(file1, 4096); + int v; + + v = s2.read(); + assertEquals(v, 32); + v = s2.read(); + assertEquals(v, 32); + v = s2.read(); + assertEquals(v, 32); + v = s2.read(); + assertEquals(v, 32); + + assertEquals(s2.available(), data.length); + + byte[] buf = new byte[bufsz]; + s2.read(buf, 0, buf.length); + for (int i = 0; i < data.length; i++) + assertEquals(data[i], buf[i]); + + assertEquals(s2.available(), 0); + + s2.close(); + + kosmosFileSystem.delete(file1, true); + assertFalse(kosmosFileSystem.exists(file1)); + kosmosFileSystem.delete(subDir1, true); + assertFalse(kosmosFileSystem.exists(subDir1)); + kosmosFileSystem.delete(baseDir, true); + assertFalse(kosmosFileSystem.exists(baseDir)); + } + +} diff --git a/src/test/org/apache/hadoop/fs/loadGenerator/DataGenerator.java b/src/test/org/apache/hadoop/fs/loadGenerator/DataGenerator.java new file mode 100644 index 0000000..4825bba --- /dev/null +++ b/src/test/org/apache/hadoop/fs/loadGenerator/DataGenerator.java @@ -0,0 +1,160 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
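
Taken together, the two KFS classes above form a small test-double arrangement: KosmosFileSystem is handed an IFSImpl that is really the local file system, so the kfs:// code paths run with no KFS deployment at all. A condensed sketch of that wiring (placed in the same package because the IFSImpl-taking constructor may only be visible there; the dummy kfs:/// URI and the /tmp path mirror the test's choices):

    package org.apache.hadoop.fs.kfs;

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.Path;

    public class KfsEmulationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        // Swap the real KFS client for the LocalFileSystem-backed emulation.
        KFSEmulationImpl emulation = new KFSEmulationImpl(conf);
        KosmosFileSystem kfs = new KosmosFileSystem(emulation);
        kfs.initialize(URI.create("kfs:///"), conf);   // dummy URI; nothing is contacted

        Path dir = new Path("/tmp/kfs-emulation-demo");
        Path probe = new Path(dir, "probe");
        kfs.mkdirs(dir);

        FSDataOutputStream out =
            kfs.create(probe, true, 4096, (short) 1, (long) 4096, null);
        out.writeBytes("hello through the kfs wrapper\n");
        out.close();
        System.out.println(kfs.listStatus(dir).length + " entry under " + dir);

        kfs.delete(probe, true);
        kfs.delete(dir, true);
      }
    }
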
+ */ + +package org.apache.hadoop.fs.loadGenerator; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileReader; +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; + +/** + * This program reads the directory structure and file structure from + * the input directory and creates the namespace in the file system + * specified by the configuration in the specified root. + * All the files are filled with 'a'. + * + * The synopsis of the command is + * java DataGenerator + * -inDir : input directory name where directory/file structures + * are stored. Its default value is the current directory. + * -root : the name of the root directory which the new namespace + * is going to be placed under. + * Its default value is "/testLoadSpace". + */ +public class DataGenerator extends Configured implements Tool { + private File inDir = StructureGenerator.DEFAULT_STRUCTURE_DIRECTORY; + private Path root = DEFAULT_ROOT; + private FileSystem fs; + final static private long BLOCK_SIZE = 10; + final static private String USAGE = "java DataGenerator " + + "-inDir " + + "-root "; + + /** default name of the root where the test namespace will be placed under */ + final static Path DEFAULT_ROOT = new Path("/testLoadSpace"); + + /** Main function. + * It first parses the command line arguments. + * It then reads the directory structure from the input directory + * structure file and creates directory structure in the file system + * namespace. Afterwards it reads the file attributes and creates files + * in the file. All file content is filled with 'a'. + */ + public int run(String[] args) throws Exception { + int exitCode = 0; + exitCode = init(args); + if (exitCode != 0) { + return exitCode; + } + genDirStructure(); + genFiles(); + return exitCode; + } + + /** Parse the command line arguments and initialize the data */ + private int init(String[] args) { + try { // initialize file system handle + fs = FileSystem.get(getConf()); + } catch (IOException ioe) { + System.err.println("Can not initialize the file system: " + + ioe.getLocalizedMessage()); + return -1; + } + + for (int i = 0; i < args.length; i++) { // parse command line + if (args[i].equals("-root")) { + root = new Path(args[++i]); + } else if (args[i].equals("-inDir")) { + inDir = new File(args[++i]); + } else { + System.err.println(USAGE); + ToolRunner.printGenericCommandUsage(System.err); + System.exit(-1); + } + } + return 0; + } + + /** Read directory structure file under the input directory. + * Create each directory under the specified root. + * The directory names are relative to the specified root. + */ + private void genDirStructure() throws IOException { + BufferedReader in = new BufferedReader( + new FileReader(new File(inDir, + StructureGenerator.DIR_STRUCTURE_FILE_NAME))); + String line; + while ((line=in.readLine()) != null) { + fs.mkdirs(new Path(root+line)); + } + } + + /** Read file structure file under the input directory. + * Create each file under the specified root. + * The file names are relative to the root. 
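
Since DataGenerator is a Tool, the intended entry point is ToolRunner, which strips the generic -conf/-D options before run() parses the remaining arguments. A minimal driver sketch (the -inDir value must point at output from StructureGenerator; both paths below are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.loadGenerator.DataGenerator;
    import org.apache.hadoop.util.ToolRunner;

    public class DataGeneratorDriver {
      public static void main(String[] args) throws Exception {
        // Generic options such as -D fs.default.name=... are consumed by ToolRunner.
        int rc = ToolRunner.run(new Configuration(), new DataGenerator(),
            new String[] {"-inDir", "/tmp/structure", "-root", "/testLoadSpace"});
        System.exit(rc);
      }
    }
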
+ */ + private void genFiles() throws IOException { + BufferedReader in = new BufferedReader( + new FileReader(new File(inDir, + StructureGenerator.FILE_STRUCTURE_FILE_NAME))); + String line; + while ((line=in.readLine()) != null) { + String[] tokens = line.split(" "); + if (tokens.length != 2) { + throw new IOException("Expect at most 2 tokens per line: " + line); + } + String fileName = root+tokens[0]; + long fileSize = (long)(BLOCK_SIZE*Double.parseDouble(tokens[1])); + genFile(new Path(fileName), fileSize); + } + } + + /** Create a file with the name file and + * a length of fileSize. The file is filled with character 'a'. + */ + private void genFile(Path file, long fileSize) throws IOException { + FSDataOutputStream out = fs.create(file, true, + getConf().getInt("io.file.buffer.size", 4096), + (short)getConf().getInt("dfs.replication", 3), + fs.getDefaultBlockSize()); + for(long i=0; i: read probability [0, 1] + * with a default value of 0.3333. + * -writeProbability : write probability [0, 1] + * with a default value of 0.3333. + * -root : test space with a default value of /testLoadSpace + * -maxDelayBetweenOps : + * Max delay in the unit of milliseconds between two operations with a + * default value of 0 indicating no delay. + * -numOfThreads : + * number of threads to spawn with a default value of 200. + * -elapsedTime : + * the elapsed time of program with a default value of 0 + * indicating running forever + * -startTime : when the threads start to run. + */ +public class LoadGenerator extends Configured implements Tool { + private volatile boolean shouldRun = true; + private Path root = DataGenerator.DEFAULT_ROOT; + private FileSystem fs; + private int maxDelayBetweenOps = 0; + private int numOfThreads = 200; + private double readPr = 0.3333; + private double writePr = 0.3333; + private long elapsedTime = 0; + private long startTime = System.currentTimeMillis()+10000; + final static private int BLOCK_SIZE = 10; + private ArrayList files = new ArrayList(); // a table of file names + private ArrayList dirs = new ArrayList(); // a table of directory names + private Random r = null; + final private static String USAGE = "java LoadGenerator\n" + + "-readProbability \n" + + "-writeProbability \n" + + "-root \n" + + "-maxDelayBetweenOps \n" + + "-numOfThreads \n" + + "-elapsedTime \n" + + "-startTime "; + final private String hostname; + + /** Constructor */ + public LoadGenerator() throws IOException, UnknownHostException { + InetAddress addr = InetAddress.getLocalHost(); + hostname = addr.getHostName(); + } + + private final static int OPEN = 0; + private final static int LIST = 1; + private final static int CREATE = 2; + private final static int WRITE_CLOSE = 3; + private final static int DELETE = 4; + private final static int TOTAL_OP_TYPES =5; + private long [] executionTime = new long[TOTAL_OP_TYPES]; + private long [] totalNumOfOps = new long[TOTAL_OP_TYPES]; + + /** A thread sends a stream of requests to the NameNode. + * At each iteration, it first decides if it is going to read a file, + * create a file, or listing a directory following the read + * and write probabilities. + * When reading, it randomly picks a file in the test space and reads + * the entire file. When writing, it randomly picks a directory in the + * test space and creates a file whose name consists of the current + * machine's host name and the thread id. The length of the file + * follows Gaussian distribution with an average size of 2 blocks and + * the standard deviation of 1 block. 
The new file is filled with 'a'. + * Immediately after the file creation completes, the file is deleted + * from the test space. + * While listing, it randomly picks a directory in the test space and + * list the directory content. + * Between two consecutive operations, the thread pauses for a random + * amount of time in the range of [0, maxDelayBetweenOps] + * if the specified max delay is not zero. + * A thread runs for the specified elapsed time if the time isn't zero. + * Otherwise, it runs forever. + */ + private class DFSClientThread extends Thread { + private int id; + private long [] executionTime = new long[TOTAL_OP_TYPES]; + private long [] totalNumOfOps = new long[TOTAL_OP_TYPES]; + private byte[] buffer = new byte[1024]; + + private DFSClientThread(int id) { + this.id = id; + } + + /** Main loop + * Each iteration decides what's the next operation and then pauses. + */ + public void run() { + try { + while (shouldRun) { + nextOp(); + delay(); + } + } catch (Exception ioe) { + System.err.println(ioe.getLocalizedMessage()); + ioe.printStackTrace(); + } + } + + /** Let the thread pause for a random amount of time in the range of + * [0, maxDelayBetweenOps] if the delay is not zero. Otherwise, no pause. + */ + private void delay() throws InterruptedException { + if (maxDelayBetweenOps>0) { + int delay = r.nextInt(maxDelayBetweenOps); + Thread.sleep(delay); + } + } + + /** Perform the next operation. + * + * Depending on the read and write probabilities, the next + * operation could be either read, write, or list. + */ + private void nextOp() throws IOException { + double rn = r.nextDouble(); + if (rn < readPr) { + read(); + } else if (rn < readPr+writePr) { + write(); + } else { + list(); + } + } + + /** Read operation randomly picks a file in the test space and reads + * the entire file */ + private void read() throws IOException { + String fileName = files.get(r.nextInt(files.size())); + long startTime = System.currentTimeMillis(); + InputStream in = fs.open(new Path(fileName)); + executionTime[OPEN] += (System.currentTimeMillis()-startTime); + totalNumOfOps[OPEN]++; + while (in.read(buffer) != -1) {} + in.close(); + } + + /** The write operation randomly picks a directory in the + * test space and creates a file whose name consists of the current + * machine's host name and the thread id. The length of the file + * follows Gaussian distribution with an average size of 2 blocks and + * the standard deviation of 1 block. The new file is filled with 'a'. + * Immediately after the file creation completes, the file is deleted + * from the test space. + */ + private void write() throws IOException { + String dirName = dirs.get(r.nextInt(dirs.size())); + Path file = new Path(dirName, hostname+id); + double fileSize = 0; + while ((fileSize = r.nextGaussian()+2)<=0) {} + genFile(file, (long)(fileSize*BLOCK_SIZE)); + long startTime = System.currentTimeMillis(); + fs.delete(file, true); + executionTime[DELETE] += (System.currentTimeMillis()-startTime); + totalNumOfOps[DELETE]++; + } + + /** The list operation randomly picks a directory in the test space and + * list the directory content. + */ + private void list() throws IOException { + String dirName = dirs.get(r.nextInt(dirs.size())); + long startTime = System.currentTimeMillis(); + fs.listStatus(new Path(dirName)); + executionTime[LIST] += (System.currentTimeMillis()-startTime); + totalNumOfOps[LIST]++; + } + } + + /** Main function: + * It first initializes data by parsing the command line arguments. 
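+ * (A hypothetical invocation, with option values chosen only for
+ * illustration:
+ *   java LoadGenerator -root /testLoadSpace -readProbability 0.5
+ *        -writeProbability 0.25 -numOfThreads 20 -elapsedTime 60
+ * reads about half the time, writes about a quarter of the time, lists
+ * otherwise, and stops after 60 seconds.)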
+ * It then starts the number of DFSClient threads as specified by + * the user. + * It stops all the threads when the specified elapsed time is passed. + * Before exiting, it prints the average execution for + * each operation and operation throughput. + */ + public int run(String[] args) throws Exception { + int exitCode = init(args); + if (exitCode != 0) { + return exitCode; + } + + barrier(); + + DFSClientThread[] threads = new DFSClientThread[numOfThreads]; + for (int i=0; i0) { + Thread.sleep(elapsedTime*1000); + shouldRun = false; + } + for (DFSClientThread thread : threads) { + thread.join(); + for (int i=0; i1) { + System.err.println( + "The read probability must be [0, 1]: " + readPr); + return -1; + } + } else if (args[i].equals("-writeProbability")) { + writePr = Double.parseDouble(args[++i]); + if (writePr<0 || writePr>1) { + System.err.println( + "The write probability must be [0, 1]: " + writePr); + return -1; + } + } else if (args[i].equals("-root")) { + root = new Path(args[++i]); + } else if (args[i].equals("-maxDelayBetweenOps")) { + maxDelayBetweenOps = Integer.parseInt(args[++i]); // in milliseconds + } else if (args[i].equals("-numOfThreads")) { + numOfThreads = Integer.parseInt(args[++i]); + if (numOfThreads <= 0) { + System.err.println( + "Number of threads must be positive: " + numOfThreads); + return -1; + } + } else if (args[i].equals("-startTime")) { + startTime = Long.parseLong(args[++i]); + } else if (args[i].equals("-elapsedTime")) { + elapsedTime = Long.parseLong(args[++i]); + } else if (args[i].equals("-seed")) { + r = new Random(Long.parseLong(args[++i])+hostHashCode); + } else { + System.err.println(USAGE); + ToolRunner.printGenericCommandUsage(System.err); + return -1; + } + } + } catch (NumberFormatException e) { + System.err.println("Illegal parameter: " + e.getLocalizedMessage()); + System.err.println(USAGE); + return -1; + } + + if (readPr+writePr <0 || readPr+writePr>1) { + System.err.println( + "The sum of read probability and write probability must be [0, 1]: " + + readPr + " "+writePr); + return -1; + } + + if (r==null) { + r = new Random(System.currentTimeMillis()+hostHashCode); + } + + return initFileDirTables(); + } + + /** Create a table that contains all directories under root and + * another table that contains all files under root. + */ + private int initFileDirTables() { + try { + initFileDirTables(root); + } catch (IOException e) { + System.err.println(e.getLocalizedMessage()); + e.printStackTrace(); + return -1; + } + if (dirs.isEmpty()) { + System.err.println("The test space " + root + " is empty"); + return -1; + } + if (files.isEmpty()) { + System.err.println("The test space " + root + + " does not have any file"); + return -1; + } + return 0; + } + + /** Create a table that contains all directories under the specified path and + * another table that contains all files under the specified path and + * whose name starts with "_file_". + */ + private void initFileDirTables(Path path) throws IOException { + FileStatus[] stats = fs.listStatus(path); + if (stats != null) { + for (FileStatus stat : stats) { + if (stat.isDir()) { + dirs.add(stat.getPath().toString()); + initFileDirTables(stat.getPath()); + } else { + Path filePath = stat.getPath(); + if (filePath.getName().startsWith(StructureGenerator.FILE_NAME_PREFIX)) { + files.add(filePath.toString()); + } + } + } + } + } + + /** Returns when the current number of seconds from the epoch equals + * the command line argument given by -startTime. 
+ * This allows multiple instances of this program, running on clock + * synchronized nodes, to start at roughly the same time. + */ + private void barrier() { + long sleepTime; + while ((sleepTime = startTime - System.currentTimeMillis()) > 0) { + try { + Thread.sleep(sleepTime); + } catch (InterruptedException ex) { + } + } + } + + /** Create a file with a length of fileSize. + * The file is filled with 'a'. + */ + private void genFile(Path file, long fileSize) throws IOException { + long startTime = System.currentTimeMillis(); + FSDataOutputStream out = fs.create(file, true, + getConf().getInt("io.file.buffer.size", 4096), + (short)getConf().getInt("dfs.replication", 3), + fs.getDefaultBlockSize()); + executionTime[CREATE] += (System.currentTimeMillis()-startTime); + totalNumOfOps[CREATE]++; + + for (long i=0; i : maximum depth of the directory tree; default is 5. + -minWidth : minimum number of subdirectories per directories; default is 1 + -maxWidth : maximum number of subdirectories per directories; default is 5 + -numOfFiles <#OfFiles> : the total number of files; default is 10. + -avgFileSize : average size of blocks; default is 1. + -outDir : output directory; default is the current directory. + -seed : random number generator seed; default is the current time. + */ +public class StructureGenerator { + private int maxDepth = 5; + private int minWidth = 1; + private int maxWidth = 5; + private int numOfFiles = 10; + private double avgFileSize = 1; + private File outDir = DEFAULT_STRUCTURE_DIRECTORY; + final static private String USAGE = "java StructureGenerator\n" + + "-maxDepth \n" + + "-minWidth \n" + + "-maxWidth \n" + + "-numOfFiles <#OfFiles>\n" + + "-avgFileSize \n" + + "-outDir \n" + + "-seed "; + + private Random r = null; + + /** Default directory for storing file/directory structure */ + final static File DEFAULT_STRUCTURE_DIRECTORY = new File("."); + /** The name of the file for storing directory structure */ + final static String DIR_STRUCTURE_FILE_NAME = "dirStructure"; + /** The name of the file for storing file structure */ + final static String FILE_STRUCTURE_FILE_NAME = "fileStructure"; + /** The name prefix for the files created by this program */ + final static String FILE_NAME_PREFIX = "_file_"; + + /** + * The main function first parses the command line arguments, + * then generates in-memory directory structure and outputs to a file, + * last generates in-memory files and outputs them to a file. 
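+ * As a purely illustrative example, a run along the lines of
+ *   java StructureGenerator -maxDepth 3 -numOfFiles 5 -outDir /tmp/structure
+ * leaves a "dirStructure" and a "fileStructure" file under /tmp/structure,
+ * which DataGenerator can then read to create the test namespace.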
+ */ + public int run(String[] args) throws Exception { + int exitCode = 0; + exitCode = init(args); + if (exitCode != 0) { + return exitCode; + } + genDirStructure(); + output(new File(outDir, DIR_STRUCTURE_FILE_NAME)); + genFileStructure(); + outputFiles(new File(outDir, FILE_STRUCTURE_FILE_NAME)); + return exitCode; + } + + /** Parse the command line arguments and initialize the data */ + private int init(String[] args) { + try { + for (int i = 0; i < args.length; i++) { // parse command line + if (args[i].equals("-maxDepth")) { + maxDepth = Integer.parseInt(args[++i]); + if (maxDepth<1) { + System.err.println("maxDepth must be positive: " + maxDepth); + return -1; + } + } else if (args[i].equals("-minWidth")) { + minWidth = Integer.parseInt(args[++i]); + if (minWidth<0) { + System.err.println("minWidth must be positive: " + minWidth); + return -1; + } + } else if (args[i].equals("-maxWidth")) { + maxWidth = Integer.parseInt(args[++i]); + } else if (args[i].equals("-numOfFiles")) { + numOfFiles = Integer.parseInt(args[++i]); + if (numOfFiles<1) { + System.err.println("NumOfFiles must be positive: " + numOfFiles); + return -1; + } + } else if (args[i].equals("-avgFileSize")) { + avgFileSize = Double.parseDouble(args[++i]); + if (avgFileSize<=0) { + System.err.println("AvgFileSize must be positive: " + avgFileSize); + return -1; + } + } else if (args[i].equals("-outDir")) { + outDir = new File(args[++i]); + } else if (args[i].equals("-seed")) { + r = new Random(Long.parseLong(args[++i])); + } else { + System.err.println(USAGE); + ToolRunner.printGenericCommandUsage(System.err); + return -1; + } + } + } catch (NumberFormatException e) { + System.err.println("Illegal parameter: " + e.getLocalizedMessage()); + System.err.println(USAGE); + return -1; + } + + if (maxWidth < minWidth) { + System.err.println( + "maxWidth must be bigger than minWidth: " + maxWidth); + return -1; + } + + if (r==null) { + r = new Random(); + } + return 0; + } + + /** In memory representation of a directory */ + private static class INode { + private String name; + private List children = new ArrayList(); + + /** Constructor */ + private INode(String name) { + this.name = name; + } + + /** Add a child (subdir/file) */ + private void addChild(INode child) { + children.add(child); + } + + /** Output the subtree rooted at the current node. + * Only the leaves are printed. 
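+ * As a hypothetical illustration, a leaf directory named, say, "dir0"
+ * nested under "dir1" is emitted as the single line "/dir1/dir0";
+ * interior directories are implied by their leaves rather than printed
+ * themselves.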
+ */ + private void output(PrintStream out, String prefix) { + prefix = prefix==null?name:prefix+"/"+name; + if (children.isEmpty()) { + out.println(prefix); + } else { + for (INode child : children) { + child.output(out, prefix); + } + } + } + + /** Output the files in the subtree rooted at this node */ + protected void outputFiles(PrintStream out, String prefix) { + prefix = prefix==null?name:prefix+"/"+name; + for (INode child : children) { + child.outputFiles(out, prefix); + } + } + + /** Add all the leaves in the subtree to the input list */ + private void getLeaves(List leaves) { + if (children.isEmpty()) { + leaves.add(this); + } else { + for (INode child : children) { + child.getLeaves(leaves); + } + } + } + } + + /** In memory representation of a file */ + private static class FileINode extends INode { + private double numOfBlocks; + + /** constructor */ + private FileINode(String name, double numOfBlocks) { + super(name); + this.numOfBlocks = numOfBlocks; + } + + /** Output a file attribute */ + protected void outputFiles(PrintStream out, String prefix) { + prefix = (prefix == null)?super.name: prefix + "/"+super.name; + out.println(prefix + " " + numOfBlocks); + } + } + + private INode root; + + /** Generates a directory tree with a max depth of maxDepth */ + private void genDirStructure() { + root = genDirStructure("", maxDepth); + } + + /** Generate a directory tree rooted at rootName + * The number of subtree is in the range of [minWidth, maxWidth]. + * The maximum depth of each subtree is in the range of + * [2*maxDepth/3, maxDepth]. + */ + private INode genDirStructure(String rootName, int maxDepth) { + INode root = new INode(rootName); + + if (maxDepth>0) { + maxDepth--; + int minDepth = maxDepth*2/3; + // Figure out the number of subdirectories to generate + int numOfSubDirs = minWidth + r.nextInt(maxWidth-minWidth+1); + // Expand the tree + for (int i=0; i getLeaves() { + List leaveDirs = new ArrayList(); + root.getLeaves(leaveDirs); + return leaveDirs; + } + + /** Decides where to place all the files and its length. + * It first collects all empty directories in the tree. + * For each file, it randomly chooses an empty directory to place the file. + * The file's length is generated using Gaussian distribution. 
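+ * (As a purely illustrative outcome, the resulting file structure file
+ * could contain an entry such as "/dir1/dir0/_file_2 1.3", i.e. a file of
+ * roughly 1.3 blocks placed in the leaf directory /dir1/dir0.)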
+ */ + private void genFileStructure() { + List leaves = getLeaves(); + int totalLeaves = leaves.size(); + for (int i=0; i inodes = new TreeMap(); + private Map blocks = new HashMap(); + + public void initialize(URI uri, Configuration conf) { + this.conf = conf; + } + + public String getVersion() throws IOException { + return "0"; + } + + public void deleteINode(Path path) throws IOException { + inodes.remove(normalize(path)); + } + + public void deleteBlock(Block block) throws IOException { + blocks.remove(block.getId()); + } + + public boolean inodeExists(Path path) throws IOException { + return inodes.containsKey(normalize(path)); + } + + public boolean blockExists(long blockId) throws IOException { + return blocks.containsKey(blockId); + } + + public INode retrieveINode(Path path) throws IOException { + return inodes.get(normalize(path)); + } + + public File retrieveBlock(Block block, long byteRangeStart) throws IOException { + byte[] data = blocks.get(block.getId()); + File file = createTempFile(); + BufferedOutputStream out = null; + try { + out = new BufferedOutputStream(new FileOutputStream(file)); + out.write(data, (int) byteRangeStart, data.length - (int) byteRangeStart); + } finally { + if (out != null) { + out.close(); + } + } + return file; + } + + private File createTempFile() throws IOException { + File dir = new File(conf.get("fs.s3.buffer.dir")); + if (!dir.exists() && !dir.mkdirs()) { + throw new IOException("Cannot create S3 buffer directory: " + dir); + } + File result = File.createTempFile("test-", ".tmp", dir); + result.deleteOnExit(); + return result; + } + + public Set listSubPaths(Path path) throws IOException { + Path normalizedPath = normalize(path); + // This is inefficient but more than adequate for testing purposes. + Set subPaths = new LinkedHashSet(); + for (Path p : inodes.tailMap(normalizedPath).keySet()) { + if (normalizedPath.equals(p.getParent())) { + subPaths.add(p); + } + } + return subPaths; + } + + public Set listDeepSubPaths(Path path) throws IOException { + Path normalizedPath = normalize(path); + String pathString = normalizedPath.toUri().getPath(); + if (!pathString.endsWith("/")) { + pathString += "/"; + } + // This is inefficient but more than adequate for testing purposes. 
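+ // (Hypothetical illustration: with inodes stored at /a, /a/b and /a/b/c,
+ // calling this method with path /a returns both /a/b and /a/b/c, whereas
+ // listSubPaths above returns only the direct child /a/b.)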
+ Set subPaths = new LinkedHashSet(); + for (Path p : inodes.tailMap(normalizedPath).keySet()) { + if (p.toUri().getPath().startsWith(pathString)) { + subPaths.add(p); + } + } + return subPaths; + } + + public void storeINode(Path path, INode inode) throws IOException { + inodes.put(normalize(path), inode); + } + + public void storeBlock(Block block, File file) throws IOException { + ByteArrayOutputStream out = new ByteArrayOutputStream(); + byte[] buf = new byte[8192]; + int numRead; + BufferedInputStream in = null; + try { + in = new BufferedInputStream(new FileInputStream(file)); + while ((numRead = in.read(buf)) >= 0) { + out.write(buf, 0, numRead); + } + } finally { + if (in != null) { + in.close(); + } + } + blocks.put(block.getId(), out.toByteArray()); + } + + private Path normalize(Path path) { + if (!path.isAbsolute()) { + throw new IllegalArgumentException("Path must be absolute: " + path); + } + return new Path(path.toUri().getPath()); + } + + public void purge() throws IOException { + inodes.clear(); + blocks.clear(); + } + + public void dump() throws IOException { + StringBuilder sb = new StringBuilder(getClass().getSimpleName()); + sb.append(", \n"); + for (Map.Entry entry : inodes.entrySet()) { + sb.append(entry.getKey()).append("\n"); + INode inode = entry.getValue(); + sb.append("\t").append(inode.getFileType()).append("\n"); + if (inode.getFileType() == FileType.DIRECTORY) { + continue; + } + for (int j = 0; j < inode.getBlocks().length; j++) { + sb.append("\t").append(inode.getBlocks()[j]).append("\n"); + } + } + System.out.println(sb); + + System.out.println(inodes.keySet()); + System.out.println(blocks.keySet()); + } + +} diff --git a/src/test/org/apache/hadoop/fs/s3/Jets3tS3FileSystemContractTest.java b/src/test/org/apache/hadoop/fs/s3/Jets3tS3FileSystemContractTest.java new file mode 100644 index 0000000..53b3c03 --- /dev/null +++ b/src/test/org/apache/hadoop/fs/s3/Jets3tS3FileSystemContractTest.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3; + +import java.io.IOException; + +public class Jets3tS3FileSystemContractTest + extends S3FileSystemContractBaseTest { + + @Override + FileSystemStore getFileSystemStore() throws IOException { + return new Jets3tFileSystemStore(); + } + +} diff --git a/src/test/org/apache/hadoop/fs/s3/S3FileSystemContractBaseTest.java b/src/test/org/apache/hadoop/fs/s3/S3FileSystemContractBaseTest.java new file mode 100644 index 0000000..8d6744a --- /dev/null +++ b/src/test/org/apache/hadoop/fs/s3/S3FileSystemContractBaseTest.java @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3; + +import java.io.IOException; +import java.net.URI; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystemContractBaseTest; + +public abstract class S3FileSystemContractBaseTest + extends FileSystemContractBaseTest { + + private FileSystemStore store; + + abstract FileSystemStore getFileSystemStore() throws IOException; + + @Override + protected void setUp() throws Exception { + Configuration conf = new Configuration(); + store = getFileSystemStore(); + fs = new S3FileSystem(store); + fs.initialize(URI.create(conf.get("test.fs.s3.name")), conf); + } + + @Override + protected void tearDown() throws Exception { + store.purge(); + super.tearDown(); + } + +} diff --git a/src/test/org/apache/hadoop/fs/s3/TestINode.java b/src/test/org/apache/hadoop/fs/s3/TestINode.java new file mode 100644 index 0000000..086a43e --- /dev/null +++ b/src/test/org/apache/hadoop/fs/s3/TestINode.java @@ -0,0 +1,60 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3; + +import java.io.IOException; +import java.io.InputStream; + +import junit.framework.TestCase; + +import org.apache.hadoop.fs.s3.INode.FileType; + +public class TestINode extends TestCase { + + public void testSerializeFileWithSingleBlock() throws IOException { + Block[] blocks = { new Block(849282477840258181L, 128L) }; + INode inode = new INode(FileType.FILE, blocks); + + assertEquals("Length", 1L + 4 + 16, inode.getSerializedLength()); + InputStream in = inode.serialize(); + + INode deserialized = INode.deserialize(in); + + assertEquals("FileType", inode.getFileType(), deserialized.getFileType()); + Block[] deserializedBlocks = deserialized.getBlocks(); + assertEquals("Length", 1, deserializedBlocks.length); + assertEquals("Id", blocks[0].getId(), deserializedBlocks[0].getId()); + assertEquals("Length", blocks[0].getLength(), deserializedBlocks[0] + .getLength()); + + } + + public void testSerializeDirectory() throws IOException { + INode inode = INode.DIRECTORY_INODE; + assertEquals("Length", 1L, inode.getSerializedLength()); + InputStream in = inode.serialize(); + INode deserialized = INode.deserialize(in); + assertSame(INode.DIRECTORY_INODE, deserialized); + } + + public void testDeserializeNull() throws IOException { + assertNull(INode.deserialize(null)); + } + +} diff --git a/src/test/org/apache/hadoop/fs/s3/TestInMemoryS3FileSystemContract.java b/src/test/org/apache/hadoop/fs/s3/TestInMemoryS3FileSystemContract.java new file mode 100644 index 0000000..5d66cf1 --- /dev/null +++ b/src/test/org/apache/hadoop/fs/s3/TestInMemoryS3FileSystemContract.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3; + +import java.io.IOException; + +public class TestInMemoryS3FileSystemContract + extends S3FileSystemContractBaseTest { + + @Override + FileSystemStore getFileSystemStore() throws IOException { + return new InMemoryFileSystemStore(); + } + +} diff --git a/src/test/org/apache/hadoop/fs/s3/TestS3Credentials.java b/src/test/org/apache/hadoop/fs/s3/TestS3Credentials.java new file mode 100644 index 0000000..bcbf0dc --- /dev/null +++ b/src/test/org/apache/hadoop/fs/s3/TestS3Credentials.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.s3; + +import java.net.URI; + +import junit.framework.TestCase; + +import org.apache.hadoop.conf.Configuration; + +public class TestS3Credentials extends TestCase { + public void testInvalidHostnameWithUnderscores() throws Exception { + S3Credentials s3Credentials = new S3Credentials(); + try { + s3Credentials.initialize(new URI("s3://a:b@c_d"), new Configuration()); + fail("Should throw IllegalArgumentException"); + } catch (IllegalArgumentException e) { + assertEquals("Invalid hostname in URI s3://a:b@c_d", e.getMessage()); + } + } +} diff --git a/src/test/org/apache/hadoop/fs/s3/TestS3FileSystem.java b/src/test/org/apache/hadoop/fs/s3/TestS3FileSystem.java new file mode 100644 index 0000000..f21989c --- /dev/null +++ b/src/test/org/apache/hadoop/fs/s3/TestS3FileSystem.java @@ -0,0 +1,50 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3; + +import java.io.IOException; +import java.net.URI; + +import junit.framework.TestCase; + +import org.apache.hadoop.conf.Configuration; + +public class TestS3FileSystem extends TestCase { + + public void testInitialization() throws IOException { + initializationTest("s3://a:b@c", "s3://a:b@c"); + initializationTest("s3://a:b@c/", "s3://a:b@c"); + initializationTest("s3://a:b@c/path", "s3://a:b@c"); + initializationTest("s3://a@c", "s3://a@c"); + initializationTest("s3://a@c/", "s3://a@c"); + initializationTest("s3://a@c/path", "s3://a@c"); + initializationTest("s3://c", "s3://c"); + initializationTest("s3://c/", "s3://c"); + initializationTest("s3://c/path", "s3://c"); + } + + private void initializationTest(String initializationUri, String expectedUri) + throws IOException { + + S3FileSystem fs = new S3FileSystem(new InMemoryFileSystemStore()); + fs.initialize(URI.create(initializationUri), new Configuration()); + assertEquals(URI.create(expectedUri), fs.getUri()); + } + +} diff --git a/src/test/org/apache/hadoop/fs/s3native/InMemoryNativeFileSystemStore.java b/src/test/org/apache/hadoop/fs/s3native/InMemoryNativeFileSystemStore.java new file mode 100644 index 0000000..d3086da --- /dev/null +++ b/src/test/org/apache/hadoop/fs/s3native/InMemoryNativeFileSystemStore.java @@ -0,0 +1,198 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3native; + +import static org.apache.hadoop.fs.s3native.NativeS3FileSystem.PATH_DELIMITER; +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.net.URI; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.SortedMap; +import java.util.SortedSet; +import java.util.TreeMap; +import java.util.TreeSet; +import java.util.Map.Entry; + +import org.apache.hadoop.conf.Configuration; + +/** + *
<p>
+ * A stub implementation of {@link NativeFileSystemStore} for testing + * {@link NativeS3FileSystem} without actually connecting to S3. + *
</p>
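+ * <p>
+ * Listing is prefix-based. As a hypothetical example, after storing keys
+ * "test/a" and "test/dir/b", list("test", 1000) returns the metadata for
+ * "test/a" together with the common prefix "test/dir", mimicking S3's
+ * delimiter-based listing.
+ * </p>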
+ */ +class InMemoryNativeFileSystemStore implements NativeFileSystemStore { + + private Configuration conf; + + private SortedMap metadataMap = + new TreeMap(); + private SortedMap dataMap = new TreeMap(); + + public void initialize(URI uri, Configuration conf) throws IOException { + this.conf = conf; + } + + public void storeEmptyFile(String key) throws IOException { + metadataMap.put(key, new FileMetadata(key, 0, System.currentTimeMillis())); + dataMap.put(key, new byte[0]); + } + + public void storeFile(String key, File file, byte[] md5Hash) + throws IOException { + + ByteArrayOutputStream out = new ByteArrayOutputStream(); + byte[] buf = new byte[8192]; + int numRead; + BufferedInputStream in = null; + try { + in = new BufferedInputStream(new FileInputStream(file)); + while ((numRead = in.read(buf)) >= 0) { + out.write(buf, 0, numRead); + } + } finally { + if (in != null) { + in.close(); + } + } + metadataMap.put(key, + new FileMetadata(key, file.length(), System.currentTimeMillis())); + dataMap.put(key, out.toByteArray()); + } + + public InputStream retrieve(String key) throws IOException { + return retrieve(key, 0); + } + + public InputStream retrieve(String key, long byteRangeStart) + throws IOException { + + byte[] data = dataMap.get(key); + File file = createTempFile(); + BufferedOutputStream out = null; + try { + out = new BufferedOutputStream(new FileOutputStream(file)); + out.write(data, (int) byteRangeStart, + data.length - (int) byteRangeStart); + } finally { + if (out != null) { + out.close(); + } + } + return new FileInputStream(file); + } + + private File createTempFile() throws IOException { + File dir = new File(conf.get("fs.s3.buffer.dir")); + if (!dir.exists() && !dir.mkdirs()) { + throw new IOException("Cannot create S3 buffer directory: " + dir); + } + File result = File.createTempFile("test-", ".tmp", dir); + result.deleteOnExit(); + return result; + } + + public FileMetadata retrieveMetadata(String key) throws IOException { + return metadataMap.get(key); + } + + public PartialListing list(String prefix, int maxListingLength) + throws IOException { + return list(prefix, maxListingLength, null); + } + + public PartialListing list(String prefix, int maxListingLength, + String priorLastKey) throws IOException { + + return list(prefix, PATH_DELIMITER, maxListingLength, priorLastKey); + } + + public PartialListing listAll(String prefix, int maxListingLength, + String priorLastKey) throws IOException { + + return list(prefix, null, maxListingLength, priorLastKey); + } + + private PartialListing list(String prefix, String delimiter, + int maxListingLength, String priorLastKey) throws IOException { + + if (prefix.length() > 0 && !prefix.endsWith(PATH_DELIMITER)) { + prefix += PATH_DELIMITER; + } + + List metadata = new ArrayList(); + SortedSet commonPrefixes = new TreeSet(); + for (String key : dataMap.keySet()) { + if (key.startsWith(prefix)) { + if (delimiter == null) { + metadata.add(retrieveMetadata(key)); + } else { + int delimIndex = key.indexOf(delimiter, prefix.length()); + if (delimIndex == -1) { + metadata.add(retrieveMetadata(key)); + } else { + String commonPrefix = key.substring(0, delimIndex); + commonPrefixes.add(commonPrefix); + } + } + } + if (metadata.size() + commonPrefixes.size() == maxListingLength) { + new PartialListing(key, metadata.toArray(new FileMetadata[0]), + commonPrefixes.toArray(new String[0])); + } + } + return new PartialListing(null, metadata.toArray(new FileMetadata[0]), + commonPrefixes.toArray(new String[0])); + } + + public void 
delete(String key) throws IOException { + metadataMap.remove(key); + dataMap.remove(key); + } + + public void rename(String srcKey, String dstKey) throws IOException { + metadataMap.put(dstKey, metadataMap.remove(srcKey)); + dataMap.put(dstKey, dataMap.remove(srcKey)); + } + + public void purge(String prefix) throws IOException { + Iterator> i = + metadataMap.entrySet().iterator(); + while (i.hasNext()) { + Entry entry = i.next(); + if (entry.getKey().startsWith(prefix)) { + dataMap.remove(entry.getKey()); + i.remove(); + } + } + } + + public void dump() throws IOException { + System.out.println(metadataMap.values()); + System.out.println(dataMap.keySet()); + } +} diff --git a/src/test/org/apache/hadoop/fs/s3native/Jets3tNativeS3FileSystemContractTest.java b/src/test/org/apache/hadoop/fs/s3native/Jets3tNativeS3FileSystemContractTest.java new file mode 100644 index 0000000..6516c83 --- /dev/null +++ b/src/test/org/apache/hadoop/fs/s3native/Jets3tNativeS3FileSystemContractTest.java @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3native; + +import java.io.IOException; + +public class Jets3tNativeS3FileSystemContractTest + extends NativeS3FileSystemContractBaseTest { + + @Override + NativeFileSystemStore getNativeFileSystemStore() throws IOException { + return new Jets3tNativeFileSystemStore(); + } +} diff --git a/src/test/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java b/src/test/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java new file mode 100644 index 0000000..bf2e3c3 --- /dev/null +++ b/src/test/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3native; + +import java.io.IOException; +import java.net.URI; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystemContractBaseTest; +import org.apache.hadoop.fs.Path; + +public abstract class NativeS3FileSystemContractBaseTest + extends FileSystemContractBaseTest { + + private NativeFileSystemStore store; + + abstract NativeFileSystemStore getNativeFileSystemStore() throws IOException; + + @Override + protected void setUp() throws Exception { + Configuration conf = new Configuration(); + store = getNativeFileSystemStore(); + fs = new NativeS3FileSystem(store); + fs.initialize(URI.create(conf.get("test.fs.s3n.name")), conf); + } + + @Override + protected void tearDown() throws Exception { + store.purge("test"); + super.tearDown(); + } + + public void testListStatusForRoot() throws Exception { + Path testDir = path("/test"); + assertTrue(fs.mkdirs(testDir)); + + FileStatus[] paths = fs.listStatus(path("/")); + assertEquals(1, paths.length); + assertEquals(path("/test"), paths[0].getPath()); + } + +} diff --git a/src/test/org/apache/hadoop/fs/s3native/TestInMemoryNativeS3FileSystemContract.java b/src/test/org/apache/hadoop/fs/s3native/TestInMemoryNativeS3FileSystemContract.java new file mode 100644 index 0000000..664d39e --- /dev/null +++ b/src/test/org/apache/hadoop/fs/s3native/TestInMemoryNativeS3FileSystemContract.java @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3native; + +import java.io.IOException; + +public class TestInMemoryNativeS3FileSystemContract + extends NativeS3FileSystemContractBaseTest { + + @Override + NativeFileSystemStore getNativeFileSystemStore() throws IOException { + return new InMemoryNativeFileSystemStore(); + } +} diff --git a/src/test/org/apache/hadoop/hdfs/AppendTestUtil.java b/src/test/org/apache/hadoop/hdfs/AppendTestUtil.java new file mode 100644 index 0000000..050d9e5 --- /dev/null +++ b/src/test/org/apache/hadoop/hdfs/AppendTestUtil.java @@ -0,0 +1,119 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Random; + +import junit.framework.TestCase; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.security.UnixUserGroupInformation; +import org.apache.hadoop.security.UserGroupInformation; + +/** Utilities for append-related tests */ +class AppendTestUtil { + /** For specifying the random number generator seed, + * change the following value: + */ + static final Long RANDOM_NUMBER_GENERATOR_SEED = null; + + static final Log LOG = LogFactory.getLog(AppendTestUtil.class); + + private static final Random SEED = new Random(); + static { + final long seed = RANDOM_NUMBER_GENERATOR_SEED == null? + SEED.nextLong(): RANDOM_NUMBER_GENERATOR_SEED; + LOG.info("seed=" + seed); + SEED.setSeed(seed); + } + + private static final ThreadLocal RANDOM = new ThreadLocal() { + protected Random initialValue() { + final Random r = new Random(); + synchronized(SEED) { + final long seed = SEED.nextLong(); + r.setSeed(seed); + LOG.info(Thread.currentThread().getName() + ": seed=" + seed); + } + return r; + } + }; + + static int nextInt() {return RANDOM.get().nextInt();} + static int nextInt(int n) {return RANDOM.get().nextInt(n);} + static int nextLong() {return RANDOM.get().nextInt();} + + static byte[] randomBytes(long seed, int size) { + LOG.info("seed=" + seed + ", size=" + size); + final byte[] b = new byte[size]; + final Random rand = new Random(seed); + rand.nextBytes(b); + return b; + } + + static void sleep(long ms) { + try { + Thread.sleep(ms); + } catch (InterruptedException e) { + LOG.info("ms=" + ms, e); + } + } + + static FileSystem createHdfsWithDifferentUsername(Configuration conf + ) throws IOException { + Configuration conf2 = new Configuration(conf); + String username = UserGroupInformation.getCurrentUGI().getUserName()+"_XXX"; + UnixUserGroupInformation.saveToConf(conf2, + UnixUserGroupInformation.UGI_PROPERTY_NAME, + new UnixUserGroupInformation(username, new String[]{"supergroup"})); + return FileSystem.get(conf2); + } + + static void write(OutputStream out, int offset, int length) throws IOException { + final byte[] bytes = new byte[length]; + for(int i = 0; i < length; i++) { + bytes[i] = (byte)(offset + i); + } + out.write(bytes); + } + + static void check(FileSystem fs, Path p, long length) throws IOException { + int i = -1; + try { + final FileStatus status = fs.getFileStatus(p); + TestCase.assertEquals(length, status.getLen()); + InputStream in = fs.open(p); + for(i++; i < length; i++) { + TestCase.assertEquals((byte)i, (byte)in.read()); + } + i = -(int)length; + TestCase.assertEquals(-1, in.read()); //EOF + in.close(); + } catch(IOException ioe) { + throw new IOException("p=" + p + ", length=" + length + ", i=" + i, ioe); + } + } +} \ No newline at end of file diff --git 
a/src/test/org/apache/hadoop/hdfs/BenchmarkThroughput.java b/src/test/org/apache/hadoop/hdfs/BenchmarkThroughput.java new file mode 100644 index 0000000..b283850 --- /dev/null +++ b/src/test/org/apache/hadoop/hdfs/BenchmarkThroughput.java @@ -0,0 +1,234 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.commons.logging.impl.Log4JLogger; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.fs.ChecksumFileSystem; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocalDirAllocator; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; + +import org.apache.log4j.Level; + +/** + * This class benchmarks the performance of the local file system, raw local + * file system and HDFS at reading and writing files. The user should invoke + * the main of this class and optionally include a repetition count. 
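+ * As a purely illustrative example, running it with a single argument of
+ * "3" performs three repetitions; the amount of data written per file and
+ * the I/O buffer size can be tuned through the dfsthroughput.file.size and
+ * dfsthroughput.buffer.size configuration properties (see printUsage).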
+ */ +public class BenchmarkThroughput extends Configured implements Tool { + + // the property in the config that specifies a working directory + private LocalDirAllocator dir; + private long startTime; + // the size of the buffer to use + private int BUFFER_SIZE; + + private void resetMeasurements() { + startTime = System.currentTimeMillis(); + } + + private void printMeasurements() { + System.out.println(" time: " + + ((System.currentTimeMillis() - startTime)/1000)); + } + + private Path writeLocalFile(String name, Configuration conf, + long total) throws IOException { + Path path = dir.getLocalPathForWrite(name, total, conf); + System.out.print("Writing " + name); + resetMeasurements(); + OutputStream out = new FileOutputStream(new File(path.toString())); + byte[] data = new byte[BUFFER_SIZE]; + for(long size=0; size < total; size += BUFFER_SIZE) { + out.write(data); + } + out.close(); + printMeasurements(); + return path; + } + + private void readLocalFile(Path path, + String name, + Configuration conf) throws IOException { + System.out.print("Reading " + name); + resetMeasurements(); + InputStream in = new FileInputStream(new File(path.toString())); + byte[] data = new byte[BUFFER_SIZE]; + long size = 0; + while (size >= 0) { + size = in.read(data); + } + in.close(); + printMeasurements(); + } + + private void writeAndReadLocalFile(String name, + Configuration conf, + long size + ) throws IOException { + Path f = null; + try { + f = writeLocalFile(name, conf, size); + readLocalFile(f, name, conf); + } finally { + if (f != null) { + new File(f.toString()).delete(); + } + } + } + + private Path writeFile(FileSystem fs, + String name, + Configuration conf, + long total + ) throws IOException { + Path f = dir.getLocalPathForWrite(name, total, conf); + System.out.print("Writing " + name); + resetMeasurements(); + OutputStream out = fs.create(f); + byte[] data = new byte[BUFFER_SIZE]; + for(long size = 0; size < total; size += BUFFER_SIZE) { + out.write(data); + } + out.close(); + printMeasurements(); + return f; + } + + private void readFile(FileSystem fs, + Path f, + String name, + Configuration conf + ) throws IOException { + System.out.print("Reading " + name); + resetMeasurements(); + InputStream in = fs.open(f); + byte[] data = new byte[BUFFER_SIZE]; + long val = 0; + while (val >= 0) { + val = in.read(data); + } + in.close(); + printMeasurements(); + } + + private void writeAndReadFile(FileSystem fs, + String name, + Configuration conf, + long size + ) throws IOException { + Path f = null; + try { + f = writeFile(fs, name, conf, size); + readFile(fs, f, name, conf); + } finally { + try { + if (f != null) { + fs.delete(f, true); + } + } catch (IOException ie) { + // IGNORE + } + } + } + + private static void printUsage() { + ToolRunner.printGenericCommandUsage(System.err); + System.err.println("Usage: dfsthroughput [#reps]"); + System.err.println("Config properties:\n" + + " dfsthroughput.file.size:\tsize of each write/read (10GB)\n" + + " dfsthroughput.buffer.size:\tbuffer size for write/read (4k)\n"); + } + + public int run(String[] args) throws IOException { + // silence the minidfs cluster + Log hadoopLog = LogFactory.getLog("org"); + if (hadoopLog instanceof Log4JLogger) { + ((Log4JLogger) hadoopLog).getLogger().setLevel(Level.WARN); + } + int reps = 1; + if (args.length == 1) { + try { + reps = Integer.parseInt(args[0]); + } catch (NumberFormatException e) { + printUsage(); + return -1; + } + } else if (args.length > 1) { + printUsage(); + return -1; + } + Configuration conf = 
getConf(); + // the size of the file to write + long SIZE = conf.getLong("dfsthroughput.file.size", + 10L * 1024 * 1024 * 1024); + BUFFER_SIZE = conf.getInt("dfsthroughput.buffer.size", 4 * 1024); + + String localDir = conf.get("mapred.temp.dir"); + dir = new LocalDirAllocator("mapred.temp.dir"); + + System.setProperty("test.build.data", localDir); + System.out.println("Local = " + localDir); + ChecksumFileSystem checkedLocal = FileSystem.getLocal(conf); + FileSystem rawLocal = checkedLocal.getRawFileSystem(); + for(int i=0; i < reps; ++i) { + writeAndReadLocalFile("local", conf, SIZE); + writeAndReadFile(rawLocal, "raw", conf, SIZE); + writeAndReadFile(checkedLocal, "checked", conf, SIZE); + } + MiniDFSCluster cluster = null; + try { + cluster = new MiniDFSCluster(conf, 1, true, new String[]{"/foo"}); + cluster.waitActive(); + FileSystem dfs = cluster.getFileSystem(); + for(int i=0; i < reps; ++i) { + writeAndReadFile(dfs, "dfs", conf, SIZE); + } + } finally { + if (cluster != null) { + cluster.shutdown(); + // clean up minidfs junk + rawLocal.delete(new Path(localDir, "dfs"), true); + } + } + return 0; + } + + /** + * @param args + */ + public static void main(String[] args) throws Exception { + int res = ToolRunner.run(new Configuration(), + new BenchmarkThroughput(), args); + System.exit(res); + } + +} diff --git a/src/test/org/apache/hadoop/hdfs/DFSClientAdapter.java b/src/test/org/apache/hadoop/hdfs/DFSClientAdapter.java new file mode 100644 index 0000000..a4031ed --- /dev/null +++ b/src/test/org/apache/hadoop/hdfs/DFSClientAdapter.java @@ -0,0 +1,19 @@ +package org.apache.hadoop.hdfs; + +import org.apache.hadoop.fs.FSDataOutputStream; + +import java.io.IOException; +import java.io.OutputStream; + +public class DFSClientAdapter { + public static void abortForTest(FSDataOutputStream out) throws IOException { + OutputStream stream = out.getWrappedStream(); + + if (stream instanceof DFSClient.DFSOutputStream) { + DFSClient.DFSOutputStream dfsOutputStream = + (DFSClient.DFSOutputStream) stream; + dfsOutputStream.abortForTests(); + } + //no-op otherwise + } +} diff --git a/src/test/org/apache/hadoop/hdfs/DFSTestUtil.java b/src/test/org/apache/hadoop/hdfs/DFSTestUtil.java new file mode 100644 index 0000000..cd23836 --- /dev/null +++ b/src/test/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -0,0 +1,280 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdfs; + +import java.io.BufferedReader; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileReader; +import java.io.IOException; +import java.net.URL; +import java.net.URLConnection; +import java.util.Random; +import junit.framework.TestCase; +import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.BlockLocation; + +/** + */ +public class DFSTestUtil extends TestCase { + + private static Random gen = new Random(); + private static String[] dirNames = { + "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine" + }; + + private int maxLevels;// = 3; + private int maxSize;// = 8*1024; + private int nFiles; + private MyFile[] files; + + /** Creates a new instance of DFSTestUtil + * + * @param testName Name of the test from where this utility is used + * @param nFiles Number of files to be created + * @param maxLevels Maximum number of directory levels + * @param maxSize Maximum size for file + */ + public DFSTestUtil(String testName, int nFiles, int maxLevels, int maxSize) { + this.nFiles = nFiles; + this.maxLevels = maxLevels; + this.maxSize = maxSize; + } + + /** class MyFile contains enough information to recreate the contents of + * a single file. + */ + private class MyFile { + + private String name = ""; + private int size; + private long seed; + + MyFile() { + int nLevels = gen.nextInt(maxLevels); + if (nLevels != 0) { + int[] levels = new int[nLevels]; + for (int idx = 0; idx < nLevels; idx++) { + levels[idx] = gen.nextInt(10); + } + StringBuffer sb = new StringBuffer(); + for (int idx = 0; idx < nLevels; idx++) { + sb.append(dirNames[levels[idx]]); + sb.append("/"); + } + name = sb.toString(); + } + long fidx = -1; + while (fidx < 0) { fidx = gen.nextLong(); } + name = name + Long.toString(fidx); + size = gen.nextInt(maxSize); + seed = gen.nextLong(); + } + + String getName() { return name; } + int getSize() { return size; } + long getSeed() { return seed; } + } + + public void createFiles(FileSystem fs, String topdir) throws IOException { + createFiles(fs, topdir, (short)3); + } + + /** create nFiles with random names and directory hierarchies + * with random (but reproducible) data in them. 
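+ * A hypothetical use from a test case:
+ *   DFSTestUtil util = new DFSTestUtil("myTest", 20, 3, 8*1024);
+ *   util.createFiles(fs, "/srcdat", (short)3);
+ * creates 20 files of up to 8 KB each under /srcdat, spread over a random
+ * shallow directory hierarchy and replicated three times.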
+ */ + public void createFiles(FileSystem fs, String topdir, + short replicationFactor) throws IOException { + files = new MyFile[nFiles]; + + for (int idx = 0; idx < nFiles; idx++) { + files[idx] = new MyFile(); + } + + Path root = new Path(topdir); + + for (int idx = 0; idx < nFiles; idx++) { + createFile(fs, new Path(root, files[idx].getName()), files[idx].getSize(), + replicationFactor, files[idx].getSeed()); + } + } + + public static void createFile(FileSystem fs, Path fileName, long fileLen, + short replFactor, long seed) throws IOException { + if (!fs.mkdirs(fileName.getParent())) { + throw new IOException("Mkdirs failed to create " + + fileName.getParent().toString()); + } + FSDataOutputStream out = null; + try { + out = fs.create(fileName, replFactor); + byte[] toWrite = new byte[1024]; + Random rb = new Random(seed); + long bytesToWrite = fileLen; + while (bytesToWrite>0) { + rb.nextBytes(toWrite); + int bytesToWriteNext = (1024 " + + " [-racks ] " + + " [-simulated] " + + " [-inject startingBlockId numBlocksPerDN]" + + " [-r replicationFactorForInjectedBlocks]" + + " [-d dataNodeDirs]\n" + + " Default datanode direcory is " + DATANODE_DIRS + "\n" + + " Default replication factor for injected blocks is 1\n" + + " Defaul rack is used if -racks is not specified\n" + + " Data nodes are simulated if -simulated OR conf file specifies simulated\n"; + + + static void printUsageExit() { + System.out.println(USAGE); + System.exit(-1); + } + static void printUsageExit(String err) { + System.out.println(err); + printUsageExit(); + } + + public static void main(String[] args) { + int numDataNodes = 0; + int numRacks = 0; + boolean inject = false; + long startingBlockId = 1; + int numBlocksPerDNtoInject = 0; + int replication = 1; + + Configuration conf = new Configuration(); + + for (int i = 0; i < args.length; i++) { // parse command line + if (args[i].equals("-n")) { + if (++i >= args.length || args[i].startsWith("-")) { + printUsageExit("missing number of nodes"); + } + numDataNodes = Integer.parseInt(args[i]); + } else if (args[i].equals("-racks")) { + if (++i >= args.length || args[i].startsWith("-")) { + printUsageExit("Missing number of racks"); + } + numRacks = Integer.parseInt(args[i]); + } else if (args[i].equals("-r")) { + if (++i >= args.length || args[i].startsWith("-")) { + printUsageExit("Missing replicaiton factor"); + } + replication = Integer.parseInt(args[i]); + } else if (args[i].equals("-d")) { + if (++i >= args.length || args[i].startsWith("-")) { + printUsageExit("Missing datanode dirs parameter"); + } + dataNodeDirs = args[i]; + } else if (args[i].equals("-simulated")) { + conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + } else if (args[i].equals("-inject")) { + if (!conf.getBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, + false) ) { + System.out.print("-inject is valid only for simulated"); + printUsageExit(); + } + inject = true; + if (++i >= args.length || args[i].startsWith("-")) { + printUsageExit( + "Missing starting block and number of blocks per DN to inject"); + } + startingBlockId = Integer.parseInt(args[i]); + if (++i >= args.length || args[i].startsWith("-")) { + printUsageExit("Missing number of blocks to inject"); + } + numBlocksPerDNtoInject = Integer.parseInt(args[i]); + } else { + printUsageExit(); + } + } + if (numDataNodes <= 0 || replication <= 0 ) { + printUsageExit("numDataNodes and replication have to be greater than zero"); + } + if (replication > numDataNodes) { + printUsageExit("Replication must be less than or 
equal to numDataNodes"); + + } + String nameNodeAdr = FileSystem.getDefaultUri(conf).getAuthority(); + if (nameNodeAdr == null) { + System.out.println("No name node address and port in config"); + System.exit(-1); + } + boolean simulated = + conf.getBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, false); + System.out.println("Starting " + numDataNodes + + (simulated ? " Simulated " : " ") + + " Data Nodes that will connect to Name Node at " + nameNodeAdr); + + System.setProperty("test.build.data", dataNodeDirs); + + MiniDFSCluster mc = new MiniDFSCluster(); + try { + mc.formatDataNodeDirs(); + } catch (IOException e) { + System.out.println("Error formating data node dirs:" + e); + } + + String[] rack4DataNode = null; + if (numRacks > 0) { + System.out.println("Using " + numRacks + " racks: "); + String rackPrefix = getUniqueRackPrefix(); + + rack4DataNode = new String[numDataNodes]; + for (int i = 0; i < numDataNodes; ++i ) { + //rack4DataNode[i] = racks[i%numRacks]; + rack4DataNode[i] = rackPrefix + "-" + i%numRacks; + System.out.println("Data Node " + i + " using " + rack4DataNode[i]); + + + } + } + try { + mc.startDataNodes(conf, numDataNodes, true, StartupOption.REGULAR, + rack4DataNode); + if (inject) { + long blockSize = 10; + System.out.println("Injecting " + numBlocksPerDNtoInject + + " blocks in each DN starting at blockId " + startingBlockId + + " with blocksize of " + blockSize); + Block[] blocks = new Block[numBlocksPerDNtoInject]; + long blkid = startingBlockId; + for (int i_dn = 0; i_dn < numDataNodes; ++i_dn) { + for (int i = 0; i < blocks.length; ++i) { + blocks[i] = new Block(blkid++, blockSize, + CreateEditsLog.BLOCK_GENERATION_STAMP); + } + for (int i = 1; i <= replication; ++i) { + // inject blocks for dn_i into dn_i and replica in dn_i's neighbors + mc.injectBlocks((i_dn + i- 1)% numDataNodes, blocks); + System.out.println("Injecting blocks of dn " + i_dn + " into dn" + + ((i_dn + i- 1)% numDataNodes)); + } + } + System.out.println("Created blocks from Bids " + + startingBlockId + " to " + (blkid -1)); + } + + } catch (IOException e) { + System.out.println("Error creating data node:" + e); + } + } + + /* + * There is high probability that the rack id generated here will + * not conflict with those of other data node cluster. + * Not perfect but mostly unique rack ids are good enough + */ + static private String getUniqueRackPrefix() { + + String ip = "unknownIP"; + try { + ip = DNS.getDefaultIP("default"); + } catch (UnknownHostException ignored) { + System.out.println("Could not find ip address of \"default\" inteface."); + } + + int rand = 0; + try { + rand = SecureRandom.getInstance("SHA1PRNG").nextInt(Integer.MAX_VALUE); + } catch (NoSuchAlgorithmException e) { + rand = (new Random()).nextInt(Integer.MAX_VALUE); + } + return "/Rack-" + rand + "-"+ ip + "-" + + System.currentTimeMillis(); + } +} diff --git a/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java b/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java new file mode 100644 index 0000000..14480bf --- /dev/null +++ b/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -0,0 +1,957 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs; + +import java.io.File; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Collection; +import java.nio.channels.FileChannel; +import java.util.Random; +import java.io.RandomAccessFile; + +import javax.security.auth.login.LoginException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.net.*; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface; +import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.tools.DFSAdmin; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.security.*; +import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.StringUtils; + +/** + * This class creates a single-process DFS cluster for junit testing. + * The data directories for non-simulated DFS are under the testing directory. + * For simulated data nodes, no underlying fs storage is used. + */ +public class MiniDFSCluster { + + public class DataNodeProperties { + DataNode datanode; + Configuration conf; + String[] dnArgs; + + DataNodeProperties(DataNode node, Configuration conf, String[] args) { + this.datanode = node; + this.conf = conf; + this.dnArgs = args; + } + } + + private Configuration conf; + private NameNode nameNode; + private int numDataNodes; + private ArrayList dataNodes = + new ArrayList(); + private File base_dir; + private File data_dir; + + // wait until namenode has left safe mode? + private boolean waitSafeMode = true; + + /** + * This null constructor is used only when wishing to start a data node cluster + * without a name node (ie when the name node is started elsewhere). + */ + public MiniDFSCluster() { + } + + /** + * Modify the config and start up the servers with the given operation. + * Servers will be started on free ports. + *

+ * The caller must manage the creation of NameNode and DataNode directories + * and have already set dfs.name.dir and dfs.data.dir in the given conf. + * + * @param conf the base configuration to use in starting the servers. This + * will be modified as necessary. + * @param numDataNodes Number of DataNodes to start; may be zero + * @param nameNodeOperation the operation with which to start the servers. If null + * or StartupOption.FORMAT, then StartupOption.REGULAR will be used. + */ + public MiniDFSCluster(Configuration conf, + int numDataNodes, + StartupOption nameNodeOperation) throws IOException { + this(0, conf, numDataNodes, false, false, false, nameNodeOperation, + null, null, null); + } + + /** + * Modify the config and start up the servers. The rpc and info ports for + * servers are guaranteed to use free ports. + *

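+   * A typical test construction, with two DataNodes on default racks (the node
+   * count and the try/finally shape are only an illustrative sketch):
+   * <pre>
+   *   Configuration conf = new Configuration();
+   *   MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+   *   try {
+   *     FileSystem fs = cluster.getFileSystem();
+   *     // ... exercise the cluster through fs ...
+   *   } finally {
+   *     cluster.shutdown();
+   *   }
+   * </pre>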
+ * NameNode and DataNode directory creation and configuration will be + * managed by this class. + * + * @param conf the base configuration to use in starting the servers. This + * will be modified as necessary. + * @param numDataNodes Number of DataNodes to start; may be zero + * @param format if true, format the NameNode and DataNodes before starting up + * @param racks array of strings indicating the rack that each DataNode is on + */ + public MiniDFSCluster(Configuration conf, + int numDataNodes, + boolean format, + String[] racks) throws IOException { + this(0, conf, numDataNodes, format, true, true, null, racks, null, null); + } + + /** + * Modify the config and start up the servers. The rpc and info ports for + * servers are guaranteed to use free ports. + *

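+   * A sketch for a test that must not block on safe mode during construction
+   * (conf is the test's Configuration; the node count is an arbitrary example):
+   * <pre>
+   *   MiniDFSCluster cluster =
+   *       new MiniDFSCluster(conf, 2, true, null, false);   // waitSafeMode == false
+   * </pre>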
+ * NameNode and DataNode directory creation and configuration will be + * managed by this class. + * + * @param conf the base configuration to use in starting the servers. This + * will be modified as necessary. + * @param numDataNodes Number of DataNodes to start; may be zero + * @param format if true, format the NameNode and DataNodes before starting up + * @param racks array of strings indicating the rack that each DataNode is on + * @param wait until namenode has left safe mode? + */ + public MiniDFSCluster(Configuration conf, + int numDataNodes, + boolean format, + String[] racks, + boolean waitSafeMode) throws IOException { + this(0, conf, numDataNodes, format, true, true, null, racks, null, null, + waitSafeMode); + } + + /** + * Modify the config and start up the servers. The rpc and info ports for + * servers are guaranteed to use free ports. + *

+ * NameNode and DataNode directory creation and configuration will be + * managed by this class. + * + * @param conf the base configuration to use in starting the servers. This + * will be modified as necessary. + * @param numDataNodes Number of DataNodes to start; may be zero + * @param format if true, format the NameNode and DataNodes before starting up + * @param racks array of strings indicating the rack that each DataNode is on + * @param hosts array of strings indicating the hostname for each DataNode + */ + public MiniDFSCluster(Configuration conf, + int numDataNodes, + boolean format, + String[] racks, String[] hosts) throws IOException { + this(0, conf, numDataNodes, format, true, true, null, racks, hosts, null); + } + + /** + * NOTE: if possible, the other constructors that don't have nameNode port + * parameter should be used as they will ensure that the servers use free ports. + *

+ * Modify the config and start up the servers. + * + * @param nameNodePort suggestion for which rpc port to use. caller should + * use getNameNodePort() to get the actual port used. + * @param conf the base configuration to use in starting the servers. This + * will be modified as necessary. + * @param numDataNodes Number of DataNodes to start; may be zero + * @param format if true, format the NameNode and DataNodes before starting up + * @param manageDfsDirs if true, the data directories for servers will be + * created and dfs.name.dir and dfs.data.dir will be set in the conf + * @param operation the operation with which to start the servers. If null + * or StartupOption.FORMAT, then StartupOption.REGULAR will be used. + * @param racks array of strings indicating the rack that each DataNode is on + */ + public MiniDFSCluster(int nameNodePort, + Configuration conf, + int numDataNodes, + boolean format, + boolean manageDfsDirs, + StartupOption operation, + String[] racks) throws IOException { + this(nameNodePort, conf, numDataNodes, format, manageDfsDirs, manageDfsDirs, + operation, racks, null, null); + } + + /** + * NOTE: if possible, the other constructors that don't have nameNode port + * parameter should be used as they will ensure that the servers use free ports. + *

+ * Modify the config and start up the servers. + * + * @param nameNodePort suggestion for which rpc port to use. caller should + * use getNameNodePort() to get the actual port used. + * @param conf the base configuration to use in starting the servers. This + * will be modified as necessary. + * @param numDataNodes Number of DataNodes to start; may be zero + * @param format if true, format the NameNode and DataNodes before starting up + * @param manageDfsDirs if true, the data directories for servers will be + * created and dfs.name.dir and dfs.data.dir will be set in the conf + * @param operation the operation with which to start the servers. If null + * or StartupOption.FORMAT, then StartupOption.REGULAR will be used. + * @param racks array of strings indicating the rack that each DataNode is on + * @param simulatedCapacities array of capacities of the simulated data nodes + */ + public MiniDFSCluster(int nameNodePort, + Configuration conf, + int numDataNodes, + boolean format, + boolean manageDfsDirs, + StartupOption operation, + String[] racks, + long[] simulatedCapacities) throws IOException { + this(nameNodePort, conf, numDataNodes, format, manageDfsDirs, manageDfsDirs, + operation, racks, null, simulatedCapacities); + } + + /** + * NOTE: if possible, the other constructors that don't have nameNode port + * parameter should be used as they will ensure that the servers use free ports. + *

+ * Modify the config and start up the servers. + * + * @param nameNodePort suggestion for which rpc port to use. caller should + * use getNameNodePort() to get the actual port used. + * @param conf the base configuration to use in starting the servers. This + * will be modified as necessary. + * @param numDataNodes Number of DataNodes to start; may be zero + * @param format if true, format the NameNode and DataNodes before starting up + * @param manageNameDfsDirs if true, the data directories for servers will be + * created and dfs.name.dir and dfs.data.dir will be set in the conf + * @param manageDataDfsDirs if true, the data directories for datanodes will + * be created and dfs.data.dir set to same in the conf + * @param operation the operation with which to start the servers. If null + * or StartupOption.FORMAT, then StartupOption.REGULAR will be used. + * @param racks array of strings indicating the rack that each DataNode is on + * @param hosts array of strings indicating the hostnames of each DataNode + * @param simulatedCapacities array of capacities of the simulated data nodes + */ + public MiniDFSCluster(int nameNodePort, + Configuration conf, + int numDataNodes, + boolean format, + boolean manageNameDfsDirs, + boolean manageDataDfsDirs, + StartupOption operation, + String[] racks, String hosts[], + long[] simulatedCapacities) throws IOException { + this(nameNodePort, conf, numDataNodes, format, manageNameDfsDirs, + manageDataDfsDirs, operation, racks, hosts, simulatedCapacities, true); + } + + + + /** + * NOTE: if possible, the other constructors that don't have nameNode port + * parameter should be used as they will ensure that the servers use free ports. + *

+ * Modify the config and start up the servers. + * + * @param nameNodePort suggestion for which rpc port to use. caller should + * use getNameNodePort() to get the actual port used. + * @param conf the base configuration to use in starting the servers. This + * will be modified as necessary. + * @param numDataNodes Number of DataNodes to start; may be zero + * @param format if true, format the NameNode and DataNodes before starting up + * @param manageNameDfsDirs if true, the data directories for servers will be + * created and dfs.name.dir and dfs.data.dir will be set in the conf + * @param manageDataDfsDirs if true, the data directories for datanodes will + * be created and dfs.data.dir set to same in the conf + * @param operation the operation with which to start the servers. If null + * or StartupOption.FORMAT, then StartupOption.REGULAR will be used. + * @param racks array of strings indicating the rack that each DataNode is on + * @param hosts array of strings indicating the hostnames of each DataNode + * @param simulatedCapacities array of capacities of the simulated data nodes + * @param wait until namenode has left safe mode? + */ + public MiniDFSCluster(int nameNodePort, + Configuration conf, + int numDataNodes, + boolean format, + boolean manageNameDfsDirs, + boolean manageDataDfsDirs, + StartupOption operation, + String[] racks, String hosts[], + long[] simulatedCapacities, + boolean waitSafeMode) throws IOException { + this.conf = conf; + this.waitSafeMode = waitSafeMode; + try { + UserGroupInformation.setCurrentUser(UnixUserGroupInformation.login(conf)); + } catch (LoginException e) { + IOException ioe = new IOException(); + ioe.initCause(e); + throw ioe; + } + base_dir = new File(System.getProperty("test.build.data", "build/test/data"), "dfs/"); + data_dir = new File(base_dir, "data"); + + // Setup the NameNode configuration + FileSystem.setDefaultUri(conf, "hdfs://localhost:"+ Integer.toString(nameNodePort)); + conf.set("dfs.http.address", "127.0.0.1:0"); + if (manageNameDfsDirs) { + conf.set("dfs.name.dir", new File(base_dir, "name1").getPath()+","+ + new File(base_dir, "name2").getPath()); + conf.set("fs.checkpoint.dir", new File(base_dir, "namesecondary1"). + getPath()+"," + new File(base_dir, "namesecondary2").getPath()); + } + + int replication = conf.getInt("dfs.replication", 3); + conf.setInt("dfs.replication", Math.min(replication, numDataNodes)); + conf.setInt("dfs.safemode.extension", 0); + conf.setInt("dfs.namenode.decommission.interval", 3); // 3 second + + // Format and clean out DataNode directories + if (format) { + if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) { + throw new IOException("Cannot remove data directory: " + data_dir); + } + NameNode.format(conf); + } + + // Start the NameNode + String[] args = (operation == null || + operation == StartupOption.FORMAT || + operation == StartupOption.REGULAR) ? + new String[] {} : new String[] {operation.getName()}; + conf.setClass("topology.node.switch.mapping.impl", + StaticMapping.class, DNSToSwitchMapping.class); + + nameNode = NameNode.createNameNode(args, conf); + + + // Start the DataNodes + if (numDataNodes > 0) { + startDataNodes(conf, numDataNodes, manageDataDfsDirs, operation, racks, + hosts, simulatedCapacities); + } + waitClusterUp(); + } + + /** + * wait for the cluster to get out of + * safemode. 
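+   *
+   * The constructor above already calls this before returning, so an explicit
+   * call is normally only needed when DataNodes are added afterwards, as in
+   * this sketch (node counts are arbitrary examples):
+   * <pre>
+   *   Configuration conf = new Configuration();
+   *   MiniDFSCluster cluster = new MiniDFSCluster(conf, 0, true, null);  // NameNode only
+   *   cluster.startDataNodes(conf, 2, true, null, null);                 // add two DataNodes
+   *   cluster.waitClusterUp();    // block until the NameNode reports it has left safe mode
+   * </pre>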
+ */ + public void waitClusterUp() { + if (numDataNodes > 0) { + while (!isClusterUp()) { + try { + System.err.println("Waiting for the Mini HDFS Cluster to start..."); + Thread.sleep(1000); + } catch (InterruptedException e) { + } + } + } + } + + /** + * Modify the config and start up additional DataNodes. The info port for + * DataNodes is guaranteed to use a free port. + * + * Data nodes can run with the name node in the mini cluster or + * a real name node. For example, running with a real name node is useful + * when running simulated data nodes with a real name node. + * If minicluster's name node is null assume that the conf has been + * set with the right address:port of the name node. + * + * @param conf the base configuration to use in starting the DataNodes. This + * will be modified as necessary. + * @param numDataNodes Number of DataNodes to start; may be zero + * @param manageDfsDirs if true, the data directories for DataNodes will be + * created and dfs.data.dir will be set in the conf + * @param operation the operation with which to start the DataNodes. If null + * or StartupOption.FORMAT, then StartupOption.REGULAR will be used. + * @param racks array of strings indicating the rack that each DataNode is on + * @param hosts array of strings indicating the hostnames for each DataNode + * @param simulatedCapacities array of capacities of the simulated data nodes + * + * @throws IllegalStateException if NameNode has been shutdown + */ + public synchronized void startDataNodes(Configuration conf, int numDataNodes, + boolean manageDfsDirs, StartupOption operation, + String[] racks, String[] hosts, + long[] simulatedCapacities) throws IOException { + + int curDatanodesNum = dataNodes.size(); + // for mincluster's the default initialDelay for BRs is 0 + if (conf.get("dfs.blockreport.initialDelay") == null) { + conf.setLong("dfs.blockreport.initialDelay", 0); + } + // If minicluster's name node is null assume that the conf has been + // set with the right address:port of the name node. 
+ // + if (nameNode != null) { // set conf from the name node + InetSocketAddress nnAddr = nameNode.getNameNodeAddress(); + if (nnAddr != null) { + int nameNodePort = nnAddr.getPort(); + FileSystem.setDefaultUri(conf, "hdfs://" + nnAddr.getHostName() + ":" + + Integer.toString(nameNodePort)); + } + } + + if (racks != null && numDataNodes > racks.length ) { + throw new IllegalArgumentException( "The length of racks [" + racks.length + + "] is less than the number of datanodes [" + numDataNodes + "]."); + } + if (hosts != null && numDataNodes > hosts.length ) { + throw new IllegalArgumentException( "The length of hosts [" + hosts.length + + "] is less than the number of datanodes [" + numDataNodes + "]."); + } + //Generate some hostnames if required + if (racks != null && hosts == null) { + System.out.println("Generating host names for datanodes"); + hosts = new String[numDataNodes]; + for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) { + hosts[i - curDatanodesNum] = "host" + i + ".foo.com"; + } + } + + if (simulatedCapacities != null + && numDataNodes > simulatedCapacities.length) { + throw new IllegalArgumentException( "The length of simulatedCapacities [" + + simulatedCapacities.length + + "] is less than the number of datanodes [" + numDataNodes + "]."); + } + + // Set up the right ports for the datanodes + conf.set("dfs.datanode.address", "127.0.0.1:0"); + conf.set("dfs.datanode.http.address", "127.0.0.1:0"); + conf.set("dfs.datanode.ipc.address", "127.0.0.1:0"); + + + String [] dnArgs = (operation == null || + operation != StartupOption.ROLLBACK) ? + null : new String[] {operation.getName()}; + + + for (int i = curDatanodesNum; i < curDatanodesNum+numDataNodes; i++) { + Configuration dnConf = new Configuration(conf); + if (manageDfsDirs) { + File dir1 = new File(data_dir, "data"+(2*i+1)); + File dir2 = new File(data_dir, "data"+(2*i+2)); + dir1.mkdirs(); + dir2.mkdirs(); + if (!dir1.isDirectory() || !dir2.isDirectory()) { + throw new IOException("Mkdirs failed to create directory for DataNode " + + i + ": " + dir1 + " or " + dir2); + } + dnConf.set("dfs.data.dir", dir1.getPath() + "," + dir2.getPath()); + } + if (simulatedCapacities != null) { + dnConf.setBoolean("dfs.datanode.simulateddatastorage", true); + dnConf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY, + simulatedCapacities[i-curDatanodesNum]); + } + System.out.println("Starting DataNode " + i + " with dfs.data.dir: " + + dnConf.get("dfs.data.dir")); + if (hosts != null) { + dnConf.set("slave.host.name", hosts[i - curDatanodesNum]); + System.out.println("Starting DataNode " + i + " with hostname set to: " + + dnConf.get("slave.host.name")); + } + if (racks != null) { + String name = hosts[i - curDatanodesNum]; + System.out.println("Adding node with hostname : " + name + " to rack "+ + racks[i-curDatanodesNum]); + StaticMapping.addNodeToRack(name, + racks[i-curDatanodesNum]); + } + Configuration newconf = new Configuration(dnConf); // save config + if (hosts != null) { + NetUtils.addStaticResolution(hosts[i - curDatanodesNum], "localhost"); + } + DataNode dn = DataNode.instantiateDataNode(dnArgs, dnConf); + //since the HDFS does things based on IP:port, we need to add the mapping + //for IP:port to rackId + String ipAddr = dn.getSelfAddr().getAddress().getHostAddress(); + if (racks != null) { + int port = dn.getSelfAddr().getPort(); + System.out.println("Adding node with IP:port : " + ipAddr + ":" + port+ + " to rack " + racks[i-curDatanodesNum]); + StaticMapping.addNodeToRack(ipAddr + ":" + port, + 
racks[i-curDatanodesNum]); + } + DataNode.runDatanodeDaemon(dn); + dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs)); + } + curDatanodesNum += numDataNodes; + this.numDataNodes += numDataNodes; + waitActive(); + } + + + + /** + * Modify the config and start up the DataNodes. The info port for + * DataNodes is guaranteed to use a free port. + * + * @param conf the base configuration to use in starting the DataNodes. This + * will be modified as necessary. + * @param numDataNodes Number of DataNodes to start; may be zero + * @param manageDfsDirs if true, the data directories for DataNodes will be + * created and dfs.data.dir will be set in the conf + * @param operation the operation with which to start the DataNodes. If null + * or StartupOption.FORMAT, then StartupOption.REGULAR will be used. + * @param racks array of strings indicating the rack that each DataNode is on + * + * @throws IllegalStateException if NameNode has been shutdown + */ + + public void startDataNodes(Configuration conf, int numDataNodes, + boolean manageDfsDirs, StartupOption operation, + String[] racks + ) throws IOException { + startDataNodes( conf, numDataNodes, manageDfsDirs, operation, racks, null, null); + } + + /** + * Modify the config and start up additional DataNodes. The info port for + * DataNodes is guaranteed to use a free port. + * + * Data nodes can run with the name node in the mini cluster or + * a real name node. For example, running with a real name node is useful + * when running simulated data nodes with a real name node. + * If minicluster's name node is null assume that the conf has been + * set with the right address:port of the name node. + * + * @param conf the base configuration to use in starting the DataNodes. This + * will be modified as necessary. + * @param numDataNodes Number of DataNodes to start; may be zero + * @param manageDfsDirs if true, the data directories for DataNodes will be + * created and dfs.data.dir will be set in the conf + * @param operation the operation with which to start the DataNodes. If null + * or StartupOption.FORMAT, then StartupOption.REGULAR will be used. + * @param racks array of strings indicating the rack that each DataNode is on + * @param simulatedCapacities array of capacities of the simulated data nodes + * + * @throws IllegalStateException if NameNode has been shutdown + */ + public void startDataNodes(Configuration conf, int numDataNodes, + boolean manageDfsDirs, StartupOption operation, + String[] racks, + long[] simulatedCapacities) throws IOException { + startDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks, null, + simulatedCapacities); + + } + /** + * If the NameNode is running, attempt to finalize a previous upgrade. + * When this method return, the NameNode should be finalized, but + * DataNodes may not be since that occurs asynchronously. + * + * @throws IllegalStateException if the Namenode is not running. + */ + public void finalizeCluster(Configuration conf) throws Exception { + if (nameNode == null) { + throw new IllegalStateException("Attempting to finalize " + + "Namenode but it is not running"); + } + ToolRunner.run(new DFSAdmin(conf), new String[] {"-finalizeUpgrade"}); + } + + /** + * Gets the started NameNode. May be null. + */ + public NameNode getNameNode() { + return nameNode; + } + + /** + * Gets a list of the started DataNodes. May be empty. 
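+   * For example, a purely illustrative walk over the running DataNodes:
+   * <pre>
+   *   for (Object o : cluster.getDataNodes()) {
+   *     DataNode dn = (DataNode) o;
+   *     System.out.println("DataNode up at " + dn.getSelfAddr());
+   *   }
+   * </pre>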
+ */ + public ArrayList getDataNodes() { + ArrayList list = new ArrayList(); + for (int i = 0; i < dataNodes.size(); i++) { + DataNode node = dataNodes.get(i).datanode; + list.add(node); + } + return list; + } + + /** @return the datanode having the ipc server listen port */ + public DataNode getDataNode(int ipcPort) { + for(DataNode dn : getDataNodes()) { + if (dn.ipcServer.getListenerAddress().getPort() == ipcPort) { + return dn; + } + } + return null; + } + + /** + * Gets the rpc port used by the NameNode, because the caller + * supplied port is not necessarily the actual port used. + */ + public int getNameNodePort() { + return nameNode.getNameNodeAddress().getPort(); + } + + /** + * Shut down the servers that are up. + */ + public void shutdown() { + System.out.println("Shutting down the Mini HDFS Cluster"); + shutdownDataNodes(); + if (nameNode != null) { + nameNode.stop(); + nameNode.join(); + nameNode = null; + } + } + + /** + * Shutdown all DataNodes started by this class. The NameNode + * is left running so that new DataNodes may be started. + */ + public void shutdownDataNodes() { + for (int i = dataNodes.size()-1; i >= 0; i--) { + System.out.println("Shutting down DataNode " + i); + DataNode dn = dataNodes.remove(i).datanode; + dn.shutdown(); + numDataNodes--; + } + } + + /** + * Shutdown namenode. + */ + public synchronized void shutdownNameNode() { + if (nameNode != null) { + System.out.println("Shutting down the namenode"); + nameNode.stop(); + nameNode.join(); + nameNode = null; + } + } + + public synchronized void restartNameNode() throws IOException { + shutdownNameNode(); + nameNode = NameNode.createNameNode(new String[] {}, conf); + waitClusterUp(); + System.out.println("Restarted the namenode"); + int failedCount = 0; + while (true) { + try { + waitActive(); + break; + } catch (IOException e) { + failedCount++; + // Cached RPC connection to namenode, if any, is expected to fail once + if (failedCount > 5) { + System.out.println("Tried waitActive() " + failedCount + + " time(s) and failed, giving up. 
" + + StringUtils.stringifyException(e)); + throw e; + } + } + } + System.out.println("Cluster is active"); + } + + + /* + * Corrupt a block on all datanode + */ + void corruptBlockOnDataNodes(String blockName) throws Exception{ + for (int i=0; i < dataNodes.size(); i++) + corruptBlockOnDataNode(i,blockName); + } + + /* + * Corrupt a block on a particular datanode + */ + boolean corruptBlockOnDataNode(int i, String blockName) throws Exception { + Random random = new Random(); + boolean corrupted = false; + File dataDir = new File(System.getProperty("test.build.data", "build/test/data"), "dfs/data"); + if (i < 0 || i >= dataNodes.size()) + return false; + for (int dn = i*2; dn < i*2+2; dn++) { + File blockFile = new File(dataDir, "data" + (dn+1) + "/current/" + + blockName); + System.out.println("Corrupting for: " + blockFile); + if (blockFile.exists()) { + // Corrupt replica by writing random bytes into replica + RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw"); + FileChannel channel = raFile.getChannel(); + String badString = "BADBAD"; + int rand = random.nextInt((int)channel.size()/2); + raFile.seek(rand); + raFile.write(badString.getBytes()); + raFile.close(); + } + corrupted = true; + } + return corrupted; + } + + /* + * Shutdown a particular datanode + */ + public DataNodeProperties stopDataNode(int i) { + if (i < 0 || i >= dataNodes.size()) { + return null; + } + DataNodeProperties dnprop = dataNodes.remove(i); + DataNode dn = dnprop.datanode; + System.out.println("MiniDFSCluster Stopping DataNode " + + dn.dnRegistration.getName() + + " from a total of " + (dataNodes.size() + 1) + + " datanodes."); + dn.shutdown(); + numDataNodes--; + return dnprop; + } + + /** + * Restart a datanode + * @param dnprop datanode's property + * @return true if restarting is successful + * @throws IOException + */ + public synchronized boolean restartDataNode(DataNodeProperties dnprop) + throws IOException { + Configuration conf = dnprop.conf; + String[] args = dnprop.dnArgs; + Configuration newconf = new Configuration(conf); // save cloned config + dataNodes.add(new DataNodeProperties( + DataNode.createDataNode(args, conf), + newconf, args)); + numDataNodes++; + return true; + + } + /* + * Restart a particular datanode + */ + public synchronized boolean restartDataNode(int i) throws IOException { + DataNodeProperties dnprop = stopDataNode(i); + if (dnprop == null) { + return false; + } else { + return restartDataNode(dnprop); + } + } + + /* + * Shutdown a datanode by name. + */ + public synchronized DataNodeProperties stopDataNode(String name) { + int i; + for (i = 0; i < dataNodes.size(); i++) { + DataNode dn = dataNodes.get(i).datanode; + if (dn.dnRegistration.getName().equals(name)) { + break; + } + } + return stopDataNode(i); + } + + /** + * Returns true if the NameNode is running and is out of Safe Mode + * or if waiting for safe mode is disabled. + */ + public boolean isClusterUp() { + if (nameNode == null) { + return false; + } + try { + long[] sizes = nameNode.getStats(); + boolean isUp = false; + synchronized (this) { + isUp = ((!nameNode.isInSafeMode() || !waitSafeMode) && sizes[0] != 0); + } + return isUp; + } catch (IOException ie) { + return false; + } + } + + /** + * Returns true if there is at least one DataNode running. + */ + public boolean isDataNodeUp() { + if (dataNodes == null || dataNodes.size() == 0) { + return false; + } + return true; + } + + /** + * Get a client handle to the DFS cluster. 
+   */
+  public FileSystem getFileSystem() throws IOException {
+    return FileSystem.get(conf);
+  }
+
+  /**
+   * Get the directories where the namenode stores its image.
+   */
+  public Collection getNameDirs() {
+    return FSNamesystem.getNamespaceDirs(conf);
+  }
+
+  /**
+   * Get the directories where the namenode stores its edits.
+   */
+  public Collection getNameEditsDirs() {
+    return FSNamesystem.getNamespaceEditsDirs(conf);
+  }
+
+  /**
+   * Wait until the cluster is active and running.
+   */
+  public void waitActive() throws IOException {
+    if (nameNode == null) {
+      return;
+    }
+
+    InetSocketAddress addr = getNameNode().getNameNodeAddress();
+    // Wait for the client server to start if we have two configured
+    while (addr == null) {
+      try {
+        Thread.sleep(500);
+      } catch (Exception e) {
+      }
+      addr = getNameNode().getNameNodeAddress();
+    }
+    addr = new InetSocketAddress("localhost", getNameNodePort());
+    DFSClient client = new DFSClient(addr, conf);
+
+    // make sure all datanodes are alive
+    while(client.datanodeReport(DatanodeReportType.LIVE).length
+        != numDataNodes) {
+      try {
+        Thread.sleep(500);
+      } catch (Exception e) {
+      }
+    }
+
+    client.close();
+  }
+
+  public void formatDataNodeDirs() throws IOException {
+    base_dir = new File(System.getProperty("test.build.data", "build/test/data"), "dfs/");
+    data_dir = new File(base_dir, "data");
+    if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
+      throw new IOException("Cannot remove data directory: " + data_dir);
+    }
+  }
+
+  /**
+   *
+   * @param dataNodeIndex - data node whose block report is desired - the index is same as for getDataNodes()
+   * @return the block report for the specified data node
+   */
+  public Block[] getBlockReport(int dataNodeIndex) {
+    if (dataNodeIndex < 0 || dataNodeIndex >= dataNodes.size()) {
+      throw new IndexOutOfBoundsException();
+    }
+    return dataNodes.get(dataNodeIndex).datanode.getFSDataset().getBlockReport();
+  }
+
+
+  /**
+   *
+   * @return block reports from all data nodes
+   *    Block[] is indexed in the same order as the list of datanodes returned by getDataNodes()
+   */
+  public Block[][] getAllBlockReports() {
+    int numDataNodes = dataNodes.size();
+    Block[][] result = new Block[numDataNodes][];
+    for (int i = 0; i < numDataNodes; ++i) {
+      result[i] = getBlockReport(i);
+    }
+    return result;
+  }
+
+
+  /**
+   * This method is valid only if the data nodes have simulated data
+   * @param dataNodeIndex - data node in which to inject - the index is same as for getDataNodes()
+   * @param blocksToInject - the blocks
+   * @throws IOException
+   *             if not simulatedFSDataset
+   *             if any of blocks already exist in the data node
+   *
+   */
+  public void injectBlocks(int dataNodeIndex, Block[] blocksToInject) throws IOException {
+    if (dataNodeIndex < 0 || dataNodeIndex >= dataNodes.size()) {
+      throw new IndexOutOfBoundsException();
+    }
+    FSDatasetInterface dataSet = dataNodes.get(dataNodeIndex).datanode.getFSDataset();
+    if (!(dataSet instanceof SimulatedFSDataset)) {
+      throw new IOException("injectBlocks is valid only for SimulatedFSDataset");
+    }
+    SimulatedFSDataset sdataset = (SimulatedFSDataset) dataSet;
+    sdataset.injectBlocks(blocksToInject);
+    dataNodes.get(dataNodeIndex).datanode.scheduleBlockReport(0);
+  }
+
+  /**
+   * This method is valid only if the data nodes have simulated data
+   * @param blocksToInject - blocksToInject[] is indexed in the same order as the list
+   *             of datanodes returned by getDataNodes()
+   * @throws IOException
+   *             if not simulatedFSDataset
+   *             if any of blocks already exist in the data nodes
+   *             Note the
rest of the blocks are not injected. + */ + public void injectBlocks(Block[][] blocksToInject) throws IOException { + if (blocksToInject.length > dataNodes.size()) { + throw new IndexOutOfBoundsException(); + } + for (int i = 0; i < blocksToInject.length; ++i) { + injectBlocks(i, blocksToInject[i]); + } + } + + /** + * Set the softLimit and hardLimit of client lease periods + */ + void setLeasePeriod(long soft, long hard) { + nameNode.namesystem.leaseManager.setLeasePeriod(soft, hard); + nameNode.namesystem.lmthread.interrupt(); + } + + /** + * Returns the current set of datanodes + */ + DataNode[] listDataNodes() { + DataNode[] list = new DataNode[dataNodes.size()]; + for (int i = 0; i < dataNodes.size(); i++) { + list[i] = dataNodes.get(i).datanode; + } + return list; + } + + /** + * Access to the data directory used for Datanodes + * @throws IOException + */ + public String getDataDirectory() { + return data_dir.getAbsolutePath(); + } +} diff --git a/src/test/org/apache/hadoop/hdfs/NNBench.java b/src/test/org/apache/hadoop/hdfs/NNBench.java new file mode 100644 index 0000000..e05e3b9 --- /dev/null +++ b/src/test/org/apache/hadoop/hdfs/NNBench.java @@ -0,0 +1,996 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdfs; + +import java.io.IOException; +import java.util.Date; +import java.io.DataInputStream; +import java.io.FileOutputStream; +import java.io.InputStreamReader; +import java.io.PrintStream; +import java.io.File; +import java.io.BufferedReader; +import java.util.StringTokenizer; +import java.net.InetAddress; +import java.text.SimpleDateFormat; +import java.util.Iterator; + +import org.apache.commons.logging.LogFactory; +import org.apache.commons.logging.Log; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; + +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.SequenceFile.CompressionType; +import org.apache.hadoop.io.SequenceFile; + +import org.apache.hadoop.mapred.FileInputFormat; +import org.apache.hadoop.mapred.FileOutputFormat; +import org.apache.hadoop.mapred.Mapper; +import org.apache.hadoop.mapred.SequenceFileInputFormat; +import org.apache.hadoop.mapred.JobClient; +import org.apache.hadoop.mapred.MapReduceBase; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.mapred.OutputCollector; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.Reducer; + +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; + +/** + * This program executes a specified operation that applies load to + * the NameNode. + * + * When run simultaneously on multiple nodes, this program functions + * as a stress-test and benchmark for namenode, especially when + * the number of bytes written to each file is small. + * + * Valid operations are: + * create_write + * open_read + * rename + * delete + * + * NOTE: The open_read, rename and delete operations assume that the files + * they operate on are already available. The create_write operation + * must be run before running the other operations. 
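+ *
+ * A sketch of a programmatic invocation; the map and reduce counts are
+ * illustrative, and flags other than the ones shown here are omitted:
+ * <pre>
+ *   String[] benchArgs = new String[] {
+ *       "-operation", "create_write",   // must run before open_read, rename or delete
+ *       "-maps", "4",
+ *       "-reduces", "1"
+ *   };
+ *   int rc = ToolRunner.run(new Configuration(), new NNBench(), benchArgs);
+ * </pre>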
+ */ + +public class NNBench extends Configured implements Tool { + private static final Log LOG = LogFactory.getLog( + "org.apache.hadoop.hdfs.NNBench"); + + protected static String CONTROL_DIR_NAME = "control"; + protected static String OUTPUT_DIR_NAME = "output"; + protected static String DATA_DIR_NAME = "data"; + protected static final String DEFAULT_RES_FILE_NAME = "NNBench_results.log"; + protected static final String NNBENCH_VERSION = "NameNode Benchmark 0.4"; + + public static String operation = "none"; + public static long numberOfMaps = 1l; // default is 1 + public static long numberOfReduces = 1l; // default is 1 + public static long startTime = + System.currentTimeMillis() + (120 * 1000); // default is 'now' + 2min + public static long blockSize = 1l; // default is 1 + public static int bytesToWrite = 0; // default is 0 + public static long bytesPerChecksum = 1l; // default is 1 + public static long numberOfFiles = 1l; // default is 1 + public static short replicationFactorPerFile = 1; // default is 1 + public static String baseDir = "/benchmarks/NNBench"; // default + public static boolean readFileAfterOpen = false; // default is to not read + + // Supported operations + private static final String OP_CREATE_WRITE = "create_write"; + private static final String OP_OPEN_READ = "open_read"; + private static final String OP_RENAME = "rename"; + private static final String OP_DELETE = "delete"; + + // To display in the format that matches the NN and DN log format + // Example: 2007-10-26 00:01:19,853 + static SimpleDateFormat sdf = + new SimpleDateFormat("yyyy-MM-dd' 'HH:mm:ss','S"); + + // private static Configuration config = new Configuration(); + + /** + * Clean up the files before a test run + * + * @throws IOException on error + */ + private static void cleanupBeforeTestrun( + Configuration config + ) throws IOException { + + FileSystem tempFS = FileSystem.get(config); + + // Delete the data directory only if it is the create/write operation + if (operation.equals(OP_CREATE_WRITE)) { + LOG.info("Deleting data directory"); + tempFS.delete(new Path(baseDir, DATA_DIR_NAME), true); + } + tempFS.delete(new Path(baseDir, CONTROL_DIR_NAME), true); + tempFS.delete(new Path(baseDir, OUTPUT_DIR_NAME), true); + } + + /** + * Create control files before a test run. + * Number of files created is equal to the number of maps specified + * + * @throws IOException on error + */ + private static void createControlFiles( + Configuration config + ) throws IOException { + + FileSystem tempFS = FileSystem.get(config); + LOG.info("Creating " + numberOfMaps + " control files"); + + for (int i = 0; i < numberOfMaps; i++) { + String strFileName = "NNBench_Controlfile_" + i; + Path filePath = new Path(new Path(baseDir, CONTROL_DIR_NAME), + strFileName); + + SequenceFile.Writer writer = null; + try { + writer = SequenceFile.createWriter(tempFS, config, filePath, Text.class, + LongWritable.class, CompressionType.NONE); + writer.append(new Text(strFileName), new LongWritable(0l)); + } finally { + if (writer != null) { + writer.close(); + } + } + } + } + /** + * Display version + */ + private static void displayVersion() { + System.out.println(NNBENCH_VERSION); + } + + /** + * Display usage + */ + private static void displayUsage() { + String usage = + "Usage: nnbench \n" + + "Options:\n" + + "\t-operation \n" + + "\t * NOTE: The open_read, rename and delete operations assume " + + "that the files they operate on, are already available. 
" + + "The create_write operation must be run before running the " + + "other operations.\n" + + "\t-maps \n" + + "\t-reduces \n" + + "\t-startTime

+   * Usage: nnbench 
+   *          -operation 
+   *          -baseDir 
+   *          -startTime 
+ * + * @param args is an array of the program command line arguments + * @throws IOException indicates a problem with test startup + */ + public static void main(String[] args) throws Exception { + int res = ToolRunner.run(new NNBenchWithoutMR(), args); + System.exit(res); + } + + @Override + public int run(String[] args) throws Exception { + + String version = "NameNodeBenchmark.0.3"; + System.out.println(version); + int bytesPerChecksum = -1; + + String usage = + "Usage: NNBenchWithoutMR " + + " -operation " + + " -baseDir " + + " -startTime

[GIT binary patch data for the preceding image omitted]

diff --git a/src/docs/src/documentation/resources/images/hdfsarchitecture.gif b/src/docs/src/documentation/resources/images/hdfsarchitecture.gif
new file mode 100644
index 0000000000000000000000000000000000000000..7bbe150dacdadc3f88059fb7119ce945566fdf5b
GIT binary patch
literal 17653
[GIT binary patch data omitted: hdfsarchitecture.gif, 17653 bytes]
z5!OGm^nAkbUg5l;AGmO2V#dV(2ogJDrMZvReuwJkZqowbfjTl3OFK@qj_q9V*06Z0 zdUvvvbB6CYSzov?c(X5hetn6c!L=GNW99S5J}BP+X~uueR=a-DhY|@kp(&b{v;8{ev_!JGqctUox$165laWcUAe&$}H@=ysz$OC6o>hrI)mHVp<&Tx(A6vNrx@m}u!0 zwH>⪙<0I)1W_|6R&n%OJdX8-;P*pt`UpM zsD?Pg6k5`~!i)Q|mPBnNAL*X$9b3atC?Tk0xsZO4?HsTnIzG+zhL{?B~}cOyY2RH7&vnE2O9 z+?Q~l^Q`_?kExU(B%dWe+5Iq>2EU&UT{kr9jv~?KmJQX#VHSBkzMl&1{qaL=nW=>B zFA(B6w=rtZF;T@APQDhqI$qn`;@2_%PUAMJK3S;#VYfo?ic%4-eYgM02~ zXHRtv)?#Vc&|V(oq=6$@ydzmvl?_45siwt?5-2okHs{n$boQovb~dqgf(}UZ^ml z9_>iIk82(Gk2Sj7#)TOAc5C#QJ03CD7znMZP(S9>PHu&UWray$SDqa-^(u2!nAiIJ zV^4oTZy0vCZmr-PklYnW4nftp3R-#nIar+1&VT5Yhx02C#P}Hi5j#$-1z%>W!)o236WTmWsdO1yDs5>o6$LmRj^}9Y zS^9MK`B2X5_yzrk^H*S*S6Z8n@}XU!Ik(+(u<5>|KwPQU<|b=zP~rMJ?{PEX1+TV+ zXQA!4eCVfULrSx2*~E_TM&}lO{;HT>r8!bbFV>PCKP0UZW`13y#6P41EXuqStM`6K zK6Ll5@duj%6@~3j+)^5)5``#lMO≥N$*zy5)@ST&9{1^n?wA_Yh=!d1qu;*K*|# z;lxM7Iv3{ndxcs2{zuYaO{eNB%h@S#iVa;pjOIl+7jD}~RJW&_eo z+nM`k{9&<*0nqpi8aA3VGTP-D(kldOU!vY$x`o|n_i)z5P=k|nhYE>||KPk0?Yd4fWQApwU;3Q6 zQ^0O~MCNhQ`MC#|KvJIotm*lBGg6ykdj56kDqETuT`LT?u9?yLhVA)|(tZ3>d#N^PAzJOkJofs_(NFng2t= z;_`F2EX%5rSO~7NEjYtNT(^KmHqoE6@@(SxDjn^-1fq+ZQq*g50R0G5eKa!&H3-aeg}RORaMKU2uBRVrk_g+94Z@YCFYvTo+`G zL_Di94hC%WwAUU~@)8&)FI2;ZY^d{>&iLMOoy}ePpKV0xLej$AD-)Bnth`oM`VLNd z9P7GV6JM}ztBB%w%R!PyYeqg2yUK>19!2vw2KNVp)*f-18Nj0yT4(Ak#$!wGJiJ8? z6x>s>r`B!^c`$Q0*XXxaO6K_6&DhXb8vKoy8=uC^6UPL>Y4QqJf9D|GlKxPd()Gn7 zy`jW~&ea12*Qf}>VI{AVU&m*hBrg4)&RVgxuZT5DyVZ%F2y@9hju{lIPS1LD*?Ku;E1m-wd7^Nxq+jtv~BTv{GwI5&`mnY z;Z9<4p(F%muj7{%HuvusoAOesII1xq|8}8&)tBEMjl95cTT&p$5!C*oAez$mPfrk? zjSTc~*epAz<+eFzS?y{GeT8*}IP+WaT9%cF64a?o6xxY7HiW!69AXMiph ze&HP@#3Z&}?n?^;&giBk00}Y2->xsr_|<49^gj<_yGO!dyQHO{{ErEFDnTe64eLj6zw5BFTxx;j++JOF<-=!F1m}egKBqyD ziORX0G}FHb2_cy~fyylLYPrJqIn_M-8k3Z92lNboukC)YH@U`;m;pK}M1e$QXEgUT z*8&lR*Gzo_F;m2_{t@8H*fTTj^pnE7Q9=2*=Yfr)^7>hS7Vol8lJTV+(<}Gmwu;v_ z&N%H;yQ6EfyNLlE$KdsI>+6r%lLqfseu!ovOa|ic(YsG>IEE2CsvMKQ3(9csM)G@* zr{Tns%=-R@6eY-VeG9Q`aD3HUWkFP7S+C`}8AbBqMf2iqwu46=tK7cAb;T>3Q6V_| zs^9e$?1m%RZAyct8EJM9vU`$S$`zICQ3X+0(|ES7J5+oV_9kj%<*kFXrg+3uznez$ zup1z|rzSg&^L+*qtL5WqPcrko!87pQI8~KS^(r8WM0hMpCF+Ee(1%6(h8Oel%+-*VpT6=L zI!^~lx3Qxsp~K8FtUSAsZvz8rkJyU#n>ZFQRpz#{IIz%z8|FGh$`4C6y4y1^xYovi zhJatU_i|?+y>owfm3y<~f_4A>AaldzRkI))X;W>(`{q#>*2tfgU6E4xU2ddgnHjy^ZBINC+n5cpswyaE4_mcbOt1w5X zPFj7_mMneGbQ9)#7=tGItvz;b#YA^E=-p^RAuXxnlYCZwd|9$V1H1Mgl0yJxn7aFO zMBG&Wz}#s^x>>K>avhjQQqc3Sdf-;7Oz=Djc`ZB>L4*8ZtLFX!_%IWFVb)Run?(n` z_s^YdNhdSKYq)LOZt^Vup-p+W*`~DvRg3lIletj_$$N6%S^K((=pwA08*eOS9l}y* zv2-DS!K8uMZbEOksoye*A7`xHzZgHNP(dA6+B-0}N}Apv%;2Lq{u+==f{usHb>gc zI2cv~p)>p-X)TT_Qk2WSZTdRWvxGa^xtnb{z!z2+D4G9Rm!0?0Z`_k1kEMQfB_T*u$Pc)0a`NR&3lL1 z?D`kkT`QOaS!Zs)?|4U9O&-JZp5lARzf@>6ScUYqWUwEvqEhzq&VYIG!cJUHQHd{@ z-zKmiwJ%_61me=)`4c8hqM=cv0merVN`vGCF$vFpC!@Du%K!VyKxy06!^f+{oWWX! 
z1K|e1WA`qN1>>l~zi*j1M{tI|NgN3J&hNFn8(K|lQk*z9?yFF>E~y~8$Vggmx9T7f zJl&f_=b|mgI5|3Dz52uRi@Mdul%BVQ!*0dQtu85jH8DrgWQk$($JO#HvX+FT9wvkr z`=VE|9l|vkqbFN%=1g8tNI5;zJtV3227lml31Gvra{J^8F@rw zG!@RXMuYArZ(P#n61Aq8s2VL6$|8%#nz4i??$rSB@z@TeWZHLsm~0;)LUD|vJkXRn z9%YgrKo<^h&Vf=V@`YZ~yP~}I0_U~5j>8kYiC?SdHAhoihZzTQ&kt|}6qP>(oj zJaln*Idh=XlFFy1#n?bRYuf7FMyH<4fLjvPR1@=(O4D5QiLi3P~!)V_q#WkwM}UwwcWF zb;T}Q?yE!Nx01k-+$5N;YGLgJJya=?qq%uQ6QnR5NLt@okk&n=z-A)H+PNfn`f(Ba zuc!5*6M?pnYXA>HJQmauNBY>Z>*Zk(seRa>@;7YN zWG)@+jgMkXNmaA>=%FoN1 z)8dO4(>`T+mM}db-v(AR6Awml;6-ktVG3|be!kW)kfih-FOY*U3PxzUdYNVOwKE1r zh{fq#E555HQ61sF3YlKhr!D2`*;>YzT%jR)U%455!CQB3& z!^Tt8J0j8@p5s4$6VRKxgVht^L-RCW9qD>j=Z=hh1kU>VUo4ur-Lbenac4AO`)T6I zyT`(D_$?Ocyt1gWRHv!b=G}y0A|>qp1>>suC2W@|{GvBl@)s%SRy* zoOnb3sfI_%GgZ@5u!*ogL4a|ToZt}{=Fj_nz_cd=gI}Ac{M>I)$8$56nd1Fw%!#F+ z!mtik|i*A76`8Y?jJ{KDr%c2etx9>0V7Drr|ST$Kap%D%FwR)5KHwKVs<~ePC zV9t!MA>Po9+m*agsOo&uz^MB?>7c~RNu&9UMmg|kV)bZwAp`rfgbnt}9#~5U{xKlV z-C;Q~2X+gWIQH?!JnQBMFCFQH6$z$;IiVuog2xkJo_28>A@^uyYaljF;o9M?SoiLs z^2a=37g{G^;=CpWijFz1;Qc_^k3kMiY+3#)7cNbPx$Q$GcaIzWWLjqc_xzKWp=-?| zc8(u8?dXx+8`MszG%>>OO`*gBM%jPN>G~&&1iHKa{*O6OeY0)qVRz(-#o%|59pfN0 zry-(PW>qajrdjp}s>e-UCHqQ~$J^ZzYreUm!K>Lj zG$a>D7$zRrbr9im^5iA7 zW*%^$Y&A4o5gZ96ZglH(hk4Vtib2Ioc!3^5QEE{HUge?Z6dzCRKpbF355yKP7;yefe$Hthb9F59$`|~9v#V2b7*27Me+f0BR8mn25*E( zYq0KdoWQ230NBIhU0&cw6pRMF93^F2qhvV>#F4iHHZZK9-6D42giP8sDGcJ>HxZ~1 zZwc6&=aY*!=!QGJcO%yZj}QltL1&Yw2Iq;}$cfr3oVIi`X|y-2W7Jn(iJ5@7xuv++8qehpSWTC8dhfJ65FB%y&)r;7K#g$aXtTH`905!tDdpnzY zl~@IlNW^%cvQhVpiAJ~XcxJW;Bh8E>6~`Vlp%P^UaYBg#g96h7RpVdw4=I*$xrV2{ z&B(O1^h{-Dx2;guaEB)_-~TzKrUl7yXOmS!r5)9UH+r>XZ+dA*Sz$FaAg9wMX;RG4 z>SolV_0q3xt9bY-Y?_F-q=)p^5tFu{{9w3+xp>^fri-;%*}!;?Ultgq;G`%f`PLVjuxh zhhrs|dL%O)ro6V)H#=wSa zyENye+);^akH0c6I0t6Ah9wR(gC9W$;&R3lYoE|z@VN#a zeDT3IBsT`9Q?xcMWQ83YI(qhewWDI{ zFWpPt%LB_ttsJX`ye zckws&#W~nym85KsPrlH4alkWdl=f)!6WB8kzd9(Aa8`ry!z@P3XXG*j_?3x_3Oh|% z$#4hP9XOVMg)THL3NOpJPK;PFohR|!!s&C)x)OpCv!`1v+Z7@``9WdS@- z?9~%C5J%2l*F47$F3_!go4+aO|UKLeE5haGM$EkGM*TbH;Qj~U!4dU4xsSs z{Id%-3h%rtH?p}hQVNNUQcuzRoIDB=NH+X^pRu<~2_Q0P$T3$-`=N|Q9=XK5rhwh0bcn?apZ0lLdF|TlvLH3h2;!+5? 
zjIQe;^S9)S`LElutQfYa{2TLQxQ|PPWWIs?TRY_sLoQ+uZPUD1^3B##>g?93*h^l; zk9sBJCP>{l0~(;1UcDcAq{qWaj>dl(!-I)!yD^{inY0}B#m$Io>hq1o zk%4b@sWDJ&cU=ts8EDKVUhQc>3`#tzxRF%;)AjYDf#TMX@g-EsV2J3j!FdmR0?F*8m#Vq~p~XfJlt@t{ZDsS{9P5=<{f12tXs(^YOO z8%Gm&(HJI<=KIExw1hrYIdEKG^9ag@qL4E*tj|4os)l*r9TVRO=+xD~R%sh`W49Or z`UB)!(h0&W^sEvXJl$thn|GmAln(Wnuae$Q%FlNZ-xEi2YLrMP4v!o=`}_tTW4L@L zJ9aC;ssNmwn&BYgh$ACkXTZfaDwKMZlKw*@v#WhBxo!(G6Awb&S4p;_oEHm948L7^ zdAQQxBY0Ylq>_Z3aTUfRJjVL%uR;5#NpdqsQOB#sO!=sWnG<;=qtIaelMBEqOZ*@m z+Uz4t@V;yEirdodTKD}YI$Xcj|C9MuX{O0_XWO_l+Px90 z1rhqh+R6B#!%cbJ1DRz1@kXfmR`?e!H4h4Zz1ny{aFKUtU=DUJe@$j8a1TzDinnBsnmmmncvlh%a`Pytc(!$G>`oQ)c-&)WnbI3=_7GK_+e z7VdggK|S*_lkAE3W9`Y;D$VBKKuSw0NsyKz`)n_x*eN+Qf2{E~{@CSJ7o%~V7jkIV zRKDv4gl8!jRT81689dyS52>%nEo8xM{vk_PDx}vJ>XX#Gep0o+26n8!%bO2y>lXY# zo2OvqZAy!b|1x{z1^hOu`IKk3w1M->nBZdF10&IN*_E!B2u0m%H62!4_ba=?*#wS` zp9m(I)*b4KE8S^CXF4gc(U!fFBj>9`#WdmWUg$&c)8TB~+8Vrq=HHrYznQ&a=xM2FzJu z%*2g-Q%8|k3N5~M2RLPQc`+?kK|!^*d=7aFD-GodUmJM4OnvLX`WZkU^(?q5wG*2L zjWACbtTq{sHeNvBJ~Cpqo!9~5EvS`yUc}#0!&F@UNuTRV{yk|y{RqqRpC$qun6D3k zFqyQit!)Ujy`sqzj&~d*JGhkRu0Hd5+j|iuGg2T|-lqH(a(|wd%ENm`MY67nuS4x@ zP_U)8@U8>OOtdGlBYQv?RT+`r!v4V4!UM{HtF;H4w{Z2*L-SZEjRAUvyP6j>+|fiJ zLVlkX?fi-pbH*AP(&`ailX* zscn710+!CcGBMyST_-ED&=U@>YdQU%bW^RDlG~<}Jxq$Ve$oB?H(@9eC5NE~c+#iE zH#7ZE$F_MnlpPT+ZZRrAb*&f#Bv{t^+Ammc?OAluWd#eR`1sp}TQyDnFIkbwtz3Pl z)Hg}G6snF?h?V*TqUq4wVu`;u)`VAmzljAW6jEe|d1;g!FK*QynA)R6fhqv3>qj*4 z>~+9|c?A`B{0?$?KR$df|WcHd%U2C4q{K|=rD^vYzrJ1C8$N2d(W0*y{@5T|1|k}M3`*_%O8r*xjpjzz!UF45{9X`w z6S;nVwOqxoiys)s=aINigPwd7W{y`Y<4IP*-@M^fRARWl>BG~2yi|H*IJR*yhg^hS z#?$Uj_iUpxT$xI!DX-VL=$r7Hr{eK#dGc9*MH#^sZnZ4&i5l^IANnQnNts!t^3To( z(w)r}hJx-C+7F(3u(zz?mK|5RLy~oWlR2pOgf{tf%VpijZfg%A#+OH`Pl4(FCVXh( zvrj=ya`xnEPP$E|%gNRtW1r990+G*69nsips=LVRc+DsBPZw0>O}Qv!n!Z~vxoXEu z$$H?nPTMIizcAbzg$`G%4o_P z%}9HU@iR+9#kT%)RlHm=@-F63Vim8|ie5sj>+~?)p*3TP;Hj9@x+VE}Pq4Xm!IJ;U z7Bm9c;W<5hw|J`7hWxKTB;n`*VDy33bQ8`_#7$;qH#W8^?Yy{-fkISdWF2z7EyKkv z(|-V|Gr@_K5Z|Jh(fsc(xX1;j?Gc%8`l=>f-UgkbGmHWcr){bH(-Lpbr(I06kfg3J z_NpdUpN(0dgdhW*feNqn>%|F__J?cQ51&7sE09i*B)gDNEbrsrqs!c&ax2fMp#CiL zOn19nVb+FL*OpzB;V>-}AwY_FMkqV0bSvvMTcHXza@0_9L$+u8(9C2n_3%SzkW+De z$yVr9{Q7k*zKH$q^ucN;VlU-Xea2(^+sl+kE+?|WXIti@IH?=j-e)-OwLs9IZoJ0tS` zp1YrdZ{u0ny_-oH=j~EloK}dfURXuhQ>O&r+%Y}r2r5gidU1K?-{t#cfua&(tD!!7_;wPhDWth`#`_iaDAUOI_wBy>)*U7?TI{N1; zB@v3Qre?sy-5h`KZr{E`bN0GcnJe)tgi(U?{Sn)quY*aSzW9hP5bchdAyh3WVvJnf z<*s4vW+dGkeFulau2!tp^_YBd{HiW1H*pC)Pc43Gwz|}tDv&K=?6(HLPH?2`ET+BB z!G7!A$yIe?M%G2W%0KHNFZ(YXZp0HQhmTay8yK3rZq*HP)qz>CvRJSre|4Yg4YZn%<7 zO}B=cPS~vUQANRt^`kKp=$m)>%6xuMgv|X}b&sn4tmu@vbF$na7=+9* zV}uc3-4W1cz*bEL?_|M%{{~;NvzuwZh6Y9g{<^iOUcK*S57yGb>joKA$t{}d{JBJ< z?>hcLFN^s~3Yxq?T$mi}wz;DC?NCoQ|9RUoy;hkTW|-0ZfIglMxCD zC`^}X#sq6;Coj7BTM{RddK|w7`Sa%#n`oUz`_*!9S&a-X`5dmeOATX&n(e$))|>cz z#D6WWp-NA{#J23@T1%))YY{l@5oZq1mdLEsrU+EP-Hg zkKt{m+>5-o$L~EE-2PEoCEK#@K8FNitb6`ej~40bezmv8W#VoxrJZLqmPzea-{Kji zOdP8emxA}D=J$K`ddj5I%>Us+_|#40xjIZ)RQ8)=kJHURgV%U{r+IIujYW_1Ta{{K z&#dhwlO6>>rA$t0AX!aLyR&6ITFqL0JLi}{_%V5LWxcqk?YXO~u4SgUQPQkmEx$w( z5XyUFMP_#ORmIq#;L>B+vINjlfhaj2V1$ykGn$s~J@{4MUSTkkm{RKM^SfC~WWd;_ z;^xUDDeS~X##C3(>aX|2nH9#ex(J2M=^ujQTyxOH8yNs9N?KlJ$PEr%4vgYPTI)5_ za#9V9O^EGB>(cr;ZjzahMoJZk(~4IyiJM-)BBU@%T^*ztIHzt=*r7$Kn=W*0>KNB@ z3ka$}EA36`{;fmna?1h7b^+`OLC2l#SxLr^e%i8UJ^H=A9BVbvD$+XmXZ8#>aLyx9 z!E^O4w2M}AG zO6dKCks!L-|F(nUefPi|@lOWV9Lbr~Y&NS;+MJBns*+htD2ShN&mZvZd%od3`=YI& z=o(pkl12m42dQ3|aolrvTbR65NsI6ol3$!elnf8rzVyz>m#$7Uhx=J(E_P$9*ygVS zMH&=|aCl03*6xkbHTFhPxsL(4Sbt)Jnco3~Sd7kL-mTf_#!r@aITUU5&IP#NDoUcH z(#!+qeUtdq*RopM#>ueIp36gnmxkGw>s&_Vy?#0$^xWCxK=j%+zje;MT(U2y%Bys? 
zlebeC^#0RZ`(u_(^34r4CxSSb<%@=n=uFXDB2QbT&)2s4m3`>{{f0ly(i{tBbXD@(IDLe`n7S*L%OVIoQz=;MmtD&?+quY;G{OPZ$sx z_8aA(x)L|2Ly(9_yOOh#&zdZ&Q?ao*+u^tD!6p}$t-w}iW~NZPj&p?8onJ zuUw?%kyx&HlUS^~+MHZ|Nd8*D`vBLrqN(6svinB~UPFx*=e!L9?$b zUm%zeN$_c#@>L)HGi;eTc9JF@ zOLEGRnSJV)IIU90(4Ap0B3^!k0n(Lpq3wZfE`f#UJV#0!-VM!yt}mhq7iFg{oZs)& z%i8-3y=T6iBgs8V06T4|xsJ6qzf9Vm{Hv!FrlE&B{#rF8uIaUnHr$t0;_PEhaM$d$;n5DYZae!k?EUNf||#rD9`qUem%VG>z1#Ygz5V7ctMsfU2p49CueAk$*DiUEbX$LIf=}7~86n z7#<;gAR8jhOM_C$V0k;U-GaY{CSk~?pinO9yZKMQWv@FKewY8Gi_sO)od#bHG^tk=QH z{1SZxkM^=^fneSmDPu46e!DrfD$!aA>&9IqMq;U zn?+d?-5XWkr3qk-IzwepaH?moCjplGca?8tb&L#61cQGzeu)fM{A5yA^tC@=={pb? ztmk7(P8cY+{@v~go{C2^LEQ*e=zHIomJX$n3lH83!I1%8-EKKNlm`}Ux^dA8w} z+4uaz?Ji$1=9;s}=5RgaMZ~>wftgxczICNGa}?1EsevEXky%t)@~DcQeZ$zG$PaZ(&*2Q-byQ&C}ljJDCuAZhnUu!`02M z)F#^I_`R2P{>$6N<8OpF2^(0(sb@oql-1x$Ax-HHOuJ52 KPoKldE9@*kmH&Kb z{IKGf!+49>$=weXk<}X_u-y)lM4SmAeeH8l@R_$WAEKf6n;7aM#Zp$}SA#?yVc|pG zI2Nn9qX#DVg zmQ!f(cVsmL0>{S_l-$yt8%zM^<_Fd_S;)(JQPcp-A|r zI92rq1Q@nug=;oB0CFRA(-$zeUFlo?zT91x6+Pk#>Mw3ogigJH{sRFU-WzV(``@yV zjAea0*?xlIDplN?BgMpv1gA=Wm;MPhw6o+FxS^=r(OHXv1_sfDFG`RxMM#+nE>z^- zhhkFw_q+LP#93r1I*fR^^&XHkbz`GuNPTUT?34ay?68j#U;e7MK175K>Tr&{f(3B&_x0eOHPKmeFAOc9UTALvwfzH{eCu<{6L@W(WhrchWWsf&aj8;4rY1EIniv0Z)JlKy@H9nZZCyAO~Ox zoq--e0GKgM5s(I0fHj~AcmX0XGJpoy01SFVb9fFn$Z}XhmXH7cuaZtJ@~@UN|Fu(Y zG0cxx&m0I9V$~D740{Vrdh!@a#^HJLIRAB{|L=bNot!1NaCC6}*Dt)!81*}kNbU6- z$}zRJ%ECRJrL8of}2nWuP~ zde$({qA9OwFSYB4b7==++fA71o)LQLa5gIFoUe>ZuvEm^X?|r^Q4uSw-@2wdPJfQL zC>7hLl60NwVU=-s$&;-kBEv=$v!|+t-$&=WF`FDQ<{V?mE#@Vt{V+Kq`0!ZvuJH9c z-E}&PKSko2TOX*+-ucd~6Dw#R(o>dbEOD6mePDC{Ma%R8l=(?fD_wkOEL>g-t>{f2 zFv~bltj!uw`TTeYZ^fnV7F?R0>(S55Le56|mhasHx8g*MtoEes^o>-#dnGFCtg?+- z8Sg#mLceEM>@JrZ8%5ZOOB(-Jc@c*@Lc4cDM6cPJ`l0;sty05_U$f)uP&2W!iErxQ zVTY_>&Wrj76qvm9jdKq@$w~9SdvSGfK_yMU*T%-^SrK()(!Jlm9Gq>!vFDzazJ7L_ z(f#Z7H`ezXvAe#+C;W;R7Cy`OWJB(=pXj3P$B1sJc09vtC7uZO%*7S%RHa{(^ai87 zCvS1mGjA<5Fk;s)#&rW$U>)MN70Q@=mv?7PJD zTjyuEe`Qb%QAbygc8Ya7X1FWgn;$#6>*pEH2;ISQRsUW-`b~Y02^67@h@?db8|dD6 z#B?-9NFNA%gJzV(4H*IeW)T+r&1YfsCY}OV@KH7LZM;9usHS~yi3IFSMJrtWfGZ-e zHIAc@)(nofO7md)7}|YrWQ^piPXPN;xDg#Rx}1^FfpGFku=9|unTkGwP(eW+QXIN^ zU2HOm7L9(yR0l=Knj1(BIAAb2oTPAFh~vC4#yb-Q99KrO&WzIxf`tMhwn158EvR$C^Jozh)=0QiNEui0_rPd84@|Yd1zz29P7t(nwb!O@=5h+ zJLI8@p+7h~tVHARVllPSTj5tH)IldAl^RPIu)80qq6Or)i72#ae60-6Yit-8k=?;6L$HH{P=cc*esOes$7%(3I$@SHYF*0-=lt{!H z2CV6tks4_sSmo?5?2{}9BWYzrYQXGM0v~X-DpjX1iYk1ExR*c5en#g|`6bh>G89uL z*FRo8e_izMc`1tpmDmG}V>E=dYD_hq_giDnW|i4nE*CVZ*vRuI)k{LL_pmT?Q9srz zTtMqk$%Iv96lcj&TLJlNSkhY)R(wYqDZwC5SR}SgZk+1o#N>U>P2a!GGmui2S&rr* zT24N35~kaj@GkTBg7JBd#~aTacM!3V`pGsMo0qGgz|@k~A>(%TF|CQ=E8;4()A)Ld zA`j|{2D_B=rbY^kf@bVs-yyo*k!oVFuEe9$1caE_ts<}+idw>yIN%eT0^u;iB^C;eV_ix%*gQ;c*DFOSr$l`Q5#@gCh*X;|u;AXL}_& zXL>&)re<4#o4{f66)VA@h{oB>YZm!CWkhwU_QJ!7#niC}47KO0v-imaTEpEkp0e_p za^2Z}E;_vNEJpK>wTt!fjs7}Y#$wflbg%~zuvsEHor(@{k{LH#q*o(cbj*Z@x#*#w z2ue?sgZ;Sbg=Ba$G9;nBV4?ldD|cMsf~~%w!ajpi9BwYjBH)_jdS363OS4VtoX$xB zItwS?!{M(;eQbC7q^(f5?ZUCaLGY4w*zm5^#0mxr44B)GZFr&AFlDt?tYV?tFLt|`MI;}f7p9L-~ouWMU;^<_jK})*! 
zGe#Yc?fs1Fm9KP<5lcF*GsJt0dX;Am=GAZ;CNrk;;UZivUiI2W>e zntq?N9v7m=r{lW8;Md_!iy4fY3*H9K)x}&HlV9U%SGkNgifevA+El^Ut|}q5c`P#7 zX8k_Z`eijY&IHZ{8W$4&b8Yg*s?mzV4yY}It|Q?tz5n9tg3NA`>PrX}`OU@AF?&1d zsXxAWQq+cn-$-0qUpCo!Qq?l-7uD>`k{Xn`+>H%|fVr|v5}nyqZQvu3I-;6HLmXDZ zWzTp%_Ot?V40Vks4d!Rspd=GtwzDsmQ_k8-qgCME2^6;>aUIGL^X-3_$|VV^RA8#6evjGpekcFc{T|@T&(206&n!3F zw_i7k%r5)2Vw+d>JAB1Xl!4mAn4G;Ag18sTh{Mehk@*TpLaV=(qVKGdl@VUicFadC z3SI~QH&XLX?KzhOZ?J9dM0-6LiK+G_wwxy1MyrnU(AoN)lI6BS(}>k#+x(dRfh-4X zI#uqlTumWMqGE9jbavd|aPbLnV6VdQu2?U=IDI8ZuHwef9^VwzXk~My^7@ZrY3K6{ zH3$xj7Aza9AnR#HGxi7RQGtHw=Q zaJX|1$=~-morX#oECP=K3))wFtH%&;)=;z zt^jYM&!O$3mVPFJ-(6C5bal-PrgCUtwRJ}KZwunHETq#Tn^s<(ty~{>PfpD`@OEP! znziA{A5$ec24kro8=MT?w-+fN8wT|wvs=;hfaJS0acpw3OBlm7JD?ywIffGzF# z%bVr8HdPbZBaAix=_P}x9-m;Hrx);7Nl{5n?r|l$sceVdwQDRwR0e(8_%?Nz-^`N7 z-B#PzIxDwpt+IgEP_|g3+#zg5EBbTeV1uyrgtgIRGrVn%+lC?#H%X)U!u$+6wv?^1 zL#m^Wy~BE2Q1F=QS1%pt=r;GtTaHlJei#pl>P#%HfO${TYrD9|xb(_3i$;hmPEUbB zTlF{VXv4;83$7?JZ22T8yrIXMazW^i)vn6b>u4NlL>08m2>yA>-2ZwthOxYd=fk>j(=``d=M+TR zlu!jy`%3|XImr)hEgIk1)8-K#{UuzznEPug^sKP&rJMJ~Q#R?{Y;s))wn4E|9w=2% z>@AJ;PDd4U#M^3e7VV*GgoX|yJYA)`)5{_Xc^R*zyellTC!Jfo)T16&eA!Sz*!P6~=)e#`|X**)|* zHleWPvqO84jxWkC_PH7zM_8x@PK=H#$|1I`DV;!Aa66&9sN_Pd<;hF1xbl4~@@~P; zw9XZ*a{wz6tkaT^;EW%Ar2t1-q3Tih^H2H~)w>1IRmbfr72_#=&B>0g$NEDI6;}=O zDQT<&r{{+?VTcO!7(*fq(1+!QZ0VQ(^|kwDv59W(-g9>T7@9@>heOYV_F#ZkRo@q! z-DWV@Ymu#KX8vC%CneZ&J2ejb)gg>Ybj8W!ayWj3Kiz|P3H^diz@ z^M|Y)gLYNt$fxpi&!SY~mYGn%ad3J6&c4&dz z11cQ=084Pw|z+Ysl@7);HaXch~0sFFGF>N*OOCbI1+U86dFb-h{9 zUH4>;qJD)^#JW|}D`)d!&T|Ao+CUe-ps7=qEhvTU7ePsHU9BX+(}Lb5OF^@_{sJR37V-6?v45`;S-G)z!t7?VQ@; z_3?RhGhK@VSZiP$bGC;`v+ye~A-dMO7EA+dH(s|b>olHdbzJ+`gV`kRN!Gux&Ez;F z(12ar)D+J6=k&{>Awt{NiLHZ&+9xP$l&pPyr;zWpU2NhoRk1AoO>eE4dYyEVXZ%9$ zXwujuI#XqBJoq)GY(cfadVh#yT2;tJ93y|8H)e7L3(^1!{kI_SK5M|%%V~HGOW1Z;5r3jC=DP@0dgWs@$*SMd2QEQvUboD*MB&WBS@4fD zzfnE<*SAE~AIh!YIdCDv4$19(AKX&~$A)K5{e8NB92o00=VEO+it= z7b^`#E8viNw|*IszNIcy;071Ae7HNikQCrPp!elTu&|Lku~EYHF#wn1FIM4~SZBxk4V+q)#xD!fKa-K zM8^(ObqRp)Otdy8H4Qo@e_@6qo;CTIq5T3xhB_Th+8b!@7Yn_Uh}~ z@LO--=5@%hB}BoNeu@&l^|jAQ-;mt6x4^YX2!0TlxWou~g$D)Y^5~=81Xzs)xSrAk zl_vCnO&WbXQ*F1#f+TnMOxn|ST;lgQ?SlqvHu#B(LJ;SAkcTH|sTB*LoTc*bnO|}H zyU({#S0ET@=7pA_{jvjP;4i2?d8Guqk%+?WQqhz@h0~r9#GsMBP5~OE#gT{~a5npd zYVrs+a{P?Kn-QCBkFuWOhJ`~A00*;oqyvz>M7~R=KOv#9WxI)w2(MF4^W9s=#i)_! z=??S{O4WaiUG{=)=UzdBLrpsQ${_$q?Li=6(icXTQ*}_So1qp7$?k?_4&jq8C<`^G zjf4Y*FvENV=*8dNFZ*2c-XdI9HpPDs$*axsU7cFu=*!_$_+KZo8Q{O}t#1L#XM;Bk zrqUudo-ewWNHUXwQMfS#TDO;@F{Ff*ZN_%L(PAhJG~gws`w?O#SCRV3 z;9!Q-`p{!-;QM19%2L#fLCTD9L98;9NQ)`ejP_AGO#%XUQ-61F?7<0FZ)xdx@LQcb z7s4<}=9D%od@vj7*Xg^1UlF(0kfOGbHeq^#wh@ddc&L+tD-bWI(q%}gf&yCv_prI| zmwLVwKN*cc)rXrFJZ&^^3#y} zkB$p1kPJZ~d zF}C61{qDXNOm!@My$-L=ZumAc%lPu?kmc;|0#vnmv?_VK3Aaw>RobJgQ*XHOoO`!~ z5oQZ~I$5*mobr`j3RBe^MS9-wQz%WrsAo9^s$!1AX9QvF0M~KCM4nJ9j)bs|)Oh=E z64fTMC=9Mv*Y%=;(i;vQS!3h+v*5jD2w{TJXR(KhxCaiJ;R|UbU9t&Dve|xRD4BE& zYLTUWHfAzsYLTx8>G94PP4w17Y%FKaCu)j@D>C@lWCiCab=HQ9GK3yh4ajp@SdJs? 
z6&T%^=2K}p72o5mN2TbR^@IAQ8P?_Mv~7ikIsHSTh=(s+h>oA%^B;)Cdm^>zRxKvNq>TOeL@Hx7 zO8|k}g^ty*So?P-nOJJBg1g^02=wX85U);9zVLVu4nqm+a>h21j&0VYU%a9OJ6uCo zvv{OCSqR-4b3XHLG#mOilZ9BS?N*tuy-w`Lf?eLZ`m456u%~TugG$e+W+TfS!{EL4 z>mIPrw+ijgFzttu>|>?{fSQQ!nks1<&9*JGpYo`Fx0#{NCfSd3eMwHZbLFDl-Gg#% zp4512lI~`*+ojGsHD5xBEJtLBU(`KsE2qrcM_0VLxkP=xy}Y>PoJ#?uZg_zuzuM!| zieIp5bS$DR9z|4NC2e~mfRP&2b2S8VJWby$6DfV@Y~uvd^m!*jE_8{r!RIcQ3zfXD z_pn1Qd*VGmBb51Q+QA9rtA@(Ht@_npxfOc1zI%1EL$X)T%Pw^Re>yB37h^lTAM!E1 zx8vN889j|bJ;Ed;F(U9P$0@LKxBxO_97T)b?vXf{tL7PnEeZ^=26>W#OA*S=Qy!32 zTkfii9e=H-_jzX>l#D@Al^;o1*J!j{czQ3e{7iIf^fI)m%T|57J@MA@VuZZ^^SeG( z{^I7uTit0L`0RE+aJZ01r);_yvcF807t9P`&oxm{ZX>DMm$IBcEr+3?&U||Iio((Iq9i03 z5%;Y_#7CJ?Q5#(lRXxg}R?)g+mZ&6e5z|umtd*9Vi!Dw_r;a=4j15_J3KG-G5@vTL z%W4kk2!A6z04vKd3E;U82$8a`uu4e`*T3EIJipf}swwy}|3wS(KAbTW#O$?SxVBY7 zY^Gp&qy6X3!``tRM|pN%9-4}jMd+HWr9{*8ue`>7D5`U$+?&^=MQ$T$-AQ@Ya32S1 z#`+*d?9`ry$zKCp790JZVis*7m=sp*oiYacI*8k2x6#%%2dplW7`+yQ9R8V#At1LW6T*u3t@nn z-=5it0{SEewRS81at*qSRcDqbvQBR?6-WLo2lcoTzNs(Z>b~?%s*JAnu<5Crqs6U) zTPQK+M$+%-Cw-GpHIkAxUdN|ql*IIm!nLHY4rYT5@ZH0XtgZZOIMetURcC>Xp4;R@ znvaNvb^+$M+Y_wp<2TNuyFjsnRu1hvs7vaVx}gRh(wJ=I)%{PYRQ9#|Dw~^XQZ&v3 z&LF^JVoX({$tZQG2)$)!P)cJpBi2Agpt5#97g&X-MqjBR#myY%34Fep51U{B20IBk zIAxTI8A9RQ2y~;fLrqc6Jmh6XPyEu!gQ?};mkkP?x8=0|S(*b>eVR;snhuO77rC%j zR#joo%aM0(c@kzr9-7xsU#2_Z&tInBov%&=Uw9fW1z%Qs%Kz6MfuRFdo0phC1X9Y) z{yI7yE$kd_Y6BOVs)cBsuuNpB9C!EH<6PKiwq+J`ll`wd;&y?8Hn0X?zp*V{$-(<| zDkWLZQ%E~>qU&x^FYcheXoYEi|Gb-CoqW;O2qQe~lDP$=J*-B1;fE6CFFZylw8pr2 zukA?z=X4b%t@q&2_=&Q2=K~*wW;=pEi@VHDhc}7mGQ2sPHe?R#zk1H7JnXR9qR`4X z_vmwi=dOcOSei)vMk#|y)K8BBNi-ZMQ$(fht33x{*ie(aQA6w`;7WdNGLASb?r)B8 zo4N5QirQtHB7HBJ@fX!b!Nz`A<57yJ1jj24n}{5dT7XHLV~K8v0Bn~jx%GnJcqkQa z9$r$pv7$?YZ>Y_>lo{9kSSNe&S%VOFN%yTc#z+2P3G9RgjOzkEUj{s%TE^2;^X&y; z3UR0&k?#=97wC$YXTy@$=2)ezcsTmita7=Cz4S|jWnez+>i%9&jhH~jtk?Qs0eR7D zDsaCx-0IK3gzA99<}k#7?T$gJuJ{GX=9#d~q6gXYLjxnZbPAuD$_Th(h=`ZjTyx!5 z*Xs`MCw;D`$tReh@cP&D{uCL}seRia)w^bv7l+?xxA)Da4_YtmpL$mH@w&hq`p(Ym zVyQAVBx3A>?r8ZD*^^%tzOUhdiv2T?2!5*ixi|3v{AVcn`wbWf84d^t{4>blzh`s)!>IX8Dy{!# zAQ_n;ZJY5OA^7qQ6>|TC`M|K=5;(T3Od4L9=v;(b1#VoSSdVxW5bvYAO7lbe-uLqj z=AoA<7hXJXyNmS|dcuJM3gPnS{7zmRGrBZlCr($M_HQ}w(f#9o>YT=kV7NejTnsd| z(rEw;>UIuTqLiaF_$i;FqWp3|3JE}5G23j-&9>#YVqjO~G3DUlt(&6^ivn}X*`vo< zKC*@?;}U$?y^^-RS_iWOv*n(FpS9!?ugscMdO0_J9d6Q1P6tfT}x`ni!IsOPag zN};QkY{P|X8%w^SJZam;fv0t~`zYAu;tHFy{QbUvl}KL3#U^vvxK|q%LQYC-nP**= zY`CKJPFuDXiM<`}?*-~7h{T>%?fKZdi`(v1taI_WgWrSn)zlV0&2v4C8WX&(tmOo+ z+K=p_OpRzlzS^+WzsYTVgL~-D@i#3>aXsbTirYoQ9`H8SmY;x0c+g-#K@^H34y6z` zN70^N$|!e0TFW=H>@qY0e@y7_?2Nq`)v$AsJ_5LFWBvX zoOECQ^rPy%+#vx)+F@2}hm%~?KaQB_A~`1$#MX(d@pS4l*&HH}LNw7N?^ZR8_;^&L z{-4&P`85adC-F1VXME#-Z#{pem6gn#tPO1R&8-~i0RJlfp=VUOJ}S=X);y3C@(Hyv z9sjtalCaHXJrgDD^q- z!73F#m?^YmxmXB&r0N1J;))cOv|VU7nVBbpA`rruW12-a)y9+6nGL5omH-^Mww!3{ z7?>|1@iPHcXgAOe`PrbMF9;AKFr-8Pwz%vW5BZb(fgfHo&zmYY230m?fFxvE+p8QZfdXJ#bI51>%AXEsWrK(1s4*!UMx$jeg_0f zBmvYij=$D}Pgw-5R<}<0O$Q2!W~zyoarZ?Mv+Glxbvg>$YCVXlP9ewN=c4Td)~)!F zce#08sRW0|i`qe9VO&o*LTJt*C%9cG%(ORF0!w=&Qu-sU)8aFE)~Hkz3_KC=RIF{Q zLzv2+C}8B@+1TCcWTC7e`&AP%f!Y7Oo`Ec*ISNMLl)jb~`bi z{H-@5UqdHfze`wCUY5kZx!hdtJGritS&C`Yd%&^)O`C{`zlI=g;;LF39wN6Q8HIG6 zr(hhXvc(gZ-)BV4ElL6h?@$(~Pu8@N@j#O(E#0<5w~--G=d3TW;FK%zO$Nx;&0~5G zYGvhb##ZjS7~JTuS=?v-# zR{R@q;@_|;oj0fny&Mm-Cgq*84qdG0E0s%;1#6Gmke0Bp9t!=wh@Sz<>|##kmX@nw zr%7JH1=3%ijU{Aht$tN%(ONLr(p9{biK8PMz_gIu7U4AUhT`7(+_KN{G&{~!IVYdJ zo)=nV`Fw@rve!gUbMb6gSG2D)*IS+Dq+(y{K^u8l5H|nTK3Zsn=i@lP`vp6O`D*|> zG)xd6?g#Agt?>&Cb&$pJhH$f07k07E0DCRyP2LXm-HbsC$X3_P6iUsd1V&6X*=|M9 za}T000a>rsK*FQ@ra=zs6XX5((Rgz|BBGL#04#QCV*{fo+M5O1h^28m`mV^=TaW-M 
z^|<{)1r}wZDKF0>1@{r1j*^5fr%7EZ!oB{X6=1LLgTOUwr`{z^4nVavo?lqS%{p2y z-NOmU{j-Qxh1TdAzfvbwme+4F!a6Q*BE>Oho;zfV)7rXZ?;z8QH_W%77`Q;QVyoMhEb5URm2Qe9?)cZ*Z~Npoh*sr zHJRtsU5mT+S#ch=qF@ztSZ%^?`Y3i__gXLF(^1kUX(ffidZY#^e?BX6DyY|Is<>e@tR*wM@2*%4Pkg9>aDxw&Xw4@+#syLwQ=DJ*v>+j z=b*|z4@p`Lt=?t;d$wFL=PusLrd!@a5kU_^8iJWx8`!6U)fbv$$#U=VDal8JP~jTJ z3KSp=%v{#8=dfrt>Y#5;zqpMVo(?!Xuz^Q&J5n|8*$=zy{WpgbQ@3Fh;9kqQ;U}xo&eg+@cUI&R!BO^RM ziwPIHT6eXuUi8wae9Bg(;2q+{a3WQ}`5D8-?1AfMwmPr*fn*f&_GV0DCzz`pEm#Qk zk&x+Qd?-5U!T^&%vZ6MIlZDQZgzx$wD7OOjXyH;Ao2GoHPu?kVkvakvN@Schda{z; za8J@sf}gZ*agnt9WpWKVMaE7hs4qw(=Ay!fijH|ys^kOjp01y%YVz~dzxR{-Lt zrcs3Kck>LuO~e}ZB~vMl@_Ek=rsEqk;uKlwr9;p`^{=3X1N@?XDGykw$UVFM7s>ncf!3D{dG-G9$XQ?$5Nk z#<^RXNO8tGwPZz$Ps^euzD4_mq zXY*BBy1?+RIicR@1MBCW1{LcSGPpLbW79*Z0~ztaPTHWJTZaXX0s&>;T#`A(8=%H3>>>B^$Bqn`ge&2Rgs2?430b z>UVu=8ut_E#)Z2-M47v+r!ZI~8XNFRu*nl9u#1T>kMSW#4sJ!Shr@Q?eK=P9!0-!?AE z%><;CvzVz+O4b34M{>TtlXdKFUPweO}1;DF~f8?@hQkz;-{4aCuf?rh~1 z)VIV}s^+9(r|Xp(Sp;;LKX!2+UGyhSb`YCY^?gU}MY}o%)Q?-9^2@STM;=DId4d zHhw%Ul*3OPwcSMDREI1DN1tARTyb=G0UPMahGTkDcJ)v>{pPS@%WW8QlN+f9M6GDFEE~h|Y1_n)5JXp6NEVm}Y{gM?o_&9nI zSJ++A>!#ZM4&=>%%1zR_d0)kBcXMf7e8K7{PbybUcaQVEt&S)z=DcyOVQm)n%8m4a zwT-(==cpI>Xy!#2Vzr+BdvK0jw+d3i*7(`;14ln*&ghmmm23(k0VxO}M4CbC<)X=Dk{~bj=m0e?$`=8Yn^xFAq2NCNLx;FHWcV zi&`Fv`q}yq;9a3!7A-Be#i>tl6{qs83g6_-owTrJq81s#^mj_o;U_~(X0tX)MX7-5 zs;~mKqGs^rZG329b&Gi@#m#ehV8{H}k>rD}&?2&L;?{530;oBWIbS3TC0?sqO=QPJ zj93Zd4tm8(cs_cQ61he(P};jxE1~SUf-!vn?T3^w08)vL{2lA zOuxHk+O5hP zLZF++hHz%IDWo~&_2e5Z#S|<`t1wLK`L+Ra;N^L1Q=1ebv+szi`FX=Sxv3bcr#A=0 z5GVLBb`6dI27p*-J3%&pSiG6b?tZCj6z^b8Oq81E*%R9&pRLvLJgjp=xX)gC5i@f~ z9ak&9-uxH)ok~&&`h|pW1r$a*C)yS*l@BrNP}YyFr6t}`Jx0b%R3{N{-x`B;@Iivo zYVK4~SdD&mXzyh+wTDvr*`Vt)C*Nl`rttIF8`k?#sW>gzZ71^a(3$!b} z&LKNXSEnA(NPLox%_A9&z^h3mxE;$Ooih*lB#|f;pW0dMY@Hs)E|BQMpbJRWkYaY& zMVdAD1tE(9>5lFZdZi+-6X=vR4QEQP0-DX2lic#DULoYfu}~ASJ_|m_Wj}iC!Iu+> z{aiRRrcrJJ&kge!Xc@Lx)s(*JoznDFdq%f{6uw`{Dt8(;#j`L5fySXPcSzEVP|C!D zphDydl@uop9z^X>?6wf`@cfk;OE)J|c$F^L1VJVdKjN#xo(Jp9@#lEpaLL+BE80DK zrdih${leltTtwKvLUqMeNyM!?QQ%g|80r#vO?in-h|2X>!U>FcGrrvvPHhlfl(95V@eo7Er^p(r)6)k;tpm0YiNV+a;&m!Jv6ou#-Zj`2n%`Gu+Yp=EcG^S0R z_jt@_sS`0=#->oDA$%pPZJ7A5XhQ>_zB+M$r36cjFlBvA^Ru18DO+l4CEqH<&bNvy zi~f3y_{(8^>7>DiU0> zRnzSXw%b@Gu8{*Zq2{5_)p`k5ia2(UsV$Xl&IzxCU+Mwow&PZBL2Jy>W)tdScVe%R zR_>+4SIgX8W79MJ0B1|sNEkUfW|P}(_%=&~RifY>+jf10%}coZ*wh#7hPT*6oFWpw zQlqhi)$Ww1Q!+&ufgEcJAhxtpuo$=TjEdv0E2~_YjqOG`1*GJdh1By~^1xEG)GV^w zEWp{K8w2GQde&Iuen;<8Y}?1vp!e;lNyyuoIrvp++`u5|U68Ip__k}4Xz6Nllmbh~ zfyP~n6lRO}7Kp*>{2eL`Oc7bC$?>B<4jbv+(*-8KcEUqQvz)rgdW&E3v#5Vqex+kB zq6kuydfJ)X_hh_vxw5m$AcXimD1k()bVmT9y^~!ol5w?2nuLYdr{O(dfaJe{#vSVjWOyk+gb_)%4d;jjB zJ}QRXoOVi?LOjd$ldO>~Y4vv0B7Em4Ib0-%^ooG!?y7-53E?yJk75;ufuES1c z+bkGF|J&at=X!01**hpDbQDFDYiy+cqz?*iBzGYL+x?`VzhemZ2RIY^Lxq|xFPAo= znUa$O^1IJX1?eWbLuv1(%ywL=;#Rt{uG)@@1uewD^Esgaa>ephqy}paaz|iJku_&- zxF4F<9AERj=(1|Fs~vgabG}spG{7kul)aR*(g~*nR`~jYeEFKXZ8aK%eTk@x#rhy3 zM_!8QASL@a=Pf5nH%(R4s?>4#f1Av76vpAnkN2{a!!xxId3)_re%tsSrxODZIA~$8lC7);PwNhTuVEi;2PP|XYHTufc9w60XrS6ar z(qYxIpMGX9`WQb;Zqwqb?|r-8oh@Vu@hxN(IY8+C;*sm%cI^Bp6Z;O!r|OzIa0)^i z9XEAK`TT;Rui7Y0R;Wgz7@1!j^&+MIy?M8!}{6xQ< zqdaHk$2KA2WFaO+H4++sDKj1M-2!j#;VS~8Q;)nFzZ<#lx=nI?B$3BW-#`V?;cTe9 zNn(}&r{}47Qu{sf*RJ)uafm)^&(9O|v+zhJDK&V;eabdPnpkb3Q7Di?al?4vfw0}b z-;01zOx-bdAi_E$7l7>)w7#!LRkhd;e-PVGbxC0~Xg>$voY=<`v1)AWimD#Gf4}Cy z>{q#0WKA!l;(2rkU9(GeNc%R5lwBixk0+Uh$|;htL2ZrFc4=$xh%DPcyLBZXkBZe!E9udP$gWShyB* z!-^uybBA`U=`spoFkz%fx(b%QoEdhjb=%OqJac%O=%G+sJDAYRBSk;VmDg}e*3vK0 zoEbWDAc8U^yic#Ovxbb+c4?Ecj-+_j=DsS(qYd=0Ty>^!uk<{D)wG!hW 
z)-R3ocS=4KtHJ-n95m0{3$;WC0)qW~C;#7H*u(r`4jKah<~FAP!yVMnbX;LW@!r&p z)ql`;asC0-!lVy^n<}{jfmCIl&Z5=lcJrxo&cPmL)SN)I*sb8fj5uRE#ml2i0h z?eM63f9TK`$5T{GPoz93zfWNsNIuyu<$0mk>=#uQI?v+!PowDQU&5%-E&9GCG|(i4 zSVVC104l26p#_0q^B{gd;hzzhtRx>t4F{)7mLOkz z^=8)qOR*X&975AZlS&*nys@Yx80cyG?tb>ETNdPFe6cdyBpJU*iyk)F6{eQQxB8+t zQ~6W|Ug*BhDKZmIjxu}Dk>wa2qyk|F7GkBL#*i(mYWxiJx z#ezlZ4uO{f_)Y|m^r-1-B=C%JM^h|CX2b({C@t7Nf>l=b3#puv$)(K{_{OayC`&Q! zU{HKSY-2M%jhAKN5Fzh;9hOG*ZsXiJ;Ȋ_<90G&)U9Coz0;g*H;fg>j`Ur&?|) zX8O@A8KuL?{UIQknZG0;d8p?rDp%bWq>`#uh)G|S3(OfGq1-OR4LabR#(b54^)f<$ zZPhaon`jN3O)o>hu3d)f z`h2TQppL-MPw- zis2K)taX|(UqZU!G`p1Q_#G#Vi-251B-nkp1w|Gr07|s==WT6EDqf#dOTd16@au&N zL-L|nSq+xWV!$a=jhxapldI^>CBAeEYZVc<^x zMS5aYwcUt;Z#X4*H{iKU?QWeo6-;>;I-o&lVeuk&Ys-fV3sa9gOs_?~{Y|c!t$by8 zNJ+Ih0x@mg!_j-u>*5UqoV%hsLVJx>^U0Dz`}Wbo0E@;Jp_q>%-9_+Z^n0~nGV#t$ z#KbRT{wJj$s!&!(Jp1+n+WW5ci#xA{fHjo(0SrVN#OdtWB<7qe{pb&@>0aU&=h~>kaLe&xn z^^$oE~#v6PLUdL}T(5*wZaQN&1Xfo}NSe<9J_(sHELx zZryg0E=>crI`^2;W=)_si;akwV>Qr*WiV`q%vQlP~&9yysO{Wx2_3lKE?Hb zTz*LsOnU#c?%kZdQ5MY00_)1J$>UWpltC4q7DskGAT~o#ziuR`g_hj$AoLWS_45vj68}Laxt;>W-l7KPW%`xa@uPn&HAlFSd?sVGs z65ep#R*@iDMLfy5?}pM)j$rhTvHO%Kmaa_OYWCauK@0``o5T~VJqC%XKlfe>;<9Zj zB30@_f~ces6)bdXvUSSFY78^drrm$@~zGp-u6Q)XI2IwX=PlZkDge~UHZK_+Z zwTm2{D%q>&(tu5_6E>A|?yPu|TAWY?1s|=$^mQc*ZQ%Uq_2SHvWscRVp-SF#Q3Mvu~3&m&up>qo7ENC+;DCU7KCPF6gXTrUOz z^S!%8?iGjots!X|gaOV84-4|wu|$(+kUzJv<$g0U@Kf6k_H%ArP7)Xd1?W?$@zcnt z23fK}Y5S=^%YW8CJdb}jYx7Uuhnt=8KfV5b6xQE2faE{71Z{0h%uSsf^a19!HjWH* zjsSgtlcRyY1Kq!H{vNRKN4oQmeX4T*%|buR|HRQZG&Hs{{)Dx4pfhxGaQNTx<^KZx zfe?tNKLq7P+wmysh1U0d;)dv{=b!vZ({wK=6R}t;=QvMUg&eqP!&f3_<>Hk#m zKjGl~+4+A==0D*$*qS;RJO10n`s47=Gy3;>{rC?^e@^D_m;C>Z()k}y0JgSP|2Gu; z|5Kwsg}eWNVs5Q(YV1fSU=Fa>w{!e|U7}7PjCLX}AfP|%ABB&AD7^nXz)`4{xR z{uY0a@cARA@Rx^q0X{{XHD$pMHO5^8EW#K-K*(?$;C~qa)Ma^ literal 0 HcmV?d00001 diff --git a/src/docs/src/documentation/resources/images/hdfsarchitecture.png b/src/docs/src/documentation/resources/images/hdfsarchitecture.png new file mode 100644 index 0000000000000000000000000000000000000000..70e434c1dc619b5166cf4dd907cb0bae7b710574 GIT binary patch literal 40571 zcmbrFRa6{L^yjgJKmr5^ZV4LP-4YVq-F0vuAh?DAK?iq9aMv)nI}Fa?76t|nGT5NY z@4x%7`@9c*PIp&TSNAc*MWm-=RG45YbngCuCVCa2L znx3{KImN0>q1?BmB^v)KVZfDb%P1(o2 zrR-)vc@!J*2yNEdIQ;NQ5XKl@m9U%$ScuzBiD%s@;_ zV@)sL)$y(TSiSWf`FKCep}q_sBBAI5VT*haE1AOXagLSGEd2aJUtXWkDx)HxkP^to zI>O^qX=p**0*M_pGtVr(i-9a(0Ae8h`poOjU2Zbl2DFdX%cgP1ylv!=l1Xr&wN!D_ zC^Wc|@{Q$=*?U$@bbVpcXo$kT!9=XIHA7a&p=M~{+FAWMr6!McHk1DypGWrl#?XF2 zEs%G*o{@?do+F&eSB&|A#4nCy_e@vA7IPahm@B<8!>u^W79E3EG$Wv)6Zmx4wbA)> zoIOUf`UryijJYKSkBn*coCpcQM|N1JqEeJ<})#1^-rj zLZ1!tB_Vxz$u(hQDQ(%3wU&P}NSA-)UQ-|Dh>zj3Ue`_^U8*tqW51=ELR$aw{71&Q zI>=MrU`dzO`!L+)UZ&q{%zs*Slu~_MGr>@JBdM<8{NUh_1kZFM@Tw1Mp&autsf3n} zmT7eaq-E&Ut2OWm+u<(xciB!mORs3c@D`U<;E==W#G*d(>ew{s0nSflwAOMt(sW<- zbh(=s_yE_uI@{P|9SpQMTG#b*4nRH%Vt4n^l)Vwdl$Qvz(ofxuy7uT&m48I*nnM|t zH7PtQ6Z;#&oW^W;E%kaf;E|E$ke+fvK_Qzx(|#{)+Y3h{C6`|DY8=k)+x2y(B}O(J zTodGvm`$7f2nxJ8P!5C(064uR2(CY3GT#Nh%eYw*l%%@kiqRf@%g%IEJVk_x&3ua3 zET+HSa@B#0Z0UQrJepw0lM7+FXn1`5R#RSNU>kPd0i+X^V(@YS{9O#P9hJ3dJ`=0Xdz{-l04 znj~*qCyZ;?-}17uodZ!9E<`ie`X^XrjiqCkEwKfO8NI=++8lcoK-^!+o)%IFQ9#O@UEO}t8ewl`hL_}Q= zx9WG2=)fZXKLrxOst;B%`{<7xUuoDAokAKJd-y!|@NY`LrropaUNI9F*nkG(Q&>oN zW?#w5lMZgR)48)K?I2ExpQg%;c#@(Tl)j8_e$D&JeW4a6#!REX^4jFE)BzrLq^3iw zMn65@DKZ6ZlF-pUQo4It$- zfxnlBQyLA0_1g=fyTUHAb<)1ittO|Tb*uM+Pks+2CSLZ68|n!c{iHTSv6-eh@hlw) zRp+H84?Jr|vy_dq*^dV9pXa0B=Pw<$B$NV9Zd?ru5M{% zW~^;F3Z0lzg$yT2&$4B)UnI*+MfuisXNpo_??ib&2c<5yS&#Y}Ns#|ieN#Xz3UG@% 
z;-jfjCA8Mn;h5XAe{u0y7g$QZfEkd_8|deV&7m&q=;oLqVWv3qzfEW}R!+Xe@l_Sm z^Gsh@%H3u1h9oZqkq~!qS`nGZpH%&aw+y#SBoe`E@d7#g(x->%9>;nh*}%t9VnXaN zP3!1Q7AW+IYu3h!T%=~pBh9kfcBcq*#|JI3GyT~ElVoMx5ka*n?!HRW1?k)BSEs{A z%4gRwOGMtX&hr?vBF30)~Z@BDdddf7h4 ztTMGwQe=HSfI9r%K7>ppLpz)sVGM5vj2I6}Y}>S(-cWuxL~4{&DaWU{EgRg*`p8(* z5+-Tv{AtY4b&OH^?`|ffep}*+0;^)Kq>7z`W{J{Xw8A{UvVG(ta#zHNmBcpDQL4fb z_tiv1Bc#=Kg-i0UmZGfK8>Zx4^oSLt2jV92WQnF)6(H$t#K#5 zDm_=han>l%ShIMc#ShdrCD!KWj9Yy-V|nXenXXNKF#Gf|h`CG5DORg4h9-%rVHoEN zD{;o8E{3Cii5n&3nCzv7ZcJ$=iQ7@&qs(i7mP1=)62A6Iz-$k??>QlW(*N((nVGGfS<%!4~7XP2oMrp-#VbT6IM6LWgtjo3cs!mEE$9=i}%*AE>KXy}M z?pEJA!6YRT;=bI-MR#O7xNKw4iCV-UCdL#B(e9cqMczzS7zPEd9KLwkojjVP(Ve(p zGc*xbIST(fJn#>(=Jq4e+QJ1AC$wK7feaF@ei_>Get5eZS zBC2-prldfJKwQpi zb3NvMZ!~k2FHHwy`}JmjgsEC$4fn^(BS)tu?UibL%N$zl?r(OlXaju$y-i+adVGEK zb~F6R^(sBSu$|~?`&j(8&Op+CISjVjO}Ycg@RO|7CpU{NKqQ>}aBsgHm!z90OaM92 zNk*gQd56W$c?Hg&TMv>$R)gGy*?c8h=SV4TO? z%i-#F!pb(YwR15)kI6dup}DUsJI1s>Zb$h7wZVnT=Lrc5$`Y7_s#Q_ zAg_JfDzTemQ8AEiqqA1DhYjO>5aZMgdOSa|H+>e%KpPbRy1=XeMgzH>H`@T=Kz$15 zo0;}_lB~B-nyWeGT=FYYMEJxn%5TGl)T|>j0_0{XM7>TeG^jLr>6V|r>3$#iKqC)6 zANf~1H@V#{A0TunEGou-Ubh&LRv-L`l^-0YV&BZ=lC0Y%RI0RXC|X$6Mj8Y1r-Y8` z>Nr3@btle>Px(r)h)(j!e)0Mkg+Q{$8VV^L;H&m|`vt^x|G@fZavUI^G3@KI>w093 z{pH_|TC_v?_SMhrW_|N7RnxaUcEEjygKJZjwvqq)w@^O&;M!6N42p>q6-bmIg&j4k zBPs|rtQLEJgT3!-?*t*Khmv&;byJ8I)}4i1#3rEPMQmQ~2M4EwY|V3W*pXaSr%zu` zw^K}06qp;x-nOB2Wu1f*eLzoBG4!A*aq+|D?g`WV#pU9t%Da{Tk!?f=(hE(;D@rU?DsE{p)sR;8D zDrKRpdM$|xwgG!L2u=Sidc^5xnl**zX02rR)pY^I#joh9Qs8tP%n{iU^ZxDGEyjF= zcSp1Fc5A;MMmze9Cut~S6ax*T4;NSPQdX%GD6bpvpv0ldS_jpuT4Y;Fhg7DL1u-jRNCM)qb2q?z)SlX*Ya~Lr`y@IJ?|@VAh^-$x z0^GYHya_qkAZ7G2ehV7Hl%WWy2rw0SOJFp%<>9BLDFpq+GE$ecwH+fTe0<-F*gs!7 za+4_QBVYx!rn1|Wc{$=O_N{KkR-c|A?5(l-|JrK1msB*jVdPA4NSa*GnlCh>?wUw8!=On=Q~ufVuEu!jtkNWgYQzC|(J0Hj zcDCn8EY?!KH2A{;7h7aUFJ&WCm`=D>1UtH{+ive`VDHzJK89;z50?J!gBp#2KqD3=K+yP#Vt!0BOCWp?#{FZ(TL=?E zRy)3^E7PvGn;JB8%gCLoLmL#FbyyiVqSR7_hUuk9|EGdVls~!O4G0WVY9bF``VR?= zokt-qQPt~9)TRDP>n2o@LmgnnY=@Ee3f0UEv^KX2Ts#8E1uV^$0U|=tw+CxqcalN0 z+HU47jhTrDxK8v(MSH%-R`r8!00v_DNl&#JJi$JF;qyHgCZEeJ4-hXD3&itfMcc(jsfTiu8-qkU#fe zB|TxYParkPf(gV<6-Bg7O+Rf~*z?%b+&qu?gE4(cuENnjoW1Ya)@emOo9}j^ zRjSnMZ4XBgrd`P7$G~s9^JxR{hOHAkT`!5EuBUtCdY+_pUcj?xI&#OpTFNmD@_Aa( zT~=w-HEj>PJMO4iU%fkf-C1;S?BX17?qo%{qDborSNYO{oyGH+5nF*pzR2MtX6-SW zEL6BZury;}fGtInZD-YrEO=B+5GQ}jz|Ln~;%!g!+K`1(k_Sh0k+Q=+?a(#tWtKa; zMn}!XEB-WaRlY8ThqrY=tzXTjdam0$D8Mwy z`vKwLdf?s9$bdlPu3+A5qp3!4MArkthI(bi1t$y5^yTj_o{us1-S~u$1|6@i8ZDYS z_H2B}4jSzL8osN!0-XAHi?h~a)x7~*6;x-VDja~nmAW?f?qaEd|KOKE> zg^e^x5;3H>A|3-F-qsbWJ`+ z7~ZLC;G_<*IPq~d%+;xFA!fxSY^hJscV=3M%NBBZsyK9zIsxQY~Mu#vfGT{at@b!+yh3f%)4Pt)b4b z zqk}rOn%6_XkT!-~6^4Q++BPwL4{S4d0%j)ctL_3O;L7i zL*5X3vozA!CZ1Kk_%GGz5;Ha2pqRj)-6}iVm^>D#FB{2hlwv>7bZl)H(g8q57gN*A zoIiv|mz@2$?RCYKD0Zy5{XO(64NCy}&WUz_3qpPgjg7{U#bC5%k<)gs%jHD!{e|rJ zgI|dhvd})jEs0>Y#Z@;n;Ng2B;DBW0@A6@C_n(<^fRtFO*1u6$-x5fp92#`XJj1F~ z&)*Jt#?vQPH6rehT9Xk!1gnH2;-NCP`-IKdhmLU+l~k%4XEcB#5O>!F>|AM#P4swi z-fB~WIXAgcLRWQS?KQBwFqnJ=^0&b=g_4Wvm;B!uoT&{n{e|O{iPwWcXf*G*;}Ys| z;G9CKScCDiCjp3!6yAj(i%`88H_aL4Cy(5#uL0HM>^1Ye6`z6D>*7H}oI#KOJ}J%% z{Y*Ae&52UyjsqRtN~h(sG9fM=$Nd$Y_9KiM&SDY+m2U$s1?NG*qrxJ6V=`6zE<_FSi{eEe4S z4yvPPf9S(B_s_b!9bu6Mshts3t1K+Y+~2Y1{S62kxK%YD6>V~q#8+G^vO)w-(}?Qb zrhRAzcrt;|-in_|D)dcV zJ>dap%1~yYaK&FGHkY}M`PS&}N1EXT@^KjoHrEpZ2MZS38~M zbz@4$01A(0yfVxmbHgPfJ(rVXCcz5FMKLD^Uuik}!EA10T^G`1r`j8-()H9nf`U{z zhQ2Xx(tF4Cw|m;NRwipy2bR$=+0HjFdU=J@r<0h>KB)*)!snpwv}Fu-i?1ekgQZm- z?NxGv5oEPnV&2XWF27xE`fKF}D>U1S> zV%Evrnt5r06p9M+E! 
zQ19-rr61C6me4|XM15ac=vV_;bn?6Ku=%{DdhTepJ%GKQrx}4n@S9*{t$GLs0t5ca z8Uun4`8l2e*wb^{j;4HDeN1V9OP2g$>~EGohPlMOT=lr4NAu1{)d!`a^#WNZ#a1c- zn>eS^kS*nlSYl1lpPoK)^$Q=P#3qwMO)|K6YRu;^po&*tGdg+xt@;n_0gx#MQ2&NY z#dJVOM@7!C(|1SuKL}ud*=ogd8l!Y^pDZFOA@gDyt9cXpi~xRQiiqW^g*hssZTRWF zDHgYB9FRtLhXC1XbmrhJ(gNH1Zos>{kU8yo5a((f9C z-L>xMha;=_S&ts|4C0OtX{>6EjMm(oKkfMQ_b$g!!i>J-)C;>~xWD{A7JHRWmyXXR z%SC4vXC~84*%K2i{GANd9fKF`|EVyP(0lK8|*6KV<){?CctKUul%Que;aYW1+Owz$HUkxJdSe?y}|8pG8k^A@j zcnNeSHkxFyrc33vkk=45V6tA%5A9NCj$q!8v2Z1Ot&PCFkS%@mt;5J3bCNzmDcMFP zM>Q}}^}+K(-p;ybZ3P@Av*y4z`)p!LQf~ zHK_{e*?Y6RG?`5%k-`%_#7pWUwGY8NqCxw-;L;fPUaGR(aVT3#n75WvCB9@s`j#e+ z&ewy3IAcc7OpO8Cw}azIGiUOs{R9gEeJFFG;h_WN@OdjiFt!Uz^1fl05~)vl3%k7r zyP|t%PjWhd-o-LotCPUphjHD2)%c^}dAW0Rl-mj!oh^3bOa&A2RClxNPS@Ac^k=M}+ruCDONU$3ZNR~3_YFw2wcdhLL z{biY9fWZ1(e$f0vi(3G-PNqxpKELxLgK0R+ThxAd^}F|Awj%!rFN(PE)Sw)zEI`%= zmi5l3e+yeTSCc_qtq8BdW}G$aqol1T0v;ZpPmIG)K8Q?revDUH{zzz7qMLbs2+4BJskZ&dm1Gj@wMgPx{y>m%%`uFl6;U$tX`I zwg-=d`Y6dc60xAG8xJ`^~rBR%V>lPJ{Gdjpvz0`>J$cxpIlMnRIb_MD17 z_h+UXoix8JFf!k4iY{*_7JB|2&<3oo6N$^`m^R4 zV}>t+@f8JK-}Zd=&7k89&P}C3z#N>;7u`d@#l4R(v!djYHFs}iC;y9D_-#E|2Fnm* zXvRK{U;1dQ(MoI7>AU@S-Jf*Wr9AlKaLV~hNpzF30`ByJ>S%Kab@bVg4x}D4CSZ|> zWMN=S>J_>WYN+r*E9nM%adBJBLl0iz__Qh6j886JNSdxsSM8p&I01^U+beKL z_KWGVA+=-dK=!eXI|`Q|yVsRXkXyqS_4@a-lOhYS0d_~Q>&H3$+o5n-fyg-H|d)TG& z04>N`u}VxwVvNbL|weRPOmJdlARkzMyAqkYW|L~)@MTVIy-2pXI|rG-s|=d3&*aEz1ALVY zyJ$A=d)$l8@y%4dH{#0I{*EZBdEOZ71#f9$KGVSiTJ)C;%B~>OPKs=A9m^UKR~Ab_ z`0v?8qi7dmah>~`=_l%>#jouv`qZh%*gMJN2(mMzs$R{mSe4;* zFaaF6(tyz6rLP-}WDQQtk6R{kc#PD~qnuP|ok3vNY#%!(Xd2#ll=7$j?$~H5@(g3# zyt)*6N`IMJ(m~t3=~(Tcq@-m-!%f}m4Mfs86&J_tE1kZ5_$!BI$}A_DPrNsjOGhze z=`jDLU(&slHc{(f5iLh$4yvcRY}+^re<0J}nZCna0e?xsA;NV67=yT>|AZy;iHK0@ zD&>%mBKWF06p+#F^{4W1Iin0>E5W>993-OZmoK3ko1{ya4jT^w1*!5PcGb^VkCw)M9T}j18g0@_^gF7;9b5?81h>K4DRAi;f$O4?jB$2!M zG3lnFBOdVB`O^JJC#~qlaFeh2A%N*)<<4CR!Xw|hvp|!;?=Oda7G7a$D39A5TRHi2 zUk2+3x{Vnyp{`{NxKLj2!(MW;`sRCvUUUXrrWCQ+}?=sL(S-$J=Mu* zm>%v1C4ElGPF0|G7JzG_2HaZ?VOR$QU;wK^lU40sv(Bp77bGGZ{ZYUr`5Fk0etIE^ z=z%<6nSCX_-@@}OCZX-~OmA$5ZAz=`L?(B*(*aGb*)`&0Raqo6>2pkc%OMhWUpOlp zeMsu4v1Y$|F!BYFjHKsw0J-5DFrf_(Vq?J?rSO>vE+?HrAhx>18pO${S8^i1(D{)= zx?#b)vbJ`<<^EUfSs|dMNFhkaInGh&8t7L1Q9ID)h;GpEO?A>yu{hHP&bO|DaUk$y z50ca*#CBrbpW8OAd}KUpYF^Mzs0bz%GS58fO=9Yzaf*w+y6?8LZG#)xV0EkurXpGw z`6DDcSq*E@B5Ly8?A?2d{j7pjhE5Wtk=DVWHPCj*@gs=uL3L2~tSzp-qQLBJY_Cv^QP;GW45Pi?3p?ei_ck}AG!)BOx-@TWn)NQ`YSV8I_fS&37$uFi7IG#No0 zcBb?oCd1f{8&(q6M3q_+#Db`*4|%cYKYx0Jcd9&u8*a}u&%xn$HV_ru%ew+97FjMI zNeGmARK7?_agt<(c!2Zuz7)->ppRkiZA{hW(*s67z&X<&{|%^!)N%4e3b#5x3lH6| zqjZ5BaH~H;gyZdA-@sTE>Xv}yZN&pokTG*ui)gYHme05klntk&BZxbqlymmoyw;<= z6`z;R#|3P{cXy^gp-VE07=Q`SAc^D(^MjXhR+z~nT0Dq1yM9aCWDcSOuq2Jd3Jc|j zdS$pM*yVG)O^S0gN##)GG;w2u+sMP&j_gKlMJO;S0_*b^JeypK4#)V|OghejMyucF zu*3SWA>Ymz)bFeSKln$-f13nWV%?4Wp(zmXz#!!8_GamW#|RFF8$H-Ej%hK!^)0b5 zb{6+gx#_Sa@E#LDgDqHBq{h+pT{^Cp5u#qxtOs;9pKwpLb&|i}CK6D<6dl~c%Qz~! 
zddALcv;-VSt5Fe7+|8~-@jIwxZ%??TJ+)3&E>bFJ|7Ze$l4H)k9tC?E1`bnX2K-=#AVb zL;YMZaY-O_efA>F;USmPeDG2*b~Qa&5~Tl;=S4`lGvOYLQ4T$VQ@k9|)Oq6kE;1}) zEKKKetY712wb7g;`@}g^SovRpm5+L-ExD=4^a4Hq8_lwq-k#O6A|Lj#grU0tat2x* zj>VK5w!%$?ae^%!4t2JiowOr)UX=%y68;K{CcTkm0!3{Sywvg^A;%~Is~G%UN^>=a z=q2W*CzMm~UU0}u5nQ@U^|E~E=rz@}xv+ZY*(N4lvz6Qu)@=XUtODhGfT2C$-(kw~ z(QA1eD);Q6ZeBUX5FFs@`zcwq&)(6P0_q>|#=zd)QasHH1@>^DhtzirLHCS12gf|DUHiVV(W-D7&A)7y%e4D7;g$wOIXV zO_$&1c6?jc4pC_ui%4F~dkKp23NWVHgN!>zYgVI5z`LMjOQ(s_s(FZCb!^<|jKn9J z9-a3bt8tiuf{i zXpCRJJbw&;LD0_7uoi-WjQyvT*&rxTsS?EKLD!1P$-IXCy6+2(&Cqq%W7eay4rPY7 zX9gCY^V3kb`bwY~rb;>^$n*u<`%C#R$br9W&CQ|ni*{hJsGiqm0orLPPTSUL4pXzQ zo+rV!%{DH~qm#_w7zlHIoLyzM@hNcS(f7R8YN(~%{7pD7H#0>ZiVQ;MkUUG(lZfXaRrdtG2J|vLf1X7#vljMT(bC_ zhJvcc;F^E1c6mJUr>g&g^g?!amM|mEQ1CrW^!~PSun)`*wQp-s&pz+92Gft<*)wz5 z$c+I+Qj@a($_ZT4I-}pd>UPYqmDOL7tkM+QXA;?Uj}xlDG8WEy9%bLvbHJp8xE!b# z_`waEuy6GcB6h2SVGl=rV5+puAF*xjy^@zSEzHwxCCmj^YoUQ~%nM+`J>pnKe$^xy$2A!;ce=Ah<(n^w4`~eO5V)t>TJCg zeK1r+FN%5$)IYRkL7PRY#`#Y8(6yQ>BIAd^d9~fxnlf9(uahXB(s>>eelVZ(TN_cW zZ3zXY+=AI7%u7N5F9u?>3L;iv1XVYd(YWesyC8z|a1-ii&IL zc%fx~qs$A73jf7+9PvBmVaE-O$3;k}phwbSe1oKSSkQc{yG<&C}E`h!uETgM$V;)Ywd&Jh&Y@apw8EFLFW}VB+i$7g*rA?Ry zO>n-e3s9MnrkCzdyXK)_vsj= z4b+)y#JbWubQ+AY8;QP>g70$w_-v2Kc}coQY(mP@WbIeC-o(1QG8YfE4AAl!3LXBi z2li!w1rL+)R!NL5)m$1RX0xpP{S!?-R1KJLykzvR4U(1I1lHw5z59BpPMud76GbAG zQvJ{WVN2g9(a*KDK-cvSwyN5${1o6{b9LOc9$%iCcCTM*b1c=Py7i>wjxL`g`P7L2 zrRVTIPWXEMLtHPg0nY%Z$;k=VRP{tY&Wf4wbcgemZbiRq%NeoTYNEi4TI$etX@GTM zg}?m2K*sHl@N?N}Ym=Qx%xA5D>qz{gj@fD!<0YRJf2}mX zfP=jyjm4KPu&=mn$Kr7TB+WcF<{;#Fu>pZ~Hf|r!=%liti zlU8XEDa-yGfS1MrhIje7AA*N_B#t(l!eZ;JrTG8e~4j+VzUwUe7 zVr7`nWRM=)MM6LK*WPHTi4^-N7+B}(82A{ir#86YJ+GPyZ*?+Kw?{SWe;elnK(YcA z?ArV1)KR)$L8NHpMZMV#|2Xe0&k2mrt{+=A8H_S9prbC013JNOFr_az^=}BeYfD%^>Rw;? z#k7!=pL1z$UTj67mW;mHC)ysZN_`rV4?ksoy=uf2DWy;Up}h}tdx7tP4#Y@u#Scgh z#cU{fK$F}JQVm4)Q2z10np!B)Olblt>s*6FbyOCU(!HQ_EVf^KrW~BqZ1q0QOi5)< zmLXbJ{;&FJdYJ9}4}$NnNGavHu!jy|m-ydyKdVWNIG;)cQ~snQ4FO!#slU(}7Whi* zV*t^84ArlT7x?BX#$EAG{dEckN{pOK0ACc06LOpDkjD{Jm@=)YydWW_PH80uPyR&v znMkXthheV>G^j!v_~;k<1NsFllt>>GGIo!bfym~>XPg|`;5|v}^NmU~m*ykc<0Sqr zYAxGMtG60-=!Jsort`$FU34AfidTo{7Q(4b$n^argY#bL`g7o5$5?ONHA#|J?nmkkaMhHuFN;1#0v^xXp|g#K*=H zr2x~%%7|c)ZI7uEei2Oc+a-MH2A)p@WD|Hb8syO!>sFU4Dp_{YP?=oUQmXN^v$5mJ z%5-$|X8x`D&o6tSR+0*%ec@w^M( znl1km@kp9Qm3svP5CLbv5^|0ahE4f}4uU_{m3A0Q%wg}|fK%%mS7y3l>4cL2n-Z{~ zto)fAonW|>-LJ~ix#m{x@@=wWamV1_mSf9&y`GQBaOD?#djWUI`H9a?<_WfJ4=Xjt z;+vsH$<*R`iTPdHS9#oRWS6;S@5=8^_h}lnBcSpD3|HrRqCDZ2Rc{K zns+u)3jf|yGkCqBNa!}iK%e1Mm=88@^P=oAlg`j|XcIkvxc!GD7}YT$~7 zUbEp|NmOo4mCi|u`6lAnG1gKmLWeMZi~X+~OC+uS7mQ zkUM5(=$Uc9GAkFEQxO1dZjo({Tw znMVEHWP1J}qf1M;CN*FZOM|;oToru9*s8&|ldTBXzxTMT;#09s1Hh0w+9T{}ZLP#E zme#j#9|5~3BDjtFIO2_sot@jRks-~Tab%slmf5PkeE6w_iD`yu6bzpSg4iND5gA|N z-gQ1mD9^FUODu^Uq(gGw5zaDKX1g-q54&L%P1RX6Wg|PoGWPoxYQnUZ#JnmFHJPDm z8H%Qnrix0&QzbtK*0HXumb|QbzNdO*TI->3k(A!Y`5U%=Tf@Z++E=;+uc>s|P8u*Z zAZoPK7QX32Cj0%%OTY`7+)cKkyt5HdS&2v}@c@3nX+Mibs||@5)Z3iM;#SGjx*V5uC4&LwX!xOv% zD%-ygr->$NH0L~j>H%Gz*jU<&&GH24go+yS_o=b@rhG(_k$MNTiiHjG8&Tax6W&vN zl<(PRe)i*SB*PT)AASIn{ysyWg|3;7NpaJC2Dl=U*s7?V3oc{#&g*%P9}7Hb)HhyF z^mFcqI;u_co~anNxW=WeTfWnIVc0Usp#JU%cgPs*z;O(I>*A=$J39#-g0rCPL>_qY z?;1TB;)!+pFX~mJly2+gYP(f_=`|MkJ!mpTRHaBFDxLI}=)8RqZYLNe3R3f#ZE?>@ zI~|?#zH9BWe{Co-d%Y68pKk3xxhb&lQyELm%7+wIr=xYqro%%j{m zS9|tvobvvYp*4BHkMWQJR9UxBv%n819aBusUswfuF>YV2puLv9Dz9oABHmN_1$rx6 ztL8B)&E54vt?T9HyC{RJm{cIR^00v3Z-1XAr0x>Ghy%rpS?dD$8 zTCAd~BnqpU){}ZWfY)~y73Ei``X!JBNN?PUJV7_Fe&W+VXGHbkVM`mH&s_ebh`_3oJv2{e@843k^$xF?>L6ZDDQ z{RfH*(?dDQ<@(AJhe-loLFgUp!P|vFE{A}izmng3Y*pGtow9Hmt518RH)H#AcV%{7 
zvJIKw23ru`E|$0&cF?K8qsBQufjs}c+cEgqWC60t zmHQSn@qN8fd51wV^vCLNxhiF&>TT0sqKWo~uUi%dCYo7gp!)nK)t&=t zSs1l$(-w{%s4SmoNIn^6RDzoF3^O22vvMh-cI)<1r5RNi+_14)KgfHHvd$`LYMZ-i zQXEK|1y`pIunuWjM@NJrGBsqyyTUAQ;dI&TiZyPiJPhNqI_GO~Wmm8{TrrS)0tQ`G zlYVh@!_ZOT3=k_(Hpe~2Km7Nlqy#WKt~_J6vD4cQ{qa>hDl1Em4{y{AZp_UhYDmVZ zi@FT1LA$-1x!T%08CyNzKu3E5_Q;{66xtIB-xhTF0-GugSO;X0 zc$uBx>?g5v)D4`p+MZyEYSiX^Dj$5Ak8&xZd_h~`B{*}CoKO`o8;{{-BN`3#l_t1H7B7QAe z!`Y^KsTt@-sJEbVo6NGNb;p>J6!*a=x}_z`0NhGfMk5?k7?Emq2)L1FVpntgOiR zdE6&D&~Jry3qy0WPhS(y!skzCK~rSxfWa|0a`%Zp=(nXzRFY3_!Z#};1D6J13L|(Ldjg*B$>#`01U()Z=2s;zF0f zsH!0VzXnH$M(YX_<5th&PYLu-f7K>_%%@i|dQb~b=cH(`>%Dm#*!PD}Z<~cvZ@_FEJ!Vv&Xar@A z9cHH7&>jkH^nu?vum0QL4DAz2ZH%rTPBA^MXCtTE-=tS-&3?w2(OO1wS(aS2?dcU% z_%Jgp)0tQo zWQCoo2hTXomCk{u9=KlN1tv3)KlZT)twXi}E)2hz7?>9u&(7TaV#bz;W{t&BL+xQYEPi{Yh`^D#04 zi#wOIJHK?TALw5f;$`>+{ZPj1Z|IE`S%2%-1aRcUH6}9|WIq2v&$HjmX?dn6!v!y2 z$a8KV06g3-e*ETSC0-65j*ZTe*0>3l*35Kf4db@MPE3*|^h@&%A5&|%gFC3f@nGZ0 zx1mf26zuWT681r5;QP*;kynF8M6xB04(s%X3$n-8aQ@eVqvGVw_MY(!M0egqB@p%r zEus&N0ckH(d*{?}Ia6c(FrxwV0OA9?*AJBU_^AW8m+E~f0IRBeG>bNnevyTS7O%7^ z$BQ`!2JmU5_mX2qMXcg>J|{~5b)wN|y47&8X^XN>HE8_0q2&$8unIeKejm7krn`6R z-1(;3!q%kyEsT^mkU!_yYC^+mC+L>=m3VtFnGupVLCc({?vG7?hbFKKsvh6ekaD+n zB}x_>=eq@+=ckA=SBYTz}W#+2h*;3p^YPdn@|1$_QszId^d65JPp6DqU z6==83BGotzQ!*X=S_G9Z_F%1Ck#x`?^fV+5oM!gfd1uP*luRee+W}}mrZsi)bmZ-V zzkb(72{6JJ?@y?GcFF_!;~A6R<)YDF^OIRYd?qwf6+fJeb&l0zM*J`0-ZChTu8T+u+w$pI?C$d!O&pu{`kgE)pG{{M zATgmb_gaT;;@C=wHLUA-?R|2rrwb@Wc%nbCGn3P3d?duAS1V3{5%VwDQ_~T>E|+W6 z)7j6-ptcxHFa`6UD})qg7aQG|#*AHx#BhkJkFCr?uXC=)7VQ*DhC`)rPxRl!K&&yM z&tq4NfuI8A7r1Pj`uL!%nKnfVMjh_jAxJ6~Uxn(bdBL9GPYd8%uvZfyKh=-c$P>`Y z(Sag;u>}>sg{7-aBD*=crr<>XsDmR087!JL@&Uzv!iImL0V)eA3Gxk%VxS=b z^WO#I|8rMYu*8J7K}k2sgi6bl9os7jXTED$D`|jY`nQ}uEzPGn1^mGxc4E#*2B1A= zuD_)@$23Hq-0qfPm~1kM=1_}K1+4=pwudem?>pkdR1DKFyJvOLmcf%>NElufkkNcH z1SSOhLu-n(JZTnnoJgRLp&H88?rl8u%;gsoY7{4LbBE83PT;}GZwZR z>oY2T%t^wUVy(iQ&rso(39K#D)n`hG-bpz#4^`<1_i8W|K;wxv!Xgl)EPz^O{ZE^u zm=njf&d9n$F-T#k#pxmOy3Sg7kT`zW^xcC+otH$dsDU&BmC1FhW>qOoP}61U?*!uH z>$qd>`t5Av3}S#8J8WMZpq?2CLm^nR~W% zjD8w*RGOWbfnsijIcOQ`@qKA%WAl$%`>%)J$IE6R9_hcwd2Y(AvyaAV1t%>mEPsV;`#P?f;!i8Z z7#Hi?As=k-m0Wi=X{~m!li`ziWLGv#*wTgAixOCVpGFA5?61n8q8busW~`Oq>bt20^TpY`z&d|-7psoHQAMTG~~r#QStiSfds5jQl| zhu&PAaF}la@<+SQnM3g(v6kUatz8KQYBwK_1D<*dW8u_zDc2`!2=n}g1m|^6B6^(s z0M^bFq1on7^~{4uir;^XWGT1{Av^lzf8puF#szx@4oc^rB_B>)V-EOCMqN0dJLSi;L_vK2j{?+7(SxW>u_INvUDWo-L&|4M=EyOP@O;IOS1( z<@~NMHhx@}|0RiP{oC1A0r_IH_~7qV%qB@|zGiQ$iFCP={PdYMvo#q*_$mH69(^*` zlHIR9!WgK=2xtlY%>($v%@I8H@eD)pa)V##s)jR~yr@Y^D;}# z?dV zowg)!%)wPuNdRG`(L zhmI*8t2SId(F&sMQHYqlUvn#jSbC~#Qr{%rOw2^DM{_xFdE1}V32#)^2@$!4Gl&@^ z)|8Z{yW30P#|y4`NHcv7A#UDQ_RuiOj^m?y?rG87_cif#VOXZuVWY&7FGwxdb&8#7 znIHR>UFbtMkMzhxzacQ!^z*}D*+R6KU7oM9tBrXn4-UgHR3;6ej)_g)F!QX8u>Rx(KRD;q!L|OAuzS|VsVGPvCJB@u3(Qxlo zr09!TD%H?)eT@+wfK;F-*A}F@m(C5Jp|zAelWDX4O#W6131dPC{wPVkgq>ys`SJ_Q z4!xzhjgCiNw!lmvBVy;ZHd>}ZEMX9ka5ymBk<``+crmjxdW228)P%9u~7Z18C4Ydp!js>g6qq#(xmMKW1!O8iEb=x&sMCIZn{ zCz{G+pMTnFy)X%?0$!I3jNNurds$f&g2=Ym6nG&Bs+PHM6+*^syR5R)pLnt0X?WL30f`;OM_eXcUFBMXxa#|qXW5mS z=>&7I)5r$4sL+v{doe-q-whU&3L1K&f@Fi%tQ_s1WA}Foa@K_;98q`_(lTPHx{^